jet-ssd-ceva-export.py
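"""Export a trained PF-Jet-SSD PyTorch checkpoint to ONNX for CEVA deployment.

Loads the checkpoint named on the command line, traces the network with a
single batch from the validation dataset, and writes `<model>-ceva.onnx`
next to the source `.pth` file.
"""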
import argparse
import onnx
import onnxruntime
import torch
import torch.nn as nn
import yaml
from ssd.net import build_ssd
from utils import *
if __name__ == '__main__':
    # Command-line interface
    parser = argparse.ArgumentParser("Convert PyTorch SSD to ONNX")
    parser.add_argument('model',
                        type=str,
                        help='Input model name')
    parser.add_argument('-c', '--config',
                        action=IsValidFile,
                        type=str,
                        help='Path to config file',
                        default='ssd-config.yml')
    parser.add_argument('-s', '--structure',
                        action=IsValidFile,
                        type=str,
                        help='Path to network structure config file',
                        default='net-config.yml')
    parser.add_argument('-v', '--verbose',
                        action='store_true',
                        help='Output verbosity')
    args = parser.parse_args()
    # Load the SSD and network-structure configurations
    config = yaml.safe_load(open(args.config))
    net_config = yaml.safe_load(open(args.structure))
    net_channels = net_config['network_channels']

    logger = set_logging('CEVA_SSD',
                         '{}/PF-Jet-SSD-Test.log'.format(
                             config['output']['model']),
                         args.verbose)
    logger.info('Converting {} model to ONNX'.format(args.model))

    ssd_settings = config['ssd_settings']
    input_dimensions = ssd_settings['input_dimensions']
    jet_size = ssd_settings['object_size']
    num_workers = config['evaluation_pref']['workers']
    dataset = config['dataset']['validation'][0]
    ssd_settings['n_classes'] += 1  # extra class index for the background category

    base = '{}/{}'.format(config['output']['model'], args.model)
    source_path = '{}.pth'.format(base)
    export_path = '{}-ceva.onnx'.format(base)
    torch.set_default_tensor_type('torch.FloatTensor')

    logger.info('Prepare PyTorch model')
    net = build_ssd(torch.device('cpu'),
                    ssd_settings,
                    net_channels,
                    ceva=True,
                    inference=True,
                    onnx=True)
    net.load_weights(source_path)
    net.eval()
    logger.info('Prepare inputs')
    loader = get_data_loader(dataset,
                             1,
                             num_workers,
                             input_dimensions,
                             jet_size,
                             cpu=True,
                             shuffle=False)
    batch_iterator = iter(loader)
    dummy_input, _ = next(batch_iterator)  # single validation batch used to trace the graph
    logger.info('Export as ONNX model')
    torch.onnx.export(net,
                      dummy_input,
                      export_path,
                      export_params=True,
                      opset_version=11,
                      do_constant_folding=True,
                      input_names=['input'],
                      output_names=['output'],
                      dynamic_axes={'input': {0: 'batch_size'},
                                    'output': {0: 'batch_size'}})
    logger.info('Validating graph')
    onnx_model = onnx.load(export_path)
    onnx.checker.check_model(onnx_model)
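    # Minimal runtime smoke test (a sketch assuming a CPU-only setup and that
    # dummy_input is a CPU float tensor): load the exported graph in ONNX Runtime
    # and run it once on the same batch used for tracing. The feed key 'input'
    # matches the input_names given to torch.onnx.export above.
    ort_session = onnxruntime.InferenceSession(export_path,
                                               providers=['CPUExecutionProvider'])
    ort_session.run(None, {'input': dummy_input.numpy()})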
logger.info("Exported model has been successfully tested with ONNXRuntime")