def main():
    """Dump a pretrained YOLOv2 network via chainer-trt's ModelRetriever.

    Builds the model, runs one dummy forward pass (batch=1, 3x416x416)
    under the retain hook so every layer is recorded, then saves the
    dump into the directory given by ``--out``.
    """
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('--gpu', type=int, default=-1)
    arg_parser.add_argument('--pretrained-model', default='voc0712')
    arg_parser.add_argument('--out', '-o', type=str, default='yolo_dump')
    arg_parser.add_argument('--verbose', default=False, action='store_true',
                            help='Verbose mode')
    args = arg_parser.parse_args()

    model = YOLOv2(n_fg_class=len(voc_bbox_label_names),
                   pretrained_model=args.pretrained_model)

    # Dummy input; only the shape/dtype matter for tracing the graph.
    dummy = np.random.random((1, 3, 416, 416)).astype(np.float32)
    x = chainer.Variable(dummy)

    # Move model and input to the requested GPU when one is specified.
    if args.gpu >= 0:
        chainer.cuda.get_device_from_id(args.gpu).use()
        model.to_gpu()
        x.to_gpu()

    retriever = chainer_trt.ModelRetriever(args.out, verbose=args.verbose)
    retriever.register_inputs(x, 'x')

    # Inference mode + RetainHook so intermediate layers are captured.
    with chainer.using_config('train', False), chainer_trt.RetainHook():
        locs, objs, confs = model(x)

    retriever(locs, name='locs')
    retriever(objs, name='objs')
    retriever(confs, name='confs')
    retriever.save()
def main():
    """Generate model-conversion test fixtures and their JSON definitions.

    For every entry in the module-level ``generators`` list this dumps the
    network with ModelRetriever, writes the input/output tensors as CSV
    files next to the dump, and appends one test-case record per
    (batch size, dtype) combination.  All records are finally written to
    ``test/fixtures/model_fixtures.json``.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--filter", "-f", type=str, required=False,
                        default="*", help="Wildcard filter")
    args = parser.parse_args()

    test_cases = []
    for gen, errs, batch_sizes, name_suffix, int8_calib_cache in generators:
        case_name = gen.__name__ + name_suffix
        if not fnmatch.fnmatch(case_name, args.filter):
            continue

        out = 'test/fixtures/model/' + case_name
        retriever = chainer_trt.ModelRetriever(out)
        with chainer.using_config('train', False):
            with chainer_trt.RetainHook():
                inputs, outputs = gen()

                # Let ModelRetriever know what is the input
                input_csv_names = []
                for name, x in inputs.items():
                    fn = name + '.csv'
                    input_csv_names.append(fn)
                    # Use a context manager so the CSV handle is closed and
                    # flushed deterministically (original leaked the handle
                    # until garbage collection).
                    with open(out + '/' + fn, "wt") as f:
                        f.write(atos(x))
                    retriever.register_inputs(x, name=name)

                # Let ModelRetriever retrieve from outputs
                output_csv_names = []
                output_shapes = []
                for name, y in outputs.items():
                    fn = name + '.csv'
                    output_csv_names.append(fn)
                    retriever(y, name=name)
                    with open(out + '/' + fn, "wt") as f:
                        f.write(atos(y))
                    # Drop the batch axis; only per-sample dims are recorded.
                    output_shapes.append(y.shape[1:])

                # Currently only 1 output is supported...
                assert len(output_csv_names) == 1, \
                    "Currently only 1 output is supported..."

        # Save dumped test case network (model.json)
        retriever.save()

        # Generate test case definition
        for bs in batch_sizes:
            for dtype, error in zip(['kFLOAT', 'kHALF', 'kINT8'], errs):
                test_cases.append({
                    "fixture": case_name,
                    "inputs": input_csv_names,
                    "expected_output": output_csv_names[0],
                    "output_dims": output_shapes[0],
                    "batch_size": bs,
                    "dtype": dtype,
                    "acceptable_absolute_error": error,
                    # This is only used for int8 mode
                    "int8_calib_cache": int8_calib_cache
                })
                print("Generated {} (batch={}, dtype={}, err={})".format(
                    case_name, bs, dtype, error))

    # Save test case definitions
    dst = 'test/fixtures/model_fixtures.json'
    with open(dst, 'wt') as f:
        json.dump(test_cases, f, indent=2)
    print("Saved to", dst)
class Net(chainer.Chain):
    """Minimal chain whose forward pass applies the custom ``increment``."""

    def __init__(self):
        # chainer.Chain/Link performs internal bookkeeping in its own
        # __init__ (link/parameter registration).  The original body was a
        # bare ``pass`` which skipped it, leaving the Link uninitialized.
        super(Net, self).__init__()

    def __call__(self, x):
        # Delegates to the module-level custom function ``increment``.
        return increment(x)


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument("-o", "--out", required=True, type=str,
                        help="Output path")
    args = parser.parse_args()

    # Dummy input; only shape/dtype matter for tracing the graph.
    x = chainer.Variable(np.random.random((1, 3, 2, 2)).astype(np.float32))

    retriever = chainer_trt.ModelRetriever(args.out)
    retriever.register_inputs(x, name="input")
    # Teach ModelRetriever how to dump the custom Increment function.
    retriever.add_dump_function(Increment, dump_increment)

    # Run forward pass
    with chainer.using_config('train', False), chainer_trt.RetainHook():
        net = Net()
        y = net(x)

    retriever(y, name="output")
    retriever.save()
# NOTE(review): this chunk begins mid-statement -- the opening of the
# ``parser.add_argument(... '--in-width', ...)`` call, and the parser /
# earlier argument setup, lie outside the visible source.  Tokens below are
# unchanged; only formatting and comments were added.  The grouping of the
# trailing ``retriever`` calls relative to the ``with`` blocks is inferred
# from the sibling scripts in this file -- TODO confirm against the original.
type=int, default=227, help='Input image width')
parser.add_argument('-H', '--in-height', type=int, default=227,
                    help='Input image height')
parser.add_argument('-D', '--in-depth', type=int, default=3,
                    help='Input image depth in bytes')
args = parser.parse_args()

print('Loading network...')
net = caffe.CaffeFunction(args.caffemodel)
retriever = chainer_trt.ModelRetriever(args.dest, verbose=args.verbose)
retriever.preprocess_caffemodel(net)

print('Calling forward pass...')
# Random dummy image scaled into [0, 255) -- presumably caffe-style pixel
# range; verify against the target model's preprocessing.
x = numpy.random.rand(1, args.in_depth, args.in_height, args.in_width)
x = x.astype(numpy.float32) * 255.0

# batch==1
with chainer.using_config('train', False):
    with chainer_trt.RetainHook():
        y, = net(inputs={'data': x}, outputs=args.output_layers)
        y = chainer.functions.softmax(y)

retriever(y)
retriever.save()