# NOTE(review): the two cleanup calls below appear to be the tail of a
# conversion helper whose `def` lies above this chunk; indentation was lost
# in the whitespace-mangled source and is reconstructed at module level —
# verify against the full file before merging.
os.remove(intermediate_symbol)
os.remove(intermediate_params)


if __name__ == '__main__':
    # Script entry point: download every officially packaged MXNet model
    # and convert each one to ONNX.
    config = Configs(models_dir='/models')

    # Restrict conversion to models shipped in the official package.
    models_to_convert = [
        name for name in config.mxnet_models
        if config.in_official_package(name)
    ]
    # Override shape (N, C, H, W) applied to models flagged as reshape-able.
    custom_shape = (1, 3, 480, 640)

    # Download everything up front so network fetches don't interleave
    # with conversion work.
    for model in models_to_convert:
        print(f"Downloading model: {model}...")
        get_model_file(model, root=config.mxnet_models_dir)

    for model in models_to_convert:
        mxnet_symbol, mxnet_params = config.get_mxnet_model_paths(model)
        reshape = config.mxnet_models[model].get('reshape')
        shape = config.mxnet_models[model].get('shape', (1, 3, 112, 112))
        # Fix: idiomatic truthiness instead of `reshape == True`
        # (equivalent for the bool/None values `.get('reshape')` yields).
        if custom_shape and reshape:
            shape = custom_shape
        output_onnx_dir, output_onnx_model = config.build_model_paths(
            model, "onnx")
        os.makedirs(output_onnx_dir, exist_ok=True)
        print(f'Converting "{model}" model to ONNX, shape {shape}...')
        convert_insight_model(mxnet_symbol, mxnet_params,
                              output_onnx_model, shape)
# NOTE(review): this collapsed line holds (1) the final statement of a
# TensorRT-preparation helper whose `def` lies above this chunk, and (2) a
# script entry point that builds an MXNet->ONNX->TensorRT pipeline for a
# RetinaFace model. The trailing `prepare_retina_engine(...)` call is
# truncated mid-argument-list (no closing paren) — the statement continues
# past the visible source, so the line is left byte-identical rather than
# reformatted. im_size is [W, H]; the shape kwarg reorders it to (N, C, H, W).
print("TensorRT model ready.") if __name__ == '__main__': configs = Configs(models_dir='/models') #model_name = 'retinaface_r50_v1' model_name = 'retinaface_mnet025_v2' im_size = [1024, 768] # W, H prepare_folders([ configs.mxnet_models_dir, configs.onnx_models_dir, configs.trt_engines_dir ]) mx_symbol, mx_params = configs.get_mxnet_model_paths(model_name) output_onnx_model_path, output_onnx = configs.build_model_paths( model_name, 'onnx') output_trt_engine_path, output_engine = configs.build_model_paths( model_name, 'plan') prepare_folders([output_onnx_model_path, output_trt_engine_path]) prepare_retina_engine(mx_symbol, mx_params, output_onnx, output_engine, model_name, configs.mxnet_models_dir, shape=(1, 3, im_size[1], im_size[0]),