def OnnxEmit(original_framework, architecture_name, architecture_path, weight_path, image_path):
    from mmdnn.conversion.onnx.onnx_emitter import OnnxEmitter
    original_framework = checkfrozen(original_framework)

    # IR to code
    converted_file = original_framework + '_onnx_' + architecture_name + "_converted"
    converted_file = converted_file.replace('.', '_')
    emitter = OnnxEmitter(architecture_path, weight_path)
    emitter.run(converted_file + '.py', None, 'test')
    del emitter
    del OnnxEmitter

    # import converted model
    from onnx_tf.backend import prepare
    model_converted = __import__(converted_file).KitModel(weight_path)
    tf_rep = prepare(model_converted)

    func = TestKit.preprocess_func[original_framework][architecture_name]
    img = func(image_path)
    input_data = np.expand_dims(img, 0)

    predict = tf_rep.run(input_data)[0]

    del prepare
    del model_converted
    del tf_rep

    os.remove(converted_file + '.py')

    return predict
def OnnxEmit(original_framework, architecture_name, architecture_path, weight_path, image_path):
    try:
        from mmdnn.conversion.onnx.onnx_emitter import OnnxEmitter
        original_framework = checkfrozen(original_framework)

        # IR to code
        converted_file = original_framework + '_onnx_' + architecture_name + "_converted"
        converted_file = converted_file.replace('.', '_')
        emitter = OnnxEmitter(architecture_path, weight_path)
        emitter.run(converted_file + '.py', converted_file + '.npy', 'test')
        del emitter
        del OnnxEmitter

        # import converted model
        from onnx_tf.backend import prepare
        model_converted = imp.load_source('OnnxModel', converted_file + '.py').KitModel(converted_file + '.npy')
        tf_rep = prepare(model_converted)

        func = TestKit.preprocess_func[original_framework][architecture_name]
        img = func(image_path)
        input_data = np.expand_dims(img, 0)

        predict = tf_rep.run(input_data)[0]

        return predict

    except ImportError:
        print('Please install Onnx! Or Onnx is not supported in your platform.', file=sys.stderr)

    except:
        raise ValueError

    finally:
        # Clean up only what was actually created; an early ImportError leaves
        # some of these names unbound and the generated files unwritten.
        if 'tf_rep' in locals():
            del tf_rep
        if 'model_converted' in locals():
            del model_converted
        if 'prepare' in locals():
            del prepare
        sys.modules.pop('OnnxModel', None)
        if 'converted_file' in locals():
            for suffix in ('.py', '.npy'):
                if os.path.exists(converted_file + suffix):
                    os.remove(converted_file + suffix)
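# Hedged usage sketch, not part of the original test suite: shows how the OnnxEmit
# helper above could be driven to check the IR -> ONNX -> onnx-tf round trip against
# a reference prediction. The framework name, architecture key, file paths and
# tolerance are hypothetical placeholders; numpy is assumed to be imported as np at
# module level, as the helpers above already rely on.
def _example_onnx_roundtrip_check(reference_predict):
    onnx_predict = OnnxEmit(
        original_framework='tensorflow',       # hypothetical source framework
        architecture_name='resnet_v2_152',     # hypothetical key in TestKit.preprocess_func
        architecture_path='resnet_v2_152.pb',  # hypothetical IR graph file
        weight_path='resnet_v2_152.npy',       # hypothetical IR weight file
        image_path='seagull.jpg')              # hypothetical test image
    # Compare against the prediction produced by the original framework.
    np.testing.assert_allclose(onnx_predict, reference_predict, rtol=1e-3)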
def _convert(args):
    if args.dstFramework == 'caffe':
        from mmdnn.conversion.caffe.caffe_emitter import CaffeEmitter
        if args.IRWeightPath is None:
            emitter = CaffeEmitter(args.IRModelPath)
        else:
            assert args.dstWeightPath
            emitter = CaffeEmitter((args.IRModelPath, args.IRWeightPath))

    elif args.dstFramework == 'keras':
        from mmdnn.conversion.keras.keras2_emitter import Keras2Emitter
        emitter = Keras2Emitter((args.IRModelPath, args.IRWeightPath))

    elif args.dstFramework == 'tensorflow':
        from mmdnn.conversion.tensorflow.tensorflow_emitter import TensorflowEmitter
        if args.IRWeightPath is None:
            # Convert network architecture only
            emitter = TensorflowEmitter(args.IRModelPath)
        else:
            emitter = TensorflowEmitter((args.IRModelPath, args.IRWeightPath))

    elif args.dstFramework == 'cntk':
        from mmdnn.conversion.cntk.cntk_emitter import CntkEmitter
        if args.IRWeightPath is None:
            emitter = CntkEmitter(args.IRModelPath)
        else:
            emitter = CntkEmitter((args.IRModelPath, args.IRWeightPath))

    elif args.dstFramework == 'coreml':
        raise NotImplementedError("CoreML emitter is not finished yet.")

    elif args.dstFramework == 'pytorch':
        if not args.dstWeightPath or not args.IRWeightPath:
            raise ValueError("Need to set a target weight filename.")
        from mmdnn.conversion.pytorch.pytorch_emitter import PytorchEmitter
        emitter = PytorchEmitter((args.IRModelPath, args.IRWeightPath))

    elif args.dstFramework == 'mxnet':
        from mmdnn.conversion.mxnet.mxnet_emitter import MXNetEmitter
        if args.IRWeightPath is None:
            emitter = MXNetEmitter(args.IRModelPath)
        else:
            if args.dstWeightPath is None:
                raise ValueError(
                    "MXNet emitter needs argument [dstWeightPath(dw)], like -dw mxnet_converted-0000.param")
            emitter = MXNetEmitter((args.IRModelPath, args.IRWeightPath, args.dstWeightPath))

    elif args.dstFramework == 'onnx':
        from mmdnn.conversion.onnx.onnx_emitter import OnnxEmitter
        if args.IRWeightPath is None:
            raise NotImplementedError("ONNX emitter needs IR weight file")
        else:
            emitter = OnnxEmitter(args.IRModelPath, args.IRWeightPath)

    else:
        assert False

    emitter.run(args.dstModelPath, args.dstWeightPath, args.phase)

    return 0
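# Hedged usage sketch, not part of the original CLI: _convert only reads plain
# attributes off `args`, so a bare namespace is enough to call it programmatically.
# All file names below are hypothetical placeholders.
def _example_convert_ir_to_keras():
    from argparse import Namespace
    args = Namespace(
        dstFramework='keras',               # picks the Keras2Emitter branch above
        IRModelPath='converted.pb',         # hypothetical IR graph file
        IRWeightPath='converted.npy',       # hypothetical IR weight file
        dstModelPath='keras_converted.py',  # hypothetical generated model script
        dstWeightPath=None,                 # forwarded to emitter.run as-is
        phase='test')                       # forwarded to emitter.run as-is
    return _convert(args)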
def onnx_emit(original_framework, architecture_name, architecture_path, weight_path, test_input_path):
    from mmdnn.conversion.onnx.onnx_emitter import OnnxEmitter

    # IR to code
    converted_file = TestModels.tmpdir + original_framework + '_onnx_' + architecture_name + "_converted"
    converted_file = converted_file.replace('.', '_')
    emitter = OnnxEmitter(architecture_path, weight_path)
    emitter.run(converted_file + '.py', converted_file + '.npy', 'test')
    del emitter
    del OnnxEmitter

    # import converted model
    from onnx_tf.backend import prepare
    model_converted = imp.load_source('OnnxModel', converted_file + '.py').KitModel(converted_file + '.npy')
    tf_rep = prepare(model_converted)

    original_framework = checkfrozen(original_framework)
    func = TestKit.preprocess_func[original_framework][architecture_name]
    img = func(test_input_path)
    input_data = np.expand_dims(img, 0)

    predict = tf_rep.run(input_data)[0]

    del prepare
    del model_converted
    del tf_rep

    del sys.modules['OnnxModel']

    os.remove(converted_file + '.py')
    os.remove(converted_file + '.npy')

    return predict
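# Hedged helper sketch, not part of the original test suite: onnx_emit writes its
# generated .py/.npy files under TestModels.tmpdir, so a driver may want to make
# sure that directory exists before the first call. TestModels.tmpdir is assumed
# to be a plain path prefix defined elsewhere in the suite.
def _ensure_tmpdir_exists():
    if TestModels.tmpdir and not os.path.isdir(TestModels.tmpdir):
        os.makedirs(TestModels.tmpdir)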