    def TensorflowEmit(original_framework, architecture_name, architecture_path, weight_path, image_path):
        import tensorflow as tf
        from mmdnn.conversion.tensorflow.tensorflow_emitter import TensorflowEmitter

        # IR to code
        converted_file = original_framework + '_tensorflow_' + architecture_name + "_converted"
        converted_file = converted_file.replace('.', '_')
        emitter = TensorflowEmitter((architecture_path, weight_path))
        emitter.run(converted_file + '.py', None, 'test')
        del emitter
        del TensorflowEmitter

        # import converted model
        model_converted = __import__(converted_file).KitModel(weight_path)
        input_tf, model_tf = model_converted

        func = TestKit.preprocess_func[original_framework][architecture_name]
        img = func(image_path)
        input_data = np.expand_dims(img, 0)
        with tf.Session() as sess:
            init = tf.global_variables_initializer()
            sess.run(init)
            predict = sess.run(model_tf, feed_dict = {input_tf : input_data})
        del model_converted
        del sys.modules[converted_file]
        os.remove(converted_file + '.py')
        converted_predict = np.squeeze(predict)

        del tf

        return converted_predict
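
Called in isolation (in the original it sits inside a test-kit class), an invocation of this helper might look like the minimal sketch below; the framework name, architecture key, and file paths are placeholders, not values from the original test suite.

# Hypothetical invocation of the helper above; every argument is a placeholder.
predict = TensorflowEmit(
    'caffe',                  # framework the IR files were converted from
    'resnet152',              # architecture key used to look up the preprocess function
    'caffe_resnet152.pb',     # IR architecture (graph) file emitted by the parser
    'caffe_resnet152.npy',    # IR weight file emitted by the parser
    'seagull.jpg')            # test image run through TestKit.preprocess_func
print(predict.shape)          # squeezed prediction returned by the TensorFlow session
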
Example 2
    def TensorflowEmit(original_framework, architecture_name, architecture_path, weight_path, image_path):
        import tensorflow as tf
        from mmdnn.conversion.tensorflow.tensorflow_emitter import TensorflowEmitter

        original_framework = checkfrozen(original_framework)

        # IR to code
        converted_file = original_framework + '_tensorflow_' + architecture_name + "_converted"
        converted_file = converted_file.replace('.', '_')
        print(architecture_path)
        print(weight_path)
        emitter = TensorflowEmitter((architecture_path, weight_path))
        emitter.run(converted_file + '.py', None, 'test')
        del emitter
        del TensorflowEmitter

        # import converted model
        model_converted = __import__(converted_file).KitModel(weight_path)
        input_tf, model_tf = model_converted

        func = TestKit.preprocess_func[original_framework][architecture_name]
        img = func(image_path)
        input_data = np.expand_dims(img, 0)
        with tf.Session() as sess:
            init = tf.global_variables_initializer()
            sess.run(init)
            predict = sess.run(model_tf, feed_dict = {input_tf : input_data})
        del model_converted
        del sys.modules[converted_file]
        os.remove(converted_file + '.py')
        converted_predict = np.squeeze(predict)

        del tf

        return converted_predict
Example 3
    def TensorflowEmit(original_framework, architecture_name,
                       architecture_path, weight_path, image_path):
        print("Testing {} from {} to Tensorflow.".format(
            architecture_name, original_framework))

        # IR to code
        emitter = TensorflowEmitter((architecture_path, weight_path))
        emitter.run("converted_model.py", None, 'test')
        del emitter

        # import converted model
        import converted_model
        reload_module(converted_model)
        model_converted = converted_model.KitModel(TestModels.tmpdir +
                                                   architecture_name +
                                                   "_converted.npy")
        input_tf, model_tf = model_converted

        func = TestKit.preprocess_func[original_framework][architecture_name]
        img = func(image_path)
        input_data = np.expand_dims(img, 0)
        with tf.Session() as sess:
            init = tf.global_variables_initializer()
            sess.run(init)
            predict = sess.run(model_tf, feed_dict={input_tf: input_data})
        del model_converted
        del converted_model
        os.remove("converted_model.py")
        converted_predict = np.squeeze(predict)
        return converted_predict
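
The reload_module call above is assumed to come from the surrounding test utilities; a version-agnostic stand-in (a sketch, not the project's actual helper) could look like this:

# Hypothetical stand-in for the reload_module helper referenced above.
import sys

def reload_module(module):
    if sys.version_info[0] >= 3:
        import importlib
        return importlib.reload(module)   # Python 3 moved reload into importlib
    return reload(module)                 # reload is a builtin on Python 2
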
Example 4
def _convert(args):
    if args.dstFramework == 'caffe':
        from mmdnn.conversion.caffe.caffe_emitter import CaffeEmitter
        if args.IRWeightPath is None:
            emitter = CaffeEmitter(args.IRModelPath)
        else:
            assert args.dstWeightPath
            emitter = CaffeEmitter((args.IRModelPath, args.IRWeightPath))

    elif args.dstFramework == 'keras':
        from mmdnn.conversion.keras.keras2_emitter import Keras2Emitter
        emitter = Keras2Emitter((args.IRModelPath, args.IRWeightPath))

    elif args.dstFramework == 'tensorflow':
        from mmdnn.conversion.tensorflow.tensorflow_emitter import TensorflowEmitter
        if args.IRWeightPath is None:
            # Convert network architecture only
            emitter = TensorflowEmitter(args.IRModelPath)
        else:
            emitter = TensorflowEmitter((args.IRModelPath, args.IRWeightPath))

    elif args.dstFramework == 'cntk':
        from mmdnn.conversion.cntk.cntk_emitter import CntkEmitter
        if args.IRWeightPath is None:
            emitter = CntkEmitter(args.IRModelPath)
        else:
            emitter = CntkEmitter((args.IRModelPath, args.IRWeightPath))

    elif args.dstFramework == 'coreml':
        raise NotImplementedError("CoreML emitter is not finished yet.")

    elif args.dstFramework == 'pytorch':
        if not args.dstWeightPath or not args.IRWeightPath:
            raise ValueError("Need to set a target weight filename.")
        from mmdnn.conversion.pytorch.pytorch_emitter import PytorchEmitter
        emitter = PytorchEmitter((args.IRModelPath, args.IRWeightPath))

    elif args.dstFramework == 'mxnet':
        from mmdnn.conversion.mxnet.mxnet_emitter import MXNetEmitter
        if args.IRWeightPath is None:
            emitter = MXNetEmitter(args.IRModelPath)
        else:
            if args.dstWeightPath is None:
                raise ValueError(
                    "MXNet emitter needs argument [dstWeightPath(dw)], like -dw mxnet_converted-0000.param"
                )
            emitter = MXNetEmitter(
                (args.IRModelPath, args.IRWeightPath, args.dstWeightPath))
    elif args.dstFramework == 'onnx':
        from mmdnn.conversion.onnx.onnx_emitter import OnnxEmitter
        if args.IRWeightPath is None:
            raise NotImplementedError("ONNX emitter needs IR weight file")
        else:
            emitter = OnnxEmitter(args.IRModelPath, args.IRWeightPath)
    else:
        assert False

    emitter.run(args.dstModelPath, args.dstWeightPath, args.phase)

    return 0
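
For reference, _convert reads its settings from an argparse-style namespace; a minimal sketch of driving it for an IR-to-TensorFlow conversion (all file names are placeholders) could be:

# Hypothetical argparse-like namespace carrying the attributes _convert reads above.
from argparse import Namespace

args = Namespace(
    dstFramework='tensorflow',
    IRModelPath='resnet152.pb',      # IR graph produced by the parser step
    IRWeightPath='resnet152.npy',    # IR weights; None would emit the architecture only
    dstModelPath='tf_resnet152.py',  # generated TensorFlow code file
    dstWeightPath=None,              # the TensorFlow emitter takes no separate weight file
    phase='test')
_convert(args)
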
Example 5
    def tensorflow_emit(original_framework, architecture_name,
                        architecture_path, weight_path, test_input_path):
        import tensorflow as tf
        from mmdnn.conversion.tensorflow.tensorflow_emitter import TensorflowEmitter
        # IR to code
        converted_file = TestModels.tmpdir + original_framework + '_tensorflow_' + architecture_name + "_converted"
        converted_file = converted_file.replace('.', '_')

        emitter = TensorflowEmitter((architecture_path, weight_path))
        emitter.run(converted_file + '.py', None, 'test')
        del emitter
        del TensorflowEmitter

        # import converted model
        model_converted = imp.load_source('TFModel', converted_file +
                                          '.py').KitModel(weight_path)

        input_tf, model_tf = model_converted

        original_framework = checkfrozen(original_framework)

        if 'rnn' not in architecture_name:
            func = TestKit.preprocess_func[original_framework][
                architecture_name]
            img = func(test_input_path(architecture_name))
            input_data = np.expand_dims(img, 0)
        else:
            input_data = np.load(test_input_path(architecture_name))

        with tf.Session() as sess:
            init = tf.global_variables_initializer()
            sess.run(init)
            predict = sess.run(model_tf, feed_dict={input_tf: input_data})
        del model_converted
        del sys.modules['TFModel']
        os.remove(converted_file + '.py')
        converted_predict = np.squeeze(predict)

        del tf

        return converted_predict
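
Note that imp.load_source is deprecated on current Python 3 releases; an importlib-based equivalent of that single call (a sketch reusing the converted_file and weight_path names from the snippet above) would be:

# Hypothetical importlib replacement for the imp.load_source call above.
# Unlike imp.load_source, this does not register the module in sys.modules,
# so the later del sys.modules['TFModel'] cleanup would not be needed.
import importlib.util

spec = importlib.util.spec_from_file_location('TFModel', converted_file + '.py')
tf_module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(tf_module)
model_converted = tf_module.KitModel(weight_path)
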
Example 6
def _convert(args):
    if args.dstModelFormat == 'caffe':
        raise NotImplementedError("Destination [Caffe] is not implemented yet.")

    elif args.dstModelFormat == 'keras':
        from mmdnn.conversion.keras.keras2_emitter import Keras2Emitter
        emitter = Keras2Emitter(args.IRModelPath)

    elif args.dstModelFormat == 'tensorflow':
        from mmdnn.conversion.tensorflow.tensorflow_emitter import TensorflowEmitter
        if args.IRWeightPath is None:
            emitter = TensorflowEmitter(args.IRModelPath)
        else:
            emitter = TensorflowEmitter((args.IRModelPath, args.IRWeightPath))
    
    elif args.dstModelFormat == 'cntk':
        from mmdnn.conversion.cntk.cntk_emitter import CntkEmitter
        if args.IRWeightPath is None:
            emitter = CntkEmitter(args.IRModelPath)
        else:
            emitter = CntkEmitter((args.IRModelPath, args.IRWeightPath))

    elif args.dstModelFormat == 'coreml':
        raise NotImplementedError("CoreML emitter is not finished yet.")
        assert args.IRWeightPath is not None
        from mmdnn.conversion.coreml.coreml_emitter import CoreMLEmitter
        emitter = CoreMLEmitter((args.IRModelPath, args.IRWeightPath))
        model = emitter.gen_model()
        print ("Saving the CoreML model [{}].".format(args.dstModelPath + '.mlmodel'))
        model.save(args.dstModelPath + '.mlmodel')
        print ("The converted CoreML model saved as [{}].".format(args.dstModelPath + '.mlmodel'))
        return 0
    
    elif args.dstModelFormat == 'pytorch':
        if not args.dstWeightPath or not args.IRWeightPath:
            raise ValueError("Need to set a target weight filename.")
        from mmdnn.conversion.pytorch.pytorch_emitter import PytorchEmitter
        emitter = PytorchEmitter((args.IRModelPath, args.IRWeightPath))        

    elif args.dstModelFormat == 'mxnet':
        from mmdnn.conversion.mxnet.mxnet_emitter import MXNetEmitter
        if args.IRWeightPath is None:
            emitter = MXNetEmitter(args.IRModelPath)
        else:
            emitter = MXNetEmitter((args.IRModelPath, args.IRWeightPath, args.inputShape, args.dstWeightPath))
        
    else:
        assert False
    
    emitter.run(args.dstModelPath, args.dstWeightPath, args.phase)

    return 0
Example 7
def _convert(args):
    if args.dstModelFormat == 'caffe':
        raise NotImplementedError(
            "Destination [Caffe] is not implemented yet.")

    elif args.dstModelFormat == 'keras':
        from mmdnn.conversion.keras.keras2_emitter import Keras2Emitter
        emitter = Keras2Emitter(args.IRModelPath)

    elif args.dstModelFormat == 'tensorflow':
        from mmdnn.conversion.tensorflow.tensorflow_emitter import TensorflowEmitter
        if args.IRWeightPath is None:
            emitter = TensorflowEmitter(args.IRModelPath)
        else:
            emitter = TensorflowEmitter((args.IRModelPath, args.IRWeightPath))

    elif args.dstModelFormat == 'cntk':
        from mmdnn.conversion.cntk.cntk_emitter import CntkEmitter
        if args.IRWeightPath is None:
            emitter = CntkEmitter(args.IRModelPath)
        else:
            emitter = CntkEmitter((args.IRModelPath, args.IRWeightPath))

    elif args.dstModelFormat == 'coreml':
        raise NotImplementedError("CoreML emitter is not finished yet.")
        assert args.IRWeightPath is not None
        from mmdnn.conversion.coreml.coreml_emitter import CoreMLEmitter
        emitter = CoreMLEmitter((args.IRModelPath, args.IRWeightPath))
        model = emitter.gen_model()
        print("Saving the CoreML model [{}].".format(args.dstModelPath +
                                                     '.mlmodel'))
        model.save(args.dstModelPath + '.mlmodel')
        print("The converted CoreML model saved as [{}].".format(
            args.dstModelPath + '.mlmodel'))
        return 0

    elif args.dstModelFormat == 'pytorch':
        if not args.dstWeightPath or not args.IRWeightPath:
            raise ValueError("Need to set a target weight filename.")
        from mmdnn.conversion.pytorch.pytorch_emitter import PytorchEmitter
        emitter = PytorchEmitter((args.IRModelPath, args.IRWeightPath))

    elif args.dstModelFormat == 'mxnet':
        from mmdnn.conversion.mxnet.mxnet_emitter import MXNetEmitter
        if args.IRWeightPath is None:
            emitter = MXNetEmitter(args.IRModelPath)
        else:
            emitter = MXNetEmitter((args.IRModelPath, args.IRWeightPath,
                                    args.inputShape, args.dstWeightPath))

    else:
        assert False

    emitter.run(args.dstModelPath, args.dstWeightPath, args.phase)

    return 0