Example #1
    def CoreMLEmit(original_framework, architecture_name, architecture_path,
                   weight_path, image_path):
        from mmdnn.conversion.coreml.coreml_emitter import CoreMLEmitter
        from coremltools.models import MLModel
        import numpy as np

        # checkfrozen and TestKit are provided by the surrounding test harness (not shown here).
        original_framework = checkfrozen(original_framework)

        # scale, b, g, r, BGRTranspose
        prep_for_coreml = {
            'inception_v3': [0.00784313771874, -1.0, -1.0, -1.0, False],
            'vgg16':
            [1.0, -103.939002991, -116.778999329, -123.680000305, True],
            'resnet50':
            [1.0, -103.939002991, -116.778999329, -123.680000305, True],
            'mobilenet': [
                0.0170000009239, -1.76698005199, -1.98526000977,
                -2.10256004333, True
            ],
            'tinyyolo': [0.00392156885937, 0, 0, 0, False]
        }

        # IR to Model
        # converted_file = original_framework + '_coreml_' + architecture_name + "_converted"
        # converted_file = converted_file.replace('.', '_')

        # image
        func = TestKit.preprocess_func[original_framework][architecture_name]
        img = func(image_path)

        prep_list = prep_for_coreml[architecture_name]

        emitter = CoreMLEmitter(architecture_path, weight_path)
        model, input_name, output_name = emitter.gen_model(
            input_names=None,
            output_names=None,
            image_input_names=image_path,
            is_bgr=prep_list[4],
            red_bias=prep_list[3],
            green_bias=prep_list[2],
            blue_bias=prep_list[1],
            gray_bias=0.0,
            image_scale=prep_list[0],
            class_labels=None,
            predicted_feature_name=None,
            predicted_probabilities_output='')

        input_name = str(input_name[0][0])
        output_name = str(output_name[0][0])

        # load model
        model = MLModel(model)

        # inference

        coreml_input = {input_name: img}
        coreml_output = model.predict(coreml_input)
        prob = coreml_output[output_name]
        prob = np.array(prob).squeeze()

        return prob
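A follow-up sketch, not taken from the example above: once CoreMLEmit returns the squeezed probability vector, a test harness typically inspects the top-scoring classes. The helper below only assumes `prob` is a 1-D array of class scores.

import numpy as np

def top_k(prob, k=5):
    """Return (class_index, score) pairs for the k highest scores."""
    prob = np.asarray(prob).squeeze()
    idx = np.argsort(prob)[::-1][:k]
    return [(int(i), float(prob[i])) for i in idx]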
Example #2
def _convert(args):
    if args.dstModelFormat == 'caffe':
        raise NotImplementedError(
            "Destination [Caffe] is not implemented yet.")

    elif args.dstModelFormat == 'keras':
        from mmdnn.conversion.keras.keras2_emitter import Keras2Emitter
        emitter = Keras2Emitter(args.IRModelPath)

    elif args.dstModelFormat == 'tensorflow':
        from mmdnn.conversion.tensorflow.tensorflow_emitter import TensorflowEmitter
        if args.IRWeightPath is None:
            emitter = TensorflowEmitter(args.IRModelPath)
        else:
            emitter = TensorflowEmitter((args.IRModelPath, args.IRWeightPath))

    elif args.dstModelFormat == 'cntk':
        from mmdnn.conversion.cntk.cntk_emitter import CntkEmitter
        if args.IRWeightPath is None:
            emitter = CntkEmitter(args.IRModelPath)
        else:
            emitter = CntkEmitter((args.IRModelPath, args.IRWeightPath))

    elif args.dstModelFormat == 'coreml':
        raise NotImplementedError("CoreML emitter is not finished yet.")
        # Everything below is unreachable until the NotImplementedError above is removed.
        assert args.IRWeightPath is not None
        from mmdnn.conversion.coreml.coreml_emitter import CoreMLEmitter
        emitter = CoreMLEmitter((args.IRModelPath, args.IRWeightPath))
        model = emitter.gen_model()
        print("Saving the CoreML model [{}].".format(args.dstModelPath +
                                                     '.mlmodel'))
        model.save(args.dstModelPath + '.mlmodel')
        print("The converted CoreML model saved as [{}].".format(
            args.dstModelPath + '.mlmodel'))
        return 0

    elif args.dstModelFormat == 'pytorch':
        if not args.dstWeightPath or not args.IRWeightPath:
            raise ValueError("Need to set a target weight filename.")
        from mmdnn.conversion.pytorch.pytorch_emitter import PytorchEmitter
        emitter = PytorchEmitter((args.IRModelPath, args.IRWeightPath))

    elif args.dstModelFormat == 'mxnet':
        from mmdnn.conversion.mxnet.mxnet_emitter import MXNetEmitter
        if args.IRWeightPath is None:
            emitter = MXNetEmitter(args.IRModelPath)
        else:
            emitter = MXNetEmitter((args.IRModelPath, args.IRWeightPath,
                                    args.inputShape, args.dstWeightPath))

    else:
        assert False

    emitter.run(args.dstModelPath, args.dstWeightPath, args.phase)

    return 0
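For context, here is a minimal sketch of an argument parser that would satisfy the attributes `_convert` reads (dstModelFormat, IRModelPath, IRWeightPath, dstModelPath, dstWeightPath, inputShape, phase). This is an assumption for illustration only; the real MMdnn command-line wiring may define these options differently.

import argparse

def _get_parser():
    parser = argparse.ArgumentParser(description='Convert an IR model to a target framework.')
    parser.add_argument('--dstModelFormat', required=True,
                        choices=['caffe', 'keras', 'tensorflow', 'cntk',
                                 'coreml', 'pytorch', 'mxnet'])
    parser.add_argument('--IRModelPath', required=True)
    parser.add_argument('--IRWeightPath', default=None)
    parser.add_argument('--dstModelPath', required=True)
    parser.add_argument('--dstWeightPath', default=None)
    parser.add_argument('--inputShape', default=None)
    parser.add_argument('--phase', default='test')
    return parser

# Usage sketch: args = _get_parser().parse_args(); _convert(args)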
Example #3
    def CoreMLEmit(original_framework, architecture_name, architecture_path, weight_path, image_path):


        from mmdnn.conversion.coreml.coreml_emitter import CoreMLEmitter
        from coremltools.models import MLModel
        import coremltools
        import numpy as np
        from PIL import Image


        original_framework = checkfrozen(original_framework)


        def prep_for_coreml(prename, BGRTranspose):
            # The list is in RGB order.
            if prename == 'Standard':
                return 0.00784313725490196, -1, -1, -1
            elif prename == 'ZeroCenter':
                return 1, -123.68, -116.779, -103.939
            elif prename == 'Identity':
                return 1, 1, 1, 1
            else:
                raise ValueError()


        # IR to Model
        # converted_file = original_framework + '_coreml_' + architecture_name + "_converted"
        # converted_file = converted_file.replace('.', '_')

        func = TestKit.preprocess_func[original_framework][architecture_name]

        import inspect
        funcstr = inspect.getsource(func)

        coreml_pre = funcstr.split('(')[0].split('.')[-1]

        if len(funcstr.split(',')) == 3:
            BGRTranspose = bool(0)
            size = int(funcstr.split('path,')[1].split(')')[0])
            prep_list = prep_for_coreml(coreml_pre, BGRTranspose)
        elif len(funcstr.split(',')) == 4:
            BGRTranspose = funcstr.split(',')[-2].split(')')[0].strip() == str(True)
            size = int(funcstr.split('path,')[1].split(',')[0])
            prep_list = prep_for_coreml(coreml_pre, BGRTranspose)
        elif len(funcstr.split(',')) == 11:
            BGRTranspose = funcstr.split(',')[-2].split(')')[0].strip() == str(True)
            size = int(funcstr.split('path,')[1].split(',')[0])
            prep_list = (float(funcstr.split(',')[2]),
                         float(funcstr.split(',')[3].split('[')[-1]),
                         float(funcstr.split(',')[4]),
                         float(funcstr.split(',')[5].split(']')[0]))

        emitter = CoreMLEmitter(architecture_path, weight_path)
        model, input_name, output_name = emitter.gen_model(
                input_names=None,
                output_names=None,
                image_input_names=image_path,
                is_bgr=BGRTranspose,
                red_bias=prep_list[1],
                green_bias=prep_list[2],
                blue_bias=prep_list[3],
                gray_bias=0.0,
                image_scale=prep_list[0],
                class_labels=None,
                predicted_feature_name=None,
                predicted_probabilities_output=''
            )

        input_name = str(input_name[0][0])
        output_name = str(output_name[0][0])

        # load model
        model = MLModel(model)


        # save model
        # coremltools.utils.save_spec(model.get_spec(), converted_file)

        from coremltools.models.utils import macos_version

        if macos_version() < (10, 13):
            return None
        else:

            from PIL import Image as pil_image
            img = pil_image.open(image_path)
            img = img.resize((size, size))

            # inference

            coreml_input = {input_name: img}
            coreml_output = model.predict(coreml_input)
            prob = coreml_output[output_name]
            prob = np.array(prob).squeeze()

            return prob
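Note that the two preprocessing styles above order the color biases differently: the hard-coded table in Example #1 stores [scale, blue, green, red, BGRTranspose], while prep_for_coreml returns (scale, red, green, blue). A small sketch, using only the gen_model keyword names already shown, makes the RGB-ordered mapping explicit.

def bias_kwargs(prep_list, is_bgr):
    """Map an RGB-ordered (scale, r, g, b) tuple onto gen_model keyword arguments."""
    scale, red, green, blue = prep_list
    return dict(is_bgr=is_bgr, image_scale=scale,
                red_bias=red, green_bias=green, blue_bias=blue, gray_bias=0.0)

# e.g. the 'ZeroCenter' preset returned by prep_for_coreml above:
print(bias_kwargs((1, -123.68, -116.779, -103.939), is_bgr=True))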
Example #4
    def CoreMLEmit(original_framework, architecture_name, architecture_path,
                   weight_path, image_path):
        from mmdnn.conversion.coreml.coreml_emitter import CoreMLEmitter
        from coremltools.models import MLModel
        import numpy as np

        original_framework = checkfrozen(original_framework)

        # scale, b, g, r, BGRTranspose
        CoreML_preprocess = {
            'inception_v3': [0.00784313771874, -1.0, -1.0, -1.0, False],
            'vgg16':
            [1.0, -103.939002991, -116.778999329, -123.680000305, True],
            'resnet50':
            [1.0, -103.939002991, -116.778999329, -123.680000305, True],
            'mobilenet': [
                0.0170000009239, -1.76698005199, -1.98526000977,
                -2.10256004333, True
            ],
            'tinyyolo': [0.00392156885937, 0, 0, 0, False]
        }

        # IR to Model
        # image
        func = TestKit.preprocess_func[original_framework][architecture_name]
        img = func(image_path)

        if original_framework == 'CoreML':
            prep_list = CoreML_preprocess[architecture_name]

        else:
            import inspect
            funcstr = inspect.getsource(func)
            coreml_pre = funcstr.split('(')[0].split('.')[-1]

            if len(funcstr.split(',')) == 3:
                BGRTranspose = bool(0)
                img_size = int(funcstr.split('path,')[1].split(')')[0])
            else:
                # bool() on a non-empty string is always True; compare with 'True' instead.
                BGRTranspose = funcstr.split(',')[-2].split(')')[0].strip() == str(True)
                img_size = int(funcstr.split('path,')[1].split(',')[0])

            prep_list = TestModels.prep_for_coreml(coreml_pre, BGRTranspose)

        emitter = CoreMLEmitter(architecture_path, weight_path)
        model, input_name, output_name = emitter.gen_model(
            input_names=None,
            output_names=None,
            image_input_names=image_path,
            is_bgr=prep_list[4],
            red_bias=prep_list[3],
            green_bias=prep_list[2],
            blue_bias=prep_list[1],
            gray_bias=0.0,
            image_scale=prep_list[0],
            class_labels=None,
            predicted_feature_name=None,
            predicted_probabilities_output='')

        input_name = str(input_name[0][0])
        output_name = str(output_name[0][0])

        # load model
        model = MLModel(model)

        # inference
        coreml_input = {input_name: img}
        coreml_output = model.predict(coreml_input)
        prob = coreml_output[output_name]
        prob = np.array(prob).squeeze()

        return prob
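As a readability aside (not MMdnn code), the same per-model table can be expressed with named fields so the [scale, blue, green, red, BGRTranspose] ordering is explicit; the values below are copied from the table above.

from collections import namedtuple

Preprocess = namedtuple('Preprocess', 'scale blue_bias green_bias red_bias is_bgr')

COREML_PREPROCESS = {
    'vgg16': Preprocess(1.0, -103.939002991, -116.778999329, -123.680000305, True),
    'tinyyolo': Preprocess(0.00392156885937, 0, 0, 0, False),
}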
Example #5
    def CoreMLEmit(original_framework, architecture_name, architecture_path, weight_path, image_path):
        from mmdnn.conversion.coreml.coreml_emitter import CoreMLEmitter
        import numpy as np

        original_framework = checkfrozen(original_framework)

        def prep_for_coreml(prepname, BGRTranspose):
            if prepname == 'Standard':
                return 0.00784313725490196, -1, -1, -1
            elif prepname == 'ZeroCenter' and BGRTranspose == True:
                return 1, -123.68, -116.779, -103.939
            elif prepname == 'ZeroCenter' and BGRTranspose == False:
                return 1, -103.939, -116.779, -123.68
            elif prepname == 'Identity':
                return 1, 1, 1, 1
            else:
                raise ValueError()

        # IR to Model
        converted_file = original_framework + '_coreml_' + architecture_name + "_converted"
        converted_file = converted_file.replace('.', '_')
        print(converted_file)

        func = TestKit.preprocess_func[original_framework][architecture_name]

        import inspect
        funcstr = inspect.getsource(func)

        coreml_pre = funcstr.split('(')[0].split('.')[-1]

        if len(funcstr.split(',')) == 3:
            BGRTranspose = bool(0)
            img_size = int(funcstr.split('path,')[1].split(')')[0])
        else:
            # bool() on a non-empty string is always True; compare with 'True' instead.
            BGRTranspose = funcstr.split(',')[-2].split(')')[0].strip() == str(True)
            img_size = int(funcstr.split('path,')[1].split(',')[0])

        prep_list = prep_for_coreml(coreml_pre, BGRTranspose)

        emitter = CoreMLEmitter(architecture_path, weight_path)
        model, input_name, output_name = emitter.gen_model(
                input_names=None,
                output_names=None,
                image_input_names=image_path,
                is_bgr=BGRTranspose,
                red_bias=prep_list[1],
                green_bias=prep_list[2],
                blue_bias=prep_list[3],
                gray_bias=0.0,
                image_scale=prep_list[0],
                class_labels=None,
                predicted_feature_name=None,
                predicted_probabilities_output=''
            )

        import coremltools
        con_model = coremltools.models.MLModel(model)
        print("Model loading success.")

        from coremltools.models.utils import macos_version

        if macos_version() < (10, 13):
            return None

        else:
            from PIL import Image as pil_image
            img = pil_image.open(image_path)
            img = img.resize((img_size, img_size))

            input_data = img
            coreml_inputs = {str(input_name[0][0]): input_data}
            coreml_output = con_model.predict(coreml_inputs, useCPUOnly=False)
            converted_predict = coreml_output[str(output_name[0][0])]
            converted_predict = np.squeeze(converted_predict)

            return converted_predict
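The prediction path above feeds a resized PIL image straight into MLModel.predict. Here is a minimal sketch of that image-preparation step, assuming a square input of the size recovered from the preprocessing function; forcing RGB is an extra precaution not present in the original code.

from PIL import Image

def load_image_for_coreml(image_path, size):
    """Open an image, force RGB, and resize it to the network's input resolution."""
    img = Image.open(image_path).convert('RGB')
    return img.resize((size, size))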
Example #6
def _convert(args):
    if args.framework == 'caffe':
        raise NotImplementedError("Destination [Caffe] is not implemented yet.")

    elif args.framework == 'keras':
        raise NotImplementedError("Destination [Keras] is not implemented yet.")

    elif args.framework == 'tensorflow':
        raise NotImplementedError("Destination [Tensorflow] is not implemented yet.")

    elif args.framework == 'cntk':
        raise NotImplementedError("Destination [Tensorflow] is not implemented yet.")

    elif args.framework == 'coreml':
        from mmdnn.conversion.coreml.coreml_emitter import CoreMLEmitter
        assert args.inputNetwork is not None
        assert args.inputWeight is not None
        emitter = CoreMLEmitter(args.inputNetwork, args.inputWeight)
        model = emitter.gen_model(
            args.inputNames,
            args.outputNames,
            image_input_names=set(args.imageInputNames) if args.imageInputNames else None,
            is_bgr=args.isBGR,
            red_bias=args.redBias,
            blue_bias=args.blueBias,
            green_bias=args.greenBias,
            gray_bias=args.grayBias,
            image_scale=args.scale,
            class_labels=args.classInputPath if args.classInputPath else None,
            predicted_feature_name=args.predictedFeatureName)

        """
        from google.protobuf import text_format
        with open(args.output+'.txt', 'w') as f:
            f.write(text_format.MessageToString(model))
        """

        with open(args.output, 'wb') as f:
            model = model.SerializeToString()
            f.write(model)

        return 0

    elif args.framework == 'pytorch':
        if not args.dstWeightPath or not args.IRWeightPath:
            raise ValueError("Need to set a target weight filename.")
        from mmdnn.conversion.pytorch.pytorch_emitter import PytorchEmitter
        emitter = PytorchEmitter((args.IRModelPath, args.IRWeightPath))

    elif args.framework == 'mxnet':
        from mmdnn.conversion.mxnet.mxnet_emitter import MXNetEmitter
        if args.IRWeightPath is None:
            emitter = MXNetEmitter(args.IRModelPath)
        else:
            emitter = MXNetEmitter((args.IRModelPath, args.IRWeightPath, args.inputShape, args.dstWeightPath))

    else:
        assert False

    emitter.run(args.output)

    return 0
Example #7
def _convert(args):
    if args.framework == 'caffe':
        raise NotImplementedError(
            "Destination [Caffe] is not implemented yet.")

    elif args.framework == 'keras':
        raise NotImplementedError(
            "Destination [Keras] is not implemented yet.")

    elif args.framework == 'tensorflow':
        raise NotImplementedError(
            "Destination [Tensorflow] is not implemented yet.")

    elif args.framework == 'cntk':
        raise NotImplementedError(
            "Destination [Tensorflow] is not implemented yet.")

    elif args.framework == 'coreml':
        from mmdnn.conversion.coreml.coreml_emitter import CoreMLEmitter
        assert args.inputNetwork is not None
        assert args.inputWeight is not None
        emitter = CoreMLEmitter(args.inputNetwork, args.inputWeight)
        model = emitter.gen_model(
            args.inputNames,
            args.outputNames,
            image_input_names=set(args.imageInputNames)
            if args.imageInputNames else None,
            is_bgr=args.isBGR,
            red_bias=args.redBias,
            blue_bias=args.blueBias,
            green_bias=args.greenBias,
            gray_bias=args.grayBias,
            image_scale=args.scale,
            class_labels=args.classInputPath if args.classInputPath else None,
            predicted_feature_name=args.predictedFeatureName)
        """
        from google.protobuf import text_format
        with open(args.output+'.txt', 'w') as f:
            f.write(text_format.MessageToString(model))
        """

        with open(args.output, 'wb') as f:
            model = model.SerializeToString()
            f.write(model)

        return 0

    elif args.framework == 'pytorch':
        if not args.dstWeightPath or not args.IRWeightPath:
            raise ValueError("Need to set a target weight filename.")
        from mmdnn.conversion.pytorch.pytorch_emitter import PytorchEmitter
        emitter = PytorchEmitter((args.IRModelPath, args.IRWeightPath))

    elif args.framework == 'mxnet':
        from mmdnn.conversion.mxnet.mxnet_emitter import MXNetEmitter
        if args.IRWeightPath is None:
            emitter = MXNetEmitter(args.IRModelPath)
        else:
            emitter = MXNetEmitter((args.IRModelPath, args.IRWeightPath,
                                    args.inputShape, args.dstWeightPath))

    else:
        assert False

    emitter.run(args.output)

    return 0
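Both _convert variants above serialize the generated spec directly to args.output. As a sanity check (an assumption, not part of MMdnn), the written file can be loaded back with coremltools to confirm the spec parses.

import coremltools

def load_converted(mlmodel_path):
    """Load a converted .mlmodel file; raises if the spec does not parse."""
    return coremltools.models.MLModel(mlmodel_path)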
Example #8
    def CoreMLEmit(original_framework, architecture_name, architecture_path, weight_path, image_path):
        from mmdnn.conversion.coreml.coreml_emitter import CoreMLEmitter

        def prep_for_coreml(prepname, BGRTranspose):
            if prepname == 'Standard' and BGRTranspose == False:
                return 0.00784313725490196, -1, -1, -1
            elif prepname == 'ZeroCenter' and BGRTranspose == True:
                return 1, -123.68, -116.779, -103.939
            elif prepname == 'ZeroCenter' and BGRTranspose == False:
                return 1, -103.939, -116.779, -123.68
            elif prepname == 'Identity':
                return 1, 1, 1, 1
            else:
                raise ValueError()


        # IR to Model
        converted_file = original_framework + '_coreml_' + architecture_name + "_converted"
        converted_file = converted_file.replace('.', '_')
        print(converted_file)

        func = TestKit.preprocess_func[original_framework][architecture_name]

        import inspect
        funcstr = inspect.getsource(func)

        coreml_pre = funcstr.split('(')[0].split('.')[-1]

        if len(funcstr.split(',')) == 3:
            BGRTranspose = bool(0)
            img_size = int(funcstr.split('path,')[1].split(')')[0])
        else:
            # bool() on a non-empty string is always True; compare with 'True' instead.
            BGRTranspose = funcstr.split(',')[-2].split(')')[0].strip() == str(True)
            img_size = int(funcstr.split('path,')[1].split(',')[0])

        print(BGRTranspose)
        print(coreml_pre)
        print(img_size)

        prep_list = prep_for_coreml(coreml_pre, BGRTranspose)
        print(prep_list)
        # assert False

        # print(func.__name__)

        emitter = CoreMLEmitter(architecture_path, weight_path)
        model, input_name, output_name = emitter.gen_model(
                input_names=None,
                output_names=None,
                image_input_names=image_path,
                is_bgr=BGRTranspose,
                red_bias=prep_list[1],
                green_bias=prep_list[2],
                blue_bias=prep_list[3],
                gray_bias=0.0,
                image_scale=prep_list[0],
                class_labels=None,
                predicted_feature_name=None,
                predicted_probabilities_output=''
            )

        import coremltools
        con_model = coremltools.models.MLModel(model)
        print("Model loading success.")

        from PIL import Image as pil_image
        img = pil_image.open(image_path)
        img = img.resize((img_size, img_size))

        input_data = img
        coreml_inputs = {str(input_name[0][0]): input_data}
        coreml_output = con_model.predict(coreml_inputs, useCPUOnly=False)
        converted_predict = coreml_output[str(output_name[0][0])]

        return converted_predict
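A test harness would normally compare converted_predict against the original framework's prediction. A minimal sketch, assuming both are 1-D score vectors (the names here are illustrative):

import numpy as np

def top1_matches(converted_predict, original_predict):
    """True when the converted and original models agree on the best class."""
    return int(np.argmax(converted_predict)) == int(np.argmax(original_predict))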