Example no. 1
def convert_model(conf):
    """Convert a third-party model into a MACE graph definition.

    The configuration dict (keyed by ``ModelKeys`` entries) selects the
    source platform (TensorFlow / Caffe / ONNX), describes the model's
    input/output tensors, and carries optional quantization/optimization
    flags.  After the platform-specific conversion and the generic MACE
    transformer pass, the graph is additionally lowered for Hexagon/HTA
    or APU when the configured runtime requires it.

    Args:
        conf: configuration dict keyed by ``ModelKeys`` entries.

    Returns:
        The converted (and transformed) output graph definition.
    """
    option = cvt.ConverterOption()

    # Optional settings: copy into the option only when present in conf.
    if ModelKeys.graph_optimize_options in conf:
        option.transformer_option = conf[ModelKeys.graph_optimize_options]
    if ModelKeys.winograd in conf:
        option.winograd = conf[ModelKeys.winograd]
    if ModelKeys.quantize in conf:
        option.quantize = conf[ModelKeys.quantize]
    if ModelKeys.quantize_large_weights in conf:
        option.quantize_large_weights = conf[ModelKeys.quantize_large_weights]
    if ModelKeys.quantize_range_file in conf:
        option.quantize_range_file = conf[ModelKeys.quantize_range_file]
    if ModelKeys.change_concat_ranges in conf:
        option.change_concat_ranges = conf[ModelKeys.change_concat_ranges]
    if ModelKeys.cl_mem_type in conf:
        option.cl_mem_type = conf[ModelKeys.cl_mem_type]
    if ModelKeys.runtime in conf:
        option.device = conf[ModelKeys.runtime]
        if option.device == DeviceType.CPU_GPU:
            # When converting, CPU and GPU share the same model.
            option.device = DeviceType.CPU
        # We don't need `value`, but keep it to stay consistent with the
        # legacy code used by `base_converter`.
        option.device = option.device.value

    option.data_type = conf[ModelKeys.data_types]

    # Register input nodes; 4-D NCHW inputs are transposed to NHWC, the
    # layout MACE uses internally.
    for name, shape, data_type, data_format, value_range in zip(
            conf[ModelKeys.input_tensors],
            conf[ModelKeys.input_shapes],
            conf[ModelKeys.input_data_types],
            conf[ModelKeys.input_data_formats],
            conf[ModelKeys.input_ranges]):
        input_node = cvt.NodeInfo()
        input_node.name = name
        input_node.shape = shape
        input_node.data_type = data_type
        input_node.data_format = data_format
        if (input_node.data_format == DataFormat.NCHW
                and len(input_node.shape) == 4):
            input_node.shape = transpose_shape(input_node.shape, [0, 2, 3, 1])
            input_node.data_format = DataFormat.NHWC
        input_node.range = value_range
        option.add_input_node(input_node)

    # Register output nodes with the same NCHW -> NHWC normalization.
    for name, shape, data_type, data_format in zip(
            conf[ModelKeys.output_tensors],
            conf[ModelKeys.output_shapes],
            conf[ModelKeys.output_data_types],
            conf[ModelKeys.output_data_formats]):
        output_node = cvt.NodeInfo()
        output_node.name = name
        output_node.shape = shape
        output_node.data_type = data_type
        output_node.data_format = data_format
        if (output_node.data_format == DataFormat.NCHW
                and len(output_node.shape) == 4):
            output_node.shape = transpose_shape(output_node.shape,
                                                [0, 2, 3, 1])
            output_node.data_format = DataFormat.NHWC
        option.add_output_node(output_node)

    # Validation (check) nodes default to the output nodes when absent.
    if ModelKeys.check_tensors in conf:
        for name, shape in zip(conf[ModelKeys.check_tensors],
                               conf[ModelKeys.check_shapes]):
            check_node = cvt.NodeInfo()
            check_node.name = name
            check_node.shape = shape
            option.add_check_node(check_node)
    else:
        option.check_nodes = option.output_nodes

    option.build()

    print("Transform model to one that can better run on device")
    platform = conf[ModelKeys.platform]
    if platform == Platform.TENSORFLOW:
        from transform import tensorflow_converter
        converter = tensorflow_converter.TensorflowConverter(
            option, conf["model_file_path"])
    elif platform == Platform.CAFFE:
        from transform import caffe_converter
        converter = caffe_converter.CaffeConverter(option,
                                                   conf["model_file_path"],
                                                   conf["weight_file_path"])
    elif platform == Platform.ONNX:
        from transform import onnx_converter
        converter = onnx_converter.OnnxConverter(option,
                                                 conf["model_file_path"])
    else:
        # Fixed typo in error message: "platorm" -> "platform".
        mace_check(False, "Mace do not support platform %s yet." % platform)

    output_graph_def = converter.run()
    mace_transformer = transformer.Transformer(option, output_graph_def)
    output_graph_def, quantize_activation_info = mace_transformer.run()

    # Device-specific lowering for DSP-like runtimes.
    runtime = conf[ModelKeys.runtime]
    if runtime in (DeviceType.HEXAGON, DeviceType.HTA):
        from transform import hexagon_converter
        converter = hexagon_converter.HexagonConverter(
            option, output_graph_def, quantize_activation_info)
        output_graph_def = converter.run()
    elif runtime == DeviceType.APU:
        mace_check(platform == Platform.TENSORFLOW,
                   "apu only support model from tensorflow")
        from transform import apu_converter
        converter = apu_converter.ApuConverter(
            option, output_graph_def, quantize_activation_info)
        output_graph_def = converter.run()

    return output_graph_def
Example no. 2
def convert_model(conf):
    """Convert a third-party model into a MACE graph definition.

    String-keyed variant: `conf` values come in as raw strings (or lists
    of strings) and are parsed/validated here before building the
    converter option.  Runs the platform-specific converter (TensorFlow /
    Caffe / ONNX), the generic MACE transformer, and finally a
    device-specific converter (Hexagon/HTA/APU) when the runtime
    requires one.

    Args:
        conf: dict with string keys ("platform", "runtime",
            "input_tensors", ...) and string-encoded values.

    Returns:
        The converted (and transformed) output graph definition.
    """
    print(conf)
    platform = conf["platform"]
    mace_check(platform in ['tensorflow', 'caffe', 'onnx'],
               "platform not supported")
    runtime = conf["runtime"]
    mace_check(runtime in ['cpu', 'gpu', 'dsp', 'hta', 'apu', 'cpu+gpu'],
               "runtime not supported")

    option = cvt.ConverterOption()
    if "graph_optimize_options" in conf:
        option.transformer_option = conf["graph_optimize_options"].split(',')
    option.winograd = conf.get("winograd", 0)
    option.quantize = bool(conf.get("quantize", 0))
    option.quantize_large_weights = bool(conf.get("quantize_large_weights", 0))
    option.quantize_range_file = conf.get("quantize_range_file", "")
    option.change_concat_ranges = bool(conf.get("change_concat_ranges", 0))
    option.cl_mem_type = conf.get("cl_mem_type", "image")
    option.device = device_type_map[conf.get("runtime", "cpu")]
    option.data_type = parse_data_type(conf.get("data_type", "fp16_fp32"),
                                       option.quantize)

    # Parse and validate the input-node description lists; per-node
    # attributes default sensibly when omitted from conf.
    input_tensors = to_list(conf["input_tensors"])
    input_shapes = [
        parse_int_array_from_str(shape)
        for shape in to_list(conf["input_shapes"])
    ]
    mace_check(
        len(input_tensors) == len(input_shapes),
        "input node count and shape count do not match")
    input_count = len(input_tensors)
    input_data_types = [
        data_type_map[dt] for dt in to_list(
            conf.get("input_data_types", ["float32"] * input_count))
    ]
    input_data_formats = [
        data_format_map[df] for df in to_list(
            conf.get("input_data_formats", ["NHWC"] * input_count))
    ]
    input_ranges = [
        parse_float_array_from_str(r)
        for r in to_list(conf.get("input_ranges", ["-1.0,1.0"] * input_count))
    ]
    # Register input nodes; 4-D NCHW inputs are transposed to NHWC, the
    # layout MACE uses internally.
    for name, shape, data_type, data_format, value_range in zip(
            input_tensors, input_shapes, input_data_types,
            input_data_formats, input_ranges):
        input_node = cvt.NodeInfo()
        input_node.name = name
        input_node.shape = shape
        input_node.data_type = data_type
        input_node.data_format = data_format
        if (input_node.data_format == cvt.DataFormat.NCHW
                and len(input_node.shape) == 4):
            input_node.shape = transpose_shape(input_node.shape, [0, 2, 3, 1])
            input_node.data_format = cvt.DataFormat.NHWC
        input_node.range = value_range
        option.add_input_node(input_node)

    # Same parsing/registration for output nodes.
    output_tensors = to_list(conf["output_tensors"])
    output_shapes = [
        parse_int_array_from_str(shape)
        for shape in to_list(conf["output_shapes"])
    ]
    mace_check(
        len(output_tensors) == len(output_shapes),
        "output node count and shape count do not match")
    output_count = len(output_tensors)
    output_data_types = [
        data_type_map[dt] for dt in to_list(
            conf.get("output_data_types", ["float32"] * output_count))
    ]
    output_data_formats = [
        data_format_map[df] for df in to_list(
            conf.get("output_data_formats", ["NHWC"] * output_count))
    ]
    for name, shape, data_type, data_format in zip(
            output_tensors, output_shapes, output_data_types,
            output_data_formats):
        output_node = cvt.NodeInfo()
        output_node.name = name
        output_node.shape = shape
        output_node.data_type = data_type
        output_node.data_format = data_format
        if (output_node.data_format == cvt.DataFormat.NCHW
                and len(output_node.shape) == 4):
            output_node.shape = transpose_shape(output_node.shape,
                                                [0, 2, 3, 1])
            output_node.data_format = cvt.DataFormat.NHWC
        option.add_output_node(output_node)

    # Validation (check) nodes default to the output nodes when absent.
    if "check_tensors" in conf:
        check_tensors = to_list(conf["check_tensors"])
        check_tensors_shapes = [
            parse_int_array_from_str(shape)
            for shape in to_list(conf["check_shapes"])
        ]
        mace_check(
            len(check_tensors) == len(check_tensors_shapes),
            "check tensors count and shape count do not match.")
        for name, shape in zip(check_tensors, check_tensors_shapes):
            check_node = cvt.NodeInfo()
            check_node.name = name
            check_node.shape = shape
            option.add_check_node(check_node)
    else:
        option.check_nodes = option.output_nodes

    option.build()

    print("Transform model to one that can better run on device")

    if platform == 'tensorflow':
        from transform import tensorflow_converter
        converter = tensorflow_converter.TensorflowConverter(
            option, conf["model_file_path"])
    elif platform == 'caffe':
        from transform import caffe_converter
        converter = caffe_converter.CaffeConverter(option,
                                                   conf["model_file_path"],
                                                   conf["weight_file_path"])
    elif platform == 'onnx':
        from transform import onnx_converter
        converter = onnx_converter.OnnxConverter(option,
                                                 conf["model_file_path"])
    else:
        # Fixed typo in error message: "platorm" -> "platform".
        mace_check(False, "Mace do not support platform %s yet." % platform)

    output_graph_def = converter.run()
    mace_transformer = transformer.Transformer(option, output_graph_def)
    output_graph_def, quantize_activation_info = mace_transformer.run()

    # Device-specific lowering for DSP-like runtimes.
    if option.device in [
            cvt.DeviceType.HEXAGON.value, cvt.DeviceType.HTA.value
    ]:
        from transform import hexagon_converter
        converter = hexagon_converter.HexagonConverter(
            option, output_graph_def, quantize_activation_info)
        output_graph_def = converter.run()
    elif runtime == 'apu':
        mace_check(platform == "tensorflow",
                   "apu only support model from tensorflow")
        from transform import apu_converter
        converter = apu_converter.ApuConverter(option, output_graph_def,
                                               quantize_activation_info)
        output_graph_def = converter.run()

    return output_graph_def