Example #1
def validate_with_file(platform, output_names, output_shapes, mace_out_file,
                       validation_outputs_data, validation_threshold, log_file,
                       output_data_formats):
    for i in range(len(output_names)):
        if validation_outputs_data[i].startswith("http://") or \
                validation_outputs_data[i].startswith("https://"):
            validation_file_name = util.formatted_file_name(
                mace_out_file, output_names[i] + '_validation')
            six.moves.urllib.request.urlretrieve(validation_outputs_data[i],
                                                 validation_file_name)
        else:
            validation_file_name = validation_outputs_data[i]
        value = load_data(validation_file_name)
        output_file_name = util.formatted_file_name(mace_out_file,
                                                    output_names[i])
        mace_out_value = load_data(output_file_name)
        mace_out_value, real_output_shape, real_output_data_format = \
            get_real_out_value_shape_df(platform,
                                        mace_out_value,
                                        output_shapes[i],
                                        output_data_formats[i])
        compare_output(output_names[i], mace_out_value, value,
                       validation_threshold, log_file, real_output_shape,
                       real_output_data_format)
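The compare_output and get_real_out_value_shape_df helpers are not part of this listing. A minimal sketch of what a compare_output-style check could look like, assuming a cosine-similarity metric (an assumption; the real helper may use a different measure):

import numpy as np

def compare_output_sketch(output_name, mace_out_value, expected_value,
                          validation_threshold):
    # flatten both outputs and compare direction via cosine similarity
    u = np.asarray(mace_out_value, dtype=np.float64).flatten()
    v = np.asarray(expected_value, dtype=np.float64).flatten()
    similarity = np.dot(u, v) / (np.linalg.norm(u) * np.linalg.norm(v))
    print("%s similarity: %f" % (output_name, similarity))
    return similarity > validation_threshold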
Example #2
def validate_onnx_model(model_file,
                        input_file, mace_out_file,
                        input_names, input_shapes, input_data_formats,
                        output_names, output_shapes, output_data_formats,
                        validation_threshold, input_data_types,
                        backend, log_file):
    import onnx
    if backend == "tensorflow":
        from onnx_tf.backend import prepare
        print("valivate on onnx tensorflow backend.")
    elif backend == "caffe2" or backend == "pytorch":
        from caffe2.python.onnx.backend import prepare
        print("valivate on onnx caffe2 backend.")
    else:
        util.MaceLogger.error(
            VALIDATION_MODULE,
            "onnx backend framwork '" + backend + "' is invalid.")
    if not os.path.isfile(model_file):
        util.MaceLogger.error(
            VALIDATION_MODULE,
            "Input graph file '" + model_file + "' does not exist!")
    model = onnx.load(model_file)
    input_dict = {}
    for i in range(len(input_names)):
        input_value = load_data(util.formatted_file_name(input_file,
                                                         input_names[i]),
                                input_data_types[i])
        input_value = input_value.reshape(input_shapes[i])
        if input_data_formats[i] == DataFormat.NHWC and \
                len(input_shapes[i]) == 4:
            input_value = input_value.transpose((0, 3, 1, 2))
        input_dict[input_names[i]] = input_value
    onnx_outputs = []
    for i in range(len(output_names)):
        out_shape = output_shapes[i][:]
        if output_data_formats[i] == DataFormat.NHWC and \
                len(out_shape) == 4:
            out_shape[1], out_shape[2], out_shape[3] = \
                out_shape[3], out_shape[1], out_shape[2]
        onnx_outputs.append(
            onnx.helper.make_tensor_value_info(output_names[i],
                                               onnx.TensorProto.FLOAT,
                                               out_shape))
    model.graph.output.extend(onnx_outputs)
    rep = prepare(model)

    output_values = rep.run(input_dict)
    for i in range(len(output_names)):
        out_name = output_names[i]
        value = output_values[out_name].flatten()
        output_file_name = util.formatted_file_name(mace_out_file,
                                                    output_names[i])
        mace_out_value = load_data(output_file_name)
        if output_data_formats[i] == DataFormat.NHWC and \
                len(output_shapes[i]) == 4:
            mace_out_value = mace_out_value.reshape(output_shapes[i]) \
                .transpose((0, 3, 1, 2))
        compare_output(output_names[i],
                       mace_out_value, value,
                       validation_threshold, log_file)
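Every validator in this listing relies on the same reshape-then-transpose trick to move between NHWC and NCHW layouts. A standalone numpy check of that axis permutation:

import numpy as np

# NHWC tensor: batch=1, height=2, width=3, channels=4
nhwc = np.arange(24, dtype=np.float32).reshape((1, 2, 3, 4))
nchw = nhwc.transpose((0, 3, 1, 2))   # move channels to axis 1
print(nchw.shape)                     # (1, 4, 2, 3)
# (0, 2, 3, 1) is the inverse permutation and restores NHWC
assert np.array_equal(nchw.transpose((0, 2, 3, 1)), nhwc)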
Example #3
def validate_tf_model(platform, model_file, input_file, mace_out_file,
                      input_names, input_shapes, input_data_formats,
                      output_names, output_shapes, output_data_formats,
                      validation_threshold, input_data_types, log_file):
    import tensorflow as tf
    if not os.path.isfile(model_file):
        util.MaceLogger.error(
            VALIDATION_MODULE,
            "Input graph file '" + model_file + "' does not exist!")

    tf.reset_default_graph()
    input_graph_def = tf.GraphDef()
    with open(model_file, "rb") as f:
        data = f.read()
        input_graph_def.ParseFromString(data)
        tf.import_graph_def(input_graph_def, name="")

        with tf.Session() as session:
            with session.graph.as_default() as graph:
                input_dict = {}
                for i in range(len(input_names)):
                    input_value = load_data(
                        util.formatted_file_name(input_file, input_names[i]),
                        input_data_types[i])
                    input_value = input_value.reshape(input_shapes[i])
                    if input_data_formats[i] == DataFormat.NCHW and \
                            len(input_shapes[i]) == 4:
                        input_value = input_value.transpose((0, 2, 3, 1))
                    elif input_data_formats[i] == DataFormat.OIHW and \
                            len(input_shapes[i]) == 4:
                        # OIHW -> HWIO
                        input_value = input_value.transpose((2, 3, 1, 0))
                    input_node = graph.get_tensor_by_name(
                        normalize_tf_tensor_name(input_names[i]))
                    input_dict[input_node] = input_value

                output_nodes = []
                for name in output_names:
                    output_nodes.append(
                        graph.get_tensor_by_name(
                            normalize_tf_tensor_name(name)))
                output_values = session.run(output_nodes, feed_dict=input_dict)
                for i in range(len(output_names)):
                    output_file_name = util.formatted_file_name(
                        mace_out_file, output_names[i])
                    mace_out_value = load_data(
                        output_file_name,
                        get_data_type_by_value(output_values[i]))
                    mace_out_value, real_out_shape, real_out_data_format = \
                        get_real_out_value_shape_df(platform,
                                                    mace_out_value,
                                                    output_shapes[i],
                                                    output_data_formats[i])
                    compare_output(output_names[i], mace_out_value,
                                   output_values[i], validation_threshold,
                                   log_file, real_out_shape,
                                   real_out_data_format)
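normalize_tf_tensor_name is not shown in this listing. graph.get_tensor_by_name requires the output-index suffix (e.g. ":0"), so a plausible minimal sketch, assuming the helper only adds that suffix when it is missing:

def normalize_tf_tensor_name(name):
    # TensorFlow tensor names have the form "<op_name>:<output_index>"
    if ':' not in name:
        return name + ':0'
    return name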
Example #4
def validate_onnx_model(platform, model_file, input_file, mace_out_file,
                        input_names, input_shapes, input_data_formats,
                        output_names, output_shapes, output_data_formats,
                        validation_threshold, input_data_types, backend,
                        log_file):
    print("validate on onnxruntime.")
    import onnx
    import onnxruntime as onnxrt

    if not os.path.isfile(model_file):
        util.MaceLogger.error(
            VALIDATION_MODULE,
            "Input graph file '" + model_file + "' does not exist!")

    model = onnx.load(model_file)
    remove_initializer_from_input(model)
    model_outputs = set()
    for output in model.graph.output:
        model_outputs.add(output.name)
    for output_name in output_names:
        if output_name not in model_outputs:
            layer_value_info = onnx.helper.ValueInfoProto()
            layer_value_info.name = output_name
            model.graph.output.append(layer_value_info)

    input_dict = {}
    for i in range(len(input_names)):
        input_value = load_data(
            util.formatted_file_name(input_file, input_names[i]),
            input_data_types[i])
        input_value = input_value.reshape(input_shapes[i])
        if input_data_formats[i] == DataFormat.NHWC and \
                len(input_shapes[i]) == 4:
            input_value = input_value.transpose((0, 3, 1, 2))
        input_dict[input_names[i]] = input_value

    sess = onnxrt.InferenceSession(model.SerializeToString())
    output_values = sess.run(output_names, input_dict)

    for i in range(len(output_names)):
        value = output_values[i].flatten()
        output_file_name = util.formatted_file_name(mace_out_file,
                                                    output_names[i])
        mace_out_value = load_data(output_file_name)
        mace_out_value, real_output_shape, real_output_data_format = \
            get_real_out_value_shape_df(platform,
                                        mace_out_value,
                                        output_shapes[i],
                                        output_data_formats[i])
        compare_output(output_names[i], mace_out_value, value,
                       validation_threshold, log_file, real_output_shape,
                       real_output_data_format)
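The onnxruntime flow above boils down to InferenceSession(...).run(output_names, input_dict). A self-contained round trip with a tiny hand-built model (a single Relu node; the opset pin and explicit CPU provider are defensive choices, not requirements of the example above):

import numpy as np
import onnx
from onnx import TensorProto, helper
import onnxruntime as onnxrt

x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [1, 4])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [1, 4])
graph = helper.make_graph([helper.make_node('Relu', ['x'], ['y'])],
                          'demo', [x], [y])
model = helper.make_model(graph,
                          opset_imports=[helper.make_opsetid('', 13)])
onnx.checker.check_model(model)

sess = onnxrt.InferenceSession(model.SerializeToString(),
                               providers=['CPUExecutionProvider'])
out = sess.run(['y'], {'x': np.array([[-1., 0., 2., -3.]], np.float32)})
print(out[0])  # [[0. 0. 2. 0.]]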
Example #5
def validate_caffe_model(platform, model_file, input_file, mace_out_file,
                         weight_file, input_names, input_shapes,
                         input_data_formats, output_names, output_shapes,
                         output_data_formats, validation_threshold, log_file):
    os.environ['GLOG_minloglevel'] = '1'  # suppress Caffe verbose output
    import caffe
    if not os.path.isfile(model_file):
        util.MaceLogger.error(
            VALIDATION_MODULE,
            "Input graph file '" + model_file + "' does not exist!")
    if not os.path.isfile(weight_file):
        util.MaceLogger.error(
            VALIDATION_MODULE,
            "Input weight file '" + weight_file + "' does not exist!")

    caffe.set_mode_cpu()

    net = caffe.Net(model_file, caffe.TEST, weights=weight_file)

    for i in range(len(input_names)):
        input_value = load_data(
            util.formatted_file_name(input_file, input_names[i]))
        input_value = input_value.reshape(input_shapes[i])
        if input_data_formats[i] == DataFormat.NHWC and \
                len(input_shapes[i]) == 4:
            input_value = input_value.transpose((0, 3, 1, 2))
        input_blob_name = input_names[i]
        try:
            if input_names[i] in net.top_names:
                input_blob_name = net.top_names[input_names[i]][0]
        except ValueError:
            pass
        new_shape = input_value.shape
        net.blobs[input_blob_name].reshape(*new_shape)
        for index in range(input_value.shape[0]):
            net.blobs[input_blob_name].data[index] = input_value[index]

    net.forward()

    for i in range(len(output_names)):
        value = net.blobs[output_names[i]].data
        output_file_name = util.formatted_file_name(mace_out_file,
                                                    output_names[i])
        mace_out_value = load_data(output_file_name)
        mace_out_value, real_output_shape, real_output_data_format = \
            get_real_out_value_shape_df(platform,
                                        mace_out_value,
                                        output_shapes[i],
                                        output_data_formats[i])
        compare_output(output_names[i], mace_out_value, value,
                       validation_threshold, log_file, real_output_shape,
                       real_output_data_format)
Example #6
def validate_keras_model(platform, model_file, input_file, mace_out_file,
                         input_names, input_shapes, input_data_formats,
                         output_names, output_shapes, output_data_formats,
                         validation_threshold, input_data_types, log_file):
    from tensorflow import keras
    import tensorflow_model_optimization as tfmot

    if not os.path.isfile(model_file):
        util.MaceLogger.error(
            VALIDATION_MODULE,
            "Input model file '" + model_file + "' does not exist!")

    with tfmot.quantization.keras.quantize_scope():
        keras_model = keras.models.load_model(model_file, compile=False)

        inputs = []
        for i in range(len(input_names)):
            input_value = load_data(
                util.formatted_file_name(input_file, input_names[i]),
                input_data_types[i])
            input_value = input_value.reshape(input_shapes[i])
            if input_data_formats[i] == DataFormat.NCHW and \
                    len(input_shapes[i]) == 4:
                input_value = input_value.transpose((0, 2, 3, 1))
            elif input_data_formats[i] == DataFormat.OIHW and \
                    len(input_shapes[i]) == 4:
                # OIHW -> HWIO
                input_value = input_value.transpose((2, 3, 1, 0))
            inputs.append(input_value)

        output_values = keras_model.predict(inputs)

        mace_check(len(output_names) == 1, "expected exactly one output")

        for i in range(len(output_names)):
            output_file_name = util.formatted_file_name(
                mace_out_file, output_names[i])
            mace_out_value = load_data(output_file_name,
                                       get_data_type_by_value(output_values))
            mace_out_value, real_output_shape, real_output_data_format = \
                get_real_out_value_shape_df(platform,
                                            mace_out_value,
                                            output_shapes[i],
                                            output_data_formats[i])
            compare_output(output_names[i], mace_out_value, output_values,
                           validation_threshold, log_file, real_output_shape,
                           real_output_data_format)
Example #7
def validate_megengine_model(platform, model_file, input_file, mace_out_file,
                             input_names, input_shapes, input_data_formats,
                             output_names, output_shapes, output_data_formats,
                             validation_threshold, input_data_types, log_file):
    import megengine._internal as mgb

    if not os.path.isfile(model_file):
        common.MaceLogger.error(
            VALIDATION_MODULE,
            "Input graph file '" + model_file + "' does not exist!",
        )

    feed_inputs = []
    for i in range(len(input_names)):
        input_value = load_data(
            util.formatted_file_name(input_file, input_names[i]),
            input_data_types[i])
        input_value = input_value.reshape(input_shapes[i])
        if (input_data_formats[i] == DataFormat.NHWC
                and len(input_shapes[i]) == 4):
            input_value = input_value.transpose((0, 3, 1, 2))
        feed_inputs.append(input_value)

    cg, _, outputs = mgb.load_comp_graph_from_file(model_file)
    inputs = mgb.cgtools.get_dep_vars(outputs, "Host2DeviceCopy")
    inputs = sorted(inputs, key=lambda i: i.name)
    outputs = list(map(mgb.copy_output, outputs))
    if len(outputs) == 1:
        (outputs, ) = outputs
    func = cg.compile(inputs, outputs)

    mge_output_value = func(*feed_inputs)

    for i in range(len(output_names)):
        output_file_name = \
            util.formatted_file_name(mace_out_file, output_names[i])
        mace_out_value = load_data(output_file_name)
        mace_out_value, real_output_shape, real_output_data_format = \
            get_real_out_value_shape_df(platform,
                                        mace_out_value,
                                        output_shapes[i],
                                        output_data_formats[i])
        compare_output(output_names[i], mace_out_value, mge_output_value,
                       validation_threshold, log_file, real_output_shape,
                       real_output_data_format)
Example #8
def validate_pytorch_model(model_file,
                           input_file, mace_out_file,
                           input_names, input_shapes, input_data_formats,
                           output_names, output_shapes, output_data_formats,
                           validation_threshold, input_data_types, log_file):
    import torch
    loaded_model = torch.jit.load(model_file)
    pytorch_inputs = []
    for i in range(len(input_names)):
        input_value = load_data(
            util.formatted_file_name(input_file, input_names[i]),
            input_data_types[i])
        input_value = input_value.reshape(input_shapes[i])
        if input_data_formats[i] == DataFormat.NHWC and \
                len(input_shapes[i]) == 4:
            input_value = input_value.transpose((0, 3, 1, 2))
        input_value = torch.from_numpy(input_value)
        pytorch_inputs.append(input_value)
    with torch.no_grad():
        pytorch_outputs = loaded_model(*pytorch_inputs)

    if isinstance(pytorch_outputs, torch.Tensor):
        pytorch_outputs = [pytorch_outputs]
    else:
        if not isinstance(pytorch_outputs, (list, tuple)):
            print('return type {} unsupported'.format(type(pytorch_outputs)))
            sys.exit(1)
    for i in range(len(output_names)):
        value = pytorch_outputs[i].numpy()
        output_file_name = util.formatted_file_name(
            mace_out_file, output_names[i])
        mace_out_value = load_data(output_file_name)
        # MACE outputs are written as flat buffers in the model's data
        # format, while PyTorch produces NCHW tensors, so convert first
        if output_data_formats[i] == DataFormat.NHWC and \
                len(output_shapes[i]) == 4:
            mace_out_value = mace_out_value.reshape(output_shapes[i])\
                .transpose((0, 3, 1, 2))
        compare_output(output_names[i], mace_out_value,
                       value, validation_threshold, log_file)
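torch.jit.load pairs with torch.jit.script and save. A small round trip showing the same load-and-call pattern (the /tmp path is illustrative):

import torch

class Double(torch.nn.Module):
    def forward(self, x):
        return x * 2

torch.jit.script(Double()).save('/tmp/double.pt')  # illustrative path

loaded = torch.jit.load('/tmp/double.pt')
with torch.no_grad():
    out = loaded(torch.ones(1, 3, 2, 2))
print(out.numpy().flatten())  # twelve 2.0 values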
Example #9
def generate_input_data(input_file, input_node, input_shape, input_ranges,
                        input_data_type):
    np.random.seed()
    for i in range(len(input_node)):
        data = np.random.random(input_shape[i]) * (
            input_ranges[i][1] - input_ranges[i][0]) + input_ranges[i][0]
        input_file_name = util.formatted_file_name(input_file, input_node[i])
        MaceLogger.info('Generate input file: %s' % input_file_name)
        if input_data_type[i] == mace_pb2.DT_FLOAT:
            np_data_type = np.float32
        elif input_data_type[i] == mace_pb2.DT_INT32:
            np_data_type = np.int32
        else:
            # fail fast instead of hitting a NameError on np_data_type below
            raise ValueError(
                'unsupported input data type: %s' % input_data_type[i])

        data.astype(np_data_type).tofile(input_file_name)
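The scale-and-shift expression maps np.random.random's [0, 1) output onto [low, high), and .tofile writes a raw, headerless dump, so the dtype and shape must be supplied again when reading back. A quick check of both (the /tmp path is illustrative):

import numpy as np

low, high = -1.0, 1.0
data = np.random.random((2, 3)) * (high - low) + low  # uniform in [low, high)
assert data.min() >= low and data.max() < high

data.astype(np.float32).tofile('/tmp/demo_input')     # illustrative path
restored = np.fromfile('/tmp/demo_input', dtype=np.float32).reshape(2, 3)
assert np.allclose(data.astype(np.float32), restored)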
Example #10
def validate_with_file(output_names, output_shapes,
                       mace_out_file, validation_outputs_data,
                       validation_threshold, log_file):
    for i in range(len(output_names)):
        if validation_outputs_data[i].startswith("http://") or \
                validation_outputs_data[i].startswith("https://"):
            validation_file_name = util.formatted_file_name(
                mace_out_file, output_names[i] + '_validation')
            six.moves.urllib.request.urlretrieve(validation_outputs_data[i],
                                                 validation_file_name)
        else:
            validation_file_name = validation_outputs_data[i]
        value = load_data(validation_file_name)
        out_shape = output_shapes[i]
        if len(out_shape) == 4:
            out_shape[1], out_shape[2], out_shape[3] = \
                out_shape[3], out_shape[1], out_shape[2]
            value = value.reshape(out_shape).transpose((0, 2, 3, 1))
        output_file_name = util.formatted_file_name(
            mace_out_file, output_names[i])
        mace_out_value = load_data(output_file_name)
        compare_output(output_names[i], mace_out_value,
                       value, validation_threshold, log_file)
Example #11
def run_model_for_device(flags, args, dev, model_name, model_conf):
    target_abi = flags.target_abi
    install_dir = run_target.default_install_dir(target_abi) + "/" + model_name
    sysdir = install_dir + "/interior"
    dev.mkdir(sysdir)

    runtime_list = []
    for graph_name, graph_conf in model_conf[ModelKeys.subgraphs].items():
        runtime = graph_conf[ModelKeys.runtime]
        runtime_list.append(runtime)
        mace_check(runtime != DeviceType.APU or target_abi == "arm64-v8a",
                   "the APU runtime only supports arm64-v8a")

    # install models to devices
    workdir = flags.output + "/" + model_name
    model_file = model_name + ".pb"
    model_data_file = model_name + ".data"
    model_path = workdir + "/model/" + model_file
    model_data_path = workdir + "/model/" + model_data_file
    if os.path.exists(model_path) and os.path.exists(model_data_path):
        dev.install(Target(model_path), install_dir)
        dev.install(Target(model_data_path), install_dir)
    else:
        MaceLogger.warning("No models exist in %s, use --model_file and"
                           " --model_data_file specified in args" % model_path)

    if ModelKeys.check_tensors in model_conf:
        model_conf[ModelKeys.output_tensors] = model_conf[
            ModelKeys.check_tensors]
        model_conf[ModelKeys.output_shapes] = model_conf[
            ModelKeys.check_shapes]

    model_file_path = ""
    if not flags.gencode_model:
        model_file_path = install_dir + "/" + model_file
    model_data_file_path = ""
    if not flags.gencode_param:
        model_data_file_path = install_dir + "/" + model_data_file

    input_tensors_info = config_parser.find_input_tensors_info(
        model_conf[ModelKeys.subgraphs], model_conf[ModelKeys.input_tensors])
    output_tensors_info = config_parser.find_output_tensors_info(
        model_conf[ModelKeys.subgraphs], model_conf[ModelKeys.output_tensors])

    model_args = {
        "model_name":
        model_name,
        "model_file":
        model_file_path,
        "model_data_file":
        model_data_file_path,
        "input_node":
        ",".join(model_conf[ModelKeys.input_tensors]),
        "input_shape":
        join_2d_array(input_tensors_info[ModelKeys.input_shapes]),
        "output_node":
        ",".join(model_conf[ModelKeys.output_tensors]),
        "output_shape":
        join_2d_array(output_tensors_info[ModelKeys.output_shapes]),
        "input_data_format":
        ",".join([
            df.name for df in input_tensors_info[ModelKeys.input_data_formats]
        ]),
        "output_data_format":
        ",".join([
            df.name
            for df in output_tensors_info[ModelKeys.output_data_formats]
        ])
    }

    opts = [
        "--%s='%s'" % (arg_key, arg_val)
        for arg_key, arg_val in model_args.items()
    ] + args
    should_generate_data = (flags.validate or flags.tune
                            or "--benchmark" in opts)

    if should_generate_data:
        tmpdirname = tempfile.mkdtemp()
        input_file_prefix = tmpdirname + "/" + model_name

        if ModelKeys.validation_inputs_data in model_conf:
            input_tensor = model_conf[ModelKeys.input_tensors]
            input_data = model_conf[ModelKeys.validation_inputs_data]
            mace_check(
                len(input_tensor) == len(input_data),
                "len(input_tensor) != len(validate_data")

            for i in range(len(input_tensor)):
                util.download_or_get_file(
                    model_conf[ModelKeys.validation_inputs_data][i], "",
                    util.formatted_file_name(input_file_prefix,
                                             input_tensor[i]))
        else:
            generate_input_data(input_file_prefix,
                                model_conf[ModelKeys.input_tensors],
                                input_tensors_info[ModelKeys.input_shapes],
                                input_tensors_info[ModelKeys.input_ranges],
                                input_tensors_info[ModelKeys.input_data_types])

        dev.install(Target(tmpdirname), install_dir + "/validate_in")
        target_input_file = "%s/validate_in/%s" % (install_dir, model_name)
        target_output_dir = "%s/validate_out" % install_dir
        dev.mkdir(target_output_dir)
        target_output_file = target_output_dir + "/" + model_name
        opts += [
            "--input_file=%s" % target_input_file,
            "--output_file=%s" % target_output_file
        ]

    # run
    envs = flags.envs.split(" ") + ["MACE_INTERNAL_STORAGE_PATH=%s" % sysdir]
    if flags.tune:
        envs += [
            "MACE_TUNING=1",
            "MACE_RUN_PARAMETER_PATH=%s/interior/tune_params" % install_dir
        ]
        opts += ["--round=0"]

    mace_check(flags.vlog_level >= 0,
               "vlog_level should not be negative")
    envs += ["MACE_CPP_MIN_VLOG_LEVEL=%s" % flags.vlog_level]

    build_dir = flags.build_dir + "/" + target_abi
    libs = []
    if DeviceType.HEXAGON in runtime_list:
        libs += ["third_party/nnlib/%s/libhexagon_controller.so" % target_abi]
    elif DeviceType.HTA in runtime_list:
        libs += ["third_party/hta/%s/libhta_hexagon_runtime.so" % target_abi]
    elif DeviceType.APU in runtime_list:
        apu_libs = get_apu_so_paths(dev)
        libs += apu_libs

    cpp_shared_lib_path = os.path.join(build_dir,
                                       "install/lib/libc++_shared.so")
    if os.path.exists(cpp_shared_lib_path):
        libs.append(cpp_shared_lib_path)

    target = Target(build_dir + "/install/bin/mace_run",
                    libs,
                    opts=opts,
                    envs=envs)
    run_target.run_target(target_abi, install_dir, target, dev)

    if DeviceType.GPU in runtime_list:
        opencl_dir = workdir + "/opencl"
        util.mkdir_p(opencl_dir)
        dev.pull(
            Target(install_dir + "/interior/mace_cl_compiled_program.bin"),
            "%s/%s_compiled_opencl_kernel.%s.%s.bin" %
            (opencl_dir, model_name, dev.info()["ro.product.model"].replace(
                ' ', ''), dev.info()["ro.board.platform"]))
        if flags.tune:
            dev.pull(
                Target(install_dir + "/interior/tune_params"),
                "%s/%s_tuned_opencl_parameter.%s.%s.bin" %
                (opencl_dir, model_name,
                 dev.info()["ro.product.model"].replace(
                     ' ', ''), dev.info()["ro.board.platform"]))

    if flags.validate:
        validate_model_file = util.download_or_get_model(
            model_conf[ModelKeys.model_file_path],
            model_conf[ModelKeys.model_sha256_checksum], tmpdirname)

        validate_weight_file = ""
        if ModelKeys.weight_file_path in model_conf:
            validate_weight_file = util.download_or_get_model(
                model_conf[ModelKeys.weight_file_path],
                model_conf[ModelKeys.weight_sha256_checksum], tmpdirname)

        dev.pull(Target(target_output_dir), tmpdirname + "/validate_out")
        output_file_prefix = tmpdirname + "/validate_out/" + model_name
        validate.validate(model_conf[ModelKeys.platform], validate_model_file,
                          validate_weight_file, input_file_prefix,
                          output_file_prefix,
                          input_tensors_info[ModelKeys.input_shapes],
                          output_tensors_info[ModelKeys.output_shapes],
                          input_tensors_info[ModelKeys.input_data_formats],
                          output_tensors_info[ModelKeys.output_data_formats],
                          input_tensors_info[ModelKeys.input_tensors],
                          output_tensors_info[ModelKeys.output_tensors],
                          flags.validate_threshold,
                          input_tensors_info[ModelKeys.input_data_types],
                          flags.backend, "", "")
    if should_generate_data:
        shutil.rmtree(tmpdirname)
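The model_args dict above is expanded into CLI flags with a single list comprehension. A toy reconstruction with made-up values (the keys mirror the ones above; the values are hypothetical):

model_args = {
    "model_name": "mobilenet_v1",    # hypothetical values
    "input_node": "input",
    "input_shape": "1,224,224,3",
}
opts = ["--%s='%s'" % (k, v) for k, v in model_args.items()]
print(opts)
# ["--model_name='mobilenet_v1'", "--input_node='input'",
#  "--input_shape='1,224,224,3'"]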
Example #12
def run_model_with_conf(flags, args, model_name, model_conf):
    target_abi = "host"
    dev = device.HostDevice("host", target_abi)
    install_dir = "/tmp/micro_run/" + model_name

    if ModelKeys.check_tensors in model_conf:
        model_conf[ModelKeys.output_tensors] = model_conf[
            ModelKeys.check_tensors]
        model_conf[ModelKeys.output_shapes] = model_conf[
            ModelKeys.check_shapes]

    model_args = {
        "model_name":
        model_name,
        "input_node":
        ",".join(model_conf[ModelKeys.input_tensors]),
        "input_shape":
        join_2d_array(model_conf[ModelKeys.input_shapes]),
        "output_node":
        ",".join(model_conf[ModelKeys.output_tensors]),
        "output_shape":
        join_2d_array(model_conf[ModelKeys.output_shapes]),
        "input_data_format":
        ",".join([df.name for df in model_conf[ModelKeys.input_data_formats]]),
        "output_data_format":
        ",".join([df.name for df in model_conf[ModelKeys.output_data_formats]])
    }

    opts = [
        "--%s=%s" % (arg_key, arg_val)
        for arg_key, arg_val in model_args.items()
    ] + args

    # generate data start
    tmp_dir_name = tempfile.mkdtemp()
    input_file_prefix = tmp_dir_name + "/" + model_name
    if ModelKeys.validation_inputs_data in model_conf:
        input_tensor = model_conf[ModelKeys.input_tensors]
        input_data = model_conf[ModelKeys.validation_inputs_data]
        mace_check(
            len(input_tensor) == len(input_data),
            "len(input_tensor) != len(validate_data")

        for i in range(len(input_tensor)):
            util.download_or_get_file(
                model_conf[ModelKeys.validation_inputs_data][i], "",
                util.formatted_file_name(input_file_prefix, input_tensor[i]))
    else:
        generate_input_data(input_file_prefix,
                            model_conf[ModelKeys.input_tensors],
                            model_conf[ModelKeys.input_shapes],
                            model_conf[ModelKeys.input_ranges],
                            model_conf[ModelKeys.input_data_types])

    dev.install(Target(tmp_dir_name), install_dir + "/validate_in")
    target_input_file = "%s/validate_in/%s" % (install_dir, model_name)
    target_output_dir = "%s/validate_out" % install_dir
    dev.mkdir(target_output_dir)
    target_output_file = target_output_dir + "/" + model_name
    opts += [
        "--input_file=%s" % target_input_file,
        "--output_file=%s" % target_output_file
    ]
    # generate data end

    envs = []
    if flags.vlog_level > 0:
        envs += ["MACE_CPP_MIN_VLOG_LEVEL=%s" % flags.vlog_level]

    target = Target("build/micro/host/tools/micro_run_static", [],
                    opts=opts,
                    envs=envs)
    run_target.run_target(target_abi, install_dir, target, device_ids="host")

    if flags.validate:
        validate_model_file = util.download_or_get_model(
            model_conf[ModelKeys.model_file_path],
            model_conf[ModelKeys.model_sha256_checksum], tmp_dir_name)

        validate_weight_file = ""
        if ModelKeys.weight_file_path in model_conf:
            validate_weight_file = util.download_or_get_model(
                model_conf[ModelKeys.weight_file_path],
                model_conf[ModelKeys.weight_sha256_checksum], tmp_dir_name)

        dev.pull(Target(target_output_dir), tmp_dir_name + "/validate_out")
        output_file_prefix = tmp_dir_name + "/validate_out/" + model_name
        validate.validate(
            model_conf[ModelKeys.platform], validate_model_file,
            validate_weight_file, input_file_prefix, output_file_prefix,
            model_conf[ModelKeys.input_shapes],
            model_conf[ModelKeys.output_shapes],
            model_conf[ModelKeys.input_data_formats],
            model_conf[ModelKeys.output_data_formats],
            model_conf[ModelKeys.input_tensors],
            model_conf[ModelKeys.output_tensors], flags.validate_threshold,
            model_conf[ModelKeys.input_data_types], flags.backend, "", "")
    shutil.rmtree(tmp_dir_name)
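Both run_model_* functions stage generated inputs in a tempfile.mkdtemp scratch directory and remove it with shutil.rmtree. A minimal version of that lifecycle; the try/finally is a suggested hardening, not present above, so the directory is cleaned up even if a run step raises:

import os
import shutil
import tempfile

tmp_dir = tempfile.mkdtemp()
try:
    with open(os.path.join(tmp_dir, 'demo_input'), 'w') as f:
        f.write('generated data')       # stand-in for generate_input_data
finally:
    shutil.rmtree(tmp_dir)              # mirrors the cleanup above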