Example #1
def run_models_for_device(flags, args, dev):
    conf = config_parser.parse(flags.config)
    for name, model_conf in conf["models"].items():
        if not flags.model_name or name == flags.model_name:
            MaceLogger.info("Run model %s" % name)
            model_conf = config_parser.normalize_model_config(model_conf)
            run_model_for_device(flags, args, dev, name, model_conf)
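For reference, config_parser.parse returns a dict whose "models" entry maps model names to their raw configs; a minimal sketch of the shape this loop expects, with hypothetical values (the inner keys mirror the ModelKeys usage in Example #4):

conf = {
    "models": {
        "mobilenet_v1": {                                              # hypothetical model
            "model_file_path": "https://example.com/mobilenet_v1.pb",  # assumed value
            "model_sha256_checksum": "0123abcd...",                    # assumed value
        },
    },
}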
Example #2
def run_layers_validate(flags, args, original_conf):
    model_name = flags.model_name
    original_model_dir = flags.output + "/" + \
        original_conf['library_name'] + "/model"
    model_dir = "/tmp/micro_run/model"
    device.execute("mkdir -p %s" % model_dir)
    device.execute("cp -p %s/%s.pb %s" %
                   (original_model_dir, model_name, model_dir))
    params_file_path = "%s/%s.data" % (original_model_dir, model_name)
    output_configs = layers_validate.get_layers(model_dir, model_name,
                                                flags.layers)

    for output_config in output_configs:
        sub_model_conf = gen_sub_model_conf(output_config, flags,
                                            original_conf)
        with open(output_config['model_file_path'], "rb") as model_file:
            net_def = mace_pb2.NetDef()
            net_def.ParseFromString(model_file.read())
            with open(params_file_path, "rb") as params_file:
                weights = bytearray(params_file.read())
                micro_conf = \
                    config_parser.normalize_model_config(sub_model_conf)
                MicroConverter(micro_conf, net_def, weights,
                               model_name).gen_code()
                build_engine(model_name, micro_conf[ModelKeys.data_type])
                run_model_with_conf(flags, args, model_name, micro_conf)
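run_layers_validate only reads flags.model_name, flags.output, and flags.layers, so it can be driven directly; a minimal sketch with hypothetical values (the layers value format is an assumption, and original_conf is a parsed config as in Example #1):

from argparse import Namespace

flags = Namespace(model_name="mobilenet_v1",  # hypothetical model
                  output="build",
                  layers="0,1")               # assumed format
run_layers_validate(flags, args=[], original_conf=original_conf)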
Example #3
def run_model(flags, args, conf):
    model_conf = get_model_conf_by_name(flags, conf)
    mace_check(model_conf is not None, "Get model conf failed.")
    model_conf = config_parser.normalize_model_config(model_conf)
    mace_check(len(model_conf[ModelKeys.subgraphs]) == 1,
               "MACE Micro does not support multi-graph models.")
    for graph_name, graph_config in model_conf[ModelKeys.subgraphs].items():
        run_model_with_conf(flags, args, flags.model_name, graph_config)
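Since the mace_check above guarantees exactly one subgraph, the loop body runs once; an equivalent sketch that makes the single-entry extraction explicit:

graph_config = next(iter(model_conf[ModelKeys.subgraphs].values()))
run_model_with_conf(flags, args, flags.model_name, graph_config)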
Example #4
def convert(conf, output, enable_micro=False):
    quantize_stat = conf.get(ModelKeys.quantize_stat, False)
    for model_name, model_conf in conf["models"].items():
        model_output = output + "/" + model_name + "/model"
        org_model_dir = output + "/" + model_name + "/org_model"
        util.mkdir_p(model_output)
        util.mkdir_p(org_model_dir)

        model_conf = normalize_model_config(model_conf)

        model_file = util.download_or_get_model(
            model_conf[ModelKeys.model_file_path],  # noqa
            model_conf[ModelKeys.model_sha256_checksum],  # noqa
            output + "/" + model_name + "/org_model")
        model_conf[ModelKeys.model_file_path] = model_file
        if ModelKeys.weight_file_path in model_conf:
            weight_file = util.download_or_get_model(
                model_conf[ModelKeys.weight_file_path],
                model_conf[ModelKeys.weight_sha256_checksum], "/tmp/")
            model_conf[ModelKeys.weight_file_path] = weight_file

        # TODO: remove this once the quantize tool is available
        if ModelKeys.quantize_range_file in model_conf:
            range_file = util.download_or_get_model(
                model_conf[ModelKeys.quantize_range_file],
                "", model_output)
            model_conf[ModelKeys.quantize_range_file] = range_file

        mace_model = convert_model(model_conf, quantize_stat)

        try:
            visualizer = visualize_model.ModelVisualizer(model_name,
                                                         mace_model,
                                                         model_output)
            visualizer.save_html()
        except Exception:  # noqa
            print("Failed to visualize model:", sys.exc_info())

        model, params = merge_params(mace_model,
                                     model_conf[ModelKeys.data_type])
        if enable_micro:
            micro_converter = MicroConverter(model_conf, copy.deepcopy(model),
                                             copy.deepcopy(params), model_name)
            micro_converter.gen_code()
            micro_converter.package(model_output + "/" +
                                    model_name + "_micro.tar.gz")
        output_model_file = model_output + "/" + model_name + ".pb"
        output_params_file = model_output + "/" + model_name + ".data"
        with open(output_model_file, "wb") as f:
            f.write(model.SerializeToString())
        with open(output_params_file, "wb") as f:
            f.write(bytearray(params))
        with open(output_model_file + "_txt", "w") as f:
            f.write(str(model))
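The trailing "_txt" dump written above is the protobuf text format of the model, so it can be parsed back for inspection; a sketch (the file path and the py_proto import path are assumptions):

from google.protobuf import text_format
from py_proto import mace_pb2  # import path is an assumption

model = mace_pb2.NetDef()
with open("build/mobilenet_v1/model/mobilenet_v1.pb_txt") as f:
    text_format.Parse(f.read(), model)  # rebuilds the NetDef from text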
Example #5
def convert(conf, output, enable_micro=False):
    for model_name, model_conf in conf["models"].items():
        model_output = output + "/" + model_name + "/model"
        org_model_dir = output + "/" + model_name + "/org_model"
        util.mkdir_p(model_output)
        util.mkdir_p(org_model_dir)

        model_conf = normalize_model_config(model_conf, model_output,
                                            org_model_dir)
        conf["models"][model_name] = model_conf
        net_confs = model_conf[ModelKeys.subgraphs]

        model = mace_pb2.MultiNetDef()
        add_input_output_tensor(model, model_conf)

        model_params = []
        for net_name, net_conf in net_confs.items():
            if "quantize_stat" in conf:
                net_conf["quantize_stat"] = conf["quantize_stat"]
            net_def_with_data = convert_net(net_name, net_conf, enable_micro)
            try:
                visualizer = visualize_model.ModelVisualizer(
                    net_name, net_def_with_data, model_output)
                visualizer.save_html()
            except Exception:  # noqa
                print("Failed to visualize graph:", sys.exc_info())
            net_def, params = merge_params(net_def_with_data,
                                           net_conf[ModelKeys.data_type])
            if enable_micro:
                convert_micro(
                    model_name,
                    net_confs,
                    net_def,
                    params,
                    model_output,
                )

            # Record where this net's weights live in the concatenated blob.
            net_def.data_offset = len(model_params)
            net_def.data_size = len(params)
            model.net_def.extend([net_def])
            model_params.extend(params)
        # Store the model and weights to files.
        output_model_file = model_output + "/" + model_name + ".pb"
        output_params_file = model_output + "/" + model_name + ".data"
        with open(output_model_file, "wb") as f:
            f.write(model.SerializeToString())
        with open(output_params_file, "wb") as f:
            f.write(bytearray(model_params))
        with open(output_model_file + "_txt", "w") as f:
            f.write(str(model))
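Because every net's params are appended to one blob, the data_offset and data_size fields recorded above are enough to slice each net's weights back out; a sketch reusing the variables from this example:

with open(output_params_file, "rb") as f:
    all_params = f.read()
for net_def in model.net_def:
    start = net_def.data_offset
    net_params = all_params[start:start + net_def.data_size]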
Example #6
def run_model(flags, args, conf):
    model_conf = get_model_conf_by_name(flags, conf)
    mace_check(model_conf is not None, "Get model conf failed.")
    model_conf = config_parser.normalize_model_config(model_conf)
    run_model_with_conf(flags, args, flags.model_name, model_conf)
Example #7
    parser.add_argument("--backend",
                        type=str,
                        default="tensorflow",
                        help="onnx backend framework")
    parser.add_argument("--build",
                        action="store_true",
                        help="if build before run")
    parser.add_argument('--output',
                        type=str,
                        default="build",
                        help="output dir")
    parser.add_argument('--vlog_level',
                        type=int,
                        default="0",
                        help="vlog level")

    return parser.parse_known_args()


if __name__ == "__main__":
    flags, args = parse_args()
    conf = config_parser.parse(flags.config)
    if flags.build or flags.validate:
        micro_conf = config_parser.normalize_model_config(
            conf[ModelKeys.models][flags.model_name])
        build_engine(flags.model_name, micro_conf[ModelKeys.data_type])
    if flags.validate and flags.layers != "-1":
        run_layers_validate(flags, args, conf)
    else:
        run_model(flags, args, conf)
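The __main__ block also reads flags.config, flags.model_name, flags.validate, and flags.layers, none of which the excerpted parser defines; a sketch of the missing arguments, inferred from their uses above (defaults are assumptions, except the "-1" sentinel for --layers, which the validate check relies on):

parser.add_argument("--config", type=str, default="",
                    help="yaml config path")       # assumed default
parser.add_argument("--model_name", type=str, default="",
                    help="model name in config")   # assumed default
parser.add_argument("--validate", action="store_true",
                    help="validate outputs")
parser.add_argument("--layers", type=str, default="-1",
                    help="layers to validate; -1 runs the whole model")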
Example #8
    )
    with open(output + '/mace_engine_factory.h', "w") as f:
        f.write(source)


if __name__ == '__main__':
    flags = parse_args()
    codegen_dir = "mace/codegen/models"
    device.execute("rm -rf %s/*" % codegen_dir)

    models = []
    if flags.config:
        conf = config_parser.parse(flags.config)

        for name, model_conf in conf["models"].items():
            if not flags.model_name or name == flags.model_name:
                model_conf = config_parser.normalize_model_config(model_conf)
                MaceLogger.info("Encrypt model %s" % name)
                encrypt(name, "build/%s/model/%s.pb" % (name, name),
                        "build/%s/model/%s.data" % (name, name),
                        model_conf[ModelKeys.runtime],
                        codegen_dir + "/" + name, not flags.no_obfuscate,
                        flags.gencode_model, flags.gencode_param)
                models.append(name)
                os.rename("%s/%s/%s.pb" % (codegen_dir, name, name),
                          "build/%s/model/%s.pb" % (name, name))
                os.rename("%s/%s/%s.data" % (codegen_dir, name, name),
                          "build/%s/model/%s.data" % (name, name))
    else:
        device_type = config_parser.parse_device_type(flags.device)
        encrypt(flags.model_name, flags.model_file, flags.params_file,