Example #1
import os
import struct

import numpy as np
import sh  # the PyPI `sh` package the MACE tools shell out through
import six

import common  # assumed: the MACE tools' shared helpers (mace_check etc.)


def merge_opencl_binaries(binaries_dirs,
                          cl_compiled_program_file_name,
                          output_file_path):
    # The sentinel key is compared against keys unpacked as bytes below.
    platform_info_key = b'mace_opencl_precompiled_platform_info_key'
    cl_bin_dirs = []
    for d in binaries_dirs:
        cl_bin_dirs.append(os.path.join(d, "opencl_bin"))
    # create opencl binary output dir
    opencl_binary_dir = os.path.dirname(output_file_path)
    if not os.path.exists(opencl_binary_dir):
        sh.mkdir("-p", opencl_binary_dir)
    kvs = {}
    for binary_dir in cl_bin_dirs:
        binary_path = os.path.join(binary_dir, cl_compiled_program_file_name)
        if not os.path.exists(binary_path):
            continue

        six.print_('generate opencl code from', binary_path)
        with open(binary_path, "rb") as f:
            binary_array = np.fromfile(f, dtype=np.uint8)

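        # Layout: uint64 pair count, then per pair: int32 key size, key
        # bytes, int32 value size, value bytes.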
        idx = 0
        size, = struct.unpack("Q", binary_array[idx:idx + 8])
        idx += 8
        for _ in range(size):
            key_size, = struct.unpack("i", binary_array[idx:idx + 4])
            idx += 4
            key, = struct.unpack(
                str(key_size) + "s", binary_array[idx:idx + key_size])
            idx += key_size
            value_size, = struct.unpack("i", binary_array[idx:idx + 4])
            idx += 4
            if key == platform_info_key and key in kvs:
                common.mace_check(
                    (kvs[key] == binary_array[idx:idx + value_size]).all(),
                    "",
                    "There exists more than one OpenCL version for models:"
                    " %s vs %s " %
                    (kvs[key], binary_array[idx:idx + value_size]))
            else:
                kvs[key] = binary_array[idx:idx + value_size]
            idx += value_size

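    # Re-serialize the merged pairs using the same layout as the inputs.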
    output_byte_array = bytearray()
    data_size = len(kvs)
    output_byte_array.extend(struct.pack("Q", data_size))
    for key, value in kvs.items():
        key_size = len(key)
        output_byte_array.extend(struct.pack("i", key_size))
        output_byte_array.extend(struct.pack(str(key_size) + "s", key))
        value_size = len(value)
        output_byte_array.extend(struct.pack("i", value_size))
        output_byte_array.extend(value)

    np.frombuffer(output_byte_array, dtype=np.uint8).tofile(output_file_path)
Example #2
def gen_input(model_output_dir,
              input_nodes,
              input_shapes,
              input_files=None,
              input_ranges=None,
              input_data_types=None,
              input_data_map=None,
              input_file_name="model_input"):
    for input_name in input_nodes:
        formatted_name = common.formatted_file_name(input_file_name,
                                                    input_name)
        if os.path.exists("%s/%s" % (model_output_dir, formatted_name)):
            sh.rm("%s/%s" % (model_output_dir, formatted_name))
    input_file_list = []
    if isinstance(input_files, list):
        input_file_list.extend(input_files)
    else:
        input_file_list.append(input_files)
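    # Inputs come from one of three sources, in priority order: an in-memory
    # data map, user-supplied input files (local paths or URLs), or randomly
    # generated data.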
    if input_data_map:
        for i in range(len(input_nodes)):
            dst_input_file = model_output_dir + '/' + \
                             common.formatted_file_name(input_file_name,
                                                        input_nodes[i])
            input_name = input_nodes[i]
            common.mace_check(
                input_name in input_data_map, common.ModuleName.RUN,
                "The preprocessor API in PrecisionValidator"
                " script should return all inputs of model")
            if input_data_types[i] == 'float32':
                input_data = np.array(input_data_map[input_name],
                                      dtype=np.float32)
            elif input_data_types[i] == 'int32':
                input_data = np.array(input_data_map[input_name],
                                      dtype=np.int32)
            else:
                common.mace_check(
                    False, common.ModuleName.RUN,
                    'Do not support input data type %s' % input_data_types[i])
            common.mace_check(
                list(map(int, common.split_shape(input_shapes[i]))) == list(
                    input_data.shape), common.ModuleName.RUN,
                "The shape return from preprocessor API of"
                " PrecisionValidator script is not same with"
                " model deployment file. %s vs %s" %
                (str(input_shapes[i]), str(input_data.shape)))
            input_data.tofile(dst_input_file)
    elif len(input_file_list) != 0:
        input_name_list = []
        if isinstance(input_nodes, list):
            input_name_list.extend(input_nodes)
        else:
            input_name_list.append(input_nodes)
        common.mace_check(
            len(input_file_list) == len(input_name_list),
            common.ModuleName.RUN,
            'If input_files is set, the input files must '
            'match the input names.')
        for i in range(len(input_file_list)):
            if input_file_list[i] is not None:
                dst_input_file = model_output_dir + '/' + \
                                 common.formatted_file_name(input_file_name,
                                                            input_name_list[i])
                if input_file_list[i].startswith("http://") or \
                        input_file_list[i].startswith("https://"):
                    six.moves.urllib.request.urlretrieve(
                        input_file_list[i], dst_input_file)
                else:
                    sh.cp("-f", input_file_list[i], dst_input_file)
    else:
        # generate random input files
        input_nodes_str = ",".join(input_nodes)
        input_shapes_str = ":".join(input_shapes)
        input_ranges_str = ":".join(input_ranges)
        input_data_types_str = ",".join(input_data_types)
        generate_input_data("%s/%s" % (model_output_dir, input_file_name),
                            input_nodes_str, input_shapes_str,
                            input_ranges_str, input_data_types_str)
Example #3
def get_soc_skel_info(soc_id):
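    # Linear scan of the known DSP SoC table; abort on unknown SoC ids.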
    for info in SocSkelInfo:
        if info.soc_id == soc_id:
            return info

    mace_check(False, ModuleName.RUN, "Unsupported dsp soc %d" % soc_id)
Example #4
def merge_opencl_binaries_or_parameters(binaries_dirs,
                                        cl_compiled_program_file_name,
                                        output_file_path,
                                        merge_type):
    common.mace_check(merge_type in [MergeType.binary, MergeType.parameter],
                      "",
                      "Only merging binaries or parameters is supported, "
                      "but got merge type {}".format(merge_type))
    if merge_type == MergeType.binary:
        platform_info_key = 'mace_opencl_precompiled_platform_info_key'
    cl_bin_dirs = []
    for d in binaries_dirs:
        cl_bin_dirs.append(os.path.join(d, "opencl_bin"))
    # create opencl binary output dir
    opencl_binary_dir = os.path.dirname(output_file_path)
    if not os.path.exists(opencl_binary_dir):
        sh.mkdir("-p", opencl_binary_dir)
    kvs = {}
    CRC32SIZE = 4
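    # Each input file is a serialized mace_pb2.PairContainer followed by a
    # trailing 4-byte CRC32 of the message bytes.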
    for binary_dir in cl_bin_dirs:
        binary_path = os.path.join(binary_dir, cl_compiled_program_file_name)
        if not os.path.exists(binary_path):
            continue

        if merge_type == MergeType.binary:
            six.print_('generate opencl code from', binary_path)
        elif merge_type == MergeType.parameter:
            six.print_('generate opencl parameter from', binary_path)
        with open(binary_path, "rb") as f:
            all_bytes = f.read()
        if len(all_bytes) < CRC32SIZE:
            print("File size of {} is less than CRC bytes".format(binary_path))
            continue
        # Use "<I" to match the little-endian CRC packed when writing below.
        saved_crc = struct.unpack("<I", all_bytes[-CRC32SIZE:])[0]
        current_crc = zlib.crc32(all_bytes[:-CRC32SIZE])
        # In Python2, zlib.crc32 may return negative value,
        # so casting is needed for Python2.
        current_crc = np.array([current_crc]).astype(np.uint32)[0]
        if saved_crc != current_crc:
            print("CRC value of {} is invalid".format(binary_path))
            continue
        container = mace_pb2.PairContainer()
        container.ParseFromString(all_bytes[:-CRC32SIZE])
        size = len(container.pairs)
        for i in range(size):
            pair = container.pairs[i]
            key = pair.key
            if merge_type == MergeType.binary:
                if key == platform_info_key and key in kvs:
                    common.mace_check(
                        kvs[key] == pair.bytes_value,
                        "",
                        "More than one OpenCL platform version exists"
                        " across the models: %s vs %s" %
                        (kvs[key], pair.bytes_value))
                else:
                    kvs[pair.key] = pair.bytes_value
            elif merge_type == MergeType.parameter:
                kvs[key] = pair.uint32s_value
    new_container = mace_pb2.PairContainer()
    for key in kvs:
        pair = new_container.pairs.add()
        pair.key = key
        if merge_type == MergeType.binary:
            pair.bytes_value = kvs[key]
        elif merge_type == MergeType.parameter:
            pair.uint32s_value.extend(kvs[key])

    message_str = new_container.SerializeToString()
    crc_val = zlib.crc32(message_str)
    crc_val = np.array([crc_val]).astype(np.uint32)[0]
    crc_bytes = struct.pack("<I", crc_val)
    with open(output_file_path, "wb") as ofp:
        ofp.write(message_str + crc_bytes)
Example #5
def convert_model(configs):
    # Remove previous output dirs
    library_name = configs[YAMLKeyword.library_name]
    if not os.path.exists(BUILD_OUTPUT_DIR):
        os.makedirs(BUILD_OUTPUT_DIR)
    elif os.path.exists(os.path.join(BUILD_OUTPUT_DIR, library_name)):
        shutil.rmtree(os.path.join(BUILD_OUTPUT_DIR, library_name))
    os.mkdir(os.path.join(BUILD_OUTPUT_DIR, library_name))

    model_output_dir = \
        '%s/%s/%s' % (BUILD_OUTPUT_DIR, library_name, MODEL_OUTPUT_DIR_NAME)
    model_header_dir = \
        '%s/%s/%s' % (BUILD_OUTPUT_DIR, library_name, MODEL_HEADER_DIR_PATH)
    # clear output dir
    if os.path.exists(model_output_dir):
        shutil.rmtree(model_output_dir)
    os.makedirs(model_output_dir)
    if os.path.exists(model_header_dir):
        shutil.rmtree(model_header_dir)

    embed_model_data = configs[
        YAMLKeyword.model_data_format] == ModelFormat.code
    if embed_model_data:
        mace_check(False, ModuleName.YAML_CONFIG,
                   "only model_data_format = file is supported")

    if os.path.exists(MODEL_CODEGEN_DIR):
        shutil.rmtree(MODEL_CODEGEN_DIR)
    if os.path.exists(ENGINE_CODEGEN_DIR):
        shutil.rmtree(ENGINE_CODEGEN_DIR)

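    # For each model: fetch and checksum-verify the model files, generate the
    # model code, then move the converted artifacts into the library output
    # directory.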
    for model_name in configs[YAMLKeyword.models]:
        MaceLogger.header(
            StringFormatter.block("Convert %s model" % model_name))
        model_config = configs[YAMLKeyword.models][model_name]
        runtime = model_config[YAMLKeyword.runtime]

        model_file_path, weight_file_path = get_model_files(
            model_config[YAMLKeyword.model_file_path],
            model_config[YAMLKeyword.model_sha256_checksum],
            BUILD_DOWNLOADS_DIR, model_config[YAMLKeyword.weight_file_path],
            model_config[YAMLKeyword.weight_sha256_checksum])

        data_type = model_config[YAMLKeyword.data_type]
        # TODO(liuqi): support multiple subgraphs
        subgraphs = model_config[YAMLKeyword.subgraphs]

        model_codegen_dir = "%s/%s" % (MODEL_CODEGEN_DIR, model_name)
        gen_model_code(
            model_codegen_dir, model_config[YAMLKeyword.platform],
            model_file_path, weight_file_path,
            model_config[YAMLKeyword.model_sha256_checksum],
            model_config[YAMLKeyword.weight_sha256_checksum],
            ",".join(subgraphs[0][YAMLKeyword.input_tensors]),
            ",".join(subgraphs[0][YAMLKeyword.output_tensors]), runtime,
            model_name, ":".join(subgraphs[0][YAMLKeyword.input_shapes]),
            model_config[YAMLKeyword.nnlib_graph_mode], embed_model_data,
            model_config[YAMLKeyword.winograd],
            model_config[YAMLKeyword.obfuscate],
            configs[YAMLKeyword.model_graph_format], data_type, ",".join(
                model_config.get(YAMLKeyword.graph_optimize_options, [])))

        if configs[YAMLKeyword.model_graph_format] == ModelFormat.file:
            shutil.move('%s/%s.pb_txt' % (model_codegen_dir, model_name),
                        model_output_dir)
            shutil.move('%s/%s.pb' % (model_codegen_dir, model_name),
                        model_output_dir)
            shutil.move('%s/%s.data' % (model_codegen_dir, model_name),
                        model_output_dir)
        else:
            mace_check(False, ModuleName.YAML_CONFIG,
                       "only model_graph_format = file is supported")

        MaceLogger.summary(
            StringFormatter.block("Model %s converted" % model_name))
Example #6
def format_model_config(flags):
    with open(flags.config) as f:
        # SafeLoader avoids constructing arbitrary Python objects from YAML.
        configs = yaml.load(f, Loader=yaml.SafeLoader)

    library_name = configs.get(YAMLKeyword.library_name, "")
    mace_check(
        len(library_name) > 0, ModuleName.YAML_CONFIG,
        "library name should not be empty")

    if flags.target_abis:
        target_abis = flags.target_abis.split(',')
    else:
        target_abis = configs.get(YAMLKeyword.target_abis, [])
    mace_check((isinstance(target_abis, list) and len(target_abis) > 0),
               ModuleName.YAML_CONFIG, "target_abis list is needed")
    configs[YAMLKeyword.target_abis] = target_abis
    for abi in target_abis:
        mace_check(abi in ABITypeStrs, ModuleName.YAML_CONFIG,
                   "target_abis must be in " + str(ABITypeStrs))

    target_socs = configs.get(YAMLKeyword.target_socs, "")
    if flags.target_socs:
        configs[YAMLKeyword.target_socs] = \
               [soc.lower() for soc in flags.target_socs.split(',')]
    elif not target_socs:
        configs[YAMLKeyword.target_socs] = []
    elif not isinstance(target_socs, list):
        configs[YAMLKeyword.target_socs] = [target_socs]

    configs[YAMLKeyword.target_socs] = \
        [soc.lower() for soc in configs[YAMLKeyword.target_socs]]

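    # SoC availability checks only apply when building for Android ABIs.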
    if ABIType.armeabi_v7a in target_abis \
            or ABIType.arm64_v8a in target_abis:
        # NOTE: in the full tool this set is populated with the SoCs of the
        # attached devices; as excerpted here it is always empty, so both
        # checks below will fail.
        available_socs = set()
        target_socs = configs[YAMLKeyword.target_socs]
        if ALL_SOC_TAG in target_socs:
            mace_check(
                available_socs, ModuleName.YAML_CONFIG,
                "To build for all plugged-in SoCs, "
                "at least one phone must be plugged in")
        else:
            for soc in target_socs:
                mace_check(
                    soc in available_socs, ModuleName.YAML_CONFIG,
                    "To build a library for a specific SoC, "
                    "a phone with that SoC must be plugged in")

    if flags.model_graph_format:
        model_graph_format = flags.model_graph_format
    else:
        model_graph_format = configs.get(YAMLKeyword.model_graph_format, "")
    mace_check(
        model_graph_format in ModelFormatStrs, ModuleName.YAML_CONFIG,
        "model_graph_format must be set and "
        "must be in " + str(ModelFormatStrs))
    configs[YAMLKeyword.model_graph_format] = model_graph_format
    if flags.model_data_format:
        model_data_format = flags.model_data_format
    else:
        model_data_format = configs.get(YAMLKeyword.model_data_format, "")
    configs[YAMLKeyword.model_data_format] = model_data_format
    mace_check(
        model_data_format in ModelFormatStrs, ModuleName.YAML_CONFIG,
        "model_data_format must be set and "
        "must be in " + str(ModelFormatStrs))

    mace_check(
        not (model_graph_format == ModelFormat.file
             and model_data_format == ModelFormat.code),
        ModuleName.YAML_CONFIG, "If model_graph format is 'file',"
        " the model_data_format must be 'file' too")

    model_names = configs.get(YAMLKeyword.models, [])
    mace_check(
        len(model_names) > 0, ModuleName.YAML_CONFIG,
        "no model found in config file")

    model_name_reg = re.compile(r'^[a-zA-Z0-9_]+$')
    for model_name in model_names:
        # check model_name legality
        mace_check(model_name not in CPP_KEYWORDS, ModuleName.YAML_CONFIG,
                   "model name should not be a C++ keyword.")
        mace_check((model_name[0] == '_' or model_name[0].isalpha())
                   and bool(model_name_reg.match(model_name)),
                   ModuleName.YAML_CONFIG,
                   "model name should meet the C++ naming convention:"
                   " start with '_' or a letter and contain only"
                   " letters, digits and '_'")

        model_config = configs[YAMLKeyword.models][model_name]
        platform = model_config.get(YAMLKeyword.platform, "")
        mace_check(platform in PlatformTypeStrs, ModuleName.YAML_CONFIG,
                   "'platform' must be in " + str(PlatformTypeStrs))

        for key in [YAMLKeyword.model_file_path]:
            value = model_config.get(key, "")
            mace_check(value != "", ModuleName.YAML_CONFIG,
                       "'%s' is necessary" % key)

        weight_file_path = model_config.get(YAMLKeyword.weight_file_path, "")
        if not weight_file_path:
            model_config[YAMLKeyword.weight_sha256_checksum] = ""

        runtime = model_config.get(YAMLKeyword.runtime, "")
        mace_check(runtime in RuntimeTypeStrs, ModuleName.YAML_CONFIG,
                   "'runtime' must be in " + str(RuntimeTypeStrs))
        if ABIType.host in target_abis:
            mace_check(runtime == RuntimeType.cpu, ModuleName.YAML_CONFIG,
                       "host only supports the cpu runtime for now.")

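        # When data_type is unset, pick a per-runtime default:
        # fp16_fp32 for gpu/cpu_gpu, fp32 for cpu, uint8 for dsp.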
        data_type = model_config.get(YAMLKeyword.data_type, "")
        if runtime == RuntimeType.cpu_gpu and data_type not in GPUDataTypeStrs:
            model_config[YAMLKeyword.data_type] = \
                GPUDataType.fp16_fp32.value
        elif runtime == RuntimeType.cpu:
            if len(data_type) > 0:
                mace_check(
                    data_type in CPUDataTypeStrs, ModuleName.YAML_CONFIG,
                    "'data_type' must be in " + str(CPUDataTypeStrs) +
                    " for cpu runtime")
            else:
                model_config[YAMLKeyword.data_type] = \
                    CPUDataType.fp32.value
        elif runtime == RuntimeType.gpu:
            if len(data_type) > 0:
                mace_check(
                    data_type in GPUDataTypeStrs, ModuleName.YAML_CONFIG,
                    "'data_type' must be in " + str(GPUDataTypeStrs) +
                    " for gpu runtime")
            else:
                model_config[YAMLKeyword.data_type] =\
                    GPUDataType.fp16_fp32.value
        elif runtime == RuntimeType.dsp:
            if len(data_type) > 0:
                mace_check(
                    data_type in DSPDataTypeStrs, ModuleName.YAML_CONFIG,
                    "'data_type' must be in " + str(DSPDataTypeStrs) +
                    " for dsp runtime")
            else:
                model_config[YAMLKeyword.data_type] = \
                    DSPDataType.uint8.value

        subgraphs = model_config.get(YAMLKeyword.subgraphs, "")
        mace_check(
            len(subgraphs) > 0, ModuleName.YAML_CONFIG,
            "at least one subgraph is needed")

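        # Normalize the per-subgraph fields so downstream code can assume
        # they are lists.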
        for subgraph in subgraphs:
            for key in [
                    YAMLKeyword.input_tensors, YAMLKeyword.input_shapes,
                    YAMLKeyword.output_tensors, YAMLKeyword.output_shapes
            ]:
                value = subgraph.get(key, "")
                mace_check(value != "", ModuleName.YAML_CONFIG,
                           "'%s' is necessary in subgraph" % key)
                if not isinstance(value, list):
                    subgraph[key] = [value]
            validation_inputs_data = subgraph.get(
                YAMLKeyword.validation_inputs_data, [])
            if not isinstance(validation_inputs_data, list):
                validation_inputs_data = [validation_inputs_data]
            subgraph[YAMLKeyword.validation_inputs_data] = \
                validation_inputs_data
            input_ranges = subgraph.get(YAMLKeyword.input_ranges, [])
            if not isinstance(input_ranges, list):
                input_ranges = [input_ranges]
            subgraph[YAMLKeyword.input_ranges] = input_ranges

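        # Boolean-ish options default to 0 (disabled) when unset.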
        for key in [
                YAMLKeyword.limit_opencl_kernel_time,
                YAMLKeyword.nnlib_graph_mode, YAMLKeyword.obfuscate,
                YAMLKeyword.winograd
        ]:
            value = model_config.get(key, "")
            if value == "":
                model_config[key] = 0

        mace_check(
            model_config[YAMLKeyword.winograd] in WinogradParameters,
            ModuleName.YAML_CONFIG, "'winograd' must be in " +
            str(WinogradParameters) + "; 0 disables Winograd convolution")

        weight_file_path = model_config.get(YAMLKeyword.weight_file_path, "")
        model_config[YAMLKeyword.weight_file_path] = weight_file_path

    return configs