Example #1
0
def run(cmd_string, work_dir=None, timeout=None, is_shell=True):
    """Run a shell command via subprocess.Popen and return its exit code.

    stdout and stderr are captured; stdout is logged at DEBUG level, and
    stderr at ERROR level when the command exits non-zero.

    :param cmd_string: the command to execute.
    :param work_dir: if set, the child process runs with this working directory.
    :param timeout: timeout in seconds (floats supported); on expiry the
                    child is killed and subprocess.TimeoutExpired is raised.
    :param is_shell: whether to run the command through the shell; when
                     False the command string is split into an argv list.
    :return: the command's return code.
    :raises subprocess.TimeoutExpired: if the command exceeds ``timeout``.
    """
    if is_shell:
        cmd_string_list = cmd_string
    else:
        cmd_string_list = shlex.split(cmd_string)

    sub = subprocess.Popen(cmd_string_list,
                           stdout=subprocess.PIPE,
                           stderr=subprocess.PIPE,
                           # BUG FIX: honor is_shell instead of hard-coding
                           # shell=True (an argv list + shell=True only runs
                           # the first element on POSIX).
                           shell=is_shell,
                           bufsize=4096,
                           cwd=work_dir,
                           close_fds=True)
    try:
        # BUG FIX: the original computed an end_time but never enforced it;
        # communicate(timeout=...) implements the documented timeout.
        (stdout, stderr) = sub.communicate(timeout=timeout)
    except subprocess.TimeoutExpired:
        sub.kill()
        sub.communicate()  # reap the killed child before propagating
        raise
    logging.debug(stdout.decode('utf-8'))
    rc = sub.poll()
    if rc != 0:
        logging.error(stderr.decode('utf-8'))
    return rc
def run(cmd_string, work_dir=None, timeout=None, is_shell=True):
    """Run a shell command, streaming merged stdout/stderr to the DEBUG log.

    Unlike the buffered variant, output is read line by line while the
    command runs, so long-running commands produce incremental log output.

    :param cmd_string: the command to execute.
    :param work_dir: if set, the child process runs with this working directory.
    :param timeout: timeout in seconds (floats supported); checked between
                    output lines, so granularity depends on output frequency.
    :param is_shell: whether to run the command through the shell; when
                     False the command string is split into an argv list.
    :return: the command's return code.
    :raises RuntimeError: if the command exceeds ``timeout``.
    """
    if is_shell:
        cmd_string_list = cmd_string
    else:
        cmd_string_list = shlex.split(cmd_string)

    end_time = None
    if timeout:
        end_time = datetime.datetime.now() + \
                   datetime.timedelta(seconds=timeout)

    sub = subprocess.Popen(cmd_string_list,
                           stdout=subprocess.PIPE,
                           stderr=subprocess.STDOUT,
                           # BUG FIX: honor is_shell instead of hard-coding True.
                           shell=is_shell,
                           bufsize=0,  # unbuffered so lines arrive promptly
                           cwd=work_dir,
                           close_fds=True)
    while True:
        line = sub.stdout.readline().decode('utf-8')
        logging.debug(line)
        # EOF plus a set return code means the child has finished.
        if line == '' and sub.poll() is not None:
            break
        # BUG FIX: end_time was computed but never checked; enforce the
        # documented timeout (best effort — readline may block meanwhile).
        if end_time is not None and datetime.datetime.now() > end_time:
            sub.kill()
            sub.communicate()  # reap the killed child
            raise RuntimeError("command timed out: " + str(cmd_string))
    return sub.poll()
Example #3
0
def gene_random_data(input_info: dict) -> str:
    """Generate a random-input text file for the model checker.

    Writes ``temp_data/input.txt`` under this script's grandparent directory:
    a header line with the input count, then for each input a descriptor line
    "name rank dims... data_type" followed by the flattened random values.

    :param input_info: mapping of input name -> {'shape': [...],
                       'data_type': int}; data_type 0 produces floats and
                       3 produces ints (presumably float32/int — TODO confirm
                       against the caller).
    :return: path of the generated input file.
    """
    data = {}
    current_dir = pathlib.Path(__file__).parent.parent
    data_dir = os.path.join(current_dir, "temp_data")
    command = "mkdir -p " + data_dir

    logging.debug(command)

    cmd.run(command)
    checker.check_file_exist(data_dir)
    data_path = os.path.join(data_dir, "input.txt")
    # BUG FIX: use a context manager so the file is closed even when
    # generation raises part-way through.
    with open(data_path, "w") as data_file:
        data_file.write(str(len(input_info)) + '\n')
        for name, info in input_info.items():
            shape = info['shape']
            data_type = info['data_type']
            data_file.write(name + ' ' + str(len(shape)) + ' ' +
                            ' '.join([str(dim) for dim in shape]) + ' ' +
                            str(data_type) + '\n')
            if data_type == 0:
                data[name] = np.random.rand(*shape)
                np.savetxt(data_file, data[name].reshape(-1), fmt="%0.6f")
            elif data_type == 3:
                # NOTE(review): randint(low=0, high=1) always yields 0
                # (high is exclusive) — looks like deliberate dummy int
                # data, but confirm.
                data[name] = np.random.randint(low=0, high=1, size=shape)
                np.savetxt(data_file, data[name].reshape(-1), fmt="%i")
    return data_path
Example #4
0
def run_tnn_model_check(proto_path,
                        model_path,
                        input_path,
                        reference_output_path,
                        is_tflite=False,
                        align_batch=False):
    """Invoke the model_check binary and report whether outputs align.

    Runs model_check on the NAIVE device with per-layer checking (-e) and
    prints an aligned / not-aligned message based on its exit code.
    """
    cmd.run("pwd")
    model_check_path = parse_path.parse_path("bin/model_check")
    checker.check_file_exist(model_check_path)

    command = "{} -e -p  {} -m {} -i {} -f {} -d NAIVE".format(
        model_check_path, proto_path, model_path, input_path,
        reference_output_path)
    if align_batch:
        command += " -b "

    logging.debug(command)
    if cmd.run(command) == 0:
        print_align_message(is_tflite)
    else:
        print_not_align_message(None, is_tflite)

    return
Example #5
0
def run_tnn_model_check(proto_path, model_path, input_path, reference_output_path):
    """Invoke the model_check binary and report whether outputs align.

    Runs model_check on the NAIVE device and prints an aligned /
    not-aligned message based on its exit code.
    """
    cmd.run("pwd")
    model_check_path = parse_path.parse_path("bin/model_check")
    checker.check_file_exist(model_check_path)

    command = "{} -p  {} -m {} -i {} -f {} -d NAIVE".format(
        model_check_path, proto_path, model_path, input_path,
        reference_output_path)

    logging.debug(command)
    if cmd.run(command) == 0:
        print_align_message()
    else:
        print_not_align_message()

    return
def tflite2tnn(tf_path, tnn_path, not_fold_const=False):
    """Convert a TFLite model to TNN via the TnnConverter binary.

    :param tf_path: path of the .tflite model.
    :param tnn_path: output directory; defaults to the model's own directory.
    :param not_fold_const: unused here; kept for interface compatibility.
    :return: True if the converter exited with code 0, else False.
    """
    cmd.run("pwd")
    TnnConverter_path = parse_path.parse_path("bin/TnnConverter")
    # BUG FIX: the converter path was existence-checked twice; once suffices.
    checker.check_file_exist(TnnConverter_path)
    checker.check_file_exist(tf_path)
    if tnn_path is None:
        tnn_path = os.path.dirname(tf_path)
    checker.check_file_exist(tnn_path)
    command = (TnnConverter_path + " -mt TFLITE  -mp " + tf_path +
               " -od " + tnn_path + "/")
    logging.debug(command)
    # Idiom: return the comparison directly instead of if/else True/False.
    return cmd.run(command) == 0
Example #7
0
def gene_random_data(input_info: dict) -> str:
    """Generate random float input data for alignment checking.

    Writes ``temp_data/input.txt`` under this script's grandparent
    directory, with each input's flattened random tensor appended in turn.

    :param input_info: mapping of input name -> shape (iterable of ints).
    :return: path of the generated input file.
    """
    data = {}
    current_dir = pathlib.Path(__file__).parent.parent
    data_dir = os.path.join(current_dir, "temp_data")
    command = "mkdir -p " + data_dir

    logging.debug(command)

    cmd.run("pwd")
    cmd.run(command)
    checker.check_file_exist(data_dir)
    data_path = os.path.join(data_dir, "input.txt")
    # BUG FIX: use a context manager so the file is closed even when
    # generation raises part-way through.
    with open(data_path, "w") as data_file:
        for name, shape in input_info.items():
            data[name] = np.random.rand(*shape)
            np.savetxt(data_file, data[name].reshape(-1), fmt="%0.18f")
    return data_path
Example #8
0
def tf2onnx(tf_path, input_names, output_name, onnx_path, not_fold_const=False):
    """Convert a TensorFlow GraphDef to ONNX using the tf2onnx CLI.

    :param tf_path: path of the frozen graphdef file.
    :param input_names: raw input-name spec; parsed by process_input_names
                        into the --inputs / --inputs-as-nchw values.
    :param output_name: output node name(s), normalized via hack_name.
    :param onnx_path: destination .onnx path.
    :param not_fold_const: when False (the default) pass --fold_const.
    :return: True when the conversion subprocess exits 0, else False.
    """
    work_dir = "./"
    inputs, inputs_as_nchw = process_input_names(input_names)
    command = ("python3 -m tf2onnx.convert  --graphdef " + tf_path +
               " --inputs " + inputs +
               " --inputs-as-nchw " + inputs_as_nchw +
               " --outputs " + hack_name(output_name) +
               " --output " + onnx_path +
               " --opset 11")
    if not_fold_const is False:
        command = command + " --fold_const"

    logging.debug(command)
    # Idiom: return the comparison directly instead of if/else True/False.
    return cmd.run(command, work_dir=work_dir) == 0
Example #9
0
def tf2onnx(tf_path, input_names, output_name, onnx_path, not_fold_const=False):
    """Convert a TensorFlow GraphDef to ONNX using the tf2onnx CLI.

    :param tf_path: path of the frozen graphdef file.
    :param input_names: raw spec parsed by format_input into a
                        {name: dims-string} mapping.
    :param output_name: output node name(s), normalized via hack_name.
    :param onnx_path: destination .onnx path.
    :param not_fold_const: when False (the default) pass --fold_const.
    :return: True when the conversion subprocess exits 0, else False.
    """
    work_dir = "./"
    input_info: dict = format_input(input_names)
    # Both values deliberately keep a trailing comma, as the original did.
    input_info_str: str = "".join(name + dims + ","
                                  for name, dims in input_info.items())
    input_nchw_names: str = "".join(name + "," for name in input_info)
    command = ("python3 -m tf2onnx.convert  --graphdef " + tf_path +
               " --inputs " + input_info_str +
               " --inputs-as-nchw " + input_nchw_names +
               " --outputs " + hack_name(output_name) +
               " --output " + onnx_path +
               " --opset 11")
    if not_fold_const is False:
        command = command + " --fold_const"

    logging.debug(command)
    # Idiom: return the comparison directly instead of if/else True/False.
    return cmd.run(command, work_dir=work_dir) == 0
Example #10
0
def convert(onnx_path,
            output_dir=None,
            version="v1.0",
            optimize=True,
            half=False,
            align=False,
            input_path=None,
            refer_path=None,
            input_names: str = None):
    """Convert an ONNX model to TNN with the onnx2tnn.py script.

    :param onnx_path: path of the ONNX model to convert.
    :param output_dir: directory for the generated .tnnproto/.tnnmodel;
                       defaults to the ONNX model's own directory.
    :param version: model version string passed to onnx2tnn.
    :param optimize: whether to let onnx2tnn optimize the model.
    :param half: whether to emit an FP16 model to reduce file size.
    :param align: when True, verify the TNN model's outputs against ONNX.
    :param input_path: optional input data file for the alignment check.
    :param refer_path: optional reference output file for the alignment check.
    :param input_names: optional "name[d1,d2,...]" spec pinning dynamic
                        input shapes.
    :raises SystemExit: with return_code.CONVERT_FAILED when onnx2tnn fails.
    """
    logging.info("Converter ONNX to TNN Model\n")

    checker.check_file_exist(onnx_path)

    ret, current_shape = checker.check_onnx_dim(onnx_path)

    # Dynamic input dims are only convertible when the caller pins the
    # shapes via input_names in "name[d1,d2,...]" form.
    if ret is False and current_shape is not None:
        if input_names is None:
            throw_exception(current_shape)
        if input_names is not None and not ("[" in input_names
                                            and "]" in input_names):
            throw_exception(current_shape)

    proto_suffix = '.tnnproto'
    model_suffix = '.tnnmodel'
    command = "python3 onnx2tnn.py " + onnx_path
    # BUG FIX: the version flag was hard-coded to "v1.0"; honor the version
    # parameter as the sibling converter implementation does.
    command = command + " -version=" + version
    checker.check_file_exist(onnx_path)
    if optimize is True:
        command = command + " -optimize=1"
    else:
        command = command + " -optimize=0"
    if half is True:
        command = command + " -half=1"
    else:
        command = command + " -half=0"

    if output_dir is None:
        output_dir = os.path.dirname(onnx_path)
    checker.check_file_exist(output_dir)
    command = command + " -o " + output_dir
    # NOTE(review): logged before -input_shape is appended, so the logged
    # command can be incomplete — kept as-is to preserve behavior.
    logging.debug("The onnx2tnn command:" + command + "\n")

    new_input_names = None
    if input_names is not None:
        # Rewrite "name[d1,d2]" into the "name:d1,d2" form onnx2tnn expects.
        new_input_names = ""
        for char in input_names:
            if char == "[":
                char = ":"
            if char == "]":
                continue
            new_input_names += char
        command = command + " -input_shape " + new_input_names

    work_dir = "../onnx2tnn/onnx-converter/"
    result = cmd.run(command, work_dir=work_dir)

    if result == 0:
        logging.info("Converter ONNX to TNN model succeed!\n")
    else:
        logging.error("Converter ONNX to TNN model failed!\n")
        sys.exit(return_code.CONVERT_FAILED)
    onnx_base_name = os.path.basename(onnx_path)

    if align is True:
        # Optimized conversion inserts ".opt" into the generated file names.
        if optimize is True:
            tnn_proto_name = onnx_base_name[:-len('.onnx'
                                                  )] + '.opt' + proto_suffix
            tnn_model_name = onnx_base_name[:-len('.onnx'
                                                  )] + '.opt' + model_suffix
        else:
            tnn_proto_name = onnx_base_name[:-len('.onnx')] + proto_suffix
            tnn_model_name = onnx_base_name[:-len('.onnx')] + model_suffix
        tnn_proto_path = os.path.join(output_dir, tnn_proto_name)
        tnn_model_path = os.path.join(output_dir, tnn_model_name)

        if input_names is None:
            align_model.align_model(onnx_path, tnn_proto_path, tnn_model_path,
                                    input_path, refer_path)
        else:
            align_model.align_model(onnx_path, tnn_proto_path, tnn_model_path,
                                    input_path, refer_path, new_input_names)
Example #11
0
def convert(onnx_path,
            output_dir=None,
            version="v1.0",
            optimize=True,
            half=False,
            align='',
            align_batch=False,
            input_path=None,
            refer_path=None,
            input_names: str = None,
            is_ssd=False,
            debug_mode: bool = False):
    """Convert an ONNX model to TNN with the onnx2tnn.py script.

    :param onnx_path: path of the ONNX model to convert.
    :param output_dir: directory for the generated files; defaults to the
                       ONNX model's own directory.
    :param version: model version string passed to onnx2tnn.
    :param optimize: whether to let onnx2tnn optimize the model.
    :param half: whether to emit an FP16 model to reduce file size.
    :param align: '' (no check), 'output' (check final outputs), or 'all'
                  (check every layer) — inferred from the branches below.
    :param align_batch: when True, also run the output alignment check.
    :param input_path: optional input data file for the alignment check.
    :param refer_path: optional reference output file for the alignment check.
    :param input_names: optional input-shape spec; normalized to
                        "name:d1,d2,..." form when only dims are given.
    :param is_ssd: when True, skip the dynamic-dimension pre-check.
    :param debug_mode: forwarded to align_model.
    :raises SystemExit: with return_code.CONVERT_FAILED when onnx2tnn fails.
    """
    logging.info("Converter ONNX to TNN Model...\n")

    checker.check_file_exist(onnx_path)

    # Best-effort pre-check of the ONNX input dims; failures are logged but
    # do not abort, since conversion may still succeed.
    try:
        if not is_ssd:
            logging.info("Converter ONNX to TNN check_onnx_dim...\n")
            ret, current_shape = checker.check_onnx_dim(onnx_path)
            # NOTE(review): same message logged before and after the call —
            # likely leftover debugging, kept as-is.
            logging.info("Converter ONNX to TNN check_onnx_dim...\n")
            if ret is False and current_shape is not None:
                if input_names is None:
                    logging.info("Converter ONNX to TNN current_shape...\n")
                    throw_exception(current_shape)
            if input_names is not None:
                input_names = input_names.strip()
                # A bare dims spec ("1,3,224,224") is prefixed with the
                # model's first input name to form "name:dims".
                if ":" not in input_names and " " not in input_names:
                    input_names = list(
                        current_shape.keys())[0] + ":" + input_names
                check_input_names(input_names, current_shape)
    except Exception as e:
        print(e)
        logging.error(
            "check_onnx_dim failed, next stage of convertion may failed too\n")

    proto_suffix = '.tnnproto'
    model_suffix = '.tnnmodel'
    # Assemble the onnx2tnn.py command line flag by flag.
    command = "python3 onnx2tnn.py " + onnx_path
    command = command + " -version=" + version
    checker.check_file_exist(onnx_path)
    if optimize is True:
        command = command + " -optimize=1"
    else:
        command = command + " -optimize=0"
    if half is True:
        command = command + " -half=1"
    else:
        command = command + " -half=0"

    if output_dir is None:
        output_dir = os.path.dirname(onnx_path)
    checker.check_file_exist(output_dir)
    command = command + " -o " + output_dir

    if input_names is not None:
        command = command + " -input_shape " + input_names
    logging.debug("The onnx2tnn command:" + command + "\n")

    # onnx2tnn.py must be run from its own directory.
    work_dir = "../onnx2tnn/onnx-converter/"
    result = cmd.run(command, work_dir=work_dir)

    if result == 0:
        logging.info("Converter ONNX to TNN model succeed!\n")
    else:
        logging.error("Converter ONNX to TNN model failed!\n")
        sys.exit(return_code.CONVERT_FAILED)
    onnx_base_name = os.path.basename(onnx_path)

    # Optimized conversion inserts ".opt" into the generated file names.
    if optimize is True:
        tnn_proto_name = onnx_base_name[:-len('.onnx')] + '.opt' + proto_suffix
        tnn_model_name = onnx_base_name[:-len('.onnx')] + '.opt' + model_suffix
    else:
        tnn_proto_name = onnx_base_name[:-len('.onnx')] + proto_suffix
        tnn_model_name = onnx_base_name[:-len('.onnx')] + model_suffix
    tnn_proto_path = os.path.join(output_dir, tnn_proto_name)
    tnn_model_path = os.path.join(output_dir, tnn_model_name)

    if align == 'output' or align_batch is True:
        if input_names is None:
            align_model.align_model(onnx_path,
                                    tnn_proto_path,
                                    tnn_model_path,
                                    input_path,
                                    refer_path,
                                    debug_mode=debug_mode,
                                    align_batch=align_batch)
        else:
            align_model.align_model(onnx_path,
                                    tnn_proto_path,
                                    tnn_model_path,
                                    input_path,
                                    refer_path,
                                    input_names,
                                    debug_mode=debug_mode,
                                    align_batch=align_batch)
    elif align == 'all':
        # NOTE(review): is_align_all is necessarily True inside this branch;
        # the comparison is redundant but harmless.
        is_align_all = (align == 'all')
        align_model.align_all(onnx_path, tnn_proto_path, is_align_all,
                              input_names, input_path, refer_path)