Code example #1
def send_framework_info(framework: str):
    """
    This function sends information about used framework.
    :param framework: framework name.
    """
    t = tm.Telemetry()
    t.send_event('mo', 'framework', framework)
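A minimal usage sketch. The import alias is an assumption taken from example #11 below, which does "import openvino_telemetry as tm"; calling the helper then sends a 'framework' event in the 'mo' category:

import openvino_telemetry as tm  # assumption: same alias as in example #11

send_framework_info('onnx')  # reports that the ONNX frontend was used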
Code example #2
File: main.py Project: MaximProshin/openvino
def prepare_ir(argv: argparse.Namespace):
    argv = arguments_post_parsing(argv)
    t = tm.Telemetry()
    graph = None
    ngraph_function = None
    moc_front_end, available_moc_front_ends = get_moc_frontends(argv)
    if moc_front_end:
        fallback_reasons = check_fallback(argv)
        if len(fallback_reasons) == 0:
            t.send_event("mo", "conversion_method",
                         moc_front_end.get_name() + "_frontend")
            moc_front_end.add_extension(
                TelemetryExtension("mo", t.send_event, t.send_error,
                                   t.send_stack_trace))
            moc_front_end.add_extension(
                ProgressReporterExtension(progress_printer(argv)))
            ngraph_function = moc_pipeline(argv, moc_front_end)
            return graph, ngraph_function
        else:  # apply fallback
            reasons_message = ", ".join(fallback_reasons)
            load_extensions(argv, *list(deduce_framework_by_namespace(argv)))
            t.send_event("mo", "fallback_reason", reasons_message)
            log.warning(
                "The IR preparation was executed by the legacy MO path. "
                "This is a fallback scenario applicable only for some specific cases. "
                f"The detailed reason why fallback was executed: not supported {reasons_message} were used. "
                "You can specify --use_new_frontend flag to force using the Frontend MO path to avoid additional checks. "
                + refer_to_faq_msg(105))

    t.send_event("mo", "conversion_method", "mo_legacy")
    graph = unified_pipeline(argv)

    return graph, ngraph_function
Code example #3
File: utils.py Project: mikhailk62/openvino
def refer_to_faq_msg(question_num: int):
    t = tm.Telemetry()
    t.send_event('mo', 'error_info', "faq:" + str(question_num))

    return '\n For more information please refer to Model Optimizer FAQ, question #{0}. ' \
           '(https://docs.openvinotoolkit.org/latest/openvino_docs_MO_DG_prepare_model_Model_Optimizer_FAQ.html' \
           '?question={0}#question-{0})'.format(question_num)
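This helper is typically concatenated onto error messages, as example #16 below does. A usage sketch (the Error class is assumed to be the Model Optimizer's own exception type, as in the other examples):

# As a side effect, an 'error_info' event with label 'faq:15' is sent.
raise Error('Framework foo is not a valid target. ' + refer_to_faq_msg(15))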
Code example #4
def send_shapes_info(framework: str, graph: Graph):
    """
    This function sends information about model input shapes.
    :param framework: framework name.
    :param graph: model graph.
    """
    shapes = []
    for node in graph.get_op_nodes():
        op_type = node.soft_get('type', None)
        if op_type == 'Parameter':
            if 'shape' in node:
                shapes.append(node['shape'])
    t = tm.Telemetry()

    if shapes:
        shape_str = ""
        is_partially_defined = "0"
        for shape in shapes:
            shape_str += (np.array2string(int64_array(unmask_shape(shape)))
                          if shape is not None else "Undefined") + ","
            if not is_fully_defined(shape):
                is_partially_defined = "1"
        message_str = "{fw:" + framework + ",shape:\"" + shape_str[:-1] + "\"}"
        t.send_event('mo', 'input_shapes', message_str)
        t.send_event(
            'mo', 'partially_defined_shape', "{partially_defined_shape:" +
            is_partially_defined + ",fw:" + framework + "}")
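For illustration (the exact np.array2string spacing is approximate), a TensorFlow model with one fully defined [1, 224, 224, 3] input would produce roughly:

# t.send_event('mo', 'input_shapes', '{fw:tf,shape:"[  1 224 224   3]"}')
# t.send_event('mo', 'partially_defined_shape', '{partially_defined_shape:0,fw:tf}')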
Code example #5
File: telemetry.py Project: pavel-esir/openvino
def send_configuration(algo_config, engine, interface='API'):
    try:
        telemetry = tm.Telemetry(tid=get_tid_telemetry(), app_name='pot', app_version=pot_version)

        target_device = ','.join(set(algorithm['params'].get('target_device', 'ANY') for algorithm in algo_config))
        algorithms = {f'algorithm_{i}': algorithm['name'] for i, algorithm in enumerate(algo_config)}
        get_subset_size = lambda i: min(algo_config[i]['params'].get('stat_subset_size', len(engine.data_loader)),
                                        len(engine.data_loader))
        stat_subset_size = ','.join([str(get_subset_size(i)) for i, _ in enumerate(algo_config)])
        model_type = algo_config[0]['params'].get('model_type', None) if len(algo_config) > 0 else str(None)
        engine_type = 'simplified' if isinstance(engine, SimplifiedEngine) else \
                      'accuracy_checker' if isinstance(engine, ACEngine) else 'engine'

        for algo in algo_config:
            if algo['name'] == 'AccuracyAwareQuantization':
                drop_type_aa = algo['params'].get('drop_type', 'absolute')
                maximal_drop_aa = algo['params'].get('maximal_drop', None)
                tune_hyperparams = algo['params'].get('tune_hyperparams', False)
                send_event('drop_type_aa', drop_type_aa, telemetry)
                send_event('maximal_drop_aa', str(maximal_drop_aa), telemetry)
                send_event('tune_hyperparams', tune_hyperparams, telemetry)

        send_event('target_device', target_device, telemetry)
        send_event('algorithms', str(algorithms), telemetry)
        send_event('stat_subset_size', stat_subset_size, telemetry)
        send_event('model_type', str(model_type), telemetry)
        send_event('engine_type', engine_type, telemetry)
        send_event('interface', interface, telemetry)
    except Exception as e: # pylint: disable=broad-except
        logger.info("Error occurred while trying to send telemetry. Details:" + str(e))
Code example #6
File: telemetry.py Project: pavel-esir/openvino
def start_session_telemetry():
    try:
        telemetry = tm.Telemetry(tid=get_tid_telemetry(), app_name='pot', app_version=pot_version)
        telemetry.start_session('pot')
        return telemetry
    except Exception as e: # pylint: disable=broad-except
        logger.info("Error occurred while trying to send telemetry. Details:" + str(e))
        return None
Code example #7
def main(cli_parser: argparse.ArgumentParser, fem: FrontEndManager,
         framework: str):
    telemetry = tm.Telemetry(tid=get_tid(),
                             app_name='Model Optimizer',
                             app_version=get_simplified_mo_version())
    telemetry.start_session('mo')
    telemetry.send_event('mo', 'version', get_simplified_mo_version())
    try:
        # Initialize the logger with 'ERROR' as the default level so that readable
        # messages can be formed before the argument parser delivers the log_level requested by the user
        init_logger('ERROR', False)

        argv = cli_parser.parse_args()
        send_params_info(argv, cli_parser)
        if framework:
            argv.framework = framework
        argv.feManager = fem

        ov_update_message = None
        if not hasattr(argv, 'silent') or not argv.silent:
            ov_update_message = get_ov_update_message()
        ret_code = driver(argv)
        if ov_update_message:
            print(ov_update_message)
        telemetry.send_event('mo', 'conversion_result', 'success')
        telemetry.end_session('mo')
        telemetry.force_shutdown(1.0)
        return ret_code
    except (FileNotFoundError, NotADirectoryError) as e:
        log.error('File {} was not found'.format(
            str(e).split('No such file or directory:')[1]))
        log.debug(traceback.format_exc())
    except Error as err:
        analysis_results = AnalysisResults()
        if analysis_results.get_messages() is not None:
            for el in analysis_results.get_messages():
                log.error(el, extra={'analysis_info': True})
        log.error(err)
        log.debug(traceback.format_exc())
    except FrameworkError as err:
        log.error(err, extra={'framework_error': True})
        log.debug(traceback.format_exc())
    except Exception as err:
        log.error("-------------------------------------------------")
        log.error("----------------- INTERNAL ERROR ----------------")
        log.error("Unexpected exception happened.")
        log.error(
            "Please contact Model Optimizer developers and forward the following information:"
        )
        log.error(str(err))
        log.error(traceback.format_exc())
        log.error("---------------- END OF BUG REPORT --------------")
        log.error("-------------------------------------------------")

    telemetry.send_event('mo', 'conversion_result', 'fail')
    telemetry.end_session('mo')
    telemetry.force_shutdown(1.0)
    return 1
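Stripped of error handling, example #7 shows the session lifecycle this API follows: construct Telemetry, start_session, send events, end_session, then force_shutdown. A minimal sketch (the 1.0 argument is presumably a flush timeout in seconds):

telemetry = tm.Telemetry(tid=get_tid(), app_name='Model Optimizer',
                         app_version=get_simplified_mo_version())
telemetry.start_session('mo')
try:
    telemetry.send_event('mo', 'conversion_result', 'success')
finally:
    telemetry.end_session('mo')
    telemetry.force_shutdown(1.0)  # allow queued events up to ~1 s to be sent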
Code example #8
File: telemetry.py Project: KodiaqQ/openvino
def send_event(action,
               label,
               telemetry=tm.Telemetry(app_name='pot',
                                      app_version=pot_version)):
    try:
        telemetry.send_event('pot', action, label)
    except Exception as e:  # pylint: disable=broad-except
        logger.info("Error occurred while trying to send telemetry. Details:" +
                    str(e))
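Note that the telemetry default above is evaluated once, at import time, so every call shares a single instance created when the module is loaded. A sketch of the lazier alternative (not the project's code):

def send_event(action, label, telemetry=None):
    if telemetry is None:
        # Construct on first use instead of at module import.
        telemetry = tm.Telemetry(app_name='pot', app_version=pot_version)
    try:
        telemetry.send_event('pot', action, label)
    except Exception as e:  # pylint: disable=broad-except
        logger.info("Error occurred while trying to send telemetry. Details:" + str(e))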
Code example #9
def init_telemetry_session(app_name, app_version):
    telemetry = tm.Telemetry(
        app_name=app_name,
        app_version=app_version,
        tid='UA-17808594-29')
    telemetry.start_session('dm')
    send_version_info(telemetry, app_version)

    return telemetry
Code example #10
def send_telemetry(mo_version: str, message: str, event_type: str):
    t = tm.Telemetry(app_name='Version Checker', app_version=mo_version)
    # Do not start a new session if this check is executed from within the MO,
    # because it is not a model conversion run that we want to report.
    if execution_type != 'mo':
        t.start_session(execution_type)
    t.send_event(execution_type, event_type, message)
    if execution_type != 'mo':
        t.end_session(execution_type)
    t.force_shutdown(1.0)
Code example #11
def init_telemetry():
    try:
        import openvino_telemetry as tm # pylint:disable=C0415
    except ImportError:
        return None
    try:
        telemetry = tm.Telemetry(tid='UA-17808594-29', app_name='Accuracy Checker', app_version=__version__)
        return telemetry
    except Exception: # pylint:disable=W0703
        return None
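Callers must handle the None returned by both failure paths. A usage sketch (the 'ac' category string is a placeholder, not taken from the source):

telemetry = init_telemetry()
if telemetry is not None:
    telemetry.start_session('ac')
    telemetry.send_event('ac', 'status', 'started')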
Code example #12
File: main.py Project: Flex-plaidml-team/openvino
def prepare_ir(argv):
    argv = arguments_post_parsing(argv)

    t = tm.Telemetry()
    graph = None
    ngraph_function = None
    moc_front_end, available_moc_front_ends = get_moc_frontends(argv)

    if moc_front_end:
        t.send_event("mo", "conversion_method", moc_front_end.get_name() + "_frontend")
        moc_front_end.add_extension(TelemetryExtension("mo", t.send_event, t.send_error, t.send_stack_trace))
        ngraph_function = moc_pipeline(argv, moc_front_end)
    else:
        t.send_event("mo", "conversion_method", "mo_legacy")
        graph = unified_pipeline(argv)

    return graph, ngraph_function
Code example #13
def send_op_names_info(framework: str, graph: Graph):
    """
    This function sends information about operations in model.
    :param framework: framework name.
    :param graph: model graph.
    """
    op_counter = Counter()

    def gather_op_statistics(g: Graph, op_c: Counter = op_counter):
        if hasattr(g, 'op_names_statistic'):
            op_c += g.op_names_statistic

    for_graph_and_each_sub_graph_recursively(graph, gather_op_statistics)

    t = tm.Telemetry()
    for op_name in op_counter:
        t.send_event('mo', 'op_count', "{}_{}".format(framework, op_name),
                     op_counter[op_name])
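For illustration: if the TensorFlow graph and its subgraphs contain 12 Conv2D and 3 Relu operations, the loop above sends

# t.send_event('mo', 'op_count', 'tf_Conv2D', 12)
# t.send_event('mo', 'op_count', 'tf_Relu', 3)

using the four-argument event-with-value form of send_event that the source itself relies on here.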
Code example #14
def send_params_info(argv: argparse.Namespace,
                     cli_parser: argparse.ArgumentParser):
    """
    This function sends information about used command line parameters.
    :param argv: command line parameters.
    :param cli_parser: command line parameters parser.
    """
    t = tm.Telemetry()
    params_with_paths = get_params_with_paths_list()
    for arg in vars(argv):
        arg_value = getattr(argv, arg)
        if arg_value != cli_parser.get_default(arg):
            if arg in params_with_paths:
                # If a command line argument value is a directory or a file path, it is
                # not sent, as it may contain confidential information; the value "1" is sent instead.
                param_str = arg + ":" + str(1)
            else:
                param_str = arg + ":" + str(arg_value)

            t.send_event('mo', 'cli_parameters', param_str)
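For illustration, running with --input_model /home/user/model.onnx --log_level DEBUG (both non-default) would send the path masked and the plain value respectively:

# t.send_event('mo', 'cli_parameters', 'input_model:1')
# t.send_event('mo', 'cli_parameters', 'log_level:DEBUG')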
Code example #15
def emit_ir(graph: Graph, argv: argparse.Namespace):
    NormalizeTI().find_and_replace_pattern(graph)
    for_graph_and_each_sub_graph_recursively(
        graph,
        RemoveConstOps().find_and_replace_pattern)
    for_graph_and_each_sub_graph_recursively(
        graph,
        CreateConstNodesReplacement().find_and_replace_pattern)

    if 'feManager' in argv:
        del argv.feManager

    mean_data = deepcopy(graph.graph['mf']) if 'mf' in graph.graph else None
    input_names = deepcopy(
        graph.graph['input_names']) if 'input_names' in graph.graph else []

    prepare_emit_ir(graph=graph,
                    data_type=graph.graph['cmd_params'].data_type,
                    output_dir=argv.output_dir,
                    output_model_name=argv.model_name,
                    mean_data=mean_data,
                    input_names=input_names,
                    meta_info=get_meta_info(argv),
                    use_temporary_path=True)

    # This graph cleanup is required to avoid double memory consumption
    graph.clear()

    if not (argv.framework == 'tf'
            and argv.tensorflow_custom_operations_config_update):
        output_dir = argv.output_dir if argv.output_dir != '.' else os.getcwd()
        orig_model_name = os.path.normpath(
            os.path.join(output_dir, argv.model_name))

        return_code = "not executed"
        # This try-except is an additional safeguard so that the IE
        # dependency search does not break the MO pipeline
        try:
            if not argv.legacy_ir_generation:
                path_to_offline_transformations = os.path.join(
                    os.path.realpath(os.path.dirname(__file__)), 'back',
                    'offline_transformations.py')
                cmd = [
                    sys.executable, path_to_offline_transformations,
                    "--input_model", orig_model_name, "--framework",
                    argv.framework, "--transform", argv.transform
                ]
                if "compress_fp16" in argv and argv.compress_fp16:
                    cmd += ["--compress_fp16"]
                    # restore data_type cmd parameter
                    argv.data_type = 'FP16'
                status = subprocess.run(cmd, env=os.environ)
                return_code = status.returncode
        except Exception as e:
            return_code = "failed"
            log.error(e)

        message = str(
            dict({
                "platform": platform.system(),
                "mo_version": get_simplified_mo_version(),
                "ie_version": get_simplified_ie_version(env=os.environ),
                "python_version": sys.version,
                "return_code": return_code
            }))
        t = tm.Telemetry()
        t.send_event('mo', 'offline_transformations_status', message)

        if return_code != 0:
            raise Error("offline transformations step has failed.")

        for suf in [".xml", ".bin", ".mapping"]:
            # remove existing files
            path_to_file = orig_model_name + "_tmp" + suf
            if os.path.exists(path_to_file):
                os.remove(path_to_file)

        # add meta information to IR
        append_ir_info(file=orig_model_name,
                       meta_info=get_meta_info(argv),
                       mean_data=mean_data,
                       input_names=input_names)

        print('[ SUCCESS ] Generated IR version {} model.'.format(
            get_ir_version(argv)))
        print('[ SUCCESS ] XML file: {}.xml'.format(orig_model_name))
        print('[ SUCCESS ] BIN file: {}.bin'.format(orig_model_name))

    return 0
Code example #16
def prepare_ir(argv: argparse.Namespace):
    is_tf, is_caffe, is_mxnet, is_kaldi, is_onnx = deduce_framework_by_namespace(
        argv)

    if not any([is_tf, is_caffe, is_mxnet, is_kaldi, is_onnx]):
        raise Error(
            'Framework {} is not a valid target. Please use --framework with one from the list: caffe, tf, '
            'mxnet, kaldi, onnx. ' + refer_to_faq_msg(15), argv.framework)

    if is_tf and not argv.input_model and not argv.saved_model_dir and not argv.input_meta_graph:
        raise Error(
            'Path to input model or saved model dir is required: use --input_model, --saved_model_dir or '
            '--input_meta_graph')
    elif is_mxnet and not argv.input_model and not argv.input_symbol and not argv.pretrained_model_name:
        raise Error(
            'Path to input model or input symbol or pretrained_model_name is required: use --input_model or '
            '--input_symbol or --pretrained_model_name')
    elif is_caffe and not argv.input_model and not argv.input_proto:
        raise Error(
            'Path to input model or input proto is required: use --input_model or --input_proto'
        )
    elif (is_kaldi or is_onnx) and not argv.input_model:
        raise Error('Path to input model is required: use --input_model.')

    log.debug(str(argv))
    log.debug("Model Optimizer started")
    t = tm.Telemetry()

    model_name = "<UNKNOWN_NAME>"
    if argv.model_name:
        model_name = argv.model_name
    elif argv.input_model:
        model_name = get_model_name(argv.input_model)
    elif is_tf and argv.saved_model_dir:
        model_name = "saved_model"
    elif is_tf and argv.input_meta_graph:
        model_name = get_model_name(argv.input_meta_graph)
    elif is_mxnet and argv.input_symbol:
        model_name = get_model_name(argv.input_symbol)
    argv.model_name = model_name

    log.debug('Output model name would be {}{{.xml, .bin}}'.format(
        argv.model_name))

    # if --input_proto is not provided, try to derive it from the model
    # file name by suffix substitution
    if is_caffe and not argv.input_proto:
        argv.input_proto = replace_ext(argv.input_model, '.caffemodel',
                                       '.prototxt')

        if not argv.input_proto:
            raise Error(
                "Cannot find prototxt file: for Caffe please specify --input_proto - a "
                +
                "protobuf file that stores topology and --input_model that stores "
                + "pretrained weights. " + refer_to_faq_msg(20))
        log.info('Deduced name for prototxt: {}'.format(argv.input_proto))

    if not argv.silent:
        print_argv(argv, is_caffe, is_tf, is_mxnet, is_kaldi, is_onnx,
                   argv.model_name)

    # This try-except is an additional safeguard so that the IE
    # dependency search does not break the MO pipeline
    try:
        argv.ie_is_available = find_ie_version(silent=argv.silent)

        if not argv.ie_is_available and not argv.silent:
            print(
                "[ WARNING ] Could not find the Inference Engine Python API. At this moment, the Inference Engine dependency is not required, but will be required in future releases."
            )
            print(
                "[ WARNING ] Consider building the Inference Engine Python API from sources or try to install OpenVINO (TM) Toolkit using \"install_prerequisites.{}\""
                .format("bat" if sys.platform == "windows" else "sh"))
            # If the IE was not found, it will not print the MO version, so we have to print it manually
            print("{}: \t{}".format("Model Optimizer version", get_version()))
    except Exception as e:
        argv.ie_is_available = False

    # This is just to check that the transform key is valid and that transformations are available
    check_available_transforms(parse_transform(argv.transform),
                               argv.ie_is_available)

    if argv.legacy_ir_generation and len(argv.transform) != 0:
        raise Error(
            "--legacy_ir_generation and --transform keys can not be used at the same time."
        )

    ret_code = check_requirements(framework=argv.framework)
    if ret_code:
        raise Error(
            'check_requirements exit with return code {}'.format(ret_code))

    if is_tf and argv.tensorflow_use_custom_operations_config is not None:
        argv.transformations_config = argv.tensorflow_use_custom_operations_config

    if is_caffe and argv.mean_file and argv.mean_values:
        raise Error(
            'Both --mean_file and --mean_values are specified. Specify either mean file or mean values. '
            + refer_to_faq_msg(17))
    elif is_caffe and argv.mean_file and argv.mean_file_offsets:
        values = get_tuple_values(argv.mean_file_offsets,
                                  t=int,
                                  num_exp_values=2)
        mean_file_offsets = np.array([int(x) for x in values[0].split(',')])
        if not all([offset >= 0 for offset in mean_file_offsets]):
            raise Error(
                "Negative value specified for --mean_file_offsets option. "
                "Please specify positive integer values in format '(x,y)'. " +
                refer_to_faq_msg(18))
        argv.mean_file_offsets = mean_file_offsets

    if argv.scale and argv.scale_values:
        raise Error(
            'Both --scale and --scale_values are defined. Specify either scale factor or scale values per input '
            + 'channels. ' + refer_to_faq_msg(19))

    if argv.scale and argv.scale < 1.0:
        log.error(
            "The scale value is less than 1.0. This is most probably an issue because the scale value specifies "
            "floating point value which all input values will be *divided*.",
            extra={'is_warning': True})

    if argv.input_model and (is_tf and argv.saved_model_dir):
        raise Error('Both --input_model and --saved_model_dir are defined. '
                    'Specify either input model or saved model directory.')
    if is_tf:
        if argv.saved_model_tags is not None:
            if ' ' in argv.saved_model_tags:
                raise Error(
                    'Incorrect saved model tag was provided. Specify --saved_model_tags with no spaces in it'
                )
            argv.saved_model_tags = argv.saved_model_tags.split(',')

    argv.output = argv.output.split(',') if argv.output else None

    argv.placeholder_shapes, argv.placeholder_data_types = get_placeholder_shapes(
        argv.input, argv.input_shape, argv.batch)

    mean_values = parse_tuple_pairs(argv.mean_values)
    scale_values = parse_tuple_pairs(argv.scale_values)
    mean_scale = get_mean_scale_dictionary(mean_values, scale_values,
                                           argv.input)
    argv.mean_scale_values = mean_scale

    if not os.path.exists(argv.output_dir):
        try:
            os.makedirs(argv.output_dir)
        except PermissionError as e:
            raise Error(
                "Failed to create directory {}. Permission denied! " +
                refer_to_faq_msg(22), argv.output_dir) from e
    else:
        if not os.access(argv.output_dir, os.W_OK):
            raise Error(
                "Output directory {} is not writable for current user. " +
                refer_to_faq_msg(22), argv.output_dir)

    log.debug("Placeholder shapes : {}".format(argv.placeholder_shapes))

    if hasattr(argv,
               'extensions') and argv.extensions and argv.extensions != '':
        extensions = argv.extensions.split(',')
    else:
        extensions = None

    argv.freeze_placeholder_with_value, argv.input = get_freeze_placeholder_values(
        argv.input, argv.freeze_placeholder_with_value)
    if is_tf:
        t.send_event('mo', 'framework', 'tf')
        from mo.front.tf.register_custom_ops import get_front_classes
        import_extensions.load_dirs(argv.framework, extensions,
                                    get_front_classes)
    elif is_caffe:
        t.send_event('mo', 'framework', 'caffe')
        from mo.front.caffe.register_custom_ops import get_front_classes
        import_extensions.load_dirs(argv.framework, extensions,
                                    get_front_classes)
    elif is_mxnet:
        t.send_event('mo', 'framework', 'mxnet')
        from mo.front.mxnet.register_custom_ops import get_front_classes
        import_extensions.load_dirs(argv.framework, extensions,
                                    get_front_classes)
    elif is_kaldi:
        t.send_event('mo', 'framework', 'kaldi')
        from mo.front.kaldi.register_custom_ops import get_front_classes
        import_extensions.load_dirs(argv.framework, extensions,
                                    get_front_classes)
    elif is_onnx:
        t.send_event('mo', 'framework', 'onnx')
        from mo.front.onnx.register_custom_ops import get_front_classes
        import_extensions.load_dirs(argv.framework, extensions,
                                    get_front_classes)
    graph = unified_pipeline(argv)
    return graph
Code example #17
def emit_ir(graph: Graph, argv: argparse.Namespace):
    NormalizeTI().find_and_replace_pattern(graph)
    for_graph_and_each_sub_graph_recursively(
        graph,
        RemoveConstOps().find_and_replace_pattern)
    for_graph_and_each_sub_graph_recursively(
        graph,
        CreateConstNodesReplacement().find_and_replace_pattern)

    mean_data = deepcopy(graph.graph['mf']) if 'mf' in graph.graph else None
    input_names = deepcopy(
        graph.graph['input_names']) if 'input_names' in graph.graph else []

    # Remove the temporary ie_is_available key from argv so that it does not end up in the IR
    ie_is_available = argv.ie_is_available
    del argv.ie_is_available

    prepare_emit_ir(graph=graph,
                    data_type=graph.graph['cmd_params'].data_type,
                    output_dir=argv.output_dir,
                    output_model_name=argv.model_name,
                    mean_data=mean_data,
                    input_names=input_names,
                    meta_info=get_meta_info(argv),
                    use_temporary_path=True)

    # This graph cleanup is required to avoid double memory consumption
    graph.clear()

    if not (argv.framework == 'tf'
            and argv.tensorflow_custom_operations_config_update):
        output_dir = argv.output_dir if argv.output_dir != '.' else os.getcwd()
        orig_model_name = os.path.normpath(
            os.path.join(output_dir, argv.model_name))

        return_code = "not executed"
        # This try-except is an additional safeguard so that the IE
        # dependency search does not break the MO pipeline
        try:
            if not argv.legacy_ir_generation and ie_is_available:
                path_to_offline_transformations = os.path.join(
                    os.path.realpath(os.path.dirname(__file__)), 'back',
                    'offline_transformations.py')
                status = subprocess.run([
                    sys.executable, path_to_offline_transformations,
                    "--input_model", orig_model_name, "--framework",
                    argv.framework, "--transform", argv.transform
                ],
                                        env=os.environ)
                return_code = status.returncode
        except Exception as e:
            return_code = "failed"
            log.error(e, extra={'is_warning': True})

        message = str(
            dict({
                "platform": platform.system(),
                "mo_version": get_simplified_mo_version(),
                "ie_version": get_simplified_ie_version(env=os.environ),
                "python_version": sys.version,
                "return_code": return_code
            }))
        t = tm.Telemetry()
        t.send_event('mo', 'offline_transformations_status', message)

        # If the IR wasn't produced by the offline_transformations step, we need to fall back
        # to the IR produced by prepare_ir, renaming it from XXX_tmp.xml to XXX.xml
        suffixes = [".xml", ".bin", ".mapping"]
        if return_code != 0:
            if len(argv.transform) != 0:
                # Remove temporary IR before throwing exception
                for suf in suffixes:
                    path_to_file = orig_model_name + "_tmp" + suf
                    if os.path.exists(path_to_file):
                        os.remove(path_to_file)
                raise Error("Failed to apply transformations: {}".format(
                    argv.transform))

            log.error("Using fallback to produce IR.",
                      extra={'is_warning': True})
            for suf in suffixes:
                # remove existing files
                path_to_file = orig_model_name + suf
                if os.path.exists(path_to_file):
                    os.remove(path_to_file)

                # rename tmp IR to original name
                os.rename(orig_model_name + "_tmp" + suf,
                          orig_model_name + suf)
        else:
            for suf in suffixes:
                # remove existing files
                path_to_file = orig_model_name + "_tmp" + suf
                if os.path.exists(path_to_file):
                    os.remove(path_to_file)

            # add meta information to IR
            append_ir_info(file=orig_model_name,
                           meta_info=get_meta_info(argv),
                           mean_data=mean_data,
                           input_names=input_names)

        print('[ SUCCESS ] Generated IR version {} model.'.format(
            get_ir_version(argv)))
        print('[ SUCCESS ] XML file: {}.xml'.format(orig_model_name))
        print('[ SUCCESS ] BIN file: {}.bin'.format(orig_model_name))

    return 0
Code example #18
def setUpClass(cls):
    cls.telemetry = tm.Telemetry('Datumaro')