Code Example #1
def main(cli_parser: argparse.ArgumentParser, fem: FrontEndManager,
         framework: str):
    telemetry = tm.Telemetry(tid=get_tid(),
                             app_name='Model Optimizer',
                             app_version=get_simplified_mo_version())
    telemetry.start_session('mo')
    telemetry.send_event('mo', 'version', get_simplified_mo_version())
    try:
        # Initialize the logger with 'ERROR' as the default level so that nice messages
        # can be formed before the arg parser delivers the log_level requested by the user
        init_logger('ERROR', False)

        argv = cli_parser.parse_args()
        send_params_info(argv, cli_parser)
        if framework:
            argv.framework = framework
        argv.feManager = fem

        ov_update_message = None
        if not hasattr(argv, 'silent') or not argv.silent:
            ov_update_message = get_ov_update_message()
        ret_code = driver(argv)
        if ov_update_message:
            print(ov_update_message)
        telemetry.send_event('mo', 'conversion_result', 'success')
        telemetry.end_session('mo')
        telemetry.force_shutdown(1.0)
        return ret_code
    except (FileNotFoundError, NotADirectoryError) as e:
        log.error('File {} was not found'.format(
            str(e).split('No such file or directory:')[1]))
        log.debug(traceback.format_exc())
    except Error as err:
        analysis_results = AnalysisResults()
        if analysis_results.get_messages() is not None:
            for el in analysis_results.get_messages():
                log.error(el, extra={'analysis_info': True})
        log.error(err)
        log.debug(traceback.format_exc())
    except FrameworkError as err:
        log.error(err, extra={'framework_error': True})
        log.debug(traceback.format_exc())
    except Exception as err:
        log.error("-------------------------------------------------")
        log.error("----------------- INTERNAL ERROR ----------------")
        log.error("Unexpected exception happened.")
        log.error(
            "Please contact Model Optimizer developers and forward the following information:"
        )
        log.error(str(err))
        log.error(traceback.format_exc())
        log.error("---------------- END OF BUG REPORT --------------")
        log.error("-------------------------------------------------")

    telemetry.send_event('mo', 'conversion_result', 'fail')
    telemetry.end_session('mo')
    telemetry.force_shutdown(1.0)
    return 1
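
A minimal sketch of an entry point that could drive the main() above. The helper name get_all_cli_parser and the ngraph.frontend.FrontEndManager import path are assumptions, not confirmed by this listing:

import sys

# Hypothetical imports; the real module paths inside the MO package may differ.
from mo.utils.cli_parser import get_all_cli_parser
from ngraph.frontend import FrontEndManager

if __name__ == '__main__':
    fem = FrontEndManager()
    # An empty framework string keeps whatever framework the CLI arguments specify.
    sys.exit(main(get_all_cli_parser(), fem, ''))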
Code Example #2
def emit_ir(graph: Graph, argv: argparse.Namespace):
    NormalizeTI().find_and_replace_pattern(graph)
    for_graph_and_each_sub_graph_recursively(graph, RemoveConstOps().find_and_replace_pattern)
    for_graph_and_each_sub_graph_recursively(graph, CreateConstNodesReplacement().find_and_replace_pattern)

    prepare_emit_ir(graph=graph,
                    data_type=graph.graph['cmd_params'].data_type,
                    output_dir=argv.output_dir,
                    output_model_name=argv.model_name,
                    mean_data=graph.graph['mf'] if 'mf' in graph.graph else None,
                    input_names=graph.graph['input_names'] if 'input_names' in graph.graph else [],
                    meta_info=get_meta_info(argv))

    if not (argv.framework == 'tf' and argv.tensorflow_custom_operations_config_update):
        output_dir = argv.output_dir if argv.output_dir != '.' else os.getcwd()
        orig_model_name = os.path.normpath(os.path.join(output_dir, argv.model_name))

        return_code = "not executed"
        # This try-except is an additional safeguard so that the IE
        # dependency search does not break the MO pipeline
        try:
            if find_ie_version(silent=True):
                path_to_offline_transformations = os.path.join(os.path.realpath(os.path.dirname(__file__)), 'back',
                                                               'offline_transformations.py')
                status = subprocess.run([sys.executable, path_to_offline_transformations, orig_model_name], env=os.environ, timeout=10)
                return_code = status.returncode
                if return_code != 0 and not argv.silent:
                    print("[ WARNING ] offline_transformations return code {}".format(return_code))
        except Exception:
            # Deliberately swallow any failure; see the safeguard note above
            pass

        message = str(dict({
            "platform": platform.system(),
            "mo_version": get_simplified_mo_version(),
            "ie_version": get_simplified_ie_version(env=os.environ),
            "python_version": sys.version,
            "return_code": return_code
        }))
        t = tm.Telemetry()
        t.send_event('mo', 'offline_transformations_status', message)

        print('[ SUCCESS ] Generated IR version {} model.'.format(get_ir_version(argv)))
        print('[ SUCCESS ] XML file: {}.xml'.format(orig_model_name))
        print('[ SUCCESS ] BIN file: {}.bin'.format(orig_model_name))

    return 0
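
The timeout=10 passed to subprocess.run above is one reason the blanket try/except matters: if the child outlives the timeout, run() kills it and raises subprocess.TimeoutExpired instead of returning. A small self-contained demonstration of that standard-library behavior:

import subprocess
import sys

try:
    # The child sleeps longer than the allowed timeout on purpose.
    subprocess.run([sys.executable, '-c', 'import time; time.sleep(5)'],
                   timeout=1)
except subprocess.TimeoutExpired as e:
    # No CompletedProcess is returned, so code that only checks
    # returncode would never run without this handler.
    print('timed out after {} seconds'.format(e.timeout))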
Code Example #3
def emit_ir(graph: Graph, argv: argparse.Namespace):
    NormalizeTI().find_and_replace_pattern(graph)
    for_graph_and_each_sub_graph_recursively(
        graph,
        RemoveConstOps().find_and_replace_pattern)
    for_graph_and_each_sub_graph_recursively(
        graph,
        CreateConstNodesReplacement().find_and_replace_pattern)

    if 'feManager' in argv:
        del argv.feManager

    mean_data = deepcopy(graph.graph['mf']) if 'mf' in graph.graph else None
    input_names = deepcopy(
        graph.graph['input_names']) if 'input_names' in graph.graph else []

    prepare_emit_ir(graph=graph,
                    data_type=graph.graph['cmd_params'].data_type,
                    output_dir=argv.output_dir,
                    output_model_name=argv.model_name,
                    mean_data=mean_data,
                    input_names=input_names,
                    meta_info=get_meta_info(argv),
                    use_temporary_path=True)

    # This graph cleanup is required to avoid double memory consumption
    graph.clear()

    if not (argv.framework == 'tf'
            and argv.tensorflow_custom_operations_config_update):
        output_dir = argv.output_dir if argv.output_dir != '.' else os.getcwd()
        orig_model_name = os.path.normpath(
            os.path.join(output_dir, argv.model_name))

        return_code = "not executed"
        # This try-except is an additional safeguard so that the IE
        # dependency search does not break the MO pipeline
        try:
            if not argv.legacy_ir_generation:
                path_to_offline_transformations = os.path.join(
                    os.path.realpath(os.path.dirname(__file__)), 'back',
                    'offline_transformations.py')
                cmd = [
                    sys.executable, path_to_offline_transformations,
                    "--input_model", orig_model_name, "--framework",
                    argv.framework, "--transform", argv.transform
                ]
                if "compress_fp16" in argv and argv.compress_fp16:
                    cmd += ["--compress_fp16"]
                    # restore data_type cmd parameter
                    argv.data_type = 'FP16'
                status = subprocess.run(cmd, env=os.environ)
                return_code = status.returncode
        except Exception as e:
            return_code = "failed"
            log.error(e)

        message = str(
            dict({
                "platform": platform.system(),
                "mo_version": get_simplified_mo_version(),
                "ie_version": get_simplified_ie_version(env=os.environ),
                "python_version": sys.version,
                "return_code": return_code
            }))
        t = tm.Telemetry()
        t.send_event('mo', 'offline_transformations_status', message)

        if return_code != 0:
            raise Error("offline transformations step has failed.")

        for suf in [".xml", ".bin", ".mapping"]:
            # remove existing files
            path_to_file = orig_model_name + "_tmp" + suf
            if os.path.exists(path_to_file):
                os.remove(path_to_file)

        # add meta information to IR
        append_ir_info(file=orig_model_name,
                       meta_info=get_meta_info(argv),
                       mean_data=mean_data,
                       input_names=input_names)

        print('[ SUCCESS ] Generated IR version {} model.'.format(
            get_ir_version(argv)))
        print('[ SUCCESS ] XML file: {}.xml'.format(orig_model_name))
        print('[ SUCCESS ] BIN file: {}.bin'.format(orig_model_name))

    return 0
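
The command list built above implies a specific CLI on the offline_transformations.py side. A hypothetical argparse stub mirroring only the flags this caller passes (the real script's interface is not shown in this listing):

import argparse

parser = argparse.ArgumentParser(description='offline_transformations.py stub')
parser.add_argument('--input_model', required=True,
                    help='path prefix of the temporary IR written by prepare_emit_ir')
parser.add_argument('--framework', required=True)
parser.add_argument('--transform', default='',
                    help='forwarded verbatim from argv.transform')
parser.add_argument('--compress_fp16', action='store_true',
                    help='only appended by the caller when FP16 compression is requested')

if __name__ == '__main__':
    print(parser.parse_args())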
Code Example #4
def import_core_modules(silent: bool, path_to_module: str):
    """
        This function checks that InferenceEngine Python API is available
        and necessary python modules exists. So the next list of imports
        must contain all IE/NG Python API imports that are used inside MO.

    :param silent: enables or disables logs printing to stdout
    :param path_to_module: path where python API modules were found
    :return: True if all imports were successful and False otherwise
    """
    try:
        from openvino.inference_engine import get_version, read_network  # pylint: disable=import-error,no-name-in-module
        from openvino.offline_transformations import ApplyMOCTransformations, ApplyLowLatencyTransformation, \
            ApplyMakeStatefulTransformation, GenerateMappingFile  # pylint: disable=import-error,no-name-in-module

        # TODO: this is a temporary import to check that the nGraph Python API is available.
        # In the future we need to replace it with Frontend imports
        from ngraph.impl.op import Parameter  # pylint: disable=import-error,no-name-in-module
        from _pyngraph import PartialShape, Dimension  # pylint: disable=import-error,no-name-in-module

        import openvino  # pylint: disable=import-error,no-name-in-module
        import ngraph  # pylint: disable=import-error,no-name-in-module
        import ngraph.frontend  # pylint: disable=import-error,no-name-in-module

        if silent:
            return True

        ie_version = str(get_version())
        mo_version = str(v.get_version())  # pylint: disable=no-member,no-name-in-module

        print("\t- {}: \t{}".format("Inference Engine found in",
                                    os.path.dirname(openvino.__file__)))
        # TODO: when the nGraph version becomes available we need to start comparing it to the IE and MO versions. Ticket: 58091
        print("\t- {}: \t{}".format("nGraph found in",
                                    os.path.dirname(ngraph.__file__)))
        print("{}: \t{}".format("Inference Engine version", ie_version))
        print("{}: \t{}".format("Model Optimizer version", mo_version))

        versions_mismatch = False
        if mo_version != ie_version:
            versions_mismatch = True
            extracted_mo_release_version = v.extract_release_version(
                mo_version)
            mo_is_custom = extracted_mo_release_version == (None, None)

            print(
                "[ WARNING ] Model Optimizer and Inference Engine versions do not match."
            )
            print(
                "[ WARNING ] Consider building the Inference Engine Python API from source or reinstalling the OpenVINO "
                "(TM) toolkit using",
                end=" ")
            if mo_is_custom:
                print(
                    "\"pip install openvino\" (may be incompatible with the current Model Optimizer version)"
                )
            else:
                print("\"pip install openvino=={}.{}\"".format(
                    *extracted_mo_release_version))

        simplified_mo_version = v.get_simplified_mo_version()
        message = str(
            dict({
                "platform": platform.system(),
                "mo_version": simplified_mo_version,
                "ie_version": v.get_simplified_ie_version(version=ie_version),
                "versions_mismatch": versions_mismatch,
            }))
        send_telemetry(simplified_mo_version, message, 'ie_version_check')

        return True
    except Exception as e:
        # Do not print a warning if the module wasn't found or silent mode is on
        if "No module named 'openvino'" not in str(e) and not silent:
            print(
                "[ WARNING ] Failed to import Inference Engine Python API in: {}"
                .format(path_to_module))
            print("[ WARNING ] {}".format(e))

            # Send telemetry message about warning
            simplified_mo_version = v.get_simplified_mo_version()
            message = str(
                dict({
                    "platform": platform.system(),
                    "mo_version": simplified_mo_version,
                    "ie_version": v.get_simplified_ie_version(env=os.environ),
                    "python_version": sys.version,
                    "error_type": classify_error_type(e),
                }))
            send_telemetry(simplified_mo_version, message, 'ie_import_failed')

        return False
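
Code Example #5 below reads argv.ie_is_available, which suggests a call site that stores this function's result on the parsed arguments. A hedged sketch of such wiring; the surrounding code is an assumption, not taken from MO:

import argparse

# Hypothetical call site; the actual MO wiring may differ.
argv = argparse.Namespace(silent=False)
argv.ie_is_available = import_core_modules(silent=argv.silent,
                                           path_to_module='openvino')
if not argv.ie_is_available:
    print('[ WARNING ] Inference Engine Python API is not available')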
Code Example #5
def emit_ir(graph: Graph, argv: argparse.Namespace):
    NormalizeTI().find_and_replace_pattern(graph)
    for_graph_and_each_sub_graph_recursively(
        graph,
        RemoveConstOps().find_and_replace_pattern)
    for_graph_and_each_sub_graph_recursively(
        graph,
        CreateConstNodesReplacement().find_and_replace_pattern)

    mean_data = deepcopy(graph.graph['mf']) if 'mf' in graph.graph else None
    input_names = deepcopy(
        graph.graph['input_names']) if 'input_names' in graph.graph else []

    # Remove the temporary ie_is_available key from argv so it does not end up in the IR
    ie_is_available = argv.ie_is_available
    del argv.ie_is_available

    prepare_emit_ir(graph=graph,
                    data_type=graph.graph['cmd_params'].data_type,
                    output_dir=argv.output_dir,
                    output_model_name=argv.model_name,
                    mean_data=mean_data,
                    input_names=input_names,
                    meta_info=get_meta_info(argv),
                    use_temporary_path=True)

    # This graph cleanup is required to avoid double memory consumption
    graph.clear()

    if not (argv.framework == 'tf'
            and argv.tensorflow_custom_operations_config_update):
        output_dir = argv.output_dir if argv.output_dir != '.' else os.getcwd()
        orig_model_name = os.path.normpath(
            os.path.join(output_dir, argv.model_name))

        return_code = "not executed"
        # This try-except is an additional safeguard so that the IE
        # dependency search does not break the MO pipeline
        try:
            if not argv.legacy_ir_generation and ie_is_available:
                path_to_offline_transformations = os.path.join(
                    os.path.realpath(os.path.dirname(__file__)), 'back',
                    'offline_transformations.py')
                status = subprocess.run([
                    sys.executable, path_to_offline_transformations,
                    "--input_model", orig_model_name, "--framework",
                    argv.framework, "--transform", argv.transform
                ],
                                        env=os.environ)
                return_code = status.returncode
        except Exception as e:
            return_code = "failed"
            log.error(e, extra={'is_warning': True})

        message = str(
            dict({
                "platform": platform.system(),
                "mo_version": get_simplified_mo_version(),
                "ie_version": get_simplified_ie_version(env=os.environ),
                "python_version": sys.version,
                "return_code": return_code
            }))
        t = tm.Telemetry()
        t.send_event('mo', 'offline_transformations_status', message)

        # If the IR wasn't produced by the offline_transformations step, we need to fall back
        # to the IR produced by prepare_ir. That IR needs to be renamed from XXX_tmp.xml to XXX.xml
        suffixes = [".xml", ".bin", ".mapping"]
        if return_code != 0:
            if len(argv.transform) != 0:
                # Remove temporary IR before throwing exception
                for suf in suffixes:
                    path_to_file = orig_model_name + "_tmp" + suf
                    if os.path.exists(path_to_file):
                        os.remove(path_to_file)
                raise Error("Failed to apply transformations: {}".format(
                    argv.transform))

            log.error("Using fallback to produce IR.",
                      extra={'is_warning': True})
            for suf in suffixes:
                # remove existing files
                path_to_file = orig_model_name + suf
                if os.path.exists(path_to_file):
                    os.remove(path_to_file)

                # rename tmp IR to original name
                os.rename(orig_model_name + "_tmp" + suf,
                          orig_model_name + suf)
        else:
            for suf in suffixes:
                # remove existing files
                path_to_file = orig_model_name + "_tmp" + suf
                if os.path.exists(path_to_file):
                    os.remove(path_to_file)

            # add meta information to IR
            append_ir_info(file=orig_model_name,
                           meta_info=get_meta_info(argv),
                           mean_data=mean_data,
                           input_names=input_names)

        print('[ SUCCESS ] Generated IR version {} model.'.format(
            get_ir_version(argv)))
        print('[ SUCCESS ] XML file: {}.xml'.format(orig_model_name))
        print('[ SUCCESS ] BIN file: {}.bin'.format(orig_model_name))

    return 0
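
The fallback branch above promotes XXX_tmp.* to XXX.* with a remove-then-rename pair per file. A minimal standalone sketch of the same idea using os.replace, which overwrites the destination in a single step; this is an alternative formulation, not the code MO actually uses:

import os

def promote_tmp_ir(orig_model_name, suffixes=(".xml", ".bin", ".mapping")):
    # Move each temporary IR file over its final name, overwriting if present.
    for suf in suffixes:
        tmp_path = orig_model_name + "_tmp" + suf
        if os.path.exists(tmp_path):
            os.replace(tmp_path, orig_model_name + suf)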
Code Example #6
def test_simplify_mo_version_custom(self, mock_open, mock_isfile):
    mock_isfile.return_value = True
    mock_open.return_value.__enter__ = mock_open
    self.assertEqual(get_simplified_mo_version(), "custom")
Code Example #7
def test_simplify_mo_version_release(self, mock_open, mock_isfile):
    mock_isfile.return_value = True
    mock_open.return_value.__enter__ = mock_open
    self.assertEqual(get_simplified_mo_version(), "2021.1")
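
Code Examples #6 and #7 are test methods shown without their class or mock decorators. A minimal sketch of the context they appear to assume, using unittest.mock.patch; the import path of get_simplified_mo_version and the patch targets are guesses, and the mocked file contents that would drive the "custom" result are presumably configured in the real decorators (e.g. via read_data), which is not reproduced here:

import unittest
from unittest.mock import patch

# Hypothetical import path; the module defining get_simplified_mo_version may differ.
from mo.utils.version import get_simplified_mo_version

class SimplifyMOVersionTest(unittest.TestCase):
    @patch('os.path.isfile')   # injected as mock_isfile (outer decorator, second argument)
    @patch('builtins.open')    # injected as mock_open (inner decorator, first argument)
    def test_simplify_mo_version_custom(self, mock_open, mock_isfile):
        mock_isfile.return_value = True
        # Make the mock usable as a context manager, as in the original test.
        mock_open.return_value.__enter__ = mock_open
        self.assertEqual(get_simplified_mo_version(), "custom")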
Code Example #8
def import_core_modules(silent: bool, path_to_module: str):
    try:
        from openvino.inference_engine import IECore, get_version  # pylint: disable=import-error
        from openvino.offline_transformations import ApplyMOCTransformations, CheckAPI  # pylint: disable=import-error

        import openvino  # pylint: disable=import-error

        if silent:
            return True

        ie_version = str(get_version())
        mo_version = str(v.get_version())  # pylint: disable=no-member

        print("\t- {}: \t{}".format("Inference Engine found in",
                                    os.path.dirname(openvino.__file__)))
        print("{}: \t{}".format("Inference Engine version", ie_version))
        print("{}: \t    {}".format("Model Optimizer version", mo_version))

        versions_mismatch = False
        # The MO and IE versions differ slightly at the beginning because the
        # IE version also includes the API version. For example:
        #   Inference Engine version: 2.1.custom_HEAD_4c8eae0ee2d403f8f5ae15b2c9ad19cfa5a9e1f9
        #   Model Optimizer version:      custom_HEAD_4c8eae0ee2d403f8f5ae15b2c9ad19cfa5a9e1f9
        # So to match these versions we skip the IE API version prefix.
        if not re.match(r"^([0-9]+).([0-9]+).{}$".format(mo_version),
                        ie_version):
            versions_mismatch = True
            extracted_mo_release_version = v.extract_release_version(
                mo_version)
            mo_is_custom = extracted_mo_release_version == (None, None)

            print(
                "[ WARNING ] Model Optimizer and Inference Engine versions do not match."
            )
            print(
                "[ WARNING ] Consider building the Inference Engine Python API from source or reinstalling the OpenVINO (TM) toolkit using",
                end=" ")
            if mo_is_custom:
                print(
                    "\"pip install openvino\" (may be incompatible with the current Model Optimizer version)"
                )
            else:
                print("\"pip install openvino=={}.{}\"".format(
                    *extracted_mo_release_version))

        simplified_mo_version = v.get_simplified_mo_version()
        message = str(
            dict({
                "platform": platform.system(),
                "mo_version": simplified_mo_version,
                "ie_version": v.get_simplified_ie_version(version=ie_version),
                "versions_mismatch": versions_mismatch,
            }))
        send_telemetry(simplified_mo_version, message, 'ie_version_check')

        return True
    except Exception as e:
        # Do not print a warning if the module wasn't found or silent mode is on
        if "No module named 'openvino'" not in str(e) and not silent:
            print(
                "[ WARNING ] Failed to import Inference Engine Python API in: {}"
                .format(path_to_module))
            print("[ WARNING ] {}".format(e))

            # Send telemetry message about warning
            simplified_mo_version = v.get_simplified_mo_version()
            message = str(
                dict({
                    "platform": platform.system(),
                    "mo_version": simplified_mo_version,
                    "ie_version": v.get_simplified_ie_version(env=os.environ),
                    "python_version": sys.version,
                    "error_type": classify_error_type(e),
                }))
            send_telemetry(simplified_mo_version, message, 'ie_import_failed')

        return False
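
The version comparison above can be exercised in isolation. A quick demonstration with the example strings from the comment, using the escaped pattern:

import re

mo_version = "custom_HEAD_4c8eae0ee2d403f8f5ae15b2c9ad19cfa5a9e1f9"
ie_version = "2.1." + mo_version

# The IE version is the MO version prefixed with the two-component API version.
pattern = r"^([0-9]+)\.([0-9]+)\.{}$".format(re.escape(mo_version))
print(bool(re.match(pattern, ie_version)))    # True: the versions match
print(bool(re.match(pattern, "2.1.2021.1")))  # False: a different MO version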