def apply_offline_transformations(input_model: str, argv: argparse.Namespace):
    # This variable is only needed by the GenerateMappingFile transformation
    # to produce a correct mapping file
    extract_names = argv.framework in ['tf', 'mxnet', 'kaldi']

    from openvino.offline_transformations import generate_mapping_file, serialize  # pylint: disable=import-error,no-name-in-module
    from openvino.frontend import FrontEndManager  # pylint: disable=no-name-in-module,import-error
    from openvino.tools.mo.back.preprocessing import apply_preprocessing  # pylint: disable=no-name-in-module,import-error

    fem = FrontEndManager()

    # We have to separate the fe object's lifetime from fem to avoid a
    # segfault during object destruction: fe must be destroyed
    # before the fem object.
    def read_model(path_to_xml):
        fe = fem.load_by_framework(framework="ir")
        function = fe.convert(fe.load(path_to_xml))
        return function

    func = read_model(input_model + "_tmp.xml")

    # TODO: use ngraph preprocessing (Mean/Scale/ReverseInputChannels) for legacy frontends
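    # Stash mean/scale/reverse_input_channels and clear them on argv so that
    # apply_preprocessing() below only handles layouts; the saved values are
    # restored right after the call.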
    reverse_input_channels = False
    if 'reverse_input_channels' in argv:
        reverse_input_channels = argv.reverse_input_channels
        argv.reverse_input_channels = False
    mean_scale_values = {}
    if 'mean_scale_values' in argv:
        mean_scale_values = argv.mean_scale_values
        argv.mean_scale_values = {}
    scale = None
    if 'scale' in argv:
        scale = argv.scale
        argv.scale = None

    # Apply preprocessing for layouts only
    apply_preprocessing(ov_function=func, argv=argv)

    if 'reverse_input_channels' in argv:
        argv.reverse_input_channels = reverse_input_channels
    if 'mean_scale_values' in argv:
        argv.mean_scale_values = mean_scale_values
    if 'scale' in argv:
        argv.scale = scale

    apply_moc_transformations(func)

    params_with_custom_types = create_params_with_custom_types(
        argv.packed_user_shapes)
    apply_moc_legacy_transformations(func, params_with_custom_types)
    apply_user_transformations(func, parse_transform(argv.transform))

    if "compress_fp16" in argv and argv.compress_fp16:
        compress_model(func)

    serialize(func,
              (input_model + ".xml").encode('utf-8'),
              (input_model + ".bin").encode('utf-8'))
    path_to_mapping = input_model + ".mapping"
    generate_mapping_file(func, path_to_mapping.encode('utf-8'), extract_names)
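
A minimal sketch of driving this entry point; the values below are illustrative, and the real Model Optimizer passes its full argparse.Namespace (apply_preprocessing may read further fields such as layout_values, so the call is left commented):

import argparse

# "input_model" is a path prefix: the function reads <prefix>_tmp.xml and
# writes <prefix>.xml, <prefix>.bin and <prefix>.mapping next to it.
args = argparse.Namespace(framework='tf', transform='',
                          packed_user_shapes=None, compress_fp16=False)
# apply_offline_transformations('/tmp/model', args)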
Example #2
def moc_emit_ir(ngraph_function: Model, argv: argparse.Namespace):
    output_dir = argv.output_dir if argv.output_dir != '.' else os.getcwd()

    # Apply preprocessing (mean/scale/reverse_channels/convert_layout/etc)
    apply_preprocessing(ov_function=ngraph_function, argv=argv)

    # Apply transformations
    from openvino.tools.mo.back.offline_transformations import apply_user_transformations, apply_moc_transformations
    apply_user_transformations(ngraph_function, parse_transform(argv.transform))
    apply_moc_transformations(ngraph_function)

    if argv.compress_fp16:
        from openvino.tools.mo.back.offline_transformations import compress_model
        compress_model(ngraph_function)

    orig_model_name = os.path.normpath(os.path.join(output_dir, argv.model_name))

    from openvino.offline_transformations_pybind import serialize # pylint: disable=import-error,no-name-in-module
    serialize(ngraph_function, (orig_model_name + ".xml").encode('utf-8'), (orig_model_name + ".bin").encode('utf-8'))

    del argv.feManager

    # add meta information to IR
    append_ir_info(file=orig_model_name,
                   meta_info=get_meta_info(argv),
                   mean_data=None,
                   input_names=None)

    print('[ SUCCESS ] Generated IR version {} model.'.format(get_ir_version(argv)))
    print('[ SUCCESS ] XML file: {}.xml'.format(orig_model_name))
    print('[ SUCCESS ] BIN file: {}.bin'.format(orig_model_name))
    return 0
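
A rough sketch of the argv fields this function reads directly (apply_preprocessing and get_meta_info consume more, so this is not the complete Model Optimizer argument set):

from types import SimpleNamespace

argv = SimpleNamespace(output_dir='.', transform='', compress_fp16=False,
                       model_name='my_model', feManager=None)
# moc_emit_ir(model, argv)  # would write ./my_model.xml and ./my_model.bin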
Example #3
def test_single_pass_with_multiple_args(self):
    self.assertEqual(
        parse_transform(
            "LowLatency2[use_const_initializer=True;dummy_attr=3.14]"),
        [("LowLatency2", {
            "use_const_initializer": True,
            "dummy_attr": 3.14
        })])
Example #4
def test_multiple_passes_with_args(self):
    self.assertEqual(
        parse_transform(
            "LowLatency2[use_const_initializer=True],DummyPass[type=ReLU]"
        ), [("LowLatency2", {
            "use_const_initializer": True
        }), ("DummyPass", {
            "type": "ReLU"
        })])
Example #5
def moc_emit_ir(ngraph_function: Model, argv: argparse.Namespace):
    output_dir = argv.output_dir if argv.output_dir != '.' else os.getcwd()

    # Apply preprocessing (mean/scale/reverse_channels/convert_layout/etc)
    apply_preprocessing(ov_function=ngraph_function, argv=argv)

    # Apply transformations
    from openvino.tools.mo.back.offline_transformations import apply_user_transformations, apply_moc_transformations, \
        apply_moc_legacy_transformations

    apply_moc_transformations(ngraph_function)
    from openvino.offline_transformations import compress_quantize_weights_transformation
    compress_quantize_weights_transformation(ngraph_function)

    if argv.framework == "onnx":
        # set OldApi map in IR to be executed via OV API 1.x and for parity with legacy MO
        params_with_custom_types = [] if argv.placeholder_data_types is None \
            else list(argv.placeholder_data_types.keys())
        apply_moc_legacy_transformations(ngraph_function,
                                         params_with_custom_types)

    apply_user_transformations(ngraph_function,
                               parse_transform(argv.transform))

    if argv.compress_fp16:
        from openvino.tools.mo.back.offline_transformations import compress_model
        compress_model(ngraph_function)

    orig_model_name = os.path.normpath(
        os.path.join(output_dir, argv.model_name))

    from openvino.runtime import serialize  # pylint: disable=import-error,no-name-in-module
    from openvino.offline_transformations import generate_mapping_file  # pylint: disable=import-error,no-name-in-module
    serialize(ngraph_function, (orig_model_name + ".xml").encode('utf-8'),
              (orig_model_name + ".bin").encode('utf-8'))

    del argv.feManager

    path_to_mapping = orig_model_name + ".mapping"
    extract_names = argv.framework in ['tf', 'mxnet', 'kaldi']
    generate_mapping_file(ngraph_function, path_to_mapping.encode('utf-8'),
                          extract_names)

    # add meta information to IR
    append_ir_info(file=orig_model_name,
                   meta_info=get_meta_info(argv),
                   mean_data=None,
                   input_names=None,
                   legacy_path=False)

    print('[ SUCCESS ] Generated IR version {} model.'.format(
        get_ir_version(argv)))
    print('[ SUCCESS ] XML file: {}.xml'.format(orig_model_name))
    print('[ SUCCESS ] BIN file: {}.bin'.format(orig_model_name))
    return 0
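
Once serialize() has produced the IR pair, the result can be sanity-checked with the regular runtime API. A minimal sketch, assuming an OpenVINO 2022.x installation and a hypothetical output path:

from openvino.runtime import Core

core = Core()
model = core.read_model('/tmp/out/my_model.xml')  # hypothetical IR from above
print([inp.get_any_name() for inp in model.inputs])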
Example #6
def test_multiple_passes_with_args2(self):
    self.assertEqual(
        parse_transform(
            "LowLatency2[use_const_initializer=True,False],DummyPass1,"
            "DummyPass2[types=ReLU,PReLU;values=1,2,3]"),
        [("LowLatency2", {
            "use_const_initializer": [True, False]
        }), ("DummyPass1", {}),
         ("DummyPass2", {
             "types": ["ReLU", "PReLU"],
             "values": [1, 2, 3]
         })])
Example #7
def arguments_post_parsing(argv: argparse.Namespace):
    moc_front_end, available_moc_front_ends = get_moc_frontends(argv)

    is_tf, is_caffe, is_mxnet, is_kaldi, is_onnx =\
        deduce_framework_by_namespace(argv) if not moc_front_end else [False, False, False, False, False]
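    # When a MOC (new) front end is found, every legacy-framework flag above is
    # forced to False, so the else-branch below handles the new frontend.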

    if any([is_tf, is_caffe, is_mxnet, is_kaldi, is_onnx]):
        if new_extensions_used(argv):
            raise Error('New kind of extensions used on legacy path')
        if new_transformations_config_used(argv):
            raise Error(
                'New kind of transformations configuration used on legacy path'
            )
    else:  # new frontend used
        frameworks = ['tf', 'caffe', 'mxnet', 'kaldi', 'onnx']
        frameworks = list(set(frameworks + available_moc_front_ends))
        if argv.framework not in frameworks:
            if argv.use_legacy_frontend:
                raise Error(
                    'Framework {} is not a valid target when using the --use_legacy_frontend flag. '
                    'The following legacy frameworks are available: {}. ' +
                    refer_to_faq_msg(15), argv.framework, frameworks)
            else:
                raise Error(
                    'Framework {} is not a valid target. Please use --framework with one from the list: {}. '
                    + refer_to_faq_msg(15), argv.framework, frameworks)

    if is_tf and not argv.input_model and not argv.saved_model_dir and not argv.input_meta_graph:
        raise Error(
            'Path to input model or saved model dir is required: use --input_model, --saved_model_dir or '
            '--input_meta_graph')
    elif is_mxnet and not argv.input_model and not argv.input_symbol and not argv.pretrained_model_name:
        raise Error(
            'Path to input model or input symbol or pretrained_model_name is required: use --input_model or '
            '--input_symbol or --pretrained_model_name')
    elif is_caffe and not argv.input_model and not argv.input_proto:
        raise Error(
            'Path to input model or input proto is required: use --input_model or --input_proto'
        )
    elif (is_kaldi or is_onnx) and not argv.input_model:
        raise Error('Path to input model is required: use --input_model.')

    log.debug(str(argv))
    log.debug("Model Optimizer started")

    model_name = "<UNKNOWN_NAME>"
    if argv.model_name:
        model_name = argv.model_name
    elif argv.input_model:
        model_name = get_model_name(argv.input_model)
    elif is_tf and argv.saved_model_dir:
        model_name = "saved_model"
    elif is_tf and argv.input_meta_graph:
        model_name = get_model_name(argv.input_meta_graph)
    elif is_mxnet and argv.input_symbol:
        model_name = get_model_name(argv.input_symbol)
    argv.model_name = model_name

    log.debug('Output model name would be {}{{.xml, .bin}}'.format(
        argv.model_name))

    # if --input_proto is not provided, try to retrieve another one
    # by suffix substitution from model file name
    if is_caffe and not argv.input_proto:
        argv.input_proto = replace_ext(argv.input_model, '.caffemodel',
                                       '.prototxt')

        if not argv.input_proto:
            raise Error(
                "Cannot find prototxt file: for Caffe please specify --input_proto - a "
                +
                "protobuf file that stores topology and --input_model that stores "
                + "pretrained weights. " + refer_to_faq_msg(20))
        log.info('Deduced name for prototxt: {}'.format(argv.input_proto))

    if not argv.silent:
        print_argv(argv, is_caffe, is_tf, is_mxnet, is_kaldi, is_onnx,
                   argv.model_name)

    # This try-except is an extra safety net to ensure that the IE
    # dependency search does not break the MO pipeline
    def raise_ie_not_found():
        raise Error(
            "Could not find the Inference Engine or nGraph Python API.\n"
            "Consider building the Inference Engine and nGraph Python APIs from sources or try to install OpenVINO (TM) Toolkit using \"install_prerequisites.{}\""
            .format("bat" if sys.platform == "windows" else "sh"))

    try:
        if not find_ie_version(silent=argv.silent):
            raise_ie_not_found()
    except Exception as e:
        log.error(e)
        raise_ie_not_found()

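    # FP16 output is implemented as post-conversion compression: the model is
    # converted in FP32 first and the weights are compressed to FP16 afterwards
    # (compress_model in the examples above).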
    if 'data_type' in argv and argv.data_type in ['FP16', 'half']:
        argv.data_type = 'FP32'
        argv.compress_fp16 = True
    else:
        argv.compress_fp16 = False

    # This is just to check that transform key is valid and transformations are available
    check_available_transforms(parse_transform(argv.transform))

    if argv.legacy_ir_generation and len(argv.transform) != 0:
        raise Error(
            "--legacy_ir_generation and --transform keys can not be used at the same time."
        )

    # For C++ frontends there are no specific Python installation requirements, check only generic ones
    if moc_front_end:
        ret_code = check_requirements()
    else:
        ret_code = check_requirements(framework=argv.framework)
    if ret_code:
        raise Error(
            'check_requirements exited with return code {}'.format(ret_code))

    if is_tf and argv.tensorflow_use_custom_operations_config is not None:
        argv.transformations_config = argv.tensorflow_use_custom_operations_config

    if is_caffe and argv.mean_file and argv.mean_values:
        raise Error(
            'Both --mean_file and --mean_values are specified. Specify either mean file or mean values. '
            + refer_to_faq_msg(17))
    elif is_caffe and argv.mean_file and argv.mean_file_offsets:
        values = get_tuple_values(argv.mean_file_offsets,
                                  t=int,
                                  num_exp_values=2)
        mean_file_offsets = mo_array([int(x) for x in values[0].split(',')])
        if not all([offset >= 0 for offset in mean_file_offsets]):
            raise Error(
                "Negative value specified for --mean_file_offsets option. "
                "Please specify positive integer values in format '(x,y)'. " +
                refer_to_faq_msg(18))
        argv.mean_file_offsets = mean_file_offsets

    if argv.scale and argv.scale_values:
        raise Error(
            'Both --scale and --scale_values are defined. Specify either scale factor or scale values per input '
            + 'channels. ' + refer_to_faq_msg(19))

    if argv.scale and argv.scale < 1.0:
        log.error(
            "The scale value is less than 1.0. This is most probably an issue because the scale value specifies "
            "floating point value which all input values will be *divided*.",
            extra={'is_warning': True})

    if argv.input_model and (is_tf and argv.saved_model_dir):
        raise Error('Both --input_model and --saved_model_dir are defined. '
                    'Specify either input model or saved model directory.')
    if is_tf:
        if argv.saved_model_tags is not None:
            if ' ' in argv.saved_model_tags:
                raise Error(
                    'Incorrect saved model tag was provided. Specify --saved_model_tags with no spaces in it'
                )
            argv.saved_model_tags = argv.saved_model_tags.split(',')

    argv.output = argv.output.split(',') if argv.output else None

    inputs_list, argv.placeholder_shapes, argv.placeholder_data_types = get_placeholder_shapes(
        argv.input, argv.input_shape, argv.batch)
    argv.inputs_list = inputs_list

    mean_values = parse_tuple_pairs(argv.mean_values)
    scale_values = parse_tuple_pairs(argv.scale_values)
    mean_scale = get_mean_scale_dictionary(mean_values, scale_values,
                                           argv.input)
    argv.mean_scale_values = mean_scale
    argv.layout_values = get_layout_values(argv.layout, argv.source_layout,
                                           argv.target_layout)

    if not os.path.exists(argv.output_dir):
        try:
            os.makedirs(argv.output_dir)
        except PermissionError as e:
            raise Error(
                "Failed to create directory {}. Permission denied! " +
                refer_to_faq_msg(22), argv.output_dir) from e
    else:
        if not os.access(argv.output_dir, os.W_OK):
            raise Error(
                "Output directory {} is not writable for current user. " +
                refer_to_faq_msg(22), argv.output_dir)

    log.debug("Placeholder shapes : {}".format(argv.placeholder_shapes))

    argv.freeze_placeholder_with_value, argv.input = get_freeze_placeholder_values(
        argv.input, argv.freeze_placeholder_with_value)

    load_extensions(argv, is_tf, is_caffe, is_mxnet, is_kaldi, is_onnx)

    return argv
Example #8
def apply_offline_transformations(input_model: str, framework: str, transforms: list, compress_fp16: bool):
    # Reconstructed opening (the original snippet is truncated): it follows
    # Example #1 and the __main__ call below. apply_user_transformations,
    # apply_moc_transformations and compress_model are assumed to be defined
    # earlier in this same module.
    extract_names = framework in ['tf', 'mxnet', 'kaldi']

    from openvino.offline_transformations import generate_mapping_file, serialize  # pylint: disable=import-error,no-name-in-module
    from openvino.frontend import FrontEndManager  # pylint: disable=no-name-in-module,import-error

    fem = FrontEndManager()

    # fe must be destroyed before the fem object to avoid a segfault
    # during object destruction.
    def read_model(path_to_xml):
        fe = fem.load_by_framework(framework="ir")
        function = fe.convert(fe.load(path_to_xml))
        return function

    func = read_model(input_model + "_tmp.xml")

    apply_user_transformations(func, transforms)
    apply_moc_transformations(func)

    if compress_fp16:
        compress_model(func)

    serialize(func,
              (input_model + ".xml").encode('utf-8'),
              (input_model + ".bin").encode('utf-8'))
    path_to_mapping = input_model + ".mapping"
    generate_mapping_file(func, path_to_mapping.encode('utf-8'), extract_names)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--input_model")
    parser.add_argument("--framework")
    parser.add_argument("--transform")
    parser.add_argument("--compress_fp16", action='store_true')

    args = parser.parse_args()

    apply_offline_transformations(args.input_model, args.framework,
                                  parse_transform(args.transform),
                                  args.compress_fp16)
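
A hypothetical command line for the standalone entry point above (the script name is assumed; it reads /tmp/model_tmp.xml and writes /tmp/model.xml, /tmp/model.bin and /tmp/model.mapping):

python offline_transformations.py --input_model /tmp/model --framework tf --transform "LowLatency2" --compress_fp16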
Example #9
def test_multiple_passes_no_args(self):
    self.assertEqual(parse_transform("DummyPass,LowLatency22"),
                     [("DummyPass", {}), ("LowLatency22", {})])
Example #10
def test_single_pass_with_args(self):
    self.assertEqual(
        parse_transform("LowLatency2[use_const_initializer=True]"),
        [("LowLatency2", {
            "use_const_initializer": True
        })])
Example #11
def test_single_pass(self):
    self.assertEqual(parse_transform("LowLatency2"), [("LowLatency2", {})])
Example #12
def test_empty(self):
    self.assertEqual(parse_transform(""), [])
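
Taken together, the tests above pin down the --transform grammar: passes are comma-separated, attributes live inside [...] and are separated by ';', and a comma inside a value produces a list. A minimal re-implementation that satisfies exactly these tests (an illustrative sketch, not OpenVINO's actual parser):

import ast


def _convert(value: str):
    # Booleans, ints and floats parse via literal_eval; bare words like
    # "ReLU" raise and are kept as plain strings.
    try:
        return ast.literal_eval(value)
    except (ValueError, SyntaxError):
        return value


def parse_transform_sketch(spec: str):
    if not spec:
        return []
    # Split on commas that are not inside a [...] attribute block.
    passes, depth, start = [], 0, 0
    for i, ch in enumerate(spec):
        if ch == '[':
            depth += 1
        elif ch == ']':
            depth -= 1
        elif ch == ',' and depth == 0:
            passes.append(spec[start:i])
            start = i + 1
    passes.append(spec[start:])

    result = []
    for item in passes:
        name, _, attrs = item.partition('[')
        args = {}
        for attr in (attrs.rstrip(']').split(';') if attrs else []):
            key, _, value = attr.partition('=')
            values = [_convert(v) for v in value.split(',')]
            args[key] = values[0] if len(values) == 1 else values
        result.append((name, args))
    return result


assert parse_transform_sketch("DummyPass,LowLatency22") == \
    [("DummyPass", {}), ("LowLatency22", {})]
assert parse_transform_sketch(
    "LowLatency2[use_const_initializer=True;dummy_attr=3.14]") == \
    [("LowLatency2", {"use_const_initializer": True, "dummy_attr": 3.14})]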