Example #1
0
    def test_get_shapes_several_inputs_several_shapes5(self):
        """Freeze values come from both --input and --freeze_placeholder_with_value."""
        # some values for freezing specified using --freeze_placeholder_with_value
        argv_input = "inp1->[1.0 2.0 3.0],inp2,inp3->[1.0 1.0 2.0 3.0 5.0]"
        input_shapes = "(3,1), (3,2,3), (5)"
        argv_freeze_placeholder_with_value = "inp2->[5.0 7.0 3.0],inp4->[100.0 200.0]"

        expected_shapes = {
            'inp1': np.array([3, 1]),
            'inp2': np.array([3, 2, 3]),
            'inp3': np.array([5]),
        }
        result, _ = get_placeholder_shapes(argv_input, input_shapes)
        self.assertEqual(list(expected_shapes), list(result))
        for name, ref_shape in expected_shapes.items():
            npt.assert_array_equal(result[name], ref_shape)

        placeholder_values_ref = {
            'inp1': np.array(['1.0', '2.0', '3.0']),
            'inp3': np.array(['1.0', '1.0', '2.0', '3.0', '5.0']),
            'inp2': np.array(['5.0', '7.0', '3.0']),
            'inp4': np.array(['100.0', '200.0']),
        }
        placeholder_values_res, input_node_names_res = get_freeze_placeholder_values(
            argv_input, argv_freeze_placeholder_with_value)
        # Merge order of the two sources is not fixed, so compare sorted keys.
        self.assertEqual(sorted(placeholder_values_res), sorted(placeholder_values_ref))
        for name, ref_values in placeholder_values_ref.items():
            npt.assert_array_equal(placeholder_values_res[name], ref_values)
        self.assertEqual("inp1,inp2,inp3", input_node_names_res)
Example #2
0
 def test_get_shapes_several_inputs_several_shapes4(self):
     """Shapes from --input_shape; freeze values carried on --input itself."""
     argv_input = "inp1->[1.0 2.0 3.0],inp2,inp3->[1.0 1.0 2.0 3.0 5.0]"
     input_shapes = "(3,1), (3,2,3), (5)"

     shapes, _ = get_placeholder_shapes(argv_input, input_shapes)
     expected_shapes = {
         'inp1': np.array([3, 1]),
         'inp2': np.array([3, 2, 3]),
         'inp3': np.array([5]),
     }
     self.assertEqual(list(expected_shapes), list(shapes))
     for name in expected_shapes:
         npt.assert_array_equal(shapes[name], expected_shapes[name])

     values, node_names = get_freeze_placeholder_values(argv_input, None)
     expected_values = {
         'inp1': np.array(['1.0', '2.0', '3.0']),
         'inp3': np.array(['1.0', '1.0', '2.0', '3.0', '5.0']),
     }
     self.assertEqual(list(values), list(expected_values))
     for name in expected_values:
         npt.assert_array_equal(values[name], expected_values[name])
     self.assertEqual("inp1,inp2,inp3", node_names)
Example #3
0
 def test_get_shapes_one_input_first_neg_shape1(self):
     """A leading -1 (dynamic) dimension in --input_shape is kept as-is."""
     shapes, _ = get_placeholder_shapes("inp1,inp2", "(-1,4,1),(4,6,8)")
     expected = {'inp1': np.array([-1, 4, 1]), 'inp2': np.array([4, 6, 8])}
     self.assertEqual(list(expected), list(shapes))
     for name, ref in expected.items():
         npt.assert_array_equal(shapes[name], ref)
Example #4
0
 def test_get_shapes_one_input_one_shape(self):
     """A single input paired with a single --input_shape tuple."""
     shapes, _ = get_placeholder_shapes("inp1", "(1,22,333,123)")
     expected = {'inp1': np.array([1, 22, 333, 123])}
     self.assertEqual(list(expected), list(shapes))
     for name, ref in expected.items():
         np.testing.assert_array_equal(shapes[name], ref)
Example #5
0
 def test_get_shapes_one_input_no_shape(self):
     """With an empty --input_shape the placeholder shape defaults to [None]."""
     shapes, _ = get_placeholder_shapes("inp1", "")
     expected = {'inp1': np.array([None])}
     self.assertEqual(list(expected), list(shapes))
     for name, ref in expected.items():
         npt.assert_array_equal(shapes[name], ref)
Example #6
0
 def test_get_shapes_several_inputs_several_shapes(self):
     """Two inputs matched positionally against two --input_shape tuples."""
     shapes, _ = get_placeholder_shapes("inp1,inp2", "(1,22,333,123), (-1,45,7,1)")
     expected = {
         'inp1': np.array([1, 22, 333, 123]),
         'inp2': np.array([-1, 45, 7, 1]),
     }
     self.assertEqual(list(expected), list(shapes))
     for name, ref in expected.items():
         npt.assert_array_equal(shapes[name], ref)
Example #7
0
 def test_get_shapes_and_data_types_shape_with_ports_only(self):
     """Port-qualified names (name:port and port:name) survive shape parsing."""
     shapes, dtypes = get_placeholder_shapes(
         "placeholder1:4[3 1],placeholder2,2:placeholder3", "")
     ref_shapes = {
         'placeholder1:4': np.array([3, 1]),
         'placeholder2': None,
         '2:placeholder3': None,
     }
     ref_dtypes = {}  # no {type} specifiers in the input string
     self.assertEqual(list(ref_shapes), list(shapes))
     for name, ref in ref_shapes.items():
         npt.assert_array_equal(shapes[name], ref)
     self.assertEqual(list(ref_dtypes), list(dtypes))
     for name, ref in ref_dtypes.items():
         np.testing.assert_equal(dtypes[name], ref)
Example #8
0
 def test_get_shapes_and_data_types_with_output_ports(self):
     """Ports, shapes, data types ({i32}/{f32}) and freeze values combined."""
     argv_input = "inp1:1[3 1]->[1.0 2.0 3.0],inp2[3 2 3]{i32},inp3:4[5]{f32}->[1.0 1.0 2.0 3.0 5.0]"
     shapes, dtypes = get_placeholder_shapes(argv_input, "")
     ref_shapes = {
         'inp1:1': np.array([3, 1]),
         'inp2': np.array([3, 2, 3]),
         'inp3:4': np.array([5]),
     }
     ref_dtypes = {'inp2': np.int32, 'inp3:4': np.float32}
     self.assertEqual(list(ref_shapes), list(shapes))
     for name, ref in ref_shapes.items():
         npt.assert_array_equal(shapes[name], ref)
     self.assertEqual(list(ref_dtypes), list(dtypes))
     for name, ref in ref_dtypes.items():
         np.testing.assert_equal(dtypes[name], ref)
Example #9
0
 def test_get_shapes_several_inputs_several_shapes2(self):
     """Shapes given inline on --input and no freeze values.

     Verifies the parsed shapes, that no freeze values are produced, and
     that the comma-joined input node-name string is preserved.
     """
     # shapes specified using --input command line parameter and no values
     argv_input = "inp1[1 22 333 123],inp2[-1 45 7 1]"
     result, _ = get_placeholder_shapes(argv_input, None)
     exp_res = {
         'inp1': np.array([1, 22, 333, 123]),
         'inp2': np.array([-1, 45, 7, 1])
     }
     self.assertEqual(list(exp_res.keys()), list(result.keys()))
     for i in exp_res.keys():
         npt.assert_array_equal(result[i], exp_res[i])
     placeholder_values_res, input_node_names_res = get_freeze_placeholder_values(
         argv_input, None)
     placeholder_values_ref = {}
     input_node_names_ref = "inp1,inp2"
     self.assertEqual(list(placeholder_values_res.keys()),
                      list(placeholder_values_ref.keys()))
     for i in placeholder_values_ref.keys():
         npt.assert_array_equal(placeholder_values_res[i],
                                placeholder_values_ref[i])
     # BUG FIX: this reference string was computed but never checked;
     # sibling tests assert the node-name string, so do it here as well.
     self.assertEqual(input_node_names_ref, input_node_names_res)
Example #10
0
 def test_get_shapes_and_data_types_when_no_freeze_value(self):
     """Data types in {} are parsed even when no freeze value is present."""
     shapes, dtypes = get_placeholder_shapes(
         "placeholder1{i32}[3 1],placeholder2,placeholder3{i32}", "")
     ref_shapes = {
         'placeholder1': np.array([3, 1]),
         'placeholder2': None,
         'placeholder3': None,
     }
     ref_dtypes = {
         'placeholder1': np.int32,
         'placeholder3': np.int32,
     }
     self.assertEqual(list(ref_shapes), list(shapes))
     for name, ref in ref_shapes.items():
         npt.assert_array_equal(shapes[name], ref)
     self.assertEqual(list(ref_dtypes), list(dtypes))
     for name, ref in ref_dtypes.items():
         np.testing.assert_equal(dtypes[name], ref)
Example #11
0
 def test_get_shapes_several_inputs_several_shapes7(self):
     """0D shape ([]) with a scalar boolean freeze value on --input."""
     argv_input = "inp1[3 1]->[1.0 2.0 3.0],inp2[3 2 3],inp3[]->True"
     shapes, _ = get_placeholder_shapes(argv_input, None)
     expected_shapes = {
         'inp1': np.array([3, 1]),
         'inp2': np.array([3, 2, 3]),
         'inp3': np.array(False).shape,  # i.e. () — a 0D shape
     }
     self.assertEqual(list(expected_shapes), list(shapes))
     for name, ref in expected_shapes.items():
         npt.assert_array_equal(shapes[name], ref)
     values, _ = get_freeze_placeholder_values(argv_input, None)
     expected_values = {
         'inp1': np.array(['1.0', '2.0', '3.0']),
         'inp3': True,
     }
     self.assertEqual(list(values), list(expected_values))
     for name, ref in expected_values.items():
         npt.assert_array_equal(values[name], ref)
Example #12
0
 def test_get_shapes_several_inputs_several_shapes3(self):
     """Shapes and freeze values both supplied inline on --input."""
     # shapes and value for freezing specified using --input command line parameter
     argv_input = "inp1[3 1]->[1.0 2.0 3.0],inp2[3 2 3],inp3[5]->[1.0 1.0 2.0 3.0 5.0]"
     # BUG FIX: get_placeholder_shapes returns a (shapes, data_types) tuple
     # (see the sibling tests); it must be unpacked, otherwise the
     # result.keys() call below raises AttributeError on the tuple.
     result, _ = get_placeholder_shapes(argv_input, None)
     exp_res = {
         'inp1': np.array([3, 1]),
         'inp2': np.array([3, 2, 3]),
         'inp3': np.array([5])
     }
     self.assertEqual(list(exp_res.keys()), list(result.keys()))
     for i in exp_res.keys():
         np.testing.assert_array_equal(result[i], exp_res[i])
     placeholder_values_res, input_node_names_res = get_freeze_placeholder_values(
         argv_input, None)
     placeholder_values_ref = {
         'inp1': np.array(['1.0', '2.0', '3.0']),
         'inp3': np.array(['1.0', '1.0', '2.0', '3.0', '5.0'])
     }
     input_node_names_ref = "inp1,inp2,inp3"
     self.assertEqual(list(placeholder_values_res.keys()),
                      list(placeholder_values_ref.keys()))
     for i in placeholder_values_ref.keys():
         np.testing.assert_array_equal(placeholder_values_res[i],
                                       placeholder_values_ref[i])
     # BUG FIX: the reference was defined but never asserted against the result.
     self.assertEqual(input_node_names_ref, input_node_names_res)
Example #13
0
 def test_get_shapes_no_input_one_shape2(self):
     """--input_shape alone (no --input) yields a bare shape array."""
     shapes, _ = get_placeholder_shapes("", "[12,4,1]")
     npt.assert_array_equal(shapes, np.array([12, 4, 1]))
Example #14
0
 def test_get_shapes_no_input_no_shape(self):
     """Neither --input nor --input_shape: result defaults to [None]."""
     shapes, _ = get_placeholder_shapes("", "")
     npt.assert_array_equal(shapes, np.array([None]))
Example #15
0
File: main.py  Project: EdgarDaniel/Projeto
def driver(argv: argparse.Namespace):
    """Run the Model Optimizer conversion for the parsed CLI arguments.

    Deduces the framework when --framework was not given, validates the
    argument combination, prepares placeholder shapes and mean/scale
    values, then dispatches to the framework-specific pipeline.

    :param argv: parsed command-line arguments.
    :return: 0 on success, otherwise a non-zero error code from the pipeline.
    :raises Error: on invalid or inconsistent argument combinations.
    """
    if argv.version:
        print('Version of Model Optimizer is: {}'.format(get_version()))
        return 0

    init_logger(argv.log_level.upper(), argv.silent)
    start_time = datetime.datetime.now()

    if not argv.framework:
        # Deduce the framework from which framework-specific option was given.
        if 'saved_model_dir' in argv and argv.saved_model_dir or \
                'input_meta_graph' in argv and argv.input_meta_graph:
            argv.framework = 'tf'
        # BUG FIX: the attribute name contained a trailing space
        # ('input_symbol '), which can never match a Namespace attribute,
        # so MXNet could not be deduced from --input_symbol.
        elif 'input_symbol' in argv and argv.input_symbol or \
                'pretrained_model_name' in argv and argv.pretrained_model_name:
            argv.framework = 'mxnet'
        elif 'input_proto' in argv and argv.input_proto:
            argv.framework = 'caffe'
        elif argv.input_model is None:
            raise Error('Path to input model is required: use --input_model.')
        else:
            argv.framework = guess_framework_by_ext(argv.input_model)
        if not argv.framework:
            raise Error(
                'Framework name can not be deduced from the given options: {}={}. '
                +
                'Use --framework to choose one of caffe, tf, mxnet, kaldi, onnx',
                '--input_model',
                argv.input_model,
                refer_to_faq_msg(15),
            )

    is_tf, is_caffe, is_mxnet, is_kaldi, is_onnx = (
        argv.framework == x for x in ['tf', 'caffe', 'mxnet', 'kaldi', 'onnx'])

    # Each framework needs at least one way of pointing at the model.
    if is_tf and not argv.input_model and not argv.saved_model_dir and not argv.input_meta_graph:
        raise Error(
            'Path to input model or saved model dir is required: use --input_model, --saved_model_dir or '
            '--input_meta_graph')
    elif is_mxnet and not argv.input_model and not argv.input_symbol and not argv.pretrained_model_name:
        raise Error(
            'Path to input model or input symbol or pretrained_model_name is required: use --input_model or '
            '--input_symbol or --pretrained_model_name')
    elif is_caffe and not argv.input_model and not argv.input_proto:
        raise Error(
            'Path to input model or input proto is required: use --input_model or --input_proto'
        )
    elif (is_kaldi or is_onnx) and not argv.input_model:
        raise Error('Path to input model is required: use --input_model.')

    log.debug(str(argv))
    log.debug("Model Optimizer started")

    # Derive the output model name from whichever model source was provided.
    model_name = "<UNKNOWN_NAME>"
    if argv.model_name:
        model_name = argv.model_name
    elif argv.input_model:
        model_name = get_model_name(argv.input_model)
    elif is_tf and argv.saved_model_dir:
        model_name = "saved_model"
    elif is_tf and argv.input_meta_graph:
        model_name = get_model_name(argv.input_meta_graph)
    elif is_mxnet and argv.input_symbol:
        model_name = get_model_name(argv.input_symbol)

    log.debug('Output model name would be {}{{.xml, .bin}}'.format(model_name))

    # if --input_proto is not provided, try to retrieve another one
    # by suffix substitution from model file name
    if is_caffe and not argv.input_proto:
        argv.input_proto = replace_ext(argv.input_model, '.caffemodel',
                                       '.prototxt')

        if not argv.input_proto:
            raise Error(
                "Cannot find prototxt file: for Caffe please specify --input_proto - a "
                +
                "protobuf file that stores topology and --input_model that stores "
                + "pretrained weights. " + refer_to_faq_msg(20))
        log.info('Deduced name for prototxt: {}'.format(argv.input_proto))

    if not argv.silent:
        print_argv(argv, is_caffe, is_tf, is_mxnet, is_kaldi, is_onnx,
                   model_name)

    if not any([is_tf, is_caffe, is_mxnet, is_kaldi, is_onnx]):
        raise Error(
            'Framework {} is not a valid target. ' +
            'Please use --framework with one from the list: caffe, tf, mxnet, kaldi, onnx. '
            + refer_to_faq_msg(15), argv.framework)

    ret_code = check_requirements(framework=argv.framework)
    if ret_code:
        return ret_code

    if is_mxnet and not argv.input_shape:
        raise Error(
            'Input shape is required to convert MXNet model. Please provide it with --input_shape. '
            + refer_to_faq_msg(16))

    # --mean_file and --mean_values are mutually exclusive; the offsets,
    # if given, must be non-negative integers in (x,y) form.
    mean_file_offsets = None
    if is_caffe and argv.mean_file and argv.mean_values:
        raise Error(
            'Both --mean_file and mean_values are specified. Specify either mean file or mean values. '
            + refer_to_faq_msg(17))
    elif is_caffe and argv.mean_file and argv.mean_file_offsets:

        values = get_tuple_values(argv.mean_file_offsets,
                                  t=int,
                                  num_exp_values=2)
        mean_file_offsets = np.array([int(x) for x in values[0].split(',')])
        if not all([offset >= 0 for offset in mean_file_offsets]):
            raise Error(
                "Negative value specified for --mean_file_offsets option. "
                "Please specify positive integer values in format '(x,y)'. " +
                refer_to_faq_msg(18))
    custom_layers_mapping_path = argv.k if is_caffe and argv.k else None

    if argv.scale and argv.scale_values:
        raise Error(
            'Both --scale and --scale_values are defined. Specify either scale factor or scale values per input '
            + 'channels. ' + refer_to_faq_msg(19))

    if argv.scale and argv.scale < 1.0:
        log.error(
            "The scale value is less than 1.0. This is most probably an issue because the scale value specifies "
            "floating point value which all input values will be *divided*.",
            extra={'is_warning': True})

    if argv.input_model and (is_tf and argv.saved_model_dir):
        raise Error('Both --input_model and --saved_model_dir are defined. '
                    'Specify either input model or saved model directory.')
    if is_tf:
        if argv.saved_model_tags is not None:
            if ' ' in argv.saved_model_tags:
                raise Error(
                    'Incorrect saved model tag was provided. Specify --saved_model_tags with no spaces in it'
                )
            argv.saved_model_tags = argv.saved_model_tags.split(',')

    argv.output = argv.output.split(',') if argv.output else None

    argv.placeholder_shapes = get_placeholder_shapes(argv.input,
                                                     argv.input_shape,
                                                     argv.batch)

    mean_values = parse_tuple_pairs(argv.mean_values)
    scale_values = parse_tuple_pairs(argv.scale_values)
    mean_scale = get_mean_scale_dictionary(mean_values, scale_values,
                                           argv.input)
    argv.mean_scale_values = mean_scale

    # Ensure the output directory exists and is writable before converting.
    if not os.path.exists(argv.output_dir):
        try:
            os.makedirs(argv.output_dir)
        except PermissionError as e:
            raise Error(
                "Failed to create directory {}. Permission denied! " +
                refer_to_faq_msg(22), argv.output_dir) from e
    else:
        if not os.access(argv.output_dir, os.W_OK):
            raise Error(
                "Output directory {} is not writable for current user. " +
                refer_to_faq_msg(22), argv.output_dir)

    log.debug("Placeholder shapes : {}".format(argv.placeholder_shapes))

    ret_res = 1
    if hasattr(argv,
               'extensions') and argv.extensions and argv.extensions != '':
        extensions = argv.extensions.split(',')
    else:
        extensions = None

    argv.freeze_placeholder_with_value, argv.input = get_freeze_placeholder_values(
        argv.input, argv.freeze_placeholder_with_value)

    # Dispatch to the framework-specific conversion pipeline.
    if is_tf:
        import mo.pipeline.tf as mo_tf
        from mo.front.tf.register_custom_ops import get_front_classes
        import_extensions.load_dirs(argv.framework, extensions,
                                    get_front_classes)
        ret_res = mo_tf.tf2nx(argv,
                              argv.input_model,
                              model_name,
                              argv.output_dir,
                              is_binary=not argv.input_model_is_text)

    elif is_caffe:
        import mo.pipeline.caffe as mo_caffe
        from mo.front.caffe.register_custom_ops import get_front_classes
        import_extensions.load_dirs(argv.framework, extensions,
                                    get_front_classes)
        ret_res = mo_caffe.driver(
            argv,
            argv.input_proto,
            argv.input_model,
            model_name,
            argv.output_dir,
            argv.caffe_parser_path,
            mean_file=argv.mean_file,
            mean_file_offsets=mean_file_offsets,
            custom_layers_mapping_path=custom_layers_mapping_path)

    elif is_mxnet:
        import mo.pipeline.mx as mo_mxnet
        from mo.front.mxnet.register_custom_ops import get_front_classes
        import_extensions.load_dirs(argv.framework, extensions,
                                    get_front_classes)
        ret_res = mo_mxnet.driver(argv, argv.input_model, model_name,
                                  argv.output_dir)

    elif is_kaldi:
        import mo.pipeline.kaldi as mo_kaldi
        from mo.front.kaldi.register_custom_ops import get_front_classes
        import_extensions.load_dirs(argv.framework, extensions,
                                    get_front_classes)
        ret_res = mo_kaldi.driver(argv, argv.input_model, model_name,
                                  argv.output_dir)
    elif is_onnx:
        import mo.pipeline.onnx as mo_onnx
        from mo.front.onnx.register_custom_ops import get_front_classes
        import_extensions.load_dirs(argv.framework, extensions,
                                    get_front_classes)
        ret_res = mo_onnx.driver(argv, argv.input_model, model_name,
                                 argv.output_dir)

    if ret_res != 0:
        return ret_res
    if not (is_tf and argv.tensorflow_custom_operations_config_update):
        output_dir = argv.output_dir if argv.output_dir != '.' else os.getcwd()
        print('\n[ SUCCESS ] Generated IR model.')
        print('[ SUCCESS ] XML file: {}.xml'.format(
            os.path.join(output_dir, model_name)))
        print('[ SUCCESS ] BIN file: {}.bin'.format(
            os.path.join(output_dir, model_name)))
        elapsed_time = datetime.datetime.now() - start_time
        print('[ SUCCESS ] Total execution time: {:.2f} seconds. '.format(
            elapsed_time.total_seconds()))
    return ret_res
Example #16
0
def prepare_ir(argv: argparse.Namespace):
    """Validate CLI arguments, load framework extensions and build the graph.

    Deduces the framework from the namespace, performs the same argument
    validation as the legacy driver, sends telemetry events, then runs the
    unified conversion pipeline.

    :param argv: parsed command-line arguments.
    :return: the intermediate-representation graph built by unified_pipeline.
    :raises Error: on invalid or inconsistent argument combinations.
    """
    is_tf, is_caffe, is_mxnet, is_kaldi, is_onnx = deduce_framework_by_namespace(
        argv)

    if not any([is_tf, is_caffe, is_mxnet, is_kaldi, is_onnx]):
        raise Error(
            'Framework {} is not a valid target. Please use --framework with one from the list: caffe, tf, '
            'mxnet, kaldi, onnx. ' + refer_to_faq_msg(15), argv.framework)

    # Each framework needs at least one way of pointing at the model.
    if is_tf and not argv.input_model and not argv.saved_model_dir and not argv.input_meta_graph:
        raise Error(
            'Path to input model or saved model dir is required: use --input_model, --saved_model_dir or '
            '--input_meta_graph')
    elif is_mxnet and not argv.input_model and not argv.input_symbol and not argv.pretrained_model_name:
        raise Error(
            'Path to input model or input symbol or pretrained_model_name is required: use --input_model or '
            '--input_symbol or --pretrained_model_name')
    elif is_caffe and not argv.input_model and not argv.input_proto:
        raise Error(
            'Path to input model or input proto is required: use --input_model or --input_proto'
        )
    elif (is_kaldi or is_onnx) and not argv.input_model:
        raise Error('Path to input model is required: use --input_model.')

    log.debug(str(argv))
    log.debug("Model Optimizer started")
    t = tm.Telemetry()
    t.start_session()

    # Derive the output model name from whichever model source was provided.
    model_name = "<UNKNOWN_NAME>"
    if argv.model_name:
        model_name = argv.model_name
    elif argv.input_model:
        model_name = get_model_name(argv.input_model)
    elif is_tf and argv.saved_model_dir:
        model_name = "saved_model"
    elif is_tf and argv.input_meta_graph:
        model_name = get_model_name(argv.input_meta_graph)
    elif is_mxnet and argv.input_symbol:
        model_name = get_model_name(argv.input_symbol)
    argv.model_name = model_name

    log.debug('Output model name would be {}{{.xml, .bin}}'.format(
        argv.model_name))

    # if --input_proto is not provided, try to retrieve another one
    # by suffix substitution from model file name
    if is_caffe and not argv.input_proto:
        argv.input_proto = replace_ext(argv.input_model, '.caffemodel',
                                       '.prototxt')

        if not argv.input_proto:
            raise Error(
                "Cannot find prototxt file: for Caffe please specify --input_proto - a "
                +
                "protobuf file that stores topology and --input_model that stores "
                + "pretrained weights. " + refer_to_faq_msg(20))
        log.info('Deduced name for prototxt: {}'.format(argv.input_proto))

    if not argv.silent:
        print_argv(argv, is_caffe, is_tf, is_mxnet, is_kaldi, is_onnx,
                   argv.model_name)

    # This try-except is additional reinsurance that the IE
    # dependency search does not break the MO pipeline
    try:
        if not find_ie_version(silent=argv.silent) and not argv.silent:
            print(
                "[ WARNING ] Could not find the Inference Engine Python API. At this moment, the Inference Engine dependency is not required, but will be required in future releases."
            )
            # BUG FIX: sys.platform is 'win32' on Windows, never 'windows',
            # so the '.bat' suggestion was unreachable on Windows hosts.
            print(
                "[ WARNING ] Consider building the Inference Engine Python API from sources or try to install OpenVINO (TM) Toolkit using \"install_prerequisites.{}\""
                .format("bat" if sys.platform == "win32" else "sh"))
            # If the IE was not found, it will not print the MO version, so we have to print it manually
            print("{}: \t{}".format("Model Optimizer version", get_version()))
    except Exception:
        # Deliberate best-effort: IE detection must never abort conversion.
        pass

    ret_code = check_requirements(framework=argv.framework)
    if ret_code:
        raise Error(
            'check_requirements exit with return code {}'.format(ret_code))

    if is_tf and argv.tensorflow_use_custom_operations_config is not None:
        argv.transformations_config = argv.tensorflow_use_custom_operations_config

    # --mean_file and --mean_values are mutually exclusive; the offsets,
    # if given, must be non-negative integers in (x,y) form.
    if is_caffe and argv.mean_file and argv.mean_values:
        raise Error(
            'Both --mean_file and mean_values are specified. Specify either mean file or mean values. '
            + refer_to_faq_msg(17))
    elif is_caffe and argv.mean_file and argv.mean_file_offsets:
        values = get_tuple_values(argv.mean_file_offsets,
                                  t=int,
                                  num_exp_values=2)
        mean_file_offsets = np.array([int(x) for x in values[0].split(',')])
        if not all([offset >= 0 for offset in mean_file_offsets]):
            raise Error(
                "Negative value specified for --mean_file_offsets option. "
                "Please specify positive integer values in format '(x,y)'. " +
                refer_to_faq_msg(18))
        argv.mean_file_offsets = mean_file_offsets

    if argv.scale and argv.scale_values:
        raise Error(
            'Both --scale and --scale_values are defined. Specify either scale factor or scale values per input '
            + 'channels. ' + refer_to_faq_msg(19))

    if argv.scale and argv.scale < 1.0:
        log.error(
            "The scale value is less than 1.0. This is most probably an issue because the scale value specifies "
            "floating point value which all input values will be *divided*.",
            extra={'is_warning': True})

    if argv.input_model and (is_tf and argv.saved_model_dir):
        raise Error('Both --input_model and --saved_model_dir are defined. '
                    'Specify either input model or saved model directory.')
    if is_tf:
        if argv.saved_model_tags is not None:
            if ' ' in argv.saved_model_tags:
                raise Error(
                    'Incorrect saved model tag was provided. Specify --saved_model_tags with no spaces in it'
                )
            argv.saved_model_tags = argv.saved_model_tags.split(',')

    argv.output = argv.output.split(',') if argv.output else None

    argv.placeholder_shapes, argv.placeholder_data_types = get_placeholder_shapes(
        argv.input, argv.input_shape, argv.batch)

    mean_values = parse_tuple_pairs(argv.mean_values)
    scale_values = parse_tuple_pairs(argv.scale_values)
    mean_scale = get_mean_scale_dictionary(mean_values, scale_values,
                                           argv.input)
    argv.mean_scale_values = mean_scale

    # Ensure the output directory exists and is writable before converting.
    if not os.path.exists(argv.output_dir):
        try:
            os.makedirs(argv.output_dir)
        except PermissionError as e:
            raise Error(
                "Failed to create directory {}. Permission denied! " +
                refer_to_faq_msg(22), argv.output_dir) from e
    else:
        if not os.access(argv.output_dir, os.W_OK):
            raise Error(
                "Output directory {} is not writable for current user. " +
                refer_to_faq_msg(22), argv.output_dir)

    log.debug("Placeholder shapes : {}".format(argv.placeholder_shapes))

    if hasattr(argv,
               'extensions') and argv.extensions and argv.extensions != '':
        extensions = argv.extensions.split(',')
    else:
        extensions = None

    argv.freeze_placeholder_with_value, argv.input = get_freeze_placeholder_values(
        argv.input, argv.freeze_placeholder_with_value)
    # Report the framework to telemetry and load its front-end extensions.
    if is_tf:
        t.send_event('mo', 'framework', 'tf')
        from mo.front.tf.register_custom_ops import get_front_classes
        import_extensions.load_dirs(argv.framework, extensions,
                                    get_front_classes)
    elif is_caffe:
        t.send_event('mo', 'framework', 'caffe')
        from mo.front.caffe.register_custom_ops import get_front_classes
        import_extensions.load_dirs(argv.framework, extensions,
                                    get_front_classes)
    elif is_mxnet:
        t.send_event('mo', 'framework', 'mxnet')
        from mo.front.mxnet.register_custom_ops import get_front_classes
        import_extensions.load_dirs(argv.framework, extensions,
                                    get_front_classes)
    elif is_kaldi:
        t.send_event('mo', 'framework', 'kaldi')
        from mo.front.kaldi.register_custom_ops import get_front_classes
        import_extensions.load_dirs(argv.framework, extensions,
                                    get_front_classes)
    elif is_onnx:
        t.send_event('mo', 'framework', 'onnx')
        from mo.front.onnx.register_custom_ops import get_front_classes
        import_extensions.load_dirs(argv.framework, extensions,
                                    get_front_classes)
    graph = unified_pipeline(argv)
    return graph
Example #17
0
def arguments_post_parsing(argv: argparse.Namespace):
    """Validate, normalize and enrich parsed Model Optimizer arguments.

    Deduces the target framework (or a MOC front end), checks the
    framework-specific mandatory model-path options, derives the output model
    name, parses shape / mean / scale command-line values into structured
    attributes on ``argv``, verifies the output directory is usable and loads
    framework-specific front-end extensions.

    :param argv: parsed command-line arguments; mutated in place.
    :return: the same ``argv`` namespace with derived fields filled in.
    :raises Error: for invalid argument combinations, missing required
        options, failed requirement checks or an unusable output directory.
    """
    moc_front_end, available_moc_front_ends = get_moc_frontends(argv)

    # When a MOC front end handles the model, legacy framework deduction is skipped.
    is_tf, is_caffe, is_mxnet, is_kaldi, is_onnx =\
        deduce_framework_by_namespace(argv) if not moc_front_end else [False, False, False, False, False]

    if not any([is_tf, is_caffe, is_mxnet, is_kaldi, is_onnx]):
        frameworks = ['tf', 'caffe', 'mxnet', 'kaldi', 'onnx']
        frameworks = list(set(frameworks + available_moc_front_ends))
        if argv.framework not in frameworks:
            if argv.use_legacy_frontend:
                raise Error(
                    'Framework {} is not a valid target when using the --use_legacy_frontend flag. '
                    'The following legacy frameworks are available: {}' +
                    refer_to_faq_msg(15), argv.framework, frameworks)
            else:
                raise Error(
                    'Framework {} is not a valid target. Please use --framework with one from the list: {}. '
                    + refer_to_faq_msg(15), argv.framework, frameworks)

    # Each framework has its own set of acceptable model-path options.
    if is_tf and not argv.input_model and not argv.saved_model_dir and not argv.input_meta_graph:
        raise Error(
            'Path to input model or saved model dir is required: use --input_model, --saved_model_dir or '
            '--input_meta_graph')
    elif is_mxnet and not argv.input_model and not argv.input_symbol and not argv.pretrained_model_name:
        raise Error(
            'Path to input model or input symbol or pretrained_model_name is required: use --input_model or '
            '--input_symbol or --pretrained_model_name')
    elif is_caffe and not argv.input_model and not argv.input_proto:
        raise Error(
            'Path to input model or input proto is required: use --input_model or --input_proto'
        )
    elif (is_kaldi or is_onnx) and not argv.input_model:
        raise Error('Path to input model is required: use --input_model.')

    log.debug(str(argv))
    log.debug("Model Optimizer started")

    # Derive the output model name from whichever input option was supplied.
    model_name = "<UNKNOWN_NAME>"
    if argv.model_name:
        model_name = argv.model_name
    elif argv.input_model:
        model_name = get_model_name(argv.input_model)
    elif is_tf and argv.saved_model_dir:
        model_name = "saved_model"
    elif is_tf and argv.input_meta_graph:
        model_name = get_model_name(argv.input_meta_graph)
    elif is_mxnet and argv.input_symbol:
        model_name = get_model_name(argv.input_symbol)
    argv.model_name = model_name

    log.debug('Output model name would be {}{{.xml, .bin}}'.format(
        argv.model_name))

    # if --input_proto is not provided, try to retrieve another one
    # by suffix substitution from model file name
    if is_caffe and not argv.input_proto:
        argv.input_proto = replace_ext(argv.input_model, '.caffemodel',
                                       '.prototxt')

        if not argv.input_proto:
            raise Error(
                "Cannot find prototxt file: for Caffe please specify --input_proto - a "
                +
                "protobuf file that stores topology and --input_model that stores "
                + "pretrained weights. " + refer_to_faq_msg(20))
        log.info('Deduced name for prototxt: {}'.format(argv.input_proto))

    if not argv.silent:
        print_argv(argv, is_caffe, is_tf, is_mxnet, is_kaldi, is_onnx,
                   argv.model_name)

    # This try-except is additional reinsurance that the IE
    # dependency search does not break the MO pipeline
    def raise_ie_not_found():
        # On Windows CPython reports sys.platform == 'win32' (never
        # 'windows'), so a prefix match is used to suggest the right script.
        raise Error(
            "Could not find the Inference Engine or nGraph Python API.\n"
            "Consider building the Inference Engine and nGraph Python APIs from sources or try to install OpenVINO (TM) Toolkit using \"install_prerequisites.{}\""
            .format("bat" if sys.platform.startswith("win") else "sh"))

    try:
        if not find_ie_version(silent=argv.silent):
            raise_ie_not_found()
    except Exception:
        # Any failure while probing the IE version is reported uniformly;
        # the original exception is deliberately not propagated.
        raise_ie_not_found()

    # FP16 output is produced by running the pipeline in FP32 and compressing
    # weights afterwards (compress_fp16 flag).
    if 'data_type' in argv and argv.data_type in ['FP16', 'half']:
        argv.data_type = 'FP32'
        argv.compress_fp16 = True
    else:
        argv.compress_fp16 = False

    # This is just to check that transform key is valid and transformations are available
    check_available_transforms(parse_transform(argv.transform))

    if argv.legacy_ir_generation and len(argv.transform) != 0:
        raise Error(
            "--legacy_ir_generation and --transform keys can not be used at the same time."
        )

    # For C++ frontends there are no specific Python installation requirements, check only generic ones
    if moc_front_end:
        ret_code = check_requirements()
    else:
        ret_code = check_requirements(framework=argv.framework)
    if ret_code:
        raise Error(
            'check_requirements exited with return code {}'.format(ret_code))

    # Deprecated TF option aliases --transformations_config.
    if is_tf and argv.tensorflow_use_custom_operations_config is not None:
        argv.transformations_config = argv.tensorflow_use_custom_operations_config

    if is_caffe and argv.mean_file and argv.mean_values:
        raise Error(
            'Both --mean_file and mean_values are specified. Specify either mean file or mean values. '
            + refer_to_faq_msg(17))
    elif is_caffe and argv.mean_file and argv.mean_file_offsets:
        values = get_tuple_values(argv.mean_file_offsets,
                                  t=int,
                                  num_exp_values=2)
        mean_file_offsets = np.array([int(x) for x in values[0].split(',')])
        if not all([offset >= 0 for offset in mean_file_offsets]):
            raise Error(
                "Negative value specified for --mean_file_offsets option. "
                "Please specify positive integer values in format '(x,y)'. " +
                refer_to_faq_msg(18))
        argv.mean_file_offsets = mean_file_offsets

    if argv.scale and argv.scale_values:
        raise Error(
            'Both --scale and --scale_values are defined. Specify either scale factor or scale values per input '
            + 'channels. ' + refer_to_faq_msg(19))

    if argv.scale and argv.scale < 1.0:
        log.error(
            "The scale value is less than 1.0. This is most probably an issue because the scale value specifies "
            "floating point value which all input values will be *divided*.",
            extra={'is_warning': True})

    if argv.input_model and (is_tf and argv.saved_model_dir):
        raise Error('Both --input_model and --saved_model_dir are defined. '
                    'Specify either input model or saved model directory.')
    if is_tf:
        if argv.saved_model_tags is not None:
            if ' ' in argv.saved_model_tags:
                raise Error(
                    'Incorrect saved model tag was provided. Specify --saved_model_tags with no spaces in it'
                )
            argv.saved_model_tags = argv.saved_model_tags.split(',')

    argv.output = argv.output.split(',') if argv.output else None

    argv.placeholder_shapes, argv.placeholder_data_types = get_placeholder_shapes(
        argv.input, argv.input_shape, argv.batch)

    mean_values = parse_tuple_pairs(argv.mean_values)
    scale_values = parse_tuple_pairs(argv.scale_values)
    mean_scale = get_mean_scale_dictionary(mean_values, scale_values,
                                           argv.input)
    argv.mean_scale_values = mean_scale

    # Ensure the output directory exists and is writable before any heavy work.
    if not os.path.exists(argv.output_dir):
        try:
            os.makedirs(argv.output_dir)
        except PermissionError as e:
            raise Error(
                "Failed to create directory {}. Permission denied! " +
                refer_to_faq_msg(22), argv.output_dir) from e
    else:
        if not os.access(argv.output_dir, os.W_OK):
            raise Error(
                "Output directory {} is not writable for current user. " +
                refer_to_faq_msg(22), argv.output_dir)

    log.debug("Placeholder shapes : {}".format(argv.placeholder_shapes))

    if hasattr(argv,
               'extensions') and argv.extensions and argv.extensions != '':
        extensions = argv.extensions.split(',')
    else:
        extensions = None

    argv.freeze_placeholder_with_value, argv.input = get_freeze_placeholder_values(
        argv.input, argv.freeze_placeholder_with_value)
    # Load framework-specific front-end extension classes.
    # NOTE(review): unlike the other branches, the 'tf' branch does not call
    # send_framework_info here — confirm this is intentional (it may be
    # reported elsewhere for TF).
    if is_tf:
        from mo.front.tf.register_custom_ops import get_front_classes
        import_extensions.load_dirs(argv.framework, extensions,
                                    get_front_classes)
    elif is_caffe:
        send_framework_info('caffe')
        from mo.front.caffe.register_custom_ops import get_front_classes
        import_extensions.load_dirs(argv.framework, extensions,
                                    get_front_classes)
    elif is_mxnet:
        send_framework_info('mxnet')
        from mo.front.mxnet.register_custom_ops import get_front_classes
        import_extensions.load_dirs(argv.framework, extensions,
                                    get_front_classes)
    elif is_kaldi:
        send_framework_info('kaldi')
        from mo.front.kaldi.register_custom_ops import get_front_classes
        import_extensions.load_dirs(argv.framework, extensions,
                                    get_front_classes)
    elif is_onnx:
        send_framework_info('onnx')
        from mo.front.onnx.register_custom_ops import get_front_classes
        import_extensions.load_dirs(argv.framework, extensions,
                                    get_front_classes)

    return argv
# 예제 #18 (Example #18)
# 0
def prepare_ir(argv: argparse.Namespace):
    """Validate arguments, then run the framework-specific conversion driver.

    Deduces the source framework, checks its mandatory model-path options,
    derives the output model name, parses shape / mean / scale options into
    structured attributes on ``argv``, verifies the output directory, loads
    framework extensions and finally invokes the matching pipeline driver.

    :param argv: parsed command-line arguments; mutated in place.
    :return: the intermediate representation graph produced by the framework
        driver, or ``None`` if no framework branch matched.
    :raises Error: for invalid argument combinations, missing required
        options, failed requirement checks or an unusable output directory.
    """
    is_tf, is_caffe, is_mxnet, is_kaldi, is_onnx = deduce_framework_by_namespace(
        argv)

    if not any([is_tf, is_caffe, is_mxnet, is_kaldi, is_onnx]):
        raise Error(
            'Framework {} is not a valid target. Please use --framework with one from the list: caffe, tf, '
            'mxnet, kaldi, onnx. ' + refer_to_faq_msg(15), argv.framework)

    # Each framework has its own set of acceptable model-path options.
    if is_tf and not argv.input_model and not argv.saved_model_dir and not argv.input_meta_graph:
        raise Error(
            'Path to input model or saved model dir is required: use --input_model, --saved_model_dir or '
            '--input_meta_graph')
    elif is_mxnet and not argv.input_model and not argv.input_symbol and not argv.pretrained_model_name:
        raise Error(
            'Path to input model or input symbol or pretrained_model_name is required: use --input_model or '
            '--input_symbol or --pretrained_model_name')
    elif is_caffe and not argv.input_model and not argv.input_proto:
        raise Error(
            'Path to input model or input proto is required: use --input_model or --input_proto'
        )
    elif (is_kaldi or is_onnx) and not argv.input_model:
        raise Error('Path to input model is required: use --input_model.')

    # Kaldi models are not emitted in the experimental IR V10 format.
    if is_kaldi:
        argv.generate_experimental_IR_V10 = False

    log.debug(str(argv))
    log.debug("Model Optimizer started")

    # Derive the output model name from whichever input option was supplied.
    model_name = "<UNKNOWN_NAME>"
    if argv.model_name:
        model_name = argv.model_name
    elif argv.input_model:
        model_name = get_model_name(argv.input_model)
    elif is_tf and argv.saved_model_dir:
        model_name = "saved_model"
    elif is_tf and argv.input_meta_graph:
        model_name = get_model_name(argv.input_meta_graph)
    elif is_mxnet and argv.input_symbol:
        model_name = get_model_name(argv.input_symbol)
    argv.model_name = model_name

    log.debug('Output model name would be {}{{.xml, .bin}}'.format(
        argv.model_name))

    # if --input_proto is not provided, try to retrieve another one
    # by suffix substitution from model file name
    if is_caffe and not argv.input_proto:
        argv.input_proto = replace_ext(argv.input_model, '.caffemodel',
                                       '.prototxt')

        if not argv.input_proto:
            raise Error(
                "Cannot find prototxt file: for Caffe please specify --input_proto - a "
                +
                "protobuf file that stores topology and --input_model that stores "
                + "pretrained weights. " + refer_to_faq_msg(20))
        log.info('Deduced name for prototxt: {}'.format(argv.input_proto))

    if not argv.silent:
        print_argv(argv, is_caffe, is_tf, is_mxnet, is_kaldi, is_onnx,
                   argv.model_name)

    ret_code = check_requirements(framework=argv.framework)
    if ret_code:
        raise Error(
            'check_requirements exit with return code {}'.format(ret_code))

    # Deprecated TF option aliases --transformations_config.
    if is_tf and argv.tensorflow_use_custom_operations_config is not None:
        argv.transformations_config = argv.tensorflow_use_custom_operations_config

    if is_caffe and argv.mean_file and argv.mean_values:
        raise Error(
            'Both --mean_file and mean_values are specified. Specify either mean file or mean values. '
            + refer_to_faq_msg(17))
    elif is_caffe and argv.mean_file and argv.mean_file_offsets:
        values = get_tuple_values(argv.mean_file_offsets,
                                  t=int,
                                  num_exp_values=2)
        mean_file_offsets = np.array([int(x) for x in values[0].split(',')])
        if not all([offset >= 0 for offset in mean_file_offsets]):
            raise Error(
                "Negative value specified for --mean_file_offsets option. "
                "Please specify positive integer values in format '(x,y)'. " +
                refer_to_faq_msg(18))
        argv.mean_file_offsets = mean_file_offsets

    if argv.scale and argv.scale_values:
        raise Error(
            'Both --scale and --scale_values are defined. Specify either scale factor or scale values per input '
            + 'channels. ' + refer_to_faq_msg(19))

    if argv.scale and argv.scale < 1.0:
        log.error(
            "The scale value is less than 1.0. This is most probably an issue because the scale value specifies "
            "floating point value which all input values will be *divided*.",
            extra={'is_warning': True})

    if argv.input_model and (is_tf and argv.saved_model_dir):
        raise Error('Both --input_model and --saved_model_dir are defined. '
                    'Specify either input model or saved model directory.')
    if is_tf:
        if argv.saved_model_tags is not None:
            if ' ' in argv.saved_model_tags:
                raise Error(
                    'Incorrect saved model tag was provided. Specify --saved_model_tags with no spaces in it'
                )
            argv.saved_model_tags = argv.saved_model_tags.split(',')

    argv.output = argv.output.split(',') if argv.output else None

    argv.placeholder_shapes, argv.placeholder_data_types = get_placeholder_shapes(
        argv.input, argv.input_shape, argv.batch)

    mean_values = parse_tuple_pairs(argv.mean_values)
    scale_values = parse_tuple_pairs(argv.scale_values)
    mean_scale = get_mean_scale_dictionary(mean_values, scale_values,
                                           argv.input)
    argv.mean_scale_values = mean_scale

    # Ensure the output directory exists and is writable before any heavy work.
    if not os.path.exists(argv.output_dir):
        try:
            os.makedirs(argv.output_dir)
        except PermissionError as e:
            raise Error(
                "Failed to create directory {}. Permission denied! " +
                refer_to_faq_msg(22), argv.output_dir) from e
    else:
        if not os.access(argv.output_dir, os.W_OK):
            raise Error(
                "Output directory {} is not writable for current user. " +
                refer_to_faq_msg(22), argv.output_dir)

    log.debug("Placeholder shapes : {}".format(argv.placeholder_shapes))

    if hasattr(argv,
               'extensions') and argv.extensions and argv.extensions != '':
        extensions = argv.extensions.split(',')
    else:
        extensions = None

    argv.freeze_placeholder_with_value, argv.input = get_freeze_placeholder_values(
        argv.input, argv.freeze_placeholder_with_value)
    # Load framework-specific extensions, then run the matching driver.
    graph = None
    if is_tf:
        import mo.pipeline.tf as mo_tf
        from mo.front.tf.register_custom_ops import get_front_classes
        import_extensions.load_dirs(argv.framework, extensions,
                                    get_front_classes)
        graph = mo_tf.driver(argv)
    elif is_caffe:
        import mo.pipeline.caffe as mo_caffe
        from mo.front.caffe.register_custom_ops import get_front_classes
        import_extensions.load_dirs(argv.framework, extensions,
                                    get_front_classes)
        graph = mo_caffe.driver(argv)
    elif is_mxnet:
        import mo.pipeline.mx as mo_mxnet
        from mo.front.mxnet.register_custom_ops import get_front_classes
        import_extensions.load_dirs(argv.framework, extensions,
                                    get_front_classes)
        graph = mo_mxnet.driver(argv)
    elif is_kaldi:
        import mo.pipeline.kaldi as mo_kaldi
        from mo.front.kaldi.register_custom_ops import get_front_classes
        import_extensions.load_dirs(argv.framework, extensions,
                                    get_front_classes)
        graph = mo_kaldi.driver(argv)
    elif is_onnx:
        import mo.pipeline.onnx as mo_onnx
        from mo.front.onnx.register_custom_ops import get_front_classes
        import_extensions.load_dirs(argv.framework, extensions,
                                    get_front_classes)
        graph = mo_onnx.driver(argv)
    return graph
# 예제 #19 (Example #19)
# 0
def _prepare_ir(argv):
    """Prepare an IR graph from already-loaded model arguments.

    A slimmer variant of ``prepare_ir``: no framework deduction — it derives
    the model name, parses shape / mean / scale options onto ``argv``, checks
    the output directory, loads extensions and runs the unified pipeline.

    :param argv: parsed command-line arguments; mutated in place.
    :return: the graph produced by ``unified_pipeline``.
    :raises Error: for conflicting scale options or an unusable output dir.
    """
    log.debug(str(argv))
    log.debug("Model Optimizer started")

    model_name = "<UNKNOWN_NAME>"
    if argv.model_name:
        model_name = argv.model_name
    elif argv.input_model:
        # input_model here is an in-memory object, not a path: use its type name.
        model_name = argv.input_model.__class__.__name__
    argv.model_name = model_name

    log.debug('Output model name would be {}{{.xml, .bin}}'.format(argv.model_name))

    if not argv.silent:
        print_argv(argv, False, False, False, False, False, argv.model_name)

    if argv.scale and argv.scale_values:
        raise Error(
            'Both --scale and --scale_values are defined. Specify either scale factor or scale values per input ' +
            'channels. ' + refer_to_faq_msg(19))

    if argv.scale and argv.scale < 1.0:
        log.error("The scale value is less than 1.0. This is most probably an issue because the scale value specifies "
                  "floating point value which all input values will be *divided*.", extra={'is_warning': True})

    argv.output = argv.output.split(',') if argv.output else None

    argv.placeholder_shapes, argv.placeholder_data_types = get_placeholder_shapes(argv.input, argv.input_shape,
                                                                                  argv.batch)

    mean_values = parse_tuple_pairs(argv.mean_values)
    scale_values = parse_tuple_pairs(argv.scale_values)
    mean_scale = get_mean_scale_dictionary(mean_values, scale_values, argv.input)
    argv.mean_scale_values = mean_scale

    # Ensure the output directory exists and is writable before any heavy work.
    if not os.path.exists(argv.output_dir):
        try:
            os.makedirs(argv.output_dir)
        except PermissionError as e:
            raise Error("Failed to create directory {}. Permission denied! " +
                        refer_to_faq_msg(22),
                        argv.output_dir) from e
    else:
        if not os.access(argv.output_dir, os.W_OK):
            raise Error("Output directory {} is not writable for current user. " +
                        refer_to_faq_msg(22), argv.output_dir)

    log.debug("Placeholder shapes : {}".format(argv.placeholder_shapes))

    if hasattr(argv, 'extensions') and argv.extensions and argv.extensions != '':
        extensions = argv.extensions.split(',')
    else:
        extensions = None

    argv.freeze_placeholder_with_value, argv.input = get_freeze_placeholder_values(argv.input,
                                                                                   argv.freeze_placeholder_with_value)

    # NOTE(review): get_front_classes is a free name here (no local import,
    # unlike the sibling functions) — presumably imported at module level;
    # confirm it is in scope.
    import_extensions.load_dirs(argv.framework, extensions, get_front_classes)

    graph = unified_pipeline(argv)
    return graph