Example #1
def read_token_value(file_desc: io.BufferedReader,
                     token: bytes = b'',
                     value_type: type = np.uint32):
    """
    Get value of the token.
    Read next token (until whitespace) and check if next teg equals token
    :param file_desc: file descriptor
    :param token: token
    :param value_type:  type of the reading value
    :return: value of the token
    """
    getters = {
        np.uint32: read_binary_integer32_token,
        np.uint64: read_binary_integer64_token,
        bool: read_binary_bool_token
    }
    current_token = collect_until_whitespace(file_desc)
    if token != b'' and token != current_token:
        raise Error('Can not load token {} from Kaldi model'.format(token) +
                    refer_to_faq_msg(94))
    return getters[value_type](file_desc)
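
A minimal usage sketch for the reader above, assuming the helper readers and the Error class are importable from the Model Optimizer package; the file name and token are illustrative only:

import numpy as np

# Hypothetical Kaldi model file; open() in 'rb' mode already returns an io.BufferedReader.
with open('final.nnet', 'rb') as file_desc:
    # Reads the next whitespace-delimited token, checks that it equals
    # b'<NumComponents>' and then decodes the following uint32 value.
    num_components = read_token_value(file_desc, b'<NumComponents>', np.uint32)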
Example #2
 def node_by_pattern(self, pattern: str):
     """
     Returns the Node from the list of sub-graph nodes whose name matches the regular expression 'pattern'.
     If more than one node matches, the function raises an exception.
     :param pattern: the regular expression for the node name.
     :return: matched Node, or None if no node matches.
     """
     if self.scope != '':
         if self.scope[-1] == '/':
             pattern = self.scope + pattern
         else:
             pattern = self.scope + '/' + pattern
     found_names = find_object_by_pattern(self._matched_nodes_names,
                                          pattern)
     if len(found_names) > 1:
         raise Error(
              'The number of nodes matching the pattern "{}" is more than 1. '.
             format(pattern) + refer_to_faq_msg(78))
     if len(found_names) == 0:
         return None
     return Node(self.graph, found_names[0])
Example #3
 def infer(node: Node):
     if len(node.in_nodes()) > 0:
          # If this is a memory node with an input, it should not have an output.
          # However, to avoid breaking the MO pipeline, we just propagate the same
          # shape to the output node, which will be removed later in the pipeline.
         copy_shape_infer(node)
         return
     elif node.has_valid('shape'):
          # For Memory nodes without inputs, inferring the shape is very difficult,
          # but the shape can often be determined during attribute extraction
          # and stored in the 'shape' attribute there.
         batch = 1
         for out_node in node.out_nodes().values():
             out_node.shape = int64_array([batch, *node.shape[:]])
         return
     else:
         raise Error('Model Optimizer is unable to calculate output shape of Memory node {}. ' +
                     refer_to_faq_msg(88),
                     node.id)
Example #4
def serialize_node_attributes(
        graph: Graph,  # the current network graph
        node,  # dictionary-like object that should be serialized
        schema: list,
        parent_element: Element,
        edges: Element,
        unsupported):
    # the Result op may be marked so it should not appear in the IR. For example, refer to transformation
    # model-optimizer/extensions/back/TopKNormalizer.py
    if isinstance(node, Node) and node.soft_get('type') == 'Result' and node.has_and_set('remove_from_xml'):
        return
    try:
        for s in schema:
            if not isinstance(s, tuple):
                if s == '@ports':
                    try:
                        # TODO make sure that edges are generated regardless of the existence of @ports
                        xml_ports(node, parent_element, edges)
                    except Exception as e:
                        raise Error(('Unable to create ports for node with id {}. ' +
                                     refer_to_faq_msg(3)).format(node.id)) from e
                elif s == '@consts':
                    xml_consts(graph, node, parent_element)
                else:
                    log.warning('Unknown xml schema tag: {}'.format(s))
            else:
                name = s[0]
                if name == '@list':
                    serialize_meta_list(graph, node, s, parent_element, edges, unsupported)
                elif name == '@network':
                    serialize_network(node[s[1]], parent_element, unsupported)
                else:
                    serialize_element(graph, node, s, parent_element, edges, unsupported)
    except Exception as e:
        raise Error(
            'Error while emitting attributes for layer {} (id = {}). It usually means that there is unsupported '
            'pattern around this node or unsupported combination of attributes.',
            soft_get(node, 'name'),
            node.id
        ) from e
Example #5
def load_params(input_model, data_names=('data',)):
    arg_params = {}
    aux_params = {}
    arg_keys = []
    aux_keys = []
    file_format = input_model.split('.')[-1]
    loaded_weight = mx.nd.load(input_model)
    if file_format == 'params':
        for key in loaded_weight:
            keys = key.split(':')
            if len(keys) > 1 and 'aux' == keys[0]:
                aux_keys.append(keys[1])
                aux_params[keys[1]] = loaded_weight[key]
            elif len(keys) > 1 and 'arg' == keys[0]:
                arg_keys.append(keys[1])
                arg_params[keys[1]] = loaded_weight[key]
            else:
                arg_keys.append(key)
                arg_params[key] = loaded_weight[key]
    elif file_format == 'nd':
        for key in loaded_weight:
            if 'auxs' in input_model:
                aux_keys.append(key)
                aux_params[key] = loaded_weight[key]
            elif 'args' in input_model:
                arg_keys.append(key)
                arg_params[key] = loaded_weight[key]
    else:
        raise Error(
            'Unsupported input model file type {}. Model Optimizer supports only the .params and .nd file formats. ' +
            refer_to_faq_msg(85), file_format)

    data = mx.sym.Variable(data_names[0])
    model_params = mx.mod.Module(data, data_names=(data_names[0],), label_names=(data_names[0],))
    model_params._arg_params = arg_params
    model_params._aux_params = aux_params
    model_params._param_names = arg_keys
    model_params._aux_names = aux_keys
    return model_params
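
A hedged usage sketch for load_params; the checkpoint name is illustrative, and the keys inside a .params file are expected to look like 'arg:fc1_weight' or 'aux:bn_moving_mean':

# Hypothetical MXNet checkpoint saved in the .params format.
module = load_params('resnet-0000.params', data_names=('data',))
print(module._param_names)  # names of the trainable (arg) parameters
print(module._aux_names)    # names of the auxiliary states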
Example #6
def override_batch(graph: Graph, batch: int):
    """
    Overrides batch for nodes with 'op' param set to 'Parameter'
    Parameters
    ----------
    graph: graph to operate on
    batch: user defined integer value to override batch
    """
    if batch is not None:
        for node_id, data in graph.nodes(data=True):
            if 'op' in data and data['op'] == 'Parameter' and not data.get('fixed_batch', False):
                if len(data['shape']) == 0 or data['shape'][0] not in (-1, 0, 1):
                    raise Error(('The input layer {} has a shape {} defined in the model. \n\n' +
                                 'When you use the -b (--batch) option, Model Optimizer applies its value to the first ' +
                                 'element of the shape only if it is equal to -1, 0 or 1. Otherwise, the situation is ' +
                                 'ambiguous - Model Optimizer cannot know in advance whether the layer has a batch ' +
                                 'dimension or not.\n\n For example, suppose you want to set the batch dimension to 100 ' +
                                 'for the input layer "data" with shape (10,34). In this case you cannot use --batch; ' +
                                 'you should pass --input_shape (100,34) instead of --batch 100. \n\n' +
                                 refer_to_faq_msg(39))
                                .format(data['name'], data['shape']))
                data['shape'][0] = batch
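
A small sketch of the effect, assuming for illustration that a plain networkx graph with MO-style node attributes is enough (the real Graph class extends networkx.MultiDiGraph):

import networkx as nx
import numpy as np

graph = nx.MultiDiGraph()
graph.add_node('data', op='Parameter', name='data',
               shape=np.array([-1, 3, 224, 224], dtype=np.int64))

override_batch(graph, batch=8)
print(graph.nodes['data']['shape'])  # [  8   3 224 224]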
Example #7
def deduce_framework_by_namespace(argv: Namespace):
    if not argv.framework:
        if getattr(argv, 'saved_model_dir', None) or getattr(
                argv, 'input_meta_graph', None):
            argv.framework = 'tf'
        elif getattr(argv, 'input_symbol', None) or getattr(
                argv, 'pretrained_model_name', None):
            argv.framework = 'mxnet'
        elif getattr(argv, 'input_proto', None):
            argv.framework = 'caffe'
        elif argv.input_model is None:
            raise Error('Path to input model is required: use --input_model.')
        else:
            argv.framework = guess_framework_by_ext(argv.input_model)
        if not argv.framework:
            raise Error(
                'Framework name can not be deduced from the given options: {}={}. Use --framework to choose '
                'one of caffe, tf, mxnet, kaldi, onnx', '--input_model',
                argv.input_model, refer_to_faq_msg(15))

    return map(lambda x: argv.framework == x,
               ['tf', 'caffe', 'mxnet', 'kaldi', 'onnx'])
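
The returned map yields one boolean per framework in the fixed order tf, caffe, mxnet, kaldi, onnx, so callers typically unpack it (as prepare_ir does in Example #23). A sketch with an illustrative, minimal Namespace:

from argparse import Namespace

# Hypothetical namespace; the real one is produced by the MO argument parser.
argv = Namespace(framework=None, saved_model_dir=None, input_meta_graph=None,
                 input_symbol=None, pretrained_model_name=None,
                 input_proto='deploy.prototxt', input_model='model.caffemodel')

is_tf, is_caffe, is_mxnet, is_kaldi, is_onnx = deduce_framework_by_namespace(argv)
assert is_caffe and not is_tf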
Example #8
def _update(cls, registered_list: list, registered_dict: dict, key: str,
            enabled_transforms: list, disabled_transforms: list):
    new_keys = {}  # maps a custom name to class
    new_keys_lower = {}  # translates lowered custom name to its original form
    # print('Registering new subclasses for', cls)

    for c in cls.__subclasses__():
        # Force enabling operations
        if hasattr(c, 'id') and c.id in enabled_transforms or \
                ".".join([c.__module__, c.__name__]) in enabled_transforms:
            setattr(c, 'enabled', True)

        # Force disabling operations
        if hasattr(c, 'id') and c.id in disabled_transforms or \
                ".".join([c.__module__, c.__name__]) in disabled_transforms:
            setattr(c, 'enabled', False)

        if c not in registered_list:
            if hasattr(cls, 'excluded_classes') and c in cls.excluded_classes:
                continue
            registered_list.append(c)
            log.info('New subclass: {}'.format(c))
            if hasattr(c, key) and getattr(c, key) is not None:
                k = getattr(c, key)
                if k.lower() in new_keys_lower:
                    raise Error(
                        'Attempt to register custom name {} for the second time as class {}. ' \
                        'Note that custom names are case-insensitive. ' +
                        refer_to_faq_msg(55), k, c)
                else:
                    new_keys_lower[k.lower()] = k
                    new_keys[k] = c
                    log.info(
                        'Registered a new subclass with key: {}'.format(k))
        else:
            log.warning(
                'Skipped {} registration because it was already registered or it was disabled. '
                .format(c))
    registered_dict.update(new_keys)
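
A sketch of how the registration works, assuming _update can be called as a plain function with the base class as its first argument; the classes and the 'op' key here are purely illustrative:

class BaseReplacer:
    pass

class MyReplacer(BaseReplacer):
    id = 'MyReplacer'
    enabled = False
    op = 'MyOp'

registered, by_key = [], {}
_update(BaseReplacer, registered, by_key, 'op',
        enabled_transforms=['MyReplacer'], disabled_transforms=[])
print(MyReplacer.enabled)  # True: forced on because its id is in enabled_transforms
print(by_key)              # {'MyOp': <class '...MyReplacer'>}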
Example #9
def serialize_node_attributes(
        graph: nx.MultiDiGraph,  # the current network graph
        node,   # dictionary-like object that should be serialized
        schema: list,
        parent_element: xml.etree.ElementTree.Element,
        edges: xml.etree.ElementTree.Element,
        unsupported):

    try:
        for s in schema:
            if not isinstance(s, tuple):
                if s == '@ports':
                    try:
                        # TODO make sure that edges are generated regardless of the existence of @ports
                        xml_ports(node, parent_element, edges)
                    except Exception as e:
                        raise Error(('Unable to create ports for node with id {}. ' +
                                     refer_to_faq_msg(3)).format(node.id)) from e
                elif s == '@consts':
                    xml_consts(graph, node, parent_element)
                else:
                    log.warning('Unknown xml schema tag: {}'.format(s))
            else:
                name = s[0]
                if name == '@list':
                    serialize_meta_list(
                        graph, node, s, parent_element, edges, unsupported)
                elif name == '@network':
                    serialize_network(node[s[1]], parent_element, unsupported)
                else:
                    serialize_element(
                        graph, node, s, parent_element, edges, unsupported)
    except Exception as e:
        raise Error(
            'Error while emitting attributes for layer {} (id = {}). '
            'It usually means that there is unsupported pattern around this node or unsupported combination of attributes.',
            soft_get(node, 'name'),
            node.id
        ) from e
Example #10
def driver(argv: argparse.Namespace):
    try:
        model_nodes, model_params, model_name, iteration_number = load_symbol_def(
            argv.input_model, argv.input_symbol, argv.input,
            argv.nd_prefix_name, argv.pretrained_model_name,
            argv.legacy_mxnet_model)
    except (ValueError, mxnet.base.MXNetError) as e:
        raise FrameworkError(
            'The following error happened while loading mxnet model {}: {}. ' +
            refer_to_faq_msg(53), argv.input_model, str(e)) from e

    if argv.nd_prefix_name and argv.pretrained_model_name and argv.save_params_from_nd:
        save_params_file(model_name, model_params._arg_params,
                         model_params._aux_params, iteration_number)

    update_extractors_with_extensions(mxnet_op_extractors)
    graph = symbol2nx(model_nodes, model_params, argv.input)
    graph.check_empty_graph(
        'symbol2nx. It may happen due to problems with loaded model')

    graph.__setattr__('name', argv.model_name)
    graph.graph['layout'] = 'NCHW'
    graph.graph['cmd_params'] = argv
    graph.graph['fw'] = 'mxnet'
    graph.graph['feature_dim'] = 1 if graph.graph['layout'] == 'NCHW' else 3
    graph.graph['ir_version'] = get_ir_version(argv)

    extract_node_attrs(graph, mxnet_op_extractor)

    # --------------------------------- LOAD END ------------------------------------------------------

    class_registration.apply_replacements(graph, [
        class_registration.ClassType.FRONT_REPLACER,
        class_registration.ClassType.MIDDLE_REPLACER,
        class_registration.ClassType.BACK_REPLACER
    ])

    return graph
Example #11
def tf_tensor_content(tf_dtype, shape, pb_tensor):
    type_helper = tf_data_type_decode[
        tf_dtype] if tf_dtype in tf_data_type_decode else None
    if type_helper is None:
        raise Error("Data type is unsupported: {}. " + refer_to_faq_msg(50),
                    tf_dtype)

    if pb_tensor.tensor_content:
        value = np.array(
            np.frombuffer(pb_tensor.tensor_content, type_helper[0]))
    else:
        # load typed value
        value = np.array(type_helper[1](pb_tensor), dtype=type_helper[0])

    if len(shape) == 0 or shape.prod() == 0:
        if len(value) == 1:
            # return scalar if shape is [] otherwise broadcast according to shape
            return np.array(value[0], dtype=type_helper[0])
        else:
            # no shape, return value as is
            return value

    if len(value) != shape.prod():
        log.warning(
            "Shape and content size of tensor don't match, shape: {} content size: {}"
            .format(shape, len(value)))
        # broadcast semantics according to TensorFlow v1.5 documentation:
        # The argument value can be a constant value, or a list of values of type dtype. If value is a list,
        # then the length of the list must be less than or equal to the number of elements implied by the shape
        # argument (if specified). In the case where the list length is less than the number of elements specified
        # by shape, the last element in the list will be used to fill the remaining entries.
        value_flatten = value.flatten()
        add_value = value_flatten[-1]
        add_length = shape.prod() - len(value_flatten)
        value = np.concatenate(
            [value_flatten, np.full([add_length], add_value)])

    return value.reshape(shape)
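
The fill-with-last-element behaviour described in the comments above can be reproduced with plain numpy; this standalone sketch mirrors the under-sized branch without requiring a TensorFlow tensor proto:

import numpy as np

shape = np.array([2, 3], dtype=np.int64)
value = np.array([1.0, 2.0], dtype=np.float32)  # fewer elements than shape.prod()

value_flatten = value.flatten()
add_length = shape.prod() - len(value_flatten)
value = np.concatenate([value_flatten, np.full([add_length], value_flatten[-1])])
print(value.reshape(shape))
# [[1. 2. 2.]
#  [2. 2. 2.]]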
Example #12
def driver(argv):
    try:
        graph = load_kaldi_model(argv.input_model)
    except Exception as e:
        raise Error('Model Optimizer is not able to parse Kaldi model {}. '.
                    format(argv.input_model) + refer_to_faq_msg(91)) from e
    graph.check_empty_graph('load_kaldi_nnet_model')
    graph.graph['layout'] = 'NCHW'
    graph.graph['cmd_params'] = argv
    graph.graph['fw'] = 'kaldi'
    graph.graph['ir_version'] = get_ir_version(argv)

    update_extractors_with_extensions(kaldi_type_extractors)
    extract_node_attrs(graph, lambda node: kaldi_extractor(node))

    # --------------------------------- LOAD END ------------------------------------------------------
    class_registration.apply_replacements(graph, [
        class_registration.ClassType.FRONT_REPLACER,
        class_registration.ClassType.MIDDLE_REPLACER,
        class_registration.ClassType.BACK_REPLACER
    ])

    return graph
Example #13
def validate_batch_in_shape(shape, layer_name: str):
    """
    Raises Error #39 if shape is not valid for setting batch size
    Parameters
    ----------
    shape: current shape of layer under validation
    layer_name: name of layer under validation
    """
    if len(shape) == 0 or shape[0] not in (-1, 0, 1):
        raise Error(('The input layer {} has a shape {} defined in the model. \n\n' +
                     'When you use the -b (--batch) option, Model Optimizer applies its value to the first ' +
                     'element of the shape only if it is equal to -1, 0 or 1. Otherwise, the situation is ' +
                     'ambiguous - Model Optimizer cannot know in advance whether the layer has a batch ' +
                     'dimension or not.\n\n For example, suppose you want to set the batch dimension to 100 ' +
                     'for the input layer "data" with shape (10,34). In this case you cannot use --batch; ' +
                     'you should pass --input_shape (100,34) instead of --batch 100. \n\n' +
                     refer_to_faq_msg(39)).format(layer_name, shape))
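
For illustration: the check passes when the leading dimension is -1, 0 or 1 and raises FAQ error #39 otherwise (assumes Error and refer_to_faq_msg from the MO utils are importable):

import numpy as np

validate_batch_in_shape(np.array([1, 3, 224, 224]), 'data')  # passes: batch dim is 1
try:
    validate_batch_in_shape(np.array([10, 34]), 'data')      # leading dim is 10
except Error as e:
    print(e)  # explains that --input_shape should be used instead of --batch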
Example #14
    def load(self, graph: Graph):
        argv = graph.graph['cmd_params']
        caffe_pb2 = loader.import_caffe_pb2(argv.caffe_parser_path)

        proto, model = loader.load_caffe_proto_model(caffe_pb2, argv.input_proto, argv.input_model)

        update_extractors_with_extensions(
            caffe_type_extractors,
            argv.disable_omitting_optional if hasattr(argv, 'disable_omitting_optional') else False,
            argv.disable_flattening_optional_params if hasattr(argv, 'disable_flattening_optional_params') else False
        )

        try:
            original_shapes = loader.caffe_pb_to_nx(graph, proto, model)
        except ValueError as e:
            raise Error('Invalid prototxt file: value error {}. ' +
                        refer_to_faq_msg(11), str(e)) from e
        graph.check_empty_graph('load_caffe_proto_model')

        graph.__setattr__('proto_path', argv.input_proto)
        graph.__setattr__('caffemodel_path', argv.input_model)
        graph.__setattr__('name', getattr(proto, 'name', None) or argv.model_name)
        graph.graph['layout'] = 'NCHW'
        graph.graph['fw'] = 'caffe'
        graph.graph['original_shapes'] = original_shapes
        graph.graph['caffe_pb2'] = caffe_pb2

        custom_layers_map = custom_layers_mapping.load_layers_xml(argv.k)
        custom_layers_mapping.update_extractors(
            caffe_type_extractors,
            custom_layers_map,
            argv.disable_omitting_optional if hasattr(argv, 'disable_omitting_optional') else False,
            argv.enable_flattening_nested_params if hasattr(argv, 'enable_flattening_nested_params') else False
        )
        extract_node_attrs(graph, lambda node: caffe_extractor(node, check_for_duplicates(caffe_type_extractors)))
        send_op_names_info('caffe', graph)
        send_shapes_info('caffe', graph)
Example #15
def load_kaldi_model(graph, nnet_path):
    """
    Structure of the file is the following:
    magic-number(16896)<Nnet> <Next Layer Name> weights etc.
    :param nnet_path:
    :return:
    """
    nnet_name = None
    if isinstance(nnet_path, str):
        file_desc = open(nnet_path, "rb")
        nnet_name = get_name_from_path(nnet_path)
    elif isinstance(nnet_path, IOBase):
        file_desc = nnet_path
    else:
        raise Error('Unsupported type of Kaldi model')

    tag = find_next_tag(file_desc)
    # start new model / submodel
    if tag == '<Nnet>':
        load_function = load_kalid_nnet1_model
    elif tag == '<TransitionModel>':
        while tag != '<Nnet>' and tag != '<Nnet3>':
            tag = find_next_tag(file_desc)

        if tag == '<Nnet3>':
            load_function = load_kaldi_nnet3_model
        else:
            load_function = load_kalid_nnet2_model
    elif tag == '<Nnet3>':
        load_function = load_kaldi_nnet3_model
    else:
        raise Error(
            'Kaldi model should start with <Nnet>, <Nnet3> or <TransitionModel> tag. ',
            refer_to_faq_msg(89))
    read_placeholder(file_desc, 1)

    return load_function(graph, file_desc, nnet_name)
Example #16
def _update(cls, registered_list: list, registered_dict: dict, key: str):
    new_keys = {}  # maps a custom name to class
    new_keys_lower = {}  # translates lowered custom name to its original form
    # print('Registering new subclasses for', cls)
    for c in cls.__subclasses__():
        if c not in registered_list and (not hasattr(c, 'enabled') or c.enabled):
            if hasattr(cls, 'excluded_classes') and c in cls.excluded_classes:
                continue
            registered_list.append(c)
            log.info('New subclass: {}'.format(c))
            if hasattr(c, key):
                k = getattr(c, key)
                if k.lower() in new_keys_lower:
                    raise Error(
                        'Attempt to register custom name {} for the second time as class {}. ' \
                        'Note that custom names are case-insensitive. ' +
                        refer_to_faq_msg(55), k, c)
                else:
                    new_keys_lower[k.lower()] = k
                    new_keys[k] = c
                    log.info('Registered a new subclass with key: {}'.format(k))
        else:
            log.warning('Skipped {} registration because it was already registered or it was disabled. '.format(c))
    registered_dict.update(new_keys)
Example #17
    def replace_pattern(self, graph: Graph, match: dict):
        """
            Adds layers with type 'Const' that produce blob from 'bin' file. The pass finds data nodes with one output which
            doesn't have edge with 'bin' attribute (or with two outputs and at least one output doesn't have 'bin' attr)
            and generate Const op node before the node and data node before the Const node. The data node before 'Const'
            node is needed because the op node dumps input tensors to bin file.
        """
        node = match['data']
        if len(node.in_nodes()) > 0:
            return

        if self._check_bin_attrs(node):
            if node.has_valid('value'):
                const_node_name = node.soft_get('name', node.id)
                const_node_name = graph.unique_id(re.sub(r'\/Output_\d+\/Data_(.?)+', '', const_node_name))
                log.debug("Added Const node '{}'".format(const_node_name))
                const_node = Const(graph, {'name': const_node_name, 'value': node.value,
                                           'force_shape': node.soft_get('force_shape', None),
                                           'override_output_shape': node.has_valid('force_shape'),
                                           'force_type': node.soft_get('force_type', None),
                                           'correct_data_type': node.soft_get('correct_data_type', None),
                                           }).create_node()
                const_node.add_input_port(0)
                graph.add_edges_from([(const_node_name, node.id, {'out': 0})])

                node_copy = node.copy_node()
                const_node.type_infer(const_node)
                graph.add_edges_from([(node_copy.id, const_node_name, {'in': 0, 'bin': 'custom'})])
            elif not self._check_that_node_from_body(node):
                log.debug('node = {}'.format(node.graph.node[node.id]))
                raise Error(
                    'Discovered data node without inputs and value, node.name = {}, consumer.name = {}. ' +
                    refer_to_faq_msg(23),
                    node.soft_get('name'),
                    node.out_node().soft_get('name') if len(node.out_nodes()) else "<no consumer>"
                )
Example #18
def caffe_pb_to_nx(graph, proto, model):
    """
    Converts proto/model layers to a graph. Edges are restored by bottom/top attributes.
    Graph nodes have two attributes: pb for the prototxt definition and model_pb for the caffemodel definition.

    Parameters
    ----------
    proto : NetParameter
       Protobuf message for NetParameter, representing .prototxt.
    model : NetParameter
       Protobuf message for NetParameter, representing .caffemodel.

    Returns
    ----------
        Graph
        built NX Directed graph.
    """
    # Blobs in prototxt model can be reused by inplace layer.
    # This requires loading of pb layers in order and tracking the latest
    # layer that writes a particular blob.
    blob_producers = {}  # maps layer blob name to node id in graph, port and layer name
    proto_layers = get_layers(proto)
    model_layers = None
    if model:
        model_layers = get_layers(model)

    input_dims = []
    input_names = []
    if len(proto.input_dim) > 0 and len(list(proto.input)) > 1:
        # example of proto input
        # input: "data"
        # input_dim: 1
        # input_dim: 3
        # input_dim: 500
        # input_dim: 500
        # input: "info"
        # input_dim: 1
        # input_dim: 3
        raise Error('Old-style inputs (via "input_dims") are not supported. ' +
                    'Please specify inputs via "input_shape". ' +
                    refer_to_faq_msg(8))
    elif len(list(proto.input)) == 1 and len(list(proto.input_dim)):
        # example of proto input
        # input: "data"
        # input_dim: 1
        # input_dim: 3
        # input_dim: 500
        # input_dim: 500
        input_dims = [np.array(list(proto.input_dim), dtype=np.int64)]
        input_names = [proto.input[0]]

    elif len(list(proto.input)) == 1 and len(list(proto.input_shape)):
        # example of proto input
        # input: "data"
        # input_shape
        # {
        #     dim: 1
        #     dim: 3
        #     dim: 227
        #     dim: 227
        # }
        input_dims = [np.array(proto.input_shape[0].dim, dtype=np.int64)]
        input_names = [proto.input[0]]

    elif len(proto.input_shape) > 0:
        # example of proto input
        # input: "data"
        # input_shape
        # {
        #     dim: 1
        #     dim: 3
        #     dim: 600
        #     dim: 1000
        # }
        # input: "im_info"
        # input_shape
        # {
        #     dim: 1
        #     dim: 3
        # }
        for i in range(len(proto.input_shape)):
            input_dims.append(np.array(proto.input_shape[i].dim, dtype=np.int64))
            input_names.append(proto.input[i])

    for i in range(len(input_names)):
        input_name = input_names[i]
        input_dim = input_dims[i]
        # Input is defined at the top level of proto instead of distinct Input layer
        graph.add_node(input_name, pb=None, model_pb=None, type='GlobalInput', name=input_name, shape=input_dim,
                       kind='op')
        blob_producers[input_name] = (input_name, 0, input_name)

    used_blobs = set()
    for i, layer in enumerate(proto_layers):

        model_layer = None

        if model_layers:
            for ml in model_layers:
                if ml.name == layer.name:
                    model_layer = ml
                    break
        if layer.type == 'Input':
            if hasattr(layer, 'input_param'):
                input_param = layer.input_param
            else:
                raise Error('Input layer has no input dims. ' +
                            refer_to_faq_msg(8))
            if hasattr(input_param, 'shape'):
                """
                example of proto input
                layer
                {
                    name: "data"
                    type: "Input"
                    top: "data"
                    input_param {shape: {dim: 1 dim: 3 dim: 600 dim: 1000}}
                }

                layer
                {
                    name: "im_info"
                    type: "Input"
                    top: "im_info"
                    input_param {shape: {dim: 1 dim: 3}}
                }
                """
                dims = map(int, list(filter(None, str(list(input_param.shape)[0]).split('dim:'))))
                input_dims.append(np.array(list(dims), dtype=np.int64))
                input_names.append(layer.name)

        node_id = graph.unique_id(layer.name)
        graph.add_node(node_id, pb=layer, model_pb=model_layer, kind='op', type='Parameter')

        # connect inputs based on blob_producers dictionary
        for dst_port, bottom in enumerate(layer.bottom):
            add_edge_caffe(graph, bottom, node_id, blob_producers, dst_port)
            used_blobs.add(bottom)

        # update blob producers dictionary by output ports
        for src_port, top in enumerate(layer.top):
            if top in blob_producers:
                log.debug("Detected reuse of blob {} by layer {}".format(top, node_id))
            blob_producers[top] = (node_id, src_port, layer.name)

    # Tensor names information corresponding to a node is stored on outgoing edges.
    # As output nodes do not have outgoing edges, fake outputs are required. In the following code
    # for each output Identity node is added, and tensor name for the output is kept
    # on (output, fake output) edge. After Result nodes adding transformation fake outputs
    # are deleted from graph.
    all_blobs = set(blob_producers.keys())
    add_outputs_identity(graph, all_blobs - used_blobs, add_edge_caffe,
                         {'blob_producers': blob_producers, 'dst_port': 0})

    if len(input_names) <= 0:
        raise Error('The topology contains no "input" layers. ' +
                    refer_to_faq_msg(79))
    return {fake_node_name: shape for (fake_node_name, shape) in zip(input_names, input_dims)}
Example #19
    def find_and_replace_pattern(self, graph: Graph):
        input_nodes = {}
        values = graph.graph['cmd_params'].mean_scale_values
        for node in graph.nodes():
            node = Node(graph, node)
            if node.has_valid('op') and node.op == 'Parameter':
                input_nodes.update({node.id: node})

        if not isinstance(values, dict):
            if len(values) != len(input_nodes):
                raise Error(
                    'Numbers of inputs and mean/scale values do not match. ' +
                    refer_to_faq_msg(61))

            data = np.copy(values)
            values = {}
            for idx, key in enumerate(input_nodes.keys()):
                values.update({
                    input_nodes[key]['name']: {
                        'mean': data[idx][0],
                        'scale': data[idx][1]
                    }
                })

        for node_name in values:
            node_mean_scale_values = values[node_name]
            node_name, port = split_node_in_port(node_name)
            node_id = None
            try:
                node_id = graph.get_node_id_by_name(node_name)
            except Error as e:
                log.warning(
                    'node_name {} is not found in graph'.format(node_name))
            if node_id not in input_nodes:
                # if the user cut off an input of the network, then the input node name specified in --scale_values
                # or --mean_values doesn't correspond to a real input node generated by Model Optimizer. But
                # the information about the initial input node name is stored in the Placeholder's attribute 'initial_node_name'
                new_node_id = None
                for placeholder in input_nodes.values():
                    try:
                        placeholder_port = int(placeholder.id.split("_")[-1])
                    except Exception as ex:
                        log.debug(
                            'Can not get the port number from the node {}'.
                            format(placeholder.id))
                        log.debug('Port will be defined as None')
                        port = None
                    if placeholder.has(
                            'initial_node_name'
                    ) and placeholder.initial_node_name == node_name and (
                            port is None or placeholder_port == port):
                        new_node_id = placeholder.id
                        break
                if new_node_id is None:
                    raise Error(
                        'Input with name {} wasn\'t found!'.format(node_name) +
                        refer_to_faq_msg(83))
                node_id = new_node_id

            input_node = Node(graph, node_id)
            AddMeanScaleValues.apply_scale(graph, input_node,
                                           node_mean_scale_values)
            AddMeanScaleValues.apply_mean_value(graph, input_node,
                                                node_mean_scale_values)
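
When --mean_values/--scale_values are given positionally rather than per input name, the branch above turns them into a per-input dictionary; a standalone sketch of that mapping, with illustrative input names and numbers:

import numpy as np

input_names = ['data', 'im_info']     # order of the Parameter nodes
values = [(127.5, 2.0), (0.0, 1.0)]   # positional (mean, scale) pairs

data = np.copy(values)
per_input = {name: {'mean': float(data[idx][0]), 'scale': float(data[idx][1])}
             for idx, name in enumerate(input_names)}
print(per_input)
# {'data': {'mean': 127.5, 'scale': 2.0}, 'im_info': {'mean': 0.0, 'scale': 1.0}}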
Example #20
def caffe_pb_to_nx(graph, proto, model):
    """
    Converts proto/model layers to a graph. Edges are restored by bottom/top attributes.
    Graph nodes have two attributes: pb for the prototxt definition and model_pb for the caffemodel definition.

    Parameters
    ----------
    proto : NetParameter
       Protobuf message for NetParameter, representing .prototxt.
    model : NetParameter
       Protobuf message for NetParameter, representing .caffemodel.

    Returns
    ----------
        Graph
        built NX Directed graph.
    """
    # Blobs in prototxt model can be reused by inplace layer.
    # This requires loading of pb layers in order and tracking the latest
    # layer that writes a particular blob.
    blob_producers = {}  # maps layer blob name to the layer name and port
    proto_layers = get_layers(proto)
    model_layers = None
    if model:
        model_layers = get_layers(model)

    input_dims = []
    input_names = []
    if len(proto.input_dim) > 0 and len(list(proto.input)) > 1:
        # example of proto input
        # input: "data"
        # input_dim: 1
        # input_dim: 3
        # input_dim: 500
        # input_dim: 500
        # input: "info"
        # input_dim: 1
        # input_dim: 3
        raise Error('Old-style inputs (via "input_dims") are not supported. ' +
                    'Please specify inputs via "input_shape". ' +
                    refer_to_faq_msg(8))
    elif len(list(proto.input)) == 1 and len(list(proto.input_dim)):
        # example of proto input
        # input: "data"
        # input_dim: 1
        # input_dim: 3
        # input_dim: 500
        # input_dim: 500
        input_dims = [np.array(list(proto.input_dim), dtype=np.int64)]
        input_names = [proto.input[0]]

    elif len(list(proto.input)) == 1 and len(list(proto.input_shape)):
        # example of proto input
        # input: "data"
        # input_shape
        # {
        #     dim: 1
        #     dim: 3
        #     dim: 227
        #     dim: 227
        # }
        input_dims = [np.array(proto.input_shape[0].dim, dtype=np.int64)]
        input_names = [proto.input[0]]

    elif len(proto.input_shape) > 0:
        # example of proto input
        # input: "data"
        # input_shape
        # {
        #     dim: 1
        #     dim: 3
        #     dim: 600
        #     dim: 1000
        # }
        # input: "im_info"
        # input_shape
        # {
        #     dim: 1
        #     dim: 3
        # }
        for i in range(len(proto.input_shape)):
            input_dims.append(
                np.array(proto.input_shape[i].dim, dtype=np.int64))
            input_names.append(proto.input[i])

    for i in range(len(input_names)):
        input_name = input_names[i]
        input_dim = input_dims[i]
        # Input is defined at the top level of proto instead of distinct Input layer
        graph.add_node(input_name,
                       pb=None,
                       model_pb=None,
                       type='GlobalInput',
                       name=input_name,
                       shape=input_dim,
                       kind='op')
        blob_producers[input_name] = (input_name, 0)

    for i, layer in enumerate(proto_layers):

        model_layer = None

        if model_layers:
            for ml in model_layers:
                if ml.name == layer.name:
                    model_layer = ml
                    break
        if layer.type == 'Input':
            if hasattr(layer, 'input_param'):
                input_param = layer.input_param
            else:
                raise Error('Input layer has no input dims. ' +
                            refer_to_faq_msg(8))
            if hasattr(input_param, 'shape'):
                """
                example of proto input
                layer
                {
                    name: "data"
                    type: "Input"
                    top: "data"
                    input_param {shape: {dim: 1 dim: 3 dim: 600 dim: 1000}}
                }

                layer
                {
                    name: "im_info"
                    type: "Input"
                    top: "im_info"
                    input_param {shape: {dim: 1 dim: 3}}
                }
                """
                dims = map(
                    int,
                    list(
                        filter(None,
                               str(list(input_param.shape)[0]).split('dim:'))))
                input_dims.append(np.array(list(dims), dtype=np.int64))
                input_names.append(layer.name)

        layer.name = graph.unique_id(layer.name)
        graph.add_node(layer.name,
                       pb=layer,
                       model_pb=model_layer,
                       kind='op',
                       type='Parameter')

        # connect inputs based on blob_producers dictionary
        for dst_port, bottom in enumerate(layer.bottom):
            src_layer = blob_producers[bottom][0]
            src_port = blob_producers[bottom][1]
            assert (graph.has_node(src_layer))
            edge_attrs = {
                'out': src_port,
                'in': dst_port,
                'name': bottom,
                # debug anchor for a framework name, out port and tensor name
                'fw_tensor_debug_info': [(src_layer, src_port, bottom)],
                'in_attrs': ['in', 'name'],
                'out_attrs': ['out', 'name'],
                'data_attrs': ['fw_tensor_debug_info']
            }
            graph.add_edge(src_layer, layer.name, **edge_attrs)

        # update blob producers dictionary by output ports
        for src_port, top in enumerate(layer.top):
            if top in blob_producers:
                log.debug("Detected reuse of blob {} by layer {}".format(
                    top, layer.name))
            blob_producers[top] = (layer.name, src_port)

    if len(input_names) <= 0:
        raise Error('The topology contains no "input" layers. ' +
                    refer_to_faq_msg(79))
    return {name: shape for (name, shape) in zip(input_names, input_dims)}
Example #21
def load_caffe_proto_model(caffe_pb2,
                           proto_path: str,
                           model_path: [str, None] = None):
    # 1. python protobuf is used
    if api_implementation._implementation_type == 'python':
        message = 'Please expect that Model Optimizer conversion might be slow. ' \
                  'You are currently using Python protobuf library implementation. \n'
        try:
            from google.protobuf.pyext import cpp_message
            # Check os windows and env variable PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION
            if os.name == 'nt' and os.environ.get(
                    'PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION',
                    default='') != 'cpp':
                # 2. cpp implementation is available but not used
                message += 'However, cpp implementation is available, you can boost ' \
                           'model conversion by setting PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION env variable to cpp. \n' \
                           'Run: set PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION=cpp \n'
        except ImportError:
            # 3. cpp implementation is not available
            message += 'However, you can use the C++ protobuf implementation that is supplied with the OpenVINO toolkit ' \
                       'or build protobuf library from sources. \n' \
                       'Navigate to "install_prerequisites" folder and run: ' \
                       'python -m easy_install protobuf-3.5.1-py($your_python_version)-win-amd64.egg \n' \
                       'set PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION=cpp'
        print(message + '\n\n' + refer_to_faq_msg(80))

    # Read proto layers
    try:
        proto = caffe_pb2.NetParameter()
        with open(proto_path, "r") as file:
            text_format.Merge(str(file.read()), proto)
    except Exception as e:
        log.error(
            'Exception message: {}\n\n'.format(e) + '    Possible reasons:\n' +
            '      1. {} does not exist\n'.format(proto_path) +
            '      2. {} does not have a valid structure, for example, it was downloaded as html\n'
            .format(proto_path) +
            '      3. {} contains custom layers or attributes that are not supported\n'
            .format(proto_path) +
            '         in Model Optimizer by default.\n\n' +
            '    After you made sure that {} has a valid structure and still see this issue, then\n'
            .format(proto_path) +
            '    you need to generate a python parser for caffe.proto that was used when the model\n'
            + '    was created.\n' +
            '    Run "python3 generate_caffe_pb2.py --input_proto ${PATH_TO_CAFFE}/src/caffe/proto/caffe.proto"'
            + refer_to_faq_msg(1) + '\n\n',
            extra={'framework_error': True})
        raise FrameworkError('Model Optimizer is not able to parse {}'.format(
            proto_path)) from e

    # Read model layer if exists
    model = None
    try:
        if model_path:
            model = caffe_pb2.NetParameter()
            with open(model_path, "rb") as infile:
                map = mmap.mmap(infile.fileno(), 0, access=mmap.ACCESS_READ)
                model.MergeFromString(map)
    except Exception as e:
        log.error(
            'Exception message: {}\n\n'.format(e) + '    Possible reasons:\n' +
            '      1. {} does not exist\n'.format(model_path) +
            '      2. {} does not have a valid structure\n'.format(model_path),
            extra={'framework_error': True})
        raise FrameworkError('Model Optimizer is not able to parse {}'.format(
            model_path)) from e

    return proto, model
Example #22
    def helper_value_propagation(node_name, value, dst_type):
        new_blob, finite_match_count, zero_match_count = convert_blob(value, dst_type)

        if finite_match_count:
            log.error("{} elements of {} were clipped to infinity while converting an input blob for node '{}' to {}."
                      " ".format(finite_match_count, new_blob.size, node_name, dst_type) + refer_to_faq_msg(76))
        if zero_match_count:
            log.warning("{} elements of {} were clipped to zero while converting an input blob for node '{}' to {}."
                        " ".format(zero_match_count, new_blob.size, node_name, dst_type) + refer_to_faq_msg(77))
        return new_blob
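
The counts reported above come from convert_blob; the underlying effect can be shown with plain numpy when narrowing float32 data to float16 (a standalone illustration, not the convert_blob implementation itself):

import numpy as np

blob = np.array([1.0, 7e4, 1e-8], dtype=np.float32)
narrowed = blob.astype(np.float16)  # 7e4 overflows to inf, 1e-8 underflows to 0

finite_match_count = np.count_nonzero(np.isfinite(blob) & ~np.isfinite(narrowed))
zero_match_count = np.count_nonzero((blob != 0) & (narrowed == 0))
print(finite_match_count, zero_match_count)  # 1 1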
Example #23
def prepare_ir(argv: argparse.Namespace):
    is_tf, is_caffe, is_mxnet, is_kaldi, is_onnx = deduce_framework_by_namespace(
        argv)

    if not any([is_tf, is_caffe, is_mxnet, is_kaldi, is_onnx]):
        raise Error(
            'Framework {} is not a valid target. Please use --framework with one from the list: caffe, tf, '
            'mxnet, kaldi, onnx. ' + refer_to_faq_msg(15), argv.framework)

    if is_tf and not argv.input_model and not argv.saved_model_dir and not argv.input_meta_graph:
        raise Error(
            'Path to input model or saved model dir is required: use --input_model, --saved_model_dir or '
            '--input_meta_graph')
    elif is_mxnet and not argv.input_model and not argv.input_symbol and not argv.pretrained_model_name:
        raise Error(
            'Path to input model or input symbol or pretrained_model_name is required: use --input_model or '
            '--input_symbol or --pretrained_model_name')
    elif is_caffe and not argv.input_model and not argv.input_proto:
        raise Error(
            'Path to input model or input proto is required: use --input_model or --input_proto'
        )
    elif (is_kaldi or is_onnx) and not argv.input_model:
        raise Error('Path to input model is required: use --input_model.')

    log.debug(str(argv))
    log.debug("Model Optimizer started")
    t = tm.Telemetry()
    t.start_session()

    model_name = "<UNKNOWN_NAME>"
    if argv.model_name:
        model_name = argv.model_name
    elif argv.input_model:
        model_name = get_model_name(argv.input_model)
    elif is_tf and argv.saved_model_dir:
        model_name = "saved_model"
    elif is_tf and argv.input_meta_graph:
        model_name = get_model_name(argv.input_meta_graph)
    elif is_mxnet and argv.input_symbol:
        model_name = get_model_name(argv.input_symbol)
    argv.model_name = model_name

    log.debug('Output model name would be {}{{.xml, .bin}}'.format(
        argv.model_name))

    # if --input_proto is not provided, try to retrieve another one
    # by suffix substitution from model file name
    if is_caffe and not argv.input_proto:
        argv.input_proto = replace_ext(argv.input_model, '.caffemodel',
                                       '.prototxt')

        if not argv.input_proto:
            raise Error(
                "Cannot find prototxt file: for Caffe please specify --input_proto - a "
                "protobuf file that stores topology and --input_model that stores "
                "pretrained weights. " + refer_to_faq_msg(20))
        log.info('Deduced name for prototxt: {}'.format(argv.input_proto))

    if not argv.silent:
        print_argv(argv, is_caffe, is_tf, is_mxnet, is_kaldi, is_onnx,
                   argv.model_name)

    # This try-except is an additional safeguard so that the IE
    # dependency search does not break the MO pipeline
    try:
        if not find_ie_version(silent=argv.silent) and not argv.silent:
            print(
                "[ WARNING ] Could not find the Inference Engine Python API. At this moment, the Inference Engine dependency is not required, but will be required in future releases."
            )
            print(
                "[ WARNING ] Consider building the Inference Engine Python API from sources or try to install OpenVINO (TM) Toolkit using \"install_prerequisites.{}\""
                .format("bat" if sys.platform == "windows" else "sh"))
            # If the IE was not found, it will not print the MO version, so we have to print it manually
            print("{}: \t{}".format("Model Optimizer version", get_version()))
    except Exception as e:
        pass

    ret_code = check_requirements(framework=argv.framework)
    if ret_code:
        raise Error(
            'check_requirements exit with return code {}'.format(ret_code))

    if is_tf and argv.tensorflow_use_custom_operations_config is not None:
        argv.transformations_config = argv.tensorflow_use_custom_operations_config

    if is_caffe and argv.mean_file and argv.mean_values:
        raise Error(
            'Both --mean_file and --mean_values are specified. Specify either mean file or mean values. '
            + refer_to_faq_msg(17))
    elif is_caffe and argv.mean_file and argv.mean_file_offsets:
        values = get_tuple_values(argv.mean_file_offsets,
                                  t=int,
                                  num_exp_values=2)
        mean_file_offsets = np.array([int(x) for x in values[0].split(',')])
        if not all([offset >= 0 for offset in mean_file_offsets]):
            raise Error(
                "Negative value specified for --mean_file_offsets option. "
                "Please specify positive integer values in format '(x,y)'. " +
                refer_to_faq_msg(18))
        argv.mean_file_offsets = mean_file_offsets

    if argv.scale and argv.scale_values:
        raise Error(
            'Both --scale and --scale_values are defined. Specify either scale factor or scale values per input '
            + 'channel. ' + refer_to_faq_msg(19))

    if argv.scale and argv.scale < 1.0:
        log.error(
            "The scale value is less than 1.0. This is most probably an issue because the scale value specifies "
            "floating point value which all input values will be *divided*.",
            extra={'is_warning': True})

    if argv.input_model and (is_tf and argv.saved_model_dir):
        raise Error('Both --input_model and --saved_model_dir are defined. '
                    'Specify either input model or saved model directory.')
    if is_tf:
        if argv.saved_model_tags is not None:
            if ' ' in argv.saved_model_tags:
                raise Error(
                    'Incorrect saved model tag was provided. Specify --saved_model_tags with no spaces in it'
                )
            argv.saved_model_tags = argv.saved_model_tags.split(',')

    argv.output = argv.output.split(',') if argv.output else None

    argv.placeholder_shapes, argv.placeholder_data_types = get_placeholder_shapes(
        argv.input, argv.input_shape, argv.batch)

    mean_values = parse_tuple_pairs(argv.mean_values)
    scale_values = parse_tuple_pairs(argv.scale_values)
    mean_scale = get_mean_scale_dictionary(mean_values, scale_values,
                                           argv.input)
    argv.mean_scale_values = mean_scale

    if not os.path.exists(argv.output_dir):
        try:
            os.makedirs(argv.output_dir)
        except PermissionError as e:
            raise Error(
                "Failed to create directory {}. Permission denied! " +
                refer_to_faq_msg(22), argv.output_dir) from e
    else:
        if not os.access(argv.output_dir, os.W_OK):
            raise Error(
                "Output directory {} is not writable for current user. " +
                refer_to_faq_msg(22), argv.output_dir)

    log.debug("Placeholder shapes : {}".format(argv.placeholder_shapes))

    if hasattr(argv,
               'extensions') and argv.extensions and argv.extensions != '':
        extensions = argv.extensions.split(',')
    else:
        extensions = None

    argv.freeze_placeholder_with_value, argv.input = get_freeze_placeholder_values(
        argv.input, argv.freeze_placeholder_with_value)
    if is_tf:
        t.send_event('mo', 'framework', 'tf')
        from mo.front.tf.register_custom_ops import get_front_classes
        import_extensions.load_dirs(argv.framework, extensions,
                                    get_front_classes)
    elif is_caffe:
        t.send_event('mo', 'framework', 'caffe')
        from mo.front.caffe.register_custom_ops import get_front_classes
        import_extensions.load_dirs(argv.framework, extensions,
                                    get_front_classes)
    elif is_mxnet:
        t.send_event('mo', 'framework', 'mxnet')
        from mo.front.mxnet.register_custom_ops import get_front_classes
        import_extensions.load_dirs(argv.framework, extensions,
                                    get_front_classes)
    elif is_kaldi:
        t.send_event('mo', 'framework', 'kaldi')
        from mo.front.kaldi.register_custom_ops import get_front_classes
        import_extensions.load_dirs(argv.framework, extensions,
                                    get_front_classes)
    elif is_onnx:
        t.send_event('mo', 'framework', 'onnx')
        from mo.front.onnx.register_custom_ops import get_front_classes
        import_extensions.load_dirs(argv.framework, extensions,
                                    get_front_classes)
    graph = unified_pipeline(argv)
    return graph
Example #24
def tf2nx(argv: argparse.Namespace, model_file_name: str, output_model_name: str, outputs: list, output_dir: str,
          scale: float, is_binary: bool,
          user_shapes: [None, list, np.array] = None,
          mean_scale_values: [dict, list] = ()):
    """
    Convert TF GraphDef object to NetworkX representation.
    The resulting graph is still TF-specific and needs normalization passes to be applied.
    The specific TF structure assumes each GraphDef node is converted to a single
    NetworkX node, node id is an original TF node name, and edges go directly from one op to another op.
    """
    meta_info = get_meta_info(argv)

    if argv.tensorflow_custom_layer_libraries:
        libraries = argv.tensorflow_custom_layer_libraries.split(',')
        for library in libraries:
            log.info('Loading library "{}" with custom operations'.format(library))
            tf.load_op_library(library)

    graph_def, variables_values = load_tf_graph_def(graph_file_name=model_file_name, is_binary=is_binary,
                                                    checkpoint=argv.input_checkpoint,
                                                    user_output_node_names_list=outputs,
                                                    model_dir=argv.saved_model_dir,
                                                    meta_graph_file=argv.input_meta_graph,
                                                    saved_model_tags=argv.saved_model_tags)

    try:
        tf.import_graph_def(graph_def, name='')
    except:
        log.warning("TensorFlow post-processing of loaded model was unsuccessful. "
                    "This is an optional step that Model Optimizer performs for any input model but it is not usually "
                    "required for all models."
                    "It likely means that the original model is ill-formed. "
                    "Model Optimizer will continue converting this model.")

    log.debug("Number of nodes in graph_def: {}".format(len(graph_def.node)))  # pylint: disable=no-member

    if argv.tensorboard_logdir:
        tensorboard.dump_for_tensorboard(graph_def, argv.tensorboard_logdir)

    update_extractors_with_extensions(tf_op_extractors)

    try:
        graph = protobuf2nx(graph_def)
        graph.__setattr__('name', output_model_name)
        # 'layout' parameter change may cause an issue in EltwiseInputReshape replacer
        # and convert_nhwc_to_nchw(graph)
        graph.graph['layout'] = 'NCHW' if argv.disable_nhwc_to_nchw else 'NHWC'
        graph.graph['cmd_params'] = argv
        graph.graph['fw'] = 'tf'
        graph.graph['ir_version'] = 2 if argv.generate_deprecated_IR_V2 else 4

        if graph.graph['ir_version'] == 2:
            # When the deprecated IR version was requested,
            # we configure only those phases that can lead to
            # functional regressions in the version 2.
            # BasicLSTMCell is one such transformation; when it is turned off,
            # the body of TF basic_lstm_cell is converted as-is in a decomposed form,
            # and should work in version 2.
            BasicLSTMCell.enabled = False

        # placeholder for request from a transformation pass to repeat the entire conversion
        graph.graph['repeat_conversion'] = False

        graph = restore_edges(graph, get_tf_edges)
        graph = remove_control_dependency_inputs(graph)
        # extract basic attributes earlier to enable some passes that rely on them before the full attribute
        # extractor is called
        extract_node_attrs(graph, lambda node: (True, common_tf_fields(node)))
    except Exception as e:
        raise Error(
            'Cannot pre-process TensorFlow graph after reading from model file "{}". ' \
            'File is corrupt or has unsupported format. Details: {}. ' +
            refer_to_faq_msg(44),
            model_file_name,
            str(e)
        ) from e

    check_empty_graph(graph, 'protobuf2nx. It may happen due to problems with loaded model')

    packed_user_shapes, packed_outputs, freeze_placeholder = user_data_repack(graph, user_shapes, outputs,
                                                                              argv.freeze_placeholder_with_value)
    if freeze_placeholder is not None:
        FreezePlaceholderValue.enabled = True
        FreezePlaceholderValue.replacement_dict = freeze_placeholder
        update_registration()

    GemmResolver.enabled = False

    inputs = list(packed_user_shapes.keys()) if packed_user_shapes is not None and isinstance(packed_user_shapes,
                                                                                              dict) else None
    graph.graph['inputs'] = inputs  # save user defined inputs for other extensions

    output_op_nodes = add_output_ops(graph, packed_outputs, inputs=packed_user_shapes)
    input_op_nodes = add_input_ops(graph, packed_user_shapes, True)

    # this call of 'graph_clean_up' removes child nodes of outputs which is useful when custom output is specified
    graph_clean_up_tf(graph)

    check_empty_graph(graph, 'add_output_ops and add_input_ops. It may happen due to absence of \'Placeholder\' layer '
                             'in the model')

    variables_to_constants(graph, variables_values)
    del variables_values
    graph_clean_up_tf(graph)

    if argv.tensorflow_custom_operations_config_update:
        if update_custom_replacement_config_file(graph, argv.tensorflow_custom_operations_config_update):
            return 0
        else:
            return 1

    unsupported_ops_to_offload_to_tf = list()

    MAX_ITERATIONS = 5
    cur_iteration = 0
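    # The conversion below is retried up to MAX_ITERATIONS times: a deep copy of the graph
    # is taken at the start of each attempt, and if unsupported operations remain (with
    # offloading to TF enabled) or a transformation requests a repeat, the next attempt
    # restarts from that untouched copy.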
    while cur_iteration < MAX_ITERATIONS:
        graph_copy = copy.deepcopy(graph)  # create a copy of graph for the case when some ops are unsupported

        if argv.tensorflow_subgraph_patterns is not None:
            csc.replace_subgraph_calls(graph, argv.tensorflow_subgraph_patterns)

        if argv.tensorflow_operation_patterns is not None:
            csc.offload_operations_to_tf(graph, argv.tensorflow_operation_patterns)

        if argv.offload_unsupported_operations_to_tf and len(unsupported_ops_to_offload_to_tf):
            csc.offload_unsupported_operations_to_tf(graph, unsupported_ops_to_offload_to_tf)

        extract_node_attrs(graph, lambda node: tf_op_extractor(node, check_for_duplicates(tf_op_extractors)))

        if argv.tensorflow_use_custom_operations_config is not None:
            registry = CustomReplacementRegistry()
            registry.add_custom_replacement_description_from_config(argv.tensorflow_use_custom_operations_config)

            # automatically generate sub-classes for custom replacements that replace sub-graph with a single node
            for replacement_desc in registry.get_all_replacements_descriptions():
                if replacement_desc.has('op'):
                    type('FrontReplacementFromConfigFileOp' + replacement_desc.op, (FrontReplacementFromConfigFileOp,),
                         {'replacement_id': replacement_desc.id})
            update_registration()

        override_placeholder_shapes(graph, packed_user_shapes)

        # the user shapes are used to convert TensorFlow Object Detection API models
        graph.graph['user_shapes'] = packed_user_shapes
        class_registration.apply_replacements(graph, class_registration.ClassType.FRONT_REPLACER)

        override_batch(graph, argv.batch)

        create_tensor_nodes(graph)
        graph_clean_up_tf(graph)

        remove_output_ops(graph)
        partial_infer(graph)
        delete_control_flow_edges(graph)

        replacer = AddIsCyclicAttribute()
        replacer.find_and_replace_pattern(graph)

        # TENSOR ITERATOR CREATING BEGINS
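        # For cyclic graphs (TF while-loops built from Enter/Merge/Switch/NextIteration/Exit
        # nodes) the matchers below recognize the loop structure piece by piece and finally
        # merge it into a single TensorIterator operation.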
        if graph.graph['is_cyclic']:
            replacer = DeleteSelect()
            replacer.find_and_replace_pattern(graph)

            replacer = SmartInputMatcher()
            replacer.find_and_replace_pattern(graph)

            replacer = SmartOutputMatcher()
            replacer.find_and_replace_pattern(graph)

            replacer = LoopConditionMatcher()
            replacer.find_and_replace_pattern(graph)

            replacer = SimpleConditionMather()
            replacer.find_and_replace_pattern(graph)

            replacer = BackEdgesMatching()
            replacer.find_and_replace_pattern(graph)

            replacer = ConditionChecks()
            replacer.find_and_replace_pattern(graph)

        delete_not_executable(graph)
        graph_clean_up_tf(graph)
        if graph.graph['is_cyclic']:
            replacer = SimpleInputMatcher()
            replacer.find_and_replace_pattern(graph)

            replacer = BackEdgeSimpleInputMatcher()
            replacer.find_and_replace_pattern(graph)

            # An optimization could be added here (move ops located after Enter and before the body out of the body)

            replacer = TensorIteratorMerge()
            replacer.find_and_replace_pattern(graph)
        # TENSOR ITERATOR CREATING ENDS

        check_for_cycle(graph)

        for_graph_and_each_sub_graph_recursively(graph, graph_clean_up_tf)
        check_empty_graph(graph, 'partial_infer')

        csc.prepare_tf_call_nodes(graph)
        graph_clean_up_tf(graph)

        duplicate_shared_weights(graph)

        input_op_nodes = add_input_ops(graph, packed_user_shapes, False)
        graph_clean_up_tf(graph)
        check_empty_graph(graph, 'add_input_ops')

        change_placeholders_types_to_FP32(graph)

        scale_input(graph, scale)
        add_mean_scale_values(graph, mean_scale_values)

        convert_dilated_convolution(graph)
        for_graph_and_each_sub_graph_recursively(graph, graph_clean_up_tf)

        l2_norm_to_norm(graph)
        graph_clean_up_tf(graph)

        remove_op_nodes(graph, {'identity': True})
        remove_useless_split(graph)

        class_registration.apply_replacements(graph, class_registration.ClassType.MIDDLE_REPLACER)

        mean_to_avgpool(graph)
        convert_nasnet(graph)

        fuse_pad(graph)
        graph_clean_up_tf(graph)

        convert_matmul_to_fully_connected(graph)

        # Mark nodes with attr 'can_be_fused': False to disable fusing for specified nodes
        for_graph_and_each_sub_graph_recursively(graph, lambda graph: mark_unfused_nodes(graph, argv.finegrain_fusing))

        # Converting FusedBatchNorm layer to a Mul->Add->Mul->Add sequence
        # IE doesn't support BatchNorm with 4 inputs, so we split it into two ScaleShift
        # operations (see the standalone numpy sketch after this function)
        convert_batch_norm(graph)
        graph_clean_up_tf(graph)

        if not argv.disable_fusing:
            # Converting ScaleShift layer to Mul->Add
            for_graph_and_each_sub_graph_recursively(graph, convert_scale_shift_to_mul_add)
            for_graph_and_each_sub_graph_recursively(graph, graph_clean_up_tf)

            # Fusing the sequences of Mul/Add operations
            for_graph_and_each_sub_graph_recursively(graph, fuse_mul_add_sequence)
            for_graph_and_each_sub_graph_recursively(graph, graph_clean_up_tf)

            # Fusing linear operation to Convolution
            for_graph_and_each_sub_graph_recursively(graph, fuse_linear_ops)
            for_graph_and_each_sub_graph_recursively(graph, graph_clean_up_tf)

        if not argv.disable_gfusing:
            grouped_convolutions_fusing(graph)
            graph_clean_up_tf(graph)
            if not argv.disable_fusing:
                fuse_linear_ops(graph)
                graph_clean_up_tf(graph)

        # Converting Mul->Add to ScaleShift node
        for_graph_and_each_sub_graph_recursively(graph, convert_muladd_to_scaleshift_or_power)
        for_graph_and_each_sub_graph_recursively(graph, graph_clean_up_tf)

        for_graph_and_each_sub_graph_recursively(graph, convert_mul_add_to_power)

        # Need to eliminate dead nodes before doing update_fully_connected_shapes
        # because update_fully_connected_shapes does partial inference and dead
        # nodes will lead to sporadic failures.
        for_graph_and_each_sub_graph_recursively(graph, graph_clean_up_tf)
        for_graph_and_each_sub_graph_recursively(graph, update_fully_connected_shapes)

        for_graph_and_each_sub_graph_recursively(graph, convert_mul_eltwise_to_leaky_relu)
        graph_clean_up_tf(graph)
        for_graph_and_each_sub_graph_recursively(graph, graph_clean_up_tf)

        for_graph_and_each_sub_graph_recursively(graph, fuse_pad)
        for_graph_and_each_sub_graph_recursively(graph, graph_clean_up_tf)

        for_graph_and_each_sub_graph_recursively(graph, convert_reshape)
        for_graph_and_each_sub_graph_recursively(graph, convert_squeeze)

        for_graph_and_each_sub_graph_recursively(graph, convert_add_to_scaleshift)  # scale = 1
        for_graph_and_each_sub_graph_recursively(graph, convert_mul_to_scaleshift)  # biases = 0

        if argv.reverse_input_channels:
            reverse_input_channels(graph)

        if argv.move_to_preprocess:
            move_scaleshift_to_preprocess(graph)
            graph_clean_up_tf(graph)

        for_graph_and_each_sub_graph_recursively(graph, fuse_sequence_of_reshapes)

        pattern = EltwiseInputNormalize()
        pattern.find_and_replace_pattern(graph)

        conv_flatten_concat(graph)

        for_graph_and_each_sub_graph_recursively(graph, apply_nhwc_to_nchw_permutation)
        for_graph_and_each_sub_graph_recursively(graph, merge_nodes_permutations)
        for_graph_and_each_sub_graph_recursively(graph, permute_data_nodes_attrs)
        for_graph_and_each_sub_graph_recursively(graph, permute_op_nodes_attrs)

        for_graph_and_each_sub_graph_recursively(graph, repack_fully_connected_weights_nhwc_to_nchw)
        for_graph_and_each_sub_graph_recursively(graph, transpose_fully_connected_weights)

        for_graph_and_each_sub_graph_recursively(graph, graph_clean_up_tf)

        if argv.offload_unsupported_operations_to_tf:
            unsupported_ops_to_offload_to_tf = find_unsupported_ops(graph)
            if len(unsupported_ops_to_offload_to_tf) == 0:
                log.info('All operations are supported! Exit from the loop.')
                if not need_to_repeat_conversion(graph):
                    break
            else:
                print('After iteration {} there are {} unsupported ops'.format(
                    cur_iteration + 1, len(unsupported_ops_to_offload_to_tf)))
        else:
            if not need_to_repeat_conversion(graph):
                break

        graph = graph_copy
        cur_iteration += 1

    class_registration.apply_replacements(graph, class_registration.ClassType.BACK_REPLACER)

    prepare_emit_ir(graph=graph, data_type=argv.data_type, output_dir=output_dir, output_model_name=output_model_name,
                    meta_info=meta_info)

    return 0
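# Illustrative sketch, not part of the original pipeline: the algebra behind the
# FusedBatchNorm comment inside the driver above. A BatchNorm
#     y = gamma * (x - mean) / sqrt(var + eps) + beta
# can be folded into a per-channel Mul followed by an Add (the ScaleShift pair),
# which is the form convert_batch_norm() produces. Names below are local to this
# sketch and are not Model Optimizer APIs.
import numpy as np

def batch_norm_as_mul_add(x, gamma, beta, mean, var, eps=1e-5):
    """Return BatchNorm(x) computed as scale * x + shift."""
    scale = gamma / np.sqrt(var + eps)   # the Mul constant
    shift = beta - mean * scale          # the Add constant
    return scale * x + shift

_x = np.random.rand(2, 3).astype(np.float32)
_reference = 1.5 * (_x - 0.3) / np.sqrt(0.25 + 1e-5) + 0.1
assert np.allclose(batch_norm_as_mul_add(_x, 1.5, 0.1, 0.3, 0.25), _reference)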
Exemplo n.º 25
0
def arguments_post_parsing(argv: argparse.Namespace):
    moc_front_end, available_moc_front_ends = get_moc_frontends(argv)

    is_tf, is_caffe, is_mxnet, is_kaldi, is_onnx =\
        deduce_framework_by_namespace(argv) if not moc_front_end else [False, False, False, False, False]

    if not any([is_tf, is_caffe, is_mxnet, is_kaldi, is_onnx]):
        frameworks = ['tf', 'caffe', 'mxnet', 'kaldi', 'onnx']
        frameworks = list(set(frameworks + available_moc_front_ends))
        if argv.framework not in frameworks:
            if argv.use_legacy_frontend:
                raise Error(
                    'Framework {} is not a valid target when using the --use_legacy_frontend flag. '
                    'The following legacy frameworks are available: {}' +
                    refer_to_faq_msg(15), argv.framework, frameworks)
            else:
                raise Error(
                    'Framework {} is not a valid target. Please use --framework with one from the list: {}. '
                    + refer_to_faq_msg(15), argv.framework, frameworks)

    if is_tf and not argv.input_model and not argv.saved_model_dir and not argv.input_meta_graph:
        raise Error(
            'Path to input model or saved model dir is required: use --input_model, --saved_model_dir or '
            '--input_meta_graph')
    elif is_mxnet and not argv.input_model and not argv.input_symbol and not argv.pretrained_model_name:
        raise Error(
            'Path to input model or input symbol or pretrained_model_name is required: use --input_model or '
            '--input_symbol or --pretrained_model_name')
    elif is_caffe and not argv.input_model and not argv.input_proto:
        raise Error(
            'Path to input model or input proto is required: use --input_model or --input_proto'
        )
    elif (is_kaldi or is_onnx) and not argv.input_model:
        raise Error('Path to input model is required: use --input_model.')

    log.debug(str(argv))
    log.debug("Model Optimizer started")

    model_name = "<UNKNOWN_NAME>"
    if argv.model_name:
        model_name = argv.model_name
    elif argv.input_model:
        model_name = get_model_name(argv.input_model)
    elif is_tf and argv.saved_model_dir:
        model_name = "saved_model"
    elif is_tf and argv.input_meta_graph:
        model_name = get_model_name(argv.input_meta_graph)
    elif is_mxnet and argv.input_symbol:
        model_name = get_model_name(argv.input_symbol)
    argv.model_name = model_name

    log.debug('Output model name would be {}{{.xml, .bin}}'.format(
        argv.model_name))

    # if --input_proto is not provided, try to retrieve another one
    # by suffix substitution from model file name
    if is_caffe and not argv.input_proto:
        argv.input_proto = replace_ext(argv.input_model, '.caffemodel',
                                       '.prototxt')
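        # e.g. 'model.caffemodel' is expected to have a 'model.prototxt' next to it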

        if not argv.input_proto:
            raise Error(
                "Cannot find prototxt file: for Caffe please specify --input_proto - a "
                "protobuf file that stores topology and --input_model that stores "
                "pretrained weights. " + refer_to_faq_msg(20))
        log.info('Deduced name for prototxt: {}'.format(argv.input_proto))

    if not argv.silent:
        print_argv(argv, is_caffe, is_tf, is_mxnet, is_kaldi, is_onnx,
                   argv.model_name)

    # This try-except is an additional safeguard so that the Inference Engine
    # dependency check does not break the MO pipeline
    def raise_ie_not_found():
        raise Error(
            "Could not find the Inference Engine or nGraph Python API.\n"
            "Consider building the Inference Engine and nGraph Python APIs from sources or try to install OpenVINO (TM) Toolkit using \"install_prerequisites.{}\""
            .format("bat" if sys.platform == "windows" else "sh"))

    try:
        if not find_ie_version(silent=argv.silent):
            raise_ie_not_found()
    except Exception:
        raise_ie_not_found()

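    # FP16 output is implemented as post-conversion compression: the pipeline itself keeps
    # running in FP32 and constants are compressed to FP16 when the IR is emitted.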
    if 'data_type' in argv and argv.data_type in ['FP16', 'half']:
        argv.data_type = 'FP32'
        argv.compress_fp16 = True
    else:
        argv.compress_fp16 = False

    # This is just to check that transform key is valid and transformations are available
    check_available_transforms(parse_transform(argv.transform))

    if argv.legacy_ir_generation and len(argv.transform) != 0:
        raise Error(
            "--legacy_ir_generation and --transform keys can not be used at the same time."
        )

    # For C++ frontends there are no specific Python installation requirements, check only generic ones
    if moc_front_end:
        ret_code = check_requirements()
    else:
        ret_code = check_requirements(framework=argv.framework)
    if ret_code:
        raise Error(
            'check_requirements exited with return code {}'.format(ret_code))

    if is_tf and argv.tensorflow_use_custom_operations_config is not None:
        argv.transformations_config = argv.tensorflow_use_custom_operations_config

    if is_caffe and argv.mean_file and argv.mean_values:
        raise Error(
            'Both --mean_file and --mean_values are specified. Specify either the mean file or mean values. '
            + refer_to_faq_msg(17))
    elif is_caffe and argv.mean_file and argv.mean_file_offsets:
        values = get_tuple_values(argv.mean_file_offsets,
                                  t=int,
                                  num_exp_values=2)
        mean_file_offsets = np.array([int(x) for x in values[0].split(',')])
        if not all([offset >= 0 for offset in mean_file_offsets]):
            raise Error(
                "Negative value specified for --mean_file_offsets option. "
                "Please specify positive integer values in format '(x,y)'. " +
                refer_to_faq_msg(18))
        argv.mean_file_offsets = mean_file_offsets

    if argv.scale and argv.scale_values:
        raise Error(
            'Both --scale and --scale_values are defined. Specify either a scale factor or scale values per input '
            'channel. ' + refer_to_faq_msg(19))

    if argv.scale and argv.scale < 1.0:
        # the mean/scale semantics are illustrated in the standalone sketch after this function
        log.error(
            "The scale value is less than 1.0. This is most likely an issue because the scale specifies "
            "the floating point value by which all input values will be *divided*.",
            extra={'is_warning': True})

    if argv.input_model and (is_tf and argv.saved_model_dir):
        raise Error('Both --input_model and --saved_model_dir are defined. '
                    'Specify either input model or saved model directory.')
    if is_tf:
        if argv.saved_model_tags is not None:
            if ' ' in argv.saved_model_tags:
                raise Error(
                    'Incorrect saved model tag was provided. Specify --saved_model_tags with no spaces in it'
                )
            argv.saved_model_tags = argv.saved_model_tags.split(',')

    argv.output = argv.output.split(',') if argv.output else None

    argv.placeholder_shapes, argv.placeholder_data_types = get_placeholder_shapes(
        argv.input, argv.input_shape, argv.batch)

    mean_values = parse_tuple_pairs(argv.mean_values)
    scale_values = parse_tuple_pairs(argv.scale_values)
    mean_scale = get_mean_scale_dictionary(mean_values, scale_values,
                                           argv.input)
    argv.mean_scale_values = mean_scale
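    # For illustration (assumed CLI format): --mean_values "data(123.68,116.78,103.94)" and
    # --scale_values "data(58.4,57.1,57.4)" are expected to end up in mean_scale as a
    # per-input mapping along the lines of {'data': {'mean': [...], 'scale': [...]}}.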

    if not os.path.exists(argv.output_dir):
        try:
            os.makedirs(argv.output_dir)
        except PermissionError as e:
            raise Error(
                "Failed to create directory {}. Permission denied! " +
                refer_to_faq_msg(22), argv.output_dir) from e
    else:
        if not os.access(argv.output_dir, os.W_OK):
            raise Error(
                "Output directory {} is not writable for current user. " +
                refer_to_faq_msg(22), argv.output_dir)

    log.debug("Placeholder shapes : {}".format(argv.placeholder_shapes))

    if hasattr(argv,
               'extensions') and argv.extensions and argv.extensions != '':
        extensions = argv.extensions.split(',')
    else:
        extensions = None

    argv.freeze_placeholder_with_value, argv.input = get_freeze_placeholder_values(
        argv.input, argv.freeze_placeholder_with_value)
    if is_tf:
        from mo.front.tf.register_custom_ops import get_front_classes
        import_extensions.load_dirs(argv.framework, extensions,
                                    get_front_classes)
    elif is_caffe:
        send_framework_info('caffe')
        from mo.front.caffe.register_custom_ops import get_front_classes
        import_extensions.load_dirs(argv.framework, extensions,
                                    get_front_classes)
    elif is_mxnet:
        send_framework_info('mxnet')
        from mo.front.mxnet.register_custom_ops import get_front_classes
        import_extensions.load_dirs(argv.framework, extensions,
                                    get_front_classes)
    elif is_kaldi:
        send_framework_info('kaldi')
        from mo.front.kaldi.register_custom_ops import get_front_classes
        import_extensions.load_dirs(argv.framework, extensions,
                                    get_front_classes)
    elif is_onnx:
        send_framework_info('onnx')
        from mo.front.onnx.register_custom_ops import get_front_classes
        import_extensions.load_dirs(argv.framework, extensions,
                                    get_front_classes)

    return argv
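# Illustrative sketch, not part of the original pipeline: the preprocessing that the
# mean/scale options collected above describe. Model Optimizer embeds it into the
# network input as "subtract the mean, then divide by the scale", which is why a
# scale below 1.0 triggers the warning in arguments_post_parsing(). The helper name
# and the sample values are assumptions made for this sketch only.
import numpy as np

def apply_mean_scale(pixels, mean, scale):
    """Preprocessing in the (x - mean) / scale form."""
    return (np.asarray(pixels, dtype=np.float32) - mean) / scale

# Typical ImageNet-style per-channel means with a single scalar scale.
print(apply_mean_scale([127.0, 200.0, 64.0],
                       mean=np.array([123.68, 116.78, 103.94], dtype=np.float32),
                       scale=255.0))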
Exemplo n.º 26
0
def driver_R1(onnx_modelproto_bytes,
              precision: str,
              output_model_name: str,
              outputs: list,
              output_dir: str,
              scale: float,
              user_shapes: [None, list, np.array] = None,
              mean_scale_values: [dict, list] = ()):

    try:
        model_proto = onnx.load_from_string(bytes(onnx_modelproto_bytes))
    except Exception as e:
        print("[python] onnx exception: ", str(e))
        raise

    model_graph = model_proto.graph  # pylint: disable=no-member

    update_extractors_with_extensions(onnx_op_extractors)

    try:
        graph = protobuf2nx(model_proto)
        log.debug("Number of nodes in NX graph: {}".format(
            graph.number_of_nodes()))
        graph.__setattr__(
            'name',
            output_model_name if output_model_name else model_proto.graph.name)  # pylint: disable=no-member
        graph.graph['layout'] = 'NCHW'
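        # This driver receives an in-memory ONNX model instead of CLI arguments, so a
        # Namespace with default option values is synthesized here to let the shared
        # transformation passes read graph.graph['cmd_params'] as usual.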
        graph.graph['cmd_params'] = argparse.Namespace(
            batch=None,
            data_type='float',
            disable_fusing=False,
            disable_gfusing=False,
            disable_resnet_optimization=False,
            enable_concat_optimization=False,
            extensions=mo_extensions,
            finegrain_fusing=None,
            framework='onnx',
            freeze_placeholder_with_value=None,
            generate_deprecated_IR_V2=False,
            input=None,
            input_model=None,
            input_shape=None,
            keep_shape_ops=False,
            log_level='ERROR',
            mean_scale_values={},
            mean_values=(),
            model_name=None,
            move_to_preprocess=False,
            output=None,
            output_dir='.',
            placeholder_shapes=None,
            reverse_input_channels=False,
            scale=None,
            scale_values=(),
            silent=False,
            version=False)
        graph.graph['fw'] = 'onnx'
        graph.graph[
            'feature_dim'] = 1 if graph.graph['layout'] == 'NCHW' else 3
        graph.graph['ir_version'] = 5
    except Exception as e:
        raise Error(
            'Cannot pre-process ONNX graph after reading from model file "{}". '
            'File is corrupt or has unsupported format. Details: {}. ' +
            refer_to_faq_msg(44), output_model_name, str(e)) from e  # no file name here: the model comes from bytes
    graph.check_empty_graph(
        'protobuf2nx. It may happen due to problems with loaded model')
    extract_node_attrs(
        graph, lambda node: onnx_op_extractor(
            node, check_for_duplicates(onnx_op_extractors)))

    # --------------------------------- LOAD END ------------------------------------------------------
    class_registration.apply_replacements(
        graph, class_registration.ClassType.FRONT_REPLACER)
    class_registration.apply_replacements(
        graph, class_registration.ClassType.MIDDLE_REPLACER)

    fuse_pad(graph)
    graph_clean_up_onnx(graph)

    mark_unfused_nodes(graph, 'False')
    convert_batch_norm(graph)
    graph_clean_up_onnx(graph)

    convert_scale_shift_to_mul_add(graph)
    graph_clean_up_onnx(graph)

    fuse_mul_add_sequence(graph)
    graph_clean_up_onnx(graph)

    fuse_linear_ops(graph)
    graph_clean_up_onnx(graph)

    grouped_convolutions_fusing(graph)
    graph_clean_up_onnx(graph)

    fuse_linear_ops(graph)
    graph_clean_up_onnx(graph)

    convert_muladd_to_scaleshift_or_power(graph)
    graph_clean_up_onnx(graph)

    convert_mul_add_to_power(graph)
    graph_clean_up_onnx(graph)

    convert_reshape(graph)
    graph_clean_up_onnx(graph)
    convert_add_or_mul_to_scaleshift(graph)  # scale = 1
    graph_clean_up_onnx(graph)

    fuse_pad(graph)
    graph_clean_up_onnx(graph)

    fuse_sequence_of_reshapes(graph)
    graph_clean_up_onnx(graph)

    pattern = EltwiseInputNormalize()
    pattern.find_and_replace_pattern(graph)

    merge_nodes_permutations(graph)
    permute_data_nodes_attrs(graph)
    permute_op_nodes_attrs(graph)

    class_registration.apply_replacements(
        graph, class_registration.ClassType.BACK_REPLACER)

    for_graph_and_each_sub_graph_recursively(graph, remove_const_ops)

    CreateConstNodesReplacement().find_and_replace_pattern(graph)

    for_graph_and_each_sub_graph_recursively(graph, remove_output_ops)

    weights, xml_string = prepare_emit_ir(graph=graph,
                                          data_type=precision,
                                          output_dir=output_dir,
                                          output_model_name=output_model_name,
                                          meta_info={'unset': []})

    return weights, xml_string
Exemplo n.º 27
0
def driver_R5(onnx_modelproto_bytes,
              precision: str,
              output_model_name: str,
              outputs: list,
              output_dir: str,
              scale: float,
              user_shapes: [None, list, np.array] = None,
              mean_scale_values: [dict, list] = ()):

    try:
        model_proto = onnx.load_from_string(bytes(onnx_modelproto_bytes))
    except Exception as e:
        print("[python] onnx exception: ", str(e))
        raise

    model_graph = model_proto.graph  # pylint: disable=no-member
    log.debug("Number of nodes in graph_def: {}".format(len(model_graph.node)))
    log.debug(
        "Number of all input ports (not true inputs) in graph_def: {}".format(
            len(model_graph.input)))
    log.debug("Number of initializers in graph_def: {}".format(
        len(model_graph.initializer)))
    log.debug("Number of real inputs in graph_def: {}".format(
        len(model_graph.input) - len(model_graph.initializer)))
    update_extractors_with_extensions(onnx_op_extractors)

    try:
        graph = protobuf2nx(model_proto)
        log.debug("Number of nodes in NX graph: {}".format(
            graph.number_of_nodes()))
        graph.__setattr__(
            'name',
            output_model_name if output_model_name else model_proto.graph.name)  # pylint: disable=no-member
        graph.graph['layout'] = 'NCHW'
        graph.graph['fw'] = 'onnx'
        graph.graph[
            'feature_dim'] = 1 if graph.graph['layout'] == 'NCHW' else 3
        graph.graph['ir_version'] = 4
        extract_node_attrs(graph, lambda node:
                           (True, common_onnx_fields(node)))
    except Exception as e:
        raise Error(
            'Cannot pre-process ONNX graph after reading from model file "{}". '
            'File is corrupt or has unsupported format. Details: {}. ' +
            refer_to_faq_msg(44), output_model_name, str(e)) from e  # no file name here: the model comes from bytes
    check_empty_graph(
        graph, 'protobuf2nx. It may happen due to problems with loaded model')
    packed_user_shapes, packed_outputs, _ = user_data_repack(
        graph, user_shapes, outputs, None)

    output_op_nodes = add_output_ops(graph, packed_outputs)
    input_op_nodes = add_input_ops(graph, packed_user_shapes, True)

    graph_clean_up(graph)
    check_empty_graph(graph, 'add_output_ops and add_input_ops')
    extract_node_attrs(
        graph, lambda node: onnx_op_extractor(
            node, check_for_duplicates(onnx_op_extractors)))

    class_registration.apply_replacements(
        graph, class_registration.ClassType.FRONT_REPLACER)

    create_tensor_nodes(graph)
    graph_clean_up(graph)

    override_placeholder_shapes(graph, packed_user_shapes)

    graph_clean_up(graph)
    remove_op_nodes(graph, {'op': 'Identity'})

    graph_clean_up(graph)

    remove_output_ops(graph)

    partial_infer(graph)
    graph_clean_up(graph)
    check_empty_graph(graph, 'partial_infer')

    input_op_nodes = add_input_ops(graph, packed_user_shapes, False)
    graph_clean_up(graph)
    check_empty_graph(graph, 'add_input_ops')

    scale_input(graph, scale)
    add_mean_scale_values(graph, mean_scale_values)

    convert_dilated_convolution(graph)
    graph_clean_up(graph)

    graph_clean_up(graph)

    remove_op_nodes(graph, {'op': 'Identity'})
    remove_useless_split(graph)

    class_registration.apply_replacements(
        graph, class_registration.ClassType.MIDDLE_REPLACER)

    convert_gemm_to_fully_connected(graph)
    NormalizeFullyConnected().find_and_replace_pattern(graph)

    fuse_pad(graph)
    graph_clean_up(graph)

    convert_batch_norm(graph)
    graph_clean_up(graph)

    convert_scale_shift_to_mul_add(graph)
    graph_clean_up(graph)

    fuse_mul_add_sequence(graph)
    graph_clean_up(graph)

    fuse_linear_ops(graph)
    graph_clean_up(graph)

    grouped_convolutions_fusing(graph)
    graph_clean_up(graph)

    fuse_linear_ops(graph)
    graph_clean_up(graph)

    convert_muladd_to_scaleshift_or_power(graph)
    graph_clean_up(graph)

    convert_mul_add_to_power(graph)
    graph_clean_up(graph)

    convert_reshape(graph)
    convert_add_to_scaleshift(graph)  # scale = 1
    convert_mul_to_scaleshift(graph)  # biases = 0

    fuse_pad(graph)
    graph_clean_up(graph)

    fuse_sequence_of_reshapes(graph)
    graph_clean_up(graph)

    pattern = EltwiseInputNormalize()
    pattern.find_and_replace_pattern(graph)

    merge_nodes_permutations(graph)
    permute_data_nodes_attrs(graph)
    permute_op_nodes_attrs(graph)

    class_registration.apply_replacements(
        graph, class_registration.ClassType.BACK_REPLACER)

    weights, xml_string = prepare_emit_ir(graph=graph,
                                          data_type=precision,
                                          output_dir=output_dir,
                                          output_model_name=output_model_name,
                                          meta_info={'unset': []})

    return weights, xml_string
Exemplo n.º 28
0
def driver(argv: argparse.Namespace, model_file_name: str, output_model_name: str, output_dir: str):
    meta_info = get_meta_info(argv)

    model_proto = load_onnx_model(model_file_name)
    model_graph = model_proto.graph  # pylint: disable=no-member
    # print(model_graph)
    # assert len(model_graph) == 1, "An ONNX model contains more than 1 graph: unsupported"
    log.debug("Number of nodes in graph_def: {}".format(len(model_graph.node)))
    log.debug("Number of all input ports (not true inputs) in graph_def: {}".format(len(model_graph.input)))
    log.debug("Number of initializers in graph_def: {}".format(len(model_graph.initializer)))
    log.debug("Number of real inputs in graph_def: {}".format(len(model_graph.input) - len(model_graph.initializer)))
    update_extractors_with_extensions(onnx_op_extractors)

    try:
        graph = protobuf2nx(model_proto)
        log.debug("Number of nodes in NX graph: {}".format(graph.number_of_nodes()))
        graph.__setattr__('name',
                          output_model_name if output_model_name else model_proto.graph.name)  # pylint: disable=no-member
        graph.graph['layout'] = 'NCHW'
        graph.graph['cmd_params'] = argv
        graph.graph['fw'] = 'onnx'
        graph.graph['feature_dim'] = 1 if graph.graph['layout'] == 'NCHW' else 3
        graph.graph['ir_version'] = 2 if argv.generate_deprecated_IR_V2 else 5
    except Exception as e:
        raise Error(
            'Cannot pre-process ONNX graph after reading from model file "{}". ' \
            'File is corrupt or has unsupported format. Details: {}. ' +
            refer_to_faq_msg(44),
            model_file_name,
            str(e)
        ) from e
    graph.check_empty_graph('protobuf2nx. It may happen due to problems with loaded model')
    extract_node_attrs(graph, lambda node: onnx_op_extractor(node, check_for_duplicates(onnx_op_extractors)))

    # --------------------------------- LOAD END ------------------------------------------------------
    class_registration.apply_replacements(graph, class_registration.ClassType.FRONT_REPLACER)
    class_registration.apply_replacements(graph, class_registration.ClassType.MIDDLE_REPLACER)

    fuse_pad(graph)
    graph_clean_up_onnx(graph)

    # Mark nodes with attr 'can_be_fused': False to disable fusing for specified nodes
    mark_unfused_nodes(graph, argv.finegrain_fusing)

    # Converting FusedBatchNorm layer to Mul->Add->Mul->Add sequence
    # IE doesn't support BN with 4 inputs, so we have to split it to two ScaleShift
    convert_batch_norm(graph)
    graph_clean_up_onnx(graph)

    if not argv.disable_fusing:
        # Converting ScaleShift layer to Mul->Add
        convert_scale_shift_to_mul_add(graph)
        graph_clean_up_onnx(graph)

        # Fusing the sequences of Mul/Add operations
        fuse_mul_add_sequence(graph)
        graph_clean_up_onnx(graph)

        # Fusing linear operation to Convolution
        fuse_linear_ops(graph)
        graph_clean_up_onnx(graph)

    if not argv.disable_gfusing:
        grouped_convolutions_fusing(graph)
        graph_clean_up_onnx(graph)
        if not argv.disable_fusing:
            fuse_linear_ops(graph)
            graph_clean_up_onnx(graph)

    AddQuantizeFuse().find_and_replace_pattern(graph)
    MulQuantizeFuse().find_and_replace_pattern(graph)

    convert_muladd_to_scaleshift_or_power(graph)
    graph_clean_up_onnx(graph)

    convert_mul_add_to_power(graph)
    graph_clean_up_onnx(graph)

    convert_reshape(graph)
    graph_clean_up_onnx(graph)
    convert_add_or_mul_to_scaleshift(graph)  # scale = 1
    graph_clean_up_onnx(graph)

    fuse_pad(graph)
    graph_clean_up_onnx(graph)

    if argv.reverse_input_channels:
        reverse_input_channels(graph)

    if argv.move_to_preprocess:
        move_scaleshift_to_preprocess(graph)
        graph_clean_up_onnx(graph)

    fuse_sequence_of_reshapes(graph)
    graph_clean_up_onnx(graph)

    pattern = EltwiseInputNormalize()
    pattern.find_and_replace_pattern(graph)

    merge_nodes_permutations(graph)
    permute_data_nodes_attrs(graph)
    permute_op_nodes_attrs(graph)

    class_registration.apply_replacements(graph, class_registration.ClassType.BACK_REPLACER)

    for_graph_and_each_sub_graph_recursively(graph, remove_const_ops)

    CreateConstNodesReplacement().find_and_replace_pattern(graph)

    for_graph_and_each_sub_graph_recursively(graph, remove_output_ops)

    prepare_emit_ir(graph=graph, data_type=argv.data_type, output_dir=output_dir, output_model_name=output_model_name,
                    meta_info=meta_info)

    return 0
Exemplo n.º 29
0
 def find_and_replace_pattern(self, graph: Graph):
     is_acyclic = nx.is_directed_acyclic_graph(graph)
     if not is_acyclic:
         raise Error('Graph contains a cycle. Can not proceed. ' +
                     refer_to_faq_msg(97))
Exemplo n.º 30
0
    def load(self, graph: Graph):
        argv = graph.graph['cmd_params']
        if argv.tensorflow_custom_layer_libraries:
            libraries = argv.tensorflow_custom_layer_libraries.split(',')
            for library in libraries:
                log.info('Loading library "{}" with custom operations'.format(
                    library))
                tf_v1.load_op_library(library)

        graph_def, variables_values = load_tf_graph_def(
            graph_file_name=argv.input_model,
            is_binary=not argv.input_model_is_text,
            checkpoint=argv.input_checkpoint,
            user_output_node_names_list=argv.output,
            model_dir=argv.saved_model_dir,
            meta_graph_file=argv.input_meta_graph,
            saved_model_tags=argv.saved_model_tags)

        try:
            tf_v1.import_graph_def(graph_def, name='')
        except Exception:
            log.warning(
                "TensorFlow post-processing of loaded model was unsuccessful. "
                "This is an optional step that Model Optimizer performs for any input model "
                "but it is not usually required for all models. "
                "It likely means that the original model is ill-formed. "
                "Model Optimizer will continue converting this model.")

        log.debug("Number of nodes in graph_def: {}".format(len(
            graph_def.node)))  # pylint: disable=no-member

        if argv.tensorboard_logdir:
            tensorboard_util.dump_for_tensorboard(graph_def,
                                                  argv.tensorboard_logdir)

        update_extractors_with_extensions(tf_op_extractors)

        try:
            protobuf2nx(graph, graph_def)
        except Exception as e:
            raise Error(
                'Cannot pre-process TensorFlow graph after reading from model file "{}". ' \
                'File is corrupt or has unsupported format. Details: {}. ' +
                refer_to_faq_msg(44),
                argv.model_name,
                str(e)
            ) from e

        graph.__setattr__('name', argv.model_name)
        # 'layout' parameter change may cause an issue in EltwiseInputReshape replacer
        # and convert_nhwc_to_nchw(graph)
        graph.graph['layout'] = 'NCHW' if argv.disable_nhwc_to_nchw else 'NHWC'
        graph.graph['fw'] = 'tf'

        graph.graph['variables_values'] = variables_values
        del variables_values

        restore_edges(graph, get_tf_edges)
        remove_control_dependency_inputs(graph)

        graph.check_empty_graph(
            'protobuf2nx. It may happen due to problems with loaded model')
        extract_node_attrs(
            graph, lambda node: tf_op_extractor(
                node, check_for_duplicates(tf_op_extractors)))