    def find_and_replace_pattern(self, graph: Graph):
        params = graph.get_op_nodes(op="Parameter")
        batch = params[0].shape[0]

        # check that all Parameters have the same batch
        for p in params:
            assert p.shape[0] == batch, \
                   "Parameter {} has batch different from the {}".format(p.soft_get('name', p.id),
                                                                          params[0].soft_get('name', params[0].id))

        # make constants for initialization of ReadValue reshapable
        for read in graph.get_op_nodes(op='ReadValue'):
            input_node = read.in_port(0).get_source().node
            if input_node.soft_get('op') == "Const":
                const_shape = input_node.out_port(0).data.get_shape()
                # extra check to be sure that we don't break shapes compatibility in graph
                # in Kaldi models we have only 2 dimensions
                # and batch should be set the same as we will get from Parameter
                # otherwise just skip such node
                if len(const_shape) != 2 or const_shape[0] != batch:
                    continue
                new_const = create_const_with_batch_from_input(
                    params[0].out_port(0),
                    const_shape[1],
                    value=input_node.value[0],
                    precision=input_node.data_type)
                input_node.out_port(0).get_connection().set_source(
                    new_const.out_port(0))
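For reference, a minimal standalone sketch (NumPy only, assumed behaviour, not the actual MO helper) of the value create_const_with_batch_from_input is expected to produce: a [batch, D] constant filled with the scalar taken from the original initializer.

import numpy as np

def sketch_const_with_batch(batch, second_dim, scalar_value, dtype=np.float32):
    # Assumption: the helper broadcasts the single initialization value to a
    # [batch, second_dim] tensor, where batch follows the Parameter node at runtime.
    return np.full((batch, second_dim), scalar_value, dtype=dtype)

# e.g. sketch_const_with_batch(8, 128, 0.0).shape == (8, 128)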
Example #2
    def analyze(self, graph: Graph):
        inputs_desc = dict()
        message = InputsAnalysis.iterator_get_next_analysis(graph, inputs_desc)
        inputs_to_ignore = InputsAnalysis.fifo_queue_analysis(graph, inputs_desc)
        if graph.graph['fw'] == 'mxnet':
            inputs_to_ignore.update(InputsAnalysis.ignore_mxnet_softmax_inputs(graph))

        inputs = graph.get_op_nodes(op='Parameter')
        for input in inputs:
            inputs_desc[input.name] = {'shape': input.soft_get('shape', None),
                                       'data_type': input.soft_get('data_type', None),
                                       'value': None,
                                       }
        placeholders_with_default = graph.get_op_nodes(op='PlaceholderWithDefault')
        for input in placeholders_with_default:
            inputs_desc[input.name] = {'shape': input.soft_get('shape', None),
                                       'data_type': input.soft_get('data_type', None),
                                       'value': input.in_node(0).value if 0 in input.in_nodes() and
                                                                          input.in_node(0).has_valid('value') else None}

        for input_to_ignore in inputs_to_ignore:
            del inputs_desc[input_to_ignore]

        # workaround for the ONNX models case where input shape is specified as string value like: "width", "height".
        # In this case the string value is converted to 0, but in fact it is an arbitrary value so should be -1
        if graph.graph['fw'] == 'onnx':
            for inp in inputs_desc.values():
                inp['shape'] = [-1 if item == 0 else item for item in inp['shape']]
        return {'inputs': inputs_desc}, message
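A standalone illustration of the ONNX shape fix-up above (hypothetical shape value):

shape = [0, 3, 224, 224]  # 0 came from a string dimension such as "batch"
shape = [-1 if item == 0 else item for item in shape]
assert shape == [-1, 3, 224, 224]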
Example #3
    def find_and_replace_pattern(self, graph: Graph):
        names_set = set()
        for node in graph.get_op_nodes():
            if node.has_valid('name'):
                names_set.add(node['name'])

        for node in graph.get_op_nodes(type='Result'):
            if node.in_ports():
                prev_node_out_port = node.in_port(
                    0).get_connection().get_source()
                tensor_names = prev_node_out_port.get_tensor_names()
                # The graph may contain Result nodes with names equal to input tensor names;
                # renaming is not needed in that case. An example of such a situation is the
                # IR reader check, when the graph is read with correct Result names.
                if not tensor_names:
                    result_name = prev_node_out_port.node.soft_get('name', prev_node_out_port.node.id) + \
                                  '/sink_port_' + str(prev_node_out_port.idx)
                    node['name'] = result_name
                    continue
                # If Result name is equal to some tensor name from list, then renaming is not needed
                if node.soft_get('name') in tensor_names:
                    continue

                # Try to find a tensor name that does not intersect with graph node names
                result_name = None
                for tensor_name in tensor_names:
                    if tensor_name not in names_set:
                        result_name = tensor_name
                        break

                # If we didn't find appropriate tensor name, then Result is named by default naming
                if result_name is None:
                    result_name = prev_node_out_port.node.soft_get('name', prev_node_out_port.node.id) + \
                                  '/sink_port_' + str(prev_node_out_port.idx)
                node['name'] = result_name
Example #4
def get_all_operation_nodes(graph: Graph, recursively: bool = False):
    """ Returns sequence of all nodes in graph
    :param graph: NetworkX model to take nodes
    :param recursively: whether return all nodes from the graph
    and each subgraph or only from the external graph
    :return list of all nodes
    """
    if recursively:
        get_all_op_nodes_func = FunctionResultsAccumulator(
            lambda graph: graph.get_op_nodes())
        for_graph_and_each_sub_graph_recursively(graph, get_all_op_nodes_func)
        return get_all_op_nodes_func.results

    return graph.get_op_nodes()
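A possible usage sketch (graph construction omitted, so the calls are shown commented out):

# ops = get_all_operation_nodes(graph)                    # top-level operations only
# ops = get_all_operation_nodes(graph, recursively=True)  # also ops inside Loop/TensorIterator bodies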
Example #5
    def find_and_replace_pattern(self, graph: Graph):
        # to prevent fusing of non-per-channel linear ops, run EltwiseChecker to mark nodes with the can_be_fused attribute
        EltwiseChecker().find_and_replace_pattern(graph)
        self.mark_fusable_muls_on_weights(graph)
        eltwise_nodes = graph.get_op_nodes(op='Mul', can_be_fused=True) + \
                        graph.get_op_nodes(op='Sub', can_be_fused=True) + \
                        graph.get_op_nodes(op='Add', can_be_fused=True)
        for elt in eltwise_nodes:
            if elt.in_port(0).data.get_value() is not None or elt.in_port(
                    1).data.get_value() is not None:
                elt['fuse_up_to_quantize_ports'] = [3, 4]

        slice = graph.get_op_nodes(op='Slice')
        for sl in slice:
            sl['fuse_up_to_quantize_ports'] = [0]

    def find_and_replace_pattern(self, graph: Graph):
        for eltwise_node in graph.get_op_nodes(
                op='EltwiseN', operation='sum') + graph.get_op_nodes(op='Add'):
            if eltwise_node.has_valid('coeff') and len(eltwise_node.coeff):
                coeff = eltwise_node.coeff

                for i in range(len(coeff)):
                    __class__.__insert_mul_node_with_coeff(
                        eltwise_node, i, coeff[i])

                eltwise_node.coeff = None
                if len(coeff) > 2:
                    eltwise_node.op = "EltwiseN"
                    eltwise_node.type = "EltwiseN"
                    eltwise_node['operation'] = "sum"
Example #7
def get_node_by_name(graph: Graph,
                     name: str,
                     recursively: bool = False) -> Node:
    """ Returns node by name
    :param graph: NetworkX model to take node
    :param name: name of the node
    :param recursively: whether return all nodes from the graph
    and each subgraph or only from the external graph
    :return node from NetworkX model (of type Node or None if there's no such node)
    """
    if recursively:

        def get_node_by_fullname(graph: Graph, name: str) -> Node:
            nodes = graph.get_nodes_with_attributes(
                **dict(kind='op', fullname=name))
            return [Node(graph, nodes[0])] if nodes else None

        partial_get_node_by_fullname = partial(get_node_by_fullname, name=name)
        get_node_by_fullname_func = FunctionResultsAccumulator(
            partial_get_node_by_fullname)
        for_graph_and_each_sub_graph_recursively(graph,
                                                 get_node_by_fullname_func)
        node = get_node_by_fullname_func.results
    else:
        node = graph.get_op_nodes(name=name)

    return node[0] if node else None
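A possible usage sketch; note that the recursive branch matches nodes by the 'fullname' attribute, while the non-recursive branch matches by 'name':

# node = get_node_by_name(graph, 'conv1/weights')                     # external graph only
# node = get_node_by_name(graph, 'while/body/add', recursively=True)  # also searches subgraphs
# if node is None:
#     pass  # no node with such a name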
Example #8
    def find_and_replace_pattern(self, graph: Graph):
        for fake_output in graph.get_op_nodes(op='FakeOutput'):
            name = fake_output.soft_get('name', fake_output.id)

            producer = fake_output.in_port(0).get_source().node
            producer_outputs = 0
            for port in producer.out_ports().values():
                if not port.disconnected():
                    producer_outputs += 1
            if producer_outputs != 1:
                # At this stage we don't know the type of output, so we rely on MO transformation which updates the
                # Const type for elementwise operations in case of input data types mismatch
                add = create_op_with_const_inputs(graph, Add, {1: int64_array(0)}, {'can_be_fused': False})
                rename_nodes([(fake_output, name + '/TBD'), (add, name)])

                prev_op_in_port = fake_output.in_port(0).get_connection().get_source()
                # Get tensor names incoming to FakeOutput
                tensor_names = prev_op_in_port.get_tensor_names()

                # Remove tensor info from data node
                prev_op_in_port.remove_tensor_names()

                fake_output.in_port(0).get_connection().set_destination(add.in_port(0))
                fake_output.out_port(0).get_connection().set_source(add.out_port(0))

                # Move tensor names to Add op, which replaces FakeOutput
                if len(tensor_names) > 0:
                    add.out_port(0).add_tensor_names(tensor_names)

            else:
                result_in_port = fake_output.out_port(0).get_destination()
                result_in_port.disconnect()
                fake_output.in_port(0).get_connection().set_destination(result_in_port)
                rename_nodes([(fake_output, name + '/TBD'), (producer, name)])

    def find_and_replace_pattern(self, graph: Graph):
        for roll_node in graph.get_op_nodes(op='Roll'):
            if not roll_node.in_port(2).disconnected():
                return
            node_name = roll_node.soft_get('name', roll_node.id)

            # reshape to 1d tensor
            reshape_to_1d = create_op_node_with_second_input(
                graph, Reshape, int64_array([-1]),
                {'name': node_name + '/reshape'})
            roll_node.in_port(0).get_connection().insert_node(reshape_to_1d)

            # add zero const as axes input to roll
            const_zero = Const(graph, {
                'value': int64_array([0]),
                'name': node_name + '/axes'
            }).create_node()
            const_zero.out_port(0).connect(roll_node.in_port(2))

            # reshape to original shape
            shape_of = Shape(graph, {
                'name': node_name + '/shape_of'
            }).create_node()
            reshape_to_1d.in_port(0).get_connection().add_destination(
                shape_of.in_port(0))
            reshape_to_orig_shape = Reshape(graph, {}).create_node()
            rename_nodes([(roll_node, node_name + '/roll'),
                          (reshape_to_orig_shape, node_name)])
            shape_of.out_port(0).connect(reshape_to_orig_shape.in_port(1))
            roll_node.out_port(0).get_connection().insert_node(
                reshape_to_orig_shape)
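Sketch of the rewiring performed above (node names hypothetical):

# before:  data -> Roll(shift)                                         # axes input (port 2) not connected
# after :  data -> Reshape([-1]) -> Roll(shift, axes=[0]) -> Reshape(ShapeOf(data))
# i.e. the input is flattened, rolled along axis 0, and then restored to its
# original shape, which is taken by ShapeOf from the same input.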
Example #10
    def add_reshapes_for_tf_subgraph_calls(graph: Graph):
        """
        Input and output tensors of the TFCustomSubgraphCall must be 4D because IE layer accepts and produces only 4D
        tensors. This function adds reshape operations where it is necessary.
        :param graph: graph to operate on.
        :return: None.
        """
        for src_node_name, dst_node_name, edge_attrs in list(
                graph.edges(data=True)):
            src_node = Node(graph, src_node_name)
            dst_node = Node(graph, dst_node_name)
            if dst_node.kind == 'op' and dst_node.has_valid('type') and dst_node.type == 'TFCustomSubgraphCall' and \
                    src_node.has_valid('shape') and len(src_node.shape) != 4:
                log.info(
                    "There is an data tensor of shape '{}' which goes into '{}' node"
                    .format(src_node.shape, dst_node.type))
                CustomSubgraphCall.add_reshape_before_op_node(
                    graph, src_node_name, dst_node_name, edge_attrs)

        for node in graph.get_op_nodes(op='TFCustomSubgraphCall'):
            for index, data_node in node.out_nodes().items():
                real_dims_count = len(data_node.shape)
                if real_dims_count != 4:
                    log.info(
                        "There is an data tensor of shape '{}' with real dims count '{}' which goes out of '{}' "
                        "node".format(data_node.shape, real_dims_count,
                                      node.name))
                    CustomSubgraphCall.add_reshape_after_data_node(
                        graph, data_node.id)

                    # need to update shape of the op so IE generates XML with 4D tensors
                    out_shape = CustomSubgraphCall.make_shape_4d(
                        data_node['shape'])

                    data_node['shape'] = out_shape
Example #11
    def find_and_replace_pattern(self, graph: Graph):
        fifo_qd_shapes = defaultdict(list)
        for node in graph.get_op_nodes():
            if node.op not in ["QueueDequeue", "QueueDequeueV2"]:
                continue

            new_inputs = ""
            fifo_qd_name = node.soft_get('name', node.id)
            for port_idx, port in node.out_ports().items():
                if port.disconnected():
                    continue
                if not np_data_type_to_precision(
                        node.types[port_idx]) in SUPPORTED_DATA_TYPES:
                    raise Error("Data type {} is not supported for the"
                                "node {}".format(node.types[port_idx],
                                                 fifo_qd_name))

                fifo_qd_shapes[fifo_qd_name].append(
                    dict(shape=node.shapes[port_idx],
                         out=port_idx,
                         data_type=node.types[port_idx]))
                new_inputs += "{}:{}, ".format(fifo_qd_name, port_idx)

            log.error(
                "Found TF {} operation in the model. "
                "PLEASE NOTE, the model will contain new input(s) ".format(
                    node.op) + new_inputs +
                "created due to automatically triggered pruning transformation for this operation.",
                extra={'is_warning': True})

        add_input_ops(graph, fifo_qd_shapes, True)
Example #12
    def find_and_replace_pattern(self, graph: Graph):
        for node in graph.get_op_nodes(type='StridedSlice'):
            StridedSliceNormalizer.normalize_strided_slice(graph, node)
            PermuteAttrs.create_permute_attrs(
                node,
                attrs=[
                    ('begin_mask',
                     'input:0'),  # though it actually depends on slice_rank
                    ('end_mask', 'input:0'),
                    ('new_axis_mask', 'input:0'),
                    ('shrink_axis_mask', 'input:0'),
                    ('ellipsis_mask', 'input:0')
                ])

            # StridedSliceNormalizer inserted nodes that changed original begin, end, and strides data nodes
            # Until now it was not possible to set correct permutations
            PermuteInputs().set_input_permutation(node.in_node(1), node,
                                                  'input:1', 'slice',
                                                  'dim_size')
            PermuteInputs().set_input_permutation(node.in_node(2), node,
                                                  'input:2', 'slice',
                                                  'dim_size')
            if node.is_in_port_connected(3):
                PermuteInputs().set_input_permutation(node.in_node(3), node,
                                                      'input:3', 'slice',
                                                      'dim_size')

            # If there are new_axis_mask or shrink_axis_mask then StridedSlice should be performed in the
            # original layout, same as for Squeeze, Unsqueeze, Reshape, Gather
            if np.count_nonzero(node['new_axis_mask']) > 0 or np.count_nonzero(
                    node['shrink_axis_mask']) > 0:
                node['reinterp_shape'] = True
                node['nchw_layout'] = True
Example #13
    def find_and_replace_pattern(self, graph: Graph):
        for node in graph.get_op_nodes(op='AttributedVariadicSplit'):
            name = node.soft_get('name', node.id)

            axis = node.soft_get('axis', None)
            assert axis is not None, \
                'AttributedVariadicSplit should have the `axis` parameter set, but it is not set for node {}'.format(name)

            size_splits = node.soft_get('size_splits', None)
            assert size_splits is not None, \
                'AttributedVariadicSplit should have the `size_splits` parameter set, but it is not set for node {}'.format(name)

            split = create_op_with_const_inputs(
                graph, VariadicSplit, {
                    1: np.int64(axis),
                    2: size_splits
                }, {
                    'name': name + '/VariadicSplit',
                    'out_ports_count': len(size_splits)
                })

            for idx, port in node.out_ports().items():
                port.get_connection().set_source(split.out_port(idx))

            node.in_port(0).get_connection().set_destination(split.in_port(0))
            graph.remove_node(node.id)
Example #14
    def find_and_replace_pattern(self, graph: Graph):
        for attr_pad in graph.get_op_nodes(op='AttributedPad'):
            # save the original node name to use it in the new Pad op instance
            original_name = attr_pad.soft_get('name', attr_pad.id)

            new_pad = Pad(graph, {
                'mode': attr_pad.soft_get('mode', None),
            }).create_node()
            rename_nodes([(attr_pad, original_name + '/to_be_removed'),
                          (new_pad, original_name)])

            attr_pad.in_port(0).get_connection().set_destination(
                new_pad.in_port(0))
            new_pad.in_port(1).connect(
                Const(graph, {
                    'value': attr_pad.pads[:, 0]
                }).create_node().out_port(0))
            new_pad.in_port(2).connect(
                Const(graph, {
                    'value': attr_pad.pads[:, 1]
                }).create_node().out_port(0))
            if attr_pad.soft_get('mode') == 'constant':
                # create Constant node of proper data type (equal to the data type of the Pad first input)
                convert_pad_value = create_op_with_const_inputs(
                    graph, ConvertLike, {0: attr_pad.fill_value},
                    {'name': original_name + '/pad_value_convert'})
                convert_pad_value.in_port(1).connect(
                    new_pad.in_port(0).get_source())
                new_pad.in_port(3).connect(convert_pad_value.out_port(0))

            attr_pad.out_port(0).get_connection().set_source(
                new_pad.out_port(0))
            graph.remove_node(attr_pad.id)
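Sketch of the subgraph produced above from the attribute-carrying AttributedPad:

# Pad(data,
#     pads_begin=Const(attr_pad.pads[:, 0]),
#     pads_end=Const(attr_pad.pads[:, 1]),
#     pad_value=ConvertLike(Const(fill_value), data))  # 4th input is added only for mode == 'constant'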
Example #15
    def find_and_replace_pattern(self, graph: Graph):
        for node in graph.get_op_nodes(op='Interpolate', version='opset1'):
            transformation_mode = 'align_corners' if int(
                node.soft_get('align_corners', 0)) else 'half_pixel'
            interpolate1_name = node.soft_get('name', node.id)
            interpolate4 = create_op_with_const_inputs(
                graph, Interpolate, {
                    2: mo_array([1.0, 1.0]),
                    3: int64_array(node.axes)
                }, {
                    'mode': node.mode,
                    'antialias': node.antialias,
                    'coordinate_transformation_mode': transformation_mode,
                    'pads_begin': correct_pad(node.soft_get('pads_begin', 0)),
                    'pads_end': correct_pad(node.soft_get('pads_end', 0)),
                    'nearest_mode': 'round_prefer_floor',
                    'cube_coeff': -0.75,
                    'shape_calculation_mode': 'sizes',
                    'version': 'opset4',
                    'in_ports_count': 4,
                })

            interpolate1_input_connection = node.in_port(0).get_connection()
            interpolate1_input_connection.set_destination(
                interpolate4.in_port(0))

            sizes_connection = node.in_port(1).get_connection()
            sizes_connection.set_destination(interpolate4.in_port(1))

            node.out_port(0).get_connection().set_source(
                interpolate4.out_port(0))
            rename_nodes([(node, interpolate1_name + '/delete'),
                          (interpolate4, interpolate1_name)])
Example #16
    def fifo_queue_analysis(cls, graph: Graph, inputs_desc: dict):
        """
        The FIFOQueue with QueueDequeue has a separate input that specifies the size of the batch to extract from the
        queue. This input is redundant and should be removed from the model analysis output.
        """
        inputs_to_ignore = set()
        for fifo_queue in graph.get_op_nodes(op='FIFOQueueV2'):
            if len(fifo_queue.get_outputs({'out': 0})) != 1:
                log.debug(
                    'The FIFOQueue operation "{}" has more than one consumer'.
                    format(fifo_queue.id))
                continue
            queue_deque = fifo_queue.out_node(0)
            if queue_deque.op in [
                    'QueueDequeueMany', 'QueueDequeueManyV2',
                    'QueueDequeueUpTo', 'QueueDequeueUpToV2'
            ]:
                queue_deque_input_1 = queue_deque.in_node(1)
                if queue_deque_input_1.op in [
                        'Parameter', 'PlaceholderWithDefault'
                ]:
                    log.debug(
                        'Adding node "{}" to placeholder ignore list'.format(
                            queue_deque_input_1.id))
                    inputs_to_ignore.add(queue_deque_input_1.id)

                # create an input for each QueueDequeue output port
                for port_ind in range(len(queue_deque.out_nodes())):
                    inputs_desc["{}:{}".format(queue_deque.id, port_ind)] = {
                        'shape': fifo_queue.shapes[port_ind].tolist(),
                        'value': None,
                        'data_type': fifo_queue.types[port_ind]
                    }
        return inputs_to_ignore
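Illustrative result of the analysis above (node ids, shape and data type are hypothetical):

# inputs_desc == {'QueueDequeueManyV2_1:0': {'shape': [1, 224, 224, 3],
#                                            'value': None,
#                                            'data_type': np.float32}}
# inputs_to_ignore == {'batch_size_placeholder'}  # the redundant batch-size input of QueueDequeue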
Example #17
    def find_and_replace_pattern(self, graph: Graph):
        for node in graph.get_op_nodes(type='StridedSlice'):
            StridedSliceNormalizer.normalize_strided_slice(graph, node)
            PermuteAttrs.create_permute_attrs(
                node,
                attrs=[
                    ('begin_mask',
                     'input:0'),  # though it actually depends on slice_rank
                    ('end_mask', 'input:0'),
                    ('new_axis_mask', 'input:0'),
                    ('shrink_axis_mask', 'input:0'),
                    ('ellipsis_mask', 'input:0')
                ])

            # StridedSliceNormalizer inserted nodes that changed original begin, end, and strides data nodes
            # Until now it was not possible to set correct permutations
            PermuteInputs().set_input_permutation(node.in_node(1), node,
                                                  'input:1', 'slice',
                                                  'dim_size')
            PermuteInputs().set_input_permutation(node.in_node(2), node,
                                                  'input:2', 'slice',
                                                  'dim_size')
            if node.is_in_port_connected(3):
                PermuteInputs().set_input_permutation(node.in_node(3), node,
                                                      'input:3', 'slice',
                                                      'dim_size')
Example #18
def send_shapes_info(framework: str, graph: Graph):
    """
    This function sends information about model input shapes.
    :param framework: framework name.
    :param graph: model graph.
    """
    shapes = []
    for node in graph.get_op_nodes():
        op_type = node.soft_get('type', None)
        if op_type == 'Parameter':
            if 'shape' in node:
                shapes.append(node['shape'])
    t = tm.Telemetry()

    if shapes:
        shape_str = ""
        is_partially_defined = "0"
        for shape in shapes:
            shape_str += (np.array2string(int64_array(unmask_shape(shape)))
                          if shape is not None else "Undefined") + ","
            if not is_fully_defined(shape):
                is_partially_defined = "1"
        message_str = "{fw:" + framework + ",shape:\"" + shape_str[:-1] + "\"}"
        t.send_event('mo', 'input_shapes', message_str)
        t.send_event(
            'mo', 'partially_defined_shape', "{partially_defined_shape:" +
            is_partially_defined + ",fw:" + framework + "}")
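Example of the payloads built above for a hypothetical 'tf' model with one fully defined input [1, 224, 224, 3] (exact spacing follows np.array2string):

# 'input_shapes'            -> '{fw:tf,shape:"[  1 224 224   3]"}'
# 'partially_defined_shape' -> '{partially_defined_shape:0,fw:tf}'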
Example #19
    def find_and_replace_pattern(self, graph: Graph):
        for loop_node in graph.get_op_nodes(op='Loop'):
            loop_name = loop_node.soft_get('name', loop_node.id)
            body_graph = loop_node['body']
            body_pattern = TensorListOutputConcatenation.get_body_pattern()
            internal_matches = find_subgraph_match_to_pattern(
                body_graph, body_pattern)

            for internal_match in internal_matches:
                # check if EmptyTensorList from the main graph is connected with Parameter node from the body graph
                # that is assigned for storing intermediate output results of While Loop. If yes, the transformation
                # detects intermediate outputs concatenation by this port and can use Loop axis attribute
                reserve_node = Loop.get_external_nodes_by_internal_id(
                    loop_node, internal_match['container'].internal_layer_id)
                reserve_node = reserve_node[0] if (
                    len(reserve_node) == 1
                    and reserve_node[0].op == 'EmptyTensorList') else None
                if reserve_node is None:
                    log.info(
                        "A sub-graph around the loop node {} does not match "
                        "TensorFlow 2 EmptyTensorList->TensorListPushBack pattern for intermediate "
                        "outputs concatenation".format(loop_name))
                    continue

                external_match = {'while': loop_node, 'reserve': reserve_node}
                # check that back edges connect Parameter node (or container with intermediate output results)
                # and concatenation result produced by TensorListPushBack node
                if Loop.back_edge_exists(
                        loop_node.back_edges,
                        internal_match['concatenation_result'].
                        internal_layer_id,
                        internal_match['container'].internal_layer_id):
                    TensorListOutputConcatenation.transform_tensor_list_output_concatenation(
                        external_match, internal_match)
Example #20
    def find_and_replace_pattern(self, graph: Graph):
        for node in graph.get_op_nodes(op='VariadicSplit',
                                       swap_axis_and_split_size_inputs=True):
            axis_src = node.in_port(2).get_source()
            node.in_port(2).disconnect()
            node.in_port(1).get_connection().set_destination(node.in_port(2))
            node.in_port(1).connect(axis_src)
Example #21
    def find_and_replace_pattern(self, graph: Graph):
        for node in graph.get_op_nodes(op='Split', input_port=1):
            axis_src = node.in_port(0).get_source()
            node.in_port(0).disconnect()
            node.in_port(1).get_connection().set_destination(node.in_port(0))
            node.in_port(1).connect(axis_src)
            del node['input_port']

    def find_and_replace_pattern(self, graph: Graph):
        for ctc_greedy_decoder_tf in graph.get_op_nodes(
                op='CTCGreedyDecoderSeqLen', output_sparse_format=True):
            ctc_greedy_decoder_tf_name = ctc_greedy_decoder_tf.soft_get(
                'name', ctc_greedy_decoder_tf.id)

            # TF CTCGreedyDecoder has 4 output tensors. If any of them is connected to a non-Result operation,
            # then the transformation is not applicable
            for port_num in ctc_greedy_decoder_tf.out_ports():
                if not ctc_greedy_decoder_tf.out_port(port_num).disconnected()\
                        and ctc_greedy_decoder_tf.out_port(port_num).get_destination().node.soft_get('op') != 'Result':
                    return

            # If the first or second output is not connected to a Result operation,
            # create a Result operation and connect it to the corresponding output
            if ctc_greedy_decoder_tf.out_port(0).disconnected():
                first_result = Result(
                    graph, {
                        'name': ctc_greedy_decoder_tf_name + '/decoded_classes'
                    }).create_node()
                ctc_greedy_decoder_tf.out_port(0).connect(
                    first_result.in_port(0))

            if ctc_greedy_decoder_tf.out_port(1).disconnected():
                second_result = Result(graph, {
                    'name':
                    ctc_greedy_decoder_tf_name + '/seq_lengths_output'
                }).create_node()
                ctc_greedy_decoder_tf.out_port(1).connect(
                    second_result.in_port(0))

            # To normalize the input, the data needs to be transposed from [T, N, C] to [N, T, C],
            # which is the layout supported by the CTCGreedyDecoderSeqLen op.
            log.warning(
                'Found TF CTCGreedyDecoder operation at the end of network. '
                'PLEASE NOTE, appropriate network output operation CTCGreedyDecoderSeqLen {} '
                'will have dense format, not sparse format!'.format(
                    ctc_greedy_decoder_tf_name))
            ctc_data_permute = create_op_with_const_inputs(
                graph, Transpose, {1: int64_array([1, 0, 2])},
                {'name': ctc_greedy_decoder_tf_name + '/ctc_data_permute'})

            assert ctc_greedy_decoder_tf.has_valid('merge_repeated'), \
                'The CTCGreedyDecoderSeqLen node "{}" misses "merge_repeated" attribute'.format(
                    ctc_greedy_decoder_tf_name)

            ctc_greedy_decoder_tf.in_port(0).get_source().connect(
                ctc_data_permute.in_port(0))
            ctc_greedy_decoder_tf.in_port(0).disconnect()
            ctc_data_permute.out_port(0).connect(
                ctc_greedy_decoder_tf.in_port(0))

            del ctc_greedy_decoder_tf['output_sparse_format']

            for port_num in [2, 3]:  # MO CTCGreedyDecoderSeqLen may have 2 outputs
                if port_num in ctc_greedy_decoder_tf.out_ports():
                    if not ctc_greedy_decoder_tf.out_port(
                            port_num).disconnected():
                        ctc_greedy_decoder_tf.out_port(port_num).disconnect()
Example #23
    def find_and_replace_pattern(self, graph: Graph):
        for loop_node in graph.get_op_nodes(op='Loop'):
            loop_name = loop_node.soft_get('name', loop_node.id)
            body_graph = loop_node['body']
            body_pattern = MapFNOutputConcatenation.get_body_pattern()
            internal_matches = find_subgraph_match_to_pattern(
                body_graph, body_pattern)

            for internal_match in internal_matches:
                # check if TensorListReserve from the main graph is connected with Parameter node from the body graph
                # that is assigned for storing intermediate output results of While Loop. If yes, the transformation
                # detects intermediate outputs concatenation by this port and can use Loop axis attribute
                reserve_node = Loop.get_external_nodes_by_internal_id(
                    loop_node, internal_match['container'].internal_layer_id)
                reserve_node = reserve_node[0] if (
                    len(reserve_node) == 1
                    and reserve_node[0].op == 'TensorListReserve') else None
                if reserve_node is None:
                    log.info(
                        "A sub-graph around the loop node {} does not match "
                        "TensorFlow 2 MapFN pattern for intermediate outputs concatenation"
                        .format(loop_name))
                    continue
                stack_node = Loop.get_external_nodes_by_internal_id(
                    loop_node,
                    internal_match['concatenation_result'].internal_layer_id)
                stack_node = stack_node[0] if len(stack_node) == 1 else None

                if stack_node is None:
                    log.info(
                        "A sub-graph around the loop node {} does not match "
                        "TensorFlow 2 MapFN pattern for intermediate outputs concatenation"
                        .format(loop_name))
                    continue

                # skip StopGradient node if it exists between While loop output port and TensorListStack operation
                stack_node = skip_nodes_by_condition(
                    stack_node, lambda x: x.has_and_set('identity'), True)
                stack_node = stack_node if stack_node.op == 'TensorListStack' else None
                if stack_node is None:
                    log.info(
                        "A sub-graph around the loop node {} does not match "
                        "TensorFlow 2 MapFN pattern for intermediate outputs concatenation"
                        .format(loop_name))
                    continue

                external_match = {
                    'while': loop_node,
                    'reserve': reserve_node,
                    'stack': stack_node
                }
                # check that back edges connect Parameter node (or container with intermediate output results)
                # and concatenation result produced by TensorListSetItem node
                if Loop.back_edge_exists(loop_node.back_edges, internal_match['concatenation_result'].internal_layer_id,
                                         internal_match['container'].internal_layer_id) and \
                        Loop.back_edge_exists(loop_node.back_edges,
                                              internal_match['increment_iteration_result'].internal_layer_id,
                                              internal_match['current_iteration'].internal_layer_id):
                    MapFNOutputConcatenation.transform_map_fn_output_concatenation(
                        external_match, internal_match)
Example #24
    def find_and_replace_pattern(self, graph: Graph):
        for loop_node in graph.get_op_nodes(op='Loop'):
            loop_name = loop_node.soft_get('name', loop_node.id)
            body_graph = loop_node['body']
            body_pattern = MapFNInputSlicing.get_body_pattern()
            internal_matches = find_subgraph_match_to_pattern(body_graph, body_pattern)

            for internal_match in internal_matches:
                # check if TensorListGetItem from the body graph is connected with TensorListFromTensor
                # from the main graph. If yes, the transformation detects input slicing by this port
                # and can use Loop axis attribute
                unstack_node = Loop.get_external_nodes_by_internal_id(loop_node,
                                                                      internal_match['tensor_list'].internal_layer_id)
                unstack_node = unstack_node[0] if (len(unstack_node) == 1
                                                   and unstack_node[0].op == 'TensorListFromTensor') else None
                if unstack_node is None:
                    log.info("A sub-graph around the loop node {} does not match "
                             "TensorFlow 2 MapFN pattern for input slicing".format(loop_name))
                    continue

                external_match = {'while': loop_node,
                                  'unstack': unstack_node}
                # check that back edges connect correct Parameter and Result nodes in the body
                # check connections between body input ports and external inputs ports of Loop node
                if Loop.back_edge_exists(loop_node.back_edges,
                                         internal_match['increment_iteration_result'].internal_layer_id,
                                         internal_match['current_iteration'].internal_layer_id):
                    MapFNInputSlicing.transform_map_fn_input_slicing(external_match, internal_match)
Example #25
    def find_and_replace_pattern(self, graph: Graph):
        add_output_ops(graph,
                       graph.graph['packed_outputs'],
                       inputs=graph.graph['user_shapes'])

        # To keep tensor name information for output nodes, fake outputs are added
        # to the graph during model loading. The code below removes these fake outputs
        # and moves the tensor name information to the output->Result edge.
        for node in graph.get_op_nodes(needs_removal=True):
            fw_info = None
            in_node = None
            for in_port_idx in node.in_edges():
                node_idx = node.in_edge(in_port_idx)['in']
                if node_idx in node.in_nodes():
                    in_node = node.in_node(node_idx)
                    fw_info_value = get_edge_attribute_between_nodes(
                        in_node, node, 'fw_tensor_debug_info')
                    if fw_info_value:
                        fw_info = fw_info_value
                        break
            graph.erase_node(node)

            if fw_info is not None and in_node is not None:
                for out_idx in in_node.out_nodes():
                    set_edge_attribute_between_nodes(in_node,
                                                     in_node.out_node(out_idx),
                                                     'fw_tensor_debug_info',
                                                     fw_info)
Example #26
    def iterator_get_next_analysis(cls, graph: Graph, inputs_desc: dict):
        message = None
        op_nodes = graph.get_op_nodes(op='IteratorGetNext')

        params = ''
        for iter_get_next in op_nodes:
            for port in iter_get_next.out_nodes().keys():
                inputs_desc['{}:{}'.format(
                    iter_get_next.soft_get('name', iter_get_next.id),
                    port)] = {
                        'shape': iter_get_next.shapes[port].tolist(),
                        'value': None,
                        'data_type': iter_get_next.types[port]
                    }
                if params != '':
                    params = params + ','
                shape = str(iter_get_next.shapes[port].tolist()).replace(
                    ',', '')
                params = params + '{}:{}{}'.format(
                    iter_get_next.soft_get('name', iter_get_next.id), port,
                    shape)

        if len(op_nodes):
            message = 'It looks like there is IteratorGetNext as input\n' \
                      'Run the Model Optimizer without --input option \n' \
                      'Otherwise, try to run the Model Optimizer with:\n\t\t--input "{}"\n'.format(params)
        return message
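Example of the hint assembled above for a hypothetical IteratorGetNext node with two outputs (commas are stripped from the printed shape lists):

# --input "IteratorGetNext:0[1 224 224 3],IteratorGetNext:1[1 1000]"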
Example #27
def convert_parameters_data_type(graph: Graph, data_type_str: str):
    inputs = graph.get_op_nodes(op='Parameter')
    data_type = data_type_str_to_np(data_type_str)
    user_defined_data_types = graph.graph[
        'user_shapes'] if 'user_shapes' in graph.graph else None
    for input in inputs:
        user_defined_type = None
        name = input.soft_get('initial_node_name', input.id)

        # override data type for Parameter specified by the user. This is a workaround for the issue in the
        # extensions.middle.ChangePlaceholderTypes transformation which has an incorrect condition and always overrides
        # Parameter data type to np.float32. When the transformation is fixed the code below must be updated
        if user_defined_data_types is not None and name in user_defined_data_types:
            for desc in user_defined_data_types[name]:
                if 'port' in desc and desc[
                        'port'] is None:  # neither input nor output port specified
                    user_defined_type = desc.get('data_type', None)
                else:  # need to check the particular port the Parameter was created for
                    p_name = get_new_placeholder_name(
                        name, 'out' in desc,
                        desc['out'] if 'out' in desc else desc['in'])
                    if p_name == input.soft_get('name'):
                        user_defined_type = desc.get('data_type', None)
        if user_defined_type is not None:
            log.info('Overriding Parameter node {} data type to {}'.format(
                name, user_defined_type))
            input['data_type'] = user_defined_type
            input.out_port(0).set_data_type(user_defined_type, True)
        elif not input.has_valid('data_type') or input.data_type == np.float32:
            input['data_type'] = data_type
            input.out_port(0).set_data_type(data_type, True)
        else:
            log.info('Do not change data type for node {}'.format(
                input.soft_get('name')))
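Decision summary for each Parameter, as implemented above:

# user specified a data type for this input  -> use the user-defined type
# data_type is missing or np.float32         -> override with the requested IR data type
# any other existing type (e.g. int32/int64) -> keep it unchanged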
Example #28
    def find_and_replace_pattern(self, graph: Graph):
        for node in graph.get_op_nodes(op='MVNCaffe'):
            node_name = node.soft_get('name', node.id)

            start_axis = 2
            if node['across_channels'] == 1:
                start_axis = 1

            rank = Rank(graph, {'name': node_name + '/Rank'}).create_node()

            # create range of axes based on `start_axis` and rank of input
            rng = create_op_with_const_inputs(graph, Range, {
                0: int64_array(start_axis),
                2: int64_array(1)
            }, {
                'name': node_name + '/Range',
                'output_type': np.int64
            })
            rng.in_port(1).connect(rank.out_port(0))

            new_mvn = MVN(
                graph, {
                    'eps': node.soft_get('eps', 1e-9),
                    'eps_mode': 'inside_sqrt',
                    'normalize_variance': node.soft_get(
                        'normalize_variance', 1)
                }).create_node([node.in_port(0).get_source().node, rng])
            new_mvn.in_port(0).get_connection().add_destination(
                rank.in_port(0))
            node.out_port(0).get_connection().set_source(new_mvn.out_port(0))
            rename_nodes([(node, node_name + '/tbd'), (new_mvn, node_name)])

            graph.remove_node(node.id)

    def find_and_replace_pattern(self, graph: Graph):
        ir_data_type = data_type_str_to_np(graph.graph['cmd_params'].data_type)

        for node in graph.get_op_nodes():
            if node.op in operations_with_data_type_attributes:
                dst_type = operations_with_data_type_attributes[
                    node.op]['attr_name']
                node_name = node.soft_get('name', node.id)
                assert node.has_valid(
                    dst_type), '{} attribute is missing for node {}'.format(
                        dst_type, node_name)

                final_type = None
                if node[dst_type] == np.float64:
                    final_type = np.float32

                if node[dst_type] in [np.float32, np.float64] and ir_data_type == np.float16 and \
                        not node.has_and_set('returns_shape_value'):
                    final_type = np.float16
                elif node.has_and_set('returns_shape_value'
                                      ) and node[dst_type] == np.float16:
                    # return back FP32 for all nodes with shape values
                    final_type = np.float32

                if final_type is not None:
                    log.warning(
                        'Change data type from {} to {} for node {}'.format(
                            node[dst_type], final_type, node_name))
                    node[dst_type] = final_type

                if final_type == np.float16:
                    assert_that_is_castable_to_fp16(node)
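Summary of how the destination type attribute is changed above:

# np.float64                                              -> np.float32
# np.float32/np.float64, IR type FP16, not a shape value  -> np.float16 (then checked for FP16 castability)
# np.float16 on a node that returns a shape value         -> np.float32 (shape values are kept in FP32)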
Example #30
    def find_and_replace_pattern(self, graph: Graph):
        for concat in graph.get_op_nodes(type='Concat'):
            for in_port in concat.in_ports().values():
                if not in_port.disconnected():
                    shape = in_port.data.get_shape()
                    assert shape is not None
                    if 0 in shape:
                        concat.delete_input_port(in_port.idx)

            connected_ports = [
                port for port_idx, port in sorted(concat.in_ports().items())
                if not port.disconnected()
            ]
            assert len(connected_ports), 'Concat "{}" has no inputs after removing inputs with 0 dimensions' \
                                         ''.format(concat.soft_get('name', concat.id))

            max_port_index = max(
                [port_idx for port_idx in concat.in_ports().keys()])
            # re-connect input ports sequentially and remove all not used
            port_idx_to_connect = 0
            for port_idx in range(max_port_index + 1):
                if concat.is_in_port_connected(port_idx):
                    if port_idx != port_idx_to_connect:
                        concat.add_input_port(port_idx_to_connect,
                                              skip_if_exist=True)
                        concat.in_port(
                            port_idx).get_connection().set_destination(
                                concat.in_port(port_idx_to_connect))
                    port_idx_to_connect += 1
                elif port_idx in concat.in_ports():
                    concat.delete_input_port(port_idx)
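Illustrative port compaction for a hypothetical Concat with four inputs where input 1 had a zero dimension:

# before: in ports {0: A, 1: <deleted, zero-sized>, 2: B, 3: C}
# after : in ports {0: A, 1: B, 2: C}  # data order preserved, indices re-packed sequentially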