def pass_rc_through_zero_port_only(node: Node, reverse_channels: Node):
        r"""
        Move a ReverseChannels operation from before `node` to after it, where
        both are connected exclusively through port 0.

        BEFORE                          AFTER

          previous_op
              |
        ReverseChannels  previous_op     previous_op  previous_op
                     \     /                      \     /
                       Node                         Node
                                                      |
                                               ReverseChannels

        Returns a boolean value indicating whether we should continue
        propagating the current ReverseChannels operation down or not.
        """
        # The swap is only possible when all three relevant ports are connected:
        # ReverseChannels' in/out port 0 and the node's out port 0.
        if reverse_channels.is_in_port_connected(0) and reverse_channels.is_out_port_connected(0) \
                and node.is_out_port_connected(0):
            # detaching reverse_channels node from the graph: its consumers are
            # re-attached directly to its producer, then its input is dropped
            reverse_channels.out_port(0).get_connection().set_source(
                reverse_channels.in_port(0).get_connection().get_source())
            reverse_channels.in_port(0).disconnect()

            # re-insert ReverseChannels after the node: the node's consumers now
            # read from ReverseChannels, and the node feeds ReverseChannels
            node.out_port(0).get_connection().set_source(
                reverse_channels.out_port(0))
            node.out_port(0).disconnect()
            node.out_port(0).connect(reverse_channels.in_port(0))
            return True
        return False
Example no. 2
0
    def infer(node: Node):
        """
        Shape inference for SparseFillEmptyRows.

        Requires four inputs; the dense shape (input 2) must be a constant of
        size 2 and the default value (input 3) must be a scalar. Output shapes:
        port 0 -> [prod(shape), 2], port 1 -> [prod(shape)], port 2 -> [shape[0]].
        """
        assert len(node.in_nodes()) == 4

        # shape inference needs the dense shape (input 2) as a known constant
        shape = node.in_node(2)
        assert shape.value is not None and shape.value.size == 2, \
            "SparseFillEmptyRows is supported only with constant shape value"
        shape_value = int64_array(shape.value)

        # the default value (input 3) must be rank-0
        default_value = node.in_node(3)
        assert default_value.shape is not None and len(default_value.shape) == 0, \
            "Default value for SparseFillEmptyRows must be scalar"

        # number of dense elements; dynamic when the shape is not fully known
        total = np.prod(shape_value) if is_fully_defined(shape_value) else dynamic_dimension_value

        if node.is_out_port_connected(0):
            # output indices
            node.out_port(0).data.set_shape([total, 2])
        if node.is_out_port_connected(1):
            # output values
            node.out_port(1).data.set_shape([total])
        if node.is_out_port_connected(2):
            # empty row indicator
            node.out_port(2).data.set_shape([shape_value[0]])
    def infer(node: Node):
        """
        Shape inference: validates logits, sequence-length and the optional
        blank-index inputs, then sets output 0 to [batch, time] and output 1
        to [batch].
        """
        node_name = node.soft_get('name', node.id)
        active_in_ports = [
            p for p in node.in_ports().values() if not p.disconnected()
        ]
        assert len(active_in_ports) in [2, 3], \
            "Incorrect number of inputs for {} node".format(node_name)

        logits_shape = node.in_port(0).data.get_shape()
        sequence_len_shape = node.in_port(1).data.get_shape()
        if len(node.in_nodes()) == 3:
            # optional third input selects the blank symbol; must be rank-1
            blank_index_shape = node.in_port(2).data.get_shape()
            assert len(blank_index_shape) == 1, \
                'Incorrect rank of blank_index for {} node'.format(node_name)

        # logits are rank-3 with batch first; sequence lengths are rank-1
        assert len(logits_shape) == 3, \
            'Incorrect rank of logits for {} node'.format(node_name)
        assert len(sequence_len_shape) == 1, \
            'Incorrect rank of sequence length tensor for {} node'.format(node_name)
        assert compatible_dims(logits_shape[0], sequence_len_shape[0]), \
            'Batch dimensions of input tensors must be the same for {} node'.format(node_name)

        batch_size, time_size = logits_shape[0], logits_shape[1]
        if node.is_out_port_connected(0):
            node.out_port(0).data.set_shape([batch_size, time_size])
        if node.is_out_port_connected(1):
            node.out_port(1).data.set_shape([batch_size])
Example no. 4
0
    def get_external_nodes_by_internal_id(loop_node: Node,
                                          internal_layer_id: int) -> list:
        """
        Get a list of nodes from the main graph that are connected with a node with internal_layer_id
        from the body graph

        :param loop_node: The Loop node
        :param internal_layer_id: Internal layer ID of the node in the body graph
        :return: A list of external nodes (from the main graph) that are connected with a node with
        internal_layer_id from the body graph
        """
        # an input-side match yields the single producer of that external port
        for record in loop_node.input_port_map:
            if record['internal_layer_id'] != internal_layer_id:
                continue
            port_id = record['external_port_id']
            if loop_node.is_in_port_connected(port_id):
                return [loop_node.in_port(port_id).get_source().node]

        # an output-side match yields every consumer of that external port
        for record in loop_node.output_port_map:
            if record['internal_layer_id'] != internal_layer_id:
                continue
            port_id = record['external_port_id']
            if loop_node.is_out_port_connected(port_id):
                return [
                    dest.node
                    for dest in loop_node.out_port(port_id).get_destinations()
                ]

        # no connected port maps to this internal layer
        return []
Example no. 5
0
    def infer(node: Node):
        """
        Shape inference for a 5-input LSTM sequence node.

        MO input edges:
            0 - x:       the sequence input, shape (timelen, batch_size, num_inputs)
            1 - w:       the weight matrix
            2 - b:       the bias vector
            3 - h_prev:  previous/initial hidden state
            4 - cs_prev: value of the initial cell state

        MO output edges:
            0 - cs: output data / output hidden states concatenated over the whole time sequence
            1 - h:  output cell states concatenated over the whole time sequence (optional)
        """
        assert len(node.in_nodes()) == 5
        assert len(node.out_nodes()) in [1, 2]

        mark_input_bins(node)

        x_shape = node.in_node(0).shape
        assert len(x_shape) == 3  # (timelen, batch_size, num_inputs)

        # both outputs share the shape of the input sequence
        result_shape = x_shape.copy()
        node.out_port(0).data.set_shape(result_shape)
        if node.is_out_port_connected(1):
            node.out_port(1).data.set_shape(result_shape)
    def replace_identityN(node: Node):
        """
        Split an IdentityN node into one Identity per connected in/out port
        pair, then fully disconnect the original node.
        """
        graph = node.graph
        name = node.soft_get('name', node.id)

        assert node.has_valid(
            'data_types'), 'IdentityN {} has no `data_types` attribute'.format(
                name)
        dtypes = node.data_types

        for idx, in_port in node.in_ports().items():
            # only rewire pairs where both sides are in use
            if not (node.is_in_port_connected(idx)
                    and node.is_out_port_connected(idx)):
                # ATTENTION section in the description above
                continue
            assert idx < len(
                dtypes
            ), 'IdentityN {} has inconsistent `data_types` attribute {}'.format(
                name, dtypes)
            attrs = {
                'name': '{}/{}_port'.format(name, idx),
                'data_type': dtypes[idx],
            }
            identity = Identity(graph, attrs).create_node()
            # splice the Identity between this port pair's producer and consumers
            in_port.get_connection().set_destination(identity.in_port(0))
            node.out_port(idx).get_connection().set_source(
                identity.out_port(0))

        # ATTENTION section in the description above
        for port in node.in_ports().values():
            port.disconnect()
        for port in node.out_ports().values():
            port.disconnect()
Example no. 7
0
    def insert_pre_processing(graph: Graph, input_node: Node,
                              node_mean_scale_values: dict,
                              preprocessing_name: str):
        """
        Insert a mean (Add of -mean) or scale (Mul of 1/scale) operation right
        after input_node, broadcasting the value along the features dimension.

        :param graph: the graph being transformed
        :param input_node: the input (Parameter) node to attach preprocessing to
        :param node_mean_scale_values: mapping with optional 'mean'/'scale'
               entries (the code calls .get() on it, so it is dict-like)
        :param preprocessing_name: either 'scale' or 'mean'
        """
        assert preprocessing_name in ['scale', 'mean']
        if node_mean_scale_values.get(preprocessing_name) is None:
            return
        user_value = node_mean_scale_values[preprocessing_name]
        # scale is applied as multiplication by 1/scale; mean as addition of -mean
        value = 1 / user_value if preprocessing_name == 'scale' else user_value * (
            -1)
        # identity element of the chosen op: 1 for Mul, 0 for Add
        optimize_value = int(preprocessing_name == 'scale')
        op = Mul if preprocessing_name == 'scale' else Add

        # skip insertion entirely when the op would be a no-op
        if all([x == optimize_value for x in value]):
            return
        assert input_node.has_valid('shape')
        features_dim_idx = get_features_dim(graph.graph['layout'],
                                            len(input_node.shape))
        # value must either match the features dimension or be broadcastable (size 1)
        assert compatible_dims(
            value.size, input_node.shape[features_dim_idx]) or value.size == 1

        # reshape value so it broadcasts along the features axis only
        shape = np.ones(len(input_node.shape), dtype=np.int64)
        shape[features_dim_idx] = value.size
        value = value.reshape(shape)

        name = input_node.soft_get('name',
                                   input_node.id) + '/' + preprocessing_name
        preprocessing = create_op_with_const_inputs(graph,
                                                    op=op,
                                                    port_value_dict={1: value},
                                                    op_attrs={'name': name})

        if input_node.is_out_port_connected(0) and len(
                input_node.out_port(0).get_destinations()) == 1:
            # There are models with pattern Parameter(uint8) -> Convert(float).
            # Adding mean/scale leads to the following:
            # Parameter(uint8) -> Mean/Scale -> Convert(float) which is incorrect.
            # To fix this mean and scale preprocessing node is inserted after Convert(float) node.
            out_node = input_node.out_port(0).get_destination().node
            convert_type = out_node.soft_get('dst_type')
            if out_node.soft_get('type') == "Convert" and (convert_type in [
                    np.float32, np.float16
            ]):
                input_node = out_node
                # keep the inserted constant's dtype consistent with Convert's output
                if convert_type != value.dtype:
                    new_value = value.astype(convert_type)
                    const_node = preprocessing.in_port(
                        1).get_connection().get_source().node
                    const_node['value'] = new_value

        for dst in input_node.out_port(0).get_destinations():
            if dst.node.soft_get('type') != 'ShapeOf':
                # After the insertion of additional operations model optimizer
                # should keep the link to the input layer. Parameter node in framework
                # should map to parameter node in IR.
                # For this reason 'fw_tensor_debug_info' should be kept in data node.
                dst.get_connection().set_source(preprocessing.out_port(0),
                                                "source")

        input_node.out_port(0).connect(preprocessing.in_port(0))
Example no. 8
0
    def infer(node: Node):
        """
        Propagate the shape of input 0 (and its value, when input 1 has a
        known value and input 0's value is defined) to both output ports.
        """
        assert len(node.in_nodes()) == 2
        data_node = node.in_node(0)
        selector = node.in_node(1)

        connected_ports = [i for i in range(2) if node.is_out_port_connected(i)]

        # both outputs mirror the input tensor's shape
        result_shape = shape_array(data_node.shape)
        for i in connected_ports:
            node.out_port(i).data.set_shape(result_shape)

        # value propagation is gated on the second input having a known value
        if selector.has_valid('value'):
            result_value = data_node.value
            if result_value is not None:
                for i in connected_ports:
                    node.out_port(i).data.set_value(result_value.copy())