Example #1
    def infer(node: Node):
        in_shape = node.in_port(0).data.get_shape()
        if in_shape.size != 4:
            raise Error(
                'TensorFlow DepthToSpace operation is supported for 4D \'NHWC\' input layout only. '
                'Current input shape is \'{}\''.format(in_shape))

        layout = node.graph.graph['layout']

        N = in_shape[get_batch_dim(layout, 4)]
        H = in_shape[get_height_dim(layout, 4)]
        W = in_shape[get_width_dim(layout, 4)]
        C = in_shape[get_features_dim(layout, 4)]

        block_size = node['block_size']
        if C is not dynamic_dimension and C % (block_size**2):
            raise Error(
                'Feature dimensions of input tensor of DepthToSpace operation have to be divisible by square '
                'of DepthToSpace \'block_size\' parameter. Input tensor shape = {}. Feature dimension = {}. '
                'block_size = {}'.format(in_shape, C, block_size))

        out_shape = shape_for_layout(layout,
                                     batch=N,
                                     features=C // (block_size * block_size),
                                     height=H * block_size,
                                     width=W * block_size)

        if is_fully_defined(in_shape) and is_fully_defined(
                out_shape) and np.prod(in_shape) != np.prod(out_shape):
            raise Error(
                'Number of input elements "{}" is not equal to number of output elements "{}" for node "{}"'
                ''.format(in_shape, out_shape, node.soft_get('name', node.id)))
        node.out_port(0).data.set_shape(out_shape)
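
For reference, the output-shape arithmetic above can be checked by hand. The snippet below is a hypothetical, self-contained illustration (the concrete shape values are made up) of how a 4D NHWC shape transforms under DepthToSpace with a given block_size:

# Hypothetical illustration of the DepthToSpace shape inference above;
# the concrete numbers are made up for the example.
N, H, W, C = 1, 4, 6, 16
block_size = 2
assert C % (block_size ** 2) == 0          # the divisibility check from infer()
out_shape = [N, H * block_size, W * block_size, C // (block_size ** 2)]
print(out_shape)                           # [1, 8, 12, 4]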
Example #2
    def infer(node: Node):
        assert len(node.in_nodes()) == 4

        # check that shape value is defined that is needed for shape inference
        shape = node.in_node(2)
        assert shape.value is not None and shape.value.size == 2, \
            "SparseFillEmptyRows is supported only with constant shape value"

        shape_value = np.array(shape.value, dtype=np.int64)

        # check that default value is scalar
        default_value = node.in_node(3)
        assert default_value.shape is not None and len(default_value.shape) == 0, \
            "Default value for SparseFillEmptyRows must be scalar"

        if node.is_out_port_connected(0):  # set a shape for output indices
            if is_fully_defined(shape_value):
                node.out_port(0).data.set_shape([np.prod(shape_value), 2])
            else:
                node.out_port(0).data.set_shape([dynamic_dimension_value, 2])
        if node.is_out_port_connected(1):  # set a shape for output values
            if is_fully_defined(shape_value):
                node.out_port(1).data.set_shape([np.prod(shape_value)])
            else:
                node.out_port(1).data.set_shape([dynamic_dimension_value])
        if node.is_out_port_connected(2):  # set a shape for empty row indicator
            node.out_port(2).data.set_shape([shape_value[0]])
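
The output shapes above follow directly from the constant dense shape value. A small hypothetical check (the dense shape [3, 4] is made up for the example):

import numpy as np

# Hypothetical illustration of the SparseFillEmptyRows output shapes above.
shape_value = np.array([3, 4], dtype=np.int64)
print([int(np.prod(shape_value)), 2])   # output indices shape: [12, 2]
print([int(np.prod(shape_value))])      # output values shape:  [12]
print([int(shape_value[0])])            # empty row indicator shape: [3]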
Example #3
def resolve_convolution_with_group(node: Node, group: int, ir_version: str):
    input_shape = node.in_port(0).data.get_shape()
    assert len(input_shape) in [3, 4, 5]

    weights_shape = node.in_port(1).data.get_shape()
    assert weights_shape is not None
    assert len(weights_shape) in [3, 4, 5]
    assert weights_shape[0] % group == 0

    if ir_version == 'V7':
        if weights_shape[0] == node.output:
            # weights are already in [G*O I X Y] format
            return
        new_shape = shape_array([node.output, -1, *weights_shape[2:]])
    elif ir_version == 'V10':
        # TODO rewrite this transformation to generate a shape-computing sub-graph. Ticket 62076
        I = input_shape[1]
        new_shape = shape_array(
            [group, node.output // group, I // group, *weights_shape[2:]])
        assert is_fully_defined(weights_shape[2:]) and is_fully_defined(I) and \
               np.prod(weights_shape) == np.prod(new_shape), 'Initial weights shape {}, grouped weights shape {}' \
                                                             ''.format(weights_shape, new_shape)
        del node['group']
        node['type'] = 'GroupConvolution'
    else:
        raise Error("Unknown IR version: {}".format(ir_version))

    reshape = create_op_node_with_second_input(node.graph, Reshape,
                                               int64_array(new_shape),
                                               {'override_output_shape': True})

    node.in_port(1).get_connection().insert_node(reshape)
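
As a sanity check of the V10 branch above, the regrouping keeps the total number of weight elements: weights of shape [G*O, I/G, kH, kW] are reshaped to [G, O, I/G, kH, kW]. A hypothetical example with made-up sizes:

import numpy as np

# Hypothetical illustration of the V10 weights regrouping above
# (group = 2, total output channels = 8, input channels per group = 3).
group, total_output, in_per_group = 2, 8, 3
weights_shape = (total_output, in_per_group, 3, 3)              # [G*O, I/G, kH, kW]
new_shape = (group, total_output // group, in_per_group, 3, 3)  # [G, O, I/G, kH, kW]
assert np.prod(weights_shape) == np.prod(new_shape)             # element count preserved
print(new_shape)                                                # (2, 4, 3, 3, 3)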
Example #4
    def is_fusable_reverse_sequence(node: Node):
        sequence_lengths = node.in_port(1).data.get_value()
        input_shape = node.in_port(0).data.get_shape()
        assert input_shape is not None

        seq_len = input_shape[node.seq_axis]
        if sequence_lengths is not None and is_fully_defined(
                sequence_lengths) and is_fully_defined(seq_len):
            return np.all(sequence_lengths == seq_len)
        else:
            # check that we take sequence_length from input shape based on ReverseV2ToReverseSequence transformation
            broadcast_node = node.in_port(1).get_source().node
            if broadcast_node.op != 'Broadcast':
                return False
            gather_node = broadcast_node.in_port(0).get_source().node
            if gather_node.op != "Gather" or \
                    (np.all(gather_node.in_port(2).data.get_value() != [0]) or
                     np.all(gather_node.in_port(1).data.get_value() != [node.seq_axis])):
                return False
            gather_node_2 = broadcast_node.in_port(1).get_source().node
            if gather_node_2.op != "Gather" or \
                    (np.all(gather_node_2.in_port(2).data.get_value() != [0]) or
                     np.all(gather_node_2.in_port(1).data.get_value() != [node.batch_axis])):
                return False
            shape_node = gather_node.in_port(0).get_source().node
            if shape_node.op != "ShapeOf":
                return False
            if shape_node.in_port(0).get_source().node != node.in_port(
                    0).get_source().node:
                return False

            return True
Example #5
    def iterations_count(loop_node: Node):
        """
        Try to determine the number of loop iterations. If the number is dynamic, dynamic_dimension_value is returned.

        :param loop_node: Loop operation node
        :return: number of iterations, or dynamic_dimension_value if the number depends on runtime values.
        """
        assert loop_node.soft_get('type') == 'Loop'

        if loop_node.is_in_port_connected(1):
            execution_condition = loop_node.in_port(1).data.get_value()
            if not is_fully_defined(
                    execution_condition):  # dynamic execution condition
                return dynamic_dimension_value
            execution_condition = execution_condition.item()
            if not execution_condition:  # 0 iterations
                return 0
        num_iterations = loop_node.in_port(0).data.get_value()
        if not is_fully_defined(num_iterations):
            return dynamic_dimension_value
        if num_iterations is not None:
            num_iterations = num_iterations.item(0)
            # in some ONNX models the num_iterations input is equal to max(int64) meaning dynamic number of iterations
            if num_iterations < 0 or num_iterations == np.iinfo(np.int64).max:
                return dynamic_dimension_value
        return num_iterations
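
The only non-obvious convention above is how a dynamic trip count is encoded in some ONNX models. A hypothetical check of that rule:

import numpy as np

# Hypothetical illustration: a trip count of -1 or max(int64) is treated as
# a dynamic number of iterations, mirroring the check in iterations_count().
for trip_count in (10, -1, np.iinfo(np.int64).max):
    is_dynamic = trip_count < 0 or trip_count == np.iinfo(np.int64).max
    print(trip_count, '->', 'dynamic' if is_dynamic else trip_count)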
Example #6
def multi_box_detection_infer(node: Node):
    loc_shape = node.in_node(0).shape
    conf_shape = node.in_node(1).shape
    prior_boxes_shape = node.in_node(2).shape
    node_name = node.soft_get('name', node.id)

    if loc_shape is None or conf_shape is None or prior_boxes_shape is None:
        raise Error(
            'Shapes for the Detection Output node "{}" are not defined'.format(
                node_name))

    prior_size = 4
    if node.has('normalized') and not node.normalized:
        prior_size = 5

    if is_fully_defined(
            prior_boxes_shape[-1]) and prior_boxes_shape[-1] % prior_size != 0:
        raise Error(
            'Size of the last dimension of the prior boxes "{}" is not divisible by prior size {} for node "{}"'
            ''.format(prior_boxes_shape[-1], prior_size, node_name))

    num_priors = prior_boxes_shape[-1] // prior_size
    if not node.has_valid('keep_top_k') or node.keep_top_k == -1:
        node['keep_top_k'] = num_priors

    # do not try to infer number of classes because it is not possible in case when input shapes are partially defined
    if not node.has_valid('num_classes'):
        node['num_classes'] = conf_shape[-1] // num_priors
        log.debug('Inferred amount of classes "{}"'.format(node.num_classes))

    num_loc_classes = node.num_classes
    if node.has_and_set('share_location') and node.share_location:
        num_loc_classes = 1

    if not compatible_dims(num_priors * num_loc_classes * 4, loc_shape[-1]):
        raise Error(
            'Locations and prior boxes shapes mismatch: "{}" vs "{}" for node "{}"'
            ''.format(loc_shape, prior_boxes_shape, node_name))

    if not node.variance_encoded_in_target and not compatible_dims(
            prior_boxes_shape[-2], 2):
        raise Error(
            'The "-2" dimension of the prior boxes must be 2 but it is "{}" for node "{}".'
            ''.format(prior_boxes_shape[-2], node_name))

    if is_fully_defined(conf_shape[-1]) and is_fully_defined(
            num_priors) and conf_shape[-1] % num_priors != 0:
        raise Error(
            'Amount of confidences "{}" is not divisible by amount of priors "{}" for node "{}".'
            ''.format(conf_shape[-1], num_priors, node_name))

    node.out_port(0).data.set_shape([1, 1, conf_shape[0] * node.keep_top_k, 7])

    # the line below is needed for the TF framework so the MO will not change the layout
    node.graph.node[node.out_node(0).id]['nchw_layout'] = True
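
The inferred attributes above follow simple integer arithmetic over the input shapes. A hypothetical walk-through with made-up SSD-like sizes:

# Hypothetical illustration of the DetectionOutput arithmetic above
# (all concrete numbers are made up).
prior_size = 4                                   # 5 when priors are not normalized
prior_boxes_last_dim = 4 * 8732
num_priors = prior_boxes_last_dim // prior_size  # 8732
conf_last_dim = 8732 * 21
num_classes = conf_last_dim // num_priors        # 21
batch, keep_top_k = 1, 200
print([1, 1, batch * keep_top_k, 7])             # inferred output shape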
Example #7
    def find_iterations_count_for_output(ti_node):
        def check_field(record, field):
            return field in record and record[field] is not None

        iterations_count = dynamic_dimension_value
        # find out iterations count from inputs.
        # If no input contains 'axis' attribute then no slicing is in TI and it has only one iteration
        # If several inputs have axis attribute with different iterations count then we use maximum value.
        for in_rec in ti_node.input_port_map:
            if not check_field(in_rec, 'axis'):
                continue
            assert check_field(
                in_rec, 'external_port_id'
            ), "external_port_id not set for input of {} node".format(
                ti_node.id)
            in_shape = ti_node.in_port(
                in_rec['external_port_id']).data.get_shape()
            if check_field(in_rec, 'end') and in_rec['end'] >= 0 and \
                    check_field(in_rec, 'start') and in_rec['start'] >= 0:
                in_rec_end = in_rec['end']
                in_rec_start = in_rec['start']
            elif check_field(in_rec, 'end') and in_rec['end'] >= 0:
                in_rec_end = in_rec['end']
                in_rec_start = in_shape[in_rec['axis']] if not check_field(in_rec, 'start') else \
                    in_shape[in_rec['axis']] + 1 + in_rec['start']
            elif check_field(in_rec, 'start') and in_rec['start'] >= 0:
                in_rec_end = in_shape[in_rec['axis']] if not check_field(in_rec, 'end') else \
                    in_shape[in_rec['axis']] + 1 + in_rec['end']
                in_rec_start = in_rec['start']
            else:
                in_rec_end = ti_node.in_port(
                    in_rec['external_port_id']).data.get_shape()[
                        in_rec['axis']]
                in_rec_start = 0

            if check_field(in_rec, 'stride'):
                in_rec_stride = in_rec['stride']
            else:
                in_rec_stride = 1

            # in case of dynamic iterations count don't continue any calculations on this iteration
            if not is_fully_defined(in_rec_end) or not is_fully_defined(
                    in_rec_start):
                continue

            if iterations_count is not dynamic_dimension_value and \
                    ceil((in_rec_end - in_rec_start) / in_rec_stride) != iterations_count:
                raise Error(
                    "TensorIterator node {} has inputs with different iteration counts"
                    .format(ti_node.id))
            iterations_count = ceil(
                (in_rec_end - in_rec_start) / in_rec_stride)

        return iterations_count
Example #8
    def control_flow_infer(node: Node, is_executable: bool, mark_executability: callable):
        """
        Infers control flow through the Switch operation node. It marks the executability of the output data nodes
        according to the executability of the current node and the switch data value.
        :param node: Node instance to infer control flow through
        :param is_executable: if current node is executable
        :param mark_executability: function to mark executability of node
        """
        out_data_nodes = node.out_nodes(control_flow=True)
        node_with_switch_value = node.in_node(1)

        switch_data_0_port_node_id = [out_data_nodes[0].id] if 0 in out_data_nodes else []
        switch_data_1_port_node_id = [out_data_nodes[1].id] if 1 in out_data_nodes else []
        assert 1 <= len(switch_data_0_port_node_id) + len(switch_data_1_port_node_id) <= 2

        if not node_with_switch_value.has_valid('value') or not is_fully_defined(node_with_switch_value.value):
            # Mark both ports as executable
            resulting_switch_data_node_ids = switch_data_0_port_node_id + switch_data_1_port_node_id
            for n in resulting_switch_data_node_ids:
                mark_executability(n, True)
        else:
            switch_value = node_with_switch_value.value.item(0)
            resulting_switch_data_node_ids = [switch_data_0_port_node_id, switch_data_1_port_node_id]

            for n in resulting_switch_data_node_ids[not switch_value]:
                mark_executability(n, False)
            for n in resulting_switch_data_node_ids[switch_value]:
                mark_executability(n, is_executable)
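
The port selection in the else-branch works by grouping the output data nodes per port and indexing the pair with the boolean switch value. A tiny hypothetical illustration of that indexing trick (the node names are placeholders):

# Hypothetical illustration of the indexing above: a True switch value
# enables the port-1 outputs and disables the port-0 outputs.
ports = [['port_0_data_node'], ['port_1_data_node']]
switch_value = True
print('disabled:', ports[not switch_value])   # ['port_0_data_node']
print('enabled: ', ports[switch_value])       # ['port_1_data_node']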
Example #9
    def infer(node):
        """
        https://www.tensorflow.org/api_docs/cc/class/tensorflow/ops/space-to-batch
        """
        input_shape = node.in_port(0).data.get_shape()
        node_name = node.soft_get('name', node.id)
        assert len(node.in_nodes()) == 4, 'Some inputs are not connected for the operation SpaceToBatch with name {}' \
                                          ''.format(node_name)

        block_size = node.in_port(1).data.get_value()
        pads_begin = node.in_port(2).data.get_value()
        pads_end = node.in_port(3).data.get_value()
        assert block_size is not None and pads_begin is not None and pads_end is not None,\
            'Some inputs are not defined for SpaceToBatch operation with name {}'.format(node_name)

        pads = pads_begin + input_shape + pads_end

        if is_fully_defined(block_size):
            block_elements_count = np.prod(block_size)
        else:
            block_elements_count = dynamic_dimension
        node.out_port(0).data.set_shape([
            input_shape[0] * block_elements_count,
            *[x for x in (pads[1:] // block_size[1:])]
        ])

        # block_shape, pads_begin, pads_end should be permuted during the NHWC->NCHW layout change
        PermuteInputs().set_input_permutation(node.in_node(1), node, 'input:0',
                                              'shape')
        PermuteInputs().set_input_permutation(node.in_node(2), node, 'input:0',
                                              'shape')
        PermuteInputs().set_input_permutation(node.in_node(3), node, 'input:0',
                                              'shape')
Example #10
    def infer(node):
        input_shape = node.in_node(0).shape
        if input_shape is None:
            return

        if len(node.in_nodes()) != 4:
            return

        block_size = node.in_port(1).data.get_value()
        crops_begin = node.in_port(2).data.get_value()
        crops_end = node.in_port(3).data.get_value()
        if block_size is None or crops_begin is None or crops_end is None:
            return

        pads = block_size * input_shape

        sizes = pads[1:] - crops_begin[1:] - crops_end[1:]
        if is_fully_defined(block_size):
            block_elements_count = np.prod(block_size)
        else:
            block_elements_count = dynamic_dimension
        batch = input_shape[0] // block_elements_count

        node.out_port(0).data.set_shape([batch, *sizes])

        # block_shape, crops_begin, crops_end values should be permuted during the NHWC->NCHW layout change
        PermuteInputs().set_input_permutation(node.in_node(1), node, 'input:0',
                                              'shape')
        PermuteInputs().set_input_permutation(node.in_node(2), node, 'input:0',
                                              'shape')
        PermuteInputs().set_input_permutation(node.in_node(3), node, 'input:0',
                                              'shape')
Example #11
def send_shapes_info(framework: str, graph: Graph):
    """
    This function sends information about model input shapes.
    :param framework: framework name.
    :param graph: model graph.
    """
    shapes = []
    for node in graph.get_op_nodes():
        op_type = node.soft_get('type', None)
        if op_type == 'Parameter':
            if 'shape' in node:
                shapes.append(node['shape'])
    t = tm.Telemetry()

    if shapes:
        shape_str = ""
        is_partially_defined = "0"
        for shape in shapes:
            shape_str += np.array2string(int64_array(
                unmask_shape(shape))) + ","
            if not is_fully_defined(shape):
                is_partially_defined = "1"
        message_str = "{fw:" + framework + ",shape:\"" + shape_str[:-1] + "\"}"
        t.send_event('mo', 'input_shapes', message_str)
        t.send_event(
            'mo', 'partially_defined_shape', "{partially_defined_shape:" +
            is_partially_defined + ",fw:" + framework + "}")
Example #12
    def test_reduce_dynamic(self, shape, axes, keepdims, p):
        false_mask = np.zeros(shape)
        false_mask[0][1][1] = True
        data = np.ma.masked_array(np.ones(shape), mask=false_mask)
        assert not is_fully_defined(data)
        reduced_tensor = np.sum(data, axis=tuple(axes), keepdims=keepdims)
        # create an array of all masked elements which is the expected result of the reduce of the tensor with dynamic
        # values
        fully_undefined = np.ma.masked_array(reduced_tensor, mask=np.ones(reduced_tensor.shape))
        axis = int64_array(axes)
        p = int64_array(p)
        graph = build_graph(nodes_attributes,
                            [*connect('data', '0:reduce_lp'),
                             *connect('axis', '1:reduce_lp'),
                             *connect('reduce_lp', '0:identity'),
                             ('identity', 'identity_d', {'out': 0}),
                             ('identity_d', 'output')
                             ],
                            {'data_d': {'value': data, 'shape': data.shape},
                             'axis_d': {'value': axis, 'shape': axis.shape},
                             'reduce_lp': {'keep_dims': keepdims}},
                            nodes_with_edges_only=True)

        reduce_node = Node(graph, 'reduce_lp')
        reduce_node.op = reduce_node.type = 'ReduceL' + str(p)
        reduce_infer(reduce_node)
        self.assertTrue(strict_compare_tensors(reduce_node.out_port(0).data.get_value(), fully_undefined))
Example #13
    def infer(node):
        name = node.soft_get('name', node.id)

        op = node.soft_get('op', None)
        assert op is not None and op in ['Split', 'AttributedSplit'], \
            'Unexpected `op`={} attribute for Split-like node {}'.format(op, name)

        num_in_ports = 1 if op == 'AttributedSplit' else 2 if op == 'Split' else None
        assert num_in_ports in [1, 2], \
            'SplitBase supports AttributedSplit with 1 input and Split with 2 inputs, but it is {} for {} node {}' \
            ''.format(num_in_ports, op, name)

        connected_inputs = {
            idx: port
            for idx, port in node.in_ports().items()
            if not port.disconnected()
        }
        assert len(connected_inputs) == num_in_ports and all([i in connected_inputs for i in range(num_in_ports)]), \
            "{} should have {} connected input ports, but it doesn't for node: `{}`. Ports: {}" \
            "".format(op, num_in_ports, name, connected_inputs)

        input_shape = node.in_port(0).data.get_shape()
        assert input_shape is not None, 'Input shape is unknown for node {}'.format(
            name)
        assert node.has_valid(
            'num_splits'
        ), 'Parameter `num_splits` is unknown for node {}'.format(name)
        num_splits = node.num_splits

        axis = node.in_port(1).data.get_value() if op == 'Split' else node.soft_get('axis', None)
        assert axis is not None, '{} `axis` is unknown for node {}'.format(op, name)
        assert axis.ndim == 0, '{} `axis` should be scalar, but it is not for node {}'.format(op, name)

        assert not is_fully_defined(input_shape[axis]) or input_shape[axis] % num_splits == 0, \
            'Input shape is not evenly divided by `num_splits` of {} node {}. `input_shape`={}, `axis`={}, ' \
            '`num_splits`={}'.format(op, name, input_shape, axis, num_splits)

        out_shape = input_shape.copy()
        out_shape[axis] = input_shape[axis] // num_splits

        input_value = node.in_port(0).data.get_value()
        output_value = np.split(input_value.copy(), axis=axis, indices_or_sections=num_splits) \
            if input_value is not None else None

        for idx, port in node.out_ports().items():
            if idx in node.out_nodes():
                port.data.set_shape(out_shape)
                if output_value is not None:
                    port.data.set_value(output_value[idx])

        if op == 'Split':
            PermuteInputs().set_input_permutation(node.in_node(1), node,
                                                  'input:0', 'axis')
        elif op == 'AttributedSplit':
            PermuteAttrs.create_permute_attrs(node,
                                              attrs=[('axis', 'input:0')])
Example #14
def normalize_eltwise_inputs(graph: Graph):
    '''
    The function normalizes input shapes for eltwise nodes.
    In the first step the function gets to know which shapes/unsqueeze dims for inputs are required for normalization.
    In the second step the function inserts Unsqueeze nodes between non-normalized inputs and eltwise nodes.
    '''
    # Generate a map for producers of eltwise nodes with non-normalized shapes
    # and in this map every producer has another map that reflects normalized shape
    # to a list of eltwise consumers
    mapping = {}
    for eltwise_node in graph.get_op_nodes(is_eltwise=True):
        unsqueeze_dims_map = compute_unsqueeze_map_for_eltwise(eltwise_node)
        for consumer_port in eltwise_node.in_ports().values():
            producer_port = consumer_port.get_source()
            unsqueeze_dims = unsqueeze_dims_map[producer_port]
            if unsqueeze_dims is not None and len(unsqueeze_dims) > 0:
                unsqueeze_dims = tuple([x for x in unsqueeze_dims])
                if producer_port not in mapping:
                    mapping.update(
                        {producer_port: {
                            unsqueeze_dims: [consumer_port]
                        }})
                elif unsqueeze_dims not in mapping[producer_port]:
                    mapping[producer_port].update(
                        {unsqueeze_dims: [consumer_port]})
                else:
                    mapping[producer_port][unsqueeze_dims].append(
                        consumer_port)

    # Walk through each producer in the map and insert Unsqueeze nodes between the producer and eltwise nodes
    for producer_port in mapping.keys():
        producer_node = producer_port.node
        for unsqueeze_dims in mapping[producer_port].keys():
            unsqueeze_name = producer_node.soft_get(
                'name', producer_node.id) + '/EltwiseUnsqueeze'
            unsqueeze_node = create_op_with_const_inputs(
                graph, Unsqueeze, {1: int64_array(list(unsqueeze_dims))},
                {'name': unsqueeze_name})
            unsqueeze_node.in_port(0).connect(producer_port)

            # Insert Unsqueeze with determined unsqueeze dimensions between the current producer and eltwise node
            for consumer_port in mapping[producer_port][unsqueeze_dims]:
                consumer_port.connect(unsqueeze_node.out_port(0))

            # The shape and value adjustments must be explicitly done within the transformation
            # since the transformation is called from Fusing transformation that excludes
            # automatic call of shape inference pass
            producer_port_value = producer_port.data.get_value()
            producer_port_shape = producer_port.data.get_shape()
            new_shape = producer_port_shape.copy()
            for unsqueeze_dim in unsqueeze_dims:
                new_shape = shape_insert(new_shape, unsqueeze_dim, 1)
            if producer_port_value is not None and is_fully_defined(new_shape):
                unsqueeze_node.out_port(0).data.set_value(
                    np.reshape(producer_port_value, new_shape))
            else:
                unsqueeze_node.out_port(0).data.set_shape(new_shape)
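
The mapping built in the first step is keyed by producer port and then by the tuple of unsqueeze dimensions, so one Unsqueeze node can be shared by all consumers that need the same normalization. A hypothetical sketch of its structure (the port names are placeholders, not real Port objects):

# Hypothetical structure of the mapping described above.
mapping = {
    'producer_port_A': {
        (0, 1): ['eltwise_1 in_port 0', 'eltwise_2 in_port 0'],  # same dims -> one shared Unsqueeze
        (0,): ['eltwise_3 in_port 1'],
    },
}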
Example #15
    def infer(node: Node):
        node_name = node.soft_get('name', node.id)
        input_shape = node.in_port(0).data.get_shape()
        assert input_shape is not None, 'The input shape for node "{}" is None'.format(node_name)
        assert node.has_valid('output_type'), \
            '`output_type` attribute is not set for NonZero node `{}`'.format(node_name)
        assert node.output_type in [np.int64, np.int32], \
            'NonZero `output_type` attribute must be int32 or int64, `{}` found'.format(np.dtype(node.output_type).name)

        input_value = node.in_port(0).data.get_value()
        if is_fully_defined(input_value):
            node.out_port(0).data.set_value(np.array(np.nonzero(input_value), dtype=node.output_type))
        else:
            if is_fully_defined(input_shape):
                # output shape of NonZero is still static (upper bound)
                node.out_port(0).data.set_shape([len(input_shape), np.prod(input_shape)])
            else:
                node.out_port(0).data.set_shape([len(input_shape), dynamic_dimension_value])
Example #16
    def value_propagation(node: Node):
        """
        This function performs value propagation for the MatMul layer.
        :param node: MatMul layer
        :return: None
        """
        a_value = node.in_port(0).get_source().data.get_value()
        b_value = node.in_port(1).get_source().data.get_value()
        if is_fully_defined(a_value) and is_fully_defined(b_value):
            if node.transpose_a:
                a_value = transpose(a_value)
            if node.transpose_b:
                b_value = transpose(b_value)
            # np.matmul does not work correctly with masked arrays, so the inputs are explicitly converted to regular arrays
            if isinstance(a_value, np.ma.masked_array):
                a_value = a_value.filled()
            if isinstance(b_value, np.ma.masked_array):
                b_value = b_value.filled()
            node.out_port(0).data.set_value(np.matmul(a_value, b_value))
Example #17
    def replace_pattern(self, graph: Graph, match: dict):
        node = match['cell']
        cell_name = node.soft_get('name', node.id)
        cell_type = node.soft_get('type')
        WR_input_id = node.soft_get('wr_input_id')
        hidden_size_coef = node.soft_get('gates_count')
        hidden_size = node.get_attrs()["hidden_size"]

        # default values for RNNCell/GRUCell
        additional_port_id = 4
        if cell_type == "LSTMCell":
            additional_port_id = 5

        WR_shape = node.in_port(WR_input_id).data.get_shape()
        assert WR_shape is not None, "Undefined 'WR' input shape for Cell node '{}'".format(
            cell_name)
        assert is_fully_defined(
            WR_shape
        ), 'Not fully defined shape for WR for Cell node "{}"'.format(
            cell_name)

        num_elements_in_WR = np.prod(WR_shape)
        input_size = (num_elements_in_WR /
                      (hidden_size_coef * hidden_size)) - hidden_size

        # Reshape
        reshape = create_op_node_with_second_input(
            graph, Reshape,
            int64_array(
                [hidden_size_coef * hidden_size, hidden_size + input_size]),
            {'name': cell_name + '/Dims'})

        # VariadicSplit
        split = create_op_with_const_inputs(
            graph, VariadicSplit, {
                1: int64_array(1),
                2: int64_array([input_size, hidden_size])
            }, {
                'out_ports_count': 2,
                'name': cell_name + '/Split'
            }, reshape)

        # Cell
        node.in_port(WR_input_id).get_connection().set_destination(
            reshape.in_port(0))

        node.add_input_port(additional_port_id, skip_if_exist=True)
        assert node.in_port(additional_port_id).disconnected()

        # (x, y, WR, B) -> (x, y, W, R, B(additional_port))
        node.in_port(additional_port_id - 1).get_connection().set_destination(
            node.in_port(additional_port_id))
        split.out_port(0).connect(node.in_port(additional_port_id - 2))
        split.out_port(1).connect(node.in_port(additional_port_id - 1))
Example #18
    def infer(node: Node):
        name = node.soft_get('name', node.id)

        connected_in_ports = {idx: port for idx, port in node.in_ports().items() if not port.disconnected()}
        assert len(connected_in_ports) == 3 and 0 in connected_in_ports and 1 in connected_in_ports and \
               2 in connected_in_ports, "Gather should have 3 connected input ports, but it doesn't for " \
                                        "node: `{}`. Ports: {}".format(name, connected_in_ports)

        data_shape = node.in_port(0).data.get_shape()
        assert data_shape is not None
        indices_shape = node.in_port(1).data.get_shape()
        assert indices_shape is not None
        axis = node.in_port(2).data.get_value()
        assert axis is not None, 'axis input is undefined'

        assert -len(data_shape) <= axis < len(data_shape), \
            'axis must be within interval [-data_rank, data_rank). Instead got axis = {}, data_rank = {} '.\
            format(axis, len(data_shape))

        batch_dims = node.batch_dims
        assert -len(indices_shape) <= batch_dims <= len(indices_shape), \
            'batch_dims must be within interval [-indices_rank, indices_rank]. Instead got batch_dims = {}, ' \
            'indices_rank = {} '.format(batch_dims, len(indices_shape))

        # normalize to positive values
        axis = axis + len(data_shape) if axis < 0 else axis
        batch_dims = batch_dims + len(indices_shape) if batch_dims < 0 else batch_dims

        assert np.ma.allequal(data_shape[:batch_dims], indices_shape[:batch_dims]), \
            'data and indices inputs must have equal first dimensions until batch_dims'

        assert batch_dims <= axis, \
            'normalized batch_dims must be <= axis. Instead got batch_dims = {}, axis = {}'.format(batch_dims, axis)

        # we import PermuteInputs locally because it uses Gather inside and we have recursive imports
        from mo.graph.perm_inputs import PermuteInputs
        PermuteInputs().set_input_permutation(node.in_node(2), node, 'input:0', 'axis')

        batch_dims_range = indices_shape[:batch_dims]
        out_shape = np.concatenate((data_shape[:axis], indices_shape[batch_dims:], data_shape[axis + 1:]))

        data_value = node.in_port(0).data.get_value()
        indices_value = node.in_port(1).data.get_value()
        if data_value is not None and indices_value is not None and is_fully_defined(indices_value):
            if batch_dims == 0:
                node.out_port(0).data.set_value(np.ma.take(data_value, indices_value, axis))
            else:
                out_value = np.empty(out_shape)
                for batch_idx in np.ndindex(tuple(batch_dims_range)):
                    out_value[batch_idx] = np.ma.take(data_value[batch_idx], indices_value[batch_idx],
                                                      axis - batch_dims)
                node.out_port(0).data.set_value(out_value)
        else:
            node.out_port(0).data.set_shape(out_shape)
Example #19
    def infer(node: Node):
        real_squeeze_dims = int64_array([])
        input_shape = node.in_port(0).data.get_shape()
        node_name = node.soft_get('name', node.id)
        if input_shape is None:
            raise Error(
                'Input shape is not defined for node {}'.format(node_name))

        output_shape = input_shape.copy()
        assert len(node.in_nodes()) == 2, 'The Squeeze node {} must have 2 inputs'.format(node_name)

        # TODO remove the following 'if' statement when IE start support 0D tensors
        squeeze_dims = node.in_port(1).data.get_value()
        if squeeze_dims.ndim == 0:
            squeeze_dims = squeeze_dims.reshape([1])

        for dim in squeeze_dims:
            if output_shape[dim] == 1 or output_shape[dim] is dynamic_dimension:
                real_squeeze_dims = np.ma.append(
                    real_squeeze_dims,
                    get_canonical_axis_index(output_shape, dim))
            else:
                raise Error(
                    'Trying to squeeze dimension not equal to 1 for node "{}"'.
                    format(node_name))

        # if squeeze_dims empty then all 1s should be removed (tf specification of Squeeze op)
        if squeeze_dims.size == 0:
            for i in range(output_shape.size):
                if output_shape[i] == 1:
                    real_squeeze_dims = np.ma.append(
                        real_squeeze_dims,
                        get_canonical_axis_index(output_shape, i))

        assert is_fully_defined(
            real_squeeze_dims
        ), 'Squeeze dimension(s) is not defined for op "{}"'.format(node_name)
        output_shape = shape_delete(output_shape, real_squeeze_dims)
        node.out_port(0).data.set_shape(output_shape)

        # make dimensions positive to correctly translate from NHWC to NCHW layout
        if node.in_port(1).get_source().node.op == 'Const':
            node.in_port(1).data.set_value(real_squeeze_dims)

        if node.in_port(0).data.get_value() is not None:
            node.out_port(0).data.set_value(
                node.in_port(0).data.get_value().reshape(output_shape))

        # the squeeze_dim attribute will be converted to the second input in the end of the Middle phase
        PermuteInputs().set_input_permutation(node.in_node(1), node, 'input:0',
                                              'axis')
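
The special case noted in the comment above (empty squeeze_dims) removes every dimension equal to 1, following the TensorFlow Squeeze specification. A hypothetical illustration with a made-up shape:

# Hypothetical illustration of the empty squeeze_dims rule above.
shape = [1, 3, 1, 5]
squeeze_dims = []                       # empty -> squeeze all dimensions equal to 1
if not squeeze_dims:
    squeeze_dims = [i for i, d in enumerate(shape) if d == 1]
print([d for i, d in enumerate(shape) if i not in squeeze_dims])   # [3, 5]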
Example #20
def uni_directional_broadcasting(input_value: np.array,
                                 target_shape: np.array):
    """
    Uni-directional broadcasting of the input tensor to the target shape following the numpy semantics
    :param input_value: input value to broadcast
    :param target_shape: target shape
    :return: broadcasted value
    """
    assert is_fully_defined(target_shape)
    assert uni_directional_shape_broadcasting(shape_array(input_value.shape), target_shape) is not None, \
        'The tensor of shape "{}" cannot be uni-directionally broadcasted to shape "{}"'.format(input_value.shape,
                                                                                                target_shape)
    return input_value * np.ones(target_shape).astype(input_value.dtype)
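
The helper relies on standard numpy (uni-directional) broadcasting: only the input may be stretched towards the target shape. A minimal, self-contained illustration with made-up shapes:

import numpy as np

# Hypothetical illustration of the broadcasting performed above.
input_value = np.array([[1, 2, 3]])                     # shape (1, 3)
target_shape = (2, 3)
result = input_value * np.ones(target_shape).astype(input_value.dtype)
print(result.shape)                                     # (2, 3)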
Example #21
    def infer(node):
        pad_node_name = node.soft_get('name', node.id)

        assert len(node.in_nodes()) in [3, 4], "The node {} must have 3 or 4 inputs".format(pad_node_name)

        input_shape = node.in_port(0).data.get_shape()
        input_value = node.in_port(0).data.get_value()
        pad_beg = node.in_port(1).data.get_value()
        pad_end = node.in_port(2).data.get_value()

        assert pad_beg is not None, 'The padding begin value is None for node {}'.format(pad_node_name)
        assert pad_end is not None, 'The padding end value is None for node {}'.format(pad_node_name)
        assert input_shape is not None, 'The input shape is None for node {}'.format(pad_node_name)
        assert len(input_shape) == len(pad_beg), \
            'Length of begin padding "{}" does not correspond to input tensor shape "{}" for node "{}".' \
            ''.format(pad_beg, input_shape, pad_node_name)
        assert len(input_shape) == len(pad_end), \
            'Length of end padding "{}" does not correspond to input tensor shape "{}" for node "{}".' \
            ''.format(pad_end, input_shape, pad_node_name)
        assert not node.is_in_port_connected(3) or node.in_port(3).data.get_shape().size == 0, \
            'Optional 3rd input of Pad operation should be scalar, but has shape {} for node {}' \
            ''.format(node.in_port(3).data.get_shape(), pad_node_name)

        node.out_port(0).data.set_shape(input_shape + pad_beg + pad_end)

        if input_value is not None and is_fully_defined(pad_beg) and is_fully_defined(pad_end):
            pads = np.insert(pad_end, np.arange(len(pad_end)), pad_beg)
            pads = np.reshape(pads, (len(pad_end), 2))
            pad_val = 0
            if len(node.in_nodes()) == 4:
                pad_val = node.in_port(3).data.get_value() if node.in_port(3).data is not None else 0
            if is_fully_defined(input_value):
                node.out_port(0).data.set_value(np.pad(input_value, pads, constant_values=pad_val, mode='constant'))
            else:
                node.out_port(0).data.set_value(shape_array(np.pad(input_value, pads, constant_values=pad_val,
                                                                   mode='constant')))
        # pad values should be permuted during the NHWC->NCHW layout change
        PermuteInputs().set_input_permutation(node.in_node(1), node, 'input:0', 'shape')
        PermuteInputs().set_input_permutation(node.in_node(2), node, 'input:0', 'shape')
Example #22
    def infer(node: Node):
        assert len(node.in_nodes()) == 1
        assert node.fill_value is not None
        assert node.input_as_shape

        shape = node.in_port(0).data.get_value()
        assert shape is not None

        if is_fully_defined(shape):
            node.out_port(0).data.set_value(
                np.full(shape, node.fill_value, np.float32))
        else:
            node.out_port(0).data.set_shape(shape)
Example #23
def bi_directional_broadcasting(input_value: np.array, second_shape: np.array):
    """
    Bi-directional broadcasting of the input tensor against the second shape following the numpy semantics
    :param input_value: input value to broadcast
    :param second_shape: second tensor shape
    :return: broadcasted value
    """
    output_shape = bi_directional_shape_broadcasting(
        shape_array(input_value.shape), second_shape)
    assert output_shape is not None, 'The tensor of shape "{}" cannot be bi-directionally broadcasted to shape "{}"' \
                                     ''.format(input_value.shape, second_shape)
    assert is_fully_defined(output_shape)
    return input_value * np.ones(second_shape).astype(input_value.dtype)
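
Unlike the uni-directional case, bi-directional broadcasting may grow both operands. A minimal illustration with made-up shapes:

import numpy as np

# Hypothetical illustration: (2, 1) against (1, 3) broadcasts to (2, 3).
input_value = np.array([[1], [2]])                      # shape (2, 1)
second_shape = (1, 3)
result = input_value * np.ones(second_shape).astype(input_value.dtype)
print(result.shape)                                     # (2, 3)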
Example #24
    def infer(node: Node):
        name = node.soft_get('name', node.id)

        connected_in_ports = {
            idx: port
            for idx, port in node.in_ports().items()
            if not port.disconnected()
        }
        assert len(connected_in_ports) == 2 and 0 in connected_in_ports and 1 in connected_in_ports, \
            "Tile should have 2 connected input ports, but it doesn't for node: `{}`. Ports: {}" \
            "".format(name, connected_in_ports)

        shape = node.in_port(0).data.get_shape()
        assert shape is not None, "Undefined input shape for Tile node '{}'.".format(
            name)
        tile_array = node.in_port(1).data.get_value()
        assert tile_array is not None, "Undefined `repeats` (1st port input value) of Tile node '{}'".format(
            name)

        # align ranks of the tile_array tensor and input shape node
        if shape.size < tile_array.size:
            shape = shape_insert(shape, 0,
                                 [1] * (tile_array.size - shape.size))
        elif shape.size > tile_array.size:
            tile_array = shape_insert(tile_array, 0,
                                      [1] * (shape.size - tile_array.size))

        input_value = node.in_port(0).data.get_value()
        if input_value is not None and is_fully_defined(
                shape) and is_fully_defined(tile_array):
            node.out_port(0).data.set_value(
                np.tile(input_value.reshape(shape), tile_array))
        else:
            node.out_port(0).data.set_shape(shape * tile_array)

        PermuteInputs().set_input_permutation(node.in_node(1), node, 'input:0',
                                              'shape')
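
The rank alignment above (left-padding the shorter of the input shape and the repeats array with 1s) matches what np.tile does internally, which is why the value propagation branch can call np.tile directly. A small check with made-up values:

import numpy as np

# Hypothetical illustration of the rank alignment above.
value = np.arange(6).reshape(2, 3)       # rank 2
repeats = (2, 1, 1)                      # rank 3 -> the input is treated as shape (1, 2, 3)
print(np.tile(value, repeats).shape)     # (2, 2, 3)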
Example #25
    def infer(node: Node):
        name = node.soft_get('name', node.id)
        connected_input_ports = [
            in_port.idx for in_port in node.in_ports().values()
            if not in_port.disconnected()
        ]
        assert len(connected_input_ports) == 3 and [0, 1, 2] == sorted(connected_input_ports), \
            'Range operation should have 3 inputs, {} found for {}'.format(len(connected_input_ports), name)

        start = node.in_port(0).data.get_value()
        limit = node.in_port(1).data.get_value()
        delta = node.in_port(2).data.get_value()

        for inp in (start, limit, delta):
            if inp is not None and not node.has_valid('output_type'):
                node['output_type'] = inp.dtype

        if not is_fully_defined(start) or not is_fully_defined(
                limit) or not is_fully_defined(delta):
            node.out_port(0).data.set_shape(
                shape_array([dynamic_dimension_value]))
        else:
            node.out_port(0).data.set_value(
                np.arange(start, limit, delta, dtype=node['output_type']))
Example #26
def reduce_helper(func: callable, x: np.array, axis: tuple, keepdims: bool):
    """
    Performs the reduction of input data tensor "x" over axis "axis" with function "func" and optionally removes reduced
    dimensions (if "keepdims" is False). If the input tensor has dynamic values, all elements of the result tensor
    are changed to be dynamic.

    :param func: numpy reduce function
    :param x: the data to perform reduction on
    :param axis: the axis for reduction
    :param keepdims: flag specifying whether keep reduce dimensions or not
    :return: the result tensor
    """
    result = func(x, axis=axis, keepdims=keepdims)
    if is_fully_defined(x):
        return result
    else:
        return np.ma.masked_array(result,
                                  mask=np.ones(result.shape, dtype=bool))
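
A minimal usage sketch of the helper, assuming it is defined in the same module as the stand-in below. The is_fully_defined here is a simplified substitute for the Model Optimizer utility: a tensor counts as fully defined when none of its elements are masked, which is the convention the examples above rely on.

import numpy as np

def is_fully_defined(value):
    # simplified stand-in: masked elements represent dynamic values
    return not (isinstance(value, np.ma.MaskedArray) and np.ma.is_masked(value))

x = np.ma.masked_array(np.ones((2, 3)), mask=[[0, 1, 0], [0, 0, 0]])
result = reduce_helper(np.sum, x, axis=(1,), keepdims=False)
print(result.mask)   # [ True  True] - one dynamic element makes the whole result dynamic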
Example #27
def mark_const_producer_nodes(graph):
    """
    Mark nodes that produce constant values.
    :param graph: graph to operate on.
    :return: .
    """
    nx.set_node_attributes(G=graph, name='is_const_producer', values=True)

    for node in graph.pseudo_topological_sort():
        for input, output, attrs in graph.in_edges(node.id, data=True):
            if 'control_flow_edge' in attrs and attrs['control_flow_edge']:
                graph.node[input]['is_const_producer'] = False
                graph.node[output]['is_const_producer'] = False

        if not node.has('value') or node.value is None or not is_fully_defined(
                node.value):
            for input, _ in graph.in_edges(node.id):
                graph.node[input]['is_const_producer'] = False
Example #28
    def find_and_replace_pattern(self, graph: Graph):
        dynamic_inputs = {}
        for parameter in graph.get_op_nodes(op='Parameter'):
            param_shape = parameter.soft_get('shape', shape_array(dynamic_dimension_value))
            if not is_fully_defined(param_shape):
                parameter_name = parameter.soft_get('name', parameter.id)
                dynamic_inputs[parameter_name] = param_shape
        if dynamic_inputs:
            log.error('The model contains input(s) with partially defined shapes: {}. '
                      'Starting from the 2022.1 release the Model Optimizer can generate an IR with partially defined '
                      'input shapes ("-1" dimension in the TensorFlow model or dimension with string value in the ONNX '
                      'model). Some of the OpenVINO plugins require model input shapes to be static, so you should '
                      'call "reshape" method in the Inference Engine and specify static input shapes. For optimal '
                      'performance, it is still recommended to update input shapes with fixed ones using "--input" or '
                      '"--input_shape" command-line parameters.'
                      .format(','.join('name="{}" shape="{}"'.format(name, unmask_shape(shape))
                                       for name, shape in dynamic_inputs.items())),
                      extra={'is_warning': True})
        partial_infer(graph)
Example #29
    def infer(node: Node):
        node_name = node.soft_get('name', node.id)

        input_shape = node.in_port(0).data.get_shape()
        input_value = node.in_port(0).data.get_value()
        target_shape = node.in_port(1).data.get_value()
        assert target_shape is not None, 'Output shape is not defined for node "{}"'.format(node_name)
        assert node.has_and_set('mode'), 'Broadcasting mode is not defined for node "{}"'.format(node_name)

        PermuteInputs().set_input_permutation(node.in_node(1), node, 'output:0', 'shape')

        if input_value is not None and not node.has_and_set('stop_value_propagation') and \
                is_fully_defined(target_shape):
            if node.mode == 'numpy':
                node.out_port(0).data.set_value(uni_directional_broadcasting(input_value, target_shape))
            elif node.mode == 'bidirectional':
                node.out_port(0).data.set_value(bi_directional_broadcasting(input_value, target_shape))
            elif node.mode == 'explicit':
                axes_mapping = node.in_port(2).data.get_value()
                assert axes_mapping is not None, 'Broadcast(mode="explicit") with dynamic axes_mapping input ' \
                                                 'is not supported. Node: `{}`'.format(node_name)
                PermuteInputs().set_input_permutation(node.in_node(2), node, 'output:0', 'axis')
                axes_mapping = node.in_port(2).data.get_value()
                node.out_port(0).data.set_value(explicit_broadcasting(input_value, target_shape, axes_mapping))
            else:
                raise Error('The node "{}" has unsupported mode "{}"'.format(node_name, node.mode))
        else:
            if node.mode == 'numpy':
                node.out_port(0).data.set_shape(uni_directional_shape_broadcasting(input_shape, target_shape))
            elif node.mode == 'bidirectional':
                node.out_port(0).data.set_shape(bi_directional_shape_broadcasting(input_shape, target_shape))
            elif node.mode == 'explicit':
                axes_mapping = node.in_port(2).data.get_value()
                assert axes_mapping is not None, 'Broadcast(mode="explicit") with dynamic axes_mapping input ' \
                                                 'is not supported. Node: `{}`'.format(node_name)
                PermuteInputs().set_input_permutation(node.in_node(2), node, 'output:0', 'axis')
                axes_mapping = node.in_port(2).data.get_value()
                new_shape, _ = explicit_shape_broadcasting(input_shape, target_shape, axes_mapping)
                node.out_port(0).data.set_shape(new_shape)
            else:
                raise Error('The node "{}" has unsupported mode "{}"'.format(node_name, node.mode))
Example #30
    def infer(node: Node):
        node_name = node.soft_get('name', node.id)
        input_shape = node.in_port(0).data.get_shape()
        input_value = node.in_port(0).data.get_value()
        if input_shape is None:
            raise Error('Input shape for node "{}" is None'.format(node_name))

        assert len(node.in_nodes()) == 1, 'Wrong number of inputs to the layer {}'.format(node_name)

        if not node.has_valid('expand_axis'):
            raise Error(
                'ExpandDims axis is not defined for node {}'.format(node_name))

        expand_axes = node.expand_axis
        if expand_axes is None:
            raise Error(
                'The "expand_axis" attribute is None for node "{}"'.format(
                    node_name))

        if isinstance(expand_axes, int):
            expand_axes = int64_array([expand_axes])
        elif expand_axes.ndim == 0:
            expand_axes = expand_axes.reshape([1])

        # expand_axis is a position where the new axis is placed, so for negative values expand_dims behaves
        # differently from a plain insert: the position is counted against the output rank (input rank + 1)
        expand_axes = [axis + len(input_shape) + 1 if axis < 0 else axis for axis in expand_axes]

        expand_axes = sorted(expand_axes)
        output_shape = input_shape.copy()
        for expand_axis in expand_axes:
            output_shape = shape_insert(output_shape, expand_axis, 1)

        if input_value is not None and is_fully_defined(output_shape):
            node.out_port(0).data.set_value(input_value.reshape(output_shape))
        else:
            node.out_port(0).data.set_shape(output_shape)
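
As the comment above notes, a negative expand_axis is interpreted against the output rank (input rank + 1), not as a plain insert position. A hypothetical check of that rule for a rank-3 input:

# Hypothetical illustration of the negative-axis normalization above.
input_rank = 3
for axis in (-1, -4, 0, 3):
    position = axis + input_rank + 1 if axis < 0 else axis
    print(axis, '->', position)    # -1 -> 3, -4 -> 0, 0 -> 0, 3 -> 3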