Example #1
def mark_const_producer_nodes(graph):
    """
    Mark nodes that produce constant values.
    :param graph: graph to operate on.
    :return: None.
    """
    nx.set_node_attributes(G=graph, name='is_const_producer', values=True)

    for node in graph.pseudo_topological_sort():
        for input, output, attrs in graph.in_edges(node.id, data=True):
            if 'control_flow_edge' in attrs and attrs['control_flow_edge']:
                graph.node[input]['is_const_producer'] = False
                graph.node[output]['is_const_producer'] = False

        if not node.has('value') or node.value is None or not is_fully_defined(
                node.value):
            for input, _ in graph.in_edges(node.id):
                graph.node[input]['is_const_producer'] = False
Example #2
def reduce_helper(func: callable, x: np.array, axis: tuple, keepdims: bool):
    """
    Performs the reduction of input data tensor "x" over axis "axis" with function "func" and optionally removes reduced
    dimensions (if "keepdims" is False). If the input tensor has dynamic values, all elements of the result tensor
    are changed to be dynamic.

    :param func: numpy reduce function
    :param x: the data to perform reduction on
    :param axis: the axis for reduction
    :param keepdims: flag specifying whether to keep the reduced dimensions or not
    :return: the result tensor
    """
    result = func(x, axis=axis, keepdims=keepdims)
    if is_fully_defined(x):
        return result
    else:
        return np.ma.masked_array(result,
                                  mask=np.ones(result.shape, dtype=np.bool))
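
A minimal, self-contained sketch of the behaviour described in the docstring, using plain NumPy; is_fully_defined_stub and reduce_sketch are simplified stand-ins for illustration only (the real MO helper also understands dynamic_dimension scalars):

import numpy as np

def is_fully_defined_stub(x):
    # simplified stand-in: a tensor is treated as fully defined when no element is masked
    return not np.ma.is_masked(x)

def reduce_sketch(func, x, axis, keepdims):
    result = func(x, axis=axis, keepdims=keepdims)
    if is_fully_defined_stub(x):
        return result
    # a single dynamic input element makes every element of the result dynamic
    return np.ma.masked_array(result, mask=np.ones(result.shape, dtype=bool))

data = np.ma.masked_array(np.ones((2, 3)), mask=[[False, True, False], [False, False, False]])
print(reduce_sketch(np.sum, data, axis=1, keepdims=False).mask.all())  # True
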
Example #3
    def find_and_replace_pattern(self, graph: Graph):
        dynamic_inputs = {}
        for parameter in graph.get_op_nodes(op='Parameter'):
            param_shape = parameter.soft_get('shape', shape_array(dynamic_dimension_value))
            if not is_fully_defined(param_shape):
                parameter_name = parameter.soft_get('name', parameter.id)
                dynamic_inputs[parameter_name] = parameter
        if dynamic_inputs:
            log.error('The model contains input(s) with partially defined shapes: {}. '
                      'Starting from the 2022.1 release the Model Optimizer can generate an IR with partially defined '
                      'input shapes ("-1" dimension in the TensorFlow model or dimension with string value in the ONNX '
                      'model). Some of the OpenVINO plugins require model input shapes to be static, so you should '
                      'call "reshape" method in the Inference Engine and specify static input shapes. For optimal '
                      'performance, it is still recommended to update input shapes with fixed ones using "--input" or '
                      '"--input_shape" command-line parameters.'
                      .format(','.join('name="{}" shape="{}"'.format(name, Parameter.shape_serialize(parameter))
                                       for name, parameter in dynamic_inputs.items())),
                      extra={'is_warning': True})
        partial_infer(graph)
Example #4
    def infer(node: Node):
        name = node.soft_get('name', node.id)
        connected_in_ports = [port for port in node.in_ports().values() if not port.disconnected()]
        assert len(connected_in_ports) == 1, \
            'Size operation should have exactly one input node, but it has {}'.format(len(connected_in_ports))

        input_shape = node.in_port(0).data.get_shape()
        assert input_shape is not None, \
            'Input shape is undefined for Size node `{}`'.format(node.soft_get('name', node.id))

        assert node.has_valid('output_type'), \
            '`output_type` attribute is not set for Size node `{}`'.format(name)
        assert node.output_type in [np.int64, np.int32], \
            'Size `output_type` attribute must be int32 or int64, `{}` found'.format(np.dtype(node.output_type).name)

        if is_fully_defined(input_shape):
            node.out_port(0).data.set_value(mo_array(np.prod(input_shape), dtype=node.output_type))
        else:
            node.out_port(0).data.set_value(shape_array(dynamic_dimension_value))
Example #5
    def infer(node: Node):
        ScatterNDBase.infer(node)

        input_value = node.in_port(0).data.get_value()
        indices_shape = node.in_port(1).data.get_shape()
        indices_value = node.in_port(1).data.get_value()
        updates_value = node.in_port(2).data.get_value()

        # compute output value if all inputs are constant
        if input_value is not None and is_fully_defined(indices_value) and updates_value is not None:
            output_value = input_value.copy()
            indx_range = indices_shape[:-1]
            for indx in np.ndindex(tuple(indx_range)):
                if indx == ():
                    # a case when updates is a scalar
                    indx = 0
                    updates_value = [updates_value]
                output_value[indices_value[indx]] = updates_value[indx]

            node.out_port(0).data.set_value(output_value)
Example #6
    def infer(node: Node):
        node_name = node.soft_get('name', node.id)

        input_shape = node.in_port(0).data.get_shape()
        input_value = node.in_port(0).data.get_value()
        target_shape = node.in_port(1).data.get_value()
        assert target_shape is not None, 'Output shape is not defined for node "{}"'.format(node_name)
        assert node.has_and_set('mode'), 'Broadcasting mode is not defined for node "{}"'.format(node_name)

        PermuteInputs().set_input_permutation(node.in_node(1), node, 'output:0', 'shape')

        if input_value is not None and not node.has_and_set('stop_value_propagation') and \
                is_fully_defined(target_shape):
            if node.mode == 'numpy':
                node.out_port(0).data.set_value(uni_directional_broadcasting(input_value, target_shape))
            elif node.mode == 'bidirectional':
                node.out_port(0).data.set_value(bi_directional_broadcasting(input_value, target_shape))
            elif node.mode == 'explicit':
                axes_mapping = node.in_port(2).data.get_value()
                assert axes_mapping is not None, 'Broadcast(mode="explicit") with dynamic axes_mapping input ' \
                                                 'is not supported. Node: `{}`'.format(node_name)
                PermuteInputs().set_input_permutation(node.in_node(2), node, 'output:0', 'axis')
                axes_mapping = node.in_port(2).data.get_value()
                node.out_port(0).data.set_value(explicit_broadcasting(input_value, target_shape, axes_mapping))
            else:
                raise Error('The node "{}" has unsupported mode "{}"'.format(node_name, node.mode))
        else:
            if node.mode == 'numpy':
                node.out_port(0).data.set_shape(uni_directional_shape_broadcasting(input_shape, target_shape))
            elif node.mode == 'bidirectional':
                node.out_port(0).data.set_shape(bi_directional_shape_broadcasting(input_shape, target_shape))
            elif node.mode == 'explicit':
                axes_mapping = node.in_port(2).data.get_value()
                assert axes_mapping is not None, 'Broadcast(mode="explicit") with dynamic axes_mapping input ' \
                                                 'is not supported. Node: `{}`'.format(node_name)
                PermuteInputs().set_input_permutation(node.in_node(2), node, 'output:0', 'axis')
                axes_mapping = node.in_port(2).data.get_value()
                new_shape, _ = explicit_shape_broadcasting(input_shape, target_shape, axes_mapping)
                node.out_port(0).data.set_shape(new_shape)
            else:
                raise Error('The node "{}" has unsupported mode "{}"'.format(node_name, node.mode))
Example #7
    def infer(node: Node):
        node_name = node.soft_get('name', node.id)
        input_shape = node.in_port(0).data.get_shape()
        input_value = node.in_port(0).data.get_value()
        if input_shape is None:
            raise Error('Input shape for node "{}" is None'.format(node_name))

        assert len(node.in_nodes(
        )) == 1, 'Wrong number of inputs to the layer {}'.format(node_name)

        if not node.has_valid('expand_axis'):
            raise Error(
                'ExpandDims axis is not defined for node {}'.format(node_name))

        expand_axes = node.expand_axis
        if expand_axes is None:
            raise Error(
                'The "expand_axis" attribute is None for node "{}"'.format(
                    node_name))

        if isinstance(expand_axes, int):
            expand_axes = int64_array([expand_axes])
        elif expand_axes.ndim == 0:
            expand_axes = expand_axes.reshape([1])

        # expand_axis is the position where the new axis is placed, so for negative axes expand_dims behaves
        # differently from a plain insert operation
        for expand_axis in expand_axes:
            if expand_axis < 0:
                expand_axis += len(input_shape) + 1

        expand_axes = sorted(expand_axes)
        output_shape = input_shape.copy()
        for expand_axis in expand_axes:
            output_shape = shape_insert(output_shape, expand_axis, 1)

        if input_value is not None and is_fully_defined(output_shape):
            node.out_port(0).data.set_value(input_value.reshape(output_shape))
        else:
            node.out_port(0).data.set_shape(output_shape)
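
The shape arithmetic above can be reproduced with plain NumPy: a negative expand axis is shifted by rank + 1 and a 1 is inserted at each sorted position. A standalone sketch, not the MO helpers themselves:

import numpy as np

input_shape = np.array([3, 4, 5])
expand_axes = np.array([-1])  # request a new axis at the end
# negative positions are shifted by rank + 1, matching the comment above
expand_axes = np.where(expand_axes < 0, expand_axes + len(input_shape) + 1, expand_axes)
output_shape = input_shape.copy()
for axis in sorted(expand_axes):
    output_shape = np.insert(output_shape, axis, 1)
print(output_shape)  # [3 4 5 1]
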
Example #8
    def test_reduce_dynamic(self, shape, axes, keepdims, p):
        false_mask = np.zeros(shape)
        false_mask[0][1][1] = True
        data = np.ma.masked_array(np.ones(shape), mask=false_mask)
        assert not is_fully_defined(data)
        reduced_tensor = np.sum(data, axis=tuple(axes), keepdims=keepdims)
        # create an array of all masked elements which is the expected result of the reduce of the tensor with dynamic
        # values
        fully_undefined = np.ma.masked_array(reduced_tensor,
                                             mask=np.ones(
                                                 reduced_tensor.shape))
        axis = int64_array(axes)
        p = int64_array(p)
        graph = build_graph(nodes_attributes, [
            *connect('data', '0:reduce_lp'), *connect('axis', '1:reduce_lp'),
            *connect('reduce_lp', '0:identity'),
            ('identity', 'identity_d', {
                'out': 0
            }), ('identity_d', 'output')
        ], {
            'data_d': {
                'value': data,
                'shape': data.shape
            },
            'axis_d': {
                'value': axis,
                'shape': axis.shape
            },
            'reduce_lp': {
                'keep_dims': keepdims
            }
        },
                            nodes_with_edges_only=True)

        reduce_node = Node(graph, 'reduce_lp')
        reduce_node.op = reduce_node.type = 'ReduceL' + str(p)
        reduce_infer(reduce_node)
        self.assertTrue(
            strict_compare_tensors(
                reduce_node.out_port(0).data.get_value(), fully_undefined))
Example #9
def reduce_helper(func: callable, x: np.array, axis: tuple, keepdims: bool):
    """
    Performs the reduction of input data tensor "x" over axis "axis" with function "func" and optionally removes reduced
    dimensions (if "keepdims" is False). If the input tensor has dynamic values, all elements of the result tensor
    are changed to be dynamic.

    :param func: numpy reduce function
    :param x: the data to perform reduction on
    :param axis: the axis for reduction
    :param keepdims: flag specifying whether to keep the reduced dimensions or not
    :return: the result tensor
    """
    result = func(x, axis=axis, keepdims=keepdims)
    # we need to handle this case specially to avoid problems with deepcopy method with MaskedConstant converted to
    # masked_array - see issue https://github.com/numpy/numpy/issues/21022
    if isinstance(result, np.ma.core.MaskedConstant):
        return np.ma.masked_array(data=-1, mask=True, dtype=result.dtype)
    if is_fully_defined(x):
        return result
    else:
        return np.ma.masked_array(result,
                                  mask=np.ones(result.shape, dtype=np.bool))
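
The MaskedConstant special case can be reproduced directly with NumPy: reducing a fully masked tensor returns the np.ma.masked singleton, which the code above converts into an ordinary 0-d masked array to sidestep the deepcopy issue mentioned in the comment. A quick illustration:

import numpy as np

x = np.ma.masked_array([1, 2, 3], mask=[True, True, True])
result = np.sum(x)
print(isinstance(result, np.ma.core.MaskedConstant))  # True: reducing a fully masked tensor yields np.ma.masked
# rebuilding it as an ordinary 0-d masked array (as above) keeps it safe to deepcopy
safe = np.ma.masked_array(data=-1, mask=True, dtype=result.dtype)
print(safe)  # --
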
Example #10
    def infer(node):
        if len(node.in_nodes()) <= 1:
            raise Error(
                'There is no input with unsqueeze dims for the node {}'.format(
                    node.soft_get('name')))
        unsqueeze_dims = node.in_port(1).data.get_value()
        if unsqueeze_dims is None:
            raise Error(
                'The dimensions to unsqueeze are not defined for the node {}'.
                format(node.soft_get('name')))
        unsqueeze_dims = int64_array(unsqueeze_dims)

        input_value = node.in_port(0).data.get_value()
        input_shape = node.in_port(0).data.get_shape()

        # TODO remove the following line when the Inference Engine plugins support 0D tensors
        if unsqueeze_dims.ndim == 0:
            unsqueeze_dims = int64_array([unsqueeze_dims.item()])

        # make dimensions positive to correctly translate from NHWC to NCHW layout
        unsqueeze_dims = int64_array([
            dim + len(node.in_port(0).data.get_shape()) + 1 if dim < 0 else dim
            for dim in unsqueeze_dims
        ])
        if node.in_port(1).get_source().node.op == 'Const':
            node.in_port(1).data.set_value(unsqueeze_dims)

        output_shape = input_shape.copy()
        for dim in unsqueeze_dims:
            output_shape = shape_insert(output_shape, dim, 1)

        if input_value is not None and is_fully_defined(output_shape):
            node.out_port(0).data.set_value(input_value.reshape(output_shape))
        else:
            node.out_port(0).data.set_shape(output_shape)

        PermuteInputs().set_input_permutation(node.in_node(1), node, 'input:0',
                                              'axis')
Example #11
    def infer(node: Node):
        ScatterNDBase.infer(node)

        input_value = node.in_port(0).data.get_value()
        indices_shape = node.in_port(1).data.get_shape()
        indices_value = node.in_port(1).data.get_value()
        updates_value = node.in_port(2).data.get_value()

        # compute output value if all inputs are constant
        if input_value is not None and is_fully_defined(indices_value) and updates_value is not None:
            output_value = input_value.copy()
            indx_range = indices_shape[:-1]
            for indx in np.ndindex(tuple(indx_range)):
                if indx == ():
                    # a case when updates is a scalar
                    indx = 0
                    updates_value = [updates_value]
                insert_index = indices_value[indx]
                # we check and change index type explicitly to avoid error in indexing ndarray by another ndarray
                if isinstance(insert_index, np.ndarray):
                    insert_index = tuple(insert_index)
                output_value[insert_index] = updates_value[indx]

            node.out_port(0).data.set_value(output_value)
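
The tuple conversion above matters because NumPy indexes differently with an ndarray (fancy indexing over rows) and with a tuple (a single element address). A standalone illustration of the distinction the code guards against:

import numpy as np

data = np.arange(12).reshape(3, 4)
idx = np.array([1, 2])

print(data[idx].shape)   # (2, 4) -> rows 1 and 2 (fancy indexing)
print(data[tuple(idx)])  # 6      -> the single element data[1, 2]

# ScatterND-style update of one element addressed by an index tuple
out = data.copy()
out[tuple(idx)] = 100
print(out[1, 2])         # 100
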
Example #12
def concat_infer(node):
    node_name = node.soft_get('name', node.id)
    if not node.has('axis'):
        N = node.N
        axis_input = node.in_node(N)
        if axis_input.has_valid('value') and axis_input.value.size == 1:
            node['axis'] = axis_input.value.item()
            node.graph.remove_edge(
                axis_input.node,
                node.node)  # TODO add skip attribute instead of deleting
        else:
            raise Error(
                'Input with value is not specified for node "{}"'.format(
                    node_name))
    else:
        N = len(node.in_nodes())

    shapes = [node.in_node(i).shape for i in range(N)]
    if any(s is None for s in shapes):
        raise Error(
            'One of the input shapes is not defined for node "{}"'.format(
                node_name))

    shape = shape_array(shapes[0])

    axis = get_canonical_axis_index(shape, node.axis)
    node.axis = axis

    mask = np.zeros_like(shape, dtype=np.bool)
    mask[axis] = True  # pylint: disable=unsupported-assignment-operation
    not_mask = np.logical_not(mask)  # pylint: disable=assignment-from-no-return
    for s in shapes[1:]:
        s = shape_array(s)
        if np.ma.allequal(shape[not_mask], s[not_mask]):
            shape[mask] += s[mask]
        else:
            raise Error(
                'Concat input shapes do not match for node "{}" with axis {}'.
                format(node_name, axis))

    #  dynamic dimensions in the output (except the concat axis) can be deduced from input shape
    for pos in range(len(shape)):
        if shape[pos] is dynamic_dimension and pos != axis:
            for in_shape in shapes:
                if in_shape[pos] is not dynamic_dimension:
                    shape[pos] = in_shape[pos]

    node.out_port(0).data.set_shape(shape)
    PermuteAttrs.create_permute_attrs(node, attrs=[('axis', 'input:0')])

    values = [node.in_node(i).value for i in range(N)]
    if any([v is None for v in values]):
        return

    # if one of the input values is dynamic, the output tensor type is inferred from one of the fully defined inputs
    output_dtype = np.int64
    for input in values:
        if is_fully_defined(input):
            output_dtype = input.dtype

    if any(not is_fully_defined(v) for v in values):
        node.out_port(0).data.set_value(
            np.ma.concatenate(values, axis=node.axis).astype(output_dtype))
    else:  # there is a serious performance benefit to use concatenation as it is implemented below
        node.out_node(0).value = np.concatenate(values, axis=node.axis).astype(
            values[0].dtype, copy=False)
        node.out_node(0).shape = shape_array(node.out_node(0).value.shape)
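
The value branch relies on np.ma.concatenate propagating masks, so a dynamic element of any input stays dynamic in the result, while the static branch uses plain np.concatenate for speed. A small NumPy-only sketch of the difference:

import numpy as np

a = np.ma.masked_array([1, 2], mask=[False, True])  # second element is dynamic
b = np.array([3, 4])                                # fully defined

mixed = np.ma.concatenate([a, b], axis=0)
print(mixed)       # [1 -- 3 4]: the dynamic element stays masked
print(mixed.mask)  # [False  True False False]

static = np.concatenate([np.array([1, 2]), b], axis=0)
print(static)      # [1 2 3 4]
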
Example #13
    def infer(node: Node):
        node_name = node.soft_get('name', node.id)
        connected_in_ports = [
            port for port in node.in_ports().values()
            if not port.disconnected()
        ]
        assert len(connected_in_ports) == 2, \
            "Incorrect number of inputs for {} node".format(node_name)

        data_shape = node.in_port(0).data.get_shape()
        data_value = node.in_port(0).data.get_value()
        indices_shape = node.in_port(1).data.get_shape()
        indices_value = node.in_port(1).data.get_value()

        assert node.has_valid(
            'batch_dims'
        ), "Node {} must contain `batch_dims` attribute".format(node_name)
        batch_dims = node.batch_dims

        # check that the number of batch dimensions is less than the ranks of both the data and indices tensors
        assert batch_dims < len(
            data_shape
        ), "Number of batch dimensions must be less than a rank of data"
        assert batch_dims < len(
            indices_shape
        ), "Number of batch dimensions must be less than a rank of indices"

        # check that batch dimensions of data and indices are the same
        for batch_dim in range(batch_dims):
            assert compatible_dims(data_shape[batch_dim], indices_shape[batch_dim]), \
                "The dimension {} for data and indices tensors must be the same".format(batch_dim)

        # check ranks of input tensors
        assert len(data_shape) > 0, "Data must not be a scalar"
        assert len(indices_shape) > 0, "Indices must not be a scalar"
        assert (batch_dims + indices_shape[-1]) <= len(data_shape), \
            "Length of a tuple with indices must not exceed a rank of data tensor excluding batch dimensions"
        assert node['version'] in ['opset5', 'opset8'], 'Unsupported version of GatherND operation: {}, operation ' \
                                                        'name : {}'.format(node['version'], node.soft_get('name'))

        # compute output shape
        batch = []
        if batch_dims > 0:
            if node['version'] == 'opset5':  # Support old version of gatherND shape inference
                if is_fully_defined(data_shape[:batch_dims]):
                    batch = [np.prod(data_shape[:batch_dims]).tolist()]
                else:
                    batch = [dynamic_dimension_value]
            elif node['version'] == 'opset8':
                for dim in range(batch_dims):
                    assert compatible_dims(indices_shape[dim], data_shape[dim]),\
                        "Batch dimensions in data.shape and indices.shape must be compatible"
                if is_fully_defined(indices_shape[:batch_dims]):
                    batch = indices_shape[:batch_dims].tolist()
                elif is_fully_defined(data_shape[:batch_dims]):
                    batch = data_shape[:batch_dims].tolist()
                else:
                    for ind in range(batch_dims):
                        if indices_shape[ind] != dynamic_dimension_value:
                            batch.append(indices_shape[ind])
                        elif data_shape[ind] != dynamic_dimension_value:
                            batch.append(data_shape[ind])
                        else:
                            batch.append(dynamic_dimension_value)

        slice_shape = list(data_shape[(batch_dims + indices_shape[-1]):])

        output_shape = batch + list(indices_shape)[batch_dims:-1] + slice_shape
        node.out_port(0).data.set_shape(output_shape)

        # compute output value if all input indices are defined
        if is_fully_defined(indices_value) and data_value is not None:
            batch_dims_size = 1

            for i in range(batch_dims):
                batch_dims_size *= indices_shape[i]

            output_data = []

            reshaped_indices = indices_value.reshape(batch_dims_size, -1,
                                                     indices_shape[-1])

            reshaped_data = data_value.reshape((batch_dims_size, ) + tuple(
                (data_shape[batch_dims:])))

            for batch_dim in range(reshaped_indices.shape[0]):
                for outer_dim in range(reshaped_indices.shape[1]):
                    gather_index = tuple(
                        reshaped_indices[batch_dim][outer_dim])
                    output_data.append(reshaped_data[(batch_dim, ) +
                                                     gather_index])
            output_value = np.asarray(
                output_data, dtype=data_value.dtype).reshape(output_shape)
            node.out_port(0).data.set_value(output_value)
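
A worked example of the opset8 output-shape rule used above (hypothetical sizes, assuming batch_dims = 1): the result is the batch dims, then the outer indices dims, then whatever data dims remain after consuming indices_shape[-1] coordinates.

import numpy as np

data_shape = np.array([2, 5, 6, 7])  # one batch dimension of size 2
indices_shape = np.array([2, 3, 2])  # per batch: 3 index tuples of 2 coordinates each
batch_dims = 1

batch = indices_shape[:batch_dims].tolist()                   # [2]
outer = indices_shape[batch_dims:-1].tolist()                 # [3]
tail = data_shape[batch_dims + indices_shape[-1]:].tolist()   # [7]
print(batch + outer + tail)                                   # [2, 3, 7]
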
Example #14
File: If.py  Project: mikhailk62/openvino
    def update_if_output_ports_shape(if_node: Node):
        """
        Update shape and values for If output ports.

        :param if_node: The If node to update output ports and shapes
        :return: None
        """
        node_name = if_node.soft_get('name', if_node.id)

        then_outputs = [
            node for node in if_node.then_graph.get_op_nodes()
            if node.has('output_id')
        ]
        else_outputs = [
            node for node in if_node.else_graph.get_op_nodes()
            if node.has('output_id')
        ]
        outputs_mapping = {}
        outputs_number = len(if_node.out_ports())

        if outputs_number == 0 and len(
                if_node.out_ports(control_flow=True)) != 0:
            # Some models have If nodes with control flow outputs.
            # This is the shape inference for such If nodes.
            # TODO: need to rethink and redo support for control flow edges in if operation
            for node in if_node.out_nodes(control_flow=True).values():
                node.shape = int64_array([])
            return

        for port_id in if_node.out_ports().keys():
            outputs_mapping[port_id] = {}

        # variables then_contains_fake_outputs/else_contains_fake_outputs contain True
        # if all outputs from then_body/else_body have shape [0]. It means then_body/else_body does not return data
        # and further shape_inference for this branch is not possible.
        # TODO: exclude support fake_outputs from this code when we will support shape_inference with empty tensors

        then_contains_fake_outputs = \
            If.results_mapping_and_finding_fake_outputs(then_outputs, 'then_graph', outputs_mapping)
        else_contains_fake_outputs = \
            If.results_mapping_and_finding_fake_outputs(else_outputs, 'else_graph', outputs_mapping)

        # use_then_shape is True when else_body does not return data or when both bodies do not return data.
        # If use_then_shape is True, If's outputs will have the same shapes as then_body results
        use_then_shape = else_contains_fake_outputs or not then_contains_fake_outputs

        cond_value = if_node.in_port(0).data.get_value()

        for port_id in outputs_mapping:
            then_else_nodes = outputs_mapping[port_id]
            assert 'then_graph' in then_else_nodes.keys(), 'then_graph does not connect with If.out_port[{0}] ' \
                                                           'in {1} node!'.format(port_id, node_name)
            assert 'else_graph' in then_else_nodes.keys(), 'else_graph does not connect with If.out_port[{0}] ' \
                                                           'in {1} node!'.format(port_id, node_name)

            then_shape = then_else_nodes['then_graph'].in_port(
                0).data.get_shape()
            then_value = then_else_nodes['then_graph'].in_port(
                0).data.get_value()
            else_shape = then_else_nodes['else_graph'].in_port(
                0).data.get_shape()
            else_value = then_else_nodes['else_graph'].in_port(
                0).data.get_value()

            if is_fully_defined(cond_value):
                if cond_value.item() is True:
                    if then_value is not None:
                        if_node.out_port(port_id).data.set_value(then_value)
                    else:
                        if_node.out_port(port_id).data.set_shape(then_shape)
                else:
                    if else_value is not None:
                        if_node.out_port(port_id).data.set_value(else_value)
                    else:
                        if_node.out_port(port_id).data.set_shape(else_shape)
            else:
                if then_contains_fake_outputs ^ else_contains_fake_outputs:
                    # if exactly one of the outputs is fake then use another one
                    if_node.out_port(port_id).data.set_shape(
                        then_shape if use_then_shape else else_shape)
                else:
                    # find "intersection" which is equal to the dimension value if corresponding dimensions are equal
                    # and dynamic otherwise
                    assert len(then_shape) == len(else_shape), 'Ranks of "then" and "else" output tensors are ' \
                                                               'different for node {} for port {}'.format(node_name,
                                                                                                          port_id)
                    output_shape = [
                        d1 if is_fully_defined(d1) and is_fully_defined(d2)
                        and d1 == d2 else dynamic_dimension_value
                        for d1, d2 in zip(then_shape, else_shape)
                    ]
                    if_node.out_port(port_id).data.set_shape(output_shape)
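
The final "intersection" rule can be sketched in isolation: when the condition value is dynamic, an output dimension keeps its value only if both branches agree on it, otherwise it becomes dynamic. A hedged sketch using -1 as a stand-in for dynamic_dimension_value:

DYNAMIC = -1  # stand-in for dynamic_dimension_value

def intersect_shapes(then_shape, else_shape):
    assert len(then_shape) == len(else_shape), 'branch outputs must have equal ranks'
    return [d1 if d1 != DYNAMIC and d2 != DYNAMIC and d1 == d2 else DYNAMIC
            for d1, d2 in zip(then_shape, else_shape)]

print(intersect_shapes([1, 224, 224, 3], [1, 320, 320, 3]))  # [1, -1, -1, 3]
print(intersect_shapes([1, DYNAMIC, 3], [1, 5, 3]))          # [1, -1, 3]
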
Example #15
    def infer(node: Node):
        name = node.soft_get('name', node.id)

        connected_in_ports = {
            idx: port
            for idx, port in node.in_ports().items()
            if not port.disconnected()
        }
        assert len(connected_in_ports) == 3 and 0 in connected_in_ports and 1 in connected_in_ports and \
               2 in connected_in_ports, "Gather should have 3 connected input port, but it doesn't for " \
                                        "node: `{}`. Ports: {}".format(name, connected_in_ports)

        data_shape = node.in_port(0).data.get_shape()
        assert data_shape is not None
        indices_shape = node.in_port(1).data.get_shape()
        assert indices_shape is not None
        axis = node.in_port(2).data.get_value()

        # axis of Gather could be accepted as both scalar and 1D tensor
        if isinstance(axis, np.ndarray):
            axis = axis.item()
        assert axis is not None, 'axis input is undefined'

        assert -len(data_shape) <= axis < len(data_shape), \
            'axis must be within interval [-data_rank, data_rank). Instead got axis = {}, data_rank = {} '.\
            format(axis, len(data_shape))

        batch_dims = node.batch_dims
        assert -len(indices_shape) <= batch_dims <= len(indices_shape), \
            'batch_dims must be within interval [-indices_rank, indices_rank]. Instead got batch_dims = {}, ' \
            'indices_rank = {} '.format(batch_dims, len(indices_shape))

        # normalize to positive values
        axis = axis + len(data_shape) if axis < 0 else axis
        batch_dims = batch_dims + len(
            indices_shape) if batch_dims < 0 else batch_dims

        assert np.ma.allequal(data_shape[:batch_dims], indices_shape[:batch_dims]), \
            'data and indices inputs must have equal first dimensions until batch_dims'

        assert batch_dims <= axis, \
            'normalized batch_dims must be <= axis. Instead got batch_dims = {}, axis = {}'.format(batch_dims, axis)

        # we import PermuteInputs locally because it uses Gather inside and we have recursive imports
        from openvino.tools.mo.graph.perm_inputs import PermuteInputs
        PermuteInputs().set_input_permutation(node.in_node(2), node, 'input:0',
                                              'axis')

        batch_dims_range = indices_shape[:batch_dims]
        out_shape = np.concatenate(
            (data_shape[:axis], indices_shape[batch_dims:],
             data_shape[axis + 1:]))

        data_value = node.in_port(0).data.get_value()
        indices_value = node.in_port(1).data.get_value()
        if data_value is not None and indices_value is not None and is_fully_defined(
                indices_value):
            if batch_dims == 0:
                node.out_port(0).data.set_value(
                    np.ma.take(data_value, indices_value, axis))
            else:
                out_value = np.empty(out_shape)
                for batch_idx in np.ndindex(tuple(batch_dims_range)):
                    out_value[batch_idx] = np.ma.take(data_value[batch_idx],
                                                      indices_value[batch_idx],
                                                      axis - batch_dims)
                node.out_port(0).data.set_value(out_value)
        else:
            node.out_port(0).data.set_shape(out_shape)
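
The batch_dims == 0 branch is essentially np.take, and the batched branch applies the same take per batch element. A minimal NumPy-only illustration (no MO helpers involved):

import numpy as np

data = np.array([[10, 11, 12],
                 [20, 21, 22]])
indices = np.array([2, 0])

# batch_dims == 0: gather along axis 1 for every row at once
print(np.take(data, indices, axis=1))
# [[12 10]
#  [22 20]]

# batch_dims == 1: each batch element uses its own index
batched_indices = np.array([2, 0])
print(np.array([np.take(data[b], batched_indices[b], axis=0) for b in range(2)]))
# [12 20]
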
Example #16
def serialize_constants_recursively(graph: Graph, bin_file, data_type,
                                    bin_hashes):
    nodes = sorted(graph.nodes())
    for node in nodes:
        node = Node(graph, node)

        if node.kind == 'data' and node.value is not None and \
                any('bin' in d for u, v, d in graph.out_edges(node.node, data=True)):
            # avoid array copying while taking hash
            blob = node.value if node.value.ndim > 0 else node.value.reshape(
                (1))
            assert is_fully_defined(
                blob), 'The constant value cannot contain dynamic values'
            if isinstance(blob, np.ma.masked_array):
                blob = np.ma.getdata(blob)
            blob_hash = hashlib.sha512(
                np.ascontiguousarray(blob).view(np.uint8)).hexdigest()

            if blob_hash in bin_hashes and np.array_equal(
                    blob, bin_hashes[blob_hash]['blob']):
                graph.node[
                    node.node]['offset'] = bin_hashes[blob_hash]['offset']
                graph.node[node.node]['size'] = bin_hashes[blob_hash]['size']
                graph.node[
                    node.node]['blob_precision'] = np_data_type_to_precision(
                        blob.dtype)
                update_offset_size_in_const_node(node)
            else:
                start = bin_file.tell()
                blob.tofile(bin_file)
                end = bin_file.tell()

                graph.node[node.node]['offset'] = start
                graph.node[node.node]['size'] = end - start
                graph.node[
                    node.node]['blob_precision'] = np_data_type_to_precision(
                        blob.dtype)

                bin_hashes[blob_hash] = {
                    'offset': graph.node[node.node]['offset'],
                    'size': graph.node[node.node]['size'],
                    'blob': blob
                }
                update_offset_size_in_const_node(node)

                assert (blob.dtype.itemsize * np.prod(node.shape) == end - start) or \
                       node.has_valid('force_shape'), node.attrs()

            log.debug(
                "Detected binary for graph: '{}', node: '{}', id: {}, shape: '{}', offset: '{}', size: '{}'"
                .format(graph, node.soft_get('name'), node.id, node.shape,
                        node.offset, node.size))

    # separate loop for sub-graph to dump them after all blobs for more natural blob offset ordering
    # TODO: implement strict order for all blobs in the entire IR
    for node in nodes:
        node = Node(graph, node)
        # Dump blobs recursively if sub-graphs are present in the node
        if node.has_valid('sub_graphs'):
            for sub_graph_attr_name in node.sub_graphs:
                sub_graph = node[sub_graph_attr_name]
                serialize_constants_recursively(sub_graph, bin_file, data_type,
                                                bin_hashes)
Example #17
    def shape(self, shape):
        self._is_shape_static = is_fully_defined(shape)
        self._shape = tuple(shape)
Example #18
    def replace_pattern(self, graph: Graph, match: dict):
        node = match['node']
        node_name = node.soft_get('name', node.id)

        if 2 in node.in_ports() and not node.in_port(2).disconnected():
            # Third input represents output shape. Cutting its value according to scheme:
            # [N, C, spatial_dim_0, ..., spatial_dim_n] -> [spatial_dim_0, ..., spatial_dim_n]
            in_rank = node.in_port(0).data.get_shape().size

            shape_src = node.in_port(2).get_source()
            node.in_port(2).disconnect()

            ss_0 = create_op_with_const_inputs(
                graph, StridedSlice, {
                    1: mo_array([2], dtype=np.int32),
                    2: mo_array([in_rank], dtype=np.int32),
                    3: mo_array([1], dtype=np.int32)
                }, {
                    'name': node_name + '/ss_0_port',
                    'begin_mask': mo_array([1], dtype=np.int32),
                    'end_mask': mo_array([0], dtype=np.int32),
                    'new_axis_mask': mo_array([0], dtype=np.int32),
                    'shrink_axis_mask': mo_array([0], dtype=np.int32),
                    'ellipsis_mask': mo_array([0], dtype=np.int32)
                })

            shape_src.connect(ss_0.in_port(0))
            ss_0.out_port(0).connect(node.in_port(2))

            # Specification: *padding amount* is deduced from relation of input and output spatial shapes
            del node['pad']

        elif node.has_valid('original_output_spatial_shape'):
            # node had fixed output spatial shape set in original framework, so we restore it here
            const = Const(
                graph, {
                    'value': int64_array(node.original_output_spatial_shape),
                    'name': node_name + '/original_spatial_shape'
                }).create_node()
            node.add_input_port(2, skip_if_exist=True)
            const.out_port(0).connect(node.in_port(2))

            # Specification: *padding amount* is deduced from relation of input and output spatial shapes
            del node['pad']

        group = node.soft_get('group', 1)

        if group != 1:
            assert group > 1

            weights_shape = node.in_port(1).data.get_shape()
            assert weights_shape is not None
            I = node.in_port(0).data.get_shape()[1]
            assert I % group == 0
            assert node.output % group == 0

            new_shape = shape_array(
                [group, I // group, node.output // group, *weights_shape[2:]])

            assert not is_fully_defined(new_shape) or not is_fully_defined(weights_shape) or \
                   np.prod(weights_shape) == np.prod(new_shape), 'Initial weights shape {}, grouped weights shape {}' \
                                                                 ''.format(weights_shape, new_shape)
            reshape = create_op_node_with_second_input(
                graph, Reshape, new_shape, {'override_output_shape': True},
                node.in_port(1).get_source().node)

            node.in_port(1).get_connection().set_source(reshape.out_port(0))

            node['type'] = 'GroupConvolutionBackpropData'
        else:
            node['type'] = 'ConvolutionBackpropData'
Example #19
def resolve_convolution_with_group(node: Node, group: int, ir_version: str):
    node_name = node.soft_get('name', node.id)
    input_shape = node.in_port(0).data.get_shape()
    assert len(input_shape) in [3, 4, 5]

    weights_shape = node.in_port(1).data.get_shape()
    assert weights_shape is not None
    assert len(weights_shape) in [3, 4, 5]
    group = int64_array(group).item()
    assert weights_shape[0] % group == 0

    if ir_version == 'V7':
        if weights_shape[0] == node.output:
            # weights are already in [G*O I X Y] format
            return

        num_spatial_dims = len(weights_shape[2:])
        # Reshape has special_zero=True, if zeros are set then original shapes are copied
        zeros_to_copy_spatial_dims = np.zeros(num_spatial_dims)
        new_shape = shape_array([node.output, -1, *zeros_to_copy_spatial_dims])
        reshape = create_op_node_with_second_input(
            node.graph, Reshape, new_shape, {'override_output_shape': True})
    elif ir_version == 'V10':
        I = input_shape[1]
        if is_fully_defined(weights_shape[2:]) and is_fully_defined(I):
            new_shape = shape_array(
                [group, node.output // group, I // group, *weights_shape[2:]])
            assert np.prod(weights_shape) == np.prod(new_shape), 'Initial weights shape {}, grouped weights shape {}' \
                                                                 ''.format(weights_shape, new_shape)
            reshape = create_op_node_with_second_input(
                node.graph, Reshape, new_shape, {
                    'override_output_shape': True,
                    'special_zero': True
                })
        else:
            # if the weights or input channel dimension is dynamic, we need to compute new_shape in a new subgraph
            weights_node = node.in_port(1).get_source().node
            input_node = node.in_port(0).get_source().node

            weights_shape = Shape(node.graph, {
                'name': node_name + '/ShapeOfWeights'
            }).create_node()
            weights_shape.in_port(0).connect(weights_node.out_port(0))

            weights_spatial_shape = create_op_with_const_inputs(
                node.graph,
                StridedSlice,
                port_value_dict={
                    1: int64_array([2]),
                    2: int64_array([-1])
                },
                op_attrs={
                    'begin_mask': [1],
                    'end_mask': [0],
                    'new_axis_mask': [0],
                    'shrink_axis_mask': [0],
                    'ellipsis_mask': [0]
                },
                input_node=weights_shape)

            const_part_of_shape = Const(
                node.graph,
                attrs=dict(name=node_name + '/GroupsAndOutputChannelsSize',
                           value=int64_array([group, node.output // group
                                              ]))).create_node()

            input_shape_node = Shape(node.graph, {
                'name': node_name + '/ShapeOfInput'
            }).create_node()
            input_shape_node.in_port(0).connect(input_node.out_port(0))

            input_num_channels = create_op_with_const_inputs(
                node.graph,
                Gather,
                port_value_dict={
                    1: int64_array([1]),
                    2: int64_array(0)
                },
                op_attrs={'name': node_name + '/GatherInputNumChannels'},
                input_node=input_shape_node)

            # input channels number divided by the number of groups to align the weights shape into [GROUPS C_OUT C_IN X Y]
            C_IN = create_op_with_const_inputs(
                node.graph,
                Div,
                port_value_dict={1: int64_array(group)},
                op_attrs={'name': node_name + '/Div'},
                input_node=input_num_channels)

            new_shape_node = Concat(node.graph, {
                'axis': 0,
                'in_ports_count': 3
            }).create_node()
            new_shape_node.in_port(0).connect(const_part_of_shape.out_port(0))
            new_shape_node.in_port(1).connect(C_IN.out_port(0))
            new_shape_node.in_port(2).connect(
                weights_spatial_shape.out_port(0))
            reshape = Reshape(node.graph, {
                'override_output_shape': True,
                'special_zero': True
            }).create_node()
            reshape.in_port(1).connect(new_shape_node.out_port(0))

        del node['group']
        node['type'] = 'GroupConvolution'
    else:
        raise Error("Unknown IR version: {}".format(ir_version))

    node.in_port(1).get_connection().insert_node(reshape)
Example #20
    def test_is_fully_defined(self, data, result):
        self.assertEqual(is_fully_defined(data), result)
Example #21
    def infer(node: Node):
        name = node.soft_get('name', node.id)

        connected_inputs = {
            idx: port
            for idx, port in node.in_ports().items()
            if not port.disconnected()
        }
        assert len(connected_inputs) == 2 and all([i in connected_inputs for i in range(2)]), \
            "Reshape should have 2 connected input ports, but it doesn't for node: `{}`. Ports: {}" \
            "".format(name, connected_inputs)

        input_shape = node.in_port(0).data.get_shape()
        assert input_shape is not None

        new_shape = node.in_port(1).data.get_value()
        assert new_shape is not None, 'Dynamic Reshape second input is not supported. Node {}'.format(
            name)

        assert np.argwhere(new_shape == -1).size <= 1, \
            'Reshape second input should not have several `-1` values set. ' \
            'Node: {}, reshape second input value {}'.format(name, new_shape)

        num_of_input_elements = np.prod(input_shape)
        num_of_output_elements = 1
        for index, x in enumerate(new_shape):
            if x is dynamic_dimension:
                num_of_output_elements = dynamic_dimension_value
            elif x == 0 and node.has_and_set('special_zero'):
                if input_shape[index] is not dynamic_dimension:
                    num_of_output_elements *= input_shape[index]
            elif x != -1:
                num_of_output_elements *= x

        # input_shape = [dynamic, 5, 6], new_shape = [0, -1] => output_shape [dynamic, 30]
        # marker that there are no dynamic input dimensions or all of them are copied with the "0" magic value
        all_dynamic_dimension_are_copied = True
        if not is_fully_defined(input_shape):
            for index, x in enumerate(input_shape):
                if x is dynamic_dimension:
                    if index >= len(new_shape) or new_shape[index] != 0:
                        all_dynamic_dimension_are_copied = False

        undefined_dim = dynamic_dimension
        if num_of_output_elements is not dynamic_dimension and all_dynamic_dimension_are_copied and \
                is_fully_defined(new_shape):
            undefined_dim = num_of_input_elements // num_of_output_elements
        output_shape = []
        for index, x in enumerate(new_shape):
            if x == 0 and node.has_and_set('special_zero'):
                output_shape.append(input_shape[index])
            elif x == -1:
                output_shape.append(undefined_dim)
            else:
                output_shape.append(x)

        # even if the new_shape contains some dynamic values we can calculate the actual value by deducing it from the
        # input shape if it is static: input_shape = [5, 3, 8], new_shape = [4, d] => output_shape = [4, 30]
        if is_fully_defined(input_shape) and not is_fully_defined(new_shape):
            dynamic_indices = np.argwhere(
                [item is dynamic_dimension for item in new_shape])
            num_of_output_elements = 1
            if dynamic_indices.size == 1:
                for index, x in enumerate(new_shape):
                    if x == 0 and node.has_and_set('special_zero'):
                        num_of_output_elements *= input_shape[index]
                    elif x is not dynamic_dimension and x != -1:
                        num_of_output_elements *= x
            assert num_of_input_elements % num_of_output_elements == 0, \
                'Incorrect number of output elements deduced for node {}: '.format(name)
            output_shape[dynamic_indices[0]
                         [0]] = num_of_input_elements // num_of_output_elements

        assert not is_fully_defined(input_shape) or not is_fully_defined(output_shape) or \
               np.prod(input_shape) == np.prod(output_shape), \
               "Number of elements in input {} and output {} of reshape node {} mismatch" \
               "".format(input_shape, output_shape, name)

        PermuteInputs().set_input_permutation(node.in_node(1), node,
                                              'output:0', 'shape')

        if node.in_port(0).data.get_value() is not None and is_fully_defined(
                output_shape):
            node.out_port(0).data.set_value(
                node.in_port(0).data.get_value().reshape(output_shape))
        else:
            node.out_port(0).data.set_shape(output_shape)
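
The core of the shape deduction above: a 0 (with special_zero) copies the matching input dimension and a single -1 absorbs the remaining elements. A compact sketch of that arithmetic for static shapes; deduce_reshape is a hypothetical helper, not the MO implementation:

import numpy as np

def deduce_reshape(input_shape, new_shape, special_zero=True):
    out = [input_shape[i] if d == 0 and special_zero else d
           for i, d in enumerate(new_shape)]
    if -1 in out:
        known = int(np.prod([d for d in out if d != -1]))
        out[out.index(-1)] = int(np.prod(input_shape)) // known
    return out

print(deduce_reshape([2, 3, 8], [0, -1]))  # [2, 24]
print(deduce_reshape([5, 3, 8], [4, -1]))  # [4, 30]
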
Example #22
    def infer(node: Node):
        """
        Infers shape of convolution node as it is done in ONNX.
        It is very similar to one that Caffe does, but slightly different.
        We made a complete fork of this function because they are supposed to be
        supported differently by different people.
        Args:
            node: graph convolution node
        """
        input_shape = node.in_port(0).data.get_shape()
        if input_shape is None:
            raise Error('Input data shape is None for node {}'.format(
                node.soft_get('name', node.id)))

        # bias_term cannot be deduced earlier for frameworks that represent
        # convolution weights/biases as regular inputs; so the number of inputs
        # is checked here to restore the correct value of bias_term and keep
        # the rest of the code unchanged. It will be used after we merge
        # several infer functions for convolution in different FWs into a single one.
        if not node.has_valid('bias_term'):
            node['bias_term'] = len(node.in_nodes()) == 3

        weights_index = node.weights_index if node.has_valid(
            'weights_index') else 1
        # Reshape weights kernel to original shape
        # In case of caffe or MXNet framework, values for weights have no structured shape like OIHW
        # so we have to reshape weights to normal shape
        # For this case, Convolution node should have attribute reshape_kernel = True
        if node.has_valid('reshape_kernel') and node.reshape_kernel:
            if not (node.has_valid('output') and node.has_valid('channel_dims')
                    and node.has_valid('group')
                    and node.has_valid('kernel_spatial')):
                log.error(
                    'Cannot reshape kernel because not all required attrs are set for node {}'
                    .format(node.id))
                return
            # layout for Convolution weights is OIHW
            kernel_shape = shape_array([
                node.output,
                input_shape[node.channel_dims].item() / node.group, *[
                    node.kernel_spatial[i]
                    for i in range(len(node.kernel_spatial))
                ]
            ])
            if node.type == 'Deconvolution':  # layout for Deconvolution weights is IOHW
                kernel_shape[[0, 1]] = kernel_shape[[1, 0]]

            if is_fully_defined(
                    kernel_shape) and np.prod(kernel_shape) != np.prod(
                        node.in_node(weights_index).value.shape):
                log.error(
                    "Size of weights {} does not match kernel shape: {}\n"
                    "".format(np.prod(node.in_node(weights_index).value.shape),
                              kernel_shape) +
                    "    Possible reason is wrong channel number in input shape\n"
                )
                raise Error("Cannot reshape weights to kernel shape")

            if not is_fully_defined(kernel_shape):
                num_undefined = np.count_nonzero(kernel_shape.mask)  # pylint: disable=no-member
                if num_undefined > 1:
                    raise Error(
                        'Too many undefined dimensions of the kernel shape for node {}. Use --input_shape '
                        'command line parameter to specify model input shapes'.
                        format(node.soft_get('name', node.id)))
                kernel_size = np.prod(node.in_node(weights_index).value.shape)
                # calculate undefined dimension using fully defined shape of the weights input and known kernel_shape
                # dimensions
                kernel_shape[np.where(kernel_shape == np.ma.masked)[0]
                             [0]] = kernel_size // np.prod(kernel_shape)

            node.in_node(weights_index).shape = shape_array(kernel_shape)
            node.in_node(weights_index).value = np.reshape(
                node.in_node(weights_index).value, kernel_shape)
            node.reshape_kernel = False

        # Pass weights shape to node attribute kernel_shape
        kernel_shape = node.in_node(weights_index).shape
        node['kernel_shape'] = kernel_shape
        # Calculate kernel_spatial_idx and spatial_dims if it is not specified
        # It is necessary for ONNX because convolution can be 1D/2D/3D
        if not node.has_valid('kernel_spatial_idx'):
            node['kernel_spatial_idx'] = np.delete(
                [x for x in range(len(kernel_shape))],
                (node.input_feature_channel, node.output_feature_channel))

        if not node.has_valid('spatial_dims'):
            node['spatial_dims'] = np.delete(
                [x for x in range(len(input_shape))],
                (node.channel_dims[0], node.batch_dims[0]))

        node['kernel_spatial'] = kernel_shape[node.kernel_spatial_idx]

        if not node.has_valid('output'):
            # restore the number of output feature maps from the second argument that is weights
            if node.type in [
                    'Convolution', 'Deconvolution', 'DeformableConvolution',
                    'BinaryConvolution'
            ]:
                node['output'] = kernel_shape[node.output_feature_channel]
            else:
                raise Error(
                    'Convolution infer function was called for a node {} with unsupported type {}',
                    node.soft_get('name'), node.type)

        # Set default values for dilation, strides and pads if not set
        if not node.has_valid('dilation'):
            node['dilation'] = np.full([len(input_shape)], 1, dtype=np.int64)
        if not node.has_valid('stride'):
            node['stride'] = np.full([len(input_shape)], 1, dtype=np.int64)
        if not node.has_valid('pad'):
            node['pad'] = int64_array([[0, 0]] * len(input_shape))
        node['pad_spatial_shape'] = node.pad[node.spatial_dims]

        if not node.has_valid('output_padding'):
            node['output_padding'] = np.full([len(input_shape)],
                                             0,
                                             dtype=np.int64)

        if node.has_valid('output_padding') and len(input_shape) > len(
                node['output_padding']):
            output_padding = np.zeros(len(input_shape), dtype=np.int64)
            for i in range(len(node['output_padding'])):
                output_padding[i] = node['output_padding'][i]
            node['output_padding'] = output_padding

        input_spatial_shape = input_shape[node.spatial_dims]
        stride_spatial_shape = node.stride[node.spatial_dims]

        kernel_extent = node.dilation[node.spatial_dims] * (
            node.kernel_spatial - 1) + 1
        # TensorFlow always has auto_pad attribute that can be either valid or same_upper
        # In ONNX auto_pad attribute is deprecated but appears in some models (could be valid, same_upper or same_lower)
        # Caffe does not use the auto_pad attribute
        if node.has_valid(
                'auto_pad'
        ) and node.auto_pad != 'explicit' and not node.has_valid(
                'output_spatial_shape'):
            node['pad_spatial_shape'], node[
                'output_spatial_shape'] = tf_window_op_pad_infer(
                    input_spatial_shape, kernel_extent, stride_spatial_shape,
                    node.auto_pad, node.type == 'Deconvolution')

            pad = np.zeros((len(input_shape), 2), dtype=np.int64)
            pad[node.spatial_dims] = node.pad_spatial_shape
            node.pad = pad
        else:
            pad_spatial_shape = np.add.reduce(node.pad_spatial_shape, axis=1)
            if node.type in ('Convolution', 'BinaryConvolution'):
                float_spatial = Convolution.calc_convolution(
                    input_spatial_shape, stride_spatial_shape,
                    pad_spatial_shape, kernel_extent)
                node['output_spatial_shape'] = shape_array(float_spatial)
            elif node.type == 'Deconvolution':
                # In case of given output_spatial_shape we calculate pads spatial
                if node.has_valid('output_spatial_shape'):
                    if node.has_valid('get_pad'):
                        node['pad'] = node.get_pad(node, input_shape,
                                                   kernel_shape)
                    else:
                        log.debug(
                            'Can\'t calculate paddings due to missing lambda get_pad in {} node'
                            .format(node.id))
                        return
                else:
                    output_padding = node.output_padding[
                        node.spatial_dims] if node.has_valid(
                            'output_padding') else None
                    if output_padding is not None and any(output_padding):
                        pad_spatial_shape -= output_padding
                        for dim in range(len(pad_spatial_shape)):
                            node.pad_spatial_shape[dim][
                                1] -= pad_spatial_shape[dim]

                    float_spatial = Convolution.calc_deconvolution(
                        node, input_spatial_shape, pad_spatial_shape,
                        kernel_extent)
                    node['output_spatial_shape'] = shape_array(float_spatial)
            elif node.type == 'DeformableConvolution':
                # get the output spatial shape from the second input with offsets
                node['output_spatial_shape'] = int64_array(
                    [node.in_node(1).shape[2:4]])
            else:
                raise Error('Unsupported layer type "{}"'.format(node.type))

        # For cases when group attribute wasn't set in extractor we should specify get_group attribute
        # this attribute should store lambda node: ... (check tf convolution extractor)
        if node.has_valid('get_group'):
            node['group'] = node.get_group(node)
        output_shape = shape_array(
            [dynamic_dimension_value for _ in range(len(input_shape))])
        output_shape[node.batch_dims] = input_shape[node.batch_dims]  # pylint: disable=unsupported-assignment-operation
        output_shape[node.spatial_dims] = node.output_spatial_shape  # pylint: disable=unsupported-assignment-operation

        # For cases when output attribute wasn't set in extractor we should specify get_output_feature_dim attribute
        # this attribute should store lambda node: ... (check tf convolution extractor)
        if node.has_valid('get_output_feature_dim'):
            node['output'] = node.get_output_feature_dim(node)
        output_shape[node.channel_dims] = node.output  # pylint: disable=unsupported-assignment-operation
        node['output_shape'] = output_shape

        node.out_port(0).data.set_shape(output_shape)

        # bin attribute is used for pre-processing, but it will be deleted in BlobNormalizer transformation
        # and the blobs (weights, biases) will be represented as inputs to the node
        mark_input_bins(
            node, start_port=1 if node.type != 'DeformableConvolution' else 2)
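        # annotate the weights node with which of its dimensions are spatial and which are
        # the input/output feature channels, so later layout transformations can permute them correctly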
        assign_dims_to_weights(node.in_node(weights_index),
                               node.kernel_spatial_idx,
                               node.input_feature_channel,
                               node.output_feature_channel, len(kernel_shape))

        PermuteAttrs.create_permute_attrs(
            node,
            attrs=[
                ('pad', 'input:0'),
                ('stride', 'input:0'),
                ('dilation', 'input:0'),
                ('output_shape', 'input:0'),
                ('batch_dims', 'input:0'),
                ('channel_dims', 'input:0'),
                ('spatial_dims', 'input:0'),
                ('kernel_shape', 'input:{}'.format(weights_index)),
                ('kernel_spatial_idx', 'input:{}'.format(weights_index)),
                ('input_feature_channel', 'input:{}'.format(weights_index)),
                ('output_feature_channel', 'input:{}'.format(weights_index)),
            ])

        # This permutation is needed to convert Conv weights from the original TF [H, W, C_IN, C_OUT] layout
        # into the IE [C_OUT, C_IN, H, W] layout. For other nodes in the weights subgraph, permutations must be
        # turned off by marking them with MarkSubGraphsWithCorrectLayout, even if the graph layout is NCHW.
        PermuteAttrs.set_permutation(
            node.in_node(weights_index), node,
            node.soft_get('get_weights_permute', None))
        PermuteInputs().set_input_permutation(node.in_node(weights_index),
                                              node,
                                              'input:{}'.format(weights_index),
                                              'transpose')
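
For reference, here is a minimal standalone sketch of the 'same_upper'-style padding arithmetic that the auto_pad branch above relies on. The helper name same_upper_pad_infer and the exact rounding are illustrative assumptions, not the Model Optimizer's tf_window_op_pad_infer implementation.

import numpy as np

def same_upper_pad_infer(input_spatial, window, stride):
    # hypothetical illustration: the output size rounds up, ceil(input / stride)
    output = np.ceil(input_spatial / stride).astype(np.int64)
    # total padding needed so the sliding window covers the whole input
    total_pad = np.maximum((output - 1) * stride + window - input_spatial, 0)
    pad_begin = total_pad // 2
    pad_end = total_pad - pad_begin  # the odd extra pixel goes to the end for 'same_upper'
    return np.stack((pad_begin, pad_end), axis=1), output

pads, out = same_upper_pad_infer(np.array([224, 224]), np.array([7, 7]), np.array([2, 2]))
# pads == [[2, 3], [2, 3]], out == [112, 112]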
Example #23
0
    def infer(node: Node):
        node_name = node.soft_get('name', node.id)
        assert len([port for port in node.in_ports().values() if not port.disconnected()]) == 3, \
            "Select operation must have 3 inputs: 'condition', 'then' and 'else' tensors for node {}".format(node_name)

        condition_value = node.in_port(0).data.get_value()
        condition_shape = node.in_port(0).data.get_shape()
        resulting_tensors = [
            node.in_port(1).data.get_value(),
            node.in_port(2).data.get_value()
        ]

        a_shape = node.in_port(1).data.get_shape()
        b_shape = node.in_port(2).data.get_shape()
        broadcast_rule = node.soft_get('auto_broadcast', 'numpy')

        if broadcast_rule == 'numpy':
            msg = "In Select node '{}' condition and then/else shapes must be broadcastable. " \
                  "But instead got: cond_shape={}, then_shape={}, else_shape={}".format(
                    node_name, condition_shape, a_shape, b_shape)

            output_shape = bi_directional_shape_broadcasting(a_shape, b_shape)
            assert output_shape is not None, msg

            output_is_scalar = len(output_shape) == 0

            # If Select was created from a TF Where operation, then a 1D condition must have the same size
            # as the 0-th dimension of output_shape. This requirement differs from numpy compatibility,
            # but by appending ones to the condition shape we achieve numpy compatibility, as in the
            # SelectBroadcast.py transformation.
            if node.has_valid('format') and node['format'] == 'tf' and len(condition_shape) == 1:
                # https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/ops/array_ops.py#L4596-L4598
                msg_tf = "In Select node '{}' if 'condition' is a 1D tensor then it's size " \
                         "must be matching with the first dimension of then/else branches. " \
                         "But instead got: cond_shape={}, then_shape={}, else_shape={}".format(
                            node_name, condition_shape, a_shape, b_shape)

                # check equality only if both values are non-dynamic
                if is_fully_defined(condition_shape[0]) and not output_is_scalar \
                        and is_fully_defined(output_shape[0]):
                    assert condition_shape[0] == output_shape[0], msg_tf
                ones_shape = len(output_shape) if output_is_scalar else len(output_shape) - 1
                condition_shape = np.concatenate(
                    (condition_shape, np.ones(ones_shape, dtype=np.int64)))

            output_shape = bi_directional_shape_broadcasting(
                output_shape, condition_shape)
            assert output_shape is not None, msg

        elif broadcast_rule == 'pdpd':
            # todo: add pdpd broadcasting rule
            # note that additionally to output_shape resulting_tensors must be broadcasted as well
            raise Error("PDPD broadcasting rule is not implemented yet")
        else:  # broadcasting is not allowed
            assert compatible_shapes(a_shape, b_shape) and compatible_shapes(condition_shape, a_shape), \
                'In node \'{}\' for Select operation when broadcasting is off all inputs must be of the same shape. ' \
                'But instead got: cond_shape={}, then_shape={}, else_shape={}'.format(
                    node_name, condition_shape, a_shape, b_shape)
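            # the shapes are compatible here, so prefer the statically known dimension from either branch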
            output_shape = shape_array([
                i if i is not dynamic_dimension else j
                for i, j in zip(a_shape, b_shape)
            ])

        node.out_port(0).data.set_shape(output_shape)

        if condition_value is not None:
            if is_fully_defined(condition_value) and np.all(
                    condition_value == condition_value.item(0)):
                # In some graphs the Select condition is always True (or always False) and the branch that is
                # never selected is None. Using np.where in such cases would make the dtype of output_value
                # 'object' (a non-numeric type), and subsequent numpy operations on such tensors would fail.
                output_value = resulting_tensors[not bool(condition_value.item(0))]
                if output_value is None:
                    return
                if broadcast_rule == 'numpy':
                    output_value = bi_directional_broadcasting(
                        output_value, output_shape)
                elif broadcast_rule == 'pdpd':
                    # todo: add pdpd broadcasting rule
                    raise Error(
                        "PDPD broadcasting rule is not implemented yet")

                node.out_port(0).data.set_value(output_value)
            elif resulting_tensors[0] is not None and resulting_tensors[1] is not None:
                output_value = np.ma.where(condition_value,
                                           resulting_tensors[0],
                                           resulting_tensors[1])
                node.out_port(0).data.set_value(output_value)
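
The 'tf' branch above appends trailing ones to a 1D condition shape so that numpy broadcasting reproduces TF Where semantics, where a 1D condition selects whole rows. Below is a self-contained numpy sketch of the same idea; it is purely illustrative and not Model Optimizer code.

import numpy as np

condition = np.array([True, False, True])        # shape (3,), as produced by a TF Where-style Select
then_branch = np.arange(12).reshape(3, 4)        # shape (3, 4)
else_branch = -np.ones((3, 4), dtype=np.int64)

# append trailing singleton dimensions so (3,) becomes (3, 1) and broadcasts against (3, 4)
cond_broadcastable = condition.reshape(condition.shape + (1,) * (then_branch.ndim - 1))
result = np.where(cond_broadcastable, then_branch, else_branch)
# rows 0 and 2 are taken from then_branch, row 1 from else_branch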