Example No. 1
    def infer(node: Node):
        assert len(node.in_nodes()) == 4

        # check that the shape value, which is needed for shape inference, is defined
        shape = node.in_node(2)
        assert shape.value is not None and shape.value.size == 2, \
            "SparseFillEmptyRows is supported only with constant shape value"

        shape_value = int64_array(shape.value)

        # check that default value is scalar
        default_value = node.in_node(3)
        assert default_value.shape is not None and len(default_value.shape) == 0, \
            "Default value for SparseFillEmptyRows must be scalar"

        if node.is_out_port_connected(0):  # set a shape for output indices
            if is_fully_defined(shape_value):
                node.out_port(0).data.set_shape([np.prod(shape_value), 2])
            else:
                node.out_port(0).data.set_shape([dynamic_dimension_value, 2])
        if node.is_out_port_connected(1):  # set a shape for output values
            if is_fully_defined(shape_value):
                node.out_port(1).data.set_shape([np.prod(shape_value)])
            else:
                node.out_port(1).data.set_shape([dynamic_dimension_value])
        if node.is_out_port_connected(2):  # set a shape for empty row indicator
            node.out_port(2).data.set_shape([shape_value[0]])
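A minimal sketch of the shape arithmetic performed above, assuming a constant dense shape of [3, 4] (example values, not taken from the snippet):

import numpy as np

dense_shape = np.array([3, 4], dtype=np.int64)
total = int(np.prod(dense_shape))    # upper bound on the number of filled entries
print([total, 2])                    # port 0, indices shape: [12, 2]
print([total])                       # port 1, values shape: [12]
print([int(dense_shape[0])])         # port 2, empty row indicator: [3]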
Example No. 2
def roipooling_infer(node: Node):
    """
    Sets shape of output node according specified parameters input blobs and node
    Sets number from the first input blob, channels from the second one, height and width are specified
    Parameters
    ----------
    node
    """
    shapes = [node.in_node(i).shape for i in range(len(node.in_nodes()))]
    if any(s is None for s in shapes):
        return
    if len(node.in_nodes()) == 4:  # TensorFlow case of CropAndResize operation
        crop_size = node.in_node(3).value
        if crop_size is None:
            log.error('The ROIPooling size is not known for node {}'.format(
                node.soft_get('name')))
            return
        if not isinstance(crop_size, np.ndarray) or len(crop_size) != 2:
            log.error('The ROIPooling size should have 2 elements for node {}'.format(
                node.soft_get('name')))
            return
        node.pooled_h = crop_size[0]
        node.pooled_w = crop_size[1]
        node.graph.remove_edge(node.in_node(3).id, node.id)
        node.graph.remove_edge(node.in_node(2).id, node.id)

    layout = node.graph.graph['layout']
    assert len(layout) == 4

    node.out_port(0).data.set_shape(
        shape_for_layout(layout,
                         batch=shapes[1][get_batch_dim(layout, 4)],
                         features=shapes[0][get_features_dim(layout, 4)],
                         height=node.pooled_h,
                         width=node.pooled_w))
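For reference, a hedged stand-in for what shape_for_layout assembles here, assuming an NCHW graph, a feature map of shape [1, 256, 38, 38] and 300 ROIs (all example values; shape_for_layout_sketch is a hypothetical simplification, not the MO helper):

import numpy as np

def shape_for_layout_sketch(layout, batch, features, height, width):
    # simplified stand-in for the MO helper, NCHW/NHWC only
    if layout == 'NCHW':
        return np.array([batch, features, height, width], dtype=np.int64)
    return np.array([batch, height, width, features], dtype=np.int64)

feature_map_shape = [1, 256, 38, 38]  # shapes[0]
rois_shape = [300, 5]                 # shapes[1]
print(shape_for_layout_sketch('NCHW', rois_shape[0], feature_map_shape[1], 6, 6))
# [300 256   6   6]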
Example No. 3
    def infer(node: Node):
        assert len(node.in_nodes()) == len(__class__.inputs) + len(
            __class__.extra_inputs)

        for axis in ['concat_axis', 'split_axis']:
            axis_node = __class__.extra_inputs.index(axis) + len(
                __class__.inputs)
            assert node.in_node(axis_node).has_valid('value')
            assert node.in_node(axis_node).value == 1

        shift_const = node.in_node(
            __class__.extra_inputs.index('shift_const') +
            len(__class__.inputs))
        assert shift_const.has_valid('value')
        shift_const = shift_const.value
        assert shift_const.ndim == 0  # expect scalar value
        node['shift_const'] = shift_const.copy()

        weights_node = node.in_node(__class__.inputs.index('weights'))
        biases_node = node.in_node(__class__.inputs.index('biases'))

        assert weights_node.has_valid('value')
        assert biases_node.has_valid('value')

        # Restore original infer function (to avoid calling previous code twice) and call it
        node.infer = node.old_infer
        node.infer(node)
Example No. 4
    def infer(node: Node):
        if node.has_and_set('extra_inputs'):
            assert len(node.in_nodes()) == 8
        else:
            assert len(node.in_nodes()) == 5
        assert len(node.out_nodes()) in [1, 2]

        hidden_shape = node.in_node(1).shape.copy()
        cell_shape = node.in_node(2).shape.copy()

        mark_input_bins(node, start_port=3)
        node.out_node(0).shape = hidden_shape
        if len(node.out_nodes()) == 2:
            node.out_node(1).shape = cell_shape

        hidden_size = hidden_shape[1]

        if node.has_valid('hidden_size'):
            if node.hidden_size != hidden_size:
                raise Error(
                    "Input shape {} for hidden size doesn't match pre-defined hidden_size in node {}"
                    .format(node.in_node(1).shape, node.soft_get('name')))
        else:
            node['hidden_size'] = hidden_size

        assert cell_shape[1] == hidden_size

        input_shape = node.in_node(0).shape
        assert input_shape is not None
        assert compatible_dims(hidden_shape[0], cell_shape[0]) and \
               compatible_dims(cell_shape[0], input_shape[0]), 'States are not broadcast-able by batch for node {}' \
                                                               ''.format(node.soft_get('name', node.id))
Example No. 5
def batch_norm_4_infer(node: Node):
    copy_shape_infer(node)
    mark_input_bins(node, ['weights', 'biases', 'mean', 'variance'])
    if node.has('fix_gamma') and node.fix_gamma:
        # go to the first input (weights) and set all of its elements to 1
        node.in_node(1).value = np.full_like(node.in_node(1).value,
                                             1,
                                             dtype=np.float32)
Example No. 6
    def replace_op(self, graph: Graph, node: Node):
        matmul = MatMul(graph, dict(name=node.name, transpose_b=True)).create_node([node.in_node(0), node.in_node(1)])

        # Bias
        if len(node.in_nodes()) > 2:
            matmul = Add(graph, dict(name=node.name + '/bias')).create_node([matmul, node.in_node(2)])

        return [matmul.id]
Example No. 7
    def infer(node: Node):
        node_name = node.soft_get('name', node.id)

        input_shape = node.in_port(0).data.get_shape()
        input_value = node.in_port(0).data.get_value()
        target_shape = node.in_port(1).data.get_value()
        assert target_shape is not None, 'Output shape is not defined for node "{}"'.format(
            node_name)
        assert node.has_and_set(
            'mode'), 'Broadcasting mode is not defined for node "{}"'.format(
                node_name)

        PermuteInputs().set_input_permutation(node.in_node(1), node,
                                              'output:0', 'shape')

        if input_value is not None and not node.has_and_set('stop_value_propagation') and \
                is_fully_defined(target_shape):
            if node.mode == 'numpy':
                node.out_port(0).data.set_value(
                    uni_directional_broadcasting(input_value, target_shape))
            elif node.mode == 'bidirectional':
                node.out_port(0).data.set_value(
                    bi_directional_broadcasting(input_value, target_shape))
            elif node.mode == 'explicit':
                axes_mapping = node.in_port(2).data.get_value()
                assert axes_mapping is not None, 'Broadcast(mode="explicit") with dynamic axes_mapping input ' \
                                                 'is not supported. Node: `{}`'.format(node_name)
                PermuteInputs().set_input_permutation(node.in_node(2), node,
                                                      'output:0', 'axis')
                axes_mapping = node.in_port(2).data.get_value()
                node.out_port(0).data.set_value(
                    explicit_broadcasting(input_value, target_shape,
                                          axes_mapping))
            else:
                raise Error('The node "{}" has unsupported mode "{}"'.format(
                    node_name, node.mode))
        else:
            if node.mode == 'numpy':
                node.out_port(0).data.set_shape(
                    uni_directional_shape_broadcasting(input_shape,
                                                       target_shape))
            elif node.mode == 'bidirectional':
                node.out_port(0).data.set_shape(
                    bi_directional_shape_broadcasting(input_shape,
                                                      target_shape))
            elif node.mode == 'explicit':
                axes_mapping = node.in_port(2).data.get_value()
                assert axes_mapping is not None, 'Broadcast(mode="explicit") with dynamic axes_mapping input ' \
                                                 'is not supported. Node: `{}`'.format(node_name)
                PermuteInputs().set_input_permutation(node.in_node(2), node,
                                                      'output:0', 'axis')
                axes_mapping = node.in_port(2).data.get_value()
                new_shape, _ = explicit_shape_broadcasting(
                    input_shape, target_shape, axes_mapping)
                node.out_port(0).data.set_shape(new_shape)
            else:
                raise Error('The node "{}" has unsupported mode "{}"'.format(
                    node_name, node.mode))
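Plain NumPy analogues of the three broadcasting modes, as an assumed illustration rather than the MO helpers themselves: 'numpy' corresponds to np.broadcast_to, 'bidirectional' to np.broadcast_shapes, and 'explicit' pins the input dimensions to the axes given by axes_mapping:

import numpy as np

x = np.ones((3, 1))
print(np.broadcast_to(x, (2, 3, 4)).shape)   # uni-directional ('numpy'): (2, 3, 4)
print(np.broadcast_shapes((3, 1), (1, 4)))   # 'bidirectional': (3, 4)

# 'explicit': place an input of shape (3,) at axis 1 of target shape (5, 3, 7)
target_shape, axes_mapping = (5, 3, 7), [1]
print((np.zeros(target_shape) + np.ones(3).reshape(1, 3, 1)).shape)  # (5, 3, 7)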
Example No. 8
    def replace_op(self, graph: Graph, node: Node):
        mode = node.module.mode
        if mode.endswith('linear'):  # like bilinear or trilinear
            mode = 'linear'
        align_corners = node.module.align_corners

        if mode == 'linear':
            height = node.module.size[0] if node.module.size is not None else -1
            width = node.module.size[1] if node.module.size is not None else -1
            dims = node.module.dims
            axes = np.arange(2, dims)
            pads = np.zeros(dims, dtype=np.int32)
            scales = np.repeat(node.module.scale_factor, dims - 2).astype(np.float32)
            attrs = {
                'name': node.name,
                'version': 'opset4',
                'height': height,
                'width': width,
                'mode': mode,
                'axes': axes,
                'pads_begin': pads,
                'pads_end': pads,
                'coordinate_transformation_mode': 'align_corners' if align_corners else 'half_pixel',
                'shape_calculation_mode': 'sizes' if node.module.size is not None else 'scales',
            }

            sizes = Const(graph, {'value': np.array([height, width])}).create_node()
            axes = Const(graph, {'value': axes}).create_node()
            scales = Const(graph, {'value': scales}).create_node()
            interp = Interpolate(graph, attrs).create_node([node.in_node(0), sizes, scales, axes])
        else:
            if node.module.size:
                attrs = {
                    'name': node.name,
                    'version': 'opset1',
                    'height': node.module.size[0],
                    'width': node.module.size[1],
                    'mode': mode,
                    'axes': [2, 3],
                    'align_corners': node.module.align_corners,
                }
                interp = Interpolate(graph, attrs).create_node([node.in_node(0)])
            else:
                if not node.module.scale_factor:
                    raise Error('No scale_factor found')
                attrs = {
                    'name': node.name,
                    'height_scale': float(node.module.scale_factor),
                    'width_scale': float(node.module.scale_factor),
                    'mode': mode,
                    'align_corners': node.module.align_corners,
                }
                interp = UpsampleOp(graph, attrs).create_node([node.in_node(0)])

        return [interp.id]
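A worked example of the constants built in the opset4 'linear' branch above, assuming a 4D input and scale_factor=2 (example values):

import numpy as np

dims, scale_factor = 4, 2
axes = np.arange(2, dims)                                      # spatial axes: [2 3]
scales = np.repeat(scale_factor, dims - 2).astype(np.float32)  # [2. 2.]
print(axes, scales)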
Example No. 9
    def priorbox_clustered_infer(node: Node):
        layout = node.graph.graph['layout']
        data_shape = node.in_node(0).shape
        num_ratios = len(node.width)

        if node.has_and_set('V10_infer'):
            assert node.in_node(0).value is not None
            node.out_port(0).data.set_shape([2, np.prod(node.in_node(0).value) * num_ratios * 4])
        else:
            res_prod = data_shape[get_height_dim(layout, 4)] * data_shape[get_width_dim(layout, 4)] * num_ratios * 4
            node.out_port(0).data.set_shape([1, 2, res_prod])
Example No. 10
    def resize_infer(node: Node):
        layout = node.graph.graph['layout']
        assert len(layout) == 4

        input_shape = node.in_node(0).shape
        if input_shape is None:
            raise Error('Input shape for operation "{}" is None'.format(node.soft_get('name', node.id)))

        scale_value = node.in_node(1).value

        node.out_port(0).data.set_shape(input_shape * scale_value)
Example No. 11
    def test_transpose_insert_with_two_result_nodes(self, nhwc_to_nchw_order, nchw_to_nhwc_order,
                                                    add_permutation_attrs, fft_kind):
        shape_len = len(nhwc_to_nchw_order) if add_permutation_attrs else 3
        shape = np.array(range(shape_len))
        add_shape = shape if nhwc_to_nchw_order is None else shape[nhwc_to_nchw_order]
        graph = build_graph(nodes_attrs=nodes_for_case_with_two_results,
                            edges=edges_for_case_with_two_results,
                            update_attributes={
                                'placeholder1_data': {'shape': int64_array(shape)},
                                'placeholder1': {'shape': int64_array(shape), 'rt_info': RTInfo()},
                                'transpose_parameter_order': {
                                    'value': np.array(nhwc_to_nchw_order),
                                    'shape': int64_array(np.array(nhwc_to_nchw_order).shape)
                                },
                                'transpose_parameter_order_data': {
                                    'value': np.array(nhwc_to_nchw_order),
                                    'shape': int64_array(np.array(nhwc_to_nchw_order).shape)
                                },
                                'fft': {'op': fft_kind, 'type': fft_kind},
                                'add_data': {'shape': add_shape},
                                'fft_data': {'shape': add_shape},
                                'result1': {'shape': shape, 'rt_info': RTInfo()},
                                'result2': {'shape': shape, 'rt_info': RTInfo()},
                            })

        if add_permutation_attrs:
            graph_ref = build_graph(nodes_for_case_with_two_results, edges_with_transpose_for_case_with_two_results)
        else:
            graph_ref = build_graph(nodes_for_case_with_two_results, edges_for_case_with_two_results)

        param1_node = Node(graph, 'placeholder1')
        result1_node = Node(graph, 'result1')
        result2_node = Node(graph, 'result2')

        if add_permutation_attrs:
            shape_len = len(nhwc_to_nchw_order)
            param1_node['permute_attrs'] = PermuteAttrs().update_attrs(attrs=[('shape', 'output:0')])
            param1_node.out_node(0)['permutation'] = PermuteAttrs().get_nhwc_to_nchw_permutation(shape_len)
            result1_node.in_node(0)['permutation'] = PermuteAttrs().get_nhwc_to_nchw_permutation(shape_len)
            result2_node.in_node(0)['permutation'] = PermuteAttrs().get_nhwc_to_nchw_permutation(shape_len)

        PreserveRuntimeInfo().find_and_replace_pattern(graph)

        (flag, resp) = compare_graphs(graph, graph_ref, 'result1')
        self.assertTrue(flag, resp)

        self.assertFalse(param1_node.has_valid('permute_attrs'))
        self.assertFalse(param1_node.out_node(0).has_valid('permutation'))

        if add_permutation_attrs:
            rt_info = param1_node.rt_info.info
            old_api_map = rt_info[('old_api_map_order', 0)].info
            self.assertTrue(np.array_equal(old_api_map['inverse_order'], nchw_to_nhwc_order))
Example No. 12
    def replace_op(self, graph: Graph, node: Node):
        pow_2 = Const(graph, {'value': np.float32(2.0)}).create_node()
        reduce_axis = Const(graph, {'value': np.int32(-1)}).create_node()
        pow_0_5 = Const(graph, {'value': np.float32(0.5)}).create_node()

        sq = Pow(graph, dict(name=node.in_node(0).name + '/sq',
                             power=2.0)).create_node([node.in_node(0), pow_2])
        reduce_sum = ReduceSum(graph, dict(name=sq.name + '/sum')).create_node(
            [sq, reduce_axis])  # avoid shadowing the built-in `sum`
        sqrt = Pow(graph, dict(name=reduce_sum.name + '/sqrt',
                               power=0.5)).create_node([reduce_sum, pow_0_5])
        return [sqrt.id]
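The Pow -> ReduceSum -> Pow chain above computes an L2 norm over the last axis; a NumPy check of that identity (assumed illustration):

import numpy as np

x = np.random.rand(2, 5).astype(np.float32)
decomposed = np.sum(x ** 2.0, axis=-1) ** 0.5
assert np.allclose(decomposed, np.linalg.norm(x, axis=-1))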
Example No. 13
    def replace_op(self, graph: Graph, node: Node):
        in_node_0 = node.in_node(0)
        in_node_1 = node.in_node(1)
        in_node_2 = node.in_node(2)

        ss = ScaleShiftOp(graph, {'name': node.id + "/ScaleShift_", 'axis': 0})
        scale_shift = ss.create_node(inputs=[in_node_1, in_node_0])

        el = Add(graph, {'name': node.id + "/Add_"})
        el_node = el.create_node(inputs=[scale_shift, in_node_2])

        return [el_node.id]
Example No. 14
def multi_box_prior_infer_mxnet(node: Node):
    v10 = node.has_and_set('V10_infer')
    data_H, data_W = node.in_node(0).value if v10 else node.in_node(0).shape[2:]

    num_ratios = len(node.aspect_ratio)
    num_priors = len(node.min_size) + num_ratios - 1
    if v10:
        node.out_node(0).shape = shape_array(
            [2, data_H * data_W * num_priors * 4])
    else:
        node.out_node(0).shape = shape_array(
            [1, 2, data_H * data_W * num_priors * 4])
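Worked numbers for the output-shape formula above, assuming two min_size values, three aspect ratios, and a 38x38 feature map (all example values):

data_H, data_W = 38, 38
num_ratios = 3
num_priors = 2 + num_ratios - 1                  # len(min_size) + num_ratios - 1
print([1, 2, data_H * data_W * num_priors * 4])  # [1, 2, 23104]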
Example No. 15
    def infer(node: Node):
        assert node.has_valid('output_type')

        node.out_port(0).data.set_shape(node.in_port(0).data.get_value())

        # We need to keep data type in data nodes corresponding to min and max values,
        # as min and max value type should be the same as output_type attribute of RandomUniform
        # operation. 'correct_data_type' attribute prevents changes of the data node type when
        # ir data type is not equal to data node type.
        node.in_node(1)['correct_data_type'] = True
        node.in_node(2)['correct_data_type'] = True

        PermuteInputs().set_input_permutation(node.in_node(0), node,
                                              'output:0', 'shape')
Example No. 16
    def test_reorder_detection_out_inputs(self):
        graph = build_graph(
            {
                'node_1': {
                    'type': 'Identity',
                    'kind': 'op',
                    'op': 'Parameter'
                },
                'node_2': {
                    'type': 'Identity',
                    'kind': 'op',
                    'op': 'Parameter'
                },
                'node_3': {
                    'type': 'Identity',
                    'kind': 'op',
                    'op': 'Parameter'
                },
                'multi_box_detection': {
                    'type': '_contrib_MultiBoxDetection',
                    'kind': 'op',
                    'op': '_contrib_MultiBoxDetection'
                },
            }, [('node_1', 'multi_box_detection'),
                ('node_2', 'multi_box_detection'),
                ('node_3', 'multi_box_detection')], {
                    'node_1': {
                        'shape': np.array([1, 34928])
                    },
                    'node_2': {
                        'shape': np.array([1, 183372])
                    },
                    'node_3': {
                        'shape': np.array([1, 2, 34928])
                    },
                })

        pattern = SsdReorderDetectionOutInputs()
        pattern.find_and_replace_pattern(graph)

        node_multi_box = Node(graph, 'multi_box_detection')

        node_input1 = node_multi_box.in_node(0)
        node_input2 = node_multi_box.in_node(1)
        node_input3 = node_multi_box.in_node(2)
        self.assertEqual(node_input1.name, 'node_2')
        self.assertEqual(node_input2.name, 'node_1')
        self.assertEqual(node_input3.name, 'node_3')
Example No. 17
    def infer(node: Node):
        input_shape = node.in_port(0).data.get_shape()
        if input_shape is None:
            return

        input_value = node.in_port(0).data.get_value()
        axes = node.in_port(1).data.get_value()
        if input_value is not None and axes is not None:
            norm_value = np.linalg.norm(input_value,
                                        node.p,
                                        axes,
                                        keepdims=True)
            if node.eps_mode == 'add':
                norm_value = norm_value + node.eps
            elif node.eps_mode == 'max':
                # element-wise maximum with eps; np.max would treat eps as an axis argument
                norm_value = np.maximum(norm_value, node.eps)
            else:
                assert False, 'Unsupported "eps_mode" = {}'.format(
                    node.eps_mode)
            node.out_port(0).data.set_value(input_value / norm_value)
        else:
            node.out_port(0).data.set_shape(input_shape)

        PermuteInputs().set_input_permutation(node.in_node(1), node, 'input:0',
                                              'axis')
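A NumPy sketch of the value path above, assuming p=2, normalization over axis 1, and eps_mode='max' (example values); note the element-wise np.maximum:

import numpy as np

x = np.random.rand(2, 4).astype(np.float32)
norm = np.linalg.norm(x, 2, 1, keepdims=True)
print((x / np.maximum(norm, 1e-6)).shape)  # (2, 4)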
Example No. 18
def reorder_inputs_for_shape_or_slice(op_node: Node, input_port: int,
                                      permute_indices_for_gather: list):
    """
    axis and slice permutations are almost the same the only difference is that for slice in general
    case permutation depends from slice_rank not from input_rank or output_rank
    """
    graph = op_node.graph
    data_node = op_node.in_node(input_port)

    gather_name = op_node.soft_get('name', op_node.id) + '/ShapeGather'
    const = Const(
        graph, {
            'value': permute_indices_for_gather,
            'name': gather_name + '/const',
            'need_shape_inference': True
        }).create_node_with_data()
    axis_const = Const(graph, {
        'value': int64_array(0),
        'name': gather_name + '/axis'
    }).create_node_with_data()
    gather = Gather(graph, {
        'name': gather_name,
        'need_shape_inference': True
    }).create_node_with_data([data_node, const, axis_const])
    attrs = graph.get_edge_data(data_node.id, op_node.id, key=0).copy()

    graph.add_edge(gather.id, op_node.id, **attrs)
    graph.remove_edge(data_node.id, op_node.id)

    # need to run manually to override output shape value to resolve shape collision for nodes with
    # 'correct_data_layout' output port attrs
    op_node['need_shape_inference'] = True
Example No. 19
    def infer(node: Node):
        # order parameter calculation and checks
        in_ports = node.in_ports()
        connected_ports = [
            port for port in in_ports.values() if not port.disconnected()
        ]
        input_shape = node.in_port(0).data.get_shape()

        if node.has_and_set('reverse_order'):
            assert len(connected_ports) == 1 and 0 in in_ports, \
                'Cannot infer `{}` because both `order` and `reverse_order` are set'.format(node.soft_get('name'))
            order = np.arange(len(input_shape))[::-1]  # Reverse order
        else:
            # we import PermuteInputs locally because it uses Transpose inside and we have recursive imports
            from openvino.tools.mo.graph.perm_inputs import PermuteInputs
            assert len(connected_ports) == 2 and 0 in in_ports and 1 in in_ports, \
                "{} node `{}` should have 2 input ports, where 0-input is a data input and 1-input represents " \
                "Transpose `order`".format(node.op, node.id)
            order = node.in_port(1).data.get_value()
            assert order is not None, 'Cannot infer `{}` because order is None'.format(
                node.soft_get('name'))
            PermuteInputs().set_input_permutation(node.in_node(1), node,
                                                  'input:0', 'order')

        # setting shape and value if applicable
        if node.in_port(0).data.get_value() is not None:
            node.out_port(0).data.set_value(
                np.transpose(node.in_port(0).data.get_value(), axes=order))
        else:
            node.out_port(0).data.set_shape(input_shape[order])
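NumPy analogues of the two propagation branches above (assumed illustration): the output shape is simply the input shape indexed by order, and values go through np.transpose:

import numpy as np

order = np.array([0, 3, 1, 2])             # e.g. NHWC -> NCHW
input_shape = np.array([1, 224, 224, 3])
print(input_shape[order])                  # [  1   3 224 224]
print(np.transpose(np.zeros((1, 224, 224, 3)), axes=order).shape)  # (1, 3, 224, 224)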
Example No. 20
    def infer(node: Node):
        """
         MO input edges:   |   Description:
         -------------------------------------------------
                0          | x: The sequence input to the LSTM, shape (timelen, batch_size, num_inputs)
                1          | w: The weight matrix
                2          | b: The bias vector
                3          | h_prev: Previous/initial hidden state
                4          | cs_prev: Value of the initial cell state
         """
        assert len(node.in_nodes()) == 5
        """
        MO output edges:    |   Description:
                0           | h: Output data / output hidden states concatenated over the whole time sequence
                1           | cs: Output cell states concatenated over the whole time sequence
        """

        assert len(node.out_nodes()) in [1, 2]

        mark_input_bins(node)
        input_shape = node.in_node(0).shape

        assert len(input_shape) == 3
        out_shape = input_shape.copy()
        node.out_port(0).data.set_shape(out_shape)
        if node.is_out_port_connected(1):
            node.out_port(1).data.set_shape(out_shape)
Example No. 21
    def infer(node: Node):
        in_shape = node.in_node().shape
        if in_shape.size != 4:
            raise Error('TensorFlow SpaceToDepth operation is supported for 4D \'NHWC\' input layout only. '
                        'Current input shape is \'{}\''.format(in_shape))

        layout = node.graph.graph['layout']
        N = in_shape[get_batch_dim(layout, 4)]
        H = in_shape[get_height_dim(layout, 4)]
        W = in_shape[get_width_dim(layout, 4)]
        C = in_shape[get_features_dim(layout, 4)]

        block_size = node['block_size']
        if (H is not dynamic_dimension and H % block_size) or (W is not dynamic_dimension and W % block_size):
            raise Error('Spatial dimensions of input tensor of SpaceToDepth operation have to be divisible by '
                        'SpaceToDepth \'block_size\' parameter. Input tensor shape = {}. Spatial dimensions = {},{}. '
                        'block_size = {}'.format(in_shape, H, W, block_size))

        out_shape = shape_for_layout(layout,
                                     batch=N,
                                     features=C * (block_size ** 2),
                                     height=H // block_size,
                                     width=W // block_size)

        node.out_port(0).data.set_shape(out_shape)
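A worked example of the shape arithmetic above, assuming an NHWC input of [1, 4, 6, 3] and block_size=2 (example values):

N, H, W, C, block_size = 1, 4, 6, 3, 2
print([N, H // block_size, W // block_size, C * block_size ** 2])  # [1, 2, 3, 12]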
Example No. 22
    def infer(node: Node):
        assert len(node.out_nodes()) in [1, 2]

        hidden_shape = node.in_node(1).shape.copy()

        mark_input_bins(node, start_port=2)
        node.out_node(0).shape = hidden_shape

        hidden_size = hidden_shape[1]
        if node.has_valid('hidden_size'):
            if node.hidden_size != hidden_size:
                raise Error(
                    "Input shape {} for hidden size doesn't match pre-defined hidden_size in node {}"
                    .format(node.in_node(1).shape, node.soft_get('name')))
        else:
            node['hidden_size'] = hidden_size
Example No. 23
    def replace_op(self, graph: Graph, node: Node):
        attrs = {'name': node.id + "/ScaleShift_"}

        param = graph.node[node.id]['pb'].bn_param
        pb_model = graph.node[node.id]['model_pb']

        blobs = pb_model.blobs

        if len(blobs) != 4:
            raise Error("Incorrect number of blobs in BN layer {}".format(
                node.id))

        mean = np.array(blobs[0].data)
        var = np.array(blobs[1].data)
        betta = np.array(blobs[2].data)
        gamma = np.array(blobs[3].data)

        gamma = gamma + np.repeat(param.eps, gamma.shape)

        scale = 1.0 / np.sqrt(gamma) * mean
        shift = var - betta * scale

        ss = ScaleShiftOp(graph, attrs)
        scale_shift = ss.create_node([node.in_node(0)])
        input_as_const(scale_shift, attrs, 1, 'weights', scale)
        input_as_const(scale_shift, attrs, 2, 'biases', shift)

        return [scale_shift.id]
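What the resulting ScaleShift computes, as a hedged NumPy sketch with made-up per-channel weights and biases (the scale/shift values below are illustrative, not the ones derived above): y = x * scale + shift, broadcast over the channel axis:

import numpy as np

x = np.random.rand(1, 3, 2, 2).astype(np.float32)
scale = np.array([0.5, 1.0, 2.0], dtype=np.float32)   # example weights
shift = np.array([0.1, 0.0, -0.1], dtype=np.float32)  # example biases
y = x * scale.reshape(1, -1, 1, 1) + shift.reshape(1, -1, 1, 1)
print(y.shape)  # (1, 3, 2, 2)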
Example No. 24
def add_removed_converts(graph: Graph):
    for data_node_name in graph.get_nodes_with_attributes(
            Insert_Convert_operation_after=True):
        data_node = Node(graph, data_node_name)
        # Get access to Const node connected to data node
        const_op = data_node.in_node(0)

        if const_op.type != 'Const':
            logger.debug('Cannot insert Convert operation after {} of type {}'.format(
                const_op.soft_get('name'), const_op.soft_get('type')))
            continue

        if const_op.data_type != np.float32:
            logger.debug('Cannot insert Convert operation after non-FP32 Const: {}'.format(
                const_op.soft_get('name')))
            continue

        convert_op = Cast(
            graph, {
                'dst_type': np.float32,
                'name': const_op.name + '/restored_convert',
                'stop_value_propagation': True
            }).create_node()

        # Insert Convert operation after Const operation
        const_op.out_port(0).get_connection().insert_node(convert_op)
        convert_op.out_node().value = None

        # Convert Const value to FP16 to make types in graph consistent
        const_op.value, _, _ = convert_blob(const_op.value, np.float16)
        const_op.infer(const_op)
Example No. 25
    def replace_op(self, graph: Graph, node: Node):
        axis = Const(graph, {'value': 0}).create_node()
        inputs = [node.in_node(1),  # weight
                  node.in_node(0),  # input_ids
                  axis]
        gather = Gather(graph, dict(name=node.name)).create_node(inputs)
        return [gather.id]
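A plain NumPy analogue of the Gather-based embedding lookup above (assumed illustration): gathering rows of the weight matrix along axis 0 by input_ids:

import numpy as np

weight = np.arange(12.0).reshape(4, 3)     # vocabulary of 4, embedding dim 3
input_ids = np.array([1, 3])
print(np.take(weight, input_ids, axis=0))  # rows 1 and 3 of the weight matrix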
Example No. 26
def generate_feed_dict(graph: tf_v1.Graph, node: Node):
    """
    The first value in the return tuple is True if all inputs for the node has constant values.
    The second returned value is mapping of placeholder tensor to the numpy arrays with the values for these
    placeholders.
    :param graph: the TensorFlow Graph to generate feed dictionary to.
    :param node: the node which represents TensorFlow sub-graph of operations.
    :return: pair where the first element is a flag that specifies that all node inputs are constants and a dictionary
    where key is the input Tensor object and the value is the tensor value.
    """
    all_constants = True
    feed_dict = dict()
    for in_data_node_name, edge_attrs in node.get_inputs():
        if 'control_flow_edge' in edge_attrs and edge_attrs['control_flow_edge']:
            continue
        value = node.in_node(edge_attrs['in']).value
        if value is None:
            all_constants = False
            placeholder_pb = node['pbs'][edge_attrs['placeholder_name']]
            value = np.ones(
                shape=tf_tensor_shape(placeholder_pb.attr['shape'].shape),
                dtype=tf_dtype_extractor(placeholder_pb.attr['dtype'].type))
        feed_dict[graph.get_tensor_by_name(edge_attrs['placeholder_name'] +
                                           ":0")] = value
    return all_constants, feed_dict
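A hedged usage sketch of what such a feed dictionary is for, using the public TF1-style API with a made-up one-placeholder graph (not the MO node machinery):

import numpy as np
import tensorflow.compat.v1 as tf_v1

tf_v1.disable_eager_execution()
graph = tf_v1.Graph()
with graph.as_default():
    ph = tf_v1.placeholder(tf_v1.float32, shape=[2, 2], name='ph')
    out = tf_v1.reduce_sum(ph)
with tf_v1.Session(graph=graph) as sess:
    feed_dict = {graph.get_tensor_by_name('ph:0'): np.ones((2, 2), dtype=np.float32)}
    print(sess.run(out, feed_dict=feed_dict))  # 4.0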
Example No. 27
    def array_infer(node: Node):
        size = node.in_node(0)
        assert size.value is not None

        # 0 port: handle
        if 0 in node.out_nodes().keys():
            if node.has_valid('element_shape'):
                element_shape = node['element_shape']
            else:
                element_shape = None

            out_node = node.out_node(0).id
            output_value = node.out_node(0).id
            node.graph.node[out_node]['value'] = np.array(output_value)

            output_shape = node.graph.node[out_node]['value'].shape
            node.graph.node[out_node]['shape'] = shape_array(output_shape)

            node.graph.node[out_node]['element_shape'] = shape_array(
                element_shape)
            node.graph.node[out_node]['size'] = size.value
        # 1 port flow
        if 1 in node.out_nodes().keys():
            output_value = None

            out_node = node.out_node(1).id
            node.graph.node[out_node]['value'] = None if output_value is None else np.array(output_value)
            node.graph.node[out_node]['shape'] = shape_array(output_shape)
Example No. 28
    def control_flow_infer(node: Node, is_executable: bool,
                           mark_executability: callable):
        """
        Infers control flow through a Switch operation node. It marks the executability of the output
        data nodes according to the executability of the current node and the switch data value.
        :param node: Node instance to infer control flow through
        :param is_executable: if current node is executable
        :param mark_executability: function to mark executability of node
        """
        out_data_nodes = node.out_nodes(control_flow=True)
        node_with_switch_value = node.in_node(1)

        switch_data_0_port_node_id = [out_data_nodes[0].id] if 0 in out_data_nodes else []
        switch_data_1_port_node_id = [out_data_nodes[1].id] if 1 in out_data_nodes else []
        assert 1 <= len(switch_data_0_port_node_id) + len(switch_data_1_port_node_id) <= 2

        if not node_with_switch_value.has_valid('value') or not is_fully_defined(node_with_switch_value.value):
            # Mark both ports as executable
            resulting_switch_data_node_ids = switch_data_0_port_node_id + switch_data_1_port_node_id
            for n in resulting_switch_data_node_ids:
                mark_executability(n, True)
        else:
            switch_value = node_with_switch_value.value.item(0)
            resulting_switch_data_node_ids = [
                switch_data_0_port_node_id, switch_data_1_port_node_id
            ]

            for n in resulting_switch_data_node_ids[not switch_value]:
                mark_executability(n, False)
            for n in resulting_switch_data_node_ids[switch_value]:
                mark_executability(n, is_executable)
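The port-selection trick above in isolation (assumed sketch): a Python bool indexes a two-element list as 0 or 1, so ports[not value] is the dead branch and ports[value] the live one:

ports = [['data_node_for_port_0'], ['data_node_for_port_1']]
switch_value = True
print(ports[not switch_value])  # ['data_node_for_port_0'] -> marked not executable
print(ports[switch_value])      # ['data_node_for_port_1'] -> keeps node executability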
Example No. 29
    def infer(cls, node: Node):
        input_shape = node.in_node(0).shape
        input_h = input_shape[2]
        input_w = input_shape[3]
        output_h = node.output_size[0]
        output_w = node.output_size[1]

        stride_h = input_h // output_h
        stride_w = input_w // output_w
        kernel_h = input_h - (output_h - 1) * stride_h
        kernel_w = input_w - (output_w - 1) * stride_w

        data = {
            'window': int64_array([1, 1, kernel_h, kernel_w]),
            'stride': int64_array([1, 1, stride_h, stride_w]),
            'pad': int64_array([[0, 0], [0, 0], [0, 0], [0, 0]]),
            'pad_spatial_shape': int64_array([[0, 0], [0, 0]]),
            'pool_method': 'avg',
            'exclude_pad': False,
            'output_spatial_shape': None,
            'spatial_dims': None,
            'channel_dims': int64_array([1]),
            'batch_dims': int64_array([0]),
            'layout': 'NCHW',
            'rounding_type': 'floor',
            'pooling_convention': 'valid'
        }

        # update the attributes of the node
        Pooling.update_node_stat(node, data)
        Pooling.infer(node)
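A sanity check of the kernel/stride derivation above, assuming input_h=12 and output_h=4 (example sizes): the derived windows end exactly at the input boundary:

input_h, output_h = 12, 4
stride_h = input_h // output_h                   # 3
kernel_h = input_h - (output_h - 1) * stride_h   # 3
assert (output_h - 1) * stride_h + kernel_h == input_h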
Example No. 30
    def test_remove_softmax_activation_input(self):
        graph = build_graph(
            {
                'node_1': {
                    'type': 'Identity',
                    'value': None,
                    'kind': 'op',
                    'op': 'Parameter'
                },
                'softmax': {
                    'type': 'SoftmaxActivation',
                    'value': None,
                    'kind': 'op',
                    'op': 'SoftmaxActivation'
                },
            }, [('node_1', 'softmax')])

        pattern = CheckSoftmaxNodeInputs()
        pattern.find_and_replace_pattern(graph)

        node_softmax = Node(graph, 'softmax')

        self.assertEqual(len(node_softmax.in_nodes()), 1)

        node_input1 = node_softmax.in_node(0)
        self.assertEqual(node_input1.name, 'node_1')