Example #1
def build_range_test_graphs(start=0, limit=10, delta=1, dst_type_str='FP16',
                            src_type_str='FP32', returns_shape_value=None):
    nodes = {
        **valued_const_with_data('start', float32_array(start)),
        **valued_const_with_data('limit', float32_array(limit)),
        **valued_const_with_data('delta', float32_array(delta)),
        **regular_op_with_empty_data('range', {'type': 'Range', 'op': 'Range',
                                               'returns_shape_value': returns_shape_value,
                                               'output_type': data_type_str_to_np(src_type_str),
                                               'infer': Range.infer}),
        **result('res'),
    }

    nodes_ref = deepcopy(nodes)
    nodes_ref.update({
        **regular_op_with_empty_data('range', {'type': 'Range', 'op': 'Range',
                                               'returns_shape_value': returns_shape_value,
                                               'output_type': data_type_str_to_np(dst_type_str),
                                               'infer': Range.infer}),
    })

    edges = [
        *connect('start', '0:range'),
        *connect('limit', '1:range'),
        *connect('delta', '2:range'),
        *connect('range', 'res'),
    ]
    graph = build_graph(nodes, edges)
    graph_ref = build_graph(nodes_ref, edges)

    graph = partial_infer(graph)

    graph.graph['cmd_params'].data_type = dst_type_str
    convert_blobs(graph, dst_type_str)
    return graph, graph_ref
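
A minimal usage sketch for this builder; ChangeOutputTypeAttributes here is an assumption standing in for whatever FP16-related pass the test actually exercises:

graph, graph_ref = build_range_test_graphs(start=0, limit=10, delta=1, dst_type_str='FP16')
ChangeOutputTypeAttributes().find_and_replace_pattern(graph)  # hypothetical pass under test
(flag, resp) = compare_graphs(graph, graph_ref, 'res', check_op_attrs=True)
assert flag, resp
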
Example #2
    def setUp(self):
        self.start_node_name = 'StatefulPartitionedCall/Preprocessor/unstack'
        self.end_node_name = 'StatefulPartitionedCall/Preprocessor/stack'
        self.end_node_name2 = 'StatefulPartitionedCall/Preprocessor/stack2'
        self.loop_start_node_name = 'prefix/map/while/Preprocessor/unstack'
        self.loop_end_node_name = 'prefix/map/while/Preprocessor/stack'
        self.mul_const = float32_array([0.025, 0.374, -0.45])
        self.sub_const = float32_array([2.0, 3.0, 4.0])

        self.nodes = {
            **regular_op('input', {'type': 'Parameter'}),

            **regular_op('mul', {'op': 'Mul', 'type': 'Multiply', 'name': 'my_mul'}),
            **regular_op('sub', {'op': 'Sub', 'type': 'Subtract', 'name': 'my_sub'}),
            **const('mul_const', self.mul_const),
            **const('sub_const', self.sub_const),

            **regular_op(self.start_node_name, {'op': 'Identity'}),
            **regular_op(self.end_node_name, {'op': 'Identity'}),
            **regular_op(self.end_node_name2, {'op': 'Identity'}),

            **regular_op('loop', {'op': 'Loop', 'body': None}),

            **regular_op('resize', {'type': 'Interpolate'}),
            **result('result'),
        }
        self.replacement_desc = {'start_nodes': [self.start_node_name],
                                 'end_nodes': [self.end_node_name, self.end_node_name2]}
Example #3
    def extract(cls, node):
        activation_alpha = onnx_attr(node,
                                     'activation_alpha',
                                     'floats',
                                     default=None,
                                     dst_type=lambda x: float32_array(x))
        activation_beta = onnx_attr(node,
                                    'activation_beta',
                                    'floats',
                                    default=None,
                                    dst_type=lambda x: float32_array(x))
        activations = onnx_attr(
            node,
            'activations',
            'strings',
            default=None,
            dst_type=lambda x: list(
                map(lambda s: s.decode(encoding="utf-8").lower(), list(x))))
        clip = onnx_attr(node, 'clip', 'f', default=None)
        linear_before_reset = onnx_attr(node,
                                        'linear_before_reset',
                                        'i',
                                        default=0)

        attrs = {
            'batch_dim': 1,
            'sequence_dim': 0,
            'blobs_wrb': True,
            'has_num_directions': True,
            'num_layers': 1,
            'format': 'onnx',
            'multilayers': False,
            'gate_order': [0, 1, 2],

            # ONNX-specific attrs
            'activation_alpha': activation_alpha,
            'activation_beta': activation_beta,
            'activations': activations,
            'clip': clip,
            'direction': onnx_attr(node, 'direction', 's', b'forward').decode().lower(),
            'hidden_size': int64_array(onnx_attr(node, 'hidden_size', 'i')),
            'linear_before_reset': linear_before_reset,
        }

        GRU.update_node_stat(node, attrs)
        return cls.enabled
Example #4
    def test_run_with_const_input(self):
        inp_shape = (1, 3, 1000, 1000)

        nodes = {
            **shaped_const_with_data('input', int64_array(inp_shape)),
            **regular_op('sizes_const', {'op': 'Const'}),
            **{'sizes_const_d': {'kind': 'data', 'value': float32_array([1., 1., 1., 100.])}},
            **regular_op_with_empty_data('interpolate', {'type': 'Interpolate', 'shape_calculation_mode': 'scales'}),
            **result('res'),
        }

        nodes_ref = {
            **shaped_const_with_data('input', int64_array(inp_shape)),
            **regular_op('sizes_const', {'op': 'Const', 'returns_shape_value': True}),
            **{'sizes_const_d': {'kind': 'data', 'value': float32_array([1., 1., 1., 100.])}},
            **regular_op_with_empty_data('interpolate', {'type': 'Interpolate', 'shape_calculation_mode': 'scales'}),
            **result('res'),
        }

        edges = [
            *connect('input', '0:interpolate'),
            *connect('sizes_const', '1:interpolate'),
            *connect('interpolate', 'res'),
        ]
        graph = build_graph(nodes, edges)
        interp_node = Node(graph, 'interpolate')
        interp_node.add_input_port(2)

        MarkNodesWithShapeValues().find_and_replace_pattern(graph)

        graph_ref = build_graph(nodes_ref, edges)
        (flag, resp) = compare_graphs(graph, graph_ref, 'res', check_op_attrs=True)
        self.assertTrue(flag, resp)
Example #5
    def extract(cls, node):
        variance = onnx_attr(node,
                             'variance',
                             'floats',
                             default=[],
                             dst_type=lambda x: float32_array(x))
        if len(variance) == 0:
            variance = [0.1]

        update_attrs = {
            'aspect_ratio': onnx_attr(node, 'aspect_ratio', 'floats',
                                      dst_type=lambda x: float32_array(x)),
            'min_size': onnx_attr(node, 'min_size', 'floats',
                                  dst_type=lambda x: float32_array(x)),
            'max_size': onnx_attr(node, 'max_size', 'floats',
                                  dst_type=lambda x: float32_array(x)),
            'flip': onnx_attr(node, 'flip', 'i', default=0),
            'clip': onnx_attr(node, 'clip', 'i', default=0),
            'variance': list(variance),
            'img_size': onnx_attr(node, 'img_size', 'i', default=0),
            'img_h': onnx_attr(node, 'img_h', 'i', default=0),
            'img_w': onnx_attr(node, 'img_w', 'i', default=0),
            'step': onnx_attr(node, 'step', 'f', default=0.0),
            'step_h': onnx_attr(node, 'step_h', 'f', default=0.0),
            'step_w': onnx_attr(node, 'step_w', 'f', default=0.0),
            'offset': onnx_attr(node, 'offset', 'f', default=0.0),
        }

        # update the attributes of the node
        PriorBoxOp.update_node_stat(node, update_attrs)
        return cls.enabled
Example #6
    def extract(cls, node):
        direction = onnx_attr(node, 'direction', 's',
                              b'forward').decode().lower()

        activation_alpha = onnx_attr(node,
                                     'activation_alpha',
                                     'floats',
                                     default=None,
                                     dst_type=lambda x: float32_array(x))
        activation_beta = onnx_attr(node,
                                    'activation_beta',
                                    'floats',
                                    default=None,
                                    dst_type=lambda x: float32_array(x))
        activations = onnx_attr(
            node,
            'activations',
            'strings',
            default=['tanh', 'tanh']
            if direction == 'bidirectional' else ['tanh'],
            dst_type=lambda x: list(
                map(lambda s: s.decode(encoding="utf-8").lower(), list(x))))
        clip = onnx_attr(node, 'clip', 'f', default=None)

        # PyTorch generates bidirectional ONNX RNN models with only one activation, so duplicate it
        if direction == 'bidirectional' and len(activations) == 1:
            activations.append(activations[0])

        attrs = {
            'batch_dim': 1,
            'sequence_dim': 0,
            'blobs_wrb': True,
            'has_num_directions': True,
            'num_layers': 1,
            'format': 'onnx',
            'multilayers': False,
            'gate_order': [0],

            # ONNX attrs
            'activation_alpha': activation_alpha,
            'activation_beta': activation_beta,
            'activations': activations,
            'clip': clip,
            'direction': direction,
            'hidden_size': int64_array(onnx_attr(node, 'hidden_size', 'i')),
        }

        RNN.update_node_stat(node, attrs)
        return cls.enabled
Example #7
def build_cast_test_graphs(input_data, dst_type_str='FP16'):
    nodes = {
        **valued_const_with_data('input', float32_array(input_data)),
        **regular_op_with_empty_data('cast', {'type': 'Convert', 'op': 'Cast',
                                              'dst_type': np.float32,
                                              'infer': Cast.infer}),
        **result('res'),
    }

    nodes_ref = deepcopy(nodes)
    nodes_ref.update({
        **regular_op_with_empty_data('cast', {'type': 'Convert', 'op': 'Cast',
                                              'dst_type': data_type_str_to_np(dst_type_str),
                                              'infer': Cast.infer}),
    })

    edges = [
        *connect('input', 'cast'),
        *connect('cast', 'res'),
    ]
    graph = build_graph(nodes, edges)
    graph_ref = build_graph(nodes_ref, edges)

    graph = partial_infer(graph)

    graph.graph['cmd_params'].data_type = dst_type_str
    convert_blobs(graph, dst_type_str)
    return graph, graph_ref
Example #8
    def test_conversion(self, input_shape, scales, axes):
        input_shape_as_array = int64_array(input_shape)
        scales_as_array = float32_array(scales)
        graph = build_graph(graph_node_attrs,
                            graph_edges,
                            {
                                'placeholder_data': {'shape': input_shape_as_array},
                                'scales': {'value': scales_as_array, 'shape': scales_as_array.shape},
                                'scales_data': {'value': scales_as_array, 'shape': scales_as_array.shape},
                                'upsample_data':
                                    {'shape': ((input_shape_as_array + 1.e-5) * scales_as_array).astype(np.int64)}
                            })
        graph.graph['layout'] = 'NCHW'
        ref_graph = build_graph(new_ref_graph_node_attr,
                                new_ref_graph_edges,
                                {
                                    'placeholder_data': {'shape': int64_array(input_shape)},
                                    'ss_begin': {'value': int64_array([axes[0]])},
                                    'ss_end': {'value': int64_array([axes[-1] + 1])},
                                    'ss_begin_data': {'value': int64_array([axes[0]])},
                                    'ss_end_data': {'value': int64_array([axes[-1] + 1])},
                                    'factor': {'value': scales_as_array[2:],
                                               'shape': scales_as_array[2:].shape},
                                    'factor_data': {'value': scales_as_array[2:],
                                                    'shape': scales_as_array[2:].shape},
                                    'axes_const': {'value': int64_array(axes), 'shape': int64_array(axes).shape},
                                    'interpolate_data': {
                                        'shape': (input_shape_as_array * scales_as_array + 1e-5).astype(np.int64)},
                                })
        UpsampleToResample().find_and_replace_pattern(graph)
        (flag, resp) = compare_graphs(graph, ref_graph, 'output')
        self.assertTrue(flag, resp)
Example #9
    def find_and_replace_pattern(self, graph: Graph):
        for attr_clamp in graph.get_op_nodes(op='AttributedClamp'):
            original_name = attr_clamp.soft_get('name', attr_clamp.id)

            rename_node(attr_clamp, original_name + '/TBR')
            min_value = attr_clamp.soft_get('min', np.finfo(np.float32).min)
            max_value = attr_clamp.soft_get('max', np.finfo(np.float32).max)
            new_clamp = create_op_with_const_inputs(graph, Clamp, {
                1: float32_array(min_value),
                2: float32_array(max_value)
            }, {'name': original_name})
            rename_node(new_clamp, original_name)

            attr_clamp.in_port(0).get_connection().set_destination(
                new_clamp.in_port(0))
            attr_clamp.out_port(0).get_connection().set_source(
                new_clamp.out_port(0))
            graph.remove_node(attr_clamp.id)
Example #10
    def extract(cls, node):
        boundaries = float32_array(node.pb.attr['boundaries'].list.f)
        Bucketize.update_node_stat(
            node, {
                'boundaries': boundaries,
                'with_right_bound': False,
                'output_type': np.int32
            })
        return cls.enabled
Example #11
    def placeholder_scales(self, placeholder: Node):
        """
        Helper function to get scales for prior boxes out of input image size:
                [1 / im_width, 1 / im_height, 1 / im_width, 1 / im_height]
        """
        graph = placeholder.graph
        name = placeholder.soft_get('name', placeholder.id)

        shape_value = placeholder.soft_get('shape', None)
        assert shape_value is not None, \
            "[ {} replacer ] Placeholder `{}` should have shape attribute".format(self.replacement_id, name)
        assert isinstance(shape_value, np.ndarray), \
            "[ {} replacer ] Placeholder `{}` shape attribute should be np.ndarray".format(self.replacement_id, name)
        assert shape_value.size == 4, \
            "[ {} replacer ] Placeholder `{}` should be 4D. Shape: {}".format(self.replacement_id, name, shape_value)

        shape = Shape(graph, {'name': 'input_image_shape'}).create_node()
        shape.in_port(0).connect(placeholder.out_port(0))

        begin = Const(graph, {'value': int64_array([1])}).create_node()
        end = Const(graph, {'value': int64_array([3])}).create_node()
        stride = Const(graph, {'value': int64_array([1])}).create_node()
        spatial = StridedSlice(graph, {'name': name + '/get_h_w', 'begin_mask': int64_array([1]),
                                       'end_mask': int64_array([1]), 'new_axis_mask': int64_array([0]),
                                       'shrink_axis_mask': int64_array([0]), 'ellipsis_mask': int64_array([0])}).create_node()

        spatial.in_port(0).connect(shape.out_port(0))
        spatial.in_port(1).connect(begin.out_port(0))
        spatial.in_port(2).connect(end.out_port(0))
        spatial.in_port(3).connect(stride.out_port(0))

        power = Const(graph, {'value': float32_array([-1.])}).create_node()
        spatial_scale = Pow(graph, {}).create_node()

        spatial_scale.in_port(0).connect(spatial.out_port(0))
        spatial_scale.in_port(1).connect(power.out_port(0))

        # Power `type_infer` requires inputs to have equal data type
        convert_to_fp32 = Cast(graph, {'dst_type': np.float32}).create_node()
        spatial_scale.in_port(0).get_connection().insert_node(convert_to_fp32)

        order = Const(graph, {'value': int64_array([1, 0])}).create_node()
        axis_const = Const(graph, {'value': int64_array(0)}).create_node()
        reverse = Gather(graph, {}).create_node()

        reverse.in_port(0).connect(spatial_scale.out_port(0))
        reverse.in_port(1).connect(order.out_port(0))
        axis_const.out_port(0).connect(reverse.in_port(2))

        priors_scale_node = Concat(graph, {'axis': 0, 'in_ports_count': 2}).create_node()
        priors_scale_node.add_input_port(0, skip_if_exist=True)
        priors_scale_node.add_input_port(1, skip_if_exist=True)

        priors_scale_node.in_port(0).connect(reverse.out_port(0))
        priors_scale_node.in_port(1).connect(reverse.out_port(0))
        return priors_scale_node
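
The arithmetic of the subgraph built above can be checked with plain NumPy; the 600x800 NHWC shape below is a made-up example:

import numpy as np

input_shape = np.array([1, 600, 800, 3], dtype=np.int64)         # hypothetical NHWC placeholder shape
h_w = input_shape[1:3]                                           # StridedSlice [1:3] -> [H, W]
spatial_scale = np.float32(1.0) / h_w.astype(np.float32)         # Cast + Pow(-1) -> [1/H, 1/W]
reversed_scale = spatial_scale[[1, 0]]                           # Gather with order [1, 0] -> [1/W, 1/H]
priors_scale = np.concatenate([reversed_scale, reversed_scale])  # Concat -> [1/W, 1/H, 1/W, 1/H]
assert np.allclose(priors_scale, [1 / 800, 1 / 600, 1 / 800, 1 / 600])
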
Example #12
    def replace_pattern(self, graph: Graph, match: dict):
        if not self.is_applicable(match):
            return

        unsqueeze_node = match['unsqueeze']
        unsqueeze_name = unsqueeze_node.soft_get('name', unsqueeze_node.id)
        second_input_of_unsqueeze = unsqueeze_node.in_port(
            1).get_connection().get_source().node
        d_idx = int(second_input_of_unsqueeze.value)
        axis = d_idx - 1

        shape_node = Shape(graph,
                           dict(name=unsqueeze_name + '/Shape')).create_node()
        axis_len_node = node_to_get_shape_value_of_indices(shape_node, [axis])

        second_input_of_tile = match['tile'].in_port(
            1).get_connection().get_source().node
        scale = int64_array([second_input_of_tile.value[d_idx]])
        float_scale = float32_array([second_input_of_tile.value[d_idx]])
        mul_node = create_op_with_const_inputs(
            graph, Mul, {1: scale}, {'name': unsqueeze_name + '/Mul'})

        axis_len_node.out_port(0).connect(mul_node.in_port(0))

        interp_node = create_op_with_const_inputs(
            graph, Interpolate, {
                2: float_scale,
                3: int64_array([axis])
            }, {
                'mode': 'nearest',
                'antialias': 0,
                'pads_begin': int64_array([0]),
                'pads_end': int64_array([0]),
                'coordinate_transformation_mode': 'half_pixel',
                'nearest_mode': 'round_prefer_floor',
                'cube_coeff': -0.75,
                'version': 'opset4',
                'shape_calculation_mode': 'scales',
                'in_ports_count': 4,
                'maybe_part_of_sequence': True
            })
        mul_node.out_port(0).connect(interp_node.in_port(1))

        reshape_node = match['reshape']
        reshape_node.out_port(0).get_connection().set_source(
            interp_node.out_port(0))
        reshape_name = reshape_node.soft_get('name', reshape_node.id)
        rename_nodes([(reshape_node, reshape_name + '/delete'),
                      (interp_node, reshape_name)])

        unsqueeze_connection = unsqueeze_node.in_port(0).get_connection()
        unsqueeze_connection.set_destination(interp_node.in_port(0))
        unsqueeze_connection.get_source().connect(shape_node.in_port(0))
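
The unsqueeze/tile/reshape chain this transformation rewrites is the standard nearest-neighbor upsampling trick; a NumPy sketch of the equivalence, with a made-up shape and scale:

import numpy as np

x = np.random.rand(1, 3, 4, 4).astype(np.float32)  # hypothetical input
d_idx, scale = 2, 2                                # unsqueeze axis and tile factor; interpolation axis = d_idx - 1

expanded = np.expand_dims(x, d_idx)             # (1, 3, 1, 4, 4)
tiled = np.tile(expanded, (1, 1, scale, 1, 1))  # (1, 3, 2, 4, 4)
reshaped = tiled.reshape(1, 3 * scale, 4, 4)    # (1, 6, 4, 4)

# ...which is exactly a repeat (nearest-neighbor upsample) along axis d_idx - 1:
assert np.array_equal(reshaped, np.repeat(x, scale, axis=d_idx - 1))
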
Example #13
def replace_interpolate_pattern(graph: Graph, match: dict):
    split = match['split']
    scale = float32_array([get_split_scale(split)])
    axis = int(split.in_port(1).get_connection().get_source().node.value)
    split_node_name = split.name
    axis_node = Const(graph, {'name': split_node_name + '/axis', 'value': int64_array([axis])}).create_node()

    shape_node = Shape(graph, dict(name=split_node_name + '/Shape')).create_node()
    scales_node = Const(graph, dict(name=split_node_name + '/scales', value=scale)).create_node()
    mul_node = Mul(graph, dict(name=split_node_name + '/Mul')).create_node()
    scales_node.out_port(0).connect(mul_node.in_port(1))

    strided_slice_node = create_op_with_const_inputs(graph,
                                                     StridedSlice,
                                                     {1: int64_array([axis]), 2: int64_array([axis + 1])},
                                                     {
                                                        'name': split_node_name + '/StridedSlice',
                                                        'begin_mask': int64_array([1]),
                                                        'end_mask': int64_array([1]),
                                                        'new_axis_mask': int64_array([0]),
                                                        'shrink_axis_mask': int64_array([0]),
                                                        'ellipsis_mask': int64_array([0])
                                                     })
    shape_node.out_port(0).connect(strided_slice_node.in_port(0))

    cast_shape_to_float = Cast(graph, {'dst_type': np.float32}).create_node()

    strided_slice_node.out_port(0).connect(cast_shape_to_float.in_port(0))
    cast_shape_to_float.out_port(0).connect(mul_node.in_port(0))

    interp_node = Interpolate(graph,
                              dict(name=split_node_name + '/Interpolate',
                                   mode='nearest',
                                   antialias=0, pads_begin=int64_array([0]), pads_end=int64_array([0]),
                                   coordinate_transformation_mode='half_pixel', nearest_mode='round_prefer_floor',
                                   cube_coeff=-0.75, version='opset4', shape_calculation_mode='scales',
                                   in_ports_count=4, maybe_part_of_sequence=True)).create_node()

    floor_node = Floor(graph, {'name': split_node_name + '/Floor'}).create_node()
    cast_mul_result_to_int = Cast(graph, {'dst_type': np.int64}).create_node()

    mul_node.out_port(0).connect(floor_node.in_port(0))
    floor_node.out_port(0).connect(cast_mul_result_to_int.in_port(0))

    cast_mul_result_to_int.out_port(0).connect(interp_node.in_port(1))
    scales_node.out_port(0).connect(interp_node.in_port(2))
    axis_node.out_port(0).connect(interp_node.in_port(3))

    match['concat'].out_port(0).get_connection().set_source(interp_node.out_port(0))

    split_connection = split.in_port(0).get_connection()
    split_connection.set_destination(interp_node.in_port(0))
    split_connection.get_source().connect(shape_node.in_port(0))
Example #14
    def infer(node: Node):
        assert len(node.in_nodes()) == 5
        assert len(node.out_nodes()) == 1
        inputs = [node.in_node(i) for i in range(5)]
        x, input_low, input_high, output_low, output_high = inputs
        assert x.has_valid('shape')
        # TODO Check all inputs[1..4] shapes are broadcastable to inputs[0] shape
        assert all([broadcastable(inputs[i].shape, inputs[0].shape) for i in range(1, 5)]), \
            "Not all shapes from FakeQuantize inputs can be broadcasted to input[0] for node {}".format(
                node.soft_get('name'))
        node.out_node().shape = x.shape.copy()

        if all([node.in_node(i).has_valid('value') for i in range(5)]):
            x, input_low, input_high, output_low, output_high = \
                [float32_array(np.broadcast_to(node.value, x.value.shape)) for node in inputs]

            assert node.has_valid('levels')
            assert isinstance(node.levels, int)

            underflow_mask = x <= input_low
            overflow_mask = x > input_high
            # pylint: disable=assignment-from-no-return
            middle_mask = np.logical_not(
                np.logical_or(underflow_mask, overflow_mask))

            def middle_part(x, input_low, input_high, output_low, output_high):
                return round_half_up((x - input_low) / (input_high - input_low) * (node.levels - 1)) / \
                    (node.levels - 1) * (output_high - output_low) + output_low

            output = np.zeros_like(x)
            # pylint: disable=unsupported-assignment-operation
            output[middle_mask] = middle_part(
                x[middle_mask],
                input_low[middle_mask],
                input_high[middle_mask],
                output_low[middle_mask],
                output_high[middle_mask],
            )

            # pylint: disable=unsupported-assignment-operation
            output[overflow_mask] = output_high[overflow_mask]
            # pylint: disable=unsupported-assignment-operation
            output[underflow_mask] = output_low[underflow_mask]

            if not node.has_and_set('stop_value_propagation'):
                node.out_node().value = output
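
The middle-part formula is easy to verify numerically; an illustrative check with levels=2 (note that np.round differs from round_half_up only on exact .5 ties):

import numpy as np

levels = 2
x = np.array([-0.3, 0.2, 0.8], dtype=np.float32)
in_low, in_high, out_low, out_high = -1.0, 1.0, -1.0, 1.0

q = np.round((x - in_low) / (in_high - in_low) * (levels - 1)) / (levels - 1)
y = q * (out_high - out_low) + out_low
assert np.allclose(y, [-1.0, 1.0, 1.0])  # levels=2 snaps every value to one of the two output bounds
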
Example #15
    def append_variances(priors_scale_node: Node, variance: list):
        graph = priors_scale_node.graph
        name = priors_scale_node.name

        sp_shape = Shape(graph, {'name': name + '/shape'}).create_node()
        priors_scale_node.out_port(0).connect(sp_shape.in_port(0))

        begin = Const(graph, {'value': int64_array([-2])}).create_node()
        end = Const(graph, {'value': int64_array([-1])}).create_node()
        stride = Const(graph, {'value': int64_array([1])}).create_node()
        shape_part_for_tiling = StridedSlice(graph, {'name': name + '/get_-2_dim', 'begin_mask': int64_array([1]),
                                                     'end_mask': int64_array([1]), 'new_axis_mask': int64_array([0]),
                                                     'shrink_axis_mask': int64_array([0]),
                                                     'ellipsis_mask': int64_array([0])}).create_node()

        sp_shape.out_port(0).connect(shape_part_for_tiling.in_port(0))
        begin.out_port(0).connect(shape_part_for_tiling.in_port(1))
        end.out_port(0).connect(shape_part_for_tiling.in_port(2))
        stride.out_port(0).connect(shape_part_for_tiling.in_port(3))

        shape_concat = create_op_node_with_second_input(graph, Concat, int64_array([4]),
                                                        {'name': name + '/shape_for_tiling', 'in_ports_count': 2,
                                                         'axis': int64_array(0)},
                                                        shape_part_for_tiling)

        variance = Const(graph, {'name': name + '/variance', 'value': float32_array(variance)}).create_node()
        tile = Broadcast(graph, {'name': name + '/variance_tile'}).create_node()
        variance.out_port(0).connect(tile.in_port(0))
        shape_concat.out_port(0).connect(tile.in_port(1))

        reshape_dim = Const(graph, {'value': int64_array([-1, 4])}).create_node()
        sp_reshape = Reshape(graph, {'name': name + '/reshape'}).create_node()
        sp_reshape.in_port(0).connect(priors_scale_node.out_port(0))
        sp_reshape.in_port(1).connect(reshape_dim.out_port(0))

        concat = Concat(graph,
                        {'name': name + '/priors_concat', 'axis': int64_array(0), 'in_ports_count': 2}).create_node()
        sp_reshape.out_port(0).connect(concat.in_port(0))
        tile.out_port(0).connect(concat.in_port(1))

        output_dims = Const(graph, {'value': int64_array([1, 2, -1])}).create_node()
        output_node = Reshape(graph, {'name': name + '/3D_priors_wth_variances'}).create_node()
        concat.out_port(0).connect(output_node.in_port(0))
        output_dims.out_port(0).connect(output_node.in_port(1))

        return output_node
Example #16
    def is_applicable(match: dict) -> bool:
        """
        This function checks whether this transformation is applicable.
        :param match: dictionary with nodes from the found pattern
        :return: True, if the transformation is applicable
                 False, otherwise
        """
        unsqueeze_node = match['unsqueeze']
        second_input_of_unsqueeze = unsqueeze_node.in_port(
            1).get_connection().get_source().node
        if not second_input_of_unsqueeze.has_valid('value') or len(
                second_input_of_unsqueeze.value) != 1:
            return False

        d_idx = int(second_input_of_unsqueeze.value)
        if d_idx == 0:
            return False

        second_input_of_tile = match['tile'].in_port(
            1).get_connection().get_source().node
        if not second_input_of_tile.has_valid('value'):
            return False

        input_shape_of_unsqueeze = unsqueeze_node.in_port(0).data.get_shape()
        input_rank_of_unsqueeze = len(input_shape_of_unsqueeze)
        if input_rank_of_unsqueeze not in {4, 5}:
            return False

        if input_rank_of_unsqueeze + 1 != len(second_input_of_tile.value):
            return False

        expected_tile_constant = np.ones(input_rank_of_unsqueeze + 1,
                                         dtype=np.float32)
        expected_tile_constant[d_idx] = float(
            second_input_of_tile.value[d_idx])

        if not np.array_equal(expected_tile_constant,
                              float32_array(second_input_of_tile.value)):
            return False

        reshape_node = match['reshape']
        new_shape = reshape_node.in_port(1).data.get_value()
        if new_shape is None or input_rank_of_unsqueeze != len(new_shape):
            return False

        return True
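
The expected-tile check above accepts only Tile constants that are all ones except at d_idx; an illustrative NumPy rendering with made-up values:

import numpy as np

tile_value = np.array([1, 1, 3, 1, 1], dtype=np.float32)  # hypothetical Tile repeats (input rank 4 + 1)
d_idx = 2
expected = np.ones(len(tile_value), dtype=np.float32)
expected[d_idx] = tile_value[d_idx]
assert np.array_equal(expected, tile_value)  # holds, so the transformation would be applicable
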
Example #17
    def extract(cls, node):
        onnx_opset_version = get_onnx_opset_version(node)
        if onnx_opset_version is not None and onnx_opset_version >= 9:
            mode = onnx_attr(node, 'mode', 's', default='nearest', dst_type=lambda x: x.decode())
            ONNXResize10.update_node_stat(node, {'mode': mode})
        else:
            mode = onnx_attr(node, 'mode', 's', default='nearest', dst_type=lambda x: x.decode())
            scales = onnx_attr(node, 'scales', 'floats', dst_type=lambda x: float32_array(x))
            width_scale = onnx_attr(node, 'width_scale', 'f')
            height_scale = onnx_attr(node, 'height_scale', 'f')

            supported_modes = ['nearest', 'linear']
            if mode not in supported_modes:
                raise Error(
                    'Error decoding Upsample node {}, mode = {} is not in the list of supported modes {}.',
                    node.name,
                    mode,
                    supported_modes
                )

            if scales is not None:
                if scales.shape != (4,):
                    raise Error(
                        'Upsample scales attribute is wrong for node {}. Only 4D scales are supported.',
                        node.name
                    )
                if math.fabs(scales[0] - 1) > 1e-5 or math.fabs(scales[1] - 1) > 1e-5:
                    raise Error(
                        'Upsampling of batch and feature dimensions is not supported for node {}.',
                        node.name
                    )
                height_scale = scales[2]
                width_scale = scales[3]

            if (width_scale is None or height_scale is None) and len(node.in_nodes()) != 2:
                raise Error(
                    'One/both of width_scale = {} and height_scale = {} is not defined for Upsample node {}.',
                    width_scale,
                    height_scale,
                    node.name
                )

            UpsampleOp.update_node_stat(node, {'mode': mode, 'height_scale': height_scale,
                                               'width_scale': width_scale})
        return cls.enabled
Example #18
    def extract(cls, node):
        attrs = dict(
            class_agnostic_box_regression=onnx_attr(node, 'class_agnostic_box_regression', 'i', 0),
            max_detections_per_image=onnx_attr(node, 'max_detections_per_image', 'i', 100),
            nms_threshold=onnx_attr(node, 'nms_threshold', 'f', 0.5),
            num_classes=onnx_attr(node, 'num_classes', 'i', 81),
            post_nms_count=onnx_attr(node, 'post_nms_count', 'i', 2000),
            score_threshold=onnx_attr(node, 'score_threshold', 'f', 0.05),
            max_delta_log_wh=onnx_attr(node, 'max_delta_log_wh', 'f', log(1000. / 16.)),
            deltas_weights=float32_array(onnx_attr(node, 'deltas_weights', 'floats', [10., 10., 5., 5.])),
        )
        ExperimentalDetectronDetectionOutput.update_node_stat(node, attrs)
        return cls.enabled
Example #19
    def replace_op(self, graph: Graph, node: Node):
        in_node_0 = node.in_node(0)

        broadcast = lambda x: float32_array([x])
        threshold = Const(graph, {
            'name': node.id + "/Input_1",
            "value": broadcast(0)
        }).create_node()
        in_1 = threshold
        in_2 = threshold
        in_3 = Const(graph, {
            'name': node.id + "/Input_3",
            "value": broadcast(-1)
        }).create_node()
        in_4 = Const(graph, {
            'name': node.id + "/Input_4",
            "value": broadcast(+1)
        }).create_node()
        quant = FakeQuantize(graph, {
            'name': node.id + "/FakeQuantize_",
            "levels": 2
        }).create_node(inputs=[in_node_0, in_1, in_2, in_3, in_4])

        return [quant.id]
Example #20
    def replace_pattern(self, graph: Graph, match: Dict[str, Node]):
        log.debug('UpsampleToResample is triggered')
        upsample = match['upsample']
        upsample_name = upsample.soft_get('name', upsample.id)
        input_shape = upsample.in_port(0).data.get_shape()
        input_shape_rank = len(input_shape)
        if input_shape_rank not in [4, 5]:
            log.warning('The input shape is not 4D or 5D for op {}'.format(
                upsample.soft_get('name')))
            return

        depth_scale = None
        layout = graph.graph['layout']

        if len(upsample.in_nodes()) == 2:
            if upsample.in_node(1).value is None:
                return
            scales = upsample.in_node(1).value
            assert len(scales) in (
                4, 5
            ), 'Supported scales rank is 4 or 5, but it is {} for node {}'.format(
                len(scales), upsample_name)
            if not (math.isclose(scales[0], 1, rel_tol=1e-5)
                    and math.isclose(scales[1], 1, rel_tol=1e-5)):
                return
            height_scale = scales[get_height_dim(layout, input_shape_rank)]
            width_scale = scales[get_width_dim(layout, input_shape_rank)]
            if len(scales) == 5:
                depth_scale = scales[get_depth_dim(layout, input_shape_rank)]
        else:
            height_scale = upsample['height_scale']
            width_scale = upsample['width_scale']

        if 1 in upsample.in_ports() and not upsample.in_port(1).disconnected():
            upsample.in_port(1).disconnect()

        upsample_name = upsample.soft_get('name', upsample.id)
        shape = Shape(graph, {'name': upsample_name + '/0_port'}).create_node()

        layout = graph.graph['layout']

        if input_shape_rank == 4:
            begin_value = int64_array(
                [get_height_dim(layout, input_shape_rank)])
            factor_value = float32_array([height_scale, width_scale])
        else:
            begin_value = int64_array(
                [get_depth_dim(layout, input_shape_rank)])
            factor_value = float32_array(
                [depth_scale, height_scale, width_scale])

        ss = create_op_with_const_inputs(
            graph, StridedSlice, {
                1: begin_value,
                2: int64_array([get_width_dim(layout, input_shape_rank) + 1]),
                3: int64_array([1])
            }, {
                'name': upsample_name + '/ss_0_port',
                'begin_mask': int64_array([1]),
                'end_mask': int64_array([1]),
                'new_axis_mask': int64_array([0]),
                'shrink_axis_mask': int64_array([0]),
                'ellipsis_mask': int64_array([0])
            })

        mul = create_op_node_with_second_input(
            graph, Mul, factor_value, {'name': upsample_name + '/factor_mul'})

        source = upsample.in_port(0).get_connection().get_source()
        source.connect(shape.in_port(0))
        shape.out_port(0).connect(ss.in_port(0))

        ss.out_port(0).connect(mul.in_port(0))

        # Create Interpolate operation
        if input_shape_rank == 4:
            axes = int64_array([
                get_height_dim(layout, input_shape_rank),
                get_width_dim(layout, input_shape_rank)
            ])
        else:
            axes = int64_array([
                get_depth_dim(layout, input_shape_rank),
                get_height_dim(layout, input_shape_rank),
                get_width_dim(layout, input_shape_rank)
            ])

        axes_node = Const(graph, {
            'name': upsample_name + '/axis',
            'value': axes
        }).create_node()

        interpolate = Interpolate(
            graph, {
                'mode': upsample.attrs()['mode'],
                'antialias': 0,
                'pads_begin': int64_array([0]),
                'pads_end': int64_array([0]),
                'coordinate_transformation_mode': 'half_pixel',
                'nearest_mode': 'round_prefer_floor',
                'cube_coeff': -0.75,
                'shape_calculation_mode': 'scales',
                'version': 'opset4',
                'in_ports_count': 4
            }).create_node()

        upsample.add_input_port(1, skip_if_exist=True)
        assert upsample.in_port(1).disconnected()
        mul.out_port(0).connect(interpolate.in_port(1))
        axes_node.out_port(0).connect(interpolate.in_port(3))

        scales_node = Const(graph, {
            'name': upsample_name + '/scales',
            'value': factor_value
        }).create_node()
        scales_node.out_port(0).connect(interpolate.in_port(2))

        upsample.in_port(0).get_connection().set_destination(
            interpolate.in_port(0))
        upsample.out_port(0).get_connection().set_source(
            interpolate.out_port(0))

        rename_nodes([(upsample, upsample_name + '/delete'),
                      (interpolate, upsample_name)])

        convert_to_float = Cast(graph, dict(dst_type=np.float32)).create_node()
        convert_to_int = Cast(graph, dict(dst_type=np.int64)).create_node()

        mul.in_port(0).get_connection().insert_node(convert_to_float)
        mul.out_port(0).get_connection().insert_node(convert_to_int)
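
In NumPy terms, the Shape -> StridedSlice -> Cast -> Mul -> Cast chain assembled above computes the target sizes like this (illustrative, with made-up dimensions):

import numpy as np

spatial_dims = np.array([300, 400], dtype=np.int64)  # hypothetical H, W picked out by the StridedSlice
factor = np.array([2.0, 2.0], dtype=np.float32)      # height_scale, width_scale
sizes = (spatial_dims.astype(np.float32) * factor).astype(np.int64)
# sizes == [600, 800] feeds Interpolate input 1; factor itself feeds input 2 as the scales
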
Example #21
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

import numpy as np
import unittest

from openvino.tools.mo.front.common.partial_infer.utils import int64_array, float32_array, int8_array
from openvino.tools.mo.middle.quantize_dequantize_linear_resolver import QuantizeDequantizeLinearResolver
from openvino.tools.mo.middle.quantize_linear_resolver import QuantizeLinearResolver
from openvino.tools.mo.utils.ir_engine.compare_graphs import compare_graphs
from unit_tests.utils.graph import build_graph, regular_op_with_shaped_data, result, connect, connect_data, \
    valued_const_with_data, regular_op_with_empty_data

nodes_attributes = {
    **valued_const_with_data('y_scale_1', float32_array(1.0 / 255.0)),
    **valued_const_with_data('y_scale_2', float32_array(1.0 / 255.0)),
    **valued_const_with_data('y_zeropoint_1', int8_array(0)),
    **valued_const_with_data('y_zeropoint_2', int8_array(0)),
    **valued_const_with_data('x_scale_1', float32_array(1.0 / 255.0)),
    **valued_const_with_data('x_scale_2', float32_array(1.0 / 255.0)),
    **valued_const_with_data('x_zeropoint_1', int8_array(0)),
    **valued_const_with_data('x_zeropoint_2', int8_array(0)),
    **valued_const_with_data('const_input', float32_array([[0.3, 0.6], [-0.7, -0.9]])),
    **valued_const_with_data('in_low', float32_array(-128.0)),
    **valued_const_with_data('in_high', float32_array(127.0)),
    **valued_const_with_data('out_low', float32_array(-128.0)),
    **valued_const_with_data('out_high', float32_array(127.0)),
    **valued_const_with_data('non_const_in_low', float32_array(-128.0)),
    **valued_const_with_data('non_const_in_high', float32_array(127.0)),
    **valued_const_with_data('non_const_out_low', float32_array(-128.0)),
    **valued_const_with_data('non_const_out_high', float32_array(127.0)),
}

nodes2 = {
    **regular_op_with_shaped_data('placeholder', [1, 3, 10, 10], {
                                      'type': 'Parameter'
                                  }),
    **valued_const_with_data('mul_const',
                             float32_array([-127.5, -127.5, -127.5])),
    **regular_op_with_shaped_data('mul', [1, 3, 10, 10], {'type': 'Multiply'}),
    **valued_const_with_data('pad_const_1', int64_array([0, 0, 0, 0])),
    **valued_const_with_data('pad_const_2', int64_array([0, 0, 1, 1])),
    **regular_op_with_shaped_data('pad', [1, 3, 10, 10], {'type': 'Pad'}),
    **regular_op_with_shaped_data('reverse_channels', [1, 3, 10, 10], {
                                      'type': 'ReverseChannels',
                                      'axis': int64_array(1)
                                  }),
    **result('result'),
    **result('result2'),
}

nodes3 = {
    **regular_op_with_shaped_data('placeholder', [1, 3, 10, 10], {'type': 'Parameter'}),
    **regular_op_with_shaped_data('placeholder1', [1, 3, 10, 10], {'type': 'Parameter', 'rt_info': RTInfo()}),
    **regular_op_with_shaped_data('placeholder2', [1, 1, 1, 1], {'type': 'Parameter'}),

    **regular_op_with_shaped_data('mul', [1, 3, 10, 10], {'type': 'Multiply'}),
    **regular_op_with_shaped_data('reverse_channels', [1, 3, 10, 10],
                                  {'type': 'ReverseChannels', 'axis': int64_array(1)}),

    **regular_op_with_shaped_data('pad', [1, 3, 10, 10], {'type': 'Pad'}),

    **result('result'),
}

import unittest

import numpy as np

from openvino.tools.mo.front.AttributedRandomUniformToRandomUniform import AttributedRandomUniformToRandomUniform
from openvino.tools.mo.front.common.partial_infer.utils import int64_array, float32_array
from openvino.tools.mo.utils.ir_engine.compare_graphs import compare_graphs
from unit_tests.utils.graph import build_graph, const, result, regular_op

nodes = {
    **regular_op('placeholder', {'type': 'Parameter'}),
    **regular_op(
        'attr_random_uniform', {
            'type': 'AttributedRandomUniform',
            'op': 'AttributedRandomUniform',
            'output_type': np.float32,
            'min_val': float32_array([-1.5]),
            'max_val': float32_array([10.7]),
            'shape': int64_array([5, 4, 3])
        }),
    **result('result'),

    # new RandomUniform node and inputs
    **regular_op('random_uniform', {'type': 'RandomUniform'}),
    **const('min_val', float32_array([-1.5])),
    **const('max_val', float32_array([10.7])),
    **const('shape', int64_array([5, 4, 3])),
}


class AttributedRandomUniformToRandomUniformTest(unittest.TestCase):
    def test_min_max(self):
        # Assumed minimal body: build the graph, apply the transformation and
        # compare with the reference subgraph defined in nodes above.
        graph = build_graph(nodes, [('placeholder', 'attr_random_uniform', {'in': 0}),
                                    ('attr_random_uniform', 'result')])
        graph.stage = 'front'
        AttributedRandomUniformToRandomUniform().find_and_replace_pattern(graph)

        graph_ref = build_graph(nodes, [('placeholder', 'random_uniform', {'in': 0}),
                                        ('min_val', 'random_uniform', {'in': 1}),
                                        ('max_val', 'random_uniform', {'in': 2}),
                                        ('random_uniform', 'result')])
        (flag, resp) = compare_graphs(graph, graph_ref, 'result')
        self.assertTrue(flag, resp)
Example #25
    def operation(a, b):
        if np.any(b < 0) and np.issubdtype(a.dtype, np.signedinteger):
            return float32_array(a.astype(np.float32)**b)
        return a**b
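
Why the float32 promotion matters, illustratively: NumPy refuses to raise integer arrays to negative integer powers, so the helper computes such cases in float32 instead.

import numpy as np

a = np.array([2, 4], dtype=np.int64)
# a ** np.int64(-1) would raise "Integers to negative integer powers are not allowed"
print(operation(a, np.int64(-1)))  # -> float32 array [0.5, 0.25]
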
Example #26
    def replace_op(self, graph: Graph, node: Node):
        input_out_port = node.in_port(0).get_source()

        memory_pair_input = unique_id('id')
        memory_pair_output = unique_id('id')

        # Input -> FullyConnected
        fc_layer_after_input_attrs = {
            'name': 'input_fullyconnected',
            'out-size': node.gifo_x_weights_shape[0],
            'transpose_weights': True,
            'bias_term': True,
        }

        fc_layer_after_input = FullyConnected(
            graph, fc_layer_after_input_attrs).create_node()
        fc_layer_after_input.in_port(0).connect(input_out_port)
        input_as_const(fc_layer_after_input, fc_layer_after_input_attrs, 1,
                       'weights', node.gifo_x_weights)
        input_as_const(fc_layer_after_input, fc_layer_after_input_attrs, 2,
                       'biases', node.gifo_biases)

        init_value_prev_lstm_output = create_const_with_batch_from_input(
            input_out_port, node.gifo_r_weights_shape[1])
        prev_lstm_output = ReadValue(graph, {
            'name': 'prev_memory_output',
            'variable_id': memory_pair_input
        }).create_node()
        prev_lstm_output.in_port(0).connect(
            init_value_prev_lstm_output.out_port(0))

        # *Memory(output) -> FullyConnected
        fc_layer_from_prev_state_attrs = {
            'name': 'prev_memory_output_fullyconnected',
            'out-size': node.gifo_r_weights_shape[0],
            'transpose_weights': True,
            'bias_term': False,
        }

        fc_layer_from_prev_state = FullyConnected(
            graph, fc_layer_from_prev_state_attrs).create_node()
        fc_layer_from_prev_state.in_port(0).connect(
            prev_lstm_output.out_port(0))
        input_as_const(fc_layer_from_prev_state,
                       fc_layer_from_prev_state_attrs, 1, 'weights',
                       node.gifo_r_weights)

        # Memory -> FullyConnected  \
        #                           *Eltwise(sum)
        # Input -> FullyConnected   /
        join_input_prev_state_sum = Add(graph, {
            'name': 'join_input_eltwise'
        }).create_node()
        join_input_prev_state_sum.in_port(0).connect(
            fc_layer_from_prev_state.out_port(0))
        join_input_prev_state_sum.in_port(1).connect(
            fc_layer_after_input.out_port(0))

        # *Eltwise(sum) -> Split
        # it is split into 4 nodes: Act, Eltw*3
        # the following order is mandatory
        #       ___Tanh
        #      /
        # Split ---(2)Eltwise(sum)
        #     |\
        #     | \__(3)Eltwise(sum)
        #     |____(4)Eltwise(sum)
        split_joined_input_axis = Const(graph, {
            'value': np.int64(1)
        }).create_node()
        split_joined_input = Split(graph, {
            'name': 'join_input_split',
            'num_splits': 4,
            'out_ports_count': 4
        }).create_node()
        split_joined_input.in_port(0).connect(
            join_input_prev_state_sum.out_port(0))
        split_joined_input.in_port(1).connect(
            split_joined_input_axis.out_port(0))

        init_value_prev_lstm_state = create_const_with_batch_from_input(
            split_joined_input.out_port(0), node.input_gate_weights.shape[0])
        prev_lstm_state = ReadValue(graph, {
            'name': 'prev_memory_state',
            'variable_id': memory_pair_output
        }).create_node()
        prev_lstm_state.in_port(0).connect(
            init_value_prev_lstm_state.out_port(0))

        # *Memory(state) -> *ScaleShift(input)
        state_input_scaleshift_attrs = {
            'name': 'input_scaleshift',
            'bias_term': False
        }
        state_input_scaleshift = ScaleShiftOp(
            graph, state_input_scaleshift_attrs).create_node()
        state_input_scaleshift.in_port(0).connect(prev_lstm_state.out_port(0))
        input_as_const(state_input_scaleshift, state_input_scaleshift_attrs, 1,
                       'weights', node.input_gate_weights)

        # *Memory(state) -> *ScaleShift(forget)
        state_forget_scaleshift_attrs = {
            'name': 'forget_scaleshift',
            'bias_term': False
        }
        state_forget_scaleshift = ScaleShiftOp(
            graph, state_forget_scaleshift_attrs).create_node()
        state_forget_scaleshift.in_port(0).connect(prev_lstm_state.out_port(0))
        input_as_const(state_forget_scaleshift, state_forget_scaleshift_attrs,
                       1, 'weights', node.forget_gate_weights)

        # Split                                 \
        #                                       (2)Eltwise(sum)
        # Memory(state) -> *ScaleShift(input)  /
        join_prev_lstm_input_joined_input_sum = Add(
            graph, {
                'name': 'join_prev_lstm_input_joined_input_eltwise'
            }).create_node()
        join_prev_lstm_input_joined_input_sum.in_port(0).connect(
            split_joined_input.out_port(1))
        join_prev_lstm_input_joined_input_sum.in_port(1).connect(
            state_input_scaleshift.out_port(0))
        # Split                                 \
        #                                       (3)Eltwise(sum)
        # Memory(state) -> *ScaleShift(forget)  /
        join_prev_lstm_input_joined_forget_sum = Add(
            graph, {
                'name': 'join_prev_lstm_input_joined_forget_sum',
            }).create_node()
        join_prev_lstm_input_joined_forget_sum.in_port(0).connect(
            split_joined_input.out_port(2))
        join_prev_lstm_input_joined_forget_sum.in_port(1).connect(
            state_forget_scaleshift.out_port(0))

        # Split -> Tanh
        remember_tanh = Tanh(graph, {'name': 'remember_tanh'}).create_node()
        remember_tanh.in_port(0).connect(split_joined_input.out_port(0))

        # Split -> (2)Eltwise(sum) -> *Sigmoid
        remember_sigmoid = Sigmoid(graph, {
            'name': 'remember_sigmoid'
        }).create_node()
        remember_sigmoid.in_port(0).connect(
            join_prev_lstm_input_joined_input_sum.out_port(0))

        # Split -> (3)Eltwise(sum) -> **Sigmoid
        forget_sigmoid = Sigmoid(graph, {
            'name': 'forget_sigmoid'
        }).create_node()
        forget_sigmoid.in_port(0).connect(
            join_prev_lstm_input_joined_forget_sum.out_port(0))

        # *Memory(state)                        \
        #                                       (6)Eltwise(mul)
        # Split -> (3)Eltwise(sum) -> **Sigmoid /
        join_forget_prev_state_mul = Mul(graph, {
            'name': 'join_forget_prev_state_mul'
        }).create_node()
        join_forget_prev_state_mul.in_port(0).connect(
            forget_sigmoid.out_port(0))
        join_forget_prev_state_mul.in_port(1).connect(
            prev_lstm_state.out_port(0))

        # Split -> Tanh                         \
        #                                       (5)Eltwise(mul)
        # Split -> (2)Eltwise(sum) -> *Sigmoid   /
        join_remember_candidates_mul = Mul(
            graph, {
                'name': 'join_remember_candidates_mul'
            }).create_node()
        join_remember_candidates_mul.in_port(0).connect(
            remember_tanh.out_port(0))
        join_remember_candidates_mul.in_port(1).connect(
            remember_sigmoid.out_port(0))

        # (5)Eltwise(mul)  \
        #               (7)Eltwise(sum)
        # (6)Eltwise(mul)   /
        join_forget_remember_sum = Add(graph, {
            'name': 'join_forget_remember_sum'
        }).create_node()
        join_forget_remember_sum.in_port(0).connect(
            join_forget_prev_state_mul.out_port(0))
        join_forget_remember_sum.in_port(1).connect(
            join_remember_candidates_mul.out_port(0))

        # (7)Eltwise(sum) -> Clamp
        join_forget_clamp = create_op_with_const_inputs(
            graph, Clamp, {
                1: float32_array(-node.clip_value),
                2: float32_array(node.clip_value)
            }, {'name': 'join_forget_clamp'}, join_forget_remember_sum)
        #
        # Clamp -> (2)Memory(state)
        next_lstm_state = Assign(graph, {
            'name': 'next_lstm_state',
            'variable_id': memory_pair_output
        }).create_node()
        next_lstm_state.in_port(0).connect(join_forget_clamp.out_port(0))

        res_node = Result(graph, {'name': 'next_lstm_state_out'}).create_node()
        res_node.in_port(0).connect(next_lstm_state.out_port(0))

        # Clamp -> (2)Tanh
        state_filtered_tanh = Tanh(graph, {
            'name': 'state_filtered_tanh'
        }).create_node()
        state_filtered_tanh.in_port(0).connect(join_forget_clamp.out_port(0))

        # Clamp -> (2)ScaleShift
        clamp_scaleshift_attrs = {
            'name': 'clamp_scaleshift',
            'bias_term': False
        }
        clamp_scaleshift = ScaleShiftOp(graph,
                                        clamp_scaleshift_attrs).create_node()
        clamp_scaleshift.in_port(0).connect(join_forget_clamp.out_port(0))
        input_as_const(clamp_scaleshift, clamp_scaleshift_attrs, 1, 'weights',
                       node.output_gate_weights)

        # Split                 \
        #                       (4)Eltwise(sum)
        # Clamp -> (2)ScaleShift /
        join_next_lstm_input_joined_input_sum = Add(
            graph, {
                'name': 'join_next_lstm_input_joined_input_sum',
            }).create_node()
        join_next_lstm_input_joined_input_sum.in_port(0).connect(
            split_joined_input.out_port(3))
        join_next_lstm_input_joined_input_sum.in_port(1).connect(
            clamp_scaleshift.out_port(0))

        # (4)Eltwise(sum) -> (3)Sigmoid
        output_sigmoid = Sigmoid(graph, {
            'name': 'output_sigmoid'
        }).create_node()
        output_sigmoid.in_port(0).connect(
            join_next_lstm_input_joined_input_sum.out_port(0))

        # (4)Eltwise(sum) -> (3)Sigmoid         \
        #                                       (5)Eltwise(mul)
        # Clamp -> (2)Tanh                      /
        joined_output_mul = Mul(graph, {
            'name': 'joined_output_mul'
        }).create_node()
        joined_output_mul.in_port(0).connect(state_filtered_tanh.out_port(0))
        joined_output_mul.in_port(1).connect(output_sigmoid.out_port(0))

        # (5)Eltwise(mul) -> (3)FullyConnected
        fc_output_attrs = {
            'name': 'FullyConnected',
            'out-size': node.projection_weights_shape[0],
            'transpose_weights': True,
            'bias_term': False
        }
        fc_output = FullyConnected(graph, fc_output_attrs).create_node()
        fc_output.in_port(0).connect(joined_output_mul.out_port(0))
        input_as_const(fc_output, fc_output_attrs, 1, 'weights',
                       node.projection_weights)

        #                   / (2)Memory(output)
        # (3)FullyConnected
        #                   \ Output (any next node) (edge created automatically after replacement)
        next_lstm_output = Assign(graph, {
            'name': 'next_lstm_output',
            'variable_id': memory_pair_input
        }).create_node()
        next_lstm_output.in_port(0).connect(fc_output.out_port(0))

        res_node_lstm_output = Result(graph, {
            'name': 'next_lstm_output_out'
        }).create_node()
        res_node_lstm_output.in_port(0).connect(next_lstm_output.out_port(0))

        return [fc_output.id]
Example #27
    def replace_pattern(graph: Graph, match: dict):
        node = match['pool']
        node_name = node.soft_get('name', node.id)

        if node.pool_step is None:
            node.stride = int64_array([1, 1, node.window[-1], node.window[-1]])

        # create Reshape before convolution
        # shape = [in_shape[0], pool_stride, 1, in_shape[1]/pool_stride]
        i_shape = Shape(graph, {'name': node_name + '/Shape'}).create_node()

        dst_dtype = np.float32  # even if data_type=FP16 use float32 for shape values
        shape = Cast(graph, {
            'name': node_name + '/to_float',
            'dst_type': dst_dtype
        }).create_node()
        i_shape.in_port(0).connect(node.in_port(0).get_source())
        shape.in_port(0).connect(i_shape.out_port(0))

        N, H = node_to_get_shape_value_of_indices(
            shape, [0]), node_to_get_shape_value_of_indices(shape, [1])

        div = create_op_with_const_inputs(
            graph, Div, {1: float32_array([node.pool_stride])},
            {'name': node_name + '/div_stride_h'})
        div.in_port(0).connect(H.out_port(0))

        concat = create_op_with_const_inputs(
            graph, Concat, {
                1: float32_array([node.pool_stride]),
                2: float32_array([1])
            }, {
                'name': node_name + '/concat_all_dims',
                'in_ports_count': 4,
                'axis': 0
            })
        concat.in_port(0).connect(N.out_port(0))
        concat.in_port(3).connect(div.out_port(0))

        reshape_pattern = Cast(graph, {
            'name': node_name + '/to_int',
            'dst_type': np.int64
        }).create_node()
        concat.out_port(0).connect(reshape_pattern.in_port(0))

        reshape_in = Reshape(graph, {
            'name': node_name + '/reshape_in'
        }).create_node()
        reshape_in.in_port(1).connect(reshape_pattern.out_port(0))

        # create Reshape after Convolution
        reshape_out = create_op_node_with_second_input(
            graph, Reshape, int64_array([0, -1]),
            {'name': node_name + '/reshape_out'})

        # connect input_reshape_node
        source = node.in_port(0).get_source()
        node.in_port(0).get_connection().set_source(reshape_in.out_port(0))
        reshape_in.in_port(0).connect(source)
        # connect output_reshape_node
        node.out_port(0).get_connection().set_source(reshape_out.out_port(0))
        node.out_port(0).connect(reshape_out.in_port(0))
Example #28
    def operation(a):
        if np.issubdtype(a.dtype, np.signedinteger):
            return float32_array(a.astype(np.float32)**0.5)
        return a**0.5
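
An illustrative check: for signed integer input the helper keeps the square root in float32, where plain NumPy would promote to float64.

import numpy as np

a = np.array([4, 9], dtype=np.int64)
print(operation(a), operation(a).dtype)  # [2. 3.] float32
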
Example #29
    def replace_sub_graph(self, graph: Graph, match: dict):
        log.debug('Matched NearestNeighborUpsampling pattern: {}'.format(
            [node.id for node in match.values()]))
        try:
            input_height = match['pack_1'].in_node(1).value.item()
            input_width = match['pack_1'].in_node(3).value.item()

            height_scale = match['mul_const'].shape[-4]
            width_scale = match['mul_const'].shape[-2]
        except Exception:
            log.warning(
                'Failed to determine scaling parameters from the topology. The pattern will not be applied.'
            )
            return

        reshape2_name = match['reshape_2'].name
        resample_op = Interpolate(
            graph, {
                'mode': 'nearest',
                'antialias': 0,
                'pads_begin': int64_array([0]),
                'pads_end': int64_array([0]),
                'coordinate_transformation_mode': 'half_pixel',
                'nearest_mode': 'round_prefer_floor',
                'cube_coeff': -0.75,
                'version': 'opset4',
                'name': reshape2_name + '/Resample',
                'shape_calculation_mode': 'scales',
                'in_ports_count': 4
            })
        resample_node = resample_op.create_node([match['op']])
        axes_node = Const(
            graph, {
                'name':
                resample_node.name + '/axes',
                'value':
                int64_array([2, 3])
                if graph.graph['layout'] == 'NCHW' else int64_array([1, 2])
            }).create_node()
        sizes_node = Const(
            graph, {
                'value':
                mo_array(
                    [input_height * height_scale, input_width * width_scale]),
                'name':
                resample_node.name + '/target_shape'
            }).create_node()
        scales_node = Const(
            graph, {
                'value': float32_array([height_scale, width_scale]),
                'name': resample_node.name + '/scales'
            }).create_node()

        match['reshape_2'].replace_node(resample_node)

        resample_node.add_input_port(1, skip_if_exist=True)
        assert resample_node.in_port(1).disconnected()
        sizes_node.out_port(0).connect(resample_node.in_port(1))
        scales_node.out_port(0).connect(resample_node.in_port(2))
        axes_node.out_port(0).connect(resample_node.in_port(3))

        graph.remove_nodes_from(
            [node.id for node in match.values() if node.id != match['op'].id])