Code Example #1
def _conv_stride_prop(graph: Graph, node: Node, spatial_dims, supported=True):
    """
    This function handles convolution stride propagation. There are two cases: conv->(op) and conv->conv. In the
    first case we propagate the stride from the op; in the second case we also change the stride of the second conv.
    """
    next_ops = get_next_operation(node)
    stride_props, all_ops_are_valid = _check_next_ops(next_ops)

    def _check_convolution(node: Node):
        return node.has_valid('kernel_spatial') and np.array_equal(
            node.kernel_spatial, mo_array([1, 1]))

    # Check that all ops are valid and have the same values
    if not all_ops_are_valid:
        # We have to insert pooling layers
        for op in next_ops:
            if op.has_valid('stride_prop') and not np.array_equal(
                    op.stride_prop[spatial_dims], mo_array([1, 1])):
                # Insert pooling
                _insert_pooling(graph, node.out_node(), op, spatial_dims)
    elif len(stride_props) > 0:
        node.stride *= stride_props[0]
        log.debug('STRIDE PROP: {} got new strides {}'.format(
            node.name, node.stride))
        for op in next_ops:
            if op.soft_get('has_stride') == True:
                op.stride = mo_array([1, 1, 1, 1])
        node['is_partial_inferred'] = False
        node['output_spatial_shape'] = False
        _clean_fw_tensor_attrs(node.out_node())

    # If Convolution is valid then set `stride_prop` to Convolution stride
    node['stride_prop'] = mo_array(
        node.stride) if _check_convolution(node) else mo_array([1, 1, 1, 1])
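A note on why hoisting is legal for 1x1 kernels: a strided 1x1 convolution is a pointwise op followed by subsampling, and subsampling commutes with pointwise ops. A self-contained NumPy sketch of that equivalence (toy data and a stand-in pointwise function, not MO APIs):

import numpy as np

x = np.arange(36, dtype=np.float32).reshape(6, 6)   # one spatial plane

pointwise = lambda t: 2.0 * t + 1.0                 # stand-in for a 1x1 conv

late_stride = pointwise(x)[::2, ::2]                # stride applied after the op
early_stride = pointwise(x[::2, ::2])               # stride hoisted before the op

assert np.array_equal(late_stride, early_stride)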
Code Example #2
    def replace_sub_graph(self, graph: Graph, match: dict):
        op = match['op']
        out_port = op.in_port(0).get_source()

        if op.soft_get('scale', 1) != 1:
            const = Const(graph, {'value': mo_array(op.scale)}).create_node()
            mul = Mul(graph, {'name': op.name + '/mul_'}).create_node()
            const.out_port(0).connect(mul.in_port(1))
            out_port.connect(mul.in_port(0))
            out_port = mul.out_port(0)

        if op.soft_get('shift', 0) != 0:
            const = Const(graph, {'value': mo_array(op.shift)}).create_node()
            add = Add(graph, {'name': op.name + '/add_'}).create_node()
            const.out_port(0).connect(add.in_port(1))
            out_port.connect(add.in_port(0))
            out_port = add.out_port(0)

        if op.soft_get('power', 1) != 1:
            const = Const(graph, {'value': mo_array(op.power)}).create_node()
            pow_node = Pow(graph, {'name': op.name + '/pow_'}).create_node()
            const.out_port(0).connect(pow_node.in_port(1))
            out_port.connect(pow_node.in_port(0))
            out_port = pow_node.out_port(0)

        op.out_port(0).get_connection().set_source(out_port)
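Numerically, the emitted Mul -> Add -> Pow chain computes y = (x * scale + shift) ** power, and the soft_get() guards skip each stage at its neutral default. A toy NumPy check (illustration only):

import numpy as np

x = np.linspace(0.1, 1.0, 5, dtype=np.float32)
scale, shift, power = 2.0, 0.5, 3.0

y = np.power(x * scale + shift, power)   # the Mul -> Add -> Pow chain, eagerly

# with the default attribute values each branch above is skipped, and y == x
assert np.allclose(np.power(x * 1.0 + 0.0, 1.0), x)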
Code Example #3
    def extract(cls, node):
        eps = node.pb.batch_norm_param.eps
        attrs = {'eps': eps}
        pb_model = None if not node.soft_get('model_pb',
                                             None) else node.model_pb
        if pb_model:
            blobs = pb_model.blobs
        assert len(blobs) >= 2, 'BatchNorm accepts no fewer than two input blobs'
            mean = mo_array(blobs[0].data)
            variance = mo_array(blobs[1].data)

            if len(blobs) == 3:
                scale = blobs[2].data[0]
                if scale != 0:
                    scale = 1.0 / scale
                mean *= scale
                variance *= scale

            embed_input(attrs, 1, 'gamma', np.ones(mean.shape), 'gamma')
            embed_input(attrs, 2, 'beta', np.zeros(variance.shape), 'beta')
            embed_input(attrs, 3, 'mean', mean, 'biases')
            embed_input(attrs, 4, 'variance', variance, 'weights')

        BatchNormInference.update_node_stat(node, attrs)
        return cls.enabled
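For context, the third Caffe BatchNorm blob holds a scale factor, and the stored statistics are multiplied by its reciprocal to get the effective mean and variance. A toy NumPy restatement of that handling, with made-up values:

import numpy as np

# blobs[0]/blobs[1] hold accumulated mean/variance, blobs[2] a scale factor;
# the effective statistics are the accumulated ones times 1/scale_factor
mean_blob = np.array([2.0, 4.0], dtype=np.float32)
var_blob = np.array([1.0, 9.0], dtype=np.float32)
scale_factor = 2.0

scale = 1.0 / scale_factor if scale_factor != 0 else scale_factor
mean, variance = mean_blob * scale, var_blob * scale
assert np.allclose(mean, [1.0, 2.0]) and np.allclose(variance, [0.5, 4.5])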
Code Example #4
    def add_unsqueeze_for_new(graph: Graph, ss_node: Node):
        log.info(
            "StridedSlice op with new axis mask '{}' has been detected".format(
                ss_node.id))
        if len(ss_node.in_nodes()) != 4 or len(ss_node.out_nodes()) != 1:
            return

        shape_out = ss_node.out_node().shape
        dim = mo_array(range(len(ss_node['new_axis_mask'])))[mo_array(
            ss_node['new_axis_mask'], dtype=bool)]
        ss_shape = []
        for i in range(0, len(ss_node['new_axis_mask'])):
            if not ss_node['new_axis_mask'][i]:
                ss_shape.append(shape_out[i])
            else:
                ss_node['new_axis_mask'][i] = 0

        ss_node.out_port(0).data.set_shape(ss_shape)

        # insert Unsqueeze
        unsqueeze_node = Unsqueeze(graph,
                                   dict(name=ss_node.name +
                                        '/Unsqueeze_new')).create_node()
        ss_node.out_port(0).get_connection().insert_node(unsqueeze_node)
        unsqueeze_node.out_port(0).data.set_shape(shape_out)

        dims_node = Const(graph, {
            'name': unsqueeze_node.id + '/Indices',
            'value': int64_array(dim)
        }).create_node()
        dims_node.out_port(0).connect(unsqueeze_node.in_port(1))
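A toy restatement of the mask bookkeeping, outside the MO graph: dim collects the axes that new_axis_mask introduced, ss_shape is the output shape with those axes stripped, and Unsqueeze restores them:

import numpy as np

new_axis_mask = np.array([0, 1, 0, 1], dtype=bool)
shape_out = [3, 1, 5, 1]        # StridedSlice output shape, new axes included

dim = np.arange(len(new_axis_mask))[new_axis_mask]                  # -> [1, 3]
ss_shape = [s for s, m in zip(shape_out, new_axis_mask) if not m]   # -> [3, 5]

# re-inserting the axes with Unsqueeze restores the original output shape
assert np.expand_dims(np.zeros(ss_shape), tuple(dim)).shape == tuple(shape_out)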
Code Example #5
    def replace_pattern(graph: Graph, match: dict):
        node = match['proposal']
        assert len(node.in_ports()) == 3, "Proposal op must have exactly 3 input ports"
        im_info_shape = node.in_port(2).data.get_shape()
        assert im_info_shape is not None

        if np.array_equal(im_info_shape, [1, 6]):
            log.error('The model contains Proposal layer "{}" with input of shape [1, 6]. The Inference Engine '
                      'implementation of the Proposal layer uses only the first 4 values (indices 0, 1, 2 and 3). '
                      'Elements with indices 4 and 5 will be ignored.'.format(node.soft_get('name', node.id)),
                      extra={'is_warning': True})

            cropped_im_info = create_op_with_const_inputs(graph, StridedSlice, {1: mo_array([0, 0], dtype=np.int32),
                                                                                2: mo_array([1, 3], dtype=np.int32),
                                                                                3: mo_array([1, 1], dtype=np.int32)},
                                                          {'name': 'cropped_im_info',
                                                           'begin_mask': int64_array([1, 1]),
                                                           'end_mask': int64_array([1, 1]),
                                                           'new_axis_mask': int64_array([0, 0]),
                                                           'shrink_axis_mask': int64_array([0, 0]),
                                                           'ellipsis_mask': int64_array([0, 0]),
                                                           'override_output_shape': True,
                                                           })

            node.in_port(2).get_connection().insert_node(cropped_im_info)

            # update im_info_shape so that the next 'if' statement becomes true
            im_info_shape = int64_array([1, 3])

        if np.array_equal(im_info_shape, [1, 3]) or np.array_equal(im_info_shape, [1, 4]):
            reshape = create_op_node_with_second_input(graph, Reshape, [im_info_shape[1]], {'name': 'im_info/Reshape'})
            node.in_port(2).get_connection().set_destination(reshape.in_port(0))
            reshape.out_port(0).connect(node.in_port(2))
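The inserted StridedSlice amounts to ordinary slicing of the [1, 6] im_info tensor down to its first three values; the values below are made up for illustration:

import numpy as np

im_info = np.array([[600., 800., 1.0, 1.0, 0.5, 0.5]], dtype=np.float32)  # [1, 6]

# begin=[0, 0], end=[1, 3], strides=[1, 1] with all-ones begin/end masks,
# i.e. plain slicing on both axes
cropped = im_info[0:1, 0:3]
assert cropped.shape == (1, 3)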
Code Example #6
    def extract(cls, node):
        pb = node.pb
        model = node.model_pb
        param = pb.scale_param
        attrs = {
            'axis': param.axis,
        }

        if model is None and len(pb.bottom) == 1:
            # default weights and biases for scale layer if the caffemodel file doesn't contain them
            model = NamedAttrsClass({
                'blobs':
                mo_array([
                    NamedAttrsClass({'data': mo_array([1])}),
                    NamedAttrsClass({'data': mo_array([0])})
                ])
            })
        # scale with 1 input and 1 or 2 blobs
        if model and len(model.blobs) != 0 and len(pb.bottom) == 1:
            attrs.update(weights_biases(param.bias_term, model))
        # 2 inputs + bias
        elif len(pb.bottom) == 2 and param.bias_term:
            if model is None or len(model.blobs) == 0:
                # default bias for scale layer with 2 inputs if the caffemodel file doesn't contain them
                model = NamedAttrsClass({
                    'blobs':
                    mo_array([NamedAttrsClass({'data': mo_array([0])})])
                })

            embed_input(attrs, 1, 'biases', model.blobs[0].data)
        ScaleShiftOp.update_node_stat(node, attrs)
        return cls.enabled
Code Example #7
    def array_infer(node: Node):
        size = node.in_node(0)
        assert size.value is not None

        # 0 port: handle
        if 0 in node.out_nodes().keys():
            if node.has_valid('element_shape'):
                element_shape = node['element_shape']
            else:
                element_shape = None

            out_node = node.out_node(0).id
            output_value = node.out_node(0).id
            node.graph.node[out_node]['value'] = mo_array(output_value)

            output_shape = node.graph.node[out_node]['value'].shape
            node.graph.node[out_node]['shape'] = shape_array(output_shape)

            node.graph.node[out_node]['element_shape'] = shape_array(element_shape)
            node.graph.node[out_node]['size'] = size.value
        # 1 port: flow
        if 1 in node.out_nodes().keys():
            output_value = None

            out_node = node.out_node(1).id
            node.graph.node[out_node]['value'] = None if output_value is None else mo_array(output_value)
            # note: output_shape comes from the port-0 branch above, so the flow output
            # is only shaped correctly when the handle output has been inferred as well
            node.graph.node[out_node]['shape'] = shape_array(output_shape)
Code Example #8
    def extract(cls, node):
        dst_type = lambda x: mo_array(x)

        scale = onnx_attr(node,
                          'scale',
                          'f',
                          default=mo_array(1.0),
                          dst_type=dst_type)
        bias = onnx_attr(node,
                         'bias',
                         'floats',
                         default=None,
                         dst_type=dst_type)

        # Expand dims for bias in case it is not scalar; guard against the
        # default None value, for which there is nothing to reshape
        if bias is not None and bias.ndim != 0:
            broadcast_dims_cnt = 2 if node.graph.graph[
                'layout'] == 'NCHW' else 0
            for idx in range(broadcast_dims_cnt):
                bias = np.expand_dims(bias, axis=-1)

        node['scale'] = scale
        node['bias'] = bias

        return cls.enabled
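The loop above appends trailing singleton axes so a per-channel bias broadcasts over H and W in NCHW. A small NumPy sketch of the same reshaping:

import numpy as np

bias = np.array([0.1, 0.2, 0.3], dtype=np.float32)   # per-channel bias, shape (C,)

# for NCHW two trailing axes are appended so the bias broadcasts over H and W
for _ in range(2):
    bias = np.expand_dims(bias, axis=-1)
assert bias.shape == (3, 1, 1)

x = np.zeros((1, 3, 4, 4), dtype=np.float32)         # an NCHW activation
assert (x + bias).shape == (1, 3, 4, 4)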
Code Example #9
File: regionyolo_ext.py, project: mikhailk62/openvino
    def extract(cls, node):
        proto_layer = node.pb
        param = proto_layer.region_yolo_param
        flatten_param = proto_layer.flatten_param
        axis = flatten_param.axis
        end_axis = flatten_param.end_axis
        coords = param.coords
        classes = param.classes
        num = param.num
        update_attrs = {
            'coords': coords,
            'classes': classes,
            'num': num,
            'do_softmax': int(param.do_softmax),
            'anchors': mo_array(param.anchors),
            'mask': mo_array(param.mask)
        }

        flatten_attrs = {'axis': axis, 'end_axis': end_axis}

        mapping_rule = merge_attrs(param, update_attrs)

        mapping_rule.update(flatten_attrs)
        mapping_rule.update(layout_attrs())

        # update the attributes of the node
        RegionYoloOp.update_node_stat(node, mapping_rule)
        return cls.enabled
Code Example #10
File: bn.py, project: yury-intel/openvino
    def replace_op(self, graph: Graph, node: Node):
        attrs = {'name': node.id + "/ScaleShift_"}

        param = graph.node[node.id]['pb'].bn_param
        pb_model = graph.node[node.id]['model_pb']

        blobs = pb_model.blobs

        if len(blobs) != 4:
            raise Error("Incorrect number of blobs in BN layer {}".format(
                node.id))

        mean = mo_array(blobs[0].data)
        var = mo_array(blobs[1].data)
        betta = mo_array(blobs[2].data)
        gamma = mo_array(blobs[3].data)

        gamma = gamma + np.repeat(param.eps, gamma.shape)

        scale = 1.0 / np.sqrt(gamma) * mean
        shift = var - betta * scale

        ss = ScaleShiftOp(graph, attrs)
        scale_shift = ss.create_node([node.in_node(0)])
        input_as_const(scale_shift, attrs, 1, 'weights', scale)
        input_as_const(scale_shift, attrs, 2, 'biases', shift)

        return [scale_shift.id]
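The fold can be checked numerically with toy blob values; this only reproduces the arithmetic of replace_op() above, with the blob roles named after its variables:

import numpy as np

mean = np.array([0.5, 1.0], dtype=np.float32)    # blobs[0]
var = np.array([1.0, 2.0], dtype=np.float32)     # blobs[1]
betta = np.array([0.1, 0.2], dtype=np.float32)   # blobs[2]
gamma = np.array([4.0, 9.0], dtype=np.float32)   # blobs[3]
eps = 1e-5

g = gamma + eps
scale = 1.0 / np.sqrt(g) * mean
shift = var - betta * scale

x = np.ones(2, dtype=np.float32)
y = x * scale + shift    # what the resulting ScaleShift computes per channel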
Code Example #11
def _simple_stride_prop(graph: Graph,
                        node: Node,
                        spatial_dims,
                        supported=True):
    """
    This function handles stride propagation for op nodes. If the node is in the supported ops dict, it is a supported
    operation and we can propagate the stride directly through it (stride_prop is taken from the bottom op's
    stride_prop); otherwise we cannot, and the stride_prop attribute is set to 1,1,1,1.
    """
    next_ops = get_next_operation(node)
    stride_props, all_ops_are_valid = _check_next_ops(next_ops)

    if not supported or not all_ops_are_valid:
        # We have to insert pooling layers
        for op in next_ops:
            if op.has_valid('stride_prop') and not np.array_equal(op.stride_prop[spatial_dims], mo_array([1, 1])) and \
                    (not op.has_valid('has_stride') or op.soft_get('has_stride') == False):
                _insert_pooling(graph, node.out_node(), op, spatial_dims)
        # stride propagation stops here, so reset `stride_prop` to all ones
        node['stride_prop'] = mo_array([1, 1, 1, 1])
        return

    for op in next_ops:
        if op.soft_get('has_stride') == True:
            op.stride = mo_array([1, 1, 1, 1])
            log.debug(
                "STRIDE PROP: {} {} strides were moved up through {}".format(
                    op.type, op.name, node.name))

    node['stride_prop'] = mo_array(
        stride_props[0]) if len(stride_props) > 0 else mo_array([1, 1, 1, 1])
    node['is_partial_inferred'] = False
    _clean_fw_tensor_attrs(node.out_node())
Code Example #12
def _insert_pooling(graph: Graph, first_node: Node, second_node: Node,
                    spatial_dims):
    """
    This function inserts a pointwise pooling layer between two nodes.
    """
    log.debug("STRIDE PROP: Insert pooling between {} and {}".format(
        first_node.name, second_node.name))
    stride_prop = second_node.stride_prop
    assert len(graph.get_edge_data(first_node.id, second_node.id)) == 1
    eattrs = graph.get_edge_data(first_node.id, second_node.id)[0]
    graph.remove_edge(first_node.id, second_node.id)

    pooling = Pooling(
        graph,
        dict(name='Pooling_',
             spatial_dims=spatial_dims,
             window=mo_array([1, 1, 1, 1]),
             output_spatial_shape=None,
             stride=mo_array(stride_prop),
             pad_spatial_shape=mo_array([[0, 0], [0, 0]]),
             pad=mo_array([[0, 0], [0, 0], [0, 0], [0, 0]]),
             pool_method='max',
             is_partial_inferred=False))
    pooling_data = pooling.create_node_with_data([first_node])

    _clean_fw_tensor_attrs(pooling_data)

    graph.add_edges_from([(pooling_data.id, second_node.id, eattrs)])
Code Example #13
    def replace_pattern(graph: Graph, match: dict):
        conv = match['op']
        assert len(conv.in_nodes()) == 2
        initial_shape = conv.in_port(1).data.get_shape()
        assert initial_shape is not None
        weights = conv.in_port(1).data.get_value().flatten()
        weights_rounded = np.round(weights)
        assert np.all(np.isclose(weights, weights_rounded))
        assert len(conv.in_node(1).out_nodes()) == 1
        weights_rounded = mo_array(weights_rounded, dtype=np.int32) + 1  # -1 --> 0
        # Reverse elements in chunks of 8 so the bits are packed in the right order.
        # First pad the data with enough zeros to make its length divisible by 8.
        pad = (-len(weights_rounded)) % 8
        weights_rounded = mo_array(np.concatenate((weights_rounded, np.zeros([pad]))), dtype=np.int32)
        assert len(weights_rounded) % 8 == 0
        weights_rounded = weights_rounded.reshape([len(weights_rounded) // 8, 8])
        weights_rounded = np.flip(weights_rounded, axis=1)
        weights_rounded = weights_rounded.flatten()
        packed = np.packbits(weights_rounded)
        conv.in_port(1).data.set_value(packed)
        conv['packed_weights'] = 1

        conv.in_node(1)['force_shape'] = initial_shape.copy()
        conv.in_node(1)['shape'] = initial_shape.copy()
        conv.in_node(1)['force_type'] = 'U1'
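A toy illustration of the packing trick with five binary weights (plain NumPy, simplified to 0/1 bits rather than the +1 offset used above): padding to a multiple of 8 and flipping each 8-element chunk makes the first weight land in the lowest bit of its byte.

import numpy as np

weights = np.array([-1, 1, 1, -1, 1], dtype=np.float32)
bits = (weights > 0).astype(np.uint8)                  # -1 -> 0, +1 -> 1

pad = (-len(bits)) % 8                                 # 3 zero bits of padding here
bits = np.concatenate((bits, np.zeros(pad, dtype=np.uint8)))
bits = np.flip(bits.reshape(-1, 8), axis=1).flatten()
packed = np.packbits(bits)

assert packed.shape == (1,)                            # 5 weights fit in one byte
assert packed[0] == 0b00010110                         # lowest bit = first weight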
Code Example #14
 def extract(cls, node: Node):
     scale = onnx_attr(node,
                       'scale',
                       'f',
                       default=mo_array(1.0),
                       dst_type=lambda x: mo_array(x))
     AttributedPower.update_node_stat(node, {'scale': scale})
     return cls.enabled
Code Example #15
 def enter_infer(node: Node):
     output_shape = node.in_node(0).shape
     output_value = node.in_node(0).value
     for _, out_node in node.graph.out_edges(node.id):
         node.graph.node[out_node]['shape'] = mo_array(output_shape)
         node.graph.node[out_node][
             'value'] = None if output_value is None else mo_array(
                 output_value)
Code Example #16
    def extract(cls, node):
        attrs = get_mxnet_layer_attrs(node.symbol_dict)
        node_attrs = {
            'crop_begin': mo_array(attrs.tuple("begin", int, ())),
            'crop_end': mo_array(attrs.tuple("end", int, ())),
            'step': mo_array(attrs.tuple("step", int, ())),
        }

        MXSlice.update_node_stat(node, node_attrs)
        return cls.enabled
Code Example #17
    def pass_rc_through_eltwise(node, reverse_channels):
        r"""
        BEFORE                              AFTER

          previous_op                                       previous_op'
              |                                                 |
        ReverseChannels  previous_op'    previous_op     ReverseChannels
                     \     /                        \     /
                     Eltwise                        Eltwise
                                                      |
                                                ReverseChannels

        Returns a boolean indicating whether we should continue propagating the current ReverseChannels operation down.
        """
        before_shape = reverse_channels.out_port(0).data.get_shape()

        port_axis = []
        for idx, port in node.in_ports().items():
            if port.get_connection().get_source().node.id == reverse_channels.id:
                continue
            shape = port.data.get_shape()
            non_one_dims = np.where(shape != 1)[0]
            if shape[reverse_channels.axis] == 1:
                continue  # nothing to flip for this input
            if len(non_one_dims) == 1 and shape[non_one_dims.item()] == reverse_channels.order.size:
                new_axis = non_one_dims.item()
            elif np.array_equal(before_shape, shape):
                new_axis = reverse_channels.axis
            else:
                # shape has multiple non-one dims and is not fully broadcast to the other
                # port's shape; the safe choice is not to propagate ReverseChannels
                return False
            port_axis.append((port, new_axis))

        # reversing eltwise inputs where applicable
        for port, axis in port_axis:
            ric_copy = reverse_channels.copy_node({'axis': mo_array(axis), 'order': mo_array(reverse_channels.order)})

            src = port.get_connection().get_source()
            port.get_connection().set_source(ric_copy.out_port(0))
            src.disconnect()
            src.connect(ric_copy.in_port(0))

        # detaching reverse_channels node from the graph
        reverse_channels.out_port(0).get_connection().set_source(
            reverse_channels.in_port(0).get_connection().get_source())
        reverse_channels.in_port(0).disconnect()

        # propagating reverse_channels node to the output port of eltwise
        node.out_port(0).get_connection().set_source(reverse_channels.out_port(0))
        node.out_port(0).disconnect()
        node.out_port(0).connect(reverse_channels.in_port(0))

        # propagated reverse_channels successfully through current node, will continue propagation
        return True
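The per-input axis decision in the loop above can be restated as a small standalone function; this is a toy paraphrase for illustration, not an MO API:

import numpy as np

def pick_flip_axis(input_shape, rc_out_shape, rc_axis, order_size):
    """Toy paraphrase of the per-input decision in pass_rc_through_eltwise."""
    shape = np.array(input_shape)
    non_one_dims = np.where(shape != 1)[0]
    if shape[rc_axis] == 1:
        return None                   # nothing to flip for this input
    if len(non_one_dims) == 1 and shape[non_one_dims.item()] == order_size:
        return non_one_dims.item()    # single real dim matches the channel count
    if np.array_equal(rc_out_shape, shape):
        return rc_axis                # same shape as the RC output: keep the axis
    return False                      # ambiguous broadcast: stop propagation

assert pick_flip_axis([1, 3, 1, 1], [1, 3, 224, 224], 1, 3) == 1
assert pick_flip_axis([1, 3, 224, 224], [1, 3, 224, 224], 1, 3) == 1
assert pick_flip_axis([1, 1, 224, 224], [1, 3, 224, 224], 1, 3) is None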
Code Example #18
class mo_array_test(unittest.TestCase):
    @generate(*[(mo_array([2, 3, 5, 7]), np.array([2, 3, 5, 7])),
                (mo_array([2., 3., 5., 7.], dtype=np.float64), np.array([2., 3., 5., 7.])),
                (mo_array([2., 3., 5., 7.]), np.array([2., 3., 5., 7.], dtype=np.float32)),
                ])
    def test_mo_array_positive(self, data, result):
        self.assertEqual(data.dtype, result.dtype)

    @generate(*[(mo_array([2., 3., 5., 7.]), np.array([2., 3., 5., 7.])),
                ])
    def test_mo_array_negative(self, data, result):
        self.assertNotEqual(data.dtype, result.dtype)
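These tests pin down mo_array's one visible deviation from np.array: without an explicit dtype, floating-point data becomes float32 instead of NumPy's float64 default. A hypothetical stand-in that matches just these expectations:

import numpy as np

def mo_array_sketch(value, dtype=None):
    # Hypothetical re-implementation, only as specified by the tests above:
    # integer data and explicit dtypes pass through unchanged, while default
    # float64 results are downcast to float32.
    result = np.array(value, dtype=dtype)
    if dtype is None and result.dtype == np.float64:
        result = result.astype(np.float32)
    return result

assert mo_array_sketch([2, 3, 5, 7]).dtype == np.array([2, 3, 5, 7]).dtype
assert mo_array_sketch([2., 3.], dtype=np.float64).dtype == np.float64
assert mo_array_sketch([2., 3.]).dtype == np.float32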
Code Example #19
    def replace_pattern(graph: Graph, match: dict):
        node = match['op']
        node_id = node['variable_id']

        out_node_port = node.out_port(0).get_destination()
        in_node_port = node.in_port(0).get_source()
        node.in_port(0).disconnect()
        node.out_port(0).disconnect()
        crop = Crop(graph, {'name': 'Result_for_'+node_id, 'dim': mo_array([1]), 'offset': mo_array([0]),
                            'axis': mo_array([0])}).create_node()
        in_node_port.connect(crop.in_port(0))
        crop.out_port(0).connect(out_node_port)
Code Example #20
    def add_squeeze_for_shrink(graph: Graph, ss_node: Node):
        # add Squeeze for shrink_axis_mask
        log.info(
            "StridedSlice op with shrink mask '{}' has been detected".format(
                ss_node.id))

        if len(ss_node.in_nodes()) != 4 or len(ss_node.out_nodes()) != 1:
            return

        shape_out = ss_node.out_node().shape
        dim = mo_array(range(len(ss_node['shrink_axis_mask'])))[mo_array(
            ss_node['shrink_axis_mask'], dtype=bool)]
        ss_shape = []
        i = 0
        k = 0

        # Don't permute reshape if channels were squeezed
        dont_permute = graph.graph['layout'] == 'NCHW'
        if graph.graph['layout'] == 'NHWC' and ss_node['shrink_axis_mask'][
                -1] == 1:
            dont_permute = True

        while k < len(shape_out):
            if i >= len(ss_node['shrink_axis_mask']
                        ) or not ss_node['shrink_axis_mask'][i]:
                ss_shape.append(shape_out[k])
                k = k + 1
            else:
                ss_node['shrink_axis_mask'][i] = 0
                ss_shape.append(1)
            i = i + 1

        while i < len(ss_node['shrink_axis_mask']):
            ss_node['shrink_axis_mask'][i] = 0
            ss_shape.append(1)
            i = i + 1

        ss_node.out_port(0).data.set_shape(ss_shape)

        # insert Squeeze
        squeeze_node = Squeeze(
            graph,
            dict(name=ss_node.name + '/Squeeze_shrink',
                 nchw_layout=dont_permute,
                 correct_data_layout=dont_permute)).create_node()
        ss_node.out_port(0).get_connection().insert_node(squeeze_node)
        squeeze_node.out_port(0).data.set_shape(shape_out)

        dims_node = Const(graph, {
            'name': squeeze_node.id + '/Indices',
            'value': int64_array(dim)
        }).create_node()
        dims_node.out_port(0).connect(squeeze_node.in_port(1))
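A toy restatement of the shape bookkeeping, outside the MO graph: dim collects the shrunk axes, ss_shape is the StridedSlice shape with those axes restored as 1s, and Squeeze removes them again:

import numpy as np

shrink_axis_mask = np.array([0, 1, 0, 1], dtype=bool)
shape_out = [3, 5]               # StridedSlice output with axes 1 and 3 removed

# axes the Squeeze has to remove again after the masks are cleared
dim = np.arange(len(shrink_axis_mask))[shrink_axis_mask]      # -> [1, 3]
# shape the StridedSlice reports once the shrunk axes are restored as 1s
ss_shape, k = [], 0
for m in shrink_axis_mask:
    ss_shape.append(1 if m else shape_out[k])
    k += 0 if m else 1                                        # -> [3, 1, 5, 1]

assert np.zeros(ss_shape).squeeze(tuple(dim)).shape == tuple(shape_out)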
Code Example #21
 def looking_for_iteration_counter(graph: Graph, match: dict):
     types = ['TensorIteratorInput', 'TensorIteratorOutput']
     candidates = mo_array(
         [match['Identity_1_data'], match['Identity_2_data']])
     results = mo_array([False for i in range(len(candidates))])
     for i, candidate in enumerate(candidates):
         for node in candidate.out_nodes():
             if node['op'] in types:
                 results[i] = True
     assert not np.all(results)
     assert sum(results) == 1
     return candidates[results][0]
Code Example #22
    def replace(node: Node, const: Node):
        graph = node.graph
        shape = const.shape
        const_name = const.soft_get('name', const.id)

        non_one_dims = np.argwhere(shape != 1).flatten()
        one_dims = np.argwhere(shape == 1).flatten()

        if not (non_one_dims.size == 1 and 5 < np.prod(shape) < 500):
            # the (5, 500) element-count range was chosen empirically to limit the number of affected models
            return

        value = const.value
        if not np.array_equal(np.arange(0, np.prod(shape), 1).reshape(shape), value):
            return

        positive_idx = non_one_dims.item(0)
        negative_idx = positive_idx - len(shape)

        node_name = node.soft_get('name', node.id)
        gather = create_op_with_const_inputs(graph, Gather, {1: int64_array(negative_idx), 2: int64_array(0)},
                                             {'name': node_name + '/BroadcastingDim'})
        gather_for_const = create_op_with_const_inputs(graph, Gather, {1: int64_array(negative_idx), 2: int64_array(0)},
                                                       {'name': const_name + '/BroadcastingDim'})
        shapeof_node = Shape(graph, {'name': const_name + '/ShapeOf'}).create_node()
        shapeof_node.out_port(0).connect(gather_for_const.in_port(0))

        equal_node = create_op_with_const_inputs(graph, Equal, {1: int64_array(1)}, {'name': node_name + '/ConstOne'})
        gather.out_port(0).connect(equal_node.in_port(0))

        select_node = Select(graph, {'name': node_name + '/Select',
                                      'auto_broadcast': 'numpy'}).create_node([equal_node, gather_for_const, gather])

        const.out_port(0).connect(shapeof_node.in_port(0))

        range_node = create_op_with_const_inputs(graph, Range,
                                                 {0: mo_array(0, dtype=value.dtype),
                                                  2: mo_array(1, dtype=value.dtype)},
                                                 {'name': const_name + '/Range', 'dtype': value.dtype})
        select_node.out_port(0).connect(range_node.in_port(1))

        node.in_port(1).get_connection().add_destination(gather.in_port(0))

        node.in_port(0).get_connection().set_source(range_node.out_port(0))

        if one_dims.size:
            unsqueeze = create_op_node_with_second_input(graph, Unsqueeze, one_dims,
                                                         {'name': const_name + '/KeepShape'})
            range_node.out_port(0).get_connection().insert_node(unsqueeze)
            rename_nodes([(const, const_name + '/ToBeDeleted'), (unsqueeze, const_name)])
        else:
            rename_nodes([(const, const_name + '/ToBeDeleted'), (range_node, const_name)])
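The guards above only fire for a constant that is effectively a 1-D row of consecutive indices with a modest element count; such a constant can be rebuilt at runtime as Range(0, dim_size, 1), which is what the Gather/Select/Range sub-graph does. A toy check of the two conditions:

import numpy as np

shape = np.array([1, 8, 1])
value = np.arange(0, np.prod(shape), 1).reshape(shape)

non_one_dims = np.argwhere(shape != 1).flatten()
assert non_one_dims.size == 1 and 5 < np.prod(shape) < 500
assert np.array_equal(np.arange(0, np.prod(shape), 1).reshape(shape), value)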
Code Example #23
    def replace_op(self, graph: Graph, node: Node):
        alpha = onnx_attr(node, 'alpha', 'f', default=0.2)
        beta = onnx_attr(node, 'beta', 'f', default=0.5)

        hard_sigmoid = create_op_with_const_inputs(
            graph, HardSigmoid, {
                1: mo_array(alpha),
                2: mo_array(beta)
            }, {'name': node.name + '/HardSigmoid_'})

        node.in_port(0).get_connection().set_destination(
            hard_sigmoid.in_port(0))
        return [hard_sigmoid.id]
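ONNX HardSigmoid with these attributes computes y = max(0, min(1, alpha * x + beta)); a quick NumPy check with the same defaults as above:

import numpy as np

alpha, beta = 0.2, 0.5
x = np.array([-5.0, -1.0, 0.0, 1.0, 5.0], dtype=np.float32)
y = np.clip(alpha * x + beta, 0.0, 1.0)
assert np.allclose(y, [0.0, 0.3, 0.5, 0.7, 1.0])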
Code Example #24
 def replace_sub_graph(self, graph: Graph, match: dict):
     ph = match['placeholder']
     if ph.name in graph.graph['freeze_placeholder']:
         name = ph.name
         if ph.has_and_set('data_type'):
             data_type = ph.data_type
         else:
             data_type = SUPPORTED_DATA_TYPES[
                 graph.graph['cmd_params'].data_type][0]
         string_value = graph.graph['freeze_placeholder'][name]
         try:
             if data_type != np.bool:
                 value = mo_array(string_value, dtype=data_type)
             # TODO: investigate why boolean type is allowed only for TensorFlow
             elif data_type == np.bool and graph.graph['fw'] == 'tf':
                 from openvino.tools.mo.front.tf.common import tf_data_type_cast
                 if isinstance(string_value, list):
                     casted_list = list()
                     for v in mo_array(string_value):
                         casted_list.append(
                             tf_data_type_cast[ph.data_type](v))
                     value = mo_array(casted_list, dtype=data_type)
                 else:
                     value = tf_data_type_cast[ph.data_type](string_value)
             else:
                 raise Error("Cannot cast value {} to {} data_type".format(
                     string_value, data_type))
         except Exception:
             raise Error("Cannot cast value {} to {} data_type".format(
                 string_value, data_type))
         try:
             value = np.reshape(a=value, newshape=ph.shape)
         except Exception:
             raise Error("Cannot reshape value {} to shape {}".format(
                 value, ph.shape))
         out_edges = list(graph.out_edges(ph.id, data=True))
         new_node = Const(graph).create_node(
             attrs={
                 'value': value,
                 'data_type': type(value),
                 'name': name + '/const_placeholder',
                 'shape': ph.shape
             })
         graph.erase_node(ph)
         graph.add_edges_from([(new_node.id, v, attrs)
                               for u, v, attrs in out_edges])
         log.info(
             "Placeholder node \"{}\" was replaced with Const node \"{}\" with value \"{}\""
             .format(name, new_node.name, value))
Code Example #25
    def replace_pattern(graph: Graph, match: dict):
        node = match['normalize']

        # rename the Normalize node since it will no longer be the output node after the transformation
        output_name = node.soft_get('name', node.id)
        normalizel2_name = output_name + '/normalizel2'
        rename_node(node, normalizel2_name)

        assert node.in_port(0).data.get_shape().size in [2, 3, 4]
        assert node.has_valid('across_spatial')
        assert node.has_valid('channel_shared')
        assert node.has_valid('eps')

        if 'bin' in node.in_edge(1):
            del node.in_edge(1)['bin']

        weights = node.in_port(1).data.get_value()
        assert weights is not None
        # in the code below we intentionally use get_source() to get the out port, because updating
        # the out port will also update the Const node's 'value' and 'shape' attributes
        if node.channel_shared or all(weights == weights[0]):
            node.in_port(1).get_source().data.set_value(mo_array([weights[0]]))
        else:
            new_shape = np.ones((len(node.in_port(0).data.get_shape())),
                                dtype=np.int64)
            new_shape[1] = -1
            node.in_port(1).get_source().data.set_value(
                mo_array(weights).reshape(new_shape))

        mul = Mul(graph, {'name': output_name}).create_node()
        rename_node(mul, output_name)

        if not node.across_spatial:
            axes = int64_array([1])
        else:
            axes = int64_array(
                np.arange(start=1, stop=node.in_port(0).data.get_shape().size))

        normalizel2 = create_op_with_const_inputs(graph, NormalizeL2Op,
                                                  {1: axes}, {
                                                      'eps_mode': 'add',
                                                      'eps': node.eps
                                                  })

        node.out_port(0).get_connection().set_source(mul.out_port(0))
        node.in_port(1).get_connection().get_source().connect(mul.in_port(1))
        normalizel2.out_port(0).connect(mul.in_port(0))
        node.in_port(0).get_connection().set_destination(
            normalizel2.in_port(0))
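A toy numeric sketch of the decomposed computation, assuming NormalizeL2 with eps_mode='add' divides by sqrt(sum(x**2, axes) + eps) (an assumption about the op's semantics, not something this snippet states); the trailing Mul applies the per-channel weights:

import numpy as np

np.random.seed(0)
x = np.random.rand(1, 3, 4, 4).astype(np.float32)
w = np.array([0.5, 1.0, 2.0], dtype=np.float32).reshape(1, -1, 1, 1)
eps, axes = 1e-6, (1,)     # across_spatial=False: reduce over the channel axis only

l2 = x / np.sqrt(np.sum(x ** 2, axis=axes, keepdims=True) + eps)
out = l2 * w
assert out.shape == x.shape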
Code Example #26
    def quantize_data(fake_quantize: Node, dst_type: type,
                      quantized_type: type, mode: str):
        graph = fake_quantize.graph
        name = fake_quantize.soft_get('name', fake_quantize.id)
        levels = fake_quantize.levels

        quantize = fake_quantize.copy_node(
            dict(name=name + '/Copy', stop_value_propagation=False), graph)
        fake_quantize.in_port(0).get_connection().set_destination(
            quantize.in_port(0))

        # inherit input limits
        fake_quantize.in_port(1).get_connection().set_destination(
            quantize.in_port(1))
        fake_quantize.in_port(2).get_connection().set_destination(
            quantize.in_port(2))

        # calculate output limits for quantized weights
        assert mode in ["signed", "unsigned"]
        i_min_value = -(levels // 2) if mode == "signed" else 0

        i_min = mo_array(i_min_value, dtype=dst_type) if not quantize.in_node(
            0).shape.size else mo_array([i_min_value], dtype=dst_type)
        i_max = mo_array(levels + i_min - 1, dtype=dst_type)

        assert i_max - i_min == levels - 1
        out_low = Const(graph, dict(name=name + '/Copy/out_low',
                                    value=i_min)).create_node()
        out_high = Const(graph, dict(name=name + '/Copy/out_high',
                                     value=i_max)).create_node()

        out_low.out_port(0).connect(quantize.in_port(3))
        out_high.out_port(0).connect(quantize.in_port(4))
        out_low.out_port(0).connect(fake_quantize.in_port(1))
        out_high.out_port(0).connect(fake_quantize.in_port(2))

        original_const = quantize.in_port(0).get_source().node
        quantized_data_name = original_const.soft_get(
            'name', original_const.id) + '/quantized'
        cast = Cast(
            graph,
            dict(name=quantized_data_name,
                 dst_type=quantized_type,
                 stop_value_propagation=False)).create_node()

        quantize.out_port(0).connect(cast.in_port(0))

        cast.out_port(0).connect(fake_quantize.in_port(0))
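The output-limit arithmetic above is compact enough to restate as a tiny helper; a sketch with the usual 8-bit and binary cases:

def quantized_limits(levels: int, mode: str):
    # mirrors the i_min/i_max derivation in quantize_data() above
    assert mode in ("signed", "unsigned")
    i_min = -(levels // 2) if mode == "signed" else 0
    i_max = levels + i_min - 1
    assert i_max - i_min == levels - 1
    return i_min, i_max

assert quantized_limits(256, "signed") == (-128, 127)    # e.g. INT8
assert quantized_limits(256, "unsigned") == (0, 255)     # e.g. UINT8
assert quantized_limits(2, "signed") == (-1, 0)          # binary case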
Code Example #27
    def replace_pattern(graph: Graph, match: dict):
        mem = match['op']
        mem_shape = mem.in_port(0).data.get_shape()
        mem_parent = mem.in_port(0).get_source()
        context = mem['context']

        for child_port in mem_parent.get_destinations():
            child = child_port.node
            # check whether this child is a Splice whose context is a subset of 'context'
            if child['op'] == 'Splice' and child.id != mem.id and set(
                    child['context']).issubset(set(context)):
                left_cont_out = child['context'][0]
                left_cont = context[0]

                for child_of_child in child.out_port(0).get_destinations():
                    out_transfer = child_of_child.node
                    out_transfer_port = child_of_child
                    if out_transfer['op'] == 'Crop':
                        # modify the existing Crop to read the right data from the larger Splice
                        out_transfer['offset'] = out_transfer['offset'] + (
                            left_cont_out - left_cont) * mem_shape[-1]
                    else:
                        # insert a Crop if one does not exist yet
                        child_of_child.disconnect()
                        crop_node = Crop(
                            graph, {
                                'name':
                                graph.unique_id(prefix='Splice_crop_'),
                                'offset':
                                (left_cont_out - left_cont) * mem_shape[-1],
                                'dim':
                                mo_array(
                                    [len(child['context']) * mem_shape[-1]]),
                                'axis':
                                mo_array([-1])
                            }).create_node()
                        child.out_port(0).connect(crop_node.in_port(0))
                        crop_node.out_port(0).connect(child_of_child)
                        crop_node.out_port(0).data.set_shape(
                            child.out_port(0).data.get_shape())

                        out_transfer_port = crop_node.in_port(0)

                    # move edge to child from old Splice to larger
                    out_transfer_port.disconnect()
                    mem.out_port(0).connect(out_transfer_port)

                graph.remove_node(child.id)
Code Example #28
def resolve_shared_inputs(node: Node, port_ids_to_duplicate: List[int]):
    """
    Duplicates shared constants that are consumed by more than one node.
    If a constant is consumed by several ports of one node, no duplication is done.
    """
    graph = node.graph

    for port_id in port_ids_to_duplicate:
        dst_port_map = defaultdict(list)
        for dst in node.in_port(
                port_id).get_source().get_connection().get_destinations():
            dst_port_map[dst.node].append(dst.idx)
        del dst_port_map[node]
        value = node.in_port(port_id).data.get_value()
        if value is None:
            log.debug('Cannot duplicate: no value available for in_port {} of node {}'.format(port_id, node.name))
            continue
        for dst_node, idxs in dst_port_map.items():
            const = Const(
                graph, {
                    'value': mo_array(value),
                    'name': dst_node.soft_get('name', dst_node.id) + '/duplicated_'
                }).create_node()
            for idx in idxs:
                dst_node.in_port(idx).disconnect()
                const.out_port(0).connect(dst_node.in_port(idx))
            const.infer(const)
Code Example #29
def calculate_prior_box_value(value: Node, value_to_div: Port,
                              value_to_add: Port):
    """
    :param value: Node with value. The node is expected to have op='Split'
    :param value_to_div: Output port with values to be divided by 2
    :param value_to_add: Output port with values to be added to values from value_to_div port
    :return: Sub and Add nodes

    The sub-graph can be described by formulas:
    min = value[value_to_add] - (value[value_to_div] / 2)
    max = value[value_to_add] + (value[value_to_div] / 2)
    """
    graph = value.graph
    dtype = data_type_str_to_np(graph.graph['cmd_params'].data_type)
    _min = Sub(graph, dict(name=value.name + '/Sub')).create_node()
    div = create_op_node_with_second_input(graph,
                                           Div,
                                           mo_array([2], dtype=dtype),
                                           op_attrs=dict(name=value.name +
                                                         '/Div'))
    div.in_port(0).connect(value_to_div)
    _min.in_port(0).connect(value_to_add)
    _min.in_port(1).connect(div.out_port(0))

    _max = Add(graph, dict(name=value.name + '/Add')).create_node()
    _max.in_port(0).connect(div.out_port(0))
    _max.in_port(1).connect(value_to_add)

    return _min, _max
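The docstring's formulas on toy data, with value_to_div carrying box sizes and value_to_add carrying box centers (values are made up):

import numpy as np

size = np.array([4.0, 6.0], dtype=np.float32)      # value[value_to_div]
center = np.array([10.0, 20.0], dtype=np.float32)  # value[value_to_add]

_min = center - size / 2
_max = center + size / 2
assert np.allclose(_min, [8.0, 17.0]) and np.allclose(_max, [12.0, 23.0])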
Code Example #30
File: ONNXResize11.py, project: mikhailk62/openvino
    def onnx_resize_infer(node: Node):
        input_shape = node.in_port(0).data.get_shape()
        if input_shape is None:
            return

        assert (node.is_in_port_connected(0) and (node.is_in_port_connected(2) or node.is_in_port_connected(3))), \
            "One of the scales or sizes inputs must be connected to Node {} with op {}." \
            "".format(node.soft_get("name", node.id), node.op)

        assert node.coordinate_transformation_mode != 'tf_crop_and_resize', \
            'Mode tf_crop_and_resize is not supported for op {} with name {}'.format(node.op,
                                                                                     node.soft_get("name", node.id))

        if not node.is_in_port_connected(3):
            # i.e. input 'sizes' is not given
            input2_value = node.in_port(2).data.get_value()
            assert input2_value is not None, \
                "Node {} with op {} has no value in input port 2".format(node.soft_get('name', node.id), node.op)
            scale = mo_array(input2_value)
            output_shape = np.floor(input_shape * scale + 1.0e-6).astype(np.int64)
        else:
            # i.e. input 'sizes' is given
            sizes = node.in_port(3).data.get_value()
            assert sizes is not None, \
                "Node {} with op {} has no value in input port 3".format(node.soft_get("name", node.id), node.op)
            output_shape = input_shape.copy()
            spatial_dimension_indices = range(2, len(input_shape))
            output_shape[spatial_dimension_indices] = sizes[2:]

        node.out_port(0).data.set_shape(output_shape)
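Both shape paths can be checked with toy numbers; this mirrors the arithmetic above and nothing else:

import numpy as np

input_shape = np.array([1, 3, 64, 48], dtype=np.int64)

# scales path (input 3 absent): every dim is multiplied by its scale
scale = np.array([1.0, 1.0, 2.0, 0.5], dtype=np.float32)
assert np.array_equal(np.floor(input_shape * scale + 1.0e-6).astype(np.int64),
                      [1, 3, 128, 24])

# sizes path (input 3 present): spatial dims are overwritten from sizes[2:]
sizes = np.array([1, 3, 100, 100], dtype=np.int64)
output_shape = input_shape.copy()
output_shape[range(2, len(input_shape))] = sizes[2:]
assert np.array_equal(output_shape, [1, 3, 100, 100])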