Example #1
    def __init__(self):
        sequence_two_dim_softmax = GraphSequence(
            [ConverterSequenceNode('root', ['SoftMax'])])
        sequence_two_dim_softmax.set_outputs(['root'])

        sequence_multi_dim_softmax = GraphSequence([
            ConverterSequenceNode('max', ['Max']),
            ConverterSequenceNode('max_reduction_indices', ['Const']),
            ConverterSequenceNode('sub', ['Sub']),
            ConverterSequenceNode('exp', ['Exp']),
            ConverterSequenceNode('sum', ['Sum']),
            ConverterSequenceNode('sum_reduction_indices', ['Const']),
            ConverterSequenceNode('root', ['RealDiv']),
            NonConsumableConverterSequenceNode('input', ['?'])
        ])
        sequence_multi_dim_softmax.set_inputs(
            'max', ['input', 'max_reduction_indices'])
        sequence_multi_dim_softmax.set_inputs('sub', ['input', 'max'])
        sequence_multi_dim_softmax.set_inputs('exp', ['sub'])
        sequence_multi_dim_softmax.set_inputs(
            'sum', ['exp', 'sum_reduction_indices'])
        sequence_multi_dim_softmax.set_inputs('root', ['exp', 'sum'])
        sequence_multi_dim_softmax.set_outputs(['root'])

        self.sequences = [sequence_two_dim_softmax, sequence_multi_dim_softmax]
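A quick NumPy check (not from the source) of the computation the multi-dim sequence matches, Max -> Sub -> Exp -> Sum -> RealDiv, i.e. the numerically stable softmax:

import numpy as np

def multi_dim_softmax(x, axis=-1):
    shifted = x - np.max(x, axis=axis, keepdims=True)  # Max -> Sub
    e = np.exp(shifted)                                # Exp
    return e / np.sum(e, axis=axis, keepdims=True)     # Sum -> RealDiv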
Example #2
    def __init__(self):

        sequence = GraphSequence([
            ConverterSequenceNode('matmul_op', ['MatMul']),
            ConverterSequenceNode('bias_op', ['BiasAdd', 'Add']),  # output
            NonConsumableConverterSequenceNode('biases',
                                               ['Identity', 'Const']),
            NonConsumableConverterSequenceNode('weights',
                                               ['Identity', 'Const']),
            NonConsumableConverterSequenceNode('inputs', ['?'])
        ])
        sequence.set_inputs('matmul_op', ['inputs', 'weights'])
        sequence.set_inputs('bias_op', ['matmul_op', 'biases'])
        sequence.set_outputs(['bias_op'])

        sequence_without_bias = GraphSequence([
            ConverterSequenceNode('matmul_op', ['MatMul']),
            NonConsumableConverterSequenceNode('weights',
                                               ['Identity', 'Const']),
            NonConsumableConverterSequenceNode('inputs', ['?'])
        ])
        sequence_without_bias.set_inputs('matmul_op', ['inputs', 'weights'])
        sequence_without_bias.set_outputs(['matmul_op'])

        self.sequences = [sequence_without_bias, sequence]
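For reference, a minimal TF1-style graph that the biased sequence would match; this sketch assumes the tf.compat.v1 API, and the tensor names simply mirror the sequence nodes:

import numpy as np
import tensorflow.compat.v1 as tf

tf.disable_eager_execution()
x = tf.placeholder(tf.float32, [1, 4], name='inputs')         # 'inputs' ('?')
w = tf.constant(np.ones((4, 8), np.float32), name='weights')  # 'weights' (Const)
b = tf.constant(np.zeros(8, np.float32), name='biases')       # 'biases' (Const)
y = tf.nn.bias_add(tf.matmul(x, w), b)                        # MatMul -> BiasAdd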
Example #3
class EltWiseUnaryLayerResolver(LayerResolver, object):
    __metaclass__ = ABCMeta

    def __init__(self, layer_type, op_type, descriptor_class):
        super(EltWiseUnaryLayerResolver, self).__init__()
        self._layer_type = layer_type
        self._op_type = op_type
        self._descriptor_class = descriptor_class

        self.sequence = GraphSequence([
            ConverterSequenceNode('root', [self._op_type]),
            NonConsumableConverterSequenceNode('input1', ['?']),
        ])
        self.sequence.set_inputs('root', ['input1'])
        self.sequence.set_outputs(['root'])

    def resolve_layer(self, graph_matcher, graph_helper):
        descriptors = []
        non_const_input_sequences = [self.sequence]
        for sequence in non_const_input_sequences:
            for match in graph_matcher.match_sequence(sequence):
                eltwise_op = match['root']
                descriptor = self._descriptor_class(self._layer_type,
                                                    str(eltwise_op.name),
                                                    match.consumed_nodes)
                descriptors.append(descriptor)

        return descriptors
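EltWiseUnaryLayerResolver is an abstract base: a concrete resolver only supplies the layer type, the TF op type, and a descriptor class. A hypothetical subclass might look like this (the names below are illustrative, not from the source):

class SqrtLayerResolver(EltWiseUnaryLayerResolver):
    def __init__(self):
        super(SqrtLayerResolver, self).__init__(
            layer_type='ElementWiseSqrt',      # assumed layer-type string
            op_type='Sqrt',                    # TF op matched by the 'root' node
            descriptor_class=LayerDescriptor)  # a real subclass would pass its own Descriptor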
Example #4
class GenericBatchNormLayerResolver(BatchNormLayerResolver):
    class Descriptor(BatchNormLayerResolver.Descriptor):
        pass

    def __init__(self):
        self.sequence = GraphSequence([
            NonConsumableConverterSequenceNode('inputs', ['?']),
            ConverterSequenceNode('a', ['Mul']),
            ConverterSequenceNode('b', ['Add']),
            ConverterSequenceNode('weights', ['Const', 'Identity']),
            ConverterSequenceNode('biases', ['Const', 'Identity'])
        ])
        self.sequence.set_inputs('a', ['inputs', 'weights'])
        self.sequence.set_inputs('b', ['a', 'biases'])
        self.sequence.set_outputs(['b'])

    def resolve_layer(self, graph_matcher, graph_helper):
        matches = graph_matcher.match_sequence(self.sequence)
        if len(matches) == 0:
            return []
        potential_descriptors = []
        for match in matches:
            inputs_op = match['inputs']
            biases_op = match['biases']
            weights_op = match['weights']

            inputs_shape = graph_helper.get_op_output_shape(inputs_op)
            biases_op = graph_helper.evaluate_tensor_output(
                biases_op.outputs[0])
            weights_op = graph_helper.evaluate_tensor_output(
                weights_op.outputs[0])

            if np.isscalar(biases_op):
                biases_op = self._broadcast_tensor(biases_op, inputs_shape)
            if np.isscalar(weights_op):
                weights_op = self._broadcast_tensor(weights_op, inputs_shape)

            consumed_nodes = match.consumed_nodes
            output_op_nodes_names = [
                str(match[node.identifier].outputs[0].name)
                for node in self.sequence.output_nodes
            ]
            bn_op = match['a']
            potential_descriptors.append(
                GenericBatchNormLayerResolver.Descriptor(
                    str(bn_op.name),
                    consumed_nodes,
                    bn_mul_op=bn_op,
                    pre_calculated=True,
                    weights=weights_op,
                    biases=biases_op,
                    output_names=output_op_nodes_names))
        return potential_descriptors

    @classmethod
    def _broadcast_tensor(cls, tensor, shape):
        broadcasted_tensor = np.zeros(shape, dtype=np.float32)
        broadcasted_tensor = broadcasted_tensor + tensor
        return broadcasted_tensor
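_broadcast_tensor relies on plain NumPy broadcasting: adding a scalar to a zero array of the target shape yields a filled, full-shape tensor. For example:

import numpy as np

np.zeros((2, 3), dtype=np.float32) + 0.5
# -> [[0.5, 0.5, 0.5],
#     [0.5, 0.5, 0.5]], shape (2, 3)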
Example #5
    def __init__(self):
        sequence_scalar_pow = GraphSequence([
            NonConsumableConverterSequenceNode('input', ['?']),
            ConverterSequenceNode('pow', ['Pow']),
            ConverterSequenceNode('const', ['Const'])
        ])
        sequence_scalar_pow.set_inputs('pow', ['input', 'const'])
        sequence_scalar_pow.set_outputs(['pow'])

        self.sequences = [sequence_scalar_pow]
Example #6
    def __init__(self):
        sequence_extract_glimpse = GraphSequence([
            NonConsumableConverterSequenceNode('input', ['?']),
            NonConsumableConverterSequenceNode('offsets', ['?']),
            ConverterSequenceNode('size', ['Const']),
            ConverterSequenceNode('extract_glimpse', ['ExtractGlimpse'])
        ])
        sequence_extract_glimpse.set_inputs('extract_glimpse',
                                            ['input', 'size', 'offsets'])
        sequence_extract_glimpse.set_outputs(['extract_glimpse'])

        self.sequences = [sequence_extract_glimpse]
Example #7
    def __init__(self):
        sequence_crop_and_resize = GraphSequence([
            NonConsumableConverterSequenceNode('input', ['?']),
            NonConsumableConverterSequenceNode('boxes', ['?']),
            NonConsumableConverterSequenceNode('box_ind', ['?']),
            NonConsumableConverterSequenceNode('crop_size', ['?']),
            ConverterSequenceNode('crop_and_resize', ['CropAndResize']),
        ])
        sequence_crop_and_resize.set_inputs('crop_and_resize', ['input', 'boxes', 'box_ind', 'crop_size'])
        sequence_crop_and_resize.set_outputs(['crop_and_resize'])

        self.sequences = [sequence_crop_and_resize]
Example #8
    def __init__(self):
        sequence_resize = GraphSequence([ConverterSequenceNode('root', ['ResizeBilinear'])])
        sequence_resize.set_outputs(['root'])

        sequence_shape_stridedslice_resize = GraphSequence([
            NonConsumableConverterSequenceNode('input', ['?']),
            ConverterSequenceNode('shape', ['Shape']),
            ConverterSequenceNode('stridedSlice', ['StridedSlice']),
            ConverterSequenceNode('mul', ['Mul']),
            ConverterSequenceNode('const_stridedSlice_1', ['?']),
            ConverterSequenceNode('const_stridedSlice_2', ['?']),
            ConverterSequenceNode('const_stridedSlice_3', ['?']),
            ConverterSequenceNode('mul_const', ['?']),
            ConverterSequenceNode('root', ['ResizeBilinear'])])

        sequence_shape_stridedslice_resize.set_inputs('shape', ['input'])
        sequence_shape_stridedslice_resize.set_inputs('stridedSlice', ['shape',
                                                                       'const_stridedSlice_1',
                                                                       'const_stridedSlice_2',
                                                                       'const_stridedSlice_3'])
        sequence_shape_stridedslice_resize.set_inputs('mul', ['stridedSlice', 'mul_const'])
        sequence_shape_stridedslice_resize.set_inputs('root', ['mul', 'input'])
        sequence_shape_stridedslice_resize.set_outputs(['root'])

        self.sequences = [sequence_resize, sequence_shape_stridedslice_resize]
Example #9
class ArgMaxLayerResolver(LayerResolver, object):
    class Descriptor(LayerDescriptor):
        def __init__(self, name, nodes, axis, output_names=None):
            super(ArgMaxLayerResolver.Descriptor,
                  self).__init__('ArgMax',
                                 name,
                                 nodes,
                                 output_names=output_names)
            self.axis = axis

    def __init__(self):
        self.sequence = GraphSequence([
            ConverterSequenceNode('root', ['ArgMax']),
            ConverterSequenceNode('axis', ['Const']),
            NonConsumableConverterSequenceNode('input', ['?']),
        ])
        self.sequence.set_inputs('root', ['input', 'axis'])
        self.sequence.set_outputs(['root'])

    def resolve_layer(self, graph_matcher, graph_helper):
        descriptors = []
        for match in graph_matcher.match_sequence(self.sequence):
            argmax_op = match['root']
            input_op = match['input']
            axis_op = match['axis']

            input_shape = graph_helper.get_op_output_shape(input_op)
            input_rank = len(input_shape)

            axis = int(graph_helper.evaluate_tensor_output(axis_op.outputs[0]))
            if axis < 0:
                axis += input_rank

            if axis < 0 or axis >= input_rank:
                raise ConverterError(
                    code_to_message.get_error_message(
                        'ERROR_TF_ARGMAX_INVALID_AXIS')(axis, input_rank))

            consumed_nodes = match.consumed_nodes
            argmax_descriptor = ArgMaxLayerResolver.Descriptor(
                str(argmax_op.name),
                consumed_nodes,
                axis,
                output_names=[str(argmax_op.outputs[0].name)])
            descriptors.extend([argmax_descriptor])

        return descriptors
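The negative-axis handling above follows TF semantics, where axis -1 denotes the last dimension; anything still outside [0, input_rank) after normalization is rejected:

input_rank = 4
axis = -1
if axis < 0:
    axis += input_rank  # -1 -> 3, i.e. the last of four dimensions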
Example #10
class InstanceNormRMSLayerResolver(LayerResolver, object):
    class Descriptor(LayerDescriptor):
        def __init__(self, name, operations, shape):
            super(InstanceNormRMSLayerResolver.Descriptor,
                  self).__init__('InstanceNormRMS', name, operations)
            self.shape = shape
            # The SNPE runtime computes y = x * WEIGHT / rms + BIAS, while
            # L2 normalization is y = x / rms, so WEIGHT = 1.0 and BIAS = 0.0
            # mimic L2 Norm in SNPE. The weights/biases must have the same
            # shape as the last dimension of the input.
            self.weights = np.ones(shape[-1])
            self.biases = np.zeros(shape[-1])

    def __init__(self):
        # Graph topology of tf.math.l2_normalize
        self.sequence = GraphSequence([
            NonConsumableConverterSequenceNode('input', ['?']),
            ConverterSequenceNode('a', ['Square']),
            ConverterSequenceNode('weights', ['Const', 'Identity']),
            ConverterSequenceNode('b', ['Sum']),
            ConverterSequenceNode('epsilon', ['Const', 'Identity']),
            ConverterSequenceNode('c', ['Maximum']),
            ConverterSequenceNode('d', ['Rsqrt']),
            ConverterSequenceNode('e', ['Mul'])
        ])
        self.sequence.set_inputs('a', ['input'])
        self.sequence.set_inputs('b', ['a', 'weights'])
        self.sequence.set_inputs('c', ['b', 'epsilon'])
        self.sequence.set_inputs('d', ['c'])
        self.sequence.set_inputs('e', ['d', 'input'])
        self.sequence.set_outputs(['e'])

    # For now, the elementwise resolver cannot handle the epsilon node; it
    # fails with the error "ElementWise resolver must implement broadcast method.".
    def is_final_resolution(self):
        return True

    def resolve_layer(self, graph_matcher, graph_helper):
        matches = graph_matcher.match_sequence(self.sequence)
        potential_descriptors = []
        for match in matches:
            # The sequence defines no 'SquaredDifference' node; use the final
            # Mul ('e') to name the layer.
            bn_op = match['e']
            input_op = match['input']

            shape = graph_helper.get_op_output_shape(input_op)

            consumed_nodes = match.consumed_nodes
            potential_descriptors.append(
                InstanceNormRMSLayerResolver.Descriptor(str(bn_op.name),
                                                        consumed_nodes,
                                                        shape=shape))
        return potential_descriptors
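The sequence encodes tf.math.l2_normalize. A NumPy rendering of the matched subgraph, Square -> Sum -> Maximum -> Rsqrt -> Mul (1e-12 is TF's default epsilon):

import numpy as np

def l2_normalize(x, axis=-1, epsilon=1e-12):
    sq_sum = np.sum(np.square(x), axis=axis, keepdims=True)  # Square -> Sum
    rsqrt = 1.0 / np.sqrt(np.maximum(sq_sum, epsilon))       # Maximum -> Rsqrt
    return x * rsqrt                                         # Mul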
Example #11
class DepthwiseConvolutionLayerResolver(ConvolutionLayerResolver, object):
    def __init__(self):
        super(DepthwiseConvolutionLayerResolver, self).__init__()
        self.graph_sequence_with_bias = GraphSequence([
            ConverterSequenceNode('conv', ['DepthwiseConv2dNative']),
            ConverterSequenceNode('bias', ['BiasAdd', 'Add']),
            NonConsumableConverterSequenceNode('other', ['?'])
        ])
        self.graph_sequence_with_bias.set_inputs('bias', ['conv', 'other'])
        self.graph_sequence_with_bias.set_outputs(['bias'])

        self.graph_sequence = GraphSequence(
            [ConverterSequenceNode('conv', ['DepthwiseConv2dNative'])])
        self.graph_sequence.set_outputs(['conv'])

    def resolve_layer(self, graph_matcher, graph_helper):
        matches = graph_matcher.match_sequence(self.graph_sequence)
        matches += graph_matcher.match_sequence(self.graph_sequence_with_bias)
        descriptors = []
        for match in matches:
            self._resolve_from_match(descriptors, graph_helper, match)
        return descriptors

    def _resolve_from_match(self, descriptors, graph_helper, match):
        conv_op = match['conv']
        strides = conv_op.get_attr(self.TF_ATTRIBUTE_STRIDES)
        padding = conv_op.get_attr(self.TF_ATTRIBUTE_PADDING)
        weights = self.get_weights(graph_helper, conv_op)
        weights = np.transpose(weights, [0, 1, 3, 2])

        if 'bias' in match:
            biases = self.get_biases(graph_helper, conv_op, match['bias'])
        else:
            biases = np.zeros(np.shape(weights)[-1], dtype=np.float32)
        consumed_nodes = match.consumed_nodes
        d = ConvolutionLayerResolver.Descriptor(str(conv_op.name),
                                                consumed_nodes, conv_op, None,
                                                strides, padding, weights,
                                                biases)
        input_tensor, _ = GraphHelper.get_op_input_tensors(conv_op, ('?', '?'))
        d.groups = graph_helper.get_op_output_shape(input_tensor)[-1]
        descriptors.append(d)
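TF stores DepthwiseConv2dNative filters as [filter_h, filter_w, in_channels, channel_multiplier]; the [0, 1, 3, 2] transpose above simply swaps the last two axes:

import numpy as np

w = np.zeros((3, 3, 16, 1))                 # [H, W, in_channels, multiplier]
print(np.transpose(w, [0, 1, 3, 2]).shape)  # -> (3, 3, 1, 16)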
Example #12
    def __init__(self):

        sequence_1 = GraphSequence([
            ConverterSequenceNode('gather', ['GatherV2']),
            NonConsumableConverterSequenceNode('params', ['?']),
            NonConsumableConverterSequenceNode('axis', ['?']),
            NonConsumableConverterSequenceNode('indices', ['Placeholder'])
        ])
        sequence_1.set_inputs('gather', ['params', 'axis', 'indices'])
        sequence_1.set_outputs(['gather'])

        # Filter seqs 2
        sequence_2 = GraphSequence([
            ConverterSequenceNode('gather', ['Gather']),
            NonConsumableConverterSequenceNode('params', ['?']),
            NonConsumableConverterSequenceNode('indices', ['Placeholder'])
        ])
        sequence_2.set_inputs('gather', ['params', 'indices'])
        sequence_2.set_outputs(['gather'])

        self.sequences = [sequence_1, sequence_2]
Example #13
class CropLayerResolver(LayerResolver, object):
    class Descriptor(LayerDescriptor):
        def __init__(self, name, nodes, offset, size, output_names=None):
            super(CropLayerResolver.Descriptor,
                  self).__init__('Crop',
                                 name,
                                 nodes,
                                 output_names=output_names)
            self.offset = offset
            self.size = size

    def __init__(self):
        self.sequence = GraphSequence([
            ConverterSequenceNode('root', ['Slice']),
            NonConsumableConverterSequenceNode('input', ['?']),
            NonConsumableConverterSequenceNode('offsets', ['?']),
            NonConsumableConverterSequenceNode('size', ['?']),
        ])
        self.sequence.set_inputs('root', ['input', 'offsets', 'size'])
        self.sequence.set_outputs(['root'])

    def resolve_layer(self, graph_matcher, graph_helper):
        matches = graph_matcher.match_sequence(self.sequence)
        descriptors = []
        for match in matches:
            slice_op = match['root']
            input_shape = graph_helper.get_op_output_shape(match['input'])
            offset = graph_helper.evaluate_tensor_output(
                match['offsets'].outputs[0])
            size = graph_helper.evaluate_tensor_output(
                match['size'].outputs[0])
            for index in range(0, len(size)):
                if size[index] == -1:
                    size[index] = input_shape[index] - offset[index]

            consumed_nodes = match.consumed_nodes
            descriptors.append(
                CropLayerResolver.Descriptor(str(slice_op.name),
                                             consumed_nodes, offset, size))
        return descriptors
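tf.slice lets size[i] be -1, meaning "from offset to the end of dimension i"; the loop above resolves that into a concrete extent, e.g.:

input_shape = [1, 224, 224, 3]
offset = [0, 10, 10, 0]
size = [-1, 100, 100, -1]
size = [input_shape[i] - offset[i] if s == -1 else s for i, s in enumerate(size)]
# size -> [1, 100, 100, 3]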
Example #14
    def __init__(self):
        super(GroupedConvolutionLayerResolver, self).__init__()

        # grouped convolution with split
        tree_output_node = ConverterSequenceNode('conv_op', ['Conv2D'])
        self.sequence = GraphSequence([
            ConverterSequenceNode('a', ['Split']),
            ConverterSequenceNode('b', ['Split']),
            ConverterRepeatableSequenceTreeNode('repeatable_graph',
                                                tree_output_node,
                                                tree_output_node),
            ConverterSequenceNode('concat_op', ['Concat']),
            ConverterSequenceNode('weights', ['Identity', 'Const']),
            NonConsumableConverterSequenceNode('inputs', ['?']),
            NonConsumableConverterSequenceNode('concat_dim', ['Const']),
            NonConsumableConverterSequenceNode('split_dim1', ['Const']),
            ConverterSequenceNode('split_dim2', ['Const'])
        ])
        self.sequence.set_inputs('a', ['inputs', 'split_dim1'])
        self.sequence.set_inputs('b', ['weights', 'split_dim2'])
        self.sequence.set_inputs('repeatable_graph', ['a', 'b'])
        self.sequence.set_inputs('concat_op',
                                 ['repeatable_graph', 'concat_dim'])
        self.sequence.set_outputs(['concat_op'])

        # grouped convolution with strided slice
        repeatable_sequence = GraphSequence([
            ConverterSequenceNode('ss', ['StridedSlice']),
            ConverterSequenceNode('ss_begin', ['Const']),
            ConverterSequenceNode('ss_end', ['Const']),
            ConverterSequenceNode('ss_strides', ['Const']),
            ConverterSequenceNode('conv', ['Conv2D']),
            ConverterSequenceNode('bias', ['BiasAdd']),
            ConverterSequenceNode('weights', ['Identity', 'Const']),
            ConverterSequenceNode('biases', ['Identity', 'Const'])
        ])
        repeatable_sequence.set_inputs('ss',
                                       ['ss_begin', 'ss_end', 'ss_strides'])
        repeatable_sequence.set_inputs('conv', ['ss', 'weights'])
        repeatable_sequence.set_inputs('bias', ['biases', 'conv'])
        repeatable_sequence.set_outputs(['bias'])

        self.sequence_with_strided_slice = GraphSequence([
            ConverterRepeatableSequenceTreeNode(
                'repeatable_graph',
                tree_output_node=repeatable_sequence['bias'],
                tree_input_node=repeatable_sequence['ss']),
            ConverterSequenceNode('concat', ['Concat', 'ConcatV2']),
            ConverterSequenceNode('axis', ['Const']),
            NonConsumableConverterSequenceNode('input', ['?'])
        ])
        self.sequence_with_strided_slice.set_inputs('repeatable_graph',
                                                    ['input'])
        self.sequence_with_strided_slice.set_inputs(
            'concat', ['repeatable_graph', 'axis'])
        self.sequence_with_strided_slice.set_outputs(['concat'])
Example #15
    def __init__(self):
        sequence_keras = GraphSequence([
            NonConsumableConverterSequenceNode('input', ['?']),
            ConverterSequenceNode('root', ['Relu']),
            ConverterSequenceNode('min', ['Minimum']),
            ConverterSequenceNode('min_cast', ['Cast']),
            ConverterSequenceNode('min_const', ['Const']),
            ConverterSequenceNode('max', ['Maximum']),
            ConverterSequenceNode('max_const', ['Const'])
        ])
        sequence_keras.set_inputs('root', ['input'])
        sequence_keras.set_inputs('min_cast', ['min_const'])
        sequence_keras.set_inputs('min', ['root', 'min_cast'])
        sequence_keras.set_inputs('max', ['min', 'max_const'])
        sequence_keras.set_outputs(['max'])

        self.sequences = [sequence_keras]
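This Keras sequence is a clipped ReLU: Minimum bounds the activation from above and Maximum from below, e.g. ReLU6 when the constants are 6 and 0. A NumPy sketch (the constant values are assumed):

import numpy as np

def clipped_relu(x, upper=6.0, lower=0.0):
    # Relu -> Minimum(min_const) -> Maximum(max_const)
    return np.maximum(np.minimum(np.maximum(x, 0.0), upper), lower)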
Example #16
# =============================================================================
#
#  Copyright (c) 2018-2019 Qualcomm Technologies, Inc.
#  All Rights Reserved.
#  Confidential and Proprietary - Qualcomm Technologies, Inc.
#
# =============================================================================
from snpe.converters.tensorflow.graph_matcher import (
    ConverterSequenceNode, NonConsumableConverterSequenceNode, GraphSequence)

real_div_sequence = GraphSequence([
    ConverterSequenceNode('root', ['RealDiv']),
    NonConsumableConverterSequenceNode('a', ['?']),
    NonConsumableConverterSequenceNode('b', ['?'])
])
real_div_sequence.set_inputs('root', ['a', 'b'])
real_div_sequence.set_outputs(['root'])

identity_sequence = GraphSequence([
    ConverterSequenceNode('root', ['Identity']),
    NonConsumableConverterSequenceNode('any', ['?']),
])
identity_sequence.set_inputs('root', ['any'])
identity_sequence.set_outputs(['root'])

placeholder_with_default_sequence = GraphSequence([
    ConverterSequenceNode('root', ['PlaceholderWithDefault']),
    NonConsumableConverterSequenceNode('any', ['?']),
])
placeholder_with_default_sequence.set_inputs('root', ['any'])
placeholder_with_default_sequence.set_outputs(['root'])
Example #17
class StridedSliceLayerResolver(LayerResolver, object):

    class Descriptor(LayerDescriptor):
        def __init__(self, name, nodes, input_shape, begin, end, strides, begin_mask, end_mask,
                     ellipsis_mask, new_axis_mask, shrink_axis_mask, output_names=None):
            super(StridedSliceLayerResolver.Descriptor, self).__init__('StridedSlice', name, nodes, output_names=output_names)
            self.input_shape = input_shape
            self.begin = begin
            self.end = end
            self.strides = strides
            self.begin_mask = begin_mask
            self.end_mask = end_mask
            self.ellipsis_mask = ellipsis_mask
            self.new_axis_mask = new_axis_mask
            self.shrink_axis_mask = shrink_axis_mask

    def __init__(self):
        self.sequence = GraphSequence([
            ConverterSequenceNode('root', ['StridedSlice']),
            ConverterSequenceNode('begin', ['Const']),
            ConverterSequenceNode('end', ['Const']),
            ConverterSequenceNode('strides', ['Const']),
            NonConsumableConverterSequenceNode('input', ['?']),
        ])
        self.sequence.set_inputs('root', ['input', 'begin', 'end', 'strides'])
        self.sequence.set_outputs(['root'])

    def resolve_layer(self, graph_matcher, graph_helper):
        descriptors = []

        for match in graph_matcher.match_sequence(self.sequence):
            strided_slice_op = match['root']
            input_op = match['input']

            if input_op.type == "Const":
                continue

            begin_op = match['begin']
            end_op = match['end']
            strides_op = match['strides']

            begin_tensor = graph_helper.evaluate_tensor_output(begin_op.outputs[0])
            end_tensor = graph_helper.evaluate_tensor_output(end_op.outputs[0])
            strides_tensor = graph_helper.evaluate_tensor_output(strides_op.outputs[0])
            input_tensor = graph_helper.evaluate_tensor_output(input_op.outputs[0])

            begin_shape = graph_helper.get_op_output_shape(begin_op)
            end_shape = graph_helper.get_op_output_shape(end_op)
            strides_shape = graph_helper.get_op_output_shape(strides_op)
            input_shape = graph_helper.get_op_output_shape(input_op)

            if begin_shape != end_shape or begin_shape != strides_shape:
                raise ConverterError(code_to_message.get_error_message('ERROR_TF_STRIDED_SLICE_SHAPE_MISMATCH'))

            begin_mask = strided_slice_op.get_attr("begin_mask")
            end_mask = strided_slice_op.get_attr("end_mask")
            ellipsis_mask = strided_slice_op.get_attr("ellipsis_mask")
            new_axis_mask = strided_slice_op.get_attr("new_axis_mask")
            shrink_axis_mask = strided_slice_op.get_attr("shrink_axis_mask")

            consumed_nodes = match.consumed_nodes
            pad_descriptor = StridedSliceLayerResolver.Descriptor(
                str(strided_slice_op.name), consumed_nodes, input_shape,
                begin_tensor, end_tensor, strides_tensor, begin_mask, end_mask, ellipsis_mask,
                new_axis_mask, shrink_axis_mask, output_names=[str(strided_slice_op.outputs[0].name)])
            descriptors.extend([pad_descriptor])

        return descriptors
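The five mask attributes read above are per-dimension bitmasks; for instance, if bit i of begin_mask is set, begin[i] is ignored and the slice starts at the beginning of dimension i:

begin_mask = 0b0101  # dimensions 0 and 2 ignore their begin values
ignore_begin = [bool(begin_mask & (1 << i)) for i in range(4)]
# -> [True, False, True, False]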
Example #18
    def __init__(self):
        sequence_reshape = GraphSequence([
            ConverterSequenceNode('root', ['Reshape', 'Squeeze', 'ExpandDims'])
        ])
        sequence_reshape.set_outputs(['root'])

        sequence_flatten = GraphSequence([
            NonConsumableConverterSequenceNode('input', ['?']),
            ConverterSequenceNode('shape', ['Shape']),
            ConverterSequenceNode('slice_1', ['Slice']),
            ConverterSequenceNode('const_1', ['Const']),
            ConverterSequenceNode('const_2', ['Const']),
            ConverterSequenceNode('slice_2', ['Slice']),
            ConverterSequenceNode('const_3', ['Const']),
            ConverterSequenceNode('const_4', ['Const']),
            ConverterSequenceNode('prod', ['Prod']),
            ConverterSequenceNode('const_5', ['Const']),
            ConverterSequenceNode('expand_dims', ['ExpandDims']),
            ConverterSequenceNode('const_6', ['Const']),
            ConverterSequenceNode('concat', ['ConcatV2']),
            ConverterSequenceNode('const_7', ['Const']),
            ConverterSequenceNode('root', ['Reshape']),
        ])
        sequence_flatten.set_inputs('shape', ['input'])
        sequence_flatten.set_inputs('slice_1', ['shape', 'const_1', 'const_2'])
        sequence_flatten.set_inputs('slice_2', ['shape', 'const_3', 'const_4'])
        sequence_flatten.set_inputs('prod', ['slice_2', 'const_5'])
        sequence_flatten.set_inputs('expand_dims', ['prod', 'const_6'])
        sequence_flatten.set_inputs('concat',
                                    ['slice_1', 'expand_dims', 'const_7'])
        sequence_flatten.set_inputs('root', ['input', 'concat'])
        sequence_flatten.set_outputs(['root'])

        # consume shape op
        # pattern: shape -> concat -> reshape
        sequence_shape_concat = GraphSequence([
            NonConsumableConverterSequenceNode('input', ['?']),
            ConverterSequenceNode('shape', ['Shape']),
            ConverterSequenceNode('const_1', ['Const']),
            ConverterSequenceNode('const_2', ['Const']),
            ConverterSequenceNode('concat', ['ConcatV2']),
            ConverterSequenceNode('root', ['Reshape']),
        ])
        sequence_shape_concat.set_inputs('shape', ['input'])
        sequence_shape_concat.set_inputs('concat',
                                         ['shape', 'const_1', 'const_2'])
        sequence_shape_concat.set_inputs('root', ['concat', 'input'])
        sequence_shape_concat.set_outputs(['root'])

        # consume shape op
        # pattern: shape -> strideslice -> stack -> reshape
        sequence_shape_stridedslice_stack = GraphSequence([
            NonConsumableConverterSequenceNode('input', ['?']),
            ConverterSequenceNode('shape', ['Shape']),
            ConverterSequenceNode('stridedslice', ['StridedSlice']),
            ConverterSequenceNode('const_1', ['Const']),
            ConverterSequenceNode('const_2', ['Const']),
            ConverterSequenceNode('const_3', ['Const']),
            ConverterSequenceNode('const_4', ['Const']),
            ConverterSequenceNode('const_5', ['Const']),
            ConverterSequenceNode('stack', ['Pack']),
            ConverterSequenceNode('root', ['Reshape']),
        ])
        sequence_shape_stridedslice_stack.set_inputs('shape', ['input'])
        sequence_shape_stridedslice_stack.set_inputs(
            'stridedslice', ['shape', 'const_1', 'const_2', 'const_3'])
        sequence_shape_stridedslice_stack.set_inputs(
            'stack', ['stridedslice', 'const_4', 'const_5'])
        sequence_shape_stridedslice_stack.set_inputs('root',
                                                     ['input', 'stack'])
        sequence_shape_stridedslice_stack.set_outputs(['root'])

        self.sequences = [
            sequence_reshape, sequence_flatten, sequence_shape_concat,
            sequence_shape_stridedslice_stack
        ]
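The flatten pattern computes its target shape at runtime: the leading dimension is kept and the remaining ones are multiplied together (Slice -> Prod -> ExpandDims -> ConcatV2). The equivalent arithmetic:

import numpy as np

shape = [8, 7, 7, 512]
new_shape = shape[:1] + [int(np.prod(shape[1:]))]  # -> [8, 25088]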
Example #19
class PadLayerResolver(LayerResolver, object):
    TF_ATTRIBUTE_MODE = 'mode'

    class Descriptor(LayerDescriptor):
        def __init__(self,
                     name,
                     nodes,
                     paddings,
                     mode,
                     constant_values,
                     output_names=None):
            super(PadLayerResolver.Descriptor,
                  self).__init__('Pad', name, nodes, output_names=output_names)
            self.paddings = paddings
            self.mode = mode
            self.constant_values = constant_values

    def __init__(self):
        self.sequence_with_zero_padding = GraphSequence([
            ConverterSequenceNode('root', ['Pad', 'PadV2']),
            ConverterSequenceNode('paddings', ['Const']),
            NonConsumableConverterSequenceNode('input', ['?']),
        ])
        self.sequence_with_zero_padding.set_inputs('root',
                                                   ['input', 'paddings'])
        self.sequence_with_zero_padding.set_outputs(['root'])

        self.sequence_with_const_padding = GraphSequence([
            ConverterSequenceNode('root', ['Pad', 'PadV2']),
            ConverterSequenceNode('paddings', ['Const']),
            ConverterSequenceNode('const_values', ['Const']),
            NonConsumableConverterSequenceNode('input', ['?']),
        ])
        self.sequence_with_const_padding.set_inputs(
            'root', ['input', 'paddings', 'const_values'])
        self.sequence_with_const_padding.set_outputs(['root'])

        self.sequence_with_reflect_padding = GraphSequence([
            ConverterSequenceNode('mirror_pad', ['MirrorPad']),
            ConverterSequenceNode('paddings', ['Const']),
            NonConsumableConverterSequenceNode('input', ['?']),
        ])
        self.sequence_with_reflect_padding.set_inputs('mirror_pad',
                                                      ['input', 'paddings'])
        self.sequence_with_reflect_padding.set_outputs(['mirror_pad'])

        self.sequences = [
            self.sequence_with_zero_padding, self.sequence_with_const_padding,
            self.sequence_with_reflect_padding
        ]

    def resolve_layer(self, graph_matcher, graph_helper):
        descriptors = []
        for sequence in self.sequences:
            for match in graph_matcher.match_sequence(sequence):
                pad_op = None
                mode_values = modeltools.PADDING_CONSTANT
                if 'root' in match:
                    pad_op = match['root']
                if 'mirror_pad' in match:
                    pad_op = match['mirror_pad']
                    mode = pad_op.get_attr(self.TF_ATTRIBUTE_MODE)
                    if mode.decode() == "REFLECT":
                        mode_values = modeltools.PADDING_REFLECT

                input_op = match['input']
                paddings_op = match['paddings']

                paddings_tensor = graph_helper.evaluate_tensor_output(
                    paddings_op.outputs[0])
                paddings_shape = graph_helper.get_op_output_shape(paddings_op)

                input_rank = len(graph_helper.get_op_output_shape(input_op))
                if [input_rank, 2] != paddings_shape:
                    raise ConverterError(
                        code_to_message.get_error_message(
                            'ERROR_TF_PAD_INVALUD_PADDINGS')(str(
                                [input_rank, 2]), str(paddings_shape)))

                if 'const_values' in match:
                    const_values_op = match['const_values']
                    const_values = graph_helper.evaluate_tensor_output(
                        const_values_op.outputs[0])
                else:
                    const_values = 0.0

                if not np.isscalar(const_values):
                    raise ConverterError(
                        code_to_message.get_error_message(
                            'ERROR_TF_PAD_CONSTANT_NOT_SCALAR'))

                consumed_nodes = match.consumed_nodes
                pad_descriptor = PadLayerResolver.Descriptor(
                    str(pad_op.name),
                    consumed_nodes,
                    paddings_tensor,
                    mode_values,
                    const_values,
                    output_names=[str(pad_op.outputs[0].name)])
                descriptors.extend([pad_descriptor])

        return descriptors
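The shape check above enforces TF's paddings contract: a rank-N input takes an [N, 2] tensor whose row i holds (pad_before_i, pad_after_i):

import numpy as np

paddings = np.array([[0, 0],   # batch: no padding
                     [1, 1],   # height: 1 before, 1 after
                     [1, 1],   # width
                     [0, 0]])  # channels
# valid for a rank-4 input, since list(paddings.shape) == [4, 2]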
Example #20
    def __init__(self):
        super(MomentsLayerResolver, self).__init__()

        # Graph sequence where keep_dims is False and dims of 1 are stripped (default)
        sequence = GraphSequence([
            ConverterSequenceNode('moments/mean', ['Mean']),
            ConverterSequenceNode('moments/StopGradient', ['StopGradient']),
            ConverterSequenceNode('moments/SquaredDifference',
                                  ['SquaredDifference']),
            ConverterSequenceNode('moments/variance', ['Mean']),
            ConverterSequenceNode('moments/squeeze_mean', ['Squeeze']),
            ConverterSequenceNode('moments/squeeze_variance', ['Squeeze']),
            NonConsumableConverterSequenceNode('input', ['?']),
            NonConsumableConverterSequenceNode('mean_reduction_indices',
                                               ['?']),
            NonConsumableConverterSequenceNode('variance_reduction_indices',
                                               ['?']),
        ])
        sequence.set_inputs('moments/mean',
                            ['input', 'mean_reduction_indices'])
        sequence.set_inputs('moments/StopGradient', ['moments/mean'])
        sequence.set_inputs('moments/SquaredDifference',
                            ['input', 'moments/StopGradient'])
        sequence.set_inputs(
            'moments/variance',
            ['moments/SquaredDifference', 'variance_reduction_indices'])
        sequence.set_inputs('moments/squeeze_mean', ['moments/mean'])
        sequence.set_inputs('moments/squeeze_variance', ['moments/variance'])
        sequence.set_outputs(
            ['moments/squeeze_mean', 'moments/squeeze_variance'])

        # Graph sequence where keep_dims is True and input dimensions are maintained
        sequence_keep_dims = GraphSequence([
            ConverterSequenceNode('moments/mean', ['Mean']),
            ConverterSequenceNode('moments/StopGradient', ['StopGradient']),
            ConverterSequenceNode('moments/SquaredDifference',
                                  ['SquaredDifference']),
            ConverterSequenceNode('moments/variance', ['Mean']),
            NonConsumableConverterSequenceNode('input', ['?']),
            NonConsumableConverterSequenceNode('variance_reduction_indices',
                                               ['?']),
            NonConsumableConverterSequenceNode('mean_reduction_indices',
                                               ['?']),
        ])
        sequence_keep_dims.set_inputs('moments/mean',
                                      ['input', 'mean_reduction_indices'])
        sequence_keep_dims.set_inputs('moments/StopGradient', ['moments/mean'])
        sequence_keep_dims.set_inputs('moments/SquaredDifference',
                                      ['input', 'moments/StopGradient'])
        sequence_keep_dims.set_inputs(
            'moments/variance',
            ['moments/SquaredDifference', 'variance_reduction_indices'])
        sequence_keep_dims.set_outputs(['moments/mean', 'moments/variance'])

        self.sequences = [sequence, sequence_keep_dims]
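Both sequences encode tf.nn.moments: a mean and a variance over the reduction axes, with StopGradient keeping the mean out of the variance's gradient path. In NumPy terms:

import numpy as np

def moments(x, axes, keep_dims=False):
    mean = np.mean(x, axis=axes, keepdims=True)                    # 'moments/mean'
    variance = np.mean((x - mean) ** 2, axis=axes, keepdims=True)  # SquaredDifference -> Mean
    if not keep_dims:
        mean = np.squeeze(mean, axis=axes)          # 'moments/squeeze_mean'
        variance = np.squeeze(variance, axis=axes)  # 'moments/squeeze_variance'
    return mean, variance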
Example #21
class TileLayerResolver(LayerResolver, object):

    class Descriptor(LayerDescriptor):
        def __init__(self, name, nodes, multiples, output_names=None):
            super(TileLayerResolver.Descriptor, self).__init__('Tile', name, nodes, output_names=output_names)
            self.multiples = multiples

    def __init__(self):
        self.sequence = GraphSequence([
            NonConsumableConverterSequenceNode('input', ['?']),
            ConverterSequenceNode('tile', ['Tile']),
            NonConsumableConverterSequenceNode('multiples', ['?'])
        ])
        self.sequence.set_inputs('tile', ['input', 'multiples'])
        self.sequence.set_outputs(['tile'])

        # sequence input->shape->stridedslice->pack->tile
        self.sequence_pack = GraphSequence([
            NonConsumableConverterSequenceNode('input', ['?']),
            NonConsumableConverterSequenceNode('shape', ['Shape']),
            NonConsumableConverterSequenceNode('stridedslice', ['StridedSlice']),
            NonConsumableConverterSequenceNode('const_3', ['Const']),
            NonConsumableConverterSequenceNode('const_4', ['Const']),
            NonConsumableConverterSequenceNode('const_5', ['Const']),
            ConverterSequenceNode('tile', ['Tile']),
            NonConsumableConverterSequenceNode('tile_multiples_pack', ['Pack']),
            NonConsumableConverterSequenceNode('const_1', ['Const']),
            NonConsumableConverterSequenceNode('const_2', ['Const']),
            NonConsumableConverterSequenceNode('tile_input', ['?'])
        ])
        self.sequence_pack.set_inputs('shape', ['input'])
        self.sequence_pack.set_inputs('stridedslice', ['shape', 'const_3', 'const_4', 'const_5'])
        self.sequence_pack.set_inputs('tile_multiples_pack', ['stridedslice', 'const_1', 'const_2'])
        self.sequence_pack.set_inputs('tile', ['tile_input', 'tile_multiples_pack'])
        self.sequence_pack.set_outputs(['tile'])

    def resolve_layer(self, graph_matcher, graph_helper):
        descriptors = []
        non_const_sequence = [self.sequence]
        for sequence in non_const_sequence:
            for match in graph_matcher.match_sequence(sequence):
                tile_op = match['tile']
                multiples_op = match['multiples']
                values = graph_helper.evaluate_tensor_output(multiples_op.outputs[0])

                consumed_nodes = match.consumed_nodes
                tile_descriptor = TileLayerResolver.Descriptor(
                    str(tile_op.name), consumed_nodes, values,
                    output_names=[str(tile_op.outputs[0].name)])

                descriptors.extend([tile_descriptor])

        const_sequence = [self.sequence_pack]
        for sequence in const_sequence:
            for match in graph_matcher.match_sequence(sequence):
                if 'tile_multiples_pack' in match:
                    tile_op = match['tile']
                    tile_input_op = match['tile_input']
                    tile_multiples_pack_op = match['tile_multiples_pack']

                    tile_input_tensor = graph_helper.evaluate_tensor_output(tile_input_op.outputs[0])
                    tile_multiples_pack_tensor = graph_helper.evaluate_tensor_output(tile_multiples_pack_op.outputs[0])

                    consumed_nodes = match.consumed_nodes
                    tile_descriptor = TileLayerResolver.Descriptor(
                        str(tile_op.name), consumed_nodes, tile_input_tensor,
                        output_names=[str(tile_op.outputs[0].name)])

                    tile_input_consumed_ops = [tile_input_op]
                    tile_input_shape = graph_helper.get_op_output_shape(tile_input_op)

                    const_descriptor = ConstantLayerResolver.Descriptor(str(tile_input_op.name),
                                                                        tile_input_consumed_ops,
                                                                        tile_input_tensor,
                                                                        tile_input_shape, tile_descriptor)
                    descriptors.append(const_descriptor)

        return descriptors
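The 'multiples' tensor evaluated above gives a per-axis repetition count, with the same semantics as np.tile:

import numpy as np

np.tile(np.array([[1, 2]]), [2, 3])
# -> shape (2, 6): repeated 2x along axis 0 and 3x along axis 1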
Example #22
class PermuteLayerResolver(LayerResolver, object):
    class Descriptor(LayerDescriptor):
        def __init__(self, name, nodes, order, output_names=None):
            super(PermuteLayerResolver.Descriptor,
                  self).__init__('Permute',
                                 name,
                                 nodes,
                                 output_names=output_names)
            self.order = order

    def __init__(self):
        self.sequence_with_explicit_order = GraphSequence([
            ConverterSequenceNode('root', ['Transpose']),
            ConverterSequenceNode('order', ['Const']),
            NonConsumableConverterSequenceNode('input', ['?']),
        ])
        self.sequence_with_explicit_order.set_inputs('root',
                                                     ['input', 'order'])
        self.sequence_with_explicit_order.set_outputs(['root'])

        self.sequence_with_implicit_order = GraphSequence([
            ConverterSequenceNode('root', ['Transpose']),
            ConverterSequenceNode('order', ['Sub']),
            ConverterSequenceNode('a', ['Sub']),
            ConverterSequenceNode('b', ['Const']),
            ConverterSequenceNode('c', ['Range']),
            ConverterSequenceNode('d', ['Const']),
            ConverterSequenceNode('e', ['Const']),
            ConverterSequenceNode('f', ['Rank']),
            NonConsumableConverterSequenceNode('input', ['?'])
        ])

        self.sequence_with_implicit_order.set_inputs('root',
                                                     ['input', 'order'])
        self.sequence_with_implicit_order.set_inputs('order', ['a', 'c'])
        self.sequence_with_implicit_order.set_inputs('a', ['b', 'f'])
        self.sequence_with_implicit_order.set_inputs('c', ['d', 'e', 'f'])
        self.sequence_with_implicit_order.set_inputs('f', ['input'])
        self.sequence_with_implicit_order.set_outputs(['root'])

        self.sequences = [
            self.sequence_with_explicit_order,
            self.sequence_with_implicit_order
        ]

    def resolve_layer(self, graph_matcher, graph_helper):
        descriptors = []
        for sequence in self.sequences:
            for match in graph_matcher.match_sequence(sequence):
                transpose_op = match['root']
                input_op = match['input']
                order_op = match['order']

                order_tensor = graph_helper.evaluate_tensor_output(
                    order_op.outputs[0])

                input_shape = graph_helper.get_op_output_shape(input_op)
                order_shape = graph_helper.get_op_output_shape(order_op)

                input_rank = len(input_shape)
                order_rank = len(order_shape)
                try:
                    assert order_rank == 1
                    for d in range(input_rank):
                        assert d in order_tensor
                except AssertionError:
                    raise ConverterError(
                        code_to_message.get_error_message(
                            'ERROR_TF_PERMUTE_INVALID_ORDER_TENSOR')(
                                str(order_tensor)))

                consumed_nodes = match.consumed_nodes
                permute_descriptor = PermuteLayerResolver.Descriptor(
                    str(transpose_op.name),
                    consumed_nodes,
                    order_tensor,
                    output_names=[str(transpose_op.outputs[0].name)])
                descriptors.extend([permute_descriptor])

        return descriptors
Example #23
    def __init__(self):

        # Seq #1: boxes reshaped, and scores reshaped and sliced, for layer nms/NonMaxSuppressionV2
        sequence_1 = GraphSequence([
            # Multi class scores
            NonConsumableConverterSequenceNode('scores_input', ['?']),
            ConverterSequenceNode('scores_reshape', ['Reshape']),
            ConverterSequenceNode('scores_reshape_input_shape', ['?']),
            ConverterSequenceNode('strided_slice_input_begin', ['?']),
            ConverterSequenceNode('strided_slice_input_end', ['?']),
            ConverterSequenceNode('strided_slice_input_strides', ['?']),
            ConverterSequenceNode('scores', ['StridedSlice']),

            # Boxes
            NonConsumableConverterSequenceNode('boxes_input', ['?']),
            ConverterSequenceNode('boxes', ['Reshape']),
            ConverterSequenceNode('boxes_reshape_input_shape', ['?']),
            ConverterSequenceNode('nms', ['NonMaxSuppressionV2']),
            ConverterSequenceNode('max_output_size', ['Const']),
            ConverterSequenceNode('iou_threshold', ['?']),
        ])
        sequence_1.set_inputs('boxes',
                              ['boxes_input', 'boxes_reshape_input_shape'])
        sequence_1.set_inputs('scores_reshape',
                              ['scores_input', 'scores_reshape_input_shape'])
        sequence_1.set_inputs('scores', [
            'scores_reshape', 'strided_slice_input_begin',
            'strided_slice_input_end', 'strided_slice_input_strides'
        ])
        sequence_1.set_inputs(
            'nms', ['boxes', 'scores', 'max_output_size', 'iou_threshold'])
        sequence_1.set_outputs(['nms'])

        # Seq #2, with boxes and scores squeezed, for layer nms/NonMaxSuppressionV2
        sequence_2 = GraphSequence([
            # Multi class scores
            NonConsumableConverterSequenceNode('scores_input', ['?']),
            ConverterSequenceNode('scores', ['Squeeze']),

            # Boxes
            NonConsumableConverterSequenceNode('boxes_input', ['?']),
            ConverterSequenceNode('boxes', ['Squeeze']),
            ConverterSequenceNode('nms', ['NonMaxSuppressionV2']),
            ConverterSequenceNode('max_output_size', ['Const']),
            ConverterSequenceNode('iou_threshold', ['Const']),
        ])
        sequence_2.set_inputs('boxes', ['boxes_input'])
        sequence_2.set_inputs('scores', ['scores_input'])
        sequence_2.set_inputs(
            'nms', ['boxes', 'scores', 'max_output_size', 'iou_threshold'])
        sequence_2.set_outputs(['nms'])

        # Seq #3: no reshapes/slices are added (the resolver will handle the reshapes in this case, as needed)
        sequence_3 = GraphSequence([
            NonConsumableConverterSequenceNode('boxes', ['?']),
            NonConsumableConverterSequenceNode('scores', ['?']),
            NonConsumableConverterSequenceNode('max_output_size', ['Const']),
            NonConsumableConverterSequenceNode('stub_1', ['?']),
            ConverterSequenceNode('nms', ['NonMaxSuppressionV2']),
            NonConsumableConverterSequenceNode('iou_threshold', ['?']),
        ])

        sequence_3.set_inputs(
            'nms', ['boxes', 'scores', 'max_output_size', 'iou_threshold'])
        sequence_3.set_outputs(['nms'])

        self.sequences = [sequence_1, sequence_2, sequence_3]

        # TODO: following added for VIVO support of nms + gather in 1.23.0 to support features as inputs
        #       remove for 1.24.0 release
        # Filter seqs
        filter_sequence = GraphSequence([
            ConverterSequenceNode('gather', ['GatherV2']),
            ConverterSequenceNode('axis', ['Const']),
            NonConsumableConverterSequenceNode('params', ['?']),
            NonConsumableConverterSequenceNode('indices',
                                               ['NonMaxSuppressionV3'])
        ])
        filter_sequence.set_inputs('gather', ['params', 'indices', 'axis'])
        filter_sequence.set_outputs(['gather'])

        # Filter seqs 2
        filter_sequence_2 = GraphSequence([
            ConverterSequenceNode('gather', ['Gather']),
            NonConsumableConverterSequenceNode('params', ['?']),
            NonConsumableConverterSequenceNode('indices',
                                               ['NonMaxSuppressionV2'])
        ])
        filter_sequence_2.set_inputs('gather', ['params', 'indices'])
        filter_sequence_2.set_outputs(['gather'])

        self.g_sequences = [filter_sequence, filter_sequence_2]
Example #24
class GroupedConvolutionLayerResolver(ConvolutionLayerResolver, object):
    class Descriptor(ConvolutionLayerResolver.Descriptor):
        pass

    def __init__(self):
        super(GroupedConvolutionLayerResolver, self).__init__()

        # grouped convolution with split
        tree_output_node = ConverterSequenceNode('conv_op', ['Conv2D'])
        self.sequence = GraphSequence([
            ConverterSequenceNode('a', ['Split']),
            ConverterSequenceNode('b', ['Split']),
            ConverterRepeatableSequenceTreeNode('repeatable_graph',
                                                tree_output_node,
                                                tree_output_node),
            ConverterSequenceNode('concat_op', ['Concat']),
            ConverterSequenceNode('weights', ['Identity', 'Const']),
            NonConsumableConverterSequenceNode('inputs', ['?']),
            NonConsumableConverterSequenceNode('concat_dim', ['Const']),
            NonConsumableConverterSequenceNode('split_dim1', ['Const']),
            ConverterSequenceNode('split_dim2', ['Const'])
        ])
        self.sequence.set_inputs('a', ['inputs', 'split_dim1'])
        self.sequence.set_inputs('b', ['weights', 'split_dim2'])
        self.sequence.set_inputs('repeatable_graph', ['a', 'b'])
        self.sequence.set_inputs('concat_op',
                                 ['repeatable_graph', 'concat_dim'])
        self.sequence.set_outputs(['concat_op'])

        # grouped convolution with strided slice
        repeatable_sequence = GraphSequence([
            ConverterSequenceNode('ss', ['StridedSlice']),
            ConverterSequenceNode('ss_begin', ['Const']),
            ConverterSequenceNode('ss_end', ['Const']),
            ConverterSequenceNode('ss_strides', ['Const']),
            ConverterSequenceNode('conv', ['Conv2D']),
            ConverterSequenceNode('bias', ['BiasAdd']),
            ConverterSequenceNode('weights', ['Identity', 'Const']),
            ConverterSequenceNode('biases', ['Identity', 'Const'])
        ])
        repeatable_sequence.set_inputs('ss',
                                       ['ss_begin', 'ss_end', 'ss_strides'])
        repeatable_sequence.set_inputs('conv', ['ss', 'weights'])
        repeatable_sequence.set_inputs('bias', ['biases', 'conv'])
        repeatable_sequence.set_outputs(['bias'])

        self.sequence_with_strided_slice = GraphSequence([
            ConverterRepeatableSequenceTreeNode(
                'repeatable_graph',
                tree_output_node=repeatable_sequence['bias'],
                tree_input_node=repeatable_sequence['ss']),
            ConverterSequenceNode('concat', ['Concat', 'ConcatV2']),
            ConverterSequenceNode('axis', ['Const']),
            NonConsumableConverterSequenceNode('input', ['?'])
        ])
        self.sequence_with_strided_slice.set_inputs('repeatable_graph',
                                                    ['input'])
        self.sequence_with_strided_slice.set_inputs(
            'concat', ['repeatable_graph', 'axis'])
        self.sequence_with_strided_slice.set_outputs(['concat'])

    def resolve_layer(self, graph_matcher, graph_helper):
        descriptors = []
        for match in graph_matcher.match_sequence(self.sequence):
            conv_op = match['conv_op_1']
            strides = conv_op.get_attr(self.TF_ATTRIBUTE_STRIDES)
            padding = conv_op.get_attr(self.TF_ATTRIBUTE_PADDING)
            weights = match['weights']
            consumed_nodes = match.consumed_nodes
            output_op_nodes_names = [
                str(match[node.identifier].outputs[0].name)
                for node in self.sequence.output_nodes
            ]
            try:
                concat_op = match['concat_op']
                concat_op_output_ops = graph_helper.get_op_outputs(concat_op)
                bias_op = GraphHelper.filter_single_op_by_type(
                    concat_op_output_ops, 'BiasAdd')
                # need to consume input of bias
                biases = self.get_biases(graph_helper, conv_op, bias_op)
                consumed_nodes.append(bias_op)
                output_op_nodes_names = [str(bias_op.outputs[0].name)]
            except OperationNotFoundError:
                bias_op = None
                biases = np.zeros(weights.outputs[0].get_shape()[-1],
                                  dtype=np.float32)

            weights = graph_helper.evaluate_tensor_output(weights.outputs[0])
            descriptor = ConvolutionLayerResolver.Descriptor(
                str(conv_op.name),
                consumed_nodes,
                conv_op,
                bias_op,
                strides,
                padding,
                weights,
                biases,
                output_names=output_op_nodes_names)
            descriptor.input_ops = [match['a'], match['b']]
            descriptors.append(descriptor)

        for match in graph_matcher.match_sequence(
                self.sequence_with_strided_slice):
            if not match.consumed_nodes:
                continue
            input_op = match['input']
            concat_op = match['concat']
            axis_op = match['axis']
            conv_ops = self._get_repeatable_op_by_id(match, 'conv')
            weight_ops = self._get_repeatable_op_by_id(match, 'weights')
            bias_ops = self._get_repeatable_op_by_id(match, 'biases')
            bias_add_ops = self._get_repeatable_op_by_id(match, 'bias')
            ss_ops = self._get_repeatable_op_by_id(match, 'ss')

            input_shape = graph_helper.get_op_output_shape(input_op)
            weight_shapes = [
                graph_helper.get_op_output_shape(weight_op)
                for weight_op in weight_ops
            ]

            ss_strides = [
                graph_helper.evaluate_tensor_output(
                    ss_strides_op.outputs[0]).tolist()
                for ss_strides_op in self._get_repeatable_op_by_id(
                    match, 'ss_strides')
            ]
            ss_begins = [
                graph_helper.evaluate_tensor_output(
                    ss_begin_op.outputs[0]).tolist()
                for ss_begin_op in self._get_repeatable_op_by_id(
                    match, 'ss_begin')
            ]
            ss_ends = [
                graph_helper.evaluate_tensor_output(
                    ss_end_op.outputs[0]).tolist()
                for ss_end_op in self._get_repeatable_op_by_id(
                    match, 'ss_end')
            ]

            bias_add_shapes = [
                graph_helper.get_op_output_shape(bias_add_op)
                for bias_add_op in bias_add_ops
            ]

            strides = [
                conv_op.get_attr(self.TF_ATTRIBUTE_STRIDES)
                for conv_op in conv_ops
            ]
            paddings = [
                conv_op.get_attr(self.TF_ATTRIBUTE_PADDING)
                for conv_op in conv_ops
            ]

            ss_shapes = [
                graph_helper.get_op_output_shape(ss_op.outputs[0])
                for ss_op in ss_ops
            ]

            num_groups = len(conv_ops)

            axis = graph_helper.evaluate_tensor_output(axis_op.outputs[0])

            is_grouped_convolution = True
            is_grouped_convolution &= self._elements_are_same(bias_add_shapes)
            is_grouped_convolution &= self._elements_are_same(weight_shapes)
            is_grouped_convolution &= self._elements_are_same(strides)
            is_grouped_convolution &= self._elements_are_same(paddings)
            is_grouped_convolution &= self._elements_are_same(ss_shapes)
            is_grouped_convolution &= self._elements_are_same(ss_strides)
            is_grouped_convolution &= not self._elements_are_same(ss_begins)
            is_grouped_convolution &= not self._elements_are_same(ss_ends)
            # the strided slices must evenly partition the input's last dimension across the groups
            is_grouped_convolution &= ss_shapes[0][
                -1] * num_groups == input_shape[-1]
            # the slice strides must be all ones in every dimension
            is_grouped_convolution &= ss_strides[0] == [1] * len(ss_strides[0])
            # concat must be on the last axis in grouped convolution
            is_grouped_convolution &= axis == -1 or axis == (
                len(bias_add_shapes[0]) - 1)

            if not is_grouped_convolution:
                logging.getLogger().warning(
                    code_to_message.get_error_message(
                        'WARNING_TF_GROUP_CONV_RESOLVE'))
                continue

            weight_tensors = [
                graph_helper.evaluate_tensor_output(weight_op.outputs[0])
                for weight_op in weight_ops
            ]
            weights = np.concatenate(weight_tensors, axis=-1)

            bias_tensors = [
                graph_helper.evaluate_tensor_output(bias_op.outputs[0])
                for bias_op in bias_ops
            ]
            biases = np.concatenate(bias_tensors, axis=-1)

            descriptor = ConvolutionLayerResolver.Descriptor(
                str(concat_op.name),
                match.consumed_nodes,
                conv_ops[0],
                None,
                strides[0],
                paddings[0],
                weights,
                biases,
                output_names=[str(concat_op.outputs[0].name)])
            descriptor.input_ops = ss_ops
            descriptor.output_op = concat_op
            descriptors.append(descriptor)

        return descriptors

    @classmethod
    def _get_repeatable_op_by_id(cls, match, name):
        ops = []
        indexed_id = name + '_{}'
        i = 1
        while indexed_id.format(i) in match:
            ops.append(match[indexed_id.format(i)])
            i += 1
        return ops

    @classmethod
    def _elements_are_same(cls, array):
        return all(element == array[0] for element in array)
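
For context, a minimal sketch of the TensorFlow subgraph this resolver folds into a single grouped convolution: per-group strided slices over the channel axis, one Conv2D/BiasAdd per group, and a concat on the last axis. The names, shapes, and NHWC layout below are illustrative assumptions, not taken from the converter.

import tensorflow as tf

def grouped_conv2d(x, kernels, biases, num_groups):
    # x: NHWC input; kernels/biases: one entry per group (assumed shapes)
    group_size = int(x.shape[-1]) // num_groups
    outputs = []
    for g in range(num_groups):
        # per-group StridedSlice: begins/ends differ per group while the
        # strides are all ones -- exactly the conditions checked above
        xg = x[:, :, :, g * group_size:(g + 1) * group_size]
        yg = tf.nn.conv2d(xg, kernels[g], strides=[1, 1, 1, 1], padding='SAME')
        outputs.append(tf.nn.bias_add(yg, biases[g]))
    # concat on the last axis, matching the 'concat' node in the sequence
    return tf.concat(outputs, axis=-1)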
Example #25
class ChannelShuffleLayerResolver(LayerResolver, object):
    class Descriptor(LayerDescriptor):
        def __init__(self, name, nodes, groups, output_names=None):
            super(ChannelShuffleLayerResolver.Descriptor,
                  self).__init__('ChannelShuffle',
                                 name,
                                 nodes,
                                 output_names=output_names)
            self.groups = groups
            self.shuffle_type = modeltools.CHANNEL_SHUFFLE_GROUPED

    def __init__(self):
        self.sequence = GraphSequence([
            ConverterSequenceNode('reshape_out', ['Reshape']),
            ConverterSequenceNode('transpose', ['Transpose']),
            ConverterSequenceNode('reshape_in', ['Reshape']),
            ConverterSequenceNode('shape_in', ['Const']),
            ConverterSequenceNode('order', ['Const']),
            ConverterSequenceNode('shape_out', ['Const']),
            NonConsumableConverterSequenceNode('input', ['?']),
        ])
        self.sequence.set_inputs('reshape_out', ['shape_out', 'transpose'])
        self.sequence.set_inputs('transpose', ['order', 'reshape_in'])
        self.sequence.set_inputs('reshape_in', ['shape_in', 'input'])
        self.sequence.set_outputs(['reshape_out'])

    def resolve_layer(self, graph_matcher, graph_helper):
        descriptors = []
        for match in graph_matcher.match_sequence(self.sequence):
            input_op = match['input']
            reshape_out_op = match['reshape_out']
            reshape_in_op = match['reshape_in']
            transpose_op = match['transpose']

            input_shape = graph_helper.get_op_output_shape(input_op)
            reshape_in_shape = graph_helper.get_op_output_shape(reshape_in_op)
            transpose_shape = graph_helper.get_op_output_shape(transpose_op)
            reshape_out_shape = graph_helper.get_op_output_shape(
                reshape_out_op)

            if len(reshape_in_shape) < 2:
                raise ConverterError(
                    code_to_message.get_error_message(
                        'ERROR_TF_CHANNEL_SHUFFLE_RESHAPE'))

            num_channels = input_shape[-1]
            num_groups = reshape_in_shape[-2]

            if num_channels % num_groups != 0:
                raise ConverterError(
                    code_to_message.get_error_message(
                        'ERROR_TF_CHANNEL_SHUFFLE'))

            # integer division keeps the shape comparisons below exact
            num_channels_prime = num_channels // num_groups

            is_channel_shuffle = True
            # first reshape must split the channel dimension into [num_groups, num_channels_prime]
            is_channel_shuffle &= reshape_in_shape == input_shape[:-1] + [
                num_groups, num_channels_prime
            ]
            # transpose must permute the last two dimensions only
            is_channel_shuffle &= transpose_shape == input_shape[:-1] + [
                num_channels_prime, num_groups
            ]
            # output shape must be equal to the input shape
            is_channel_shuffle &= reshape_out_shape == input_shape

            if not is_channel_shuffle:
                raise ConverterError(
                    code_to_message.get_error_message(
                        'ERROR_TF_CHANNEL_SHUFFLE_OUTPUT'))

            consumed_nodes = match.consumed_nodes
            descriptors.append(
                ChannelShuffleLayerResolver.Descriptor(
                    str(reshape_out_op.name),
                    consumed_nodes,
                    num_groups,
                    output_names=[str(reshape_out_op.outputs[0].name)]))

        return descriptors
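
A minimal sketch of the reshape-transpose-reshape pattern this resolver matches, assuming a 4-D NHWC input with a static shape; the function and variable names are illustrative:

import tensorflow as tf

def channel_shuffle(x, num_groups):
    n, h, w, c = [int(d) for d in x.shape]
    num_channels_prime = c // num_groups
    y = tf.reshape(x, [n, h, w, num_groups, num_channels_prime])  # 'reshape_in'
    y = tf.transpose(y, [0, 1, 2, 4, 3])  # 'transpose': swap the last two dims
    return tf.reshape(y, [n, h, w, c])    # 'reshape_out': back to the input shape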
Example #26
    def __init__(self):
        sequence_prelu = GraphSequence([
            ConverterSequenceNode('a', ['Relu']),
            ConverterSequenceNode('b', ['Abs']),
            ConverterSequenceNode('c', ['Sub']),
            ConverterSequenceNode('d', ['Mul']),
            ConverterSequenceNode('e', ['Mul']),
            ConverterSequenceNode('f', ['Add']),  # output
            ConverterSequenceNode('unknown', ['?']),
            ConverterSequenceNode('alphas', ['?']),
            NonConsumableConverterSequenceNode('inputs', ['?'])
        ])
        sequence_prelu.set_inputs('a', ['inputs'])
        sequence_prelu.set_inputs('b', ['inputs'])
        sequence_prelu.set_inputs('c', ['inputs', 'b'])
        sequence_prelu.set_inputs('d', ['alphas', 'c'])
        sequence_prelu.set_inputs('e', ['d', 'unknown'])
        sequence_prelu.set_inputs('f', ['a', 'e'])
        sequence_prelu.set_outputs(['f'])

        sequence_prelu_negative_alpha = GraphSequence([
            ConverterSequenceNode('a', ['Relu']),
            ConverterSequenceNode('b', ['Neg']),
            ConverterSequenceNode('c', ['Neg']),
            ConverterSequenceNode('d', ['Relu']),
            ConverterSequenceNode('e', ['Mul']),
            ConverterSequenceNode('f', ['Add']),  # output
            ConverterSequenceNode('alphas', ['?']),
            NonConsumableConverterSequenceNode('inputs', ['?'])
        ])
        sequence_prelu_negative_alpha.set_inputs('a', ['inputs'])
        sequence_prelu_negative_alpha.set_inputs('b', ['inputs'])
        sequence_prelu_negative_alpha.set_inputs('c', ['alphas'])
        sequence_prelu_negative_alpha.set_inputs('d', ['b'])
        sequence_prelu_negative_alpha.set_inputs('e', ['d', 'c'])
        sequence_prelu_negative_alpha.set_inputs('f', ['a', 'e'])
        sequence_prelu_negative_alpha.set_outputs(['f'])

        sequence_prelu_negative_relu = GraphSequence([
            ConverterSequenceNode('relu_pos', ['Relu']),
            ConverterSequenceNode('neg_1', ['Neg']),
            ConverterSequenceNode('neg_2', ['Neg']),
            ConverterSequenceNode('relu_neg', ['Relu']),
            ConverterSequenceNode('mul', ['Mul']),
            ConverterSequenceNode('f', ['Add']),  # output
            ConverterSequenceNode('alphas', ['?']),
            NonConsumableConverterSequenceNode('inputs', ['?'])
        ])
        sequence_prelu_negative_relu.set_inputs('relu_pos', ['inputs'])
        sequence_prelu_negative_relu.set_inputs('neg_1', ['inputs'])
        sequence_prelu_negative_relu.set_inputs('relu_neg', ['neg_1'])
        sequence_prelu_negative_relu.set_inputs('neg_2', ['relu_neg'])
        sequence_prelu_negative_relu.set_inputs('mul', ['neg_2', 'alphas'])
        sequence_prelu_negative_relu.set_inputs('f', ['relu_pos', 'mul'])
        sequence_prelu_negative_relu.set_outputs(['f'])

        self.sequences = [
            sequence_prelu, sequence_prelu_negative_alpha,
            sequence_prelu_negative_relu
        ]
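
All three sequences are algebraic spellings of PReLU: y = x for x > 0 and y = alphas * x otherwise. A sketch of the arithmetic, assuming the 'unknown' multiplier in sequence_prelu is a 0.5 constant (since x - |x| = 2 * min(x, 0)):

import tensorflow as tf

def prelu_via_abs(x, alphas):
    # sequence_prelu: Relu(x) + alphas * (x - Abs(x)) * 0.5
    return tf.nn.relu(x) + alphas * (x - tf.abs(x)) * 0.5

def prelu_via_neg(x, alphas):
    # sequence_prelu_negative_alpha / _negative_relu: Relu(-x) isolates
    # the negative part and the Neg nodes restore the sign
    return tf.nn.relu(x) - alphas * tf.nn.relu(-x)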
Example #27
    NonConsumableConverterSequenceNode('stub_40', ['?']),
    NonConsumableConverterSequenceNode('stub_41', ['?']),
    NonConsumableConverterSequenceNode('stub_42', ['?']),
    NonConsumableConverterSequenceNode('stub_43', ['?']),
    NonConsumableConverterSequenceNode('stub_44', ['?']),
    NonConsumableConverterSequenceNode('stub_45', ['?']),
    NonConsumableConverterSequenceNode('stub_46', ['?']),
    NonConsumableConverterSequenceNode('stub_47', ['?']),
    NonConsumableConverterSequenceNode('stub_48', ['?']),
    NonConsumableConverterSequenceNode('stub_49', ['?']),
    NonConsumableConverterSequenceNode('stub_50', ['?']),
    NonConsumableConverterSequenceNode('stub_51', ['?']),
    NonConsumableConverterSequenceNode('stub_52', ['?']),
])
box_decoder_sequence.set_inputs(
    'Postprocessor/Decode/add_3',
    ['Postprocessor/Decode/add_1', 'Postprocessor/Decode/div_7'])
box_decoder_sequence.set_inputs('Postprocessor/Decode/mul_3', [
    'Postprocessor/Decode/div_1',
    'Postprocessor/Decode/get_center_coordinates_and_sizes/sub'
])
box_decoder_sequence.set_inputs(
    'Postprocessor/Decode/add_2',
    ['Postprocessor/Decode/add', 'Postprocessor/Decode/div_6'])
box_decoder_sequence.set_inputs('Postprocessor/Decode/div_6',
                                ['Postprocessor/Decode/mul_1', 'stub_49'])
box_decoder_sequence.set_inputs('Postprocessor/Decode/div_3',
                                ['Postprocessor/Decode/unstack', 'stub_41'])
box_decoder_sequence.set_inputs(
    'Postprocessor/Decode/sub_1',
    ['Postprocessor/Decode/add_1', 'Postprocessor/Decode/div_5'])
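
The Postprocessor/Decode node names suggest the anchor-box decoder from the TensorFlow Object Detection API. A hedged numpy sketch of the arithmetic those div/mul/add nodes presumably compute (the scale factors are an assumption; SSD configs commonly use 10, 10, 5, 5):

import numpy as np

def decode_boxes(rel_codes, anchors, scale_factors=(10., 10., 5., 5.)):
    # rel_codes: [N, 4] as (ty, tx, th, tw); anchors: [N, 4] corner boxes
    ymin, xmin, ymax, xmax = [anchors[:, i] for i in range(4)]
    ha, wa = ymax - ymin, xmax - xmin          # get_center_coordinates_and_sizes
    ycenter_a, xcenter_a = ymin + 0.5 * ha, xmin + 0.5 * wa
    ty, tx, th, tw = [rel_codes[:, i] / s for i, s in enumerate(scale_factors)]
    h, w = np.exp(th) * ha, np.exp(tw) * wa    # the mul_* nodes
    ycenter, xcenter = ty * ha + ycenter_a, tx * wa + xcenter_a  # the add_* nodes
    return np.stack([ycenter - 0.5 * h, xcenter - 0.5 * w,
                     ycenter + 0.5 * h, xcenter + 0.5 * w], axis=-1)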
Example #28
class InstanceNormLayerResolver(LayerResolver, object):
    class Descriptor(LayerDescriptor):
        def __init__(self, name, operations, shape):
            super(InstanceNormLayerResolver.Descriptor,
                  self).__init__('InstanceNorm', name, operations)
            self.shape = shape
            # The SNPE runtime computes y = x * WEIGHT / rms + BIAS, while
            # L2 normalization is y = x / rms, so WEIGHT = 1.0 and BIAS = 0.0
            # are required to mimic L2 norm in SNPE.
            # The weights/biases shape must match the last dimension of the input.
            self.weights = np.ones(shape[-1])
            self.biases = np.zeros(shape[-1])

    def __init__(self):
        self.sequence = GraphSequence([
            NonConsumableConverterSequenceNode('input', ['?']),
            ConverterSequenceNode('StopGradient', ['StopGradient']),
            ConverterSequenceNode('SquaredDifference', ['SquaredDifference']),
            ConverterSequenceNode('variance', ['Mean']),
            ConverterSequenceNode('add', ['Add']),
            ConverterSequenceNode('mean', ['Mean']),
            NonConsumableConverterSequenceNode('gamma', ['Identity']),
            ConverterSequenceNode('Rsqrt', ['Rsqrt']),
            ConverterSequenceNode('mul_2', ['Mul']),
            NonConsumableConverterSequenceNode('beta', ['Identity']),
            ConverterSequenceNode('mul', ['Mul']),
            ConverterSequenceNode('sub', ['Sub']),
            ConverterSequenceNode('mul_1', ['Mul']),
            ConverterSequenceNode('add_1', ['Add']),
            NonConsumableConverterSequenceNode('stub_14', ['?']),
            NonConsumableConverterSequenceNode('stub_15', ['?']),
            NonConsumableConverterSequenceNode('stub_16', ['?']),
            NonConsumableConverterSequenceNode('stub_17', ['?']),
            NonConsumableConverterSequenceNode('stub_18', ['?']),
        ])
        self.sequence.set_inputs('variance', ['SquaredDifference', 'stub_14'])
        self.sequence.set_inputs('StopGradient', ['mean'])
        self.sequence.set_inputs('add', ['variance', 'stub_15'])
        self.sequence.set_inputs('sub', ['beta', 'mul_2'])
        self.sequence.set_inputs('mean', ['input', 'stub_16'])
        self.sequence.set_inputs('gamma', ['stub_17'])
        self.sequence.set_inputs('mul_2', ['mean', 'mul'])
        self.sequence.set_inputs('Rsqrt', ['add'])
        self.sequence.set_inputs('beta', ['stub_18'])
        self.sequence.set_inputs('mul', ['Rsqrt', 'gamma'])
        self.sequence.set_inputs('add_1', ['mul_1', 'sub'])
        self.sequence.set_inputs('mul_1', ['input', 'mul'])
        self.sequence.set_inputs('SquaredDifference',
                                 ['input', 'StopGradient'])
        self.sequence.set_outputs(['add_1'])

    def is_final_resolution(self):
        return True

    def resolve_layer(self, graph_matcher, graph_helper):
        matches = graph_matcher.match_sequence(self.sequence)
        potential_descriptors = []
        for match in matches:
            bn_op = match['SquaredDifference']
            input_op = match['input']

            shape = graph_helper.get_op_output_shape(input_op)

            consumed_nodes = match.consumed_nodes
            potential_descriptors.append(
                InstanceNormLayerResolver.Descriptor(str(bn_op.name),
                                                     consumed_nodes,
                                                     shape=shape))
        return potential_descriptors
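
A sketch of the unfused normalization subgraph this sequence matches, written in TF 1.x style and assuming an NHWC input with spatial axes [1, 2]; it mirrors the StopGradient/SquaredDifference wiring above:

import tensorflow as tf

def instance_norm_pattern(x, gamma, beta, epsilon=1e-5):
    mean = tf.reduce_mean(x, axis=[1, 2], keepdims=True)        # 'mean'
    variance = tf.reduce_mean(                                  # 'variance'
        tf.squared_difference(x, tf.stop_gradient(mean)),
        axis=[1, 2], keepdims=True)
    mul = tf.rsqrt(variance + epsilon) * gamma                  # 'Rsqrt', 'mul'
    return x * mul + (beta - mean * mul)                        # 'mul_1', 'sub', 'add_1'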
Example #29
class ScaledBatchNormLayerResolver(BatchNormLayerResolver):
    def __init__(self):
        self.sequence = GraphSequence([
            ConverterSequenceNode('a', ['Add']),
            ConverterSequenceNode('b', ['Rsqrt']),
            ConverterSequenceNode('c', ['Mul']),
            ConverterSequenceNode('d', ['Mul']),
            ConverterSequenceNode('e', ['Mul']),
            ConverterSequenceNode('f', ['Sub']),
            ConverterSequenceNode('g', ['Add']),
            ConverterSequenceNode('scale', ['?']),
            NonConsumableConverterSequenceNode('input', ['?']),
            ConverterSequenceNode('mean', ['?']),
            ConverterSequenceNode('beta', ['?']),
            ConverterSequenceNode('variance', ['?']),
            ConverterSequenceNode('epsilon', ['?'])
        ])
        self.sequence.set_inputs('a', ['variance', 'epsilon'])
        self.sequence.set_inputs('b', ['a'])
        self.sequence.set_inputs('c', ['b', 'scale'])
        self.sequence.set_inputs('d', ['c', 'input'])
        self.sequence.set_inputs('e', ['c', 'mean'])
        self.sequence.set_inputs('f', ['e', 'beta'])
        self.sequence.set_inputs('g', ['d', 'f'])
        self.sequence.set_outputs(['g'])

    def resolve_layer(self, graph_matcher, graph_helper):
        matches = graph_matcher.match_sequence(self.sequence)
        if not matches:
            return []
        descriptors = []
        for match in matches:
            variance_op = match['variance']
            epsilon_op = match['epsilon']
            if variance_op.type not in ['Identity', 'Const']:
                raise ConverterError(
                    code_to_message.get_error_message(
                        'ERROR_TF_BATCHNORM_RESOLVE_VARIANCE'))
            variance = graph_helper.evaluate_tensor_output(
                variance_op.outputs[0])

            if epsilon_op.type not in ['Identity', 'Const']:
                raise ConverterError(
                    code_to_message.get_error_message(
                        'ERROR_TF_BATCHNORM_RESOLVE_EPSILON'))
            epsilon = graph_helper.evaluate_tensor_output(
                epsilon_op.outputs[0])

            scale_op = match['scale']
            if scale_op.type not in ['Identity', 'Const', 'Fill']:
                raise ConverterError(
                    code_to_message.get_error_message(
                        'ERROR_TF_BATCHNORM_RESOLVE_SCALE'))
            scale = graph_helper.evaluate_tensor_output(scale_op.outputs[0])

            mean_op = match['mean']
            if mean_op.type not in ['Identity', 'Const']:
                raise ConverterError(
                    code_to_message.get_error_message(
                        'ERROR_TF_BATCHNORM_RESOLVE_MEAN'))
            mean = graph_helper.evaluate_tensor_output(mean_op.outputs[0])

            beta_op = match['beta']
            if beta_op.type not in ['Identity', 'Const']:
                raise ConverterError(
                    code_to_message.get_error_message(
                        'ERROR_TF_BATCHNORM_RESOLVE_BETA'))
            beta = graph_helper.evaluate_tensor_output(beta_op.outputs[0])

            output_op_nodes_names = [
                str(match[node.identifier].outputs[0].name)
                for node in self.sequence.output_nodes
            ]
            descriptors.append(
                BatchNormLayerResolver.Descriptor(
                    str(match['d'].name),
                    match.consumed_nodes,
                    bn_mul_op=match['d'],
                    mean=mean,
                    variance=variance,
                    epsilon=epsilon,
                    scale=scale,
                    beta=beta,
                    output_names=output_op_nodes_names))
        return descriptors
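
For reference, a hedged sketch of the arithmetic such an unfused batch-norm subgraph computes (the operand order of the Sub node is inferred from standard batch norm; the matcher itself does not encode it):

import tensorflow as tf

def scaled_batch_norm_pattern(x, mean, variance, scale, beta, epsilon):
    # a = variance + epsilon; b = Rsqrt(a); c = b * scale
    # d = c * x; e = c * mean; f = beta - e; g = d + f
    c = tf.rsqrt(variance + epsilon) * scale
    return x * c + (beta - mean * c)  # == scale * (x - mean) / sqrt(var + eps) + beta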
Example #30
        ['Sigmoid']),
    ConverterSequenceNode(
        'rnn/rnn/multi_rnn_cell/cell_0/cell_0/basic_lstm_cell/Tanh_1',
        ['Tanh']),
    ConverterSequenceNode(
        'rnn/rnn/multi_rnn_cell/cell_0/cell_0/basic_lstm_cell/mul_2', ['Mul']),
    NonConsumableConverterSequenceNode('stub_16', ['?']),
    NonConsumableConverterSequenceNode('stub_17', ['?']),
    NonConsumableConverterSequenceNode('stub_18', ['?']),
    NonConsumableConverterSequenceNode('stub_19', ['?']),
    NonConsumableConverterSequenceNode('stub_20', ['?']),
    NonConsumableConverterSequenceNode('stub_21', ['?']),
    NonConsumableConverterSequenceNode('stub_22', ['?']),
    NonConsumableConverterSequenceNode('stub_23', ['?']),
])
cell_sequence.set_inputs(
    'rnn/multi_rnn_cell/cell_0/basic_lstm_cell/kernel/read', ['stub_16'])
cell_sequence.set_inputs(
    'rnn/rnn/multi_rnn_cell/cell_0/cell_0/basic_lstm_cell/Tanh_1',
    ['rnn/rnn/multi_rnn_cell/cell_0/cell_0/basic_lstm_cell/add_1'])
cell_sequence.set_inputs(
    'rnn/rnn/multi_rnn_cell/cell_0/cell_0/basic_lstm_cell/Sigmoid',
    ['rnn/rnn/multi_rnn_cell/cell_0/cell_0/basic_lstm_cell/add'])
cell_sequence.set_inputs(
    'rnn/rnn/multi_rnn_cell/cell_0/cell_0/basic_lstm_cell/basic_lstm_cell/concat',
    ['stub_17', 'stub_18', 'stub_19'])
cell_sequence.set_inputs(
    'rnn/rnn/multi_rnn_cell/cell_0/cell_0/basic_lstm_cell/Tanh',
    ['rnn/rnn/multi_rnn_cell/cell_0/cell_0/basic_lstm_cell/split'])
cell_sequence.set_inputs(
    'rnn/rnn/multi_rnn_cell/cell_0/cell_0/basic_lstm_cell/add',
    ['rnn/rnn/multi_rnn_cell/cell_0/cell_0/basic_lstm_cell/split', 'stub_22'])
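
The basic_lstm_cell node names in this fragment line up with TensorFlow's BasicLSTMCell. A hedged numpy sketch of one cell step, assuming the usual concat -> MatMul -> split-into-four-gates layout (kernel, bias, and forget_bias are illustrative parameters):

import numpy as np

def sigmoid(v):
    return 1.0 / (1.0 + np.exp(-v))

def basic_lstm_cell_step(x, h, c_prev, kernel, bias, forget_bias=1.0):
    # 'concat' -> MatMul -> 'split' into i, j, f, o, then the
    # Sigmoid/Tanh/Mul/Add nodes matched above
    z = np.concatenate([x, h], axis=-1) @ kernel + bias
    i, j, f, o = np.split(z, 4, axis=-1)
    c = c_prev * sigmoid(f + forget_bias) + sigmoid(i) * np.tanh(j)
    return np.tanh(c) * sigmoid(o), c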