Example no. 1
0
    def __init__(self):
        # Simple case: a lone SoftMax op handles the two-dimensional input.
        two_dim = GraphSequence(
            [ConverterSequenceNode('root', ['SoftMax'])])
        two_dim.set_outputs(['root'])

        # Decomposed case: softmax expanded into
        # exp(x - max(x)) / sum(exp(x - max(x))).
        multi_dim = GraphSequence([
            ConverterSequenceNode('max', ['Max']),
            ConverterSequenceNode('max_reduction_indicies', ['Const']),
            ConverterSequenceNode('sub', ['Sub']),
            ConverterSequenceNode('exp', ['Exp']),
            ConverterSequenceNode('sum', ['Sum']),
            ConverterSequenceNode('sum_reduction_indicies', ['Const']),
            ConverterSequenceNode('root', ['RealDiv']),
            NonConsumableConverterSequenceNode('input', ['?'])
        ])
        multi_dim.set_inputs('max', ['input', 'max_reduction_indicies'])
        multi_dim.set_inputs('sub', ['input', 'max'])
        multi_dim.set_inputs('exp', ['sub'])
        multi_dim.set_inputs('sum', ['exp', 'sum_reduction_indicies'])
        multi_dim.set_inputs('root', ['exp', 'sum'])
        multi_dim.set_outputs(['root'])

        self.sequences = [two_dim, multi_dim]
Example no. 2
0
class CropLayerResolver(LayerResolver, object):
    """Resolves TensorFlow Slice ops into Crop layer descriptors."""

    class Descriptor(LayerDescriptor):
        def __init__(self, name, nodes, offset, size, output_names=None):
            super(CropLayerResolver.Descriptor, self).__init__('Crop', name, nodes, output_names=output_names)
            self.offset = offset  # begin index of the slice, one entry per dimension
            self.size = size      # slice extent per dimension (-1 sentinels already resolved)

    def __init__(self):
        # Match a single Slice op; its input, offsets and size producers are
        # matched but not consumed.
        self.sequence = GraphSequence([
            ConverterSequenceNode('root', ['Slice']),
            NonConsumableConverterSequenceNode('input', ['?']),
            NonConsumableConverterSequenceNode('offsets', ['?']),
            NonConsumableConverterSequenceNode('size', ['?']),
        ])
        self.sequence.set_inputs('root', ['input', 'offsets', 'size'])
        self.sequence.set_outputs(['root'])

    def resolve_layer(self, graph_matcher, graph_helper):
        """Return a Crop descriptor for every matched Slice op.

        TensorFlow allows -1 in the size tensor to mean "to the end of this
        dimension"; those entries are resolved here to concrete extents so
        downstream consumers always see absolute sizes.
        """
        descriptors = []
        for match in graph_matcher.match_sequence(self.sequence):
            slice_op = match['root']
            input_shape = graph_helper.get_op_output_shape(match['input'])
            offset = graph_helper.evaluate_tensor_output(match['offsets'].outputs[0])
            size = graph_helper.evaluate_tensor_output(match['size'].outputs[0])
            # Resolve TensorFlow's -1 "rest of the dimension" sentinel in place.
            for index, extent in enumerate(size):
                if extent == -1:
                    size[index] = input_shape[index] - offset[index]

            consumed_nodes = match.consumed_nodes
            descriptors.append(
                CropLayerResolver.Descriptor(str(slice_op.name), consumed_nodes, offset, size))
        return descriptors
Example no. 3
0
    def __init__(self):
        # A Tile op whose input and multiples producers are matched but
        # not consumed.
        tile_sequence = GraphSequence([
            NonConsumableConverterSequenceNode('input', ['?']),
            ConverterSequenceNode('tile', ['Tile']),
            NonConsumableConverterSequenceNode('multiples', ['?'])
        ])
        tile_sequence.set_inputs('tile', ['input', 'multiples'])
        tile_sequence.set_outputs(['tile'])
        self.sequences = [tile_sequence]
Example no. 4
0
    def __init__(self):
        # A Pow op raised to a constant exponent.
        scalar_pow = GraphSequence([
            NonConsumableConverterSequenceNode('input', ['?']),
            ConverterSequenceNode('pow', ['Pow']),
            ConverterSequenceNode('const', ['Const'])
        ])
        scalar_pow.set_inputs('pow', ['input', 'const'])
        scalar_pow.set_outputs(['pow'])
        self.sequences = [scalar_pow]
Example no. 5
0
    def __init__(self):
        # Simple case: a lone ResizeBilinear op.
        plain_resize = GraphSequence([ConverterSequenceNode('root', ['ResizeBilinear'])])
        plain_resize.set_outputs(['root'])

        # Scaled case: the target size is derived from the input's own shape
        # through a Shape -> StridedSlice -> Mul chain.
        shape_slice_resize = GraphSequence([
            NonConsumableConverterSequenceNode('input', ['?']),
            ConverterSequenceNode('shape', ['Shape']),
            ConverterSequenceNode('stridedSlice', ['StridedSlice']),
            ConverterSequenceNode('mul', ['Mul']),
            ConverterSequenceNode('const_stridedSlice_1', ['?']),
            ConverterSequenceNode('const_stridedSlice_2', ['?']),
            ConverterSequenceNode('const_stridedSlice_3', ['?']),
            ConverterSequenceNode('mul_const', ['?']),
            ConverterSequenceNode('root', ['ResizeBilinear'])])
        shape_slice_resize.set_inputs('shape', ['input'])
        shape_slice_resize.set_inputs('stridedSlice',
                                      ['shape',
                                       'const_stridedSlice_1',
                                       'const_stridedSlice_2',
                                       'const_stridedSlice_3'])
        shape_slice_resize.set_inputs('mul', ['stridedSlice', 'mul_const'])
        shape_slice_resize.set_inputs('root', ['mul', 'input'])
        shape_slice_resize.set_outputs(['root'])

        self.sequences = [plain_resize, shape_slice_resize]
Example no. 6
0
class ReductionLayerResolver(LayerResolver, object):
    """Base resolver for TensorFlow reduction ops.

    Subclasses bind a concrete layer type, a TF op type (e.g. 'Sum', 'Max')
    and a descriptor class.
    """
    __metaclass__ = ABCMeta

    class Descriptor(LayerDescriptor):
        def __init__(self, layer_type, name, nodes, axes, keep_dims, output_names=None):
            super(ReductionLayerResolver.Descriptor, self).__init__(layer_type, name, nodes, output_names=output_names)
            self.axes = axes            # non-negative reduction axes
            self.keep_dims = keep_dims  # whether reduced dims are retained with size 1

    def __init__(self, layer_type, op_type, descriptor_class):
        super(ReductionLayerResolver, self).__init__()
        self._layer_type = layer_type
        self._op_type = op_type
        self._descriptor_class = descriptor_class

        # The reduction op must take a constant reduction_indices tensor.
        self.sequence = GraphSequence([
            ConverterSequenceNode('root', [self._op_type]),
            ConverterSequenceNode('reduction_indices', ['Const']),
            NonConsumableConverterSequenceNode('input', ['?']),
        ])
        self.sequence.set_inputs('root', ['input', 'reduction_indices'])
        self.sequence.set_outputs(['root'])

    def resolve_layer(self, graph_matcher, graph_helper):
        """Return one descriptor per matched reduction op.

        Axes are normalized to non-negative integers relative to the rank
        of the reduced input.
        """
        descriptors = []
        for match in graph_matcher.match_sequence(self.sequence):
            reduction_op = match['root']
            input_op = match['input']
            reduction_indices_op = match['reduction_indices']

            axes = graph_helper.evaluate_tensor_output(reduction_indices_op.outputs[0])
            keep_dims = bool(reduction_op.get_attr('keep_dims'))

            input_shape = graph_helper.get_op_output_shape(input_op)
            input_rank = len(input_shape)

            # A scalar indices tensor reduces a single axis; wrap it so the
            # normalization below sees a list either way.
            axes = [axes] if np.isscalar(axes) else axes.tolist()
            # Normalize negative axes to their positive equivalents.
            axes = [int(axis) + input_rank if int(axis) < 0 else int(axis)
                    for axis in axes]

            reduction_descriptor = self._descriptor_class(self._layer_type, str(reduction_op.name),
                                                          match.consumed_nodes, axes, keep_dims,
                                                          output_names=[str(reduction_op.outputs[0].name)])
            descriptors.append(reduction_descriptor)

        return descriptors
Example no. 7
0
class GenericBatchNormLayerResolver(BatchNormLayerResolver):
    class Descriptor(BatchNormLayerResolver.Descriptor):
        pass

    def __init__(self):
        # Folded batch-norm pattern: y = x * weights + biases, where the
        # weights and biases operands are constants (possibly via Identity).
        self.sequence = GraphSequence([
            NonConsumableConverterSequenceNode('inputs', ['?']),
            ConverterSequenceNode('a', ['Mul']),
            ConverterSequenceNode('b', ['Add']),
            ConverterSequenceNode('weights', ['Const', 'Identity']),
            ConverterSequenceNode('biases', ['Const', 'Identity'])
        ])
        self.sequence.set_inputs('a', ['inputs', 'weights'])
        self.sequence.set_inputs('b', ['a', 'biases'])
        self.sequence.set_outputs(['b'])

    def resolve_layer(self, graph_matcher, graph_helper):
        descriptors = []
        for match in graph_matcher.match_sequence(self.sequence):
            # Evaluate the constant operands to concrete tensors.
            weights = graph_helper.evaluate_tensor_output(
                match['weights'].outputs[0])
            biases = graph_helper.evaluate_tensor_output(
                match['biases'].outputs[0])
            output_names = [
                str(match[node.identifier].outputs[0].name)
                for node in self.sequence.output_nodes
            ]
            mul_op = match['a']
            descriptors.append(
                GenericBatchNormLayerResolver.Descriptor(
                    str(mul_op.name),
                    match.consumed_nodes,
                    bn_mul_op=mul_op,
                    pre_calculated=True,
                    weights=weights,
                    biases=biases,
                    output_names=output_names))
        return descriptors
Example no. 8
0
class DepthwiseConvolutionLayerResolver(ConvolutionLayerResolver, object):
    def __init__(self):
        super(DepthwiseConvolutionLayerResolver, self).__init__()
        # Depthwise conv immediately followed by a BiasAdd.
        self.graph_sequence_with_bias = GraphSequence([
            ConverterSequenceNode('conv', ['DepthwiseConv2dNative']),
            ConverterSequenceNode('bias', ['BiasAdd']),
            NonConsumableConverterSequenceNode('other', ['?'])
        ])
        self.graph_sequence_with_bias.set_inputs('bias', ['conv', 'other'])
        self.graph_sequence_with_bias.set_outputs(['bias'])

        # Bare depthwise conv with no bias.
        self.graph_sequence = GraphSequence(
            [ConverterSequenceNode('conv', ['DepthwiseConv2dNative'])])
        self.graph_sequence.set_outputs(['conv'])

    def resolve_layer(self, graph_matcher, graph_helper):
        all_matches = (graph_matcher.match_sequence(self.graph_sequence) +
                       graph_matcher.match_sequence(self.graph_sequence_with_bias))
        descriptors = []
        for match in all_matches:
            self._resolve_from_match(descriptors, graph_helper, match)
        return descriptors

    def _resolve_from_match(self, descriptors, graph_helper, match):
        conv_op = match['conv']
        strides = conv_op.get_attr(self.TF_ATTRIBUTE_STRIDES)
        padding = conv_op.get_attr(self.TF_ATTRIBUTE_PADDING)
        # Swap the last two kernel axes before handing the weights downstream.
        weights = np.transpose(self.get_weights(graph_helper, conv_op),
                               [0, 1, 3, 2])

        if 'bias' in match:
            biases = self.get_biases(graph_helper, conv_op, match['bias'])
        else:
            # No BiasAdd matched: synthesize zero biases of the output depth.
            biases = np.zeros(np.shape(weights)[-1], dtype=np.float32)

        descriptor = ConvolutionLayerResolver.Descriptor(str(conv_op.name),
                                                         match.consumed_nodes,
                                                         conv_op, None,
                                                         strides, padding,
                                                         weights, biases)
        input_tensor, _ = GraphHelper.get_op_input_tensors(conv_op, ('?', '?'))
        # One group per input channel makes the convolution depthwise.
        descriptor.groups = graph_helper.get_op_output_shape(input_tensor)[-1]
        descriptors.append(descriptor)
Example no. 9
0
    def __init__(self):
        # ExtractGlimpse whose offsets come from an arbitrary producer.
        basic = GraphSequence([
            NonConsumableConverterSequenceNode('input', ['?']),
            NonConsumableConverterSequenceNode('size', ['?']),
            NonConsumableConverterSequenceNode('offsets', ['?']),
            ConverterSequenceNode('extract_glimpse', ['ExtractGlimpse'])
        ])
        basic.set_inputs('extract_glimpse', ['input', 'size', 'offsets'])
        basic.set_outputs(['extract_glimpse'])

        # ExtractGlimpse whose offsets come from a StridedSlice op, which is
        # consumed as part of the match.
        with_strided_slice = GraphSequence([
            NonConsumableConverterSequenceNode('input', ['?']),
            NonConsumableConverterSequenceNode('size', ['?']),
            ConverterSequenceNode('offsets', ['StridedSlice']),
            ConverterSequenceNode('extract_glimpse', ['ExtractGlimpse'])
        ])
        with_strided_slice.set_inputs('extract_glimpse',
                                      ['input', 'size', 'offsets'])
        with_strided_slice.set_outputs(['extract_glimpse'])

        self.sequences = [basic, with_strided_slice]
Example no. 10
0
class ArgMaxLayerResolver(LayerResolver, object):
    """Resolves TensorFlow ArgMax ops into ArgMax layer descriptors."""

    class Descriptor(LayerDescriptor):
        def __init__(self, name, nodes, axis, output_names=None):
            super(ArgMaxLayerResolver.Descriptor, self).__init__('ArgMax', name, nodes, output_names=output_names)
            self.axis = axis  # non-negative axis along which the arg-max is taken

    def __init__(self):
        # ArgMax must take a constant axis input.
        self.sequence = GraphSequence([
            ConverterSequenceNode('root', ['ArgMax']),
            ConverterSequenceNode('axis', ['Const']),
            NonConsumableConverterSequenceNode('input', ['?']),
        ])
        self.sequence.set_inputs('root', ['input', 'axis'])
        self.sequence.set_outputs(['root'])

    def resolve_layer(self, graph_matcher, graph_helper):
        """Return one ArgMax descriptor per match, normalizing negative axes.

        Raises:
            ConverterError: if the axis falls outside [0, input_rank) after
                normalization.
        """
        descriptors = []
        for match in graph_matcher.match_sequence(self.sequence):
            argmax_op = match['root']
            input_op = match['input']
            axis_op = match['axis']

            input_shape = graph_helper.get_op_output_shape(input_op)
            input_rank = len(input_shape)

            # Normalize a negative axis to its positive equivalent.
            axis = int(graph_helper.evaluate_tensor_output(axis_op.outputs[0]))
            if axis < 0:
                axis += input_rank

            if axis < 0 or axis >= input_rank:
                raise ConverterError(code_to_message.get_message('ERROR_TF_ARGMAX_INVALID_AXIS')(axis, input_rank))

            argmax_descriptor = ArgMaxLayerResolver.Descriptor(
                str(argmax_op.name), match.consumed_nodes, axis,
                output_names=[str(argmax_op.outputs[0].name)])
            descriptors.append(argmax_descriptor)

        return descriptors
Example no. 11
0
class DilatedConvolutionLayerResolver(ConvolutionLayerResolver, object):
    """Resolves dilated convolutions expressed as a
    SpaceToBatchND -> Conv2D -> BatchToSpaceND op triple."""

    class Descriptor(ConvolutionLayerResolver.Descriptor):
        pass

    def __init__(self):
        super(DilatedConvolutionLayerResolver, self).__init__()
        # The SpaceToBatchND block-shape input ('dilation_sizes') carries the
        # per-axis dilation factors extracted below.
        self.graph_sequence = GraphSequence([
            ConverterSequenceNode('space_to_batch', ['SpaceToBatchND']),
            NonConsumableConverterSequenceNode('inputs', ['?']),
            ConverterSequenceNode('dilation_sizes', ['?']),
            ConverterSequenceNode('paddings', ['?']),
            ConverterSequenceNode('conv_op', ['Conv2D']),
            ConverterSequenceNode('kernel', ['?']),
            ConverterSequenceNode('batch_to_space', ['BatchToSpaceND']),
            ConverterSequenceNode('block_shape_out', ['?']),
            ConverterSequenceNode('crops', ['?'])]
        )
        self.graph_sequence.set_inputs('space_to_batch', ['inputs', 'dilation_sizes', 'paddings'])
        self.graph_sequence.set_inputs('conv_op', ['space_to_batch', 'kernel'])
        self.graph_sequence.set_inputs('batch_to_space', ['conv_op', 'block_shape_out', 'crops'])
        self.graph_sequence.set_outputs(['batch_to_space'])

    def resolve_layer(self, graph_matcher, graph_helper):
        """Return one convolution descriptor per matched dilated-conv subgraph.

        Raises:
            ConverterError: if the dilation sizes tensor is not shape (2,).
        """
        matches = graph_matcher.match_sequence(self.graph_sequence)
        if len(matches) == 0:
            return []
        descriptors = []
        for match in matches:
            conv_op = match['conv_op']
            strides = conv_op.get_attr(self.TF_ATTRIBUTE_STRIDES)
            padding = conv_op.get_attr(self.TF_ATTRIBUTE_PADDING)
            weights = self.get_weights(graph_helper, conv_op)
            consumed_nodes = match.consumed_nodes
            output_op_nodes_names = [str(match[node.identifier].outputs[0].name) for node in
                                     self.graph_sequence.output_nodes]
            try:
                # If a BiasAdd consumes the BatchToSpaceND output, fold it into
                # this layer: consume it and report its output name instead.
                batch_to_space_op = match['batch_to_space']
                conv_output_ops = graph_helper.get_op_outputs(batch_to_space_op)
                bias_op = GraphHelper.filter_single_op_by_type(conv_output_ops, 'BiasAdd')
                biases = self.get_biases(graph_helper, conv_op, bias_op)
                consumed_nodes.append(bias_op)
                output_op_nodes_names = [str(bias_op.outputs[0].name)]
            except OperationNotFoundError:
                # No trailing BiasAdd: use zero biases matching the output depth.
                bias_op = None
                biases = np.zeros(weights.shape[-1], dtype=np.float32)
            dilation_sizes = match['dilation_sizes']
            dilation_sizes = graph_helper.evaluate_tensor_output(dilation_sizes.outputs[0])
            # Only a 2-element dilation vector (one factor per spatial axis)
            # is supported.
            if np.shape(dilation_sizes) != (2,):
                raise ConverterError(code_to_message.get_message('ERROR_TF_CONV_RESOLVE_DILATION')(conv_op.name))

            d = ConvolutionLayerResolver.Descriptor(str(conv_op.name), consumed_nodes,
                                                    conv_op, bias_op, strides, padding, weights, biases,
                                                    output_names=output_op_nodes_names)
            d.dilationY = int(dilation_sizes[0])
            d.dilationX = int(dilation_sizes[1])
            # The layer's graph input is the SpaceToBatchND op, not the Conv2D.
            d.input_ops = [match['space_to_batch']]
            descriptors.append(d)
        return descriptors
Example no. 12
0
class PReLuLayerResolver(LayerResolver, object):
    class Descriptor(LayerDescriptor):
        def __init__(self, name, operations, coefficients, output_names):
            super(PReLuLayerResolver.Descriptor, self).__init__('PReLU', name, operations,
                                                                output_names=output_names)
            self.coefficients = coefficients

    def __init__(self):
        # PReLU expanded into a Relu/Abs/Sub/Mul/Mul/Add subgraph; node 'f'
        # is the Add that produces the layer output.
        self.sequence = GraphSequence([
            ConverterSequenceNode('a', ['Relu']),
            ConverterSequenceNode('b', ['Abs']),
            ConverterSequenceNode('c', ['Sub']),
            ConverterSequenceNode('d', ['Mul']),
            ConverterSequenceNode('e', ['Mul']),
            ConverterSequenceNode('f', ['Add']),  # output
            ConverterSequenceNode('unknown', ['?']),
            ConverterSequenceNode('alphas', ['?']),
            NonConsumableConverterSequenceNode('inputs', ['?'])
        ])
        for node_id, node_inputs in [('a', ['inputs']),
                                     ('b', ['inputs']),
                                     ('c', ['inputs', 'b']),
                                     ('d', ['alphas', 'c']),
                                     ('e', ['d', 'unknown']),
                                     ('f', ['a', 'e'])]:
            self.sequence.set_inputs(node_id, node_inputs)
        self.sequence.set_outputs(['f'])

    def resolve_layer(self, graph_matcher, graph_helper):
        descriptors = []
        for match in graph_matcher.match_sequence(self.sequence):
            coefficients = match['alphas']
            # The alpha coefficients must be resolvable to a constant tensor.
            if coefficients.type not in ['Identity', 'Const']:
                raise ConverterError(code_to_message.get_message('ERROR_TF_RESOLVE_PRELU_COEFF'))

            add_op = match['f']
            output_names = [str(match[node.identifier].outputs[0].name)
                            for node in self.sequence.output_nodes]
            descriptors.append(
                PReLuLayerResolver.Descriptor(str(add_op.name), match.consumed_nodes,
                                              graph_helper.evaluate_tensor_output(coefficients.outputs[0]),
                                              output_names=output_names))
        return descriptors
Example no. 13
0
    def __init__(self):
        super(GroupedConvolutionLayerResolver, self).__init__()

        # Variant 1: grouped convolution built from Split ops feeding a
        # repeated Conv2D subtree whose outputs are concatenated.
        conv_node = ConverterSequenceNode('conv_op', ['Conv2D'])
        self.sequence = GraphSequence([
            ConverterSequenceNode('a', ['Split']),
            ConverterSequenceNode('b', ['Split']),
            ConverterRepeatableSequenceTreeNode('repeatable_graph',
                                                conv_node, conv_node),
            ConverterSequenceNode('concat_op', ['Concat']),
            ConverterSequenceNode('weights', ['Identity', 'Const']),
            NonConsumableConverterSequenceNode('inputs', ['?']),
            NonConsumableConverterSequenceNode('concat_dim', ['Const']),
            NonConsumableConverterSequenceNode('split_dim1', ['Const']),
            ConverterSequenceNode('split_dim2', ['Const'])
        ])
        for node_id, node_inputs in [
                ('a', ['inputs', 'split_dim1']),
                ('b', ['weights', 'split_dim2']),
                ('repeatable_graph', ['a', 'b']),
                ('concat_op', ['repeatable_graph', 'concat_dim'])]:
            self.sequence.set_inputs(node_id, node_inputs)
        self.sequence.set_outputs(['concat_op'])

        # Variant 2: grouped convolution built from StridedSlice ops feeding a
        # repeated conv-plus-bias subtree whose outputs are concatenated.
        repeatable_sequence = GraphSequence([
            ConverterSequenceNode('ss', ['StridedSlice']),
            ConverterSequenceNode('ss_begin', ['Const']),
            ConverterSequenceNode('ss_end', ['Const']),
            ConverterSequenceNode('ss_strides', ['Const']),
            ConverterSequenceNode('conv', ['Conv2D']),
            ConverterSequenceNode('bias', ['BiasAdd']),
            ConverterSequenceNode('weights', ['Identity', 'Const']),
            ConverterSequenceNode('biases', ['Identity', 'Const'])
        ])
        repeatable_sequence.set_inputs('ss', ['ss_begin', 'ss_end', 'ss_strides'])
        repeatable_sequence.set_inputs('conv', ['ss', 'weights'])
        repeatable_sequence.set_inputs('bias', ['biases', 'conv'])
        repeatable_sequence.set_outputs(['bias'])

        self.sequence_with_strided_slice = GraphSequence([
            ConverterRepeatableSequenceTreeNode('repeatable_graph',
                                                tree_output_node=repeatable_sequence['bias'],
                                                tree_input_node=repeatable_sequence['ss']),
            ConverterSequenceNode('concat', ['Concat', 'ConcatV2']),
            ConverterSequenceNode('axis', ['Const']),
            NonConsumableConverterSequenceNode('input', ['?'])
        ])
        self.sequence_with_strided_slice.set_inputs('repeatable_graph', ['input'])
        self.sequence_with_strided_slice.set_inputs('concat', ['repeatable_graph', 'axis'])
        self.sequence_with_strided_slice.set_outputs(['concat'])
Example no. 14
0
class PadLayerResolver(LayerResolver, object):
    """Resolves TensorFlow Pad/PadV2 ops into Pad layer descriptors."""

    class Descriptor(LayerDescriptor):
        def __init__(self, name, nodes, paddings, mode, constant_values, output_names=None):
            super(PadLayerResolver.Descriptor, self).__init__('Pad', name, nodes, output_names=output_names)
            self.paddings = paddings                 # [input_rank, 2] before/after amounts
            self.mode = mode                         # padding mode constant
            self.constant_values = constant_values   # scalar fill value

    def __init__(self):
        # Pad/PadV2 with only a paddings tensor (implicit zero fill).
        self.sequence_with_zero_padding = GraphSequence([
            ConverterSequenceNode('root', ['Pad', 'PadV2']),
            ConverterSequenceNode('paddings', ['Const']),
            NonConsumableConverterSequenceNode('input', ['?']),
        ])
        self.sequence_with_zero_padding.set_inputs('root', ['input', 'paddings'])
        self.sequence_with_zero_padding.set_outputs(['root'])

        # Pad/PadV2 with an explicit constant fill value.
        self.sequence_with_const_padding = GraphSequence([
            ConverterSequenceNode('root', ['Pad', 'PadV2']),
            ConverterSequenceNode('paddings', ['Const']),
            ConverterSequenceNode('const_values', ['Const']),
            NonConsumableConverterSequenceNode('input', ['?']),
        ])
        self.sequence_with_const_padding.set_inputs('root', ['input', 'paddings', 'const_values'])
        self.sequence_with_const_padding.set_outputs(['root'])

        self.sequences = [self.sequence_with_zero_padding, self.sequence_with_const_padding]

    def resolve_layer(self, graph_matcher, graph_helper):
        """Return a Pad descriptor for every matched Pad/PadV2 op.

        Raises:
            ConverterError: if the paddings tensor is not shaped
                [input_rank, 2], or the fill value is not a scalar.
        """
        descriptors = []
        for sequence in self.sequences:
            for match in graph_matcher.match_sequence(sequence):
                pad_op = match['root']
                input_op = match['input']
                paddings_op = match['paddings']

                paddings_tensor = graph_helper.evaluate_tensor_output(paddings_op.outputs[0])
                paddings_shape = graph_helper.get_op_output_shape(paddings_op)

                # Paddings must supply a (before, after) pair per input dimension.
                input_rank = len(graph_helper.get_op_output_shape(input_op))
                if [input_rank, 2] != paddings_shape:
                    # NOTE(review): the message key spelling ("INVALUD") is kept
                    # as-is; it must match the key in the message table.
                    raise ConverterError(code_to_message.get_message(
                        'ERROR_TF_PAD_INVALUD_PADDINGS')(str([input_rank, 2]), str(paddings_shape)))

                if 'const_values' in match:
                    const_values_op = match['const_values']
                    const_values = graph_helper.evaluate_tensor_output(const_values_op.outputs[0])
                else:
                    # Plain Pad has no fill-value input; it pads with zeros.
                    const_values = 0.0

                if not np.isscalar(const_values):
                    raise ConverterError(code_to_message.get_message('ERROR_TF_PAD_CONSTANT_NOT_SCALAR'))

                pad_descriptor = PadLayerResolver.Descriptor(
                    str(pad_op.name), match.consumed_nodes, paddings_tensor,
                    snpe.modeltools.PADDING_CONSTANT, const_values,
                    output_names=[str(pad_op.outputs[0].name)])
                descriptors.append(pad_descriptor)

        return descriptors
Example no. 15
0
class EltWiseLayerResolver(LayerResolver, object):
    """Abstract resolver for binary element-wise TF ops.

    Subclasses supply the concrete layer type, the TF op type to match and
    the descriptor class to instantiate; they must also override
    _broadcast_tensor.
    """
    __metaclass__ = ABCMeta

    def __init__(self, layer_type, op_type, descriptor_class):
        super(EltWiseLayerResolver, self).__init__()
        self._layer_type = layer_type
        self._op_type = op_type
        self._descriptor_class = descriptor_class

        # Element-wise op with two arbitrary inputs.
        self.sequence = GraphSequence([
            ConverterSequenceNode('root', [self._op_type]),
            NonConsumableConverterSequenceNode('input1', ['?']),
            NonConsumableConverterSequenceNode('input2', ['?'])
        ])
        self.sequence.set_inputs('root', ['input1', 'input2'])
        self.sequence.set_outputs(['root'])

        # Same pattern, with the op output routed through an Identity.
        self.sequence_with_identity = GraphSequence([
            ConverterSequenceNode('root', [self._op_type]),
            ConverterSequenceNode('identity', ['Identity']),
            NonConsumableConverterSequenceNode('input1', ['?']),
            NonConsumableConverterSequenceNode('input2', ['?'])
        ])
        self.sequence_with_identity.set_inputs('identity', ['root'])
        self.sequence_with_identity.set_inputs('root', ['input1', 'input2'])
        self.sequence_with_identity.set_outputs(['identity'])

        # Element-wise op where one input is a constant (possibly behind
        # an Identity chain).
        self.sequence_with_const_input = GraphSequence([
            ConverterSequenceNode('root', [self._op_type]),
            NonConsumableConverterSequenceNode('const', ['Const', 'Identity']),
            NonConsumableConverterSequenceNode('other', ['?'])
        ])
        self.sequence_with_const_input.set_inputs('root', ['const', 'other'])
        self.sequence_with_const_input.set_outputs(['root'])

        # Constant-input pattern with the output routed through an Identity.
        self.sequence_with_const_input_and_identity = GraphSequence([
            ConverterSequenceNode('root', [self._op_type]),
            ConverterSequenceNode('identity', ['Identity']),
            NonConsumableConverterSequenceNode('const', ['Const', 'Identity']),
            NonConsumableConverterSequenceNode('other', ['?'])
        ])
        self.sequence_with_const_input_and_identity.set_inputs(
            'root', ['const', 'other'])
        self.sequence_with_const_input_and_identity.set_inputs(
            'identity', ['root'])
        self.sequence_with_const_input_and_identity.set_outputs(['identity'])

    def resolve_layer(self, graph_matcher, graph_helper):
        """Return element-wise descriptors for all four matched patterns.

        For constant-input matches, an additional ConstantLayerResolver
        descriptor is emitted so the constant operand becomes its own layer.
        """
        descriptors = []
        # Patterns whose inputs are both produced by other ops.
        non_const_input_sequences = [
            self.sequence_with_identity, self.sequence
        ]
        for sequence in non_const_input_sequences:
            for match in graph_matcher.match_sequence(sequence):
                eltwise_op = match['root']
                descriptor = self._descriptor_class(self._layer_type,
                                                    str(eltwise_op.name),
                                                    match.consumed_nodes)
                descriptors.append(descriptor)

        # Patterns where one input is a constant.
        const_input_sequences = [
            self.sequence_with_const_input_and_identity,
            self.sequence_with_const_input
        ]
        for sequence in const_input_sequences:
            for match in graph_matcher.match_sequence(sequence):
                eltwise_op = match['root']
                eltwise_descriptor = self._descriptor_class(
                    self._layer_type, str(eltwise_op.name),
                    match.consumed_nodes)
                descriptors.append(eltwise_descriptor)

                # Walk through any chain of Identity ops down to the real
                # Const, collecting the traversed ops so they are all marked
                # consumed by the constant layer.
                const_op = match['const']
                const_consumed_ops = [const_op]
                while const_op.type == 'Identity':
                    const_op = const_op.inputs[0].op
                    const_consumed_ops.append(const_op)

                # The chain may end at a non-Const op; in that case no
                # constant layer is emitted for this match.
                if const_op.type != 'Const':
                    continue

                const_tensor = graph_helper.evaluate_tensor_output(
                    const_op.outputs[0])
                eltwise_shape = graph_helper.get_op_output_shape(eltwise_op)

                # Do not broadcast the constant for Sub and RealDiv ops because they support
                # dynamic broadcasting during runtime. This if statement should be removed once
                # all runtimes support dynamic broadcasting.
                if self._op_type in ['Sub', 'RealDiv']:
                    eltwise_shape = graph_helper.get_op_output_shape(const_op)
                    if not eltwise_shape:
                        # An empty shape list is normalized to [1].
                        eltwise_shape = [1]
                else:
                    # Keep at most the trailing four dimensions, then drop the
                    # leading one (presumably batch — TODO confirm) when more
                    # than three remain.
                    if len(eltwise_shape) > 4:
                        eltwise_shape = eltwise_shape[-4:]
                    if len(eltwise_shape) > 3:
                        broadcast_shape = eltwise_shape[1:]
                    else:
                        broadcast_shape = eltwise_shape

                    # Pre-broadcast the constant when its shape differs from
                    # the element-wise output's broadcast shape.
                    if list(const_tensor.shape) != broadcast_shape:
                        const_tensor = self._broadcast_tensor(
                            const_tensor, broadcast_shape)

                const_descriptor = ConstantLayerResolver.Descriptor(
                    str(const_op.name), const_consumed_ops, const_tensor,
                    eltwise_shape, eltwise_descriptor)
                descriptors.append(const_descriptor)

        return descriptors

    def _broadcast_tensor(self, tensor, shape):
        # Abstract hook: concrete element-wise resolvers must override this.
        raise ConverterError(
            'ElementWise resolver must implement broadcast method.')
Example no. 16
0
class GroupedConvolutionLayerResolver(ConvolutionLayerResolver, object):
    """Resolves grouped convolutions expressed as Split -> N x Conv2D -> Concat.

    Both the input activations and the weights are Split, each pair of
    splits feeds one Conv2D in a repeated subtree, and the per-group
    outputs are concatenated back together.
    """

    class Descriptor(ConvolutionLayerResolver.Descriptor):
        # Inherits everything from the plain convolution descriptor.
        pass

    def __init__(self):
        super(GroupedConvolutionLayerResolver, self).__init__()
        # The repeated subtree is a single Conv2D per group.
        tree_output_node = ConverterSequenceNode('conv_op', ['Conv2D'])
        self.sequence = GraphSequence([
            ConverterSequenceNode('a', ['Split']),
            ConverterSequenceNode('b', ['Split']),
            ConverterRepeatableSequenceTreeNode('repeatable_graph',
                                                tree_output_node,
                                                tree_output_node),
            ConverterSequenceNode('concat_op', ['Concat']),
            ConverterSequenceNode('weights', ['Identity', 'Const']),
            NonConsumableConverterSequenceNode('inputs', ['?']),
            NonConsumableConverterSequenceNode('concat_dim', ['Const']),
            NonConsumableConverterSequenceNode('split_dim1', ['Const']),
            ConverterSequenceNode('split_dim2', ['Const'])
        ])
        # 'a' splits the input activations, 'b' splits the weights.
        self.sequence.set_inputs('a', ['inputs', 'split_dim1'])
        self.sequence.set_inputs('b', ['weights', 'split_dim2'])
        self.sequence.set_inputs('repeatable_graph', ['a', 'b'])
        self.sequence.set_inputs('concat_op',
                                 ['repeatable_graph', 'concat_dim'])
        self.sequence.set_outputs(['concat_op'])

    def resolve_layer(self, graph_matcher, graph_helper):
        """Match the grouped-convolution sequence and build descriptors.

        Convolution attributes (strides, padding) are read from the first
        Conv2D instance of the repeated subtree; an optional BiasAdd that
        consumes the Concat output is folded into the layer as its bias.
        """
        matches = graph_matcher.match_sequence(self.sequence)
        if len(matches) == 0:
            return []
        descriptors = []
        for match in matches:
            # 'conv_op_1' is the first instance of the repeated Conv2D node.
            conv_op = match['conv_op_1']
            strides = conv_op.get_attr(self.TF_ATTRIBUTE_STRIDES)
            padding = conv_op.get_attr(self.TF_ATTRIBUTE_PADDING)
            weights = match['weights']
            consumed_nodes = match.consumed_nodes
            output_op_nodes_names = [
                str(match[node.identifier].outputs[0].name)
                for node in self.sequence.output_nodes
            ]
            try:
                # If the Concat feeds a BiasAdd, consume it as the layer
                # bias and report the BiasAdd output name instead.
                concat_op = match['concat_op']
                concat_op_output_ops = graph_helper.get_op_outputs(concat_op)
                bias_op = GraphHelper.filter_single_op_by_type(
                    concat_op_output_ops, 'BiasAdd')
                # need to consume input of bias
                biases = self.get_biases(graph_helper, conv_op, bias_op)
                consumed_nodes.append(bias_op)
                output_op_nodes_names = [str(bias_op.outputs[0].name)]
            except OperationNotFoundError:
                # No bias in the graph: synthesize zeros sized to the
                # output-channel dimension of the weights.
                bias_op = None
                biases = np.zeros(weights.outputs[0].get_shape()[-1],
                                  dtype=np.float32)

            # Replace the weights op with its evaluated tensor value.
            weights = graph_helper.evaluate_tensor_output(weights.outputs[0])
            descriptor = ConvolutionLayerResolver.Descriptor(
                str(conv_op.name),
                consumed_nodes,
                conv_op,
                bias_op,
                strides,
                padding,
                weights,
                biases,
                output_names=output_op_nodes_names)
            descriptor.input_ops = [match['a'], match['b']]
            descriptors.append(descriptor)
        return descriptors
# Esempio n. 17
# 0
class StridedSliceLayerResolver(LayerResolver, object):
    """Resolves TF StridedSlice ops with constant begin/end/strides inputs
    into StridedSlice layer descriptors."""

    class Descriptor(LayerDescriptor):
        """Holds the slice parameters and mask attributes of one match."""

        def __init__(self,
                     name,
                     nodes,
                     input_shape,
                     begin,
                     end,
                     strides,
                     begin_mask,
                     end_mask,
                     ellipsis_mask,
                     new_axis_mask,
                     shrink_axis_mask,
                     output_names=None):
            super(StridedSliceLayerResolver.Descriptor,
                  self).__init__('StridedSlice',
                                 name,
                                 nodes,
                                 output_names=output_names)
            self.input_shape = input_shape
            self.begin = begin
            self.end = end
            self.strides = strides
            self.begin_mask = begin_mask
            self.end_mask = end_mask
            self.ellipsis_mask = ellipsis_mask
            self.new_axis_mask = new_axis_mask
            self.shrink_axis_mask = shrink_axis_mask

    def __init__(self):
        self.sequence = GraphSequence([
            ConverterSequenceNode('root', ['StridedSlice']),
            ConverterSequenceNode('begin', ['Const']),
            ConverterSequenceNode('end', ['Const']),
            ConverterSequenceNode('strides', ['Const']),
            NonConsumableConverterSequenceNode('input', ['?']),
        ])
        self.sequence.set_inputs('root', ['input', 'begin', 'end', 'strides'])
        self.sequence.set_outputs(['root'])

    def resolve_layer(self, graph_matcher, graph_helper):
        """Match StridedSlice sequences and return their descriptors.

        Raises:
            ConverterError: if the begin/end/strides tensors disagree in
                shape (they must all describe the same number of axes).
        """
        descriptors = []

        for match in graph_matcher.match_sequence(self.sequence):
            strided_slice_op = match['root']
            input_op = match['input']

            # Skip slices of constants — presumably handled by constant
            # folding elsewhere (NOTE(review): confirm against the
            # constant resolver).
            if input_op.type == "Const":
                continue

            begin_op = match['begin']
            end_op = match['end']
            strides_op = match['strides']

            begin_tensor = graph_helper.evaluate_tensor_output(
                begin_op.outputs[0])
            end_tensor = graph_helper.evaluate_tensor_output(end_op.outputs[0])
            strides_tensor = graph_helper.evaluate_tensor_output(
                strides_op.outputs[0])
            # Dropped the original unused `input_tensor` evaluation: it
            # forced a potentially expensive tensor evaluation whose
            # result was never read.

            begin_shape = graph_helper.get_op_output_shape(begin_op)
            end_shape = graph_helper.get_op_output_shape(end_op)
            strides_shape = graph_helper.get_op_output_shape(strides_op)
            input_shape = graph_helper.get_op_output_shape(input_op)

            if begin_shape != end_shape or begin_shape != strides_shape:
                raise ConverterError(
                    code_to_message.get_message(
                        'ERROR_TF_STRIDED_SLICE_SHAPE_MISMATCH'))

            begin_mask = strided_slice_op.get_attr("begin_mask")
            end_mask = strided_slice_op.get_attr("end_mask")
            ellipsis_mask = strided_slice_op.get_attr("ellipsis_mask")
            new_axis_mask = strided_slice_op.get_attr("new_axis_mask")
            shrink_axis_mask = strided_slice_op.get_attr("shrink_axis_mask")

            consumed_nodes = match.consumed_nodes
            # Renamed from the misleading `pad_descriptor` (a copy-paste
            # remnant from a pad resolver).
            slice_descriptor = StridedSliceLayerResolver.Descriptor(
                str(strided_slice_op.name),
                consumed_nodes,
                input_shape,
                begin_tensor,
                end_tensor,
                strides_tensor,
                begin_mask,
                end_mask,
                ellipsis_mask,
                new_axis_mask,
                shrink_axis_mask,
                output_names=[str(strided_slice_op.outputs[0].name)])
            descriptors.append(slice_descriptor)

        return descriptors
# Esempio n. 18
# 0
    NonConsumableConverterSequenceNode('stub_39', ['?']),
    NonConsumableConverterSequenceNode('stub_40', ['?']),
    NonConsumableConverterSequenceNode('stub_41', ['?']),
    NonConsumableConverterSequenceNode('stub_42', ['?']),
    NonConsumableConverterSequenceNode('stub_43', ['?']),
    NonConsumableConverterSequenceNode('stub_44', ['?']),
    NonConsumableConverterSequenceNode('stub_45', ['?']),
    NonConsumableConverterSequenceNode('stub_46', ['?']),
    NonConsumableConverterSequenceNode('stub_47', ['?']),
    NonConsumableConverterSequenceNode('stub_48', ['?']),
    NonConsumableConverterSequenceNode('stub_49', ['?']),
    NonConsumableConverterSequenceNode('stub_50', ['?']),
    NonConsumableConverterSequenceNode('stub_51', ['?']),
    NonConsumableConverterSequenceNode('stub_52', ['?']),
])
box_decoder_sequence.set_inputs('Postprocessor/Decode/add_3', ['Postprocessor/Decode/add_1', 'Postprocessor/Decode/div_7'])
box_decoder_sequence.set_inputs('Postprocessor/Decode/mul_3', ['Postprocessor/Decode/div_1', 'Postprocessor/Decode/get_center_coordinates_and_sizes/sub'])
box_decoder_sequence.set_inputs('Postprocessor/Decode/add_2', ['Postprocessor/Decode/add', 'Postprocessor/Decode/div_6'])
box_decoder_sequence.set_inputs('Postprocessor/Decode/div_6', ['Postprocessor/Decode/mul_1', 'stub_49'])
box_decoder_sequence.set_inputs('Postprocessor/Decode/div_3', ['Postprocessor/Decode/unstack', 'stub_41'])
box_decoder_sequence.set_inputs('Postprocessor/Decode/sub_1', ['Postprocessor/Decode/add_1', 'Postprocessor/Decode/div_5'])
box_decoder_sequence.set_inputs('Postprocessor/Decode/sub', ['Postprocessor/Decode/add', 'Postprocessor/Decode/div_4'])
box_decoder_sequence.set_inputs('Postprocessor/Decode/unstack', ['Postprocessor/Decode/transpose'])
box_decoder_sequence.set_inputs('Postprocessor/Decode/stack', ['Postprocessor/Decode/sub', 'Postprocessor/Decode/sub_1', 'Postprocessor/Decode/add_2', 'Postprocessor/Decode/add_3'])
box_decoder_sequence.set_inputs('Postprocessor/Decode/transpose_1', ['Postprocessor/Decode/stack', 'stub_52'])
box_decoder_sequence.set_inputs('Postprocessor/Decode/div_5', ['Postprocessor/Decode/mul', 'stub_50'])
box_decoder_sequence.set_inputs('Postprocessor/Decode/div', ['Postprocessor/Decode/unstack', 'stub_47'])
box_decoder_sequence.set_inputs('Postprocessor/Decode/Exp', ['Postprocessor/Decode/div_3'])
box_decoder_sequence.set_inputs('Postprocessor/Decode/get_center_coordinates_and_sizes/add_1', ['Postprocessor/Decode/get_center_coordinates_and_sizes/unstack', 'Postprocessor/Decode/get_center_coordinates_and_sizes/div_1'])
box_decoder_sequence.set_inputs('Postprocessor/Decode/get_center_coordinates_and_sizes/div', ['Postprocessor/Decode/get_center_coordinates_and_sizes/sub_1', 'stub_46'])
box_decoder_sequence.set_inputs('Postprocessor/Decode/div_2', ['Postprocessor/Decode/unstack', 'stub_42'])
# Esempio n. 19
# 0
class EltWiseLayerResolver(LayerResolver, object):
    """Abstract base resolver for binary element-wise ops.

    Subclasses provide the SNPE layer type, the TF op type to match, and
    the descriptor class to instantiate; they must also implement
    `_broadcast_tensor` for constant inputs whose shape does not match
    the element-wise output shape.
    """
    __metaclass__ = ABCMeta

    def __init__(self, layer_type, op_type, descriptor_class):
        super(EltWiseLayerResolver, self).__init__()
        self._layer_type = layer_type
        self._op_type = op_type
        self._descriptor_class = descriptor_class

        # Pattern 1: a bare element-wise op.
        self.sequence = GraphSequence(
            [ConverterSequenceNode('root', [self._op_type])])
        self.sequence.set_outputs(['root'])

        # Pattern 2: element-wise op followed by an Identity pass-through.
        self.sequence_with_identity = GraphSequence([
            ConverterSequenceNode('root', [self._op_type]),
            ConverterSequenceNode('identity', ['Identity']),
        ])
        self.sequence_with_identity.set_inputs('identity', ['root'])
        self.sequence_with_identity.set_outputs(['identity'])

        # Pattern 3: one input is a constant (possibly behind an Identity).
        self.sequence_with_const_input = GraphSequence([
            ConverterSequenceNode('root', [self._op_type]),
            NonConsumableConverterSequenceNode('const', ['Const', 'Identity']),
            NonConsumableConverterSequenceNode('other', ['?']),
        ])
        self.sequence_with_const_input.set_inputs('root', ['const', 'other'])
        self.sequence_with_const_input.set_outputs(['root'])

        # Pattern 4: constant input plus a trailing Identity.
        self.sequence_with_const_input_and_identity = GraphSequence([
            ConverterSequenceNode('root', [self._op_type]),
            ConverterSequenceNode('identity', ['Identity']),
            NonConsumableConverterSequenceNode('const', ['Const']),
            NonConsumableConverterSequenceNode('other', ['?']),
        ])
        self.sequence_with_const_input_and_identity.set_inputs(
            'root', ['const', 'other'])
        self.sequence_with_const_input_and_identity.set_inputs(
            'identity', ['root'])
        self.sequence_with_const_input_and_identity.set_outputs(['identity'])

    def resolve_layer(self, graph_matcher, graph_helper):
        """Return descriptors for every matched element-wise pattern."""
        descriptors = []

        # Matches without a constant input: just wrap the op itself.
        for sequence in (self.sequence_with_identity, self.sequence):
            for graph_match in graph_matcher.match_sequence(sequence):
                op = graph_match['root']
                descriptors.append(self._descriptor_class(
                    self._layer_type, str(op.name),
                    graph_match.consumed_nodes))

        # Matches with a constant input: additionally emit a constant
        # descriptor, broadcast to (at most rank-3) output shape.
        for sequence in (self.sequence_with_const_input_and_identity,
                         self.sequence_with_const_input):
            for graph_match in graph_matcher.match_sequence(sequence):
                op = graph_match['root']
                const_op = graph_match['const']
                const_tensor = graph_helper.evaluate_tensor_output(
                    const_op.outputs[0])
                target_shape = graph_helper.get_op_output_shape(op)
                target_shape = expand_to_rank(target_shape, 3)
                if len(target_shape) > 3:
                    target_shape = target_shape[-3:]
                if list(const_tensor.shape) != target_shape:
                    const_tensor = self._broadcast_tensor(
                        const_tensor, target_shape)

                eltwise_descriptor = self._descriptor_class(
                    self._layer_type, str(op.name),
                    graph_match.consumed_nodes)
                descriptors.append(eltwise_descriptor)
                descriptors.append(ConstantLayerResolver.Descriptor(
                    str(const_op.name), [const_op], const_tensor,
                    target_shape, eltwise_descriptor))

        return descriptors

    def _broadcast_tensor(self, tensor, shape):
        """Abstract hook: broadcast `tensor` to `shape` (subclass duty)."""
        raise ConverterError('ElementWise resolver must implement broadcast method.')
# Esempio n. 20
# 0
class ChannelShuffleLayerResolver(LayerResolver, object):
    """Resolves the Reshape -> Transpose -> Reshape channel-shuffle idiom
    (as used by ShuffleNet-style models) into a ChannelShuffle layer."""

    class Descriptor(LayerDescriptor):
        """Descriptor holding the group count for the shuffle."""

        def __init__(self, name, nodes, groups, output_names=None):
            super(ChannelShuffleLayerResolver.Descriptor,
                  self).__init__('ChannelShuffle',
                                 name,
                                 nodes,
                                 output_names=output_names)
            self.groups = groups
            self.shuffle_type = snpe.modeltools.CHANNEL_SHUFFLE_GROUPED

    def __init__(self):
        self.sequence = GraphSequence([
            ConverterSequenceNode('reshape_out', ['Reshape']),
            ConverterSequenceNode('transpose', ['Transpose']),
            ConverterSequenceNode('reshape_in', ['Reshape']),
            ConverterSequenceNode('shape_in', ['Const']),
            ConverterSequenceNode('order', ['Const']),
            ConverterSequenceNode('shape_out', ['Const']),
            NonConsumableConverterSequenceNode('input', ['?']),
        ])
        self.sequence.set_inputs('reshape_out', ['shape_out', 'transpose'])
        self.sequence.set_inputs('transpose', ['order', 'reshape_in'])
        self.sequence.set_inputs('reshape_in', ['shape_in', 'input'])
        self.sequence.set_outputs(['reshape_out'])

    def resolve_layer(self, graph_matcher, graph_helper):
        """Match channel-shuffle subgraphs, validating the three shapes.

        Non-matching reshape/transpose chains are skipped silently so other
        resolvers can claim them.
        """
        descriptors = []
        for match in graph_matcher.match_sequence(self.sequence):
            input_op = match['input']
            reshape_out_op = match['reshape_out']
            reshape_in_op = match['reshape_in']
            transpose_op = match['transpose']

            input_shape = graph_helper.get_op_output_shape(input_op)
            reshape_in_shape = graph_helper.get_op_output_shape(reshape_in_op)
            transpose_shape = graph_helper.get_op_output_shape(transpose_op)
            reshape_out_shape = graph_helper.get_op_output_shape(
                reshape_out_op)

            if len(reshape_in_shape) < 2:
                continue

            num_channels = input_shape[-1]
            num_groups = reshape_in_shape[-2]

            # Validate divisibility BEFORE dividing (the original divided
            # first, and `/` is float division under Python 3, which made
            # the shape comparisons below rely on float==int equality).
            # Also guard num_groups == 0 to avoid ZeroDivisionError.
            if num_groups == 0 or num_channels % num_groups != 0:
                continue
            num_channels_prime = num_channels // num_groups

            is_channel_shuffle = True
            # first reshape must divide the channel dimension to [num_groups, num_channels_prime]
            is_channel_shuffle &= reshape_in_shape == input_shape[:-1] + [
                num_groups, num_channels_prime
            ]
            # transpose must permute the last two dimensions only
            is_channel_shuffle &= transpose_shape == input_shape[:-1] + [
                num_channels_prime, num_groups
            ]
            # output shape must be equal to the input shape
            is_channel_shuffle &= reshape_out_shape == input_shape

            if not is_channel_shuffle:
                continue

            consumed_nodes = match.consumed_nodes
            descriptors.append(
                ChannelShuffleLayerResolver.Descriptor(
                    str(reshape_out_op.name),
                    consumed_nodes,
                    num_groups,
                    output_names=[str(reshape_out_op.outputs[0].name)]))

        return descriptors
# Esempio n. 21
# 0
class ScaledBatchNormLayerResolver(BatchNormLayerResolver):
    """Resolves batch normalization expressed as explicit arithmetic
    (Add/Rsqrt/Mul/Sub nodes) rather than a fused batch-norm op.

    Per the node wiring below: c = rsqrt(variance + epsilon) * scale,
    d = input * c, and the output g combines d with a Sub of (mean * c)
    and beta.
    """

    def __init__(self):
        # Node roles: a = variance + epsilon, b = rsqrt(a), c = b * scale,
        # d = input * c, e = mean * c, f = Sub(e, beta), g = d + f (output).
        self.sequence = GraphSequence([
            ConverterSequenceNode('a', ['Add']),
            ConverterSequenceNode('b', ['Rsqrt']),
            ConverterSequenceNode('c', ['Mul']),
            ConverterSequenceNode('d', ['Mul']),
            ConverterSequenceNode('e', ['Mul']),
            ConverterSequenceNode('f', ['Sub']),
            ConverterSequenceNode('g', ['Add']),
            ConverterSequenceNode('scale', ['?']),
            NonConsumableConverterSequenceNode('input', ['?']),
            ConverterSequenceNode('mean', ['?']),
            ConverterSequenceNode('beta', ['?']),
            ConverterSequenceNode('variance', ['?']),
            ConverterSequenceNode('epsilon', ['?'])
        ])
        self.sequence.set_inputs('a', ['variance', 'epsilon'])
        self.sequence.set_inputs('b', ['a'])
        self.sequence.set_inputs('c', ['b', 'scale'])
        self.sequence.set_inputs('d', ['c', 'input'])
        self.sequence.set_inputs('e', ['c', 'mean'])
        self.sequence.set_inputs('f', ['e', 'beta'])
        self.sequence.set_inputs('g', ['d', 'f'])
        self.sequence.set_outputs(['g'])

    @staticmethod
    def _evaluate_checked(graph_helper, op, allowed_types, error_key):
        """Validate `op.type` against `allowed_types` and return the
        evaluated value of its first output.

        Raises:
            ConverterError: with the message looked up by `error_key` when
                the op type is not one of `allowed_types`.
        """
        if op.type not in allowed_types:
            raise ConverterError(code_to_message.get_message(error_key))
        return graph_helper.evaluate_tensor_output(op.outputs[0])

    def resolve_layer(self, graph_matcher, graph_helper):
        """Match the scaled batch-norm pattern and build descriptors.

        The five constant inputs were validated/evaluated by five
        copy-pasted stanzas in the original; that logic is factored into
        `_evaluate_checked` (scale additionally accepts a Fill op).
        """
        matches = graph_matcher.match_sequence(self.sequence)
        if len(matches) == 0:
            return []
        descriptors = []
        for match in matches:
            variance = self._evaluate_checked(
                graph_helper, match['variance'], ['Identity', 'Const'],
                'ERROR_TF_BATCHNORM_RESOLVE_VARIANCE')
            epsilon = self._evaluate_checked(
                graph_helper, match['epsilon'], ['Identity', 'Const'],
                'ERROR_TF_BATCHNORM_RESOLVE_EPSILON')
            scale = self._evaluate_checked(
                graph_helper, match['scale'], ['Identity', 'Const', 'Fill'],
                'ERROR_TF_BATCHNORM_RESOLVE_SCALE')
            mean = self._evaluate_checked(
                graph_helper, match['mean'], ['Identity', 'Const'],
                'ERROR_TF_BATCHNORM_RESOLVE_MEAN')
            beta = self._evaluate_checked(
                graph_helper, match['beta'], ['Identity', 'Const'],
                'ERROR_TF_BATCHNORM_RESOLVE_BETA')

            output_op_nodes_names = [
                str(match[node.identifier].outputs[0].name)
                for node in self.sequence.output_nodes
            ]
            descriptors.append(
                BatchNormLayerResolver.Descriptor(
                    str(match['d'].name),
                    match.consumed_nodes,
                    bn_mul_op=match['d'],
                    mean=mean,
                    variance=variance,
                    epsilon=epsilon,
                    scale=scale,
                    beta=beta,
                    output_names=output_op_nodes_names))
        return descriptors
# Esempio n. 22
# 0
#  Confidential and Proprietary - Qualcomm Technologies, Inc.
#
#=============================================================================
from converters.tensorflow.graph_matcher import (
    ConverterSequenceNode,
    NonConsumableConverterSequenceNode,
    GraphSequence
)


# Matches a RealDiv op with two arbitrary (non-consumed) inputs.
real_div_sequence = GraphSequence([
    ConverterSequenceNode('root', ['RealDiv']),
    NonConsumableConverterSequenceNode('a', ['?']),
    NonConsumableConverterSequenceNode('b', ['?'])
])
real_div_sequence.set_inputs('root', ['a', 'b'])
real_div_sequence.set_outputs(['root'])

# Matches a lone Identity pass-through node.
identity_sequence = GraphSequence([
    ConverterSequenceNode('root', ['Identity']),
    NonConsumableConverterSequenceNode('any', ['?']),
])
identity_sequence.set_inputs('root', ['any'])
identity_sequence.set_outputs(['root'])

# Matches a PlaceholderWithDefault fed by any node.
placeholder_with_default_sequence = GraphSequence([
    ConverterSequenceNode('root', ['PlaceholderWithDefault']),
    NonConsumableConverterSequenceNode('any', ['?']),
])
placeholder_with_default_sequence.set_inputs('root', ['any'])
placeholder_with_default_sequence.set_outputs(['root'])
# Esempio n. 23
# 0
    def __init__(self):
        """Build the three graph patterns that express PReLU in TF graphs."""
        # Pattern: relu(x) + alphas * (x - |x|) * unknown.
        # 'unknown' is an extra multiplicand in this formulation —
        # presumably a constant 0.5; TODO confirm against real graphs.
        sequence_prelu = GraphSequence([
            ConverterSequenceNode('a', ['Relu']),
            ConverterSequenceNode('b', ['Abs']),
            ConverterSequenceNode('c', ['Sub']),
            ConverterSequenceNode('d', ['Mul']),
            ConverterSequenceNode('e', ['Mul']),
            ConverterSequenceNode('f', ['Add']),  # output
            ConverterSequenceNode('unknown', ['?']),
            ConverterSequenceNode('alphas', ['?']),
            NonConsumableConverterSequenceNode('inputs', ['?'])
        ])
        sequence_prelu.set_inputs('a', ['inputs'])
        sequence_prelu.set_inputs('b', ['inputs'])
        sequence_prelu.set_inputs('c', ['inputs', 'b'])
        sequence_prelu.set_inputs('d', ['alphas', 'c'])
        sequence_prelu.set_inputs('e', ['d', 'unknown'])
        sequence_prelu.set_inputs('f', ['a', 'e'])
        sequence_prelu.set_outputs(['f'])

        # Pattern: relu(x) + relu(-x) * (-alphas).
        sequence_prelu_negative_alpha = GraphSequence([
            ConverterSequenceNode('a', ['Relu']),
            ConverterSequenceNode('b', ['Neg']),
            ConverterSequenceNode('c', ['Neg']),
            ConverterSequenceNode('d', ['Relu']),
            ConverterSequenceNode('e', ['Mul']),
            ConverterSequenceNode('f', ['Add']),  # output
            ConverterSequenceNode('alphas', ['?']),
            NonConsumableConverterSequenceNode('inputs', ['?'])
        ])
        sequence_prelu_negative_alpha.set_inputs('a', ['inputs'])
        sequence_prelu_negative_alpha.set_inputs('b', ['inputs'])
        sequence_prelu_negative_alpha.set_inputs('c', ['alphas'])
        sequence_prelu_negative_alpha.set_inputs('d', ['b'])
        sequence_prelu_negative_alpha.set_inputs('e', ['d', 'c'])
        sequence_prelu_negative_alpha.set_inputs('f', ['a', 'e'])
        sequence_prelu_negative_alpha.set_outputs(['f'])

        # Pattern: relu(x) + (-relu(-x)) * alphas.
        sequence_prelu_negative_relu = GraphSequence([
            ConverterSequenceNode('relu_pos', ['Relu']),
            ConverterSequenceNode('neg_1', ['Neg']),
            ConverterSequenceNode('neg_2', ['Neg']),
            ConverterSequenceNode('relu_neg', ['Relu']),
            ConverterSequenceNode('mul', ['Mul']),
            ConverterSequenceNode('f', ['Add']),  # output
            ConverterSequenceNode('alphas', ['?']),
            NonConsumableConverterSequenceNode('inputs', ['?'])
        ])
        sequence_prelu_negative_relu.set_inputs('relu_pos', ['inputs'])
        sequence_prelu_negative_relu.set_inputs('neg_1', ['inputs'])
        sequence_prelu_negative_relu.set_inputs('relu_neg', ['neg_1'])
        sequence_prelu_negative_relu.set_inputs('neg_2', ['relu_neg'])
        sequence_prelu_negative_relu.set_inputs('mul', ['neg_2', 'alphas'])
        sequence_prelu_negative_relu.set_inputs('f', ['relu_pos', 'mul'])
        sequence_prelu_negative_relu.set_outputs(['f'])

        self.sequences = [
            sequence_prelu, sequence_prelu_negative_alpha,
            sequence_prelu_negative_relu
        ]
# Esempio n. 24
# 0
class FullyConnectedLayerResolver(LayerResolver, object):
    """Resolves MatMul followed by a bias addition into a FullyConnected
    layer."""

    class Descriptor(LayerDescriptor):
        """Carries the matched ops and their evaluated weight/bias
        tensors."""

        def __init__(self,
                     name,
                     nodes,
                     matmul_op,
                     bias_op,
                     weights,
                     biases,
                     output_names=None):
            super(FullyConnectedLayerResolver.Descriptor, self).__init__(
                'FullyConnected', name, nodes, output_names=output_names)
            self.matmul_op = matmul_op
            self.bias_op = bias_op
            self.weights = weights
            self.biases = biases

    def __init__(self):
        # MatMul(inputs, weights) -> BiasAdd/Add(matmul, biases);
        # the bias op is the sequence output.
        sequence = GraphSequence([
            ConverterSequenceNode('matmul_op', ['MatMul']),
            ConverterSequenceNode('bias_op', ['BiasAdd', 'Add']),  # output
            NonConsumableConverterSequenceNode('biases',
                                               ['Identity', 'Const']),
            NonConsumableConverterSequenceNode('weights',
                                               ['Identity', 'Const']),
            NonConsumableConverterSequenceNode('inputs', ['?'])
        ])
        sequence.set_inputs('matmul_op', ['inputs', 'weights'])
        sequence.set_inputs('bias_op', ['matmul_op', 'biases'])
        sequence.set_outputs(['bias_op'])
        self.sequence = sequence

    def resolve_layer(self, graph_matcher, graph_helper):
        """Return a descriptor for every matched fully-connected pattern."""
        descriptors = []
        for match in graph_matcher.match_sequence(self.sequence):
            matmul_op = match['matmul_op']

            weights_op = match['weights']
            if weights_op.type not in ['Identity', 'Const']:
                raise ConverterError(
                    code_to_message.get_message(
                        'ERROR_TF_MATMUL_RESOLVE_WEIGHTS')(matmul_op.name))
            weights = graph_helper.evaluate_tensor_output(
                weights_op.outputs[0])

            bias_add_op = match['bias_op']
            biases_op = match['biases']
            # Defensive check; the sequence already restricts these types.
            if biases_op.type not in ['Identity', 'Const']:
                raise ConverterError(
                    code_to_message.get_message('ERROR_TF_MATMUL_RESOLVE_BIAS')
                    (bias_add_op.name))
            biases = graph_helper.evaluate_tensor_output(biases_op.outputs[0])

            output_names = [
                str(match[node.identifier].outputs[0].name)
                for node in self.sequence.output_nodes
            ]
            descriptors.append(
                FullyConnectedLayerResolver.Descriptor(
                    str(matmul_op.name),
                    match.consumed_nodes,
                    matmul_op,
                    bias_add_op,
                    weights,
                    biases,
                    output_names=output_names))
        return descriptors
# Esempio n. 25
# 0
    def __init__(self):
        """Build the reshape patterns: a lone reshape-like op, and the
        shape-computation subgraph TF emits for flatten-style reshapes."""
        # Any single Reshape/Squeeze/ExpandDims op.
        sequence_reshape = GraphSequence([ConverterSequenceNode('root', ['Reshape', 'Squeeze', 'ExpandDims'])])
        sequence_reshape.set_outputs(['root'])

        # Flatten idiom: the target shape is computed at runtime from the
        # input's Shape via two Slices, a Prod over the trailing dims,
        # ExpandDims, and a ConcatV2, then fed to the final Reshape.
        sequence_flatten = GraphSequence([
            NonConsumableConverterSequenceNode('input', ['?']),
            ConverterSequenceNode('shape', ['Shape']),
            ConverterSequenceNode('slice_1', ['Slice']),
            ConverterSequenceNode('const_1', ['Const']),
            ConverterSequenceNode('const_2', ['Const']),
            ConverterSequenceNode('slice_2', ['Slice']),
            ConverterSequenceNode('const_3', ['Const']),
            ConverterSequenceNode('const_4', ['Const']),
            ConverterSequenceNode('prod', ['Prod']),
            ConverterSequenceNode('const_5', ['Const']),
            ConverterSequenceNode('expand_dims', ['ExpandDims']),
            ConverterSequenceNode('const_6', ['Const']),
            ConverterSequenceNode('concat', ['ConcatV2']),
            ConverterSequenceNode('const_7', ['Const']),
            ConverterSequenceNode('root', ['Reshape']),
        ])
        sequence_flatten.set_inputs('shape', ['input'])
        sequence_flatten.set_inputs('slice_1', ['shape', 'const_1', 'const_2'])
        sequence_flatten.set_inputs('slice_2', ['shape', 'const_3', 'const_4'])
        sequence_flatten.set_inputs('prod', ['slice_2', 'const_5'])
        sequence_flatten.set_inputs('expand_dims', ['prod', 'const_6'])
        sequence_flatten.set_inputs('concat', ['slice_1', 'expand_dims', 'const_7'])
        sequence_flatten.set_inputs('root', ['input', 'concat'])
        sequence_flatten.set_outputs(['root'])

        self.sequences = [sequence_reshape, sequence_flatten]
# Esempio n. 26
# 0
class PermuteLayerResolver(LayerResolver, object):
    """Resolves TensorFlow Transpose subgraphs into Permute layer descriptors.

    Two patterns are matched: a Transpose with an explicit Const order
    tensor, and one whose order is computed implicitly from the input's
    rank (Rank/Range/Sub subgraph, as produced by reverse-order transposes).
    """

    class Descriptor(LayerDescriptor):
        def __init__(self, name, nodes, order, output_names=None):
            """Create a Permute descriptor.

            :param name: layer name (transpose op name).
            :param nodes: graph nodes consumed by this layer.
            :param order: evaluated permutation order tensor.
            :param output_names: optional output tensor names.
            """
            super(PermuteLayerResolver.Descriptor, self).__init__('Permute', name, nodes, output_names=output_names)
            self.order = order

    def __init__(self):
        self.sequence_with_explicit_order = GraphSequence([
            ConverterSequenceNode('root', ['Transpose']),
            ConverterSequenceNode('order', ['Const']),
            NonConsumableConverterSequenceNode('input', ['?']),
        ])
        self.sequence_with_explicit_order.set_inputs('root', ['input', 'order'])
        self.sequence_with_explicit_order.set_outputs(['root'])

        self.sequence_with_implicit_order = GraphSequence([
            ConverterSequenceNode('root', ['Transpose']),
            ConverterSequenceNode('order', ['Sub']),
            ConverterSequenceNode('a', ['Sub']),
            ConverterSequenceNode('b', ['Const']),
            ConverterSequenceNode('c', ['Range']),
            ConverterSequenceNode('d', ['Const']),
            ConverterSequenceNode('e', ['Const']),
            ConverterSequenceNode('f', ['Rank']),
            NonConsumableConverterSequenceNode('input', ['?'])
        ])

        self.sequence_with_implicit_order.set_inputs('root', ['input', 'order'])
        self.sequence_with_implicit_order.set_inputs('order', ['a', 'c'])
        self.sequence_with_implicit_order.set_inputs('a', ['b', 'f'])
        self.sequence_with_implicit_order.set_inputs('c', ['d', 'e', 'f'])
        self.sequence_with_implicit_order.set_inputs('f', ['input'])
        self.sequence_with_implicit_order.set_outputs(['root'])

        self.sequences = [self.sequence_with_explicit_order, self.sequence_with_implicit_order]

    def resolve_layer(self, graph_matcher, graph_helper):
        """Match Transpose patterns and return Permute descriptors.

        :raises ConverterError: if the evaluated order tensor is not a
            rank-1 permutation of the input's dimensions.
        """
        descriptors = []
        for sequence in self.sequences:
            for match in graph_matcher.match_sequence(sequence):
                transpose_op = match['root']
                input_op = match['input']
                order_op = match['order']

                order_tensor = graph_helper.evaluate_tensor_output(order_op.outputs[0])

                input_shape = graph_helper.get_op_output_shape(input_op)
                order_shape = graph_helper.get_op_output_shape(order_op)

                input_rank = len(input_shape)
                order_rank = len(order_shape)
                # Validate with explicit checks rather than assert: asserts
                # are stripped under "python -O", which would silently skip
                # this validation. The order must be rank-1 and contain every
                # input dimension index exactly once.
                if order_rank != 1 or any(d not in order_tensor for d in range(input_rank)):
                    raise ConverterError(code_to_message.get_message(
                        'ERROR_TF_PERMUTE_INVALID_ORDER_TENSOR')(str(order_tensor)))

                consumed_nodes = match.consumed_nodes
                permute_descriptor = PermuteLayerResolver.Descriptor(
                    str(transpose_op.name), consumed_nodes, order_tensor,
                    output_names=[str(transpose_op.outputs[0].name)])
                # append, not extend([...]): one descriptor per match.
                descriptors.append(permute_descriptor)

        return descriptors
Esempio n. 27
0
    ConverterSequenceNode('rnn/rnn/multi_rnn_cell/cell_0/cell_0/basic_lstm_cell/mul_1', ['Mul']),
    ConverterSequenceNode('rnn/rnn/multi_rnn_cell/cell_0/cell_0/basic_lstm_cell/mul', ['Mul']),
    ConverterSequenceNode('rnn/rnn/multi_rnn_cell/cell_0/cell_0/basic_lstm_cell/add_1', ['Add']),
    ConverterSequenceNode('rnn/rnn/multi_rnn_cell/cell_0/cell_0/basic_lstm_cell/Sigmoid_2', ['Sigmoid']),
    ConverterSequenceNode('rnn/rnn/multi_rnn_cell/cell_0/cell_0/basic_lstm_cell/Tanh_1', ['Tanh']),
    ConverterSequenceNode('rnn/rnn/multi_rnn_cell/cell_0/cell_0/basic_lstm_cell/mul_2', ['Mul']),
    NonConsumableConverterSequenceNode('stub_16', ['?']),
    NonConsumableConverterSequenceNode('stub_17', ['?']),
    NonConsumableConverterSequenceNode('stub_18', ['?']),
    NonConsumableConverterSequenceNode('stub_19', ['?']),
    NonConsumableConverterSequenceNode('stub_20', ['?']),
    NonConsumableConverterSequenceNode('stub_21', ['?']),
    NonConsumableConverterSequenceNode('stub_22', ['?']),
    NonConsumableConverterSequenceNode('stub_23', ['?']),
])
cell_sequence.set_inputs('rnn/multi_rnn_cell/cell_0/basic_lstm_cell/kernel/read', ['stub_16'])
cell_sequence.set_inputs('rnn/rnn/multi_rnn_cell/cell_0/cell_0/basic_lstm_cell/Tanh_1',
                              ['rnn/rnn/multi_rnn_cell/cell_0/cell_0/basic_lstm_cell/add_1'])
cell_sequence.set_inputs('rnn/rnn/multi_rnn_cell/cell_0/cell_0/basic_lstm_cell/Sigmoid',
                              ['rnn/rnn/multi_rnn_cell/cell_0/cell_0/basic_lstm_cell/add'])
cell_sequence.set_inputs('rnn/rnn/multi_rnn_cell/cell_0/cell_0/basic_lstm_cell/basic_lstm_cell/concat',
                              ['stub_17', 'stub_18', 'stub_19'])
cell_sequence.set_inputs('rnn/rnn/multi_rnn_cell/cell_0/cell_0/basic_lstm_cell/Tanh',
                              ['rnn/rnn/multi_rnn_cell/cell_0/cell_0/basic_lstm_cell/split'])
cell_sequence.set_inputs('rnn/rnn/multi_rnn_cell/cell_0/cell_0/basic_lstm_cell/add',
                              ['rnn/rnn/multi_rnn_cell/cell_0/cell_0/basic_lstm_cell/split', 'stub_22'])
cell_sequence.set_inputs('rnn/rnn/multi_rnn_cell/cell_0/cell_0/basic_lstm_cell/basic_lstm_cell/MatMul',
                              ['rnn/rnn/multi_rnn_cell/cell_0/cell_0/basic_lstm_cell/basic_lstm_cell/concat',
                               'rnn/multi_rnn_cell/cell_0/basic_lstm_cell/kernel/read'])
cell_sequence.set_inputs('rnn/rnn/multi_rnn_cell/cell_0/cell_0/basic_lstm_cell/mul',
                              ['stub_23', 'rnn/rnn/multi_rnn_cell/cell_0/cell_0/basic_lstm_cell/Sigmoid'])
Esempio n. 28
0
class DilatedDepthwiseConvolutionLayerResolver(ConvolutionLayerResolver, object):
    """Resolves dilated depthwise convolutions expressed as a
    SpaceToBatchND -> DepthwiseConv2dNative -> BatchToSpaceND subgraph
    (TensorFlow's lowering of a depthwise conv with dilation rate > 1).
    """

    class Descriptor(ConvolutionLayerResolver.Descriptor):
        pass

    def __init__(self):
        super(DilatedDepthwiseConvolutionLayerResolver, self).__init__()
        # SpaceToBatchND / BatchToSpaceND are non-consumable because, when
        # their paddings/crops do not cancel out, they are emitted as
        # separate Pad / Crop descriptors in resolve_layer below.
        self.graph_sequence = GraphSequence([
            NonConsumableConverterSequenceNode('space_to_batch', ['SpaceToBatchND']),
            NonConsumableConverterSequenceNode('inputs', ['?']),
            NonConsumableConverterSequenceNode('dilation_sizes', ['?']),
            NonConsumableConverterSequenceNode('paddings', ['?']),
            ConverterSequenceNode('conv_op', ['DepthwiseConv2dNative']),
            ConverterSequenceNode('kernel', ['?']),
            NonConsumableConverterSequenceNode('batch_to_space', ['BatchToSpaceND']),  # output
            NonConsumableConverterSequenceNode('block_shape_out', ['?']),
            NonConsumableConverterSequenceNode('crops', ['?'])
        ])
        self.graph_sequence.set_inputs('space_to_batch', ['inputs', 'dilation_sizes', 'paddings'])
        self.graph_sequence.set_inputs('conv_op', ['space_to_batch', 'kernel'])
        self.graph_sequence.set_inputs('batch_to_space', ['conv_op', 'block_shape_out', 'crops'])
        self.graph_sequence.set_outputs(['batch_to_space'])

    def resolve_layer(self, graph_matcher, graph_helper):
        """Match the dilated depthwise pattern and build layer descriptors.

        For each match, emits the convolution descriptor, optionally
        preceded by a Pad descriptor (paddings not cancelled by crops) and
        followed by a Crop descriptor (crops not cancelled by paddings).

        :raises ConverterError: if the dilation tensor is not shaped (2,).
        """
        matches = graph_matcher.match_sequence(self.graph_sequence)
        if len(matches) == 0:
            return []
        descriptors = []
        for match in matches:
            conv_op = match['conv_op']
            strides = conv_op.get_attr(self.TF_ATTRIBUTE_STRIDES)
            padding = conv_op.get_attr(self.TF_ATTRIBUTE_PADDING)
            weights = self.get_weights(graph_helper, conv_op)
            # Swap the last two kernel axes for the converter's layout.
            # NOTE(review): target layout inferred from this transpose only
            # -- confirm against ConvolutionLayerResolver's expectations.
            weights = np.transpose(weights, [0, 1, 3, 2])
            consumed_nodes = match.consumed_nodes
            output_op_nodes_names = [str(match[node.identifier].outputs[0].name) for node in
                                     self.graph_sequence.output_nodes]
            try:
                # Fold an immediately-following BiasAdd into this layer.
                batch_to_space_op = match['batch_to_space']
                conv_output_ops = graph_helper.get_op_outputs(batch_to_space_op)
                bias_op = GraphHelper.filter_single_op_by_type(conv_output_ops, 'BiasAdd')
                biases = self.get_biases(graph_helper, conv_op, bias_op)
                consumed_nodes.append(bias_op)
                output_op_nodes_names = [str(bias_op.outputs[0].name)]
            except OperationNotFoundError:
                # No BiasAdd consumer: fall back to zero biases.
                bias_op = None
                biases = np.zeros(np.shape(weights)[-1], dtype=np.float32)
            dilation_sizes = match['dilation_sizes']
            dilation_sizes = graph_helper.evaluate_tensor_output(dilation_sizes.outputs[0])
            # Only 2-D spatial dilation (height, width) is supported.
            if np.shape(dilation_sizes) != (2,):
                raise ConverterError(code_to_message.get_message('ERROR_TF_CONV_RESOLVE_DILATION')(conv_op.name))

            space_to_batch_op = match['space_to_batch']
            paddings_op = match['paddings']
            paddings_tensor = graph_helper.evaluate_tensor_output(paddings_op.outputs[0])
            input_op = conv_op

            batch_to_space_op = match['batch_to_space']
            crop_op = match['crops']
            crops_tensor = graph_helper.evaluate_tensor_output(crop_op.outputs[0])
            output_names = [str(conv_op.outputs[0].name)]

            if paddings_tensor.any() and not np.array_equal(paddings_tensor, crops_tensor):
                # Non-trivial padding not undone by the crops: emit an
                # explicit Pad layer. The 2x2 spatial paddings are expanded
                # to 4x2 to also cover the batch and channel dimensions.
                paddings_tensor = np.pad(paddings_tensor, ((1, 1), (0, 0)), 'constant')
                pad_descriptor = PadLayerResolver.Descriptor(
                    str(space_to_batch_op.name),
                    [match['space_to_batch'], match['dilation_sizes'], match['paddings']],
                    paddings_tensor,
                    snpe.modeltools.PADDING_CONSTANT,
                    0.0,
                    output_names=[str(space_to_batch_op.outputs[0].name)])
                descriptors.append(pad_descriptor)
            else:
                # Paddings are zero or cancelled by the crops: absorb the
                # SpaceToBatchND into the convolution layer itself.
                consumed_nodes.extend([space_to_batch_op, paddings_op, match['dilation_sizes']])
                input_op = space_to_batch_op

            if crops_tensor.any() and not np.array_equal(paddings_tensor, crops_tensor):
                # NOTE(review): paddings_tensor may already have been
                # expanded to 4x2 in the branch above, so this comparison
                # uses the expanded form -- confirm that is intentional.
                crops_tensor = np.pad(crops_tensor, ((1, 1), (0, 0)), 'constant')
                offsets = crops_tensor[:, 0]
                size = np.array(graph_helper.get_op_output_shape(match['batch_to_space']), dtype=np.int32)
                crop_descriptor = CropLayerResolver.Descriptor(
                    str(match['batch_to_space'].name),
                    [match['batch_to_space'], match['block_shape_out'], match['crops']],
                    offsets,
                    size,
                    output_names=[str(match['batch_to_space'].outputs[0].name)])
                descriptors.append(crop_descriptor)
            else:
                # Crops are zero or cancel the paddings: absorb the
                # BatchToSpaceND into the convolution layer.
                consumed_nodes.extend([batch_to_space_op, crop_op, match['block_shape_out']])
                output_names = output_op_nodes_names

            d = ConvolutionLayerResolver.Descriptor(str(conv_op.name), consumed_nodes,
                                                    conv_op, bias_op, strides, padding, weights,
                                                    biases,
                                                    output_names=output_names)

            # Depthwise: groups taken from the last dim of the input shape.
            d.groups = graph_helper.get_op_output_shape(space_to_batch_op)[-1]
            d.dilationY = int(dilation_sizes[0])
            d.dilationX = int(dilation_sizes[1])
            d.input_ops = [input_op]
            descriptors.append(d)

        return descriptors
Esempio n. 29
0
class GroupedConvolutionLayerResolver(ConvolutionLayerResolver, object):
    class Descriptor(ConvolutionLayerResolver.Descriptor):
        pass

    def __init__(self):
        super(GroupedConvolutionLayerResolver, self).__init__()

        # grouped convolution with split
        tree_output_node = ConverterSequenceNode('conv_op', ['Conv2D'])
        self.sequence = GraphSequence([
            ConverterSequenceNode('a', ['Split']),
            ConverterSequenceNode('b', ['Split']),
            ConverterRepeatableSequenceTreeNode('repeatable_graph', tree_output_node, tree_output_node),
            ConverterSequenceNode('concat_op', ['Concat']),
            ConverterSequenceNode('weights', ['Identity', 'Const']),
            NonConsumableConverterSequenceNode('inputs', ['?']),
            NonConsumableConverterSequenceNode('concat_dim', ['Const']),
            NonConsumableConverterSequenceNode('split_dim1', ['Const']),
            ConverterSequenceNode('split_dim2', ['Const'])
        ])
        self.sequence.set_inputs('a', ['inputs', 'split_dim1'])
        self.sequence.set_inputs('b', ['weights', 'split_dim2'])
        self.sequence.set_inputs('repeatable_graph', ['a', 'b'])
        self.sequence.set_inputs('concat_op', ['repeatable_graph', 'concat_dim'])
        self.sequence.set_outputs(['concat_op'])

        # grouped convolution with strided slice
        repeatable_sequence = GraphSequence([
                ConverterSequenceNode('ss', ['StridedSlice']),
                ConverterSequenceNode('ss_begin', ['Const']),
                ConverterSequenceNode('ss_end', ['Const']),
                ConverterSequenceNode('ss_strides', ['Const']),
                ConverterSequenceNode('conv', ['Conv2D']),
                ConverterSequenceNode('bias', ['BiasAdd']),
                ConverterSequenceNode('weights', ['Identity', 'Const']),
                ConverterSequenceNode('biases', ['Identity', 'Const'])
        ])
        repeatable_sequence.set_inputs('ss', ['ss_begin', 'ss_end', 'ss_strides'])
        repeatable_sequence.set_inputs('conv', ['ss', 'weights'])
        repeatable_sequence.set_inputs('bias', ['biases', 'conv'])
        repeatable_sequence.set_outputs(['bias'])

        self.sequence_with_strided_slice = GraphSequence([
            ConverterRepeatableSequenceTreeNode('repeatable_graph',
                                                tree_output_node=repeatable_sequence['bias'],
                                                tree_input_node=repeatable_sequence['ss']),
            ConverterSequenceNode('concat', ['Concat', 'ConcatV2']),
            ConverterSequenceNode('axis', ['Const']),
            NonConsumableConverterSequenceNode('input', ['?'])
        ])
        self.sequence_with_strided_slice.set_inputs('repeatable_graph', ['input'])
        self.sequence_with_strided_slice.set_inputs('concat', ['repeatable_graph', 'axis'])
        self.sequence_with_strided_slice.set_outputs(['concat'])

    def resolve_layer(self, graph_matcher, graph_helper):
        descriptors = []
        for match in graph_matcher.match_sequence(self.sequence):
            conv_op = match['conv_op_1']
            strides = conv_op.get_attr(self.TF_ATTRIBUTE_STRIDES)
            padding = conv_op.get_attr(self.TF_ATTRIBUTE_PADDING)
            weights = match['weights']
            consumed_nodes = match.consumed_nodes
            output_op_nodes_names = [str(match[node.identifier].outputs[0].name) for node in
                                     self.sequence.output_nodes]
            try:
                concat_op = match['concat_op']
                concat_op_output_ops = graph_helper.get_op_outputs(concat_op)
                bias_op = GraphHelper.filter_single_op_by_type(concat_op_output_ops, 'BiasAdd')
                # need to consume input of bias
                biases = self.get_biases(graph_helper, conv_op, bias_op)
                consumed_nodes.append(bias_op)
                output_op_nodes_names = [str(bias_op.outputs[0].name)]
            except OperationNotFoundError:
                bias_op = None
                biases = np.zeros(weights.outputs[0].get_shape()[-1], dtype=np.float32)

            weights = graph_helper.evaluate_tensor_output(weights.outputs[0])
            descriptor = ConvolutionLayerResolver.Descriptor(str(conv_op.name), consumed_nodes, conv_op, bias_op,
                                                             strides, padding, weights, biases,
                                                             output_names=output_op_nodes_names)
            descriptor.input_ops = [match['a'], match['b']]
            descriptors.append(descriptor)

        for match in graph_matcher.match_sequence(self.sequence_with_strided_slice):
            if not match.consumed_nodes:
                continue
            input_op = match['input']
            concat_op = match['concat']
            axis_op = match['axis']
            conv_ops = self._get_repeatable_op_by_id(match, 'conv')
            weight_ops = self._get_repeatable_op_by_id(match, 'weights')
            bias_ops = self._get_repeatable_op_by_id(match, 'biases')
            bias_add_ops = self._get_repeatable_op_by_id(match, 'bias')
            ss_ops = self._get_repeatable_op_by_id(match, 'ss')

            input_shape = graph_helper.get_op_output_shape(input_op)
            weight_shapes = [graph_helper.get_op_output_shape(weight_op) for weight_op in weight_ops]

            ss_strides = [graph_helper.evaluate_tensor_output(ss_strides_op.outputs[0]).tolist()
                          for ss_strides_op in self._get_repeatable_op_by_id(match, 'ss_strides')]
            ss_begins = [graph_helper.evaluate_tensor_output(ss_begin_op.outputs[0]).tolist()
                          for ss_begin_op in self._get_repeatable_op_by_id(match, 'ss_begin')]
            ss_ends = [graph_helper.evaluate_tensor_output(ss_end_op.outputs[0]).tolist()
                          for ss_end_op in self._get_repeatable_op_by_id(match, 'ss_end')]

            bias_add_shapes = [graph_helper.get_op_output_shape(bias_add_op) for bias_add_op in bias_add_ops]

            strides = [conv_op.get_attr(self.TF_ATTRIBUTE_STRIDES) for conv_op in conv_ops]
            paddings = [conv_op.get_attr(self.TF_ATTRIBUTE_PADDING) for conv_op in conv_ops]

            ss_shapes = [graph_helper.get_op_output_shape(ss_op.outputs[0])
                         for ss_op in ss_ops]

            num_groups = len(conv_ops)

            axis = graph_helper.evaluate_tensor_output(axis_op.outputs[0])

            is_grouped_convolution = True
            is_grouped_convolution &= self._elements_are_same(bias_add_shapes)
            is_grouped_convolution &= self._elements_are_same(weight_shapes)
            is_grouped_convolution &= self._elements_are_same(strides)
            is_grouped_convolution &= self._elements_are_same(paddings)
            is_grouped_convolution &= self._elements_are_same(ss_shapes)
            is_grouped_convolution &= self._elements_are_same(ss_strides)
            is_grouped_convolution &= not self._elements_are_same(ss_begins)
            is_grouped_convolution &= not self._elements_are_same(ss_ends)
            # stride slices must evenly divide the last dimension of input to number of groups
            is_grouped_convolution &= ss_shapes[0][-1] * num_groups == input_shape[-1]
            # strides must be all ones at all dimensions
            is_grouped_convolution &= ss_strides[0] == [1] * len(ss_strides[0])
            # concat must be on the last axis in grouped convolution
            is_grouped_convolution &= axis == -1 or axis == (len(bias_add_shapes[0]) - 1)

            if not is_grouped_convolution:
                continue

            weight_tensors = [graph_helper.evaluate_tensor_output(weight_op.outputs[0])
                              for weight_op in weight_ops]
            weights = np.concatenate(weight_tensors, axis=-1)

            bias_tensors = [graph_helper.evaluate_tensor_output(bias_op.outputs[0])
                              for bias_op in bias_ops]
            biases = np.concatenate(bias_tensors, axis=-1)

            descriptor = ConvolutionLayerResolver.Descriptor(
                str(concat_op.name), match.consumed_nodes, conv_ops[0], None,
                strides[0], paddings[0], weights, biases,
                output_names=[str(concat_op.outputs[0].name)])
            descriptor.input_ops = ss_ops
            descriptor.output_op = concat_op
            descriptors.append(descriptor)

        return descriptors

    @classmethod
    def _get_repeatable_op_by_id(cls, match, name):
        ops = []
        indexed_id = name + '_{}'
        i = 1
        while indexed_id.format(i) in match:
            ops.append(match[indexed_id.format(i)])
            i += 1
        return ops

    @classmethod
    def _elements_are_same(cls, array):
        return all([element == array[0] for element in array])