Exemplo n.º 1
0
    def __init__(self):
        """Register the graph patterns that resolve to a ResizeBilinear layer."""
        # Pattern 1: a lone ResizeBilinear op.
        plain_resize = GraphSequence([ConverterSequenceNode('root', ['ResizeBilinear'])])
        plain_resize.set_outputs(['root'])

        # Pattern 2: output size derived dynamically via Shape -> StridedSlice -> Mul.
        dynamic_resize = GraphSequence([
            NonConsumableConverterSequenceNode('input', ['?']),
            ConverterSequenceNode('shape', ['Shape']),
            ConverterSequenceNode('stridedSlice', ['StridedSlice']),
            ConverterSequenceNode('mul', ['Mul']),
            ConverterSequenceNode('const_stridedSlice_1', ['?']),
            ConverterSequenceNode('const_stridedSlice_2', ['?']),
            ConverterSequenceNode('const_stridedSlice_3', ['?']),
            ConverterSequenceNode('mul_const', ['?']),
            ConverterSequenceNode('root', ['ResizeBilinear']),
        ])
        for node, feeds in (
                ('shape', ['input']),
                ('stridedSlice', ['shape',
                                  'const_stridedSlice_1',
                                  'const_stridedSlice_2',
                                  'const_stridedSlice_3']),
                ('mul', ['stridedSlice', 'mul_const']),
                ('root', ['mul', 'input'])):
            dynamic_resize.set_inputs(node, feeds)
        dynamic_resize.set_outputs(['root'])

        self.sequences = [plain_resize, dynamic_resize]
Exemplo n.º 2
0
class Relu6LayerResolver(ReluMinMaxLayerResolver, object):
    """Resolves standalone Relu6 ops as min/max-clamped ReLU layers."""

    class Descriptor(ReluMinMaxLayerResolver.Descriptor):
        def __init__(self, name, nodes):
            # Relu6 clamps activations to the [0, 6] interval.
            super(Relu6LayerResolver.Descriptor, self).__init__(
                'Relu6', name, nodes, min_clamp=0, max_clamp=6)

    def __init__(self):
        relu6_node = ConverterSequenceNode('root', ['Relu6'])
        self.sequence = GraphSequence([relu6_node])
        self.sequence.set_outputs(['root'])

    def resolve_layer(self, graph_matcher, graph_helper):
        """Return one descriptor per Relu6 op matched in the graph."""
        descriptors = []
        for match in graph_matcher.match_sequence(self.sequence):
            relu6_op = match['root']
            descriptors.append(
                Relu6LayerResolver.Descriptor(str(relu6_op.name),
                                              match.consumed_nodes))
        return descriptors
Exemplo n.º 3
0
    def __init__(self):
        """Register MatMul patterns, with and without a trailing bias add."""
        # MatMul followed by a bias addition; the bias op is the output.
        with_bias = GraphSequence([
            ConverterSequenceNode('matmul_op', ['MatMul']),
            ConverterSequenceNode('bias_op', ['BiasAdd', 'Add']),  # output
            NonConsumableConverterSequenceNode('biases', ['Identity', 'Const']),
            NonConsumableConverterSequenceNode('weights', ['Identity', 'Const']),
            NonConsumableConverterSequenceNode('inputs', ['?']),
        ])
        with_bias.set_inputs('matmul_op', ['inputs', 'weights'])
        with_bias.set_inputs('bias_op', ['matmul_op', 'biases'])
        with_bias.set_outputs(['bias_op'])

        # Bare MatMul with no bias.
        no_bias = GraphSequence([
            ConverterSequenceNode('matmul_op', ['MatMul']),
            NonConsumableConverterSequenceNode('weights', ['Identity', 'Const']),
            NonConsumableConverterSequenceNode('inputs', ['?']),
        ])
        no_bias.set_inputs('matmul_op', ['inputs', 'weights'])
        no_bias.set_outputs(['matmul_op'])

        self.sequences = [no_bias, with_bias]
Exemplo n.º 4
0
    def __init__(self):
        """Register softmax patterns: a direct SoftMax op or its expanded form."""
        # Direct softmax op.
        plain_softmax = GraphSequence(
            [ConverterSequenceNode('root', ['SoftMax'])])
        plain_softmax.set_outputs(['root'])

        # Expanded form: exp(x - max(x)) / sum(exp(x - max(x))).
        expanded_softmax = GraphSequence([
            ConverterSequenceNode('max', ['Max']),
            ConverterSequenceNode('max_reduction_indicies', ['Const']),
            ConverterSequenceNode('sub', ['Sub']),
            ConverterSequenceNode('exp', ['Exp']),
            ConverterSequenceNode('sum', ['Sum']),
            ConverterSequenceNode('sum_reduction_indicies', ['Const']),
            ConverterSequenceNode('root', ['RealDiv']),
            NonConsumableConverterSequenceNode('input', ['?'])
        ])
        for node, feeds in (
                ('max', ['input', 'max_reduction_indicies']),
                ('sub', ['input', 'max']),
                ('exp', ['sub']),
                ('sum', ['exp', 'sum_reduction_indicies']),
                ('root', ['exp', 'sum'])):
            expanded_softmax.set_inputs(node, feeds)
        expanded_softmax.set_outputs(['root'])

        self.sequences = [plain_softmax, expanded_softmax]
Exemplo n.º 5
0
class EltWiseUnaryLayerResolver(LayerResolver, object):
    """Abstract resolver for single-input element-wise ops.

    Subclasses supply the layer type string, the TF op type to match, and
    the descriptor class to instantiate per match.
    """
    __metaclass__ = ABCMeta

    def __init__(self, layer_type, op_type, descriptor_class):
        super(EltWiseUnaryLayerResolver, self).__init__()
        self._layer_type = layer_type
        self._op_type = op_type
        self._descriptor_class = descriptor_class

        # One op of the configured type with a single (non-consumed) input.
        root = ConverterSequenceNode('root', [self._op_type])
        unary_input = NonConsumableConverterSequenceNode('input1', ['?'])
        self.sequence = GraphSequence([root, unary_input])
        self.sequence.set_inputs('root', ['input1'])
        self.sequence.set_outputs(['root'])

    def resolve_layer(self, graph_matcher, graph_helper):
        """Build a descriptor for every match of the unary-op pattern."""
        descriptors = []
        for sequence in [self.sequence]:
            for match in graph_matcher.match_sequence(sequence):
                root_op = match['root']
                descriptors.append(self._descriptor_class(
                    self._layer_type, str(root_op.name), match.consumed_nodes))
        return descriptors
Exemplo n.º 6
0
class EluLayerResolver(LayerResolver, object):
    """Resolves standalone Elu ops into ELU layers."""

    class Descriptor(LayerDescriptor):
        def __init__(self, layer_type, name, nodes):
            super(EluLayerResolver.Descriptor,
                  self).__init__(layer_type, name, nodes)

        @property
        def output_names(self):
            # Single output: the first output tensor of the matched op.
            return [str(self.child_ops[0].outputs[0].name)]

        def is_output_op(self, op):
            return op in self.child_ops

        def get_output_names_for(self, input_tensors):
            return self.output_names

    def __init__(self):
        elu_node = ConverterSequenceNode('root', ['Elu'])
        self.sequence = GraphSequence([elu_node])
        self.sequence.set_outputs(['root'])

    def resolve_layer(self, graph_matcher, graph_helper):
        """Return one 'ELU' descriptor per Elu op found in the graph."""
        descriptors = []
        for match in graph_matcher.match_sequence(self.sequence):
            elu_op = match['root']
            descriptors.append(EluLayerResolver.Descriptor(
                'ELU', str(elu_op.name), match.consumed_nodes))
        return descriptors
Exemplo n.º 7
0
class GenericBatchNormLayerResolver(BatchNormLayerResolver):
    """Resolves a Mul->Add pair with constant scale/offset as batch norm."""

    class Descriptor(BatchNormLayerResolver.Descriptor):
        pass

    def __init__(self):
        # y = (inputs * weights) + biases
        self.sequence = GraphSequence([
            NonConsumableConverterSequenceNode('inputs', ['?']),
            ConverterSequenceNode('a', ['Mul']),
            ConverterSequenceNode('b', ['Add']),
            ConverterSequenceNode('weights', ['Const', 'Identity']),
            ConverterSequenceNode('biases', ['Const', 'Identity'])
        ])
        self.sequence.set_inputs('a', ['inputs', 'weights'])
        self.sequence.set_inputs('b', ['a', 'biases'])
        self.sequence.set_outputs(['b'])

    def resolve_layer(self, graph_matcher, graph_helper):
        """Build a pre-calculated batch-norm descriptor for each match."""
        descriptors = []
        for match in graph_matcher.match_sequence(self.sequence):
            inputs_op = match['inputs']
            inputs_shape = graph_helper.get_op_output_shape(inputs_op)

            # Evaluate the constant weight/bias tensors to concrete values.
            biases_value = graph_helper.evaluate_tensor_output(
                match['biases'].outputs[0])
            weights_value = graph_helper.evaluate_tensor_output(
                match['weights'].outputs[0])

            # Scalar constants are expanded to the full input shape.
            if np.isscalar(biases_value):
                biases_value = self._broadcast_tensor(biases_value, inputs_shape)
            if np.isscalar(weights_value):
                weights_value = self._broadcast_tensor(weights_value, inputs_shape)

            output_op_nodes_names = [
                str(match[node.identifier].outputs[0].name)
                for node in self.sequence.output_nodes
            ]
            bn_op = match['a']
            descriptors.append(
                GenericBatchNormLayerResolver.Descriptor(
                    str(bn_op.name),
                    match.consumed_nodes,
                    bn_mul_op=bn_op,
                    pre_calculated=True,
                    weights=weights_value,
                    biases=biases_value,
                    output_names=output_op_nodes_names))
        return descriptors

    @classmethod
    def _broadcast_tensor(cls, tensor, shape):
        """Expand `tensor` to `shape` via numpy broadcasting, as float32."""
        return np.zeros(shape, dtype=np.float32) + tensor
Exemplo n.º 8
0
    def __init__(self):
        """Register the two grouped-convolution graph patterns.

        Pattern 1: both activations and weights go through Split ops, each
        group is convolved by a repeated Conv2D subtree, and the results
        are concatenated.  Pattern 2: each group is carved out of the input
        with StridedSlice, passed through Conv2D + BiasAdd, and the group
        outputs are concatenated.
        """
        super(GroupedConvolutionLayerResolver, self).__init__()

        # grouped convolution with split
        # The repeated subtree is a single Conv2D node; it is both the entry
        # and exit of the repeatable region.
        tree_output_node = ConverterSequenceNode('conv_op', ['Conv2D'])
        self.sequence = GraphSequence([
            ConverterSequenceNode('a', ['Split']),
            ConverterSequenceNode('b', ['Split']),
            ConverterRepeatableSequenceTreeNode('repeatable_graph',
                                                tree_output_node,
                                                tree_output_node),
            ConverterSequenceNode('concat_op', ['Concat']),
            ConverterSequenceNode('weights', ['Identity', 'Const']),
            NonConsumableConverterSequenceNode('inputs', ['?']),
            NonConsumableConverterSequenceNode('concat_dim', ['Const']),
            NonConsumableConverterSequenceNode('split_dim1', ['Const']),
            ConverterSequenceNode('split_dim2', ['Const'])
        ])
        # 'a' splits the activations, 'b' splits the weights; each repeated
        # Conv2D consumes one output of each split.
        self.sequence.set_inputs('a', ['inputs', 'split_dim1'])
        self.sequence.set_inputs('b', ['weights', 'split_dim2'])
        self.sequence.set_inputs('repeatable_graph', ['a', 'b'])
        self.sequence.set_inputs('concat_op',
                                 ['repeatable_graph', 'concat_dim'])
        self.sequence.set_outputs(['concat_op'])

        # grouped convolution with strided slice
        # One group's subgraph: StridedSlice -> Conv2D -> BiasAdd.
        repeatable_sequence = GraphSequence([
            ConverterSequenceNode('ss', ['StridedSlice']),
            ConverterSequenceNode('ss_begin', ['Const']),
            ConverterSequenceNode('ss_end', ['Const']),
            ConverterSequenceNode('ss_strides', ['Const']),
            ConverterSequenceNode('conv', ['Conv2D']),
            ConverterSequenceNode('bias', ['BiasAdd']),
            ConverterSequenceNode('weights', ['Identity', 'Const']),
            ConverterSequenceNode('biases', ['Identity', 'Const'])
        ])
        repeatable_sequence.set_inputs('ss',
                                       ['ss_begin', 'ss_end', 'ss_strides'])
        repeatable_sequence.set_inputs('conv', ['ss', 'weights'])
        repeatable_sequence.set_inputs('bias', ['biases', 'conv'])
        repeatable_sequence.set_outputs(['bias'])

        # The per-group subgraph repeats between 'ss' (entry) and 'bias'
        # (exit); all repeats feed one trailing Concat/ConcatV2.
        self.sequence_with_strided_slice = GraphSequence([
            ConverterRepeatableSequenceTreeNode(
                'repeatable_graph',
                tree_output_node=repeatable_sequence['bias'],
                tree_input_node=repeatable_sequence['ss']),
            ConverterSequenceNode('concat', ['Concat', 'ConcatV2']),
            ConverterSequenceNode('axis', ['Const']),
            NonConsumableConverterSequenceNode('input', ['?'])
        ])
        self.sequence_with_strided_slice.set_inputs('repeatable_graph',
                                                    ['input'])
        self.sequence_with_strided_slice.set_inputs(
            'concat', ['repeatable_graph', 'axis'])
        self.sequence_with_strided_slice.set_outputs(['concat'])
Exemplo n.º 9
0
    def __init__(self):
        """Register the graph pattern for Pow with a constant exponent."""
        base = NonConsumableConverterSequenceNode('input', ['?'])
        pow_node = ConverterSequenceNode('pow', ['Pow'])
        exponent = ConverterSequenceNode('const', ['Const'])
        scalar_pow = GraphSequence([base, pow_node, exponent])
        # pow(input, const)
        scalar_pow.set_inputs('pow', ['input', 'const'])
        scalar_pow.set_outputs(['pow'])

        self.sequences = [scalar_pow]
Exemplo n.º 10
0
    def __init__(self):
        """Register the two tf.nn.moments graph patterns.

        Both patterns compute mean and variance via
        Mean -> StopGradient -> SquaredDifference -> Mean; they differ only
        in whether trailing Squeeze ops strip the reduced dimensions.
        """
        super(MomentsLayerResolver, self).__init__()

        # Graph sequence where keep_dims is False and dims of 1 are stripped (default)
        sequence = GraphSequence([
            ConverterSequenceNode('moments/mean', ['Mean']),
            ConverterSequenceNode('moments/StopGradient', ['StopGradient']),
            ConverterSequenceNode('moments/SquaredDifference',
                                  ['SquaredDifference']),
            ConverterSequenceNode('moments/variance', ['Mean']),
            ConverterSequenceNode('moments/squeeze_mean', ['Squeeze']),
            ConverterSequenceNode('moments/squeeze_variance', ['Squeeze']),
            NonConsumableConverterSequenceNode('input', ['?']),
            NonConsumableConverterSequenceNode('mean_reduction_indices',
                                               ['?']),
            NonConsumableConverterSequenceNode('variance_reduction_indices',
                                               ['?']),
        ])
        sequence.set_inputs('moments/mean',
                            ['input', 'mean_reduction_indices'])
        # StopGradient blocks backprop through the mean in the variance term.
        sequence.set_inputs('moments/StopGradient', ['moments/mean'])
        sequence.set_inputs('moments/SquaredDifference',
                            ['input', 'moments/StopGradient'])
        sequence.set_inputs(
            'moments/variance',
            ['moments/SquaredDifference', 'variance_reduction_indices'])
        sequence.set_inputs('moments/squeeze_mean', ['moments/mean'])
        sequence.set_inputs('moments/squeeze_variance', ['moments/variance'])
        # Two outputs: the squeezed mean and the squeezed variance.
        sequence.set_outputs(
            ['moments/squeeze_mean', 'moments/squeeze_variance'])

        # Graph sequence where keep_dims is True and input dimensions are maintained
        sequence_keep_dims = GraphSequence([
            ConverterSequenceNode('moments/mean', ['Mean']),
            ConverterSequenceNode('moments/StopGradient', ['StopGradient']),
            ConverterSequenceNode('moments/SquaredDifference',
                                  ['SquaredDifference']),
            ConverterSequenceNode('moments/variance', ['Mean']),
            NonConsumableConverterSequenceNode('input', ['?']),
            NonConsumableConverterSequenceNode('variance_reduction_indices',
                                               ['?']),
            NonConsumableConverterSequenceNode('mean_reduction_indices',
                                               ['?']),
        ])
        sequence_keep_dims.set_inputs('moments/mean',
                                      ['input', 'mean_reduction_indices'])
        sequence_keep_dims.set_inputs('moments/StopGradient', ['moments/mean'])
        sequence_keep_dims.set_inputs('moments/SquaredDifference',
                                      ['input', 'moments/StopGradient'])
        sequence_keep_dims.set_inputs(
            'moments/variance',
            ['moments/SquaredDifference', 'variance_reduction_indices'])
        sequence_keep_dims.set_outputs(['moments/mean', 'moments/variance'])

        self.sequences = [sequence, sequence_keep_dims]
Exemplo n.º 11
0
    def __init__(self):
        """Register the graph pattern for the ExtractGlimpse op."""
        image = NonConsumableConverterSequenceNode('input', ['?'])
        offsets = NonConsumableConverterSequenceNode('offsets', ['?'])
        size = ConverterSequenceNode('size', ['Const'])
        glimpse = ConverterSequenceNode('extract_glimpse', ['ExtractGlimpse'])

        extract_glimpse = GraphSequence([image, offsets, size, glimpse])
        extract_glimpse.set_inputs('extract_glimpse',
                                   ['input', 'size', 'offsets'])
        extract_glimpse.set_outputs(['extract_glimpse'])

        self.sequences = [extract_glimpse]
Exemplo n.º 12
0
class ConcatLayerResolver(LayerResolver, object):
    """Resolves Concat/ConcatV2 ops into Concatenation layers.

    Constant data inputs (other than the axis constant) are emitted as
    separate constant-layer descriptors feeding the concatenation.
    """

    class Descriptor(LayerDescriptor):
        def __init__(self, name, nodes, axis, output_names=None):
            super(ConcatLayerResolver.Descriptor, self).__init__('Concatenation', name, nodes,
                                                                 output_names=output_names)
            self.axis = axis

    def __init__(self):
        self.sequence = GraphSequence([ConverterSequenceNode('root', ['Concat', 'ConcatV2'])])
        self.sequence.set_outputs(['root'])

    def resolve_layer(self, graph_matcher, graph_helper):
        """Return concat (and any constant-input) descriptors for each match."""
        matches = graph_matcher.match_sequence(self.sequence)
        if len(matches) == 0:
            return []
        descriptors = []
        for match in matches:
            concat_op = match['root']
            consumed_nodes = match.consumed_nodes
            concat_descriptor = ConcatLayerResolver.Descriptor(str(concat_op.name), consumed_nodes,
                                                               None, [concat_op.outputs[0].name])

            non_const_inputs = [tensor for tensor in concat_op.inputs if tensor.op.type != 'Const']
            const_ops = [tensor.op for tensor in concat_op.inputs if tensor.op.type == 'Const']
            axis_tensor = None
            if len(non_const_inputs) < 2 or len(const_ops) > 1:
                # All constants except the last are treated as data inputs and
                # get their own constant-layer descriptors.
                for i in range(0, len(const_ops) - 1):
                    const_value = graph_helper.evaluate_tensor_output(const_ops[i].outputs[0])
                    const_shape = graph_helper.get_op_output_shape(const_ops[i].outputs[0])
                    # BUG FIX: use the op's name as the descriptor name;
                    # str(op) stringifies the whole NodeDef proto, which every
                    # other resolver in this file avoids via str(op.name).
                    descriptors.append(ConstantLayerResolver.Descriptor(str(const_ops[i].name),
                                                                        [const_ops[i]],
                                                                        const_value,
                                                                        const_shape,
                                                                        concat_descriptor))
                # Make the assumption that the axis is always the last constant
                axis_tensor = const_ops[-1]

            # Rank of the largest non-const input, used to normalize a
            # negative axis value below.
            max_shape = 0
            for t in non_const_inputs:
                shape = graph_helper.get_op_output_shape(t.op)
                if len(shape) > max_shape:
                    max_shape = len(shape)

            if not axis_tensor:
                axis_tensor = GraphHelper.filter_single_op_by_type([t.op for t in concat_op.inputs], 'Const')
            axis = int(graph_helper.evaluate_tensor_output(axis_tensor.outputs[0]))
            if axis < 0:
                axis += max_shape

            concat_descriptor.axis = axis
            descriptors.append(concat_descriptor)

        return descriptors
Exemplo n.º 13
0
    def __init__(self):
        """Register the graph pattern for the CropAndResize op."""
        crop_and_resize = GraphSequence([
            NonConsumableConverterSequenceNode('input', ['?']),
            NonConsumableConverterSequenceNode('boxes', ['?']),
            NonConsumableConverterSequenceNode('box_ind', ['?']),
            NonConsumableConverterSequenceNode('crop_size', ['?']),
            ConverterSequenceNode('crop_and_resize', ['CropAndResize']),
        ])
        crop_and_resize.set_inputs(
            'crop_and_resize', ['input', 'boxes', 'box_ind', 'crop_size'])
        crop_and_resize.set_outputs(['crop_and_resize'])

        self.sequences = [crop_and_resize]
Exemplo n.º 14
0
class InstanceNormRMSLayerResolver(LayerResolver, object):
    """Resolves the tf.math.l2_normalize subgraph as an RMS instance norm."""

    class Descriptor(LayerDescriptor):
        def __init__(self, name, operations, shape):
            super(InstanceNormRMSLayerResolver.Descriptor,
                  self).__init__('InstanceNormRMS', name, operations)
            self.shape = shape
            # SNPE runtime algo is y = x * WEIGHT / rms + BIAS
            # While L2 Normalization is y = x / rms
            # That requires WEIGHT = 1.0 and BIAS = 0.0 to mimic L2 Norm in SNPE
            # Shape of weights/biases should be same as the last dimension of input.
            self.weights = np.ones(shape[-1])
            self.biases = np.zeros(shape[-1])

    def __init__(self):
        # Graph topology of tf.math.l2_normalize:
        # e = input * rsqrt(maximum(sum(square(input)), epsilon))
        self.sequence = GraphSequence([
            NonConsumableConverterSequenceNode('input', ['?']),
            ConverterSequenceNode('a', ['Square']),
            ConverterSequenceNode('weights', ['Const', 'Identity']),
            ConverterSequenceNode('b', ['Sum']),
            ConverterSequenceNode('epsilon', ['Const', 'Identity']),
            ConverterSequenceNode('c', ['Maximum']),
            ConverterSequenceNode('d', ['Rsqrt']),
            ConverterSequenceNode('e', ['Mul'])
        ])
        self.sequence.set_inputs('a', ['input'])
        self.sequence.set_inputs('b', ['a', 'weights'])
        self.sequence.set_inputs('c', ['b', 'epsilon'])
        self.sequence.set_inputs('d', ['c'])
        self.sequence.set_inputs('e', ['d', 'input'])
        self.sequence.set_outputs(['e'])

    # For now, elementwise resolver cannot work with epsilon node.
    # Will meet error "ElementWise resolver must implement broadcast method.".
    def is_final_resolution(self):
        return True

    def resolve_layer(self, graph_matcher, graph_helper):
        """Return one InstanceNormRMS descriptor per matched l2_normalize."""
        matches = graph_matcher.match_sequence(self.sequence)
        potential_descriptors = []
        for match in matches:
            # BUG FIX: the sequence above has no node named
            # 'SquaredDifference' (its identifiers are 'a'..'e'), so
            # match['SquaredDifference'] raised KeyError on every match.
            # Name the layer after the output Mul node 'e' instead.
            output_op = match['e']
            input_op = match['input']

            shape = graph_helper.get_op_output_shape(input_op)

            consumed_nodes = match.consumed_nodes
            potential_descriptors.append(
                InstanceNormRMSLayerResolver.Descriptor(str(output_op.name),
                                                        consumed_nodes,
                                                        shape=shape))
        return potential_descriptors
Exemplo n.º 15
0
class ArgMaxLayerResolver(LayerResolver, object):
    """Resolves ArgMax ops with a constant axis input."""

    class Descriptor(LayerDescriptor):
        def __init__(self, name, nodes, axis, output_names=None):
            super(ArgMaxLayerResolver.Descriptor,
                  self).__init__('ArgMax',
                                 name,
                                 nodes,
                                 output_names=output_names)
            self.axis = axis

    def __init__(self):
        argmax_node = ConverterSequenceNode('root', ['ArgMax'])
        axis_node = ConverterSequenceNode('axis', ['Const'])
        data_node = NonConsumableConverterSequenceNode('input', ['?'])
        self.sequence = GraphSequence([argmax_node, axis_node, data_node])
        self.sequence.set_inputs('root', ['input', 'axis'])
        self.sequence.set_outputs(['root'])

    def resolve_layer(self, graph_matcher, graph_helper):
        """Return an ArgMax descriptor per match, validating the axis."""
        descriptors = []
        for match in graph_matcher.match_sequence(self.sequence):
            argmax_op = match['root']
            input_rank = len(graph_helper.get_op_output_shape(match['input']))

            axis = int(graph_helper.evaluate_tensor_output(
                match['axis'].outputs[0]))
            # Normalize a negative axis, then verify it is in range.
            if axis < 0:
                axis += input_rank
            if axis < 0 or axis >= input_rank:
                raise ConverterError(
                    code_to_message.get_error_message(
                        'ERROR_TF_ARGMAX_INVALID_AXIS')(axis, input_rank))

            descriptors.append(ArgMaxLayerResolver.Descriptor(
                str(argmax_op.name),
                match.consumed_nodes,
                axis,
                output_names=[str(argmax_op.outputs[0].name)]))
        return descriptors
Exemplo n.º 16
0
    def __init__(self):
        """Register the Keras-style clipped ReLU pattern (Relu -> Minimum -> Maximum)."""
        keras_clip = GraphSequence([
            NonConsumableConverterSequenceNode('input', ['?']),
            ConverterSequenceNode('root', ['Relu']),
            ConverterSequenceNode('min', ['Minimum']),
            ConverterSequenceNode('min_cast', ['Cast']),
            ConverterSequenceNode('min_const', ['Const']),
            ConverterSequenceNode('max', ['Maximum']),
            ConverterSequenceNode('max_const', ['Const'])
        ])
        # The upper-clamp constant reaches Minimum through a Cast node.
        for node, feeds in (
                ('root', ['input']),
                ('min_cast', ['min_const']),
                ('min', ['root', 'min_cast']),
                ('max', ['min', 'max_const'])):
            keras_clip.set_inputs(node, feeds)
        keras_clip.set_outputs(['max'])

        self.sequences = [keras_clip]
Exemplo n.º 17
0
class BatchNormWithGlobalNormLayerResolver(BatchNormLayerResolver):
    """Resolves the legacy BatchNormWithGlobalNormalization op."""

    class Descriptor(BatchNormLayerResolver.Descriptor):
        pass

    def __init__(self):
        self.sequence = GraphSequence([
            ConverterSequenceNode('root', ['BatchNormWithGlobalNormalization'])
        ])
        self.sequence.set_outputs(['root'])

    def resolve_layer(self, graph_matcher, graph_helper):
        """Extract mean/variance/beta/scale from the op's Const inputs."""
        descriptors = []
        for match in graph_matcher.match_sequence(self.sequence):
            bn_op = match['root']
            params = self._const_inputs(graph_helper, bn_op)
            if len(params) < 4:
                raise ConverterError(
                    code_to_message.get_error_message(
                        'ERROR_TF_BATCHNORM_GLOBALNORMALIZATION_INPUT'))
            # Const inputs are consumed in input order:
            # mean, variance, beta, scale.
            mean, variance, beta, scale = params[0], params[1], params[2], params[3]
            descriptors.append(
                BatchNormWithGlobalNormLayerResolver.Descriptor(
                    str(bn_op.name),
                    match.consumed_nodes,
                    bn_mul_op=bn_op,
                    mean=mean,
                    variance=variance,
                    epsilon=bn_op.get_attr('variance_epsilon'),
                    scale=scale,
                    beta=beta))
        return descriptors

    @classmethod
    def _const_inputs(cls, graph_helper, bn_op):
        """Evaluate every Const input tensor of the op, preserving order."""
        values = []
        for tensor in bn_op.inputs:
            if tensor.op.type == 'Const':
                values.append(graph_helper.evaluate_tensor_output(tensor))
        return values
class ImageProjectiveTransformLayerResolver(LayerResolver, object):
    """Resolves ImageProjectiveTransform ops."""

    class Descriptor(LayerDescriptor):
        def __init__(self,
                     name,
                     operations,
                     interpolation_mode,
                     output_names=None):
            super(ImageProjectiveTransformLayerResolver.Descriptor,
                  self).__init__('ImageProjectiveTransform',
                                 name,
                                 operations,
                                 output_names=output_names)
            # 0 == bilinear, 1 == nearest (see resolve_layer mapping).
            self.interpolation_mode = interpolation_mode

    def __init__(self):
        self.sequence = GraphSequence(
            [ConverterSequenceNode('root', ['ImageProjectiveTransform'])])
        self.sequence.set_outputs(['root'])

    def resolve_layer(self, graph_matcher, graph_helper):
        """Map the op's interpolation attribute to an integer mode."""
        descriptors = []
        mode_by_name = {"BILINEAR": 0, "NEAREST": 1}
        for match in graph_matcher.match_sequence(self.sequence):
            transform_op = match['root']

            interpolation = str(
                transform_op.get_attr('interpolation').decode('utf-8'))
            if interpolation not in mode_by_name:
                raise ConverterError(
                    code_to_message.get_error_message(
                        'ERROR_TF_RESOLVE_IMAGE_TRANSFORM_INTERPOLATION'))

            descriptors.append(
                ImageProjectiveTransformLayerResolver.Descriptor(
                    str(transform_op.name),
                    match.consumed_nodes,
                    mode_by_name[interpolation],
                    output_names=[str(transform_op.outputs[0].name)]))
        return descriptors
Exemplo n.º 19
0
class FusedBatchNormNormLayerResolver(BatchNormLayerResolver):
    """Resolves FusedBatchNorm ops."""

    class Descriptor(BatchNormLayerResolver.Descriptor):
        pass

    def __init__(self):
        self.sequence = GraphSequence(
            [ConverterSequenceNode('root', ['FusedBatchNorm'])])
        self.sequence.set_outputs(['root'])

    def resolve_layer(self, graph_matcher, graph_helper):
        """Extract scale/beta/mean/variance from the op's parameter inputs."""
        descriptors = []
        for match in graph_matcher.match_sequence(self.sequence):
            bn_op = match['root']
            params = self._get_parameter_tensors(graph_helper, bn_op)
            if len(params) < 4:
                raise ConverterError(
                    code_to_message.get_error_message(
                        'ERROR_TF_BATCHNORM_GLOBALNORMALIZATION_INPUT'))
            # Parameter inputs are consumed in input order:
            # scale, beta, mean, variance.
            scale, beta, mean, variance = params[0], params[1], params[2], params[3]
            descriptors.append(
                FusedBatchNormNormLayerResolver.Descriptor(
                    str(bn_op.name),
                    match.consumed_nodes,
                    bn_mul_op=bn_op,
                    mean=mean,
                    variance=variance,
                    epsilon=bn_op.get_attr('epsilon'),
                    scale=scale,
                    beta=beta))
        return descriptors

    @classmethod
    def _get_parameter_tensors(cls, graph_helper, bn_op):
        """Evaluate the Const/Identity input tensors, preserving input order."""
        param_tensors = [t for t in bn_op.inputs
                         if t.op.type in ['Const', 'Identity']]
        evaluated = graph_helper.evaluate_tensors_output(param_tensors)
        return [evaluated[t] for t in param_tensors]
Exemplo n.º 20
0
class SliceLayerResolver(LayerResolver, object):
    """Resolves TensorFlow Split/SplitV ops into Slice layers."""

    class Descriptor(LayerDescriptor):
        def __init__(self, name, nodes, axis, split_sizes, split_count):
            super(SliceLayerResolver.Descriptor, self).__init__('Slice', name, nodes)
            self.axis = axis                # dimension along which the input is split
            self.split_sizes = split_sizes  # explicit sizes (SplitV) or [] for an even split
            self.split_count = split_count  # number of output slices

        @property
        def output_names(self):
            last_op = self.child_ops[-1]
            return [str(tensor.name) for tensor in last_op.outputs]

    def __init__(self):
        self.sequence = GraphSequence([ConverterSequenceNode('root', ['Split', 'SplitV'])])
        self.sequence.set_outputs(['root'])

    def resolve_layer(self, graph_matcher, graph_helper):
        matches = graph_matcher.match_sequence(self.sequence)
        if not matches:
            return []
        descriptors = []
        for match in matches:
            split_op = match['root']
            axis, sizes = self.get_split_axis_and_sizes(graph_helper, split_op)
            num_splits = int(split_op.get_attr('num_split'))
            descriptors.append(
                SliceLayerResolver.Descriptor(str(split_op.name),
                                              match.consumed_nodes,
                                              axis,
                                              sizes,
                                              num_splits))
        return descriptors

    @classmethod
    def get_split_axis_and_sizes(cls, graph_helper, split_op):
        # SplitV carries (value, size_splits, axis); plain Split carries
        # (axis, value) and divides the input evenly, so sizes stay empty.
        try:
            _, sizes_tensor, axis_tensor = GraphHelper.get_op_input_tensors(
                split_op, ('?', 'Const', 'Const'))
            split_sizes = list(graph_helper.evaluate_tensor_output(sizes_tensor))
        except TensorNotFoundError:
            axis_tensor, _ = GraphHelper.get_op_input_tensors(split_op, ('Const', '?'))
            split_sizes = []

        split_axis = int(graph_helper.evaluate_tensor_output(axis_tensor))
        return split_axis, split_sizes
Exemplo n.º 21
0
class DepthwiseConvolutionLayerResolver(ConvolutionLayerResolver, object):
    """Resolves DepthwiseConv2dNative ops, optionally fused with a bias add."""

    def __init__(self):
        super(DepthwiseConvolutionLayerResolver, self).__init__()
        # Pattern: depthwise conv whose output feeds a bias addition.
        self.graph_sequence_with_bias = GraphSequence([
            ConverterSequenceNode('conv', ['DepthwiseConv2dNative']),
            ConverterSequenceNode('bias', ['BiasAdd', 'Add']),
            NonConsumableConverterSequenceNode('other', ['?'])
        ])
        self.graph_sequence_with_bias.set_inputs('bias', ['conv', 'other'])
        self.graph_sequence_with_bias.set_outputs(['bias'])

        # Pattern: bare depthwise conv with no bias.
        self.graph_sequence = GraphSequence(
            [ConverterSequenceNode('conv', ['DepthwiseConv2dNative'])])
        self.graph_sequence.set_outputs(['conv'])

    def resolve_layer(self, graph_matcher, graph_helper):
        descriptors = []
        all_matches = (graph_matcher.match_sequence(self.graph_sequence) +
                       graph_matcher.match_sequence(self.graph_sequence_with_bias))
        for match in all_matches:
            self._resolve_from_match(descriptors, graph_helper, match)
        return descriptors

    def _resolve_from_match(self, descriptors, graph_helper, match):
        conv_op = match['conv']
        strides = conv_op.get_attr(self.TF_ATTRIBUTE_STRIDES)
        padding = conv_op.get_attr(self.TF_ATTRIBUTE_PADDING)
        # Swap the last two weight axes — presumably converting the TF
        # depthwise filter layout to the one expected downstream (TODO confirm).
        weights = np.transpose(self.get_weights(graph_helper, conv_op),
                               [0, 1, 3, 2])

        if 'bias' in match:
            biases = self.get_biases(graph_helper, conv_op, match['bias'])
        else:
            # No fused bias: synthesize a zero bias per output channel.
            biases = np.zeros(np.shape(weights)[-1], dtype=np.float32)

        descriptor = ConvolutionLayerResolver.Descriptor(
            str(conv_op.name), match.consumed_nodes, conv_op, None,
            strides, padding, weights, biases)
        input_tensor, _ = GraphHelper.get_op_input_tensors(conv_op, ('?', '?'))
        # One group per input channel (last dim of the input shape).
        descriptor.groups = graph_helper.get_op_output_shape(input_tensor)[-1]
        descriptors.append(descriptor)
Exemplo n.º 22
0
class TanhLayerResolver(LayerResolver, object):
    """Resolves standalone Tanh ops."""

    class Descriptor(LayerDescriptor):
        pass

    def __init__(self):
        self.sequence = GraphSequence([ConverterSequenceNode('root', ['Tanh'])])
        self.sequence.set_outputs(['root'])

    def resolve_layer(self, graph_matcher, graph_helper):
        # One descriptor per matched Tanh op; an empty match list naturally
        # yields an empty result.
        return [
            TanhLayerResolver.Descriptor('Tanh', str(match['root'].name),
                                         match.consumed_nodes)
            for match in graph_matcher.match_sequence(self.sequence)
        ]
Exemplo n.º 23
0
    def __init__(self):
        # GatherV2: carries an explicit axis input; indices come from a
        # placeholder.
        gather_v2_seq = GraphSequence([
            ConverterSequenceNode('gather', ['GatherV2']),
            NonConsumableConverterSequenceNode('params', ['?']),
            NonConsumableConverterSequenceNode('axis', ['?']),
            NonConsumableConverterSequenceNode('indices', ['Placeholder'])
        ])
        gather_v2_seq.set_inputs('gather', ['params', 'axis', 'indices'])
        gather_v2_seq.set_outputs(['gather'])

        # Legacy Gather: same shape but without the axis input.
        gather_seq = GraphSequence([
            ConverterSequenceNode('gather', ['Gather']),
            NonConsumableConverterSequenceNode('params', ['?']),
            NonConsumableConverterSequenceNode('indices', ['Placeholder'])
        ])
        gather_seq.set_inputs('gather', ['params', 'indices'])
        gather_seq.set_outputs(['gather'])

        self.sequences = [gather_v2_seq, gather_seq]
Exemplo n.º 24
0
class PixelShuffleLayerResolver(LayerResolver, object):
    """Resolves DepthToSpace ops into PixelShuffle layers."""

    TF_ATTRIBUTE_BLOCK_SIZE = 'block_size'

    class Descriptor(LayerDescriptor):
        def __init__(self, layer_type, name, nodes, upscale_factor):
            super(PixelShuffleLayerResolver.Descriptor, self).__init__(layer_type, name, nodes)
            # DepthToSpace block size: the spatial upscale factor.
            self.upscale_factor = upscale_factor

        @property
        def output_names(self):
            return [str(self.child_ops[0].outputs[0].name)]

        def is_output_op(self, op):
            return op in self.child_ops

        def get_output_names_for(self, input_tensors):
            return self.output_names

    def __init__(self):
        self.sequence = GraphSequence([ConverterSequenceNode('root', ['DepthToSpace'])])
        self.sequence.set_outputs(['root'])

    def resolve_layer(self, graph_matcher, graph_helper):
        descriptors = []
        for match in graph_matcher.match_sequence(self.sequence):
            depth_to_space_op = match['root']
            block_size = depth_to_space_op.get_attr(self.TF_ATTRIBUTE_BLOCK_SIZE)
            descriptors.append(
                PixelShuffleLayerResolver.Descriptor('PixelShuffle',
                                                     str(depth_to_space_op.name),
                                                     match.consumed_nodes,
                                                     block_size))
        return descriptors
Exemplo n.º 25
0
class PoolingLayerResolver(LayerResolver, object):
    """Generic resolver for single-op pooling layers.

    Concrete subclasses supply the layer type name, the descriptor class,
    the pooling-type constant, and the TensorFlow op type to match.
    """
    __metaclass__ = ABCMeta

    class Descriptor(LayerDescriptor):
        def __init__(self, layer_type, name, operations, pooling_type, strides,
                     padding, kernel_dims):
            super(PoolingLayerResolver.Descriptor,
                  self).__init__(layer_type, name, operations)
            self.pooling_type = pooling_type
            self.strides = strides
            self.padding = padding
            self.kernel_dims = kernel_dims

    def __init__(self, layer_type, descriptor_type, pooling_type, op_type):
        super(PoolingLayerResolver, self).__init__()
        self._layer_type = layer_type
        self._descriptor_type = descriptor_type
        # Fixed typo: attribute was previously named `_polling_type`.
        self._pooling_type = pooling_type
        # Backward-compat alias in case code outside this class still reads
        # the old misspelled name.
        self._polling_type = pooling_type
        self._op_type = op_type

        self.sequence = GraphSequence(
            [ConverterSequenceNode('root', [self._op_type])])
        self.sequence.set_outputs(['root'])

    def resolve_layer(self, graph_matcher, graph_helper):
        """Return one descriptor per matched pooling op."""
        matches = graph_matcher.match_sequence(self.sequence)
        if len(matches) == 0:
            return []
        potential_descriptors = []
        for match in matches:
            pooling_op = match['root']
            kernel_dims = pooling_op.get_attr('ksize')
            strides = pooling_op.get_attr('strides')
            padding = pooling_op.get_attr('padding')
            consumed_nodes = match.consumed_nodes
            potential_descriptors.append(
                self._descriptor_type(self._layer_type, str(pooling_op.name),
                                      consumed_nodes, self._pooling_type,
                                      strides, padding, kernel_dims))
        return potential_descriptors
Exemplo n.º 26
0
class CropLayerResolver(LayerResolver, object):
    """Resolves Slice ops (input, offsets, size) into Crop layers."""

    class Descriptor(LayerDescriptor):
        def __init__(self, name, nodes, offset, size, output_names=None):
            super(CropLayerResolver.Descriptor,
                  self).__init__('Crop',
                                 name,
                                 nodes,
                                 output_names=output_names)
            self.offset = offset  # per-dimension crop start
            self.size = size      # per-dimension crop extent

    def __init__(self):
        self.sequence = GraphSequence([
            ConverterSequenceNode('root', ['Slice']),
            NonConsumableConverterSequenceNode('input', ['?']),
            NonConsumableConverterSequenceNode('offsets', ['?']),
            NonConsumableConverterSequenceNode('size', ['?']),
        ])
        self.sequence.set_inputs('root', ['input', 'offsets', 'size'])
        self.sequence.set_outputs(['root'])

    def resolve_layer(self, graph_matcher, graph_helper):
        descriptors = []
        for match in graph_matcher.match_sequence(self.sequence):
            slice_op = match['root']
            input_shape = graph_helper.get_op_output_shape(match['input'])
            offset = graph_helper.evaluate_tensor_output(match['offsets'].outputs[0])
            size = graph_helper.evaluate_tensor_output(match['size'].outputs[0])

            # A size entry of -1 means "everything from offset to the end of
            # that dimension"; resolve it to a concrete extent.
            for axis in range(len(size)):
                if size[axis] == -1:
                    size[axis] = input_shape[axis] - offset[axis]

            descriptors.append(
                CropLayerResolver.Descriptor(str(slice_op.name),
                                             match.consumed_nodes,
                                             offset,
                                             size))
        return descriptors
Exemplo n.º 27
0
    def __init__(self):
        # Pattern 1: PReLU expressed with Relu/Abs/Sub/Mul/Mul/Add.
        standard = GraphSequence([
            ConverterSequenceNode('a', ['Relu']),
            ConverterSequenceNode('b', ['Abs']),
            ConverterSequenceNode('c', ['Sub']),
            ConverterSequenceNode('d', ['Mul']),
            ConverterSequenceNode('e', ['Mul']),
            ConverterSequenceNode('f', ['Add']),  # output
            ConverterSequenceNode('unknown', ['?']),
            ConverterSequenceNode('alphas', ['?']),
            NonConsumableConverterSequenceNode('inputs', ['?'])
        ])
        for node_id, node_inputs in [('a', ['inputs']),
                                     ('b', ['inputs']),
                                     ('c', ['inputs', 'b']),
                                     ('d', ['alphas', 'c']),
                                     ('e', ['d', 'unknown']),
                                     ('f', ['a', 'e'])]:
            standard.set_inputs(node_id, node_inputs)
        standard.set_outputs(['f'])

        # Pattern 2: negative branch built by negating the input and the
        # alphas separately.
        negative_alpha = GraphSequence([
            ConverterSequenceNode('a', ['Relu']),
            ConverterSequenceNode('b', ['Neg']),
            ConverterSequenceNode('c', ['Neg']),
            ConverterSequenceNode('d', ['Relu']),
            ConverterSequenceNode('e', ['Mul']),
            ConverterSequenceNode('f', ['Add']),  # output
            ConverterSequenceNode('alphas', ['?']),
            NonConsumableConverterSequenceNode('inputs', ['?'])
        ])
        for node_id, node_inputs in [('a', ['inputs']),
                                     ('b', ['inputs']),
                                     ('c', ['alphas']),
                                     ('d', ['b']),
                                     ('e', ['d', 'c']),
                                     ('f', ['a', 'e'])]:
            negative_alpha.set_inputs(node_id, node_inputs)
        negative_alpha.set_outputs(['f'])

        # Pattern 3: negative branch via relu(-x) negated back before the mul.
        negative_relu = GraphSequence([
            ConverterSequenceNode('relu_pos', ['Relu']),
            ConverterSequenceNode('neg_1', ['Neg']),
            ConverterSequenceNode('neg_2', ['Neg']),
            ConverterSequenceNode('relu_neg', ['Relu']),
            ConverterSequenceNode('mul', ['Mul']),
            ConverterSequenceNode('f', ['Add']),  # output
            ConverterSequenceNode('alphas', ['?']),
            NonConsumableConverterSequenceNode('inputs', ['?'])
        ])
        for node_id, node_inputs in [('relu_pos', ['inputs']),
                                     ('neg_1', ['inputs']),
                                     ('relu_neg', ['neg_1']),
                                     ('neg_2', ['relu_neg']),
                                     ('mul', ['neg_2', 'alphas']),
                                     ('f', ['relu_pos', 'mul'])]:
            negative_relu.set_inputs(node_id, node_inputs)
        negative_relu.set_outputs(['f'])

        self.sequences = [standard, negative_alpha, negative_relu]
Exemplo n.º 28
0
class AddNLayerResolver(LayerResolver, object):
    """Resolves AddN ops into N-way element-wise sum layers."""

    class Descriptor(LayerDescriptor):
        def __init__(self, name, nodes):
            super(AddNLayerResolver.Descriptor,
                  self).__init__('ElementWiseSumN', name, nodes)

    def __init__(self):
        self.sequence = GraphSequence([ConverterSequenceNode('root', ['AddN'])])
        self.sequence.set_outputs(['root'])

    def resolve_layer(self, graph_matcher, graph_helper):
        # One descriptor per matched AddN op; no matches yields [].
        return [
            AddNLayerResolver.Descriptor(str(match['root'].name),
                                         match.consumed_nodes)
            for match in graph_matcher.match_sequence(self.sequence)
        ]
Exemplo n.º 29
0
class FillLayerResolver(LayerResolver, object):
    """Resolves Fill ops (shape tensor + constant scalar) into Fill layers."""

    class Descriptor(LayerDescriptor):
        def __init__(self, name, nodes, shape, scalar):
            super(FillLayerResolver.Descriptor,
                  self).__init__('Fill', name, nodes)
            self.shape = shape    # normalized 4-element output shape
            self.scalar = scalar  # value written into every element

    def __init__(self):
        self.sequence = GraphSequence([ConverterSequenceNode('root', ['Fill'])])
        self.sequence.set_outputs(['root'])

    def resolve_layer(self, graph_matcher, graph_helper):
        descriptors = []
        for match in graph_matcher.match_sequence(self.sequence):
            fill_op = match['root']
            shape_tensor, scalar_tensor = GraphHelper.get_op_input_tensors(
                fill_op, ('?', 'Const'))
            shape = graph_helper.evaluate_tensor_output(shape_tensor).tolist()
            # Normalize to exactly four dimensions: keep only the trailing
            # four entries, then left-pad with 1s.
            shape = shape[-4:]
            shape = [1] * (4 - len(shape)) + shape
            scalar = graph_helper.evaluate_tensor_output(scalar_tensor)

            descriptors.append(
                FillLayerResolver.Descriptor(str(fill_op.name),
                                             match.consumed_nodes,
                                             shape, scalar))
        return descriptors
Exemplo n.º 30
0
class ScaledBatchNormLayerResolver(BatchNormLayerResolver):
    def __init__(self):
        # Unfused batchnorm spelled out as elementwise ops:
        # rsqrt(variance + epsilon) scaled ('a'..'c') is applied to the input
        # ('d') and combined with mean/beta ('e', 'f') before the final add ('g').
        self.sequence = GraphSequence([
            ConverterSequenceNode('a', ['Add']),
            ConverterSequenceNode('b', ['Rsqrt']),
            ConverterSequenceNode('c', ['Mul']),
            ConverterSequenceNode('d', ['Mul']),
            ConverterSequenceNode('e', ['Mul']),
            ConverterSequenceNode('f', ['Sub']),
            ConverterSequenceNode('g', ['Add']),
            ConverterSequenceNode('scale', ['?']),
            NonConsumableConverterSequenceNode('input', ['?']),
            ConverterSequenceNode('mean', ['?']),
            ConverterSequenceNode('beta', ['?']),
            ConverterSequenceNode('variance', ['?']),
            ConverterSequenceNode('epsilon', ['?'])
        ])
        for node_id, node_inputs in [('a', ['variance', 'epsilon']),
                                     ('b', ['a']),
                                     ('c', ['b', 'scale']),
                                     ('d', ['c', 'input']),
                                     ('e', ['c', 'mean']),
                                     ('f', ['e', 'beta']),
                                     ('g', ['d', 'f'])]:
            self.sequence.set_inputs(node_id, node_inputs)
        self.sequence.set_outputs(['g'])

    def resolve_layer(self, graph_matcher, graph_helper):
        matches = graph_matcher.match_sequence(self.sequence)
        if not matches:
            return []

        def evaluate_param(param_op, allowed_types, error_key):
            # Batchnorm parameters must be constant-foldable; anything else
            # cannot be resolved to a numeric value.
            if param_op.type not in allowed_types:
                raise ConverterError(
                    code_to_message.get_error_message(error_key))
            return graph_helper.evaluate_tensor_output(param_op.outputs[0])

        descriptors = []
        for match in matches:
            # Validation order matters for which error surfaces first:
            # variance, epsilon, scale, mean, beta.
            variance = evaluate_param(match['variance'],
                                      ['Identity', 'Const'],
                                      'ERROR_TF_BATCHNORM_RESOLVE_VARIANCE')
            epsilon = evaluate_param(match['epsilon'],
                                     ['Identity', 'Const'],
                                     'ERROR_TF_BATCHNORM_RESOLVE_EPSILON')
            scale = evaluate_param(match['scale'],
                                   ['Identity', 'Const', 'Fill'],
                                   'ERROR_TF_BATCHNORM_RESOLVE_SCALE')
            mean = evaluate_param(match['mean'],
                                  ['Identity', 'Const'],
                                  'ERROR_TF_BATCHNORM_RESOLVE_MEAN')
            beta = evaluate_param(match['beta'],
                                  ['Identity', 'Const'],
                                  'ERROR_TF_BATCHNORM_RESOLVE_BETA')

            output_op_nodes_names = [
                str(match[node.identifier].outputs[0].name)
                for node in self.sequence.output_nodes
            ]
            descriptors.append(
                BatchNormLayerResolver.Descriptor(
                    str(match['d'].name),
                    match.consumed_nodes,
                    bn_mul_op=match['d'],
                    mean=mean,
                    variance=variance,
                    epsilon=epsilon,
                    scale=scale,
                    beta=beta,
                    output_names=output_op_nodes_names))
        return descriptors