def __init__(self):
    # Plain pattern: a Tile op consuming an arbitrary input and a multiples tensor.
    plain = GraphSequence([
        NonConsumableConverterSequenceNode('input', ['?']),
        ConverterSequenceNode('tile', ['Tile']),
        NonConsumableConverterSequenceNode('multiples', ['?'])
    ])
    plain.set_inputs('tile', ['input', 'multiples'])
    plain.set_outputs(['tile'])
    self.sequence = plain

    # Dynamic-multiples pattern: input->Shape->StridedSlice->Pack->Tile,
    # where the tile multiples are assembled from the input's shape at runtime.
    packed = GraphSequence([
        NonConsumableConverterSequenceNode('input', ['?']),
        NonConsumableConverterSequenceNode('shape', ['Shape']),
        NonConsumableConverterSequenceNode('stridedslice', ['StridedSlice']),
        NonConsumableConverterSequenceNode('const_3', ['Const']),
        NonConsumableConverterSequenceNode('const_4', ['Const']),
        NonConsumableConverterSequenceNode('const_5', ['Const']),
        ConverterSequenceNode('tile', ['Tile']),
        NonConsumableConverterSequenceNode('tile_multiples_pack', ['Pack']),
        NonConsumableConverterSequenceNode('const_1', ['Const']),
        NonConsumableConverterSequenceNode('const_2', ['Const']),
        NonConsumableConverterSequenceNode('tile_input', ['?'])
    ])
    packed.set_inputs('shape', ['input'])
    packed.set_inputs('stridedslice', ['shape', 'const_3', 'const_4', 'const_5'])
    packed.set_inputs('tile_multiples_pack', ['stridedslice', 'const_1', 'const_2'])
    packed.set_inputs('tile', ['tile_input', 'tile_multiples_pack'])
    packed.set_outputs(['tile'])
    self.sequence_pack = packed
class Relu6LayerResolver(ReluMinMaxLayerResolver, object):
    """Resolves standalone Relu6 ops as clamped ReLU layers (min=0, max=6)."""

    class Descriptor(ReluMinMaxLayerResolver.Descriptor):
        def __init__(self, name, nodes):
            super(Relu6LayerResolver.Descriptor, self).__init__(
                'Relu6', name, nodes, min_clamp=0, max_clamp=6)

    def __init__(self):
        self.sequence = GraphSequence([ConverterSequenceNode('root', ['Relu6'])])
        self.sequence.set_outputs(['root'])

    def resolve_layer(self, graph_matcher, graph_helper):
        # One descriptor per matched Relu6 op; empty list when nothing matches.
        descriptors = []
        for match in graph_matcher.match_sequence(self.sequence):
            relu6_op = match['root']
            descriptors.append(
                Relu6LayerResolver.Descriptor(str(relu6_op.name), match.consumed_nodes))
        return descriptors
def __init__(self):
    # Transpose with the permutation supplied directly as a Const.
    explicit = GraphSequence([
        ConverterSequenceNode('root', ['Transpose']),
        ConverterSequenceNode('order', ['Const']),
        NonConsumableConverterSequenceNode('input', ['?']),
    ])
    explicit.set_inputs('root', ['input', 'order'])
    explicit.set_outputs(['root'])
    self.sequence_with_explicit_order = explicit

    # Transpose whose permutation is computed from the input's Rank via
    # Sub/Range ops (presumably the "reverse all axes" idiom — TODO confirm).
    implicit = GraphSequence([
        ConverterSequenceNode('root', ['Transpose']),
        ConverterSequenceNode('order', ['Sub']),
        ConverterSequenceNode('a', ['Sub']),
        ConverterSequenceNode('b', ['Const']),
        ConverterSequenceNode('c', ['Range']),
        ConverterSequenceNode('d', ['Const']),
        ConverterSequenceNode('e', ['Const']),
        ConverterSequenceNode('f', ['Rank']),
        NonConsumableConverterSequenceNode('input', ['?'])
    ])
    implicit.set_inputs('root', ['input', 'order'])
    implicit.set_inputs('order', ['a', 'c'])
    implicit.set_inputs('a', ['b', 'f'])
    implicit.set_inputs('c', ['d', 'e', 'f'])
    implicit.set_inputs('f', ['input'])
    implicit.set_outputs(['root'])
    self.sequence_with_implicit_order = implicit

    self.sequences = [explicit, implicit]
class EluLayerResolver(LayerResolver, object):
    """Resolves standalone Elu ops into ELU layers."""

    class Descriptor(LayerDescriptor):
        def __init__(self, layer_type, name, nodes):
            super(EluLayerResolver.Descriptor, self).__init__(layer_type, name, nodes)

        @property
        def output_names(self):
            # Single output: the first tensor of the matched Elu op.
            return [str(self.child_ops[0].outputs[0].name)]

        def is_output_op(self, op):
            return op in self.child_ops

        def get_output_names_for(self, input_tensors):
            return self.output_names

    def __init__(self):
        self.sequence = GraphSequence([ConverterSequenceNode('root', ['Elu'])])
        self.sequence.set_outputs(['root'])

    def resolve_layer(self, graph_matcher, graph_helper):
        descriptors = []
        for match in graph_matcher.match_sequence(self.sequence):
            elu_op = match['root']
            descriptors.append(
                EluLayerResolver.Descriptor('ELU', str(elu_op.name), match.consumed_nodes))
        return descriptors
class EltWiseUnaryLayerResolver(LayerResolver, object):
    """Abstract resolver for single-input element-wise unary ops."""
    __metaclass__ = ABCMeta

    def __init__(self, layer_type, op_type, descriptor_class):
        super(EltWiseUnaryLayerResolver, self).__init__()
        self._layer_type = layer_type
        self._op_type = op_type
        self._descriptor_class = descriptor_class
        # Match a lone op of the configured type fed by any producer.
        self.sequence = GraphSequence([
            ConverterSequenceNode('root', [self._op_type]),
            NonConsumableConverterSequenceNode('input1', ['?']),
        ])
        self.sequence.set_inputs('root', ['input1'])
        self.sequence.set_outputs(['root'])

    def resolve_layer(self, graph_matcher, graph_helper):
        descriptors = []
        for sequence in [self.sequence]:
            for match in graph_matcher.match_sequence(sequence):
                root_op = match['root']
                descriptors.append(self._descriptor_class(
                    self._layer_type, str(root_op.name), match.consumed_nodes))
        return descriptors
def __init__(self):
    # ArgMax over an arbitrary input with a constant axis.
    nodes = [
        ConverterSequenceNode('root', ['ArgMax']),
        ConverterSequenceNode('axis', ['Const']),
        NonConsumableConverterSequenceNode('input', ['?']),
    ]
    self.sequence = GraphSequence(nodes)
    self.sequence.set_inputs('root', ['input', 'axis'])
    self.sequence.set_outputs(['root'])
def __init__(self):
    # Slice taking input, offsets and size tensors of any origin.
    slice_seq = GraphSequence([
        ConverterSequenceNode('root', ['Slice']),
        NonConsumableConverterSequenceNode('input', ['?']),
        NonConsumableConverterSequenceNode('offsets', ['?']),
        NonConsumableConverterSequenceNode('size', ['?']),
    ])
    slice_seq.set_inputs('root', ['input', 'offsets', 'size'])
    slice_seq.set_outputs(['root'])
    self.sequence = slice_seq
def __init__(self):
    # FakeQuantWithMinMaxVars with its min/max range inputs.
    fq_seq = GraphSequence([
        ConverterSequenceNode('root', ['FakeQuantWithMinMaxVars']),
        ConverterSequenceNode('min', ['?']),
        ConverterSequenceNode('max', ['?']),
        NonConsumableConverterSequenceNode('input', ['?'])
    ])
    fq_seq.set_inputs('root', ['input', 'min', 'max'])
    fq_seq.set_outputs(['root'])
    self.sequence = fq_seq
def __init__(self, layer_type, descriptor_type, pooling_type, op_type):
    super(PoolingLayerResolver, self).__init__()
    self._layer_type = layer_type
    self._descriptor_type = descriptor_type
    # NOTE(review): '_polling_type' is a historical misspelling of
    # 'pooling'; preserved because other methods of this class read it.
    self._polling_type = pooling_type
    self._op_type = op_type
    # Match any single op of the configured pooling type.
    pool_node = ConverterSequenceNode('root', [self._op_type])
    self.sequence = GraphSequence([pool_node])
    self.sequence.set_outputs(['root'])
def __init__(self):
    # StridedSlice with constant begin/end/strides tensors.
    ss_seq = GraphSequence([
        ConverterSequenceNode('root', ['StridedSlice']),
        ConverterSequenceNode('begin', ['Const']),
        ConverterSequenceNode('end', ['Const']),
        ConverterSequenceNode('strides', ['Const']),
        NonConsumableConverterSequenceNode('input', ['?']),
    ])
    ss_seq.set_inputs('root', ['input', 'begin', 'end', 'strides'])
    ss_seq.set_outputs(['root'])
    self.sequence = ss_seq
def __init__(self):
    # Pattern: Mul by constant weights followed by Add of constant biases.
    scale_seq = GraphSequence([
        NonConsumableConverterSequenceNode('inputs', ['?']),
        ConverterSequenceNode('a', ['Mul']),
        ConverterSequenceNode('b', ['Add']),
        ConverterSequenceNode('weights', ['Const', 'Identity']),
        ConverterSequenceNode('biases', ['Const', 'Identity'])
    ])
    scale_seq.set_inputs('a', ['inputs', 'weights'])
    scale_seq.set_inputs('b', ['a', 'biases'])
    scale_seq.set_outputs(['b'])
    self.sequence = scale_seq
class ConcatLayerResolver(LayerResolver, object):
    """Resolves Concat/ConcatV2 ops into Concatenation layer descriptors."""

    class Descriptor(LayerDescriptor):
        def __init__(self, name, nodes, axis, output_names=None):
            super(ConcatLayerResolver.Descriptor, self).__init__('Concatenation', name, nodes, output_names=output_names)
            # Concatenation axis; may be filled in after construction.
            self.axis = axis

    def __init__(self):
        self.sequence = GraphSequence([ConverterSequenceNode('root', ['Concat', 'ConcatV2'])])
        self.sequence.set_outputs(['root'])

    def resolve_layer(self, graph_matcher, graph_helper):
        """Build one Concatenation descriptor per matched op.

        Const inputs that are concatenated data (rather than the axis) are
        emitted as additional ConstantLayerResolver descriptors feeding the
        concat descriptor.
        """
        matches = graph_matcher.match_sequence(self.sequence)
        if len(matches) == 0:
            return []
        descriptors = []
        for match in matches:
            concat_op = match['root']
            consumed_nodes = match.consumed_nodes
            # Axis is unknown at this point; set to None and patched below.
            concat_descriptor = ConcatLayerResolver.Descriptor(str(concat_op.name), consumed_nodes,
                                                               None, [concat_op.outputs[0].name])
            non_const_inputs = [tensor for tensor in concat_op.inputs if tensor.op.type != 'Const']
            const_ops = [tensor.op for tensor in concat_op.inputs if tensor.op.type == 'Const']
            axis_tensor = None
            # With fewer than two non-const inputs (or several const inputs),
            # const inputs are treated as concatenated data: evaluate each and
            # wrap it in a constant-layer descriptor targeting this concat.
            if len(non_const_inputs) < 2 or len(const_ops) > 1:
                for i in range(0, len(const_ops) - 1):
                    const_value = graph_helper.evaluate_tensor_output(const_ops[i].outputs[0])
                    const_shape = graph_helper.get_op_output_shape(const_ops[i].outputs[0])
                    descriptors.append(ConstantLayerResolver.Descriptor(str(const_ops[i]),
                                                                        [const_ops[i]],
                                                                        const_value,
                                                                        const_shape,
                                                                        concat_descriptor))
                # Make the assumption that the axis is always the last constant
                # NOTE(review): raises IndexError when const_ops is empty —
                # presumably Concat/ConcatV2 always has a Const axis input here;
                # confirm against the matcher's guarantees.
                axis_tensor = const_ops[-1]
            # Rank of the widest non-const input, used to normalize a negative
            # axis value below.
            max_shape = 0
            for t in non_const_inputs:
                shape = graph_helper.get_op_output_shape(t.op)
                if len(shape) > max_shape:
                    max_shape = len(shape)
            # Fall back to locating the single Const input as the axis.
            if not axis_tensor:
                axis_tensor = GraphHelper.filter_single_op_by_type([t.op for t in concat_op.inputs], 'Const')
            axis = int(graph_helper.evaluate_tensor_output(axis_tensor.outputs[0]))
            if axis < 0:
                axis += max_shape
            concat_descriptor.axis = axis
            descriptors.append(concat_descriptor)
        return descriptors
def __init__(self, layer_type, op_type, descriptor_class):
    super(EltWiseUnaryLayerResolver, self).__init__()
    self._layer_type = layer_type
    self._op_type = op_type
    self._descriptor_class = descriptor_class
    # Single op of the configured type consuming one arbitrary input.
    unary_seq = GraphSequence([
        ConverterSequenceNode('root', [self._op_type]),
        NonConsumableConverterSequenceNode('input1', ['?']),
    ])
    unary_seq.set_inputs('root', ['input1'])
    unary_seq.set_outputs(['root'])
    self.sequence = unary_seq
class InstanceNormRMSLayerResolver(LayerResolver, object):
    """Resolves the tf.math.l2_normalize subgraph into an InstanceNormRMS layer."""

    class Descriptor(LayerDescriptor):
        def __init__(self, name, operations, shape):
            super(InstanceNormRMSLayerResolver.Descriptor, self).__init__('InstanceNormRMS', name, operations)
            self.shape = shape
            # SNPE runtime algo is y = x * WEIGHT / rms + BIAS
            # While L2 Normalization is y = x / rms
            # That requires WEIGHT = 1.0 and BIAS = 0.0 to mimic L2 Norm in SNPE
            # Shape of weights/biases should be same as the last dimension of input.
            self.weights = np.ones(shape[-1])
            self.biases = np.zeros(shape[-1])

    def __init__(self):
        # Graph topology of tf.math.l2_normalize:
        # x -> Square -> Sum(axes) -> Maximum(epsilon) -> Rsqrt -> Mul(x)
        self.sequence = GraphSequence([
            NonConsumableConverterSequenceNode('input', ['?']),
            ConverterSequenceNode('a', ['Square']),
            ConverterSequenceNode('weights', ['Const', 'Identity']),
            ConverterSequenceNode('b', ['Sum']),
            ConverterSequenceNode('epsilon', ['Const', 'Identity']),
            ConverterSequenceNode('c', ['Maximum']),
            ConverterSequenceNode('d', ['Rsqrt']),
            ConverterSequenceNode('e', ['Mul'])
        ])
        self.sequence.set_inputs('a', ['input'])
        self.sequence.set_inputs('b', ['a', 'weights'])
        self.sequence.set_inputs('c', ['b', 'epsilon'])
        self.sequence.set_inputs('d', ['c'])
        self.sequence.set_inputs('e', ['d', 'input'])
        self.sequence.set_outputs(['e'])

    # For now, elementwise resolver cannot work with epsilon node.
    # Will meet error "ElementWise resolver must implement broadcast method.".
    def is_final_resolution(self):
        return True

    def resolve_layer(self, graph_matcher, graph_helper):
        matches = graph_matcher.match_sequence(self.sequence)
        potential_descriptors = []
        for match in matches:
            # FIX: the matched sequence has no node named 'SquaredDifference',
            # so match['SquaredDifference'] raised KeyError on every match.
            # Anchor the layer on the sequence's output node 'e' (the final Mul).
            output_op = match['e']
            input_op = match['input']
            shape = graph_helper.get_op_output_shape(input_op)
            consumed_nodes = match.consumed_nodes
            potential_descriptors.append(
                InstanceNormRMSLayerResolver.Descriptor(str(output_op.name),
                                                        consumed_nodes,
                                                        shape=shape))
        return potential_descriptors
def __init__(self):
    super(DepthwiseConvolutionLayerResolver, self).__init__()
    # DepthwiseConv2dNative followed by a bias add.
    with_bias = GraphSequence([
        ConverterSequenceNode('conv', ['DepthwiseConv2dNative']),
        ConverterSequenceNode('bias', ['BiasAdd', 'Add']),
        NonConsumableConverterSequenceNode('other', ['?'])
    ])
    with_bias.set_inputs('bias', ['conv', 'other'])
    with_bias.set_outputs(['bias'])
    self.graph_sequence_with_bias = with_bias

    # Bare DepthwiseConv2dNative with no bias.
    bare = GraphSequence([ConverterSequenceNode('conv', ['DepthwiseConv2dNative'])])
    bare.set_outputs(['conv'])
    self.graph_sequence = bare
def __init__(self, layer_type, op_type, descriptor_class):
    super(ReductionLayerResolver, self).__init__()
    self._layer_type = layer_type
    self._op_type = op_type
    self._descriptor_class = descriptor_class
    # A reduction op of the configured type with constant reduction axes.
    reduction = GraphSequence([
        ConverterSequenceNode('root', [self._op_type]),
        ConverterSequenceNode('reduction_indices', ['Const']),
        NonConsumableConverterSequenceNode('input', ['?']),
    ])
    reduction.set_inputs('root', ['input', 'reduction_indices'])
    reduction.set_outputs(['root'])
    self.sequence = reduction
def __init__(self):
    # Reshape -> Transpose -> Reshape chain with constant shapes and order.
    chain = GraphSequence([
        ConverterSequenceNode('reshape_out', ['Reshape']),
        ConverterSequenceNode('transpose', ['Transpose']),
        ConverterSequenceNode('reshape_in', ['Reshape']),
        ConverterSequenceNode('shape_in', ['Const']),
        ConverterSequenceNode('order', ['Const']),
        ConverterSequenceNode('shape_out', ['Const']),
        NonConsumableConverterSequenceNode('input', ['?']),
    ])
    chain.set_inputs('reshape_out', ['shape_out', 'transpose'])
    chain.set_inputs('transpose', ['order', 'reshape_in'])
    chain.set_inputs('reshape_in', ['shape_in', 'input'])
    chain.set_outputs(['reshape_out'])
    self.sequence = chain
class ArgMaxLayerResolver(LayerResolver, object):
    """Resolves ArgMax ops with a constant axis into ArgMax layers."""

    class Descriptor(LayerDescriptor):
        def __init__(self, name, nodes, axis, output_names=None):
            super(ArgMaxLayerResolver.Descriptor, self).__init__(
                'ArgMax', name, nodes, output_names=output_names)
            self.axis = axis

    def __init__(self):
        self.sequence = GraphSequence([
            ConverterSequenceNode('root', ['ArgMax']),
            ConverterSequenceNode('axis', ['Const']),
            NonConsumableConverterSequenceNode('input', ['?']),
        ])
        self.sequence.set_inputs('root', ['input', 'axis'])
        self.sequence.set_outputs(['root'])

    def resolve_layer(self, graph_matcher, graph_helper):
        descriptors = []
        for match in graph_matcher.match_sequence(self.sequence):
            argmax_op = match['root']
            input_rank = len(graph_helper.get_op_output_shape(match['input']))
            axis = int(graph_helper.evaluate_tensor_output(match['axis'].outputs[0]))
            # Normalize a negative axis, then validate the resulting value.
            if axis < 0:
                axis += input_rank
            if not 0 <= axis < input_rank:
                raise ConverterError(
                    code_to_message.get_error_message(
                        'ERROR_TF_ARGMAX_INVALID_AXIS')(axis, input_rank))
            descriptors.append(ArgMaxLayerResolver.Descriptor(
                str(argmax_op.name), match.consumed_nodes, axis,
                output_names=[str(argmax_op.outputs[0].name)]))
        return descriptors
class ImageProjectiveTransformLayerResolver(LayerResolver, object):
    """Resolves ImageProjectiveTransform ops, mapping interpolation to an enum."""

    class Descriptor(LayerDescriptor):
        def __init__(self, name, operations, interpolation_mode, output_names=None):
            super(ImageProjectiveTransformLayerResolver.Descriptor, self).__init__(
                'ImageProjectiveTransform', name, operations, output_names=output_names)
            self.interpolation_mode = interpolation_mode

    def __init__(self):
        self.sequence = GraphSequence(
            [ConverterSequenceNode('root', ['ImageProjectiveTransform'])])
        self.sequence.set_outputs(['root'])

    def resolve_layer(self, graph_matcher, graph_helper):
        descriptors = []
        # Supported interpolation attribute values and their enum codes.
        mode_by_name = {"BILINEAR": 0, "NEAREST": 1}
        for match in graph_matcher.match_sequence(self.sequence):
            transform_op = match['root']
            interpolation = str(transform_op.get_attr('interpolation').decode('utf-8'))
            if interpolation not in mode_by_name:
                raise ConverterError(
                    code_to_message.get_error_message(
                        'ERROR_TF_RESOLVE_IMAGE_TRANSFORM_INTERPOLATION'))
            descriptors.append(
                ImageProjectiveTransformLayerResolver.Descriptor(
                    str(transform_op.name),
                    match.consumed_nodes,
                    mode_by_name[interpolation],
                    output_names=[str(transform_op.outputs[0].name)]))
        return descriptors
class BatchNormWithGlobalNormLayerResolver(BatchNormLayerResolver):
    """Resolves BatchNormWithGlobalNormalization ops into batchnorm layers."""

    class Descriptor(BatchNormLayerResolver.Descriptor):
        pass

    def __init__(self):
        self.sequence = GraphSequence([
            ConverterSequenceNode('root', ['BatchNormWithGlobalNormalization'])
        ])
        self.sequence.set_outputs(['root'])

    def resolve_layer(self, graph_matcher, graph_helper):
        descriptors = []
        for match in graph_matcher.match_sequence(self.sequence):
            bn_op = match['root']
            params = self._const_inputs(graph_helper, bn_op)
            # Expect mean, variance, beta and scale as Const inputs, in order.
            if len(params) < 4:
                raise ConverterError(
                    code_to_message.get_error_message(
                        'ERROR_TF_BATCHNORM_GLOBALNORMALIZATION_INPUT'))
            descriptors.append(
                BatchNormWithGlobalNormLayerResolver.Descriptor(
                    str(bn_op.name),
                    match.consumed_nodes,
                    bn_mul_op=bn_op,
                    mean=params[0],
                    variance=params[1],
                    epsilon=bn_op.get_attr('variance_epsilon'),
                    scale=params[3],
                    beta=params[2]))
        return descriptors

    @classmethod
    def _const_inputs(cls, graph_helper, bn_op):
        # Evaluate every Const input tensor, preserving input order.
        return [graph_helper.evaluate_tensor_output(tensor)
                for tensor in bn_op.inputs if tensor.op.type == 'Const']
def __init__(self):
    # Keras-style ReLU6: Relu, then Minimum against a cast Const,
    # then Maximum against another Const.
    keras_seq = GraphSequence([
        NonConsumableConverterSequenceNode('input', ['?']),
        ConverterSequenceNode('root', ['Relu']),
        ConverterSequenceNode('min', ['Minimum']),
        ConverterSequenceNode('min_cast', ['Cast']),
        ConverterSequenceNode('min_const', ['Const']),
        ConverterSequenceNode('max', ['Maximum']),
        ConverterSequenceNode('max_const', ['Const'])
    ])
    keras_seq.set_inputs('root', ['input'])
    keras_seq.set_inputs('min_cast', ['min_const'])
    keras_seq.set_inputs('min', ['root', 'min_cast'])
    keras_seq.set_inputs('max', ['min', 'max_const'])
    keras_seq.set_outputs(['max'])
    self.sequences = [keras_seq]
class FusedBatchNormNormLayerResolver(BatchNormLayerResolver):
    """Resolves FusedBatchNorm ops into batchnorm layers."""

    class Descriptor(BatchNormLayerResolver.Descriptor):
        pass

    def __init__(self):
        self.sequence = GraphSequence([ConverterSequenceNode('root', ['FusedBatchNorm'])])
        self.sequence.set_outputs(['root'])

    def resolve_layer(self, graph_matcher, graph_helper):
        descriptors = []
        for match in graph_matcher.match_sequence(self.sequence):
            bn_op = match['root']
            params = self._get_parameter_tensors(graph_helper, bn_op)
            # Expect scale, beta, mean, variance as Const/Identity inputs,
            # in that order.
            if len(params) < 4:
                raise ConverterError(
                    code_to_message.get_error_message(
                        'ERROR_TF_BATCHNORM_GLOBALNORMALIZATION_INPUT'))
            descriptors.append(
                FusedBatchNormNormLayerResolver.Descriptor(
                    str(bn_op.name),
                    match.consumed_nodes,
                    bn_mul_op=bn_op,
                    mean=params[2],
                    variance=params[3],
                    epsilon=bn_op.get_attr('epsilon'),
                    scale=params[0],
                    beta=params[1]))
        return descriptors

    @classmethod
    def _get_parameter_tensors(cls, graph_helper, bn_op):
        # Batch-evaluate all Const/Identity inputs, preserving input order.
        param_tensors = [t for t in bn_op.inputs
                         if t.op.type in ['Const', 'Identity']]
        evaluated = graph_helper.evaluate_tensors_output(param_tensors)
        return [evaluated[t] for t in param_tensors]
def __init__(self):
    # Graph topology of tf.math.l2_normalize:
    # x -> Square -> Sum(axes) -> Maximum(epsilon) -> Rsqrt -> Mul(x)
    l2_seq = GraphSequence([
        NonConsumableConverterSequenceNode('input', ['?']),
        ConverterSequenceNode('a', ['Square']),
        ConverterSequenceNode('weights', ['Const', 'Identity']),
        ConverterSequenceNode('b', ['Sum']),
        ConverterSequenceNode('epsilon', ['Const', 'Identity']),
        ConverterSequenceNode('c', ['Maximum']),
        ConverterSequenceNode('d', ['Rsqrt']),
        ConverterSequenceNode('e', ['Mul'])
    ])
    l2_seq.set_inputs('a', ['input'])
    l2_seq.set_inputs('b', ['a', 'weights'])
    l2_seq.set_inputs('c', ['b', 'epsilon'])
    l2_seq.set_inputs('d', ['c'])
    l2_seq.set_inputs('e', ['d', 'input'])
    l2_seq.set_outputs(['e'])
    self.sequence = l2_seq
class SliceLayerResolver(LayerResolver, object):
    """Resolves TF Split/SplitV ops into Slice layer descriptors."""

    class Descriptor(LayerDescriptor):
        def __init__(self, name, nodes, axis, split_sizes, split_count):
            super(SliceLayerResolver.Descriptor, self).__init__('Slice', name, nodes)
            # Axis along which the input is split.
            self.axis = axis
            # Explicit per-output sizes (SplitV), or [] for equal splits (Split).
            self.split_sizes = split_sizes
            # Value of the op's 'num_split' attribute.
            self.split_count = split_count

        @property
        def output_names(self):
            # One output tensor per split, read off the Split/SplitV op itself.
            return [str(t.name) for t in self.child_ops[-1].outputs]

    def __init__(self):
        self.sequence = GraphSequence([ConverterSequenceNode('root', ['Split', 'SplitV'])])
        self.sequence.set_outputs(['root'])

    def resolve_layer(self, graph_matcher, graph_helper):
        """Build one Slice descriptor per matched Split/SplitV op."""
        matches = graph_matcher.match_sequence(self.sequence)
        if len(matches) == 0:
            return []
        potential_descriptors = []
        for match in matches:
            split_op = match['root']
            split_axis, split_sizes = self.get_split_axis_and_sizes(graph_helper, split_op)
            split_count = int(split_op.get_attr('num_split'))
            consumed_nodes = match.consumed_nodes
            potential_descriptors.append(
                SliceLayerResolver.Descriptor(str(split_op.name), consumed_nodes,
                                              split_axis,
                                              split_sizes,
                                              split_count))
        return potential_descriptors

    @classmethod
    def get_split_axis_and_sizes(cls, graph_helper, split_op):
        """Extract (axis, sizes) from a Split/SplitV op.

        SplitV's inputs are (input, Const size_splits, Const axis); plain
        Split's inputs are (Const axis, input) with equal-sized splits.
        The first pattern is tried and TensorNotFoundError falls back to
        the second.
        """
        try:
            # SplitV layout: data input, constant size_splits, constant axis.
            _, split_sizes, split_axis = GraphHelper.get_op_input_tensors(split_op, ('?', 'Const', 'Const'))
            split_sizes = list(graph_helper.evaluate_tensor_output(split_sizes))
        except TensorNotFoundError:
            # Split layout: constant axis first, then the data input;
            # sizes stay empty meaning equal-sized splits.
            split_axis, _ = GraphHelper.get_op_input_tensors(split_op, ('Const', '?'))
            split_sizes = []
        split_axis = int(graph_helper.evaluate_tensor_output(split_axis))
        return split_axis, split_sizes
def __init__(self):
    super(DilatedConvolutionLayerResolver, self).__init__()
    # Dilated convolution expressed as SpaceToBatchND -> Conv2D -> BatchToSpaceND.
    dilated = GraphSequence([
        ConverterSequenceNode('space_to_batch', ['SpaceToBatchND']),
        NonConsumableConverterSequenceNode('inputs', ['?']),
        ConverterSequenceNode('dilation_sizes', ['?']),
        ConverterSequenceNode('paddings', ['?']),
        ConverterSequenceNode('conv_op', ['Conv2D']),
        ConverterSequenceNode('kernel', ['?']),
        ConverterSequenceNode('batch_to_space', ['BatchToSpaceND']),
        ConverterSequenceNode('block_shape_out', ['?']),
        ConverterSequenceNode('crops', ['?'])
    ])
    dilated.set_inputs('space_to_batch', ['inputs', 'dilation_sizes', 'paddings'])
    dilated.set_inputs('conv_op', ['space_to_batch', 'kernel'])
    dilated.set_inputs('batch_to_space', ['conv_op', 'block_shape_out', 'crops'])
    dilated.set_outputs(['batch_to_space'])
    self.graph_sequence = dilated
def __init__(self):
    # Unfused batchnorm: rsqrt(variance + epsilon) multiplied into the input,
    # with a mean/beta correction term added at the end ('f' is the output).
    bn_seq = GraphSequence([
        ConverterSequenceNode('a', ['Add']),
        ConverterSequenceNode('b', ['Rsqrt']),
        ConverterSequenceNode('c', ['Mul']),
        ConverterSequenceNode('d', ['Mul']),
        ConverterSequenceNode('e', ['Sub']),
        ConverterSequenceNode('f', ['Add']),  # output
        NonConsumableConverterSequenceNode('inputs', ['?']),
        ConverterSequenceNode('mean', ['?']),
        ConverterSequenceNode('beta', ['?']),
        ConverterSequenceNode('variance', ['?']),
        ConverterSequenceNode('epsilon', ['?'])
    ])
    bn_seq.set_inputs('a', ['variance', 'epsilon'])
    bn_seq.set_inputs('b', ['a'])
    bn_seq.set_inputs('c', ['b', 'inputs'])
    bn_seq.set_inputs('d', ['b', 'mean'])
    bn_seq.set_inputs('e', ['d', 'beta'])
    bn_seq.set_inputs('f', ['c', 'e'])
    bn_seq.set_outputs(['f'])
    self.sequence = bn_seq
class TanhLayerResolver(LayerResolver, object):
    """Resolves standalone Tanh ops into Tanh layers."""

    class Descriptor(LayerDescriptor):
        pass

    def __init__(self):
        self.sequence = GraphSequence([ConverterSequenceNode('root', ['Tanh'])])
        self.sequence.set_outputs(['root'])

    def resolve_layer(self, graph_matcher, graph_helper):
        descriptors = []
        for match in graph_matcher.match_sequence(self.sequence):
            tanh_op = match['root']
            descriptors.append(
                TanhLayerResolver.Descriptor('Tanh', str(tanh_op.name), match.consumed_nodes))
        return descriptors
def __init__(self):
    # Pad/PadV2 with only a paddings tensor (implicit zero fill).
    zero_pad = GraphSequence([
        ConverterSequenceNode('root', ['Pad', 'PadV2']),
        ConverterSequenceNode('paddings', ['Const']),
        NonConsumableConverterSequenceNode('input', ['?']),
    ])
    zero_pad.set_inputs('root', ['input', 'paddings'])
    zero_pad.set_outputs(['root'])
    self.sequence_with_zero_padding = zero_pad

    # Pad/PadV2 with an explicit constant fill value.
    const_pad = GraphSequence([
        ConverterSequenceNode('root', ['Pad', 'PadV2']),
        ConverterSequenceNode('paddings', ['Const']),
        ConverterSequenceNode('const_values', ['Const']),
        NonConsumableConverterSequenceNode('input', ['?']),
    ])
    const_pad.set_inputs('root', ['input', 'paddings', 'const_values'])
    const_pad.set_outputs(['root'])
    self.sequence_with_const_padding = const_pad

    # MirrorPad (reflect-style padding).
    mirror_pad = GraphSequence([
        ConverterSequenceNode('mirror_pad', ['MirrorPad']),
        ConverterSequenceNode('paddings', ['Const']),
        NonConsumableConverterSequenceNode('input', ['?']),
    ])
    mirror_pad.set_inputs('mirror_pad', ['input', 'paddings'])
    mirror_pad.set_outputs(['mirror_pad'])
    self.sequence_with_reflect_padding = mirror_pad

    self.sequences = [zero_pad, const_pad, mirror_pad]
class PixelShuffleLayerResolver(LayerResolver, object):
    """Resolves DepthToSpace ops into PixelShuffle layers."""

    TF_ATTRIBUTE_BLOCK_SIZE = 'block_size'

    class Descriptor(LayerDescriptor):
        def __init__(self, layer_type, name, nodes, upscale_factor):
            super(PixelShuffleLayerResolver.Descriptor, self).__init__(layer_type, name, nodes)
            self.upscale_factor = upscale_factor

        @property
        def output_names(self):
            # Single output: the first tensor of the matched DepthToSpace op.
            return [str(self.child_ops[0].outputs[0].name)]

        def is_output_op(self, op):
            return op in self.child_ops

        def get_output_names_for(self, input_tensors):
            return self.output_names

    def __init__(self):
        self.sequence = GraphSequence([ConverterSequenceNode('root', ['DepthToSpace'])])
        self.sequence.set_outputs(['root'])

    def resolve_layer(self, graph_matcher, graph_helper):
        descriptors = []
        for match in graph_matcher.match_sequence(self.sequence):
            d2s_op = match['root']
            # The op's block_size attribute is the pixel-shuffle upscale factor.
            block_size = d2s_op.get_attr(self.TF_ATTRIBUTE_BLOCK_SIZE)
            descriptors.append(
                PixelShuffleLayerResolver.Descriptor(
                    'PixelShuffle', str(d2s_op.name), match.consumed_nodes, block_size))
        return descriptors
class PoolingLayerResolver(LayerResolver, object):
    """Abstract resolver for single-op pooling layers (e.g. MaxPool/AvgPool)."""
    __metaclass__ = ABCMeta

    class Descriptor(LayerDescriptor):
        def __init__(self, layer_type, name, operations, pooling_type, strides, padding, kernel_dims):
            super(PoolingLayerResolver.Descriptor, self).__init__(layer_type, name, operations)
            self.pooling_type = pooling_type
            self.strides = strides
            self.padding = padding
            self.kernel_dims = kernel_dims

    def __init__(self, layer_type, descriptor_type, pooling_type, op_type):
        super(PoolingLayerResolver, self).__init__()
        self._layer_type = layer_type
        self._descriptor_type = descriptor_type
        # FIX: '_polling_type' was a misspelling of '_pooling_type'. The
        # corrected name is used internally; the old name is kept as an
        # alias in case external code still reads it.
        self._pooling_type = pooling_type
        self._polling_type = pooling_type
        self._op_type = op_type
        self.sequence = GraphSequence(
            [ConverterSequenceNode('root', [self._op_type])])
        self.sequence.set_outputs(['root'])

    def resolve_layer(self, graph_matcher, graph_helper):
        """Build one pooling descriptor per matched op, reading the op's
        ksize/strides/padding attributes."""
        matches = graph_matcher.match_sequence(self.sequence)
        if len(matches) == 0:
            return []
        potential_descriptors = []
        for match in matches:
            pooling_op = match['root']
            kernel_dims = pooling_op.get_attr('ksize')
            strides = pooling_op.get_attr('strides')
            padding = pooling_op.get_attr('padding')
            consumed_nodes = match.consumed_nodes
            potential_descriptors.append(
                self._descriptor_type(self._layer_type, str(pooling_op.name),
                                      consumed_nodes,
                                      self._pooling_type,
                                      strides, padding, kernel_dims))
        return potential_descriptors