class LrnLayerResolver(LayerResolver, object):
    """Resolves standalone TF LRN ops into 'LRN' layer descriptors."""

    class Descriptor(LayerDescriptor):
        def __init__(self, name, operations, window_size, alpha, beta, bias):
            super(LrnLayerResolver.Descriptor, self).__init__('LRN', name, operations)
            self.window_size = window_size
            self.alpha = alpha
            self.beta = beta
            self.bias = bias

    def __init__(self):
        self.sequence = GraphSequence([ConverterSequenceNode('root', ['LRN'])])
        self.sequence.set_outputs(['root'])

    def resolve_layer(self, graph_matcher, graph_helper):
        """Return one LRN descriptor per matched op."""
        matches = graph_matcher.match_sequence(self.sequence)
        if not matches:
            return []
        descriptors = []
        for match in matches:
            lrn_op = match['root']
            # TF stores the radius; the descriptor carries the full window size.
            window = 1 + 2 * lrn_op.get_attr('depth_radius')
            descriptors.append(
                LrnLayerResolver.Descriptor(str(lrn_op.name),
                                            match.consumed_nodes,
                                            window,
                                            lrn_op.get_attr('alpha'),
                                            lrn_op.get_attr('beta'),
                                            lrn_op.get_attr('bias')))
        return descriptors
class FillLayerResolver(LayerResolver, object):
    """Resolves TF Fill ops into 'Fill' layer descriptors with a rank-3 shape."""

    class Descriptor(LayerDescriptor):
        def __init__(self, name, nodes, shape, scalar):
            super(FillLayerResolver.Descriptor, self).__init__('Fill', name, nodes)
            self.shape = shape
            self.scalar = scalar

    def __init__(self):
        self.sequence = GraphSequence([ConverterSequenceNode('root', ['Fill'])])
        self.sequence.set_outputs(['root'])

    def resolve_layer(self, graph_matcher, graph_helper):
        """Evaluate the shape and fill value of each matched Fill op."""
        descriptors = []
        for match in graph_matcher.match_sequence(self.sequence):
            fill_op = match['root']
            shape_tensor, scalar_tensor = GraphHelper.get_op_input_tensors(fill_op, ('?', 'Const'))
            shape = graph_helper.evaluate_tensor_output(shape_tensor).tolist()
            # Normalize to rank 3: drop leading dims beyond three, or pad with
            # leading 1s when there are fewer.
            shape = shape[-3:] if len(shape) > 3 else [1] * (3 - len(shape)) + shape
            scalar = graph_helper.evaluate_tensor_output(scalar_tensor)
            descriptors.append(
                FillLayerResolver.Descriptor(str(fill_op.name), match.consumed_nodes, shape, scalar))
        return descriptors
class CropLayerResolver(LayerResolver, object):
    """Resolves TF Slice ops into 'Crop' layer descriptors."""

    class Descriptor(LayerDescriptor):
        def __init__(self, name, nodes, offset, size, output_names=None):
            super(CropLayerResolver.Descriptor, self).__init__('Crop', name, nodes,
                                                               output_names=output_names)
            self.offset = offset
            self.size = size

    def __init__(self):
        seq = GraphSequence([
            ConverterSequenceNode('root', ['Slice']),
            NonConsumableConverterSequenceNode('input', ['?']),
            NonConsumableConverterSequenceNode('offsets', ['?']),
            NonConsumableConverterSequenceNode('size', ['?']),
        ])
        seq.set_inputs('root', ['input', 'offsets', 'size'])
        seq.set_outputs(['root'])
        self.sequence = seq

    def resolve_layer(self, graph_matcher, graph_helper):
        """Build Crop descriptors, resolving any -1 size to 'rest of the dimension'."""
        descriptors = []
        for match in graph_matcher.match_sequence(self.sequence):
            slice_op = match['root']
            input_shape = graph_helper.get_op_output_shape(match['input'])
            offset = graph_helper.evaluate_tensor_output(match['offsets'].outputs[0])
            size = graph_helper.evaluate_tensor_output(match['size'].outputs[0])
            for axis, extent in enumerate(size):
                if extent == -1:
                    # -1 means "everything from offset to the end of this axis".
                    size[axis] = input_shape[axis] - offset[axis]
            descriptors.append(
                CropLayerResolver.Descriptor(str(slice_op.name), match.consumed_nodes, offset, size))
        return descriptors
class ReluLayerResolver(LayerResolver, object):
    """Resolves standalone TF Relu ops into 'RELU' layer descriptors."""

    class Descriptor(LayerDescriptor):
        def __init__(self, layer_type, name, nodes):
            super(ReluLayerResolver.Descriptor, self).__init__(layer_type, name, nodes)

        @property
        def output_names(self):
            # Single output: the tensor produced by the matched Relu op.
            return [str(self.child_ops[0].outputs[0].name)]

        def is_output_op(self, op):
            return op in self.child_ops

        def get_output_names_for(self, input_tensors):
            return self.output_names

    def __init__(self):
        self.sequence = GraphSequence([ConverterSequenceNode('root', ['Relu'])])
        self.sequence.set_outputs(['root'])

    def resolve_layer(self, graph_matcher, graph_helper):
        """Return one RELU descriptor per matched Relu op (empty list if none)."""
        return [ReluLayerResolver.Descriptor('RELU', str(m['root'].name), m.consumed_nodes)
                for m in graph_matcher.match_sequence(self.sequence)]
def __init__(self):
    """Build the matcher patterns for Transpose ops.

    Two layouts are recognized: a Transpose whose permutation is an explicit
    Const, and one whose permutation is computed in-graph from the input's Rank.
    """
    # Pattern 1: permutation supplied as a Const input.
    self.sequence_with_explicit_order = GraphSequence([
        ConverterSequenceNode('root', ['Transpose']),
        ConverterSequenceNode('order', ['Const']),
        NonConsumableConverterSequenceNode('input', ['?']),
    ])
    self.sequence_with_explicit_order.set_inputs('root', ['input', 'order'])
    self.sequence_with_explicit_order.set_outputs(['root'])

    # Pattern 2: permutation computed as Sub(Sub(Const, Rank), Range(...)) —
    # presumably TF's reversed-axes transpose expansion; TODO confirm.
    self.sequence_with_implicit_order = GraphSequence([
        ConverterSequenceNode('root', ['Transpose']),
        ConverterSequenceNode('order', ['Sub']),
        ConverterSequenceNode('a', ['Sub']),
        ConverterSequenceNode('b', ['Const']),
        ConverterSequenceNode('c', ['Range']),
        ConverterSequenceNode('d', ['Const']),
        ConverterSequenceNode('e', ['Const']),
        ConverterSequenceNode('f', ['Rank']),
        NonConsumableConverterSequenceNode('input', ['?'])
    ])
    self.sequence_with_implicit_order.set_inputs('root', ['input', 'order'])
    self.sequence_with_implicit_order.set_inputs('order', ['a', 'c'])
    self.sequence_with_implicit_order.set_inputs('a', ['b', 'f'])
    self.sequence_with_implicit_order.set_inputs('c', ['d', 'e', 'f'])
    self.sequence_with_implicit_order.set_inputs('f', ['input'])
    self.sequence_with_implicit_order.set_outputs(['root'])

    self.sequences = [self.sequence_with_explicit_order, self.sequence_with_implicit_order]
class Relu6LayerResolver(ReluMinMaxLayerResolver, object):
    """Resolves TF Relu6 ops as a clamp to the [0, 6] range."""

    class Descriptor(ReluMinMaxLayerResolver.Descriptor):
        def __init__(self, name, nodes):
            super(Relu6LayerResolver.Descriptor, self).__init__('Relu6', name, nodes,
                                                                min_clamp=0, max_clamp=6)

    def __init__(self):
        self.sequence = GraphSequence([ConverterSequenceNode('root', ['Relu6'])])
        self.sequence.set_outputs(['root'])

    def resolve_layer(self, graph_matcher, graph_helper):
        """Return one Relu6 descriptor per matched op (empty list if none)."""
        return [Relu6LayerResolver.Descriptor(str(m['root'].name), m.consumed_nodes)
                for m in graph_matcher.match_sequence(self.sequence)]
def __init__(self):
    """Build the matcher pattern: ArgMax(input, axis Const)."""
    seq = GraphSequence([
        ConverterSequenceNode('root', ['ArgMax']),
        ConverterSequenceNode('axis', ['Const']),
        NonConsumableConverterSequenceNode('input', ['?']),
    ])
    seq.set_inputs('root', ['input', 'axis'])
    seq.set_outputs(['root'])
    self.sequence = seq
def __init__(self, layer_type, descriptor_type, pooling_type, op_type):
    """Configure a pooling resolver for one concrete pooling op type.

    :param layer_type: layer type string for the produced descriptors
    :param descriptor_type: descriptor class to instantiate
    :param pooling_type: runtime pooling kind (e.g. max/avg)
    :param op_type: the TF op name this resolver matches
    """
    super(PoolingLayerResolver, self).__init__()
    self._layer_type = layer_type
    self._descriptor_type = descriptor_type
    # NOTE(review): '_polling_type' is a typo for '_pooling_type'. The misspelled
    # attribute is kept because other methods of this class (not visible here)
    # may read it; the correctly-spelled alias is added for new code.
    self._polling_type = pooling_type
    self._pooling_type = pooling_type
    self._op_type = op_type
    self.sequence = GraphSequence([ConverterSequenceNode('root', [self._op_type])])
    self.sequence.set_outputs(['root'])
def __init__(self):
    """Build the matcher pattern: Slice(input, offsets, size)."""
    seq = GraphSequence([
        ConverterSequenceNode('root', ['Slice']),
        NonConsumableConverterSequenceNode('input', ['?']),
        NonConsumableConverterSequenceNode('offsets', ['?']),
        NonConsumableConverterSequenceNode('size', ['?']),
    ])
    seq.set_inputs('root', ['input', 'offsets', 'size'])
    seq.set_outputs(['root'])
    self.sequence = seq
def __init__(self):
    """Build the matcher pattern: StridedSlice(input, begin, end, strides)."""
    seq = GraphSequence([
        ConverterSequenceNode('root', ['StridedSlice']),
        ConverterSequenceNode('begin', ['Const']),
        ConverterSequenceNode('end', ['Const']),
        ConverterSequenceNode('strides', ['Const']),
        NonConsumableConverterSequenceNode('input', ['?']),
    ])
    seq.set_inputs('root', ['input', 'begin', 'end', 'strides'])
    seq.set_outputs(['root'])
    self.sequence = seq
def __init__(self):
    """Build the matcher pattern: b = (inputs * weights) + biases."""
    seq = GraphSequence([
        NonConsumableConverterSequenceNode('inputs', ['?']),
        ConverterSequenceNode('a', ['Mul']),
        ConverterSequenceNode('b', ['Add']),
        ConverterSequenceNode('weights', ['Const', 'Identity']),
        ConverterSequenceNode('biases', ['Const', 'Identity'])
    ])
    seq.set_inputs('a', ['inputs', 'weights'])
    seq.set_inputs('b', ['a', 'biases'])
    seq.set_outputs(['b'])
    self.sequence = seq
def __init__(self):
    """Build matcher patterns for depthwise convolution, with and without bias."""
    super(DepthwiseConvolutionLayerResolver, self).__init__()
    # DepthwiseConv2dNative feeding a BiasAdd.
    with_bias = GraphSequence([
        ConverterSequenceNode('conv', ['DepthwiseConv2dNative']),
        ConverterSequenceNode('bias', ['BiasAdd']),
        NonConsumableConverterSequenceNode('other', ['?'])
    ])
    with_bias.set_inputs('bias', ['conv', 'other'])
    with_bias.set_outputs(['bias'])
    self.graph_sequence_with_bias = with_bias
    # Bare DepthwiseConv2dNative with no bias add.
    bare = GraphSequence([ConverterSequenceNode('conv', ['DepthwiseConv2dNative'])])
    bare.set_outputs(['conv'])
    self.graph_sequence = bare
def __init__(self):
    """Build the matcher pattern: bias_op(MatMul(inputs, weights), biases)."""
    seq = GraphSequence([
        ConverterSequenceNode('matmul_op', ['MatMul']),
        ConverterSequenceNode('bias_op', ['BiasAdd', 'Add']),  # output
        NonConsumableConverterSequenceNode('biases', ['Identity', 'Const']),
        NonConsumableConverterSequenceNode('weights', ['Identity', 'Const']),
        NonConsumableConverterSequenceNode('inputs', ['?'])
    ])
    seq.set_inputs('matmul_op', ['inputs', 'weights'])
    seq.set_inputs('bias_op', ['matmul_op', 'biases'])
    seq.set_outputs(['bias_op'])
    self.sequence = seq
def __init__(self, layer_type, op_type, descriptor_class):
    """Configure a reduction resolver for one concrete op type.

    :param layer_type: layer type string for produced descriptors
    :param op_type: TF reduction op name to match
    :param descriptor_class: descriptor class to instantiate
    """
    super(ReductionLayerResolver, self).__init__()
    self._layer_type = layer_type
    self._op_type = op_type
    self._descriptor_class = descriptor_class
    seq = GraphSequence([
        ConverterSequenceNode('root', [self._op_type]),
        ConverterSequenceNode('reduction_indices', ['Const']),
        NonConsumableConverterSequenceNode('input', ['?']),
    ])
    seq.set_inputs('root', ['input', 'reduction_indices'])
    seq.set_outputs(['root'])
    self.sequence = seq
def __init__(self, layer_type, op_type, descriptor_class):
    """Configure an element-wise resolver and its four matcher patterns.

    :param layer_type: layer type string for produced descriptors
    :param op_type: TF element-wise op name to match
    :param descriptor_class: descriptor class to instantiate
    """
    super(EltWiseLayerResolver, self).__init__()
    self._layer_type = layer_type
    self._op_type = op_type
    self._descriptor_class = descriptor_class

    # Pattern 1: the bare element-wise op.
    self.sequence = GraphSequence([ConverterSequenceNode('root', [self._op_type])])
    self.sequence.set_outputs(['root'])

    # Pattern 2: the op followed by an Identity pass-through.
    self.sequence_with_identity = GraphSequence([
        ConverterSequenceNode('root', [self._op_type]),
        ConverterSequenceNode('identity', ['Identity'])
    ])
    self.sequence_with_identity.set_inputs('identity', ['root'])
    self.sequence_with_identity.set_outputs(['identity'])

    # Pattern 3: the op with one constant (or Identity-wrapped) operand.
    self.sequence_with_const_input = GraphSequence([
        ConverterSequenceNode('root', [self._op_type]),
        NonConsumableConverterSequenceNode('const', ['Const', 'Identity']),
        NonConsumableConverterSequenceNode('other', ['?'])
    ])
    self.sequence_with_const_input.set_inputs('root', ['const', 'other'])
    self.sequence_with_const_input.set_outputs(['root'])

    # Pattern 4: constant operand plus a trailing Identity.
    self.sequence_with_const_input_and_identity = GraphSequence([
        ConverterSequenceNode('root', [self._op_type]),
        ConverterSequenceNode('identity', ['Identity']),
        NonConsumableConverterSequenceNode('const', ['Const']),
        NonConsumableConverterSequenceNode('other', ['?'])
    ])
    self.sequence_with_const_input_and_identity.set_inputs('root', ['const', 'other'])
    self.sequence_with_const_input_and_identity.set_inputs('identity', ['root'])
    self.sequence_with_const_input_and_identity.set_outputs(['identity'])
def __init__(self):
    """Build the matcher pattern for a Reshape -> Transpose -> Reshape chain."""
    seq = GraphSequence([
        ConverterSequenceNode('reshape_out', ['Reshape']),
        ConverterSequenceNode('transpose', ['Transpose']),
        ConverterSequenceNode('reshape_in', ['Reshape']),
        ConverterSequenceNode('shape_in', ['Const']),
        ConverterSequenceNode('order', ['Const']),
        ConverterSequenceNode('shape_out', ['Const']),
        NonConsumableConverterSequenceNode('input', ['?']),
    ])
    # Chain: input -> reshape_in -> transpose -> reshape_out, each stage with
    # its own Const parameter.
    seq.set_inputs('reshape_out', ['shape_out', 'transpose'])
    seq.set_inputs('transpose', ['order', 'reshape_in'])
    seq.set_inputs('reshape_in', ['shape_in', 'input'])
    seq.set_outputs(['reshape_out'])
    self.sequence = seq
class SliceLayerResolver(LayerResolver, object):
    """Resolves TF Split/SplitV ops into 'Slice' layer descriptors."""

    class Descriptor(LayerDescriptor):
        def __init__(self, name, nodes, axis, split_sizes, split_count):
            super(SliceLayerResolver.Descriptor, self).__init__('Slice', name, nodes)
            self.axis = axis                # axis the split op operates on
            self.split_sizes = split_sizes  # per-output sizes; empty for even splits
            self.split_count = split_count  # the op's 'num_split' attribute

        @property
        def output_names(self):
            # One name per output tensor of the last consumed op (the split op).
            return [str(t.name) for t in self.child_ops[-1].outputs]

    def __init__(self):
        self.sequence = GraphSequence(
            [ConverterSequenceNode('root', ['Split', 'SplitV'])])
        self.sequence.set_outputs(['root'])

    def resolve_layer(self, graph_matcher, graph_helper):
        """Return one Slice descriptor per matched Split/SplitV op."""
        matches = graph_matcher.match_sequence(self.sequence)
        if len(matches) == 0:
            return []
        potential_descriptors = []
        for match in matches:
            split_op = match['root']
            split_axis, split_sizes = self.get_split_axis_and_sizes(
                graph_helper, split_op)
            split_count = int(split_op.get_attr('num_split'))
            consumed_nodes = match.consumed_nodes
            potential_descriptors.append(
                SliceLayerResolver.Descriptor(str(split_op.name),
                                              consumed_nodes,
                                              split_axis,
                                              split_sizes,
                                              split_count))
        return potential_descriptors

    @classmethod
    def get_split_axis_and_sizes(cls, graph_helper, split_op):
        """Extract (axis, sizes) from a split op.

        First try the (input, sizes Const, axis Const) input layout; if that
        does not match, fall back to the (axis Const, input) layout, in which
        case the split is even and sizes are reported as an empty list.
        """
        try:
            _, split_sizes, split_axis = GraphHelper.get_op_input_tensors(
                split_op, ('?', 'Const', 'Const'))
            split_sizes = list(
                graph_helper.evaluate_tensor_output(split_sizes))
        except TensorNotFoundError:
            split_axis, _ = GraphHelper.get_op_input_tensors(
                split_op, ('Const', '?'))
            split_sizes = []
        split_axis = int(graph_helper.evaluate_tensor_output(split_axis))
        return split_axis, split_sizes
class DilatedConvolutionLayerResolver(ConvolutionLayerResolver, object):
    """Resolves Conv2D wrapped in SpaceToBatchND/BatchToSpaceND — the graph
    expansion TF emits for dilated (atrous) convolution."""

    class Descriptor(ConvolutionLayerResolver.Descriptor):
        pass

    def __init__(self):
        super(DilatedConvolutionLayerResolver, self).__init__()
        self.graph_sequence = GraphSequence([
            ConverterSequenceNode('space_to_batch', ['SpaceToBatchND']),
            NonConsumableConverterSequenceNode('inputs', ['?']),
            ConverterSequenceNode('dilation_sizes', ['?']),
            ConverterSequenceNode('paddings', ['?']),
            ConverterSequenceNode('conv_op', ['Conv2D']),
            ConverterSequenceNode('kernel', ['?']),
            ConverterSequenceNode('batch_to_space', ['BatchToSpaceND']),
            ConverterSequenceNode('block_shape_out', ['?']),
            ConverterSequenceNode('crops', ['?'])]
        )
        self.graph_sequence.set_inputs('space_to_batch', ['inputs', 'dilation_sizes', 'paddings'])
        self.graph_sequence.set_inputs('conv_op', ['space_to_batch', 'kernel'])
        self.graph_sequence.set_inputs('batch_to_space', ['conv_op', 'block_shape_out', 'crops'])
        self.graph_sequence.set_outputs(['batch_to_space'])

    def resolve_layer(self, graph_matcher, graph_helper):
        """Build convolution descriptors whose dilation comes from the
        SpaceToBatchND block shape.

        Raises:
            ConverterError: when the evaluated block shape is not a 2-vector.
        """
        matches = graph_matcher.match_sequence(self.graph_sequence)
        if len(matches) == 0:
            return []
        descriptors = []
        for match in matches:
            conv_op = match['conv_op']
            strides = conv_op.get_attr(self.TF_ATTRIBUTE_STRIDES)
            padding = conv_op.get_attr(self.TF_ATTRIBUTE_PADDING)
            weights = self.get_weights(graph_helper, conv_op)
            consumed_nodes = match.consumed_nodes
            output_op_nodes_names = [str(match[node.identifier].outputs[0].name)
                                     for node in self.graph_sequence.output_nodes]
            try:
                # If a BiasAdd consumes the BatchToSpaceND output, fold it into
                # this layer: consume the node and move the output name to it.
                batch_to_space_op = match['batch_to_space']
                conv_output_ops = graph_helper.get_op_outputs(batch_to_space_op)
                bias_op = GraphHelper.filter_single_op_by_type(conv_output_ops, 'BiasAdd')
                biases = self.get_biases(graph_helper, conv_op, bias_op)
                consumed_nodes.append(bias_op)
                output_op_nodes_names = [str(bias_op.outputs[0].name)]
            except OperationNotFoundError:
                # No bias in the graph: substitute zeros, one per output channel.
                bias_op = None
                biases = np.zeros(weights.shape[-1], dtype=np.float32)
            dilation_sizes = match['dilation_sizes']
            dilation_sizes = graph_helper.evaluate_tensor_output(dilation_sizes.outputs[0])
            if np.shape(dilation_sizes) != (2,):
                raise ConverterError(code_to_message.get_message('ERROR_TF_CONV_RESOLVE_DILATION')(conv_op.name))
            d = ConvolutionLayerResolver.Descriptor(str(conv_op.name), consumed_nodes,
                                                    conv_op, bias_op, strides,
                                                    padding, weights, biases,
                                                    output_names=output_op_nodes_names)
            d.dilationY = int(dilation_sizes[0])
            d.dilationX = int(dilation_sizes[1])
            d.input_ops = [match['space_to_batch']]
            descriptors.append(d)
        return descriptors
class ReductionLayerResolver(LayerResolver, object):
    """Abstract resolver for TF reduction ops (e.g. Sum/Mean/Max variants)."""
    __metaclass__ = ABCMeta

    class Descriptor(LayerDescriptor):
        def __init__(self, layer_type, name, nodes, axes, keep_dims, output_names=None):
            super(ReductionLayerResolver.Descriptor, self).__init__(layer_type, name, nodes,
                                                                    output_names=output_names)
            self.axes = axes
            self.keep_dims = keep_dims

    def __init__(self, layer_type, op_type, descriptor_class):
        super(ReductionLayerResolver, self).__init__()
        self._layer_type = layer_type
        self._op_type = op_type
        self._descriptor_class = descriptor_class
        self.sequence = GraphSequence([
            ConverterSequenceNode('root', [self._op_type]),
            ConverterSequenceNode('reduction_indices', ['Const']),
            NonConsumableConverterSequenceNode('input', ['?']),
        ])
        self.sequence.set_inputs('root', ['input', 'reduction_indices'])
        self.sequence.set_outputs(['root'])

    def resolve_layer(self, graph_matcher, graph_helper):
        """Build one reduction descriptor per match, normalizing negative axes."""
        descriptors = []
        for match in graph_matcher.match_sequence(self.sequence):
            reduction_op = match['root']
            raw_axes = graph_helper.evaluate_tensor_output(match['reduction_indices'].outputs[0])
            keep_dims = bool(reduction_op.get_attr('keep_dims'))
            input_rank = len(graph_helper.get_op_output_shape(match['input']))
            axes_list = [raw_axes] if np.isscalar(raw_axes) else raw_axes.tolist()
            # Map negative axes onto their positive equivalents.
            axes = [int(a) if int(a) >= 0 else int(a) + input_rank for a in axes_list]
            descriptors.append(self._descriptor_class(self._layer_type,
                                                      str(reduction_op.name),
                                                      match.consumed_nodes,
                                                      axes,
                                                      keep_dims,
                                                      output_names=[str(reduction_op.outputs[0].name)]))
        return descriptors
class BatchNormWithGlobalNormLayerResolver(BatchNormLayerResolver):
    """Resolves legacy BatchNormWithGlobalNormalization ops into BN descriptors."""

    class Descriptor(BatchNormLayerResolver.Descriptor):
        pass

    def __init__(self):
        self.sequence = GraphSequence([
            ConverterSequenceNode('root', ['BatchNormWithGlobalNormalization'])
        ])
        self.sequence.set_outputs(['root'])

    def resolve_layer(self, graph_matcher, graph_helper):
        """Return one descriptor per matched op.

        Raises:
            ConverterError: if fewer than four Const inputs are found.
        """
        descriptors = []
        for match in graph_matcher.match_sequence(self.sequence):
            bn_op = match['root']
            params = self._const_inputs(graph_helper, bn_op)
            if len(params) < 4:
                raise ConverterError(
                    code_to_message.get_message(
                        'ERROR_TF_BATCHNORM_GLOBALNORMALIZATION_INPUT'))
            # Const inputs are consumed positionally as mean, variance, beta,
            # scale (presumably the op's fixed input order — confirm against TF).
            mean, variance, beta, scale = params[0], params[1], params[2], params[3]
            descriptors.append(
                BatchNormWithGlobalNormLayerResolver.Descriptor(
                    str(bn_op.name),
                    match.consumed_nodes,
                    bn_mul_op=bn_op,
                    mean=mean,
                    variance=variance,
                    epsilon=bn_op.get_attr('variance_epsilon'),
                    scale=scale,
                    beta=beta))
        return descriptors

    @classmethod
    def _const_inputs(cls, graph_helper, bn_op):
        # Evaluate every Const input of the op, preserving its input order.
        return [graph_helper.evaluate_tensor_output(tensor)
                for tensor in bn_op.inputs
                if tensor.op.type == 'Const']
def __init__(self):
    """Build the matcher for SpaceToBatchND -> DepthwiseConv2dNative -> BatchToSpaceND."""
    super(DilatedDepthwiseConvolutionLayerResolver, self).__init__()
    # Only the conv node itself is a consumable match; the surrounding
    # space/batch ops are non-consumable (presumably left for other resolvers).
    seq = GraphSequence([
        NonConsumableConverterSequenceNode('space_to_batch', ['SpaceToBatchND']),
        NonConsumableConverterSequenceNode('inputs', ['?']),
        NonConsumableConverterSequenceNode('dilation_sizes', ['?']),
        NonConsumableConverterSequenceNode('paddings', ['?']),
        ConverterSequenceNode('conv_op', ['DepthwiseConv2dNative']),
        ConverterSequenceNode('kernel', ['?']),
        NonConsumableConverterSequenceNode('batch_to_space', ['BatchToSpaceND']),  # output
        NonConsumableConverterSequenceNode('block_shape_out', ['?']),
        NonConsumableConverterSequenceNode('crops', ['?'])
    ])
    seq.set_inputs('space_to_batch', ['inputs', 'dilation_sizes', 'paddings'])
    seq.set_inputs('conv_op', ['space_to_batch', 'kernel'])
    seq.set_inputs('batch_to_space', ['conv_op', 'block_shape_out', 'crops'])
    seq.set_outputs(['batch_to_space'])
    self.graph_sequence = seq
class ConcatLayerResolver(LayerResolver, object):
    """Resolves TF Concat/ConcatV2 ops into 'Concatenation' descriptors."""

    class Descriptor(LayerDescriptor):
        def __init__(self, name, nodes, axis):
            super(ConcatLayerResolver.Descriptor, self).__init__('Concatenation', name, nodes)
            self.axis = axis

    def __init__(self):
        self.sequence = GraphSequence(
            [ConverterSequenceNode('root', ['Concat', 'ConcatV2'])])
        self.sequence.set_outputs(['root'])

    def resolve_layer(self, graph_matcher, graph_helper):
        """Build one descriptor per concat with at least two non-const inputs."""
        descriptors = []
        for match in graph_matcher.match_sequence(self.sequence):
            concat_op = match['root']
            non_const_inputs = [t for t in concat_op.inputs if t.op.type != 'Const']
            # A concat of fewer than two real tensors carries no work; skip it.
            if len(non_const_inputs) < 2:
                continue
            max_rank = max(len(graph_helper.get_op_output_shape(t.op))
                           for t in non_const_inputs)
            # The single Const input is the concat axis.
            axis_tensor = GraphHelper.filter_single_op_by_type(
                [t.op for t in concat_op.inputs], 'Const')
            axis = int(graph_helper.evaluate_tensor_output(axis_tensor.outputs[0]))
            if axis < 0:
                axis += max_rank
            descriptors.append(
                ConcatLayerResolver.Descriptor(str(concat_op.name),
                                               match.consumed_nodes, axis))
        return descriptors
class FusedBatchNormNormLayerResolver(BatchNormLayerResolver):
    """Resolves TF FusedBatchNorm ops into batch-norm descriptors."""

    class Descriptor(BatchNormLayerResolver.Descriptor):
        pass

    def __init__(self):
        self.sequence = GraphSequence(
            [ConverterSequenceNode('root', ['FusedBatchNorm'])])
        self.sequence.set_outputs(['root'])

    def resolve_layer(self, graph_matcher, graph_helper):
        """Return one descriptor per matched op.

        Raises:
            ConverterError: if fewer than four parameter tensors are found.
        """
        descriptors = []
        for match in graph_matcher.match_sequence(self.sequence):
            bn_op = match['root']
            params = self._get_parameter_tensors(graph_helper, bn_op)
            if len(params) < 4:
                raise ConverterError(
                    code_to_message.get_message(
                        'ERROR_TF_BATCHNORM_GLOBALNORMALIZATION_INPUT'))
            # Parameter tensors are taken positionally as scale, beta, mean,
            # variance (presumably FusedBatchNorm's input order — confirm).
            scale, beta, mean, variance = params[0], params[1], params[2], params[3]
            descriptors.append(
                FusedBatchNormNormLayerResolver.Descriptor(str(bn_op.name),
                                                           match.consumed_nodes,
                                                           bn_mul_op=bn_op,
                                                           mean=mean,
                                                           variance=variance,
                                                           epsilon=bn_op.get_attr('epsilon'),
                                                           scale=scale,
                                                           beta=beta))
        return descriptors

    @classmethod
    def _get_parameter_tensors(cls, graph_helper, bn_op):
        # Evaluate all Const/Identity inputs in one batch, then restore the
        # op's input order when returning the values.
        parameter_tensors = [t for t in bn_op.inputs
                             if t.op.type in ['Const', 'Identity']]
        evaluated = graph_helper.evaluate_tensors_output(parameter_tensors)
        return [evaluated[t] for t in parameter_tensors]
class PReLuLayerResolver(LayerResolver, object):
    """Resolves TF's composite PReLU pattern into a single 'PReLU' descriptor.

    Matches f = relu(x) + (alphas * (x - abs(x))) * unknown — the usual PReLU
    graph expansion ('unknown' is presumably the 0.5 constant; confirm).
    """

    class Descriptor(LayerDescriptor):
        def __init__(self, name, operations, coefficients, output_names):
            super(PReLuLayerResolver.Descriptor, self).__init__('PReLU', name, operations,
                                                                output_names=output_names)
            self.coefficients = coefficients  # evaluated alpha coefficient tensor

    def __init__(self):
        self.sequence = GraphSequence([
            ConverterSequenceNode('a', ['Relu']),
            ConverterSequenceNode('b', ['Abs']),
            ConverterSequenceNode('c', ['Sub']),
            ConverterSequenceNode('d', ['Mul']),
            ConverterSequenceNode('e', ['Mul']),
            ConverterSequenceNode('f', ['Add']),  # output
            ConverterSequenceNode('unknown', ['?']),
            ConverterSequenceNode('alphas', ['?']),
            NonConsumableConverterSequenceNode('inputs', ['?'])
        ])
        self.sequence.set_inputs('a', ['inputs'])
        self.sequence.set_inputs('b', ['inputs'])
        self.sequence.set_inputs('c', ['inputs', 'b'])
        self.sequence.set_inputs('d', ['alphas', 'c'])
        self.sequence.set_inputs('e', ['d', 'unknown'])
        self.sequence.set_inputs('f', ['a', 'e'])
        self.sequence.set_outputs(['f'])

    def resolve_layer(self, graph_matcher, graph_helper):
        """Return one PReLU descriptor per pattern match.

        Raises:
            ConverterError: if the alpha coefficients are not constant.
        """
        matches = graph_matcher.match_sequence(self.sequence)
        if len(matches) == 0:
            return []
        potential_descriptors = []
        for match in matches:
            coefficients = match['alphas']
            add_op = match['f']
            # Alphas must be compile-time constants so they can be baked into
            # the layer's weights.
            if coefficients.type not in ['Identity', 'Const']:
                raise ConverterError(code_to_message.get_message('ERROR_TF_RESOLVE_PRELU_COEFF'))
            output_op_nodes_names = [str(match[node.identifier].outputs[0].name)
                                     for node in self.sequence.output_nodes]
            consumed_nodes = match.consumed_nodes
            potential_descriptors.append(
                PReLuLayerResolver.Descriptor(str(add_op.name), consumed_nodes,
                                              graph_helper.evaluate_tensor_output(coefficients.outputs[0]),
                                              output_names=output_op_nodes_names))
        return potential_descriptors
def __init__(self):
    """Build the matcher patterns for Pad/PadV2, with and without a pad value."""
    # Two-input form: (input, paddings) — pads with the default value.
    zero_pad = GraphSequence([
        ConverterSequenceNode('root', ['Pad', 'PadV2']),
        ConverterSequenceNode('paddings', ['Const']),
        NonConsumableConverterSequenceNode('input', ['?']),
    ])
    zero_pad.set_inputs('root', ['input', 'paddings'])
    zero_pad.set_outputs(['root'])
    self.sequence_with_zero_padding = zero_pad

    # Three-input form: (input, paddings, const_values) — explicit pad value.
    const_pad = GraphSequence([
        ConverterSequenceNode('root', ['Pad', 'PadV2']),
        ConverterSequenceNode('paddings', ['Const']),
        ConverterSequenceNode('const_values', ['Const']),
        NonConsumableConverterSequenceNode('input', ['?']),
    ])
    const_pad.set_inputs('root', ['input', 'paddings', 'const_values'])
    const_pad.set_outputs(['root'])
    self.sequence_with_const_padding = const_pad

    self.sequences = [self.sequence_with_zero_padding, self.sequence_with_const_padding]
def __init__(self):
    """Build the composite PReLU matcher:
    f = relu(inputs) + (alphas * (inputs - abs(inputs))) * unknown."""
    seq = GraphSequence([
        ConverterSequenceNode('a', ['Relu']),
        ConverterSequenceNode('b', ['Abs']),
        ConverterSequenceNode('c', ['Sub']),
        ConverterSequenceNode('d', ['Mul']),
        ConverterSequenceNode('e', ['Mul']),
        ConverterSequenceNode('f', ['Add']),  # output
        ConverterSequenceNode('unknown', ['?']),
        ConverterSequenceNode('alphas', ['?']),
        NonConsumableConverterSequenceNode('inputs', ['?'])
    ])
    seq.set_inputs('a', ['inputs'])
    seq.set_inputs('b', ['inputs'])
    seq.set_inputs('c', ['inputs', 'b'])
    seq.set_inputs('d', ['alphas', 'c'])
    seq.set_inputs('e', ['d', 'unknown'])
    seq.set_inputs('f', ['a', 'e'])
    seq.set_outputs(['f'])
    self.sequence = seq
class AddNLayerResolver(LayerResolver, object):
    """Resolves TF AddN ops into 'ElementWiseSumN' layer descriptors."""

    class Descriptor(LayerDescriptor):
        def __init__(self, name, nodes):
            super(AddNLayerResolver.Descriptor, self).__init__('ElementWiseSumN', name, nodes)

    def __init__(self):
        self.sequence = GraphSequence([ConverterSequenceNode('root', ['AddN'])])
        self.sequence.set_outputs(['root'])

    def resolve_layer(self, graph_matcher, graph_helper):
        """Return one descriptor per matched AddN op (empty list if none)."""
        return [AddNLayerResolver.Descriptor(str(m['root'].name), m.consumed_nodes)
                for m in graph_matcher.match_sequence(self.sequence)]
def __init__(self):
    """Build the matcher for decomposed batch-norm arithmetic.

    Wiring (per the set_inputs calls below): a = variance + epsilon,
    b = rsqrt(a), c = b * inputs, d = b * mean, e = Sub(d, beta), f = c + e.
    """
    seq = GraphSequence([
        ConverterSequenceNode('a', ['Add']),
        ConverterSequenceNode('b', ['Rsqrt']),
        ConverterSequenceNode('c', ['Mul']),
        ConverterSequenceNode('d', ['Mul']),
        ConverterSequenceNode('e', ['Sub']),
        ConverterSequenceNode('f', ['Add']),  # output
        NonConsumableConverterSequenceNode('inputs', ['?']),
        ConverterSequenceNode('mean', ['?']),
        ConverterSequenceNode('beta', ['?']),
        ConverterSequenceNode('variance', ['?']),
        ConverterSequenceNode('epsilon', ['?'])
    ])
    seq.set_inputs('a', ['variance', 'epsilon'])
    seq.set_inputs('b', ['a'])
    seq.set_inputs('c', ['b', 'inputs'])
    seq.set_inputs('d', ['b', 'mean'])
    seq.set_inputs('e', ['d', 'beta'])
    seq.set_inputs('f', ['c', 'e'])
    seq.set_outputs(['f'])
    self.sequence = seq
class TanhLayerResolver(LayerResolver, object):
    """Resolves standalone TF Tanh ops into 'Tanh' layer descriptors."""

    class Descriptor(LayerDescriptor):
        pass

    def __init__(self):
        self.sequence = GraphSequence([ConverterSequenceNode('root', ['Tanh'])])
        self.sequence.set_outputs(['root'])

    def resolve_layer(self, graph_matcher, graph_helper):
        """Return one Tanh descriptor per matched op (empty list if none)."""
        return [TanhLayerResolver.Descriptor('Tanh', str(m['root'].name), m.consumed_nodes)
                for m in graph_matcher.match_sequence(self.sequence)]
class ArgMaxLayerResolver(LayerResolver, object):
    """Resolves TF ArgMax ops into 'ArgMax' layer descriptors."""

    class Descriptor(LayerDescriptor):
        def __init__(self, name, nodes, axis, output_names=None):
            super(ArgMaxLayerResolver.Descriptor, self).__init__('ArgMax', name, nodes,
                                                                 output_names=output_names)
            self.axis = axis

    def __init__(self):
        seq = GraphSequence([
            ConverterSequenceNode('root', ['ArgMax']),
            ConverterSequenceNode('axis', ['Const']),
            NonConsumableConverterSequenceNode('input', ['?']),
        ])
        seq.set_inputs('root', ['input', 'axis'])
        seq.set_outputs(['root'])
        self.sequence = seq

    def resolve_layer(self, graph_matcher, graph_helper):
        """Resolve ArgMax ops, normalizing negative axes and validating range.

        Raises:
            ConverterError: when the axis falls outside the input rank.
        """
        descriptors = []
        for match in graph_matcher.match_sequence(self.sequence):
            argmax_op = match['root']
            rank = len(graph_helper.get_op_output_shape(match['input']))
            axis = int(graph_helper.evaluate_tensor_output(match['axis'].outputs[0]))
            if axis < 0:
                axis += rank
            if not 0 <= axis < rank:
                raise ConverterError(code_to_message.get_message('ERROR_TF_ARGMAX_INVALID_AXIS')(axis, rank))
            descriptors.append(ArgMaxLayerResolver.Descriptor(
                str(argmax_op.name), match.consumed_nodes, axis,
                output_names=[str(argmax_op.outputs[0].name)]))
        return descriptors