def map_crop(cls, node):
    offset = node.parameters.offset
    if offset:
        kwargs = {'offset': int(offset[0])}
        return Node.create('crop', **kwargs)
    else:
        return Node.create('crop')
def map_scale(cls, node):
    # TODO: The gamma parameter has to be set (in node.data?) and this should work.
    # Also, mean should be set to 0, and var to 1, just to be safe.
    if node.data:
        scale_value = float(node.parameters.filler.value)
        kwargs = {
            'use_scale': True,
            'use_bias': node.parameters.bias_term,
            'gamma': scale_value,
            'epsilon': 0
        }
        if node.parameters.bias_term:
            kwargs['beta'] = float(node.parameters.bias_filler.value)
        cls._convert_output_shape(kwargs, node)
        return Node.create('Affine', **kwargs)
    else:
        return Node.create('Mul')
def map_scale(cls, node):
    # TODO: The gamma parameter has to be set (in node.data?) and this should work.
    # Also, mean should be set to 0, and var to 1, just to be safe.
    if node.data:
        raise NotImplementedError
        # Unreachable draft implementation, kept until the TODO above is resolved.
        scale_value = float(node.parameters.filler.value)
        kwargs = {'scale': False, 'bias': False, 'gamma': scale_value, 'epsilon': 0}
        cls._convert_output_shape(kwargs, node)
        return Node.create('Scale', **kwargs)
    else:
        return Node.create('Mul')
def map_eltwise(cls, node):
    operations = {0: 'Mul', 1: 'Add', 2: 'Max'}
    op_code = node.parameters.operation
    try:
        return Node.create(operations[op_code])
    except KeyError:
        raise ConversionError('Unknown elementwise operation: {}'.format(op_code))
def map_batch_norm(cls, node):
    # Four data blobs indicate that learned scale (gamma) and offset (beta) terms are present.
    scale_offset = len(node.data) == 4
    kwargs = {} if scale_offset else {'scale': False, 'bias': False}
    kwargs['epsilon'] = node.parameters.eps
    cls._convert_output_shape(kwargs, node)
    return Node.create('BatchNorm', **kwargs)
def map_inner_product(cls, node):
    # TODO: Axis
    assert node.parameters.axis == 1
    # TODO: Unbiased
    shape = TensorShape()
    dim = shape.dim.add()
    dim.size = -1
    dim = shape.dim.add()
    dim.size = 1
    for i in node.output_shape[1:]:
        dim.size *= i
    kwargs = {
        'use_bias': node.parameters.bias_term,
        'units': node.parameters.num_output,
        '_output_shapes': [shape]
    }
    # Check whether a Flatten layer is needed before the FullyConnected node.
    parent, _ = node.get_only_parent()
    ret = []
    # if parent.output_shape.height > 1 or parent.output_shape.width > 1:
    ret.append(cls._add_flatten_layer(parent))
    ret.append(Node.create('FullyConnected', **kwargs))
    return ret
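# A minimal, self-contained sketch of the flattened shape computed above (an
# illustration, not part of the converter): all non-batch dimensions collapse into a
# single axis while the batch size stays symbolic (-1). The helper name and the
# sample shape are made up for the example.
from functools import reduce
from operator import mul

def flattened_shape(output_shape):
    # output_shape is assumed to behave like (batch, d1, d2, ...), e.g. (32, 256, 6, 6)
    return [-1, reduce(mul, output_shape[1:], 1)]

assert flattened_shape((32, 256, 6, 6)) == [-1, 9216]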
def map_scale(cls, node):
    raise NotImplementedError
    # Unreachable draft implementation, kept for the TODO below.
    # TODO: The gamma parameter has to be set (in node.data?) and this should work.
    # Also, mean should be set to 0, and var to 1, just to be safe.
    scale_value = float(node.parameters.filler.value)
    kwargs = {'scale': True, 'bias': False, 'gamma': scale_value, 'epsilon': 0}
    return Node.create('BatchNorm', **kwargs)
def map_convolution(cls, node):
    parent, _ = node.get_only_parent()
    kwargs = cls.get_kernel_params(node, parent.output_shape)
    kwargs['kernel_shape'] = [node.kernel_parameters.k_h, node.kernel_parameters.k_w,
                              parent.output_shape.channels, node.parameters.num_output]
    kwargs['use_bias'] = node.parameters.bias_term
    kwargs['group'] = node.parameters.group
    return Node.create('Conv', **kwargs)
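# Standalone sketch of the kernel layout used above (an illustration of the assumed
# target format, not converter code): filters are listed as
# [kernel_h, kernel_w, in_channels, out_channels] ("HWIO"), whereas Caffe stores
# convolution weights as [out_channels, in_channels, kernel_h, kernel_w]. The helper
# name and sample shape are made up for the example.
def caffe_to_hwio_shape(caffe_shape):
    c_o, c_i, k_h, k_w = caffe_shape
    return [k_h, k_w, c_i, c_o]

assert caffe_to_hwio_shape((64, 3, 7, 7)) == [7, 7, 3, 64]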
def map_p_re_lu(cls, node):
    # print(node.parameters)
    # assert False
    kwargs = {}
    # kwargs['gamma'] = 0.25
    cls._convert_output_shape(kwargs, node)
    return Node.create('PRelu', **kwargs)
def map_deconvolution(cls, node):
    raise NotImplementedError()
    # Unreachable draft implementation below.
    parent, _ = node.get_only_parent()
    kwargs = cls.get_kernel_params(node, parent.output_shape)
    kwargs['kernel_shape'] = [node.kernel_parameters.k_h, node.kernel_parameters.k_w,
                              parent.output_shape.channels, node.parameters.num_output]
    kwargs['group'] = node.parameters.group
    return Node.create('deconv', **kwargs)
def map_eltwise(cls, node):
    operations = {0: 'Mul', 1: 'Add', 2: 'Max'}
    op_code = node.parameters.operation
    try:
        kwargs = {}
        cls._convert_output_shape(kwargs, node)
        return Node.create(operations[op_code], **kwargs)
    except KeyError:
        raise ConversionError('Unknown elementwise operation: {}'.format(op_code))
def map_convolution(cls, node):
    kwargs, padding = cls.get_kernel_params(node)
    parent, _ = node.get_only_parent()
    kwargs['filter'] = [node.kernel_parameters.k_h, node.kernel_parameters.k_w,
                        parent.output_shape.channels, node.parameters.num_output]
    kwargs['use_bias'] = node.parameters.bias_term
    group = node.parameters.group
    if group != 1:
        kwargs['group'] = group
    if padding['paddings'] is not None:
        return [Node.create('Pad', **padding), Node.create('Convolution', **kwargs)]
    else:
        return Node.create('Convolution', **kwargs)
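# Illustrative sketch (an assumption about intent, not converter code): when explicit
# padding cannot be folded into the convolution, a separate Pad node is emitted. One
# plausible NHWC 'paddings' layout pads height and width symmetrically and leaves the
# batch and channel dimensions untouched. The helper name is made up for the example.
def nhwc_paddings(p_h, p_w):
    return [[0, 0], [p_h, p_h], [p_w, p_w], [0, 0]]

assert nhwc_paddings(1, 2) == [[0, 0], [1, 1], [2, 2], [0, 0]]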
def map_p_re_lu(cls, node):
    # print(node.parameters)
    # assert False
    try:
        scale_value = float(node.parameters.filler.value)
        kwargs = {'gamma': scale_value}
    except ConversionError:
        # Fall back to Caffe's default PReLU slope of 0.25.
        kwargs = {'gamma': 0.25}
    cls._convert_output_shape(kwargs, node)
    return Node.create('PRelu', **kwargs)
def map_lrn(cls, node):
    params = node.parameters
    assert params.local_size % 2 == 1
    kwargs = {
        'size': int((params.local_size + 1) / 2),
        'alpha': params.alpha,
        'beta': params.beta,
        'k': params.k
    }
    cls._convert_output_shape(kwargs, node)
    return Node.create('LRN', **kwargs)
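# Standalone illustration of the local_size conversion above (an interpretation, not
# converter code): Caffe's LRN parameter is the full, odd window size, while the
# mapped 'size' is presumably the half-window including the center element, so that
# 2 * size - 1 == local_size. The helper name is made up for the example.
def lrn_half_window(local_size):
    assert local_size % 2 == 1
    return (local_size + 1) // 2

assert lrn_half_window(5) == 3  # e.g. AlexNet's local_size=5 maps to size=3
assert 2 * lrn_half_window(5) - 1 == 5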
def _add_flatten_layer(cls, node):
    shape = TensorShape()
    dim = shape.dim.add()
    dim.size = -1
    dim = shape.dim.add()
    dim.size = 1
    for i in node.output_shape[1:]:
        dim.size *= i
    kwargs = {'_output_shapes': [shape]}
    return Node.create('Flatten', **kwargs)
def map_crop(cls, node):
    kwargs = {}
    cls._convert_output_shape(kwargs, node)
    offset = node.parameters.offset
    if offset:
        if len(offset) == 1:
            kwargs['border'] = [offset[0], 0, offset[0], 0]
        else:
            kwargs['border'] = [offset[0], 0, offset[1], 0]
    return Node.create('Crop', **kwargs)
def map_pooling(cls, node):
    parent, _ = node.get_only_parent()
    kwargs = cls.get_kernel_params(node, parent.output_shape)
    if node.parameters.pool == 0:
        kwargs['pooling_type'] = 'MAX'
    elif node.parameters.pool == 1:
        kwargs['pooling_type'] = 'AVG'
    else:
        # Stochastic pooling, for instance.
        raise ConversionError('Unsupported pooling type.')
    cls._convert_output_shape(kwargs, node)
    return Node.create('Pool', **kwargs)
def map_deconvolution(cls, node):
    parent, _ = node.get_only_parent()
    kwargs = cls.get_kernel_params(node, parent.output_shape)
    kwargs['kernel_shape'] = [node.kernel_parameters.k_h, node.kernel_parameters.k_w,
                              node.parameters.num_output, parent.output_shape.channels]
    kwargs['use_bias'] = node.parameters.bias_term
    if node.parameters.dilation:
        dilation = node.parameters.dilation[0]
        if dilation != 1:
            kwargs['dilations'] = [1, dilation, dilation, 1]
    kwargs['group'] = node.parameters.group
    return Node.create('ConvTranspose', **kwargs)
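# Standalone note on the kernel layout above (an observation about the assumed target
# format, not converter code): for the transposed convolution the last two filter
# dimensions are swapped relative to map_convolution, i.e.
# [kernel_h, kernel_w, out_channels, in_channels]. The helper name and sample shape
# are made up for the example.
def deconv_filter_shape(k_h, k_w, c_in, c_out):
    return [k_h, k_w, c_out, c_in]

assert deconv_filter_shape(4, 4, 256, 128) == [4, 4, 128, 256]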
def map_pooling(cls, node):
    kwargs, padding = cls.get_kernel_params(node)
    if node.parameters.pool == 0:
        kwargs['pooling_type'] = 'MAX'
    elif node.parameters.pool == 1:
        kwargs['pooling_type'] = 'AVG'
    else:
        # Stochastic pooling, for instance.
        raise ConversionError('Unsupported pooling type.')
    kwargs['window_shape'] = [1, node.kernel_parameters.k_h, node.kernel_parameters.k_w, 1]
    cls._convert_output_shape(kwargs, node)
    if padding['paddings'] is not None:
        return [Node.create('Pad', **padding), Node.create('Pool', **kwargs)]
    else:
        return Node.create('Pool', **kwargs)
def map_unpooling(cls, node):
    kwargs = {}
    kwargs['kernel_shape'] = [1, node.kernel_parameters.k_h, node.kernel_parameters.k_w, 1]
    kwargs['pads'] = [0, node.kernel_parameters.p_h, node.kernel_parameters.p_w, 0]
    kwargs['strides'] = [1, node.kernel_parameters.s_h, node.kernel_parameters.s_w, 1]
    cls._convert_output_shape(kwargs, node)
    return Node.create('Unpool', **kwargs)
def map_inner_product(cls, node):
    # TODO: Axis
    assert node.parameters.axis == 1
    # TODO: Unbiased
    kwargs = {'use_bias': node.parameters.bias_term,
              'units': node.parameters.num_output}
    # Check whether a Flatten layer is needed before the FullyConnected node.
    parent, _ = node.get_only_parent()
    ret = []
    # if parent.output_shape.height > 1 or parent.output_shape.width > 1:
    ret.append(cls._add_flatten_layer(parent))
    ret.append(Node.create('FullyConnected', **kwargs))
    return ret
def map_data(cls, node):
    # TODO: We need to identify whether this is 4D image data;
    # otherwise we shouldn't change the dimension order.
    shape = TensorShape()
    dim = shape.dim.add()
    dim.size = -1
    for i in node.output_shape[2:]:
        dim = shape.dim.add()
        dim.size = i
    dim = shape.dim.add()
    dim.size = node.output_shape.channels
    kwargs = {'shape': shape}  # Ignore the dimension of batch size
    cls._convert_output_shape(kwargs, node)
    return Node.create('DataInput', **kwargs)
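# Self-contained illustration of the dimension reordering above (an interpretation of
# the loop, not converter code): a Caffe NCHW shape (N, C, H, W) becomes an NHWC-style
# shape with a symbolic batch size of -1 and channels moved last. The helper name and
# sample shape are made up for the example.
def nchw_to_nhwc(output_shape):
    channels = output_shape[1]
    spatial = list(output_shape[2:])  # H, W for 4D image data
    return [-1] + spatial + [channels]

assert nchw_to_nhwc((10, 3, 224, 224)) == [-1, 224, 224, 3]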
def map_relu(cls, node):
    kwargs = {}
    cls._convert_output_shape(kwargs, node)
    return Node.create('Relu', **kwargs)
def map_reshape(cls, node):
    kwargs = {'shape': [dim for dim in node.output_shape]}
    cls._convert_output_shape(kwargs, node)
    return Node.create('Reshape', **kwargs)
def map_flatten(cls, node):
    return Node.create('Flatten')
def map_dropout(cls, node):
    kwargs = {'keep_prob': node.parameters.dropout_ratio}
    cls._convert_output_shape(kwargs, node)
    return Node.create('Dropout', **kwargs)
def map_abs_val(cls, node):
    return Node.create('Abs')
def map_sigmoid(cls, node):
    return Node.create('Sigmoid')
def map_tanh(cls, node):
    return Node.create('Tanh')
def map_softmax(cls, node):
    kwargs = {}
    cls._convert_output_shape(kwargs, node)
    return Node.create('Softmax', **kwargs)
def map_concat(cls, node):
    kwargs = {'axis': (2, 3, 1, 0)[node.parameters.axis]}
    cls._convert_output_shape(kwargs, node)
    return Node.create('Concat', **kwargs)
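# Illustrative note on the axis lookup above (an assumption about intent, not converter
# code): (2, 3, 1, 0) translates a Caffe NCHW axis index into the corresponding axis of
# the converted layout. The common cases are concatenation along channels (Caffe axis 1,
# mapped to the trailing channel axis) and along height (Caffe axis 2).
assert (2, 3, 1, 0)[1] == 3  # NCHW channel axis -> trailing (NHWC) channel axis
assert (2, 3, 1, 0)[2] == 1  # NCHW height axis  -> NHWC height axis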
def map_batch_norm(cls, node):
    kwargs = {'scale': len(node.data) >= 3, 'bias': len(node.data) == 4}
    kwargs['epsilon'] = node.parameters.eps
    cls._convert_output_shape(kwargs, node)
    return Node.create('BatchNorm', **kwargs)
def map_softmax(cls, node):
    return Node.create('Softmax')