def map_inner_product(cls, node):
    """Translate an InnerProduct layer into a Flatten node followed by a
    FullyConnected node.

    The FullyConnected output shape is [-1, prod(output_shape[1:])]:
    a dynamic batch dimension plus all remaining extents collapsed into one.
    """
    # TODO: Axis — only axis == 1 is handled for now.
    assert node.parameters.axis == 1
    # TODO: Unbiased
    flat_shape = TensorShape()
    batch_dim = flat_shape.dim.add()
    batch_dim.size = -1  # batch size is dynamic
    feature_dim = flat_shape.dim.add()
    feature_dim.size = 1
    for extent in node.output_shape[1:]:
        feature_dim.size *= extent
    fc_kwargs = {
        'use_bias': node.parameters.bias_term,
        'units': node.parameters.num_output,
        '_output_shapes': [flat_shape],
    }
    # A Flatten layer is always inserted ahead of the FullyConnected node.
    # The original conditional check was intentionally disabled:
    # if parent.output_shape.height > 1 or parent.output_shape.width > 1:
    parent, _ = node.get_only_parent()
    return [
        cls._add_flatten_layer(parent),
        Node.create('FullyConnected', **fc_kwargs),
    ]
def _add_flatten_layer(cls, node):
    """Build a Flatten node whose output shape collapses every non-batch
    dimension of *node* into a single extent: [-1, prod(output_shape[1:])].
    """
    collapsed = TensorShape()
    lead = collapsed.dim.add()
    lead.size = -1  # dynamic batch dimension
    tail = collapsed.dim.add()
    tail.size = 1
    for extent in node.output_shape[1:]:
        tail.size *= extent
    flatten_kwargs = {'_output_shapes': [collapsed]}
    return Node.create('Flatten', **flatten_kwargs)
def map_data(cls, node):
    """Translate a Data layer into a DataInput node.

    The input shape is reordered from channels-first to channels-last:
    [-1, *output_shape[2:], channels].
    """
    # TODO: We need to identify whether this is 4D image data; otherwise we
    # shouldn't change the dimension order.
    input_shape = TensorShape()
    input_shape.dim.add().size = -1  # batch size stays dynamic
    for extent in node.output_shape[2:]:
        input_shape.dim.add().size = extent
    input_shape.dim.add().size = node.output_shape.channels  # channels moved last
    kwargs = {'shape': input_shape}
    # Ignore the dimension of batch size.
    cls._convert_output_shape(kwargs, node)
    return Node.create('DataInput', **kwargs)
def _convert_output_shape(cls, kwargs, node):
    """Store node's output shape into kwargs['_output_shapes'].

    For shapes with spatial dimensions (rank > 2) the layout is converted
    from channels-first to channels-last: [-1, *output_shape[2:], channels].
    For 2-D shapes the result is simply [-1, output_shape[1]].
    """
    converted = TensorShape()
    converted.dim.add().size = -1  # dynamic batch dimension
    if len(node.output_shape) > 2:
        # Spatial extents first, channels appended last (NC... -> N...C).
        for extent in node.output_shape[2:]:
            converted.dim.add().size = extent
        converted.dim.add().size = node.output_shape.channels
    else:
        converted.dim.add().size = node.output_shape[1]
    kwargs['_output_shapes'] = [converted]