def make_graph(self):
    layer1 = self.add_layers(
        RNNLayer.new(self.rnnNumCellUnits,
                     numStepsToOutput_=self.numRnnOutputSteps),
        self.input, (-1, -1, self.vecDim))

    # branch a: just the last row of the RNN output
    layer2a_output = layer1.output[:, -1, :]
    layer2a_outputshape = (layer1.output_shape[0], layer1.output_shape[2])

    # branch b: 2x2 conv + maxpool over the full RNN output
    layer2b = ConvMaxpoolLayer(
        layer1.output, layer1.output_shape,
        convParams_={'filterShape': (2, 2),
                     'numFeaturesPerFilter': 16,
                     'activation': 'relu'},
        maxPoolParams_={'ksize': (self.numRnnOutputSteps, 1),
                        'padding': 'SAME'},
        loggerFactory=self.loggerFactory)
    layer2b_output, layer2b_output_numcols = convert_to_2d(
        layer2b.output, layer2b.output_shape)

    # branch c: same as branch b but with a 4x4 filter
    layer2c = ConvMaxpoolLayer(
        layer1.output, layer1.output_shape,
        convParams_={'filterShape': (4, 4),
                     'numFeaturesPerFilter': 16,
                     'activation': 'relu'},
        maxPoolParams_={'ksize': (self.numRnnOutputSteps, 1),
                        'padding': 'SAME'},
        loggerFactory=self.loggerFactory)
    layer2c_output, layer2c_output_numcols = convert_to_2d(
        layer2c.output, layer2c.output_shape)

    # concatenate the three branches along the feature axis
    layer2_output = tf.concat(
        [layer2a_output, layer2b_output, layer2c_output], axis=1)
    layer2_output_numcols = (layer2a_outputshape[1]
                             + layer2b_output_numcols
                             + layer2c_output_numcols)
    self.layers.append([layer2b, layer2c])
    self.outputs.append(
        {'output': layer2_output,
         'output_shape': (layer2a_outputshape[0], layer2_output_numcols)})

    self.add_layers(DropoutLayer.new(self.pooledKeepProb))
    lastLayer = self.add_layers(FullyConnectedLayer.new(self.numClasses))

    # L2-regularize only the final fully connected layer
    self.l2Loss = self.l2RegLambda * (
        tf.nn.l2_loss(lastLayer.weights) + tf.nn.l2_loss(lastLayer.biases))
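# A minimal sketch of the convert_to_2d helper called above (hedged: the
# repo defines the real one elsewhere; this is an assumption about its
# behavior, for readers of this section only). Assuming the layer output
# is a 4-d tensor of static shape (batch, h, w, c), it flattens everything
# but the batch axis and reports the resulting column count.
import numpy as np
import tensorflow as tf

def convert_to_2d(output, output_shape):
    numCols = int(np.prod(output_shape[1:]))           # h * w * c
    return tf.reshape(output, [-1, numCols]), numCols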
def make_graph(self):
    # stack one RNN layer per config, then dropout and a final FC layer
    makers = [RNNLayer.new(c.numCellUnits, c.keepProbs, activation=c.activation)
              for c in self.rnnConfigs]
    self.add_layers(makers, self.input, (-1, -1, self.vecDim))
    self.add_layers(DropoutLayer.new(self.pooledKeepProb, self.pooledActivation))
    lastLayer = self.add_layers(FullyConnectedLayer.new(self.numClasses))

    # 'final_stage' penalizes only the final FC layer's parameters;
    # otherwise every trainable variable in the graph is penalized
    if self.l2Scheme == 'final_stage':
        l2 = tf.nn.l2_loss(lastLayer.weights) + tf.nn.l2_loss(lastLayer.biases)
    else:
        l2 = sum(tf.nn.l2_loss(v) for v in tf.trainable_variables())
    self.l2Loss = self.l2RegLambda * l2
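# How the *.new(...) factories used throughout these graphs are assumed
# to work (a sketch inferred from the call sites, not the repo's actual
# code; ExampleLayer is hypothetical): new returns a closure that
# add_layers later invokes with the incoming tensor and its shape, so a
# list of layer configs can be declared before the graph is wired up.
class ExampleLayer:
    def __init__(self, input_, inputShape, numUnits):
        self.output = input_            # a real layer would transform this
        self.output_shape = inputShape

    @classmethod
    def new(cls, numUnits):
        # defer construction until add_layers supplies input and shape
        return lambda input_, inputShape: cls(input_, inputShape, numUnits)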
def make_graph(self):
    layer1 = self.add_layers(
        RNNLayer.new(self.rnnNumCellUnits, self.rnnKeepProbs,
                     numStepsToOutput_=self.numRnnOutputSteps),
        self.input, (-1, -1, self.vecDim))
    numCols = layer1.output_shape[2]

    # branch a: just the last row of the RNN output
    layer2a_output = layer1.output[:, -1, :]
    layer2a_outputshape = (layer1.output_shape[0], numCols)

    # branches b and c: full-width convolutions over 2 and 4 RNN steps
    layer2b = ConvLayer(layer1.output, layer1.output_shape,
                        filterShape=(2, numCols),
                        numFeaturesPerFilter=self.convNumFeaturesPerFilter,
                        activation='relu',
                        loggerFactory=self.loggerFactory)
    layer2b_output, layer2b_output_numcols = convert_to_2d(
        layer2b.output, layer2b.output_shape)

    layer2c = ConvLayer(layer1.output, layer1.output_shape,
                        filterShape=(4, numCols),
                        numFeaturesPerFilter=self.convNumFeaturesPerFilter,
                        activation='relu',
                        loggerFactory=self.loggerFactory)
    layer2c_output, layer2c_output_numcols = convert_to_2d(
        layer2c.output, layer2c.output_shape)

    # concatenate the three branches along the feature axis
    layer2_output = tf.concat(
        [layer2a_output, layer2b_output, layer2c_output], axis=1)
    layer2_output_numcols = (layer2a_outputshape[1]
                             + layer2b_output_numcols
                             + layer2c_output_numcols)
    self.layers.append([layer2b, layer2c])
    self.outputs.append(
        {'output': layer2_output,
         'output_shape': (layer2a_outputshape[0], layer2_output_numcols)})

    self.add_layers(DropoutLayer.new(self.pooledKeepProb))
    lastLayer = self.add_layers(FullyConnectedLayer.new(self.numClasses))

    self.l2Loss = self.l2RegLambda * (
        tf.nn.l2_loss(lastLayer.weights) + tf.nn.l2_loss(lastLayer.biases))
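# Illustrative column arithmetic for the two conv branches above. This
# assumes 'VALID' padding inside ConvLayer, which is an assumption, not
# confirmed by this file: a height-k full-width filter over T RNN steps
# leaves T - k + 1 positions, each contributing convNumFeaturesPerFilter
# features once flattened by convert_to_2d.
T, f = 10, 16                    # toy step count and feature count
cols_b = (T - 2 + 1) * f         # height-2 branch -> 144 columns
cols_c = (T - 4 + 1) * f         # height-4 branch -> 112 columns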
def make_graph(self):
    # one CNN -> RNN pipeline per conv filter shape, run in parallel
    outputs = []
    outputShapes = []
    for filterShape, keepProb in zip(self.convFilterShapes, self.convKeepProbs):
        cnn = ConvLocalnormLayer(
            self.x, (-1, self.maxNumSeqs, self.vecDim),
            convParams_={
                'filterShape': (filterShape[0],
                                self.vecDim if filterShape[1] == -1 else filterShape[1]),
                'numFeaturesPerFilter': self.convNumFeaturesPerFilter,
                'keepProb': keepProb,
                'activation': 'relu'})

        # restore a sequence axis so the conv output can feed an RNN;
        # a height-k filter shortens each sequence by k - 1 steps
        newInput, newInputNumCols = convert_to_3d(cnn.output, cnn.output_shape)
        rnn = RNNLayer(
            {'x': newInput, 'numSeqs': self.numSeqs - filterShape[0] + 1},
            (-1, cnn.output_shape[1], newInputNumCols),
            self.rnnNumCellUnits, self.rnnKeepProbs)

        outputs.append(rnn.output)
        outputShapes.append(rnn.output_shape)

    self.add_outputs(outputs, outputShapes)

    # last layer: fully connected
    lastLayer = self.add_layers(FullyConnectedLayer.new(self.numClasses))
    self.l2Loss = self.l2RegLambda * (
        tf.nn.l2_loss(lastLayer.weights) + tf.nn.l2_loss(lastLayer.biases))
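# Companion sketch for the convert_to_3d helper used above (again
# hedged: the real helper lives elsewhere in the repo, and this is an
# assumption about its behavior). Assuming the conv output has static
# shape (batch, steps, w, c), it keeps the step axis and merges the
# rest, yielding one flat feature vector per time step for the RNN.
import numpy as np
import tensorflow as tf

def convert_to_3d(output, output_shape):
    numCols = int(np.prod(output_shape[2:]))       # w * c per step
    return tf.reshape(output, [-1, output_shape[1], numCols]), numCols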
def make_graph(self):
    # -------------- RNN --------------
    rnn = RNNLayer(self.input, (-1, -1, self.vecDim),
                   self.rnnNumCellUnits, self.rnnKeepProbs,
                   loggerFactory=self.loggerFactory)
    rnn_output, rnn_numcols = convert_to_2d(rnn.output, rnn.output_shape)

    # -------------- CNN --------------
    cnn_outputs = []
    cnn_numcols = 0
    cnn_layers = []
    for filterSize, keepProb in zip(self.convFilterSizes, self.convKeepProbs):
        l = ConvLocalnormLayer(
            self.x, (-1, self.maxNumSeqs, self.vecDim),
            convParams_={'filterShape': (filterSize, self.vecDim),
                         'numFeaturesPerFilter': self.convNumFeaturesPerFilter,
                         'keepProb': keepProb,
                         'activation': 'relu'},
            loggerFactory=self.loggerFactory)
        o, col = convert_to_2d(l.output, l.output_shape)
        cnn_outputs.append(o)
        cnn_numcols += col
        cnn_layers.append(l)
    cnn_output = tf.concat(cnn_outputs, axis=1)

    # -------------- combine RNN & CNN --------------
    self.layers.append([rnn, cnn_layers])
    self.add_outputs([rnn_output, cnn_output],
                     [(-1, rnn_numcols), (-1, cnn_numcols)])

    # -------------- dropout --------------
    self.add_layers(DropoutLayer.new(self.pooledKeepProb))

    # -------------- fully connected --------------
    lastLayer = self.add_layers(FullyConnectedLayer.new(self.numClasses))
    self.l2Loss = self.l2RegLambda * (
        tf.nn.l2_loss(lastLayer.weights) + tf.nn.l2_loss(lastLayer.biases))
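# Quick illustrative shape check (toy numbers, not from the repo): the
# RNN/CNN combination above works because every branch is flattened to
# 2-d first, so concatenating on axis=1 simply adds column counts.
import tensorflow as tf

a = tf.zeros([8, 10])                    # stand-in for the flattened RNN output
b = tf.zeros([8, 48])                    # stand-in for the concatenated CNN output
combined = tf.concat([a, b], axis=1)     # static shape (8, 58)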
def make_graph(self):
    # minimal baseline: a single RNN layer into a fully connected layer,
    # with no dropout and no L2 regularization
    self.add_layers(RNNLayer.new(self.rnnNumCellUnits),
                    self.input, (-1, -1, self.vecDim))
    self.add_layers(FullyConnectedLayer.new(self.numClasses))
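# What the minimal baseline above roughly reduces to in raw TF1 calls
# (a hedged sketch; cell_units, num_classes, and x stand in for the
# instance attributes, and the repo's RNNLayer may differ in detail):
import tensorflow as tf

cell_units, num_classes = 64, 2
x = tf.placeholder(tf.float32, [None, None, 300])          # (batch, steps, vecDim)
cell = tf.nn.rnn_cell.GRUCell(cell_units)
rnn_out, _ = tf.nn.dynamic_rnn(cell, x, dtype=tf.float32)
logits = tf.layers.dense(rnn_out[:, -1, :], num_classes)   # final step -> classes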