def make_graph(self):
    """Build the RNN -> (last-step + two conv-maxpool branches) -> dropout -> FC graph.

    The RNN's output feeds three parallel branches: its final time step taken
    directly, plus two ConvMaxpoolLayer branches (2x2 and 4x4 filters). The
    branch outputs are flattened to 2-D and concatenated column-wise before the
    dropout and fully-connected classification layers. Also sets ``self.l2Loss``
    from the final layer's weights and biases.
    """
    rnn = self.add_layers(
        RNNLayer.new(self.rnnNumCellUnits, numStepsToOutput_=self.numRnnOutputSteps),
        self.input, (-1, -1, self.vecDim))

    # Branch A: just the last row (final output step) of the RNN output.
    last_step = rnn.output[:, -1, :]
    last_step_shape = (rnn.output_shape[0], rnn.output_shape[2])

    # Branches B and C: square conv filters of size 2 and 4, each followed by
    # max-pooling over all output steps, then flattened to 2-D.
    conv_layers = []
    conv_flats = []
    conv_cols = 0
    for filter_size in (2, 4):
        conv = ConvMaxpoolLayer(
            rnn.output, rnn.output_shape,
            convParams_={'filterShape': (filter_size, filter_size),
                         'numFeaturesPerFilter': 16,
                         'activation': 'relu'},
            maxPoolParams_={'ksize': (self.numRnnOutputSteps, 1),
                            'padding': 'SAME'},
            loggerFactory=self.loggerFactory)
        flat, cols = convert_to_2d(conv.output, conv.output_shape)
        conv_layers.append(conv)
        conv_flats.append(flat)
        conv_cols += cols

    # Concatenate all three branches along the feature (column) axis.
    combined = tf.concat([last_step] + conv_flats, axis=1)
    combined_cols = last_step_shape[1] + conv_cols

    self.layers.append(conv_layers)
    self.outputs.append({'output': combined,
                         'output_shape': (last_step_shape[0], combined_cols)})

    self.add_layers(DropoutLayer.new(self.pooledKeepProb))

    final = self.add_layers(FullyConnectedLayer.new(self.numClasses))
    self.l2Loss = self.l2RegLambda * (
        tf.nn.l2_loss(final.weights) + tf.nn.l2_loss(final.biases))
def make_graph(self):
    """Build the RNN -> (last-step + two full-width conv branches) -> dropout -> FC graph.

    Like the conv-maxpool variant, but the two conv branches are plain
    ConvLayers whose filters span the full feature width of the RNN output
    (heights 2 and 4), and the RNN receives ``self.rnnKeepProbs``. Branch
    outputs are flattened and concatenated column-wise before dropout and the
    fully-connected classifier. Also sets ``self.l2Loss``.
    """
    rnn = self.add_layers(
        RNNLayer.new(self.rnnNumCellUnits, self.rnnKeepProbs,
                     numStepsToOutput_=self.numRnnOutputSteps),
        self.input, (-1, -1, self.vecDim))

    feature_width = rnn.output_shape[2]

    # Branch A: just the last row (final output step) of the RNN output.
    last_step = rnn.output[:, -1, :]
    last_step_shape = (rnn.output_shape[0], feature_width)

    # Branches B and C: conv filters covering the full feature width, with
    # heights 2 and 4, each flattened to 2-D.
    conv_layers = []
    conv_flats = []
    conv_cols = 0
    for filter_height in (2, 4):
        conv = ConvLayer(
            rnn.output, rnn.output_shape,
            filterShape=(filter_height, feature_width),
            numFeaturesPerFilter=self.convNumFeaturesPerFilter,
            activation='relu',
            loggerFactory=self.loggerFactory)
        flat, cols = convert_to_2d(conv.output, conv.output_shape)
        conv_layers.append(conv)
        conv_flats.append(flat)
        conv_cols += cols

    # Concatenate all three branches along the feature (column) axis.
    combined = tf.concat([last_step] + conv_flats, axis=1)
    combined_cols = last_step_shape[1] + conv_cols

    self.layers.append(conv_layers)
    self.outputs.append({'output': combined,
                         'output_shape': (last_step_shape[0], combined_cols)})

    self.add_layers(DropoutLayer.new(self.pooledKeepProb))

    final = self.add_layers(FullyConnectedLayer.new(self.numClasses))
    self.l2Loss = self.l2RegLambda * (
        tf.nn.l2_loss(final.weights) + tf.nn.l2_loss(final.biases))
def make_graph(self):
    """Build a parallel RNN + CNN graph feeding a shared dropout + FC head.

    The RNN branch consumes ``self.input``; the CNN branch runs one
    ConvLocalnormLayer per (filter size, keep prob) pair over ``self.x``.
    Both branches are flattened to 2-D and registered via ``add_outputs``
    before the dropout and fully-connected layers. Also sets ``self.l2Loss``.
    """
    # ----- recurrent branch -----
    recurrent = RNNLayer(self.input, (-1, -1, self.vecDim),
                         self.rnnNumCellUnits, self.rnnKeepProbs,
                         loggerFactory=self.loggerFactory)
    recurrent_flat, recurrent_cols = convert_to_2d(recurrent.output,
                                                   recurrent.output_shape)

    # ----- convolutional branch -----
    # NOTE(review): this branch reads self.x while the RNN reads self.input —
    # presumably two views of the same placeholder; confirm against the class.
    conv_layers = []
    conv_flats = []
    conv_cols = 0
    for size, keep_prob in zip(self.convFilterSizes, self.convKeepProbs):
        conv = ConvLocalnormLayer(
            self.x, (-1, self.maxNumSeqs, self.vecDim),
            convParams_={'filterShape': (size, self.vecDim),
                         'numFeaturesPerFilter': self.convNumFeaturesPerFilter,
                         'keepProb': keep_prob,
                         'activation': 'relu'},
            loggerFactory=self.loggerFactory)
        flat, cols = convert_to_2d(conv.output, conv.output_shape)
        conv_layers.append(conv)
        conv_flats.append(flat)
        conv_cols += cols
    conv_combined = tf.concat(conv_flats, axis=1)

    # ----- merge the two branches -----
    self.layers.append([recurrent, conv_layers])
    self.add_outputs([recurrent_flat, conv_combined],
                     [(-1, recurrent_cols), (-1, conv_cols)])

    # ----- dropout -----
    self.add_layers(DropoutLayer.new(self.pooledKeepProb))

    # ----- fully connected classifier -----
    final = self.add_layers(FullyConnectedLayer.new(self.numClasses))
    self.l2Loss = self.l2RegLambda * (
        tf.nn.l2_loss(final.weights) + tf.nn.l2_loss(final.biases))
def make_graph(self):
    """Build a multi-filter-size conv-maxpool graph (Kim-style text CNN shape).

    One ConvMaxpoolLayer per entry in ``self.filterSizes``, each with a
    filter spanning the full vector dimension and VALID max-pooling over the
    remaining sequence positions. The flattened branch outputs are
    concatenated column-wise, then passed through dropout and a
    fully-connected classifier. Also sets ``self.l2Loss``.
    """
    # One conv-maxpool branch per filter size.
    branches = []
    branch_flats = []
    total_cols = 0
    for size in self.filterSizes:
        branch = ConvMaxpoolLayer(
            self.x, (-1, self.maxNumSeqs, self.vecDim),
            convParams_={'filterShape': (size, self.vecDim),
                         'numFeaturesPerFilter': self.numFeaturesPerFilter,
                         'activation': 'relu'},
            # pool over every valid position left after the convolution
            maxPoolParams_={'ksize': (self.maxNumSeqs - size + 1, 1),
                            'padding': 'VALID'},
            loggerFactory=self.loggerFactory)
        flat, cols = convert_to_2d(branch.output, branch.output_shape)
        branches.append(branch)
        branch_flats.append(flat)
        total_cols += cols
    self.layers.append(branches)

    # Concatenate all branches along the feature (column) axis.
    pooled = tf.concat(branch_flats, axis=1)
    self.add_output(pooled, (-1, total_cols))

    # Dropout, then the fully-connected classifier.
    self.add_layers(DropoutLayer.new(self.pooledKeepProb))
    final = self.add_layers(FullyConnectedLayer.new(self.numClasses))
    self.l2Loss = self.l2RegLambda * (
        tf.nn.l2_loss(final.weights) + tf.nn.l2_loss(final.biases))
def make_graph(self):
    """Build a multi-filter-size conv-localnorm graph with per-branch dropout.

    One ConvLocalnormLayer per (filter size, keep prob) pair from
    ``zip(self.filterSizes, self.keepProbs)``, each filter spanning the full
    vector dimension. The flattened branch outputs are concatenated
    column-wise, then passed through dropout and a fully-connected
    classifier. Also sets ``self.l2Loss``.
    """
    # One conv-localnorm branch per (filter size, keep prob) pair.
    branches = []
    branch_flats = []
    total_cols = 0
    for size, keep_prob in zip(self.filterSizes, self.keepProbs):
        branch = ConvLocalnormLayer(
            self.x, (-1, self.maxNumSeqs, self.vecDim),
            convParams_={'filterShape': (size, self.vecDim),
                         'numFeaturesPerFilter': self.numFeaturesPerFilter,
                         'keepProb': keep_prob,
                         'activation': 'relu'},
            loggerFactory=self.loggerFactory)
        flat, cols = convert_to_2d(branch.output, branch.output_shape)
        branches.append(branch)
        branch_flats.append(flat)
        total_cols += cols
    self.layers.append(branches)

    # Concatenate all branches along the feature (column) axis.
    pooled = tf.concat(branch_flats, axis=1)
    self.add_output(pooled, (-1, total_cols))

    # Dropout, then the fully-connected classifier.
    self.add_layers(DropoutLayer.new(self.pooledKeepProb))
    final = self.add_layers(FullyConnectedLayer.new(self.numClasses))
    self.l2Loss = self.l2RegLambda * (
        tf.nn.l2_loss(final.weights) + tf.nn.l2_loss(final.biases))