def make_graph(self):
        """Assemble the network: embedding -> parallel conv-maxpool branches -> dropout -> fully connected."""

        # assumes self.x is (batch, seq) integer token ids — TODO confirm against caller
        numInputCols = self.x.get_shape()[1].value

        # layer1: embedding lookup over the token ids
        embeddingLayer = self.add_layers(
            EmbeddingLayer.new(self.vocabSize, self.embeddingDim),
            self.input['x'], (-1, numInputCols))

        # layer2: one conv-maxpool branch per filter size, run in parallel;
        # VALID pooling over (numInputCols - fs + 1) rows collapses each branch
        # to one feature vector per filter
        convOutputs = [
            ConvMaxpoolLayer(
                embeddingLayer.output, embeddingLayer.output_shape,
                convParams_={'filterShape': (fs, self.embeddingDim),
                             'numFeaturesPerFilter': self.numFeaturesPerFilter,
                             'activation': 'relu'},
                maxPoolParams_={'ksize': (numInputCols - fs + 1, 1),
                                'padding': 'VALID'},
                loggerFactory=self.loggerFactory).output
            for fs in self.filterSizes]

        # concatenate the branches along the feature axis and flatten to 2-d
        pooledShape = (-1, self.numFeaturesPerFilter * len(self.filterSizes))
        pooledOutput = tf.reshape(tf.concat(convOutputs, 3), pooledShape)
        self.add_output(pooledOutput, pooledShape)

        # layer3: dropout on the pooled features
        self.add_layers(DropoutLayer.new(self.pooledKeepProb))

        # layer4: fully connected projection to class logits
        lastLayer = self.add_layers(FullyConnectedLayer.new(self.numClasses))

        # l2-regularize only the final layer's parameters
        self.l2Loss = self.l2RegLambda * (
            tf.nn.l2_loss(lastLayer.weights) + tf.nn.l2_loss(lastLayer.biases))
    def make_graph(self):
        """Assemble the network: RNN -> (last-step output || conv-maxpool branches) -> dropout -> fully connected.

        The two conv-maxpool branches were previously duplicated inline;
        they differ only in their (square) filter size, so they are built
        in a loop here. Graph-construction order is unchanged.
        """
        layer1 = self.add_layers(
            RNNLayer.new(self.rnnNumCellUnits,
                         numStepsToOutput_=self.numRnnOutputSteps), self.input,
            (-1, -1, self.vecDim))

        # layer2a: just the last row of the rnn output
        layer2a_output = layer1.output[:, -1, :]
        layer2a_outputshape = (layer1.output_shape[0], layer1.output_shape[2])

        # layer2b/2c: square conv-maxpool branches (2x2 then 4x4) over the full
        # rnn output, each flattened to 2-d. 16 features per filter (hard-coded
        # in the original).
        convLayers = []
        convOutputs = []
        convNumCols = 0

        for filterSize in (2, 4):
            convLayer = ConvMaxpoolLayer(
                layer1.output, layer1.output_shape,
                convParams_={'filterShape': (filterSize, filterSize),
                             'numFeaturesPerFilter': 16,
                             'activation': 'relu'},
                maxPoolParams_={'ksize': (self.numRnnOutputSteps, 1),
                                'padding': 'SAME'},
                loggerFactory=self.loggerFactory)

            flat, numCols = convert_to_2d(convLayer.output, convLayer.output_shape)
            convLayers.append(convLayer)
            convOutputs.append(flat)
            convNumCols += numCols

        # layer2: concatenate the rnn tail with the conv branches column-wise
        layer2_output = tf.concat([layer2a_output] + convOutputs, axis=1)
        layer2_output_numcols = layer2a_outputshape[1] + convNumCols

        self.layers.append(convLayers)
        self.outputs.append({
            'output': layer2_output,
            'output_shape': (layer2a_outputshape[0], layer2_output_numcols)
        })

        # layer3: dropout on the combined features
        self.add_layers(DropoutLayer.new(self.pooledKeepProb))

        # layer4: fully connected projection to class logits
        lastLayer = self.add_layers(FullyConnectedLayer.new(self.numClasses))

        # l2-regularize only the final layer's parameters
        self.l2Loss = self.l2RegLambda * (tf.nn.l2_loss(lastLayer.weights) +
                                          tf.nn.l2_loss(lastLayer.biases))
# Exemplo n.º 3  (extraction artifact from the original paste — not code)
# 0
    def make_graph(self):
        """Assemble the network: stacked RNN layers -> dropout -> fully connected, with a configurable l2 scheme."""
        rnnMakers = [RNNLayer.new(cfg.numCellUnits, cfg.keepProbs, activation=cfg.activation)
                     for cfg in self.rnnConfigs]
        self.add_layers(rnnMakers, self.input, (-1, -1, self.vecDim))

        self.add_layers(DropoutLayer.new(self.pooledKeepProb, self.pooledActivation))
        lastLayer = self.add_layers(FullyConnectedLayer.new(self.numClasses))

        # 'final_stage' regularizes only the last layer's parameters;
        # any other scheme regularizes every trainable variable in the graph
        if self.l2Scheme == 'final_stage':
            rawL2 = tf.nn.l2_loss(lastLayer.weights) + tf.nn.l2_loss(lastLayer.biases)
        else:
            rawL2 = sum(tf.nn.l2_loss(v) for v in tf.trainable_variables())

        self.l2Loss = self.l2RegLambda * rawL2
    def make_graph(self):
        """Assemble the network: RNN -> (last-step output || full-width conv branches) -> dropout -> fully connected.

        The two ConvLayer branches were previously duplicated inline; they
        differ only in filter height (2 vs 4), so they are built in a loop
        here. Graph-construction order is unchanged.
        """
        layer1 = self.add_layers(
            RNNLayer.new(self.rnnNumCellUnits,
                         self.rnnKeepProbs,
                         numStepsToOutput_=self.numRnnOutputSteps), self.input,
            (-1, -1, self.vecDim))

        numCols = layer1.output_shape[2]

        # layer2a: just the last row of the rnn output
        layer2a_output = layer1.output[:, -1, :]
        layer2a_outputshape = (layer1.output_shape[0], numCols)

        # layer2b/2c: full-width conv branches (filter heights 2 then 4),
        # each flattened to 2-d
        convLayers = []
        convOutputs = []
        convNumCols = 0

        for filterHeight in (2, 4):
            convLayer = ConvLayer(layer1.output,
                                  layer1.output_shape,
                                  filterShape=(filterHeight, numCols),
                                  numFeaturesPerFilter=self.convNumFeaturesPerFilter,
                                  activation='relu',
                                  loggerFactory=self.loggerFactory)

            flat, cols = convert_to_2d(convLayer.output, convLayer.output_shape)
            convLayers.append(convLayer)
            convOutputs.append(flat)
            convNumCols += cols

        # layer2: concatenate the rnn tail with both conv branches column-wise
        layer2_output = tf.concat([layer2a_output] + convOutputs, axis=1)
        layer2_output_numcols = layer2a_outputshape[1] + convNumCols

        self.layers.append(convLayers)
        self.outputs.append({
            'output': layer2_output,
            'output_shape': (layer2a_outputshape[0], layer2_output_numcols)
        })

        # dropout on the combined features
        self.add_layers(DropoutLayer.new(self.pooledKeepProb))

        # fully connected projection to class logits
        lastLayer = self.add_layers(FullyConnectedLayer.new(self.numClasses))

        # l2-regularize only the final layer's parameters
        self.l2Loss = self.l2RegLambda * (tf.nn.l2_loss(lastLayer.weights) +
                                          tf.nn.l2_loss(lastLayer.biases))
# Exemplo n.º 5  (extraction artifact from the original paste — not code)
# 0
    def make_graph(self):
        """Assemble the network: parallel conv-maxpool branches over the input -> dropout -> fully connected."""

        # layer1: one conv-maxpool branch per filter size, each flattened to
        # 2-d; VALID pooling over (maxNumSeqs - fs + 1) rows collapses every
        # branch to one feature vector per filter
        branchLayers = []
        branchOutputs = []
        totalCols = 0

        for fs in self.filterSizes:
            branch = ConvMaxpoolLayer(
                self.x, (-1, self.maxNumSeqs, self.vecDim),
                convParams_={'filterShape': (fs, self.vecDim),
                             'numFeaturesPerFilter': self.numFeaturesPerFilter,
                             'activation': 'relu'},
                maxPoolParams_={'ksize': (self.maxNumSeqs - fs + 1, 1),
                                'padding': 'VALID'},
                loggerFactory=self.loggerFactory)

            flat, cols = convert_to_2d(branch.output, branch.output_shape)
            branchLayers.append(branch)
            branchOutputs.append(flat)
            totalCols += cols

        self.layers.append(branchLayers)

        # concatenate all branches column-wise
        self.add_output(tf.concat(branchOutputs, axis=1), (-1, totalCols))

        # layer2: dropout on the pooled features
        self.add_layers(DropoutLayer.new(self.pooledKeepProb))

        # layer3: fully connected projection to class logits
        lastLayer = self.add_layers(FullyConnectedLayer.new(self.numClasses))

        # l2-regularize only the final layer's parameters
        self.l2Loss = self.l2RegLambda * (tf.nn.l2_loss(lastLayer.weights) +
                                          tf.nn.l2_loss(lastLayer.biases))
# Exemplo n.º 6  (extraction artifact from the original paste — not code)
# 0
    def make_graph(self):
        """Assemble the network: an RNN branch plus parallel conv-localnorm branches, merged, then dropout -> fully connected."""

        # -------------- RNN branch --------------
        rnnLayer = RNNLayer(self.input, (-1, -1, self.vecDim),
                            self.rnnNumCellUnits, self.rnnKeepProbs,
                            loggerFactory=self.loggerFactory)

        rnnFlat, rnnCols = convert_to_2d(rnnLayer.output, rnnLayer.output_shape)

        # -------------- CNN branches --------------
        # one conv-localnorm branch per (filter size, keep prob) pair
        convLayers = []
        convOutputs = []
        convCols = 0

        for fs, kp in zip(self.convFilterSizes, self.convKeepProbs):
            convLayer = ConvLocalnormLayer(
                self.x, (-1, self.maxNumSeqs, self.vecDim),
                convParams_={'filterShape': (fs, self.vecDim),
                             'numFeaturesPerFilter': self.convNumFeaturesPerFilter,
                             'keepProb': kp,
                             'activation': 'relu'},
                loggerFactory=self.loggerFactory)

            flat, cols = convert_to_2d(convLayer.output, convLayer.output_shape)
            convLayers.append(convLayer)
            convOutputs.append(flat)
            convCols += cols

        cnnFlat = tf.concat(convOutputs, axis=1)

        # -------------- combine RNN & CNN --------------
        self.layers.append([rnnLayer, convLayers])
        self.add_outputs([rnnFlat, cnnFlat], [(-1, rnnCols), (-1, convCols)])

        # -------------- dropout --------------
        self.add_layers(DropoutLayer.new(self.pooledKeepProb))

        # -------------- fully connected --------------
        lastLayer = self.add_layers(FullyConnectedLayer.new(self.numClasses))

        # l2-regularize only the final layer's parameters
        self.l2Loss = self.l2RegLambda * (
            tf.nn.l2_loss(lastLayer.weights) + tf.nn.l2_loss(lastLayer.biases))
    def make_graph(self):
        """Assemble the network: parallel conv-localnorm branches -> dropout -> fully connected."""

        # layer1: one conv-localnorm branch per (filter size, keep prob) pair,
        # each flattened to 2-d
        branches = []
        flatOutputs = []
        totalCols = 0

        for fs, kp in zip(self.filterSizes, self.keepProbs):
            branch = ConvLocalnormLayer(
                self.x, (-1, self.maxNumSeqs, self.vecDim),
                convParams_={'filterShape': (fs, self.vecDim),
                             'numFeaturesPerFilter': self.numFeaturesPerFilter,
                             'keepProb': kp,
                             'activation': 'relu'},
                loggerFactory=self.loggerFactory)

            flat, cols = convert_to_2d(branch.output, branch.output_shape)
            branches.append(branch)
            flatOutputs.append(flat)
            totalCols += cols

        self.layers.append(branches)

        # concatenate all branches column-wise
        self.add_output(tf.concat(flatOutputs, axis=1), (-1, totalCols))

        # layer2: dropout on the pooled features
        self.add_layers(DropoutLayer.new(self.pooledKeepProb))

        # layer3: fully connected projection to class logits
        lastLayer = self.add_layers(FullyConnectedLayer.new(self.numClasses))

        # l2-regularize only the final layer's parameters
        self.l2Loss = self.l2RegLambda * (tf.nn.l2_loss(lastLayer.weights) +
                                          tf.nn.l2_loss(lastLayer.biases))