def __init__(self, wv_matrix, lstm_hidden_dim, nn_hidden_dim, num_classes, mean_pool=False):
    # Assumes `import layers` at module scope, providing embeddingLayer,
    # LSTMLayer, and FullyConnected.
    print('Initializing embedding model...')
    self.mean_pool = mean_pool
    # Word vectors are stored column-wise, so shape[0] is the embedding dimension.
    input_dim = wv_matrix.shape[0]
    self.embeddingLayer = layers.embeddingLayer(wv_matrix)
    self.LSTMLayer = layers.LSTMLayer(input_dim, lstm_hidden_dim)
    # The classifier input is twice the LSTM hidden size: two encodings of
    # size lstm_hidden_dim are concatenated before the feed-forward layers.
    nn_input_dim = 2 * lstm_hidden_dim
    self.fc1 = layers.FullyConnected(input_dim=nn_input_dim,
                                     output_dim=nn_hidden_dim,
                                     activation='relu')
    self.fc2 = layers.FullyConnected(input_dim=nn_hidden_dim,
                                     output_dim=nn_hidden_dim,
                                     activation='relu')
    self.linear_layer = layers.FullyConnected(input_dim=nn_hidden_dim,
                                              output_dim=num_classes,
                                              activation=None)
    self.layers = {'embeddingLayer': self.embeddingLayer,
                   'lstm': self.LSTMLayer,
                   'fc1': self.fc1,
                   'fc2': self.fc2,
                   'linear': self.linear_layer}
    # Aggregate every trainable parameter so the optimizer can update them all.
    self.params = self.embeddingLayer.params + self.LSTMLayer.params + \
        self.fc1.params + self.fc2.params + self.linear_layer.params
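# Usage sketch, kept as a comment so the file still parses. The class name
# EmbeddingModel, the shape (embedding_dim, vocab_size) for wv_matrix, and the
# concrete dimensions below are all assumptions for illustration, not part of
# the original code:
#
#   import numpy as np
#   wv_matrix = np.random.randn(300, 20000).astype('float32')  # 300-d vectors, 20k-word vocab
#   model = EmbeddingModel(wv_matrix, lstm_hidden_dim=128,
#                          nn_hidden_dim=200, num_classes=3)
#   # model.params now holds every trainable parameter for the optimizer.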
def __init__(self, embeddings, lstm_hidden_dim):
    print('Initializing attention model...')
    # self.reverse = reverse
    # The LSTM consumes concatenated vector pairs, so its input dimension is
    # twice the embedding dimension (embeddings stored column-wise).
    input_dim = 2 * embeddings.shape[0]
    self.embeddingLayer = layers.embeddingLayer(embeddings)
    self.LSTMLayer = layers.LSTMLayer(input_dim, lstm_hidden_dim)
    # Binary output head on top of the LSTM encoding.
    self.linear_layer = layers.FullyConnected(input_dim=lstm_hidden_dim,
                                              output_dim=2,
                                              activation=None)
    self.layers = {'lstm': self.LSTMLayer,
                   'linear': self.linear_layer,
                   'embeddings': self.embeddingLayer}
    self.params = self.LSTMLayer.params + self.linear_layer.params + self.embeddingLayer.params
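# Usage sketch, also commented out. The class name AttentionModel and the
# shapes are hypothetical, assuming embeddings has shape (embedding_dim, vocab_size):
#
#   import numpy as np
#   embeddings = np.random.randn(300, 20000).astype('float32')
#   model = AttentionModel(embeddings, lstm_hidden_dim=128)
#   # The LSTM input dimension is 2 * 300 = 600: one concatenated vector pair per step.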