# Example 1
# --- Hyperparameters --------------------------------------------------------
maxlen = 400            # pad/truncate every review to this many tokens
batch_size = 32
embedding_dims = 50
epochs = 10

print('Loading data...')
# NOTE(review): `max_features` (vocabulary size for the IMDB loader) is not
# defined in this snippet — confirm it is set earlier in the file.
(x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=max_features)
print(len(x_train), 'train sequences')
print(len(x_test), 'test sequences')

print('Pad sequences (samples x time)...')
# Pad/truncate both splits to a fixed length so they form dense matrices.
x_train, x_test = (sequence.pad_sequences(split, maxlen=maxlen)
                   for split in (x_train, x_test))
print('x_train shape:', x_train.shape)
print('x_test shape:', x_test.shape)

print('Build model...')
model = TextRNN(maxlen, max_features, embedding_dims).get_model()
model.compile('adam', 'binary_crossentropy', metrics=['accuracy'])

print('Train...')
# Stop training once validation accuracy has failed to improve for 3 epochs.
# NOTE(review): newer Keras builds report this metric as 'val_accuracy' —
# verify the name matches the installed version.
stopper = EarlyStopping(monitor='val_acc', patience=3, mode='max')
model.fit(
    x_train, y_train,
    batch_size=batch_size,
    epochs=epochs,
    callbacks=[stopper],
    validation_data=(x_test, y_test),
)

print('Test...')
result = model.predict(x_test)
# Example 2
# --- Hyperparameters --------------------------------------------------------
epochs = 1
EMBEDDING_SIZE = 768    # width of the pre-trained embedding vectors
batch_size = 64
CORPUS_DIR = '../data/'  # NOTE(review): unused in this snippet — confirm it is needed

print('Loading data...')
# Load pre-processed arrays: token-id matrix, labels, pre-trained embeddings.
data = np.load("../npy/data.npy")
labels = np.load("../npy/labels.npy")
embeddings = np.load("../npy/embeddings.npy")
# Fix: the original re-loaded embeddings.npy from disk a second time just to
# take its length; reuse the array that is already in memory.
num_words = len(embeddings)

print('Build model...')
model = TextRNN(embedding_matrix=embeddings, maxlen=data.shape[1], max_features=num_words,
                embedding_dims=EMBEDDING_SIZE, class_num=labels.shape[1]).get_model()

model.compile('adam', 'categorical_crossentropy', metrics=['accuracy'])

print('Train...')
model.fit(data, labels, batch_size=batch_size, epochs=epochs, verbose=1)

# Backend function mapping the input layer to the LSTM layer's output;
# learning phase 0 = inference mode (dropout/batch-norm disabled).
flatten_layer = K.function([model.get_layer("input").input, K.learning_phase()],
                           [model.get_layer("lstm").output])
flatten_layer_vec = flatten_layer([data, 0])[0]
print(flatten_layer_vec)

# Dump one line per sample: comma-separated LSTM features, then the index of
# the sample's first nonzero label (the class id for one-hot labels).
with open("rnn.txt", "w", encoding="utf-8") as f:
    for i, vec in enumerate(flatten_layer_vec):
        features = ",".join(str(v) for v in vec)
        label_idx = np.nonzero(labels[i])[0][0]
        f.write(features + "," + str(label_idx) + "\n")