import tensorflow as tf

# `TextCNN` and `dataset` are assumed to be defined/imported elsewhere in
# the project; a sketch of the model architecture follows this example.
def train():
    # sample file paths
    positive_data_file = "./rt-polaritydata/rt-polarity.pos"
    negative_data_file = "./rt-polaritydata/rt-polarity.neg"
    # training parameters
    num_steps = 50  # number of training epochs
    SaveFileName = "text_cnn_model"  # directory name for saving the model
    # model parameters
    num_classes = 2  # number of output classes
    l2_reg_lambda = 0.1  # L2 regularization coefficient
    filter_sizes = "3,4,5"  # kernel sizes for the parallel convolutions
    num_filters = 64  # number of filters per kernel size

    # load the dataset
    data, vocab_processor, max_len = dataset(positive_data_file,
                                             negative_data_file)
    # build the model
    text_cnn = TextCNN(seq_length=max_len,
                       num_classes=num_classes,
                       vocab_size=len(vocab_processor.vocabulary_),
                       embeding_size=128,
                       filter_sizes=list(map(int, filter_sizes.split(','))),
                       num_filters=num_filters)

    def l2_loss(y_true, y_pred):
        # L2 penalty over the fully connected layer's weights (assumes the
        # dense layer is named "fully_connected"; the original exact-match
        # check against a misspelled name could never fire)
        l2_penalty = tf.constant(0.0)
        for tf_var in text_cnn.trainable_weights:
            if "fully_connected" in tf_var.name:
                l2_penalty += tf.nn.l2_loss(tf_var)

        loss = tf.nn.softmax_cross_entropy_with_logits(logits=y_pred,
                                                       labels=y_true)
        return loss + l2_reg_lambda * l2_penalty

    text_cnn.compile(loss=l2_loss,
                     optimizer=tf.keras.optimizers.Adam(learning_rate=1e-3),
                     metrics=['acc'])
    text_cnn.fit(data, epochs=num_steps)

    text_cnn.save("textcnn.h5")
Example #2
import numpy as np
from sklearn.metrics import (accuracy_score, classification_report,
                             confusion_matrix, f1_score)
from tensorflow.keras.preprocessing import sequence

import data_helpers

# `TextCNN`, `embedding_dim`, `batch_size`, and `epochs` are assumed to be
# defined/imported elsewhere in the project.
x_train, y_train, x_test, y_test, word2index = data_helpers.preprocess()
max_features = len(word2index)

max_len = max(len(x) for x in x_train)
print('max sequence length:', max_len)

print('Pad sequences...')
x_train = sequence.pad_sequences(x_train, maxlen=max_len, value=0)
x_test = sequence.pad_sequences(x_test, maxlen=max_len, value=0)

print('Build model...')
model = TextCNN(max_len,
                embedding_dim,
                batch_size=batch_size,
                class_num=2,
                max_features=max_features,
                epochs=epochs)

print('Train...')
# note: this custom TextCNN wrapper takes (x_train, x_test, y_train, y_test),
# not the standard Keras fit signature
model.fit(x_train, x_test, y_train, y_test)

print('Test...')
result = model.predict(x_test)
result = np.argmax(np.array(result), axis=1)
y_test = np.argmax(np.array(y_test), axis=1)

print('f1:', f1_score(y_test, result, average='macro'))
print('accuracy:', accuracy_score(y_test, result))
print('classification report:\n', classification_report(y_test, result))
print('confusion matrix:\n', confusion_matrix(y_test, result))
Example #3
import logging

from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras.datasets import imdb
from tensorflow.keras.preprocessing import sequence

logger = logging.getLogger(__name__)

# `TextCNN` and the hyperparameters `max_features`, `maxlen`, `emb_dim`,
# `batch_size`, and `epochs` are assumed to be defined elsewhere; the
# `load_data` fallback is sketched after this example.
logger.info('loading data...')
try:
    (x_train, y_train), (x_test, y_test) = imdb.load_data(
        num_words=max_features)
except Exception:
    # fall back to a loader that works around the NumPy allow_pickle issue
    # (see the sketch below)
    logger.info('np bug occurred, using patched loader...')
    (x_train, y_train), (x_test, y_test) = load_data(num_words=max_features)
logger.info('train data length: {}'.format(len(x_train)))
logger.info('test data length: {}'.format(len(x_test)))

logger.info('padding data...')
x_train = sequence.pad_sequences(x_train, maxlen=maxlen)
x_test = sequence.pad_sequences(x_test, maxlen=maxlen)

logger.info('build model...')
model = TextCNN(max_features=max_features, maxlen=maxlen,
                emb_dim=emb_dim).build_model()

logger.info('training...')
earlystop = EarlyStopping(patience=3, mode='max', monitor='val_acc')
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['acc'])
model.fit(x_train,
          y_train,
          batch_size=batch_size,
          epochs=epochs,
          callbacks=[earlystop],
          validation_data=(x_test, y_test))

logger.info('test...')
pred = model.predict(x_test[:10])
logger.info(list(zip(pred, y_test[:10])))
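# The `load_data` fallback used above guards against the NumPy >= 1.16.3
# allow_pickle change that broke keras' bundled imdb.load_data. A minimal
# sketch of such a fallback (an assumption, not the original helper):
import numpy as np
from tensorflow.keras.datasets import imdb

def load_data(num_words=None):
    # temporarily re-enable pickled object arrays for the .npz file the
    # imdb loader reads, then restore the original np.load
    np_load_old = np.load
    np.load = lambda *args, **kwargs: np_load_old(*args, allow_pickle=True,
                                                  **kwargs)
    try:
        return imdb.load_data(num_words=num_words)
    finally:
        np.load = np_load_old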
Example #4
from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras.datasets import imdb
from tensorflow.keras.preprocessing import sequence

# training hyperparameters (`TextCNN`, MAX_WORD_NUM, MAX_SENT_LEN,
# EMBEDDING_DIMS, CLASS_NUM, and LAST_ACTIVATION are assumed to be
# defined/imported elsewhere)
BATCH_SIZE = 128
EPOCHS = 10

# load data
(x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=MAX_WORD_NUM)

# padding sequence
x_train = sequence.pad_sequences(x_train, maxlen=MAX_SENT_LEN)
x_test = sequence.pad_sequences(x_test, maxlen=MAX_SENT_LEN)

# build model
model = TextCNN(max_sent_len=MAX_SENT_LEN,
                max_word_num=MAX_WORD_NUM,
                embedding_dims=EMBEDDING_DIMS,
                class_num=CLASS_NUM,
                last_activation=LAST_ACTIVATION).build_model()
model.compile('adam', 'binary_crossentropy', metrics=['accuracy'])

# train
early_stopping = EarlyStopping(monitor='val_accuracy', patience=3, mode='max')
model.fit(x_train,
          y_train,
          batch_size=BATCH_SIZE,
          epochs=EPOCHS,
          callbacks=[early_stopping],
          validation_data=(x_test, y_test))

# save model
# model.save('textcnn_model.h5')
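# To actually persist and reload the trained model, the standard Keras calls
# would look like this (a usage sketch, assuming the builder returns a model
# made only of built-in Keras layers):
from tensorflow.keras.models import load_model

model.save('textcnn_model.h5')
reloaded = load_model('textcnn_model.h5')
print(reloaded.predict(x_test[:5]))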