def test_cnn_train(self):
    # Collect the label set
    print(PROJECT_DIR)
    print(DATA_DIR)
    with io.open(DATA_DIR + '.labels', 'r') as f:
        labels = [line.rstrip('\n') for line in f]
    labels = list(set(labels))

    # Run the model. init_word_vectors trains word2vec and fits the
    # scaler in one step, so a separate train_word2vec call is redundant.
    model = Magpie()
    model.init_word_vectors(DATA_DIR, vec_dim=300)
    model.train(DATA_DIR, labels, nn_model='cnn', test_ratio=0.2, epochs=30)

    # Save the trained artifacts
    path1 = PROJECT_DIR + '/here1.h5'
    path2 = PROJECT_DIR + '/embedinghere'
    path3 = PROJECT_DIR + '/scaler'
    model.save_word2vec_model(path2)
    model.save_scaler(path3, overwrite=True)
    model.save_model(path1)

    # Do a simple prediction
    print(model.predict_from_text(
        'cho em hỏi về lịch khám của bác_sỹ đào việt_hằng và số điện_thoại'))
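# A minimal reload sketch (an assumption, not part of the original test):
# Magpie's constructor accepts a saved Keras model, word2vec embeddings,
# scaler and label list, so the artifacts written above can be used for
# inference without retraining. path1/path2/path3/labels refer to the
# names from the test above; the query text is illustrative only.
trained = Magpie(
    keras_model=path1,
    word2vec_model=path2,
    scaler=path3,
    labels=labels,
)
print(trained.predict_from_text('lịch khám của bác_sỹ đào việt_hằng'))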
def train_dl(save, vec_dim, epochs):
    """Train process."""
    magpie = Magpie()
    # init_word_vectors combines the two steps below:
    # magpie.train_word2vec('/Users/sunxuan/Documents/PycharmProjects/ImpactPool/data/categories', vec_dim=100)
    # magpie.fit_scaler('/Users/sunxuan/Documents/PycharmProjects/ImpactPool/data/categories')
    magpie.init_word_vectors(
        '/Users/sunxuan/Documents/PycharmProjects/ImpactPool/data/categories',
        vec_dim=vec_dim)

    with open('data/categories.labels') as f:
        labels = [x.strip() for x in f.readlines()]

    magpie.train(
        '/Users/sunxuan/Documents/PycharmProjects/ImpactPool/data/categories',
        labels, test_ratio=0.0, epochs=epochs)

    if save:
        # Save the model, embeddings and scaler
        magpie.save_word2vec_model(
            '/Users/sunxuan/Documents/PycharmProjects/ImpactPool/data/save/embeddings/here')
        magpie.save_scaler(
            '/Users/sunxuan/Documents/PycharmProjects/ImpactPool/data/save/scaler/here',
            overwrite=True)
        magpie.save_model(
            '/Users/sunxuan/Documents/PycharmProjects/ImpactPool/data/save/model/here.h5')
    return magpie
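# A hedged usage sketch for train_dl, assuming the script is run from the
# project root so the relative 'data/categories.labels' path resolves; the
# vec_dim/epochs values and the sample text are illustrative only.
if __name__ == '__main__':
    trained = train_dl(save=True, vec_dim=100, epochs=30)
    print(trained.predict_from_text('sample text to categorize'))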
labels4 = sys.argv[9]
labels = [labels1, labels2, labels3, labels4]
# print(labels)

dirName = 'D:\\xampp\\htdocs\\mtlbl\\webpage\\admin\\models\\' + model_name
os.mkdir(dirName)
model_path = dirName + '\\' + model_name
scaler_path = dirName + '\\scaler_' + model_name
keras_path = dirName + '\\keras_' + model_name + '.h5'
# print(model_path)
# print(keras_path)

from magpie import Magpie

magpie = Magpie()
magpie.init_word_vectors(data, vec_dim=vec_num)
# More epochs give the network more passes over the vectors and a lower loss.
magpie.train(data, labels, test_ratio=test_rat, epochs=ep)
# magpie.predict_from_text('ECB to reveal bad loan hurdles for euro zone bank test')  # quick sanity check

magpie.save_word2vec_model(model_path)
magpie.save_scaler(scaler_path, overwrite=True)
magpie.save_model(keras_path)
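# A minimal reload sketch (assumed, not in the original script): the saved
# paths and the four command-line labels are fed back into Magpie's
# constructor for later prediction; the sample sentence reuses the sanity
# check above.
trained = Magpie(
    keras_model=keras_path,
    word2vec_model=model_path,
    scaler=scaler_path,
    labels=labels,
)
print(trained.predict_from_text('ECB to reveal bad loan hurdles for euro zone bank test'))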
for WORD2VEC_CONTEXT in [5, 10]:
    magpie.train_word2vec(train_dir, vec_dim=EMBEDDING_SIZE,
                          MWC=MIN_WORD_COUNT, w2vc=WORD2VEC_CONTEXT)
    magpie.fit_scaler('C:\\magpie-master\\data\\hep-categories')
    # Train for 20 epochs, holding out 10% of the data for testing.
    magpie.train('C:\\magpie-master\\data\\hep-categories', labels,
                 callbacks=[lossHistory], test_ratio=0.1, epochs=20)

    # Name every artifact after the run configuration.
    run_tag = (train_dir[-3:] + '_' + str(EMBEDDING_SIZE) + '_' +
               str(MIN_WORD_COUNT) + '_' + str(WORD2VEC_CONTEXT))
    lossHistory.loss_plot('epoch', 'C:\\magpie-master\\' + run_tag + '.jpg')
    magpie.save_word2vec_model('C:\\magpie-master\\save\\embeddings\\' + run_tag)
    magpie.save_scaler('C:\\magpie-master\\save\\scaler\\' + run_tag)
    magpie.save_model('C:\\magpie-master\\save\\model\\' + run_tag + '.h5')
    print(Success + '\n' + run_tag + ' Success!!!')
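# A hedged sketch (assumed, not in the original loop) of reloading one of
# the runs saved above; after the loop, run_tag still names the last
# configuration, so this picks up that model for inference.
trained = Magpie(
    keras_model='C:\\magpie-master\\save\\model\\' + run_tag + '.h5',
    word2vec_model='C:\\magpie-master\\save\\embeddings\\' + run_tag,
    scaler='C:\\magpie-master\\save\\scaler\\' + run_tag,
    labels=labels,
)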
#!/usr/bin/python
# -*- coding: UTF-8 -*-
"""
@Author: njuselhx
@Time: 2021/1/21 19:01
@File: train.py
@Software: PyCharm
"""
from magpie import Magpie

magpie = Magpie()

'''
magpie.init_word_vectors('data/hep-categories-zh', vec_dim=100)
labels = ['军事', '旅游', '政治']  # military, tourism, politics
magpie.train('data/hep-categories-zh', labels, test_ratio=0.2, epochs=100)
magpie.save_model('save/keras_model_zh.h5')
magpie.save_word2vec_model('save/word2vec_model_zh', overwrite=True)
magpie.save_scaler('save/scaler_zh', overwrite=True)
print(magpie.predict_from_text('特朗普在联合国大会发表演讲谈到这届美国政府成绩时,称他已经取得了美国历史上几乎最大的成就。随后大会现场传出了嘲笑声,特朗普立即回应道:“这是真的。”'))
'''

magpie.init_word_vectors('data/emotion-categories', vec_dim=100)
# Emotion labels: satisfaction, joy, optimism, anger, sorrow, fear,
# disgust, anxiety, doubt.
labels = ['满意', '喜悦', '乐观', '愤怒', '悲哀', '恐惧', '厌恶', '焦虑', '怀疑']
magpie.train('data/emotion-categories', labels, test_ratio=0.2, epochs=2333)
magpie.save_model('save/emotion_keras_model.h5')
magpie.save_word2vec_model('save/emotion_word2vec_model', overwrite=True)
magpie.save_scaler('save/emotion_scaler', overwrite=True)
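# A minimal inference sketch (assumed, not in the original script) for the
# emotion model saved above; a freshly constructed Magpie needs the same
# label list that was used for training, and the input text is illustrative.
trained = Magpie(
    keras_model='save/emotion_keras_model.h5',
    word2vec_model='save/emotion_word2vec_model',
    scaler='save/emotion_scaler',
    labels=labels,
)
print(trained.predict_from_text('今天很开心'))  # illustrative input: "very happy today"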
import os
import sys
sys.path.append(os.path.realpath(os.getcwd()))
sys.path.append("..")
from magpie import Magpie

magpie = Magpie()
magpie.train_word2vec('../data/hep-categories', vec_dim=3)  # train a word2vec model
magpie.fit_scaler('../data/hep-categories')  # fit the scaler
magpie.init_word_vectors('../data/hep-categories', vec_dim=3)  # initialize the word vectors
labels = ['军事', '旅游', '政治']  # all categories: military, tourism, politics
# Train for 20 epochs, holding out 20% of the data for testing.
magpie.train('../data/hep-categories', labels, test_ratio=0.2, epochs=20)

# Save the trained model files
magpie.save_word2vec_model('../workspace/embeddings', overwrite=True)
magpie.save_scaler('../workspace/scaler', overwrite=True)
magpie.save_model('../workspace/model.h5')
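# A hedged sketch (assumed, not in the original script) of loading the files
# saved above back into Magpie; the paths and label list mirror this
# script's save calls.
trained = Magpie(
    keras_model='../workspace/model.h5',
    word2vec_model='../workspace/embeddings',
    scaler='../workspace/scaler',
    labels=labels,
)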
    # scaler='/home/ydm/ren/remote/multiLabel/data/scaler',
    # labels=labels
    # )
    magpie = Magpie()
    magpie.init_word_vectors(
        '/home/ydm/ren/remote/multiLabel/data/hep-categories', vec_dim=100)
    print(len(labels))
    magpie.train('/home/ydm/ren/remote/multiLabel/data/hep-categories',
                 labels, epochs=30, batch_size=128)
    magpie.save_word2vec_model(
        '/home/ydm/ren/remote/multiLabel/data/word2vec_mode_place')
    magpie.save_scaler('/home/ydm/ren/remote/multiLabel/data/scaler_place',
                       overwrite=True)
    magpie.save_model('/home/ydm/ren/remote/multiLabel/data/model_place.h5')

    alltest = getlabel('/home/ydm/ren/remote/multiLabel/data/allsents_test.txt')
    # alltest = [alltest]
    writes = open('/home/ydm/ren/remote/multiLabel/data/result_place.txt',
                  'w', encoding='utf-8')
    for sent in alltest:
        # print(sent)
        pre_result = magpie.predict_from_text(sent)[:30]
        # print(pre_result)
        resultDict = {}
        for item in pre_result:
min_lr=0)

# Hyperparameter search (kept commented out):
'''
for optimizer in ['SGD', 'RMSprop', 'Adagrad', 'Adadelta', 'Adam', 'Adamax', 'Nadam']:
    for BATCH_SIZE in [16, 32, 64, 128, 256]:
        print(optimizer + str(BATCH_SIZE))
        magpie.train('data/hep-categories', labels,
                     batch_size=BATCH_SIZE,
                     callbacks=[checkpoint, reduceLROnPlateau],
                     test_ratio=0.1,
                     epochs=60,
                     verbose=1,
                     optimizer=optimizer,
                     logdir='C:\\magpie-master\\trainlog\\' + optimizer + '_' + str(BATCH_SIZE) + '.txt')
'''

# Build the final model on the full data set.
magpie.train('data/hep-categories', labels,
             batch_size=16,
             callbacks=[checkpoint, reduceLROnPlateau],
             test_ratio=0.0,
             epochs=60,
             verbose=1,
             optimizer='Adam')
magpie.save_word2vec_model('save/embeddings/best', overwrite=True)
magpie.save_scaler('save/scaler/best', overwrite=True)
magpie.save_model('save/model/best.h5')
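# A minimal serving sketch (assumed, not in the original script); the three
# paths mirror the save calls above, `labels` is the training label list,
# and the input text is illustrative only.
trained = Magpie(
    keras_model='save/model/best.h5',
    word2vec_model='save/embeddings/best',
    scaler='save/scaler/best',
    labels=labels,
)
print(trained.predict_from_text('sample document to classify'))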