# NOTE(review): this line is a whitespace-mangled paste — many statements were
# collapsed onto one physical line when the original newlines/indentation were
# lost. As written it is not valid Python; the original line breaks must be
# restored before it can run. The code below is kept byte-identical apart from
# translating one comment; only review notes are added here.
#
# What the visible code does: builds one Preprocessor and six
# TextClassifier(len(group), 'lstm') models, one per label group in
# config.class_group, plus an F1 accumulator list. Then, for 30 iterations, it
# shuffles each group's training data with p.shuffle(...) and calls
# model.train(train_seq, per-class label lists, Validation_seq, per-class
# validation label lists) for that group.
#
# BUG(review): the third group's shuffled data (train_seq3/train_label3) is
# passed to model1.train(...) — almost certainly this should be
# model3.train(...); model3 is constructed but never trained in the visible
# code. Confirm against the full file before fixing (the statement is cut off
# mid-argument at the end of this line, and groups 4-6 are not visible here).
#
# NOTE(review): the comprehension variable `i` inside the .train(...) calls
# shadows the outer `for i in range(30)` loop index. In Python 3 comprehension
# scope does not leak, so the outer `i` is unaffected — but the shadowing is
# confusing and worth renaming when the file is reformatted.
p = Preprocessor() model1 = TextClassifier(len(config.class_group[0]), 'lstm') model2 = TextClassifier(len(config.class_group[1]), 'lstm') model3 = TextClassifier(len(config.class_group[2]), 'lstm') model4 = TextClassifier(len(config.class_group[3]), 'lstm') model5 = TextClassifier(len(config.class_group[4]), 'lstm') model6 = TextClassifier(len(config.class_group[5]), 'lstm') F1 = [] for i in range(30): # train the models train_seq1, train_label1 = p.shuffle(Train_seq_set[0], Train_label_set[0]) model1.train(train_seq1, [train_label1[l] for l in range(len(config.class_group[0]))], Validation_seq, [ Validation_label[config.class_group[0][i]] for i in range(len(config.class_group[0])) ]) train_seq2, train_label2 = p.shuffle(Train_seq_set[1], Train_label_set[1]) model2.train(train_seq2, [train_label2[l] for l in range(len(config.class_group[1]))], Validation_seq, [ Validation_label[config.class_group[1][i]] for i in range(len(config.class_group[1])) ]) train_seq3, train_label3 = p.shuffle(Train_seq_set[2], Train_label_set[2]) model1.train(train_seq3, [train_label3[l] for l in range(len(config.class_group[2]))], Validation_seq, [
# NOTE(review): whitespace-mangled paste — several statements collapsed onto
# one physical line; the original newlines/indentation must be restored before
# this runs. Code kept byte-identical; only review notes are added above it.
#
# What the visible code does: builds batch index lists for the train and dev
# splits, then runs a `while epoch < maxEpoch` loop. Each epoch it shuffles
# corpus.trainData, constructs a fresh SGD optimizer over
# classifier.parameters() with a decayed learning rate
# initialLearningRate / (1.0 + lrDecay * epoch) and weight_decay=weightDecay,
# and puts the classifier in train mode. Per batch it zeroes gradients,
# derives curBatchSize from the inclusive batch index pair
# (batch[1] - batch[0] + 1), builds padded input/target tensors via
# utils.buildBatchInOutForClassifier(PAD index, batch, trainData), and
# allocates zero initial LSTM states h0/c0 shaped
# (2 if biDirectional else 1, curBatchSize, hiddenDim).
#
# NOTE(review): uses the legacy torch.autograd.Variable API (pre-PyTorch 0.4);
# on modern PyTorch, plain tensors (with requires_grad=False by default) would
# replace Variable when this file is eventually modernized.
#
# NOTE(review): the loop body is truncated at the end of this line — the
# forward pass, loss computation, backward(), opt.step(), and the
# aveLoss/trainAcc updates are not visible here, so no behavior change is
# attempted in this review.
batchListTrain = utils.buildBatchList(len(corpus.trainData), batchSize) batchListDev = utils.buildBatchList(len(corpus.devData), batchSize) while epoch < maxEpoch: aveLoss = 0.0 trainAcc = 0.0 epoch += 1 print('--- Epoch ' + str(epoch)) random.shuffle(corpus.trainData) opt = optim.SGD(classifier.parameters(), lr=initialLearningRate / (1.0 + lrDecay * epoch), weight_decay=weightDecay) classifier.train() for batch in batchListTrain: opt.zero_grad() # build input for the batch curBatchSize = batch[1] - batch[0] + 1 batchInput, batchTarget, lengths = utils.buildBatchInOutForClassifier( corpus.voc.getTokenIndex(corpus.voc.PAD), batch, corpus.trainData) target = Variable(batchTarget) if biDirectional: shape = 2, curBatchSize, hiddenDim else: shape = 1, curBatchSize, hiddenDim h0 = c0 = Variable(torch.zeros(*shape), requires_grad=False)