import sys

import torch
import torch.nn.functional as F

# data_processor, TextRNN, eval() and save() are project-level helpers,
# assumed to be defined or imported elsewhere in this file.


def train(args):
    train_iter, dev_iter = data_processor.load_data(args)  # split data into training and validation sets
    print('Data loading finished')
    model = TextRNN(args)
    use_cuda = args.cuda and torch.cuda.is_available()
    if use_cuda:
        model.cuda()
    """
    Q5: Please give optimizer here
        Add lr_scheduler to adjust learning rate.
    """
    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
    scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, 0.8)
    steps = 0
    best_acc = 0
    last_step = 0
    model.train()
    for epoch in range(1, args.epoch + 1):
        for batch in train_iter:
            feature, target = batch.text, batch.label
            with torch.no_grad():
                feature.t_()    # transpose (max_len, batch_size) -> (batch_size, max_len)
                target.sub_(1)  # shift labels from 1-indexed to 0-indexed
            if use_cuda:
                feature, target = feature.cuda(), target.cuda()
            optimizer.zero_grad()
            logits = model(feature)
            loss = F.cross_entropy(logits, target)
            loss.backward()
            optimizer.step()
            steps += 1
            if steps % args.log_interval == 0:
                # torch.max(logits, 1) returns, for each row, the max value and its column index;
                # [1] selects the index, i.e. the predicted class
                corrects = (torch.max(logits, 1)[1] == target).sum().item()
                train_acc = 100.0 * corrects / batch.batch_size
                sys.stdout.write(
                    '\rBatch[{}] - loss: {:.6f} acc: {:.4f}%({}/{})'.format(
                        steps, loss.item(), train_acc, corrects, batch.batch_size))
            if steps % args.test_interval == 0:
                dev_acc = eval(dev_iter, model, args)
                if dev_acc > best_acc:
                    best_acc = dev_acc
                    last_step = steps
                    if args.save_best:
                        print('Saving best model, acc: {:.4f}%\n'.format(best_acc))
                        save(model, args.save_dir, 'best', steps)
                else:
                    scheduler.step()
                    print('lr decayed to {}'.format(optimizer.state_dict()['param_groups'][0]['lr']))
                    if steps - last_step >= args.early_stopping:
                        print('\nearly stop by {} steps, acc: {:.4f}%'.format(args.early_stopping, best_acc))
                        raise KeyboardInterrupt
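# Both versions of the loop rely on two project-level helpers, eval(dev_iter, model, args)
# and save(model, save_dir, prefix, steps), whose definitions are not shown here.
# The sketch below is only an assumption of what they do, consistent with how the loop uses
# them (eval returns accuracy as a percentage, save writes a checkpoint named from the
# prefix and step count); the checkpoint path pattern and loss reporting are illustrative.
import os

import torch
import torch.nn.functional as F


def eval(data_iter, model, args):
    """Evaluate on the validation iterator and return accuracy as a percentage."""
    model.eval()
    corrects, total_loss, size = 0, 0.0, 0
    with torch.no_grad():
        for batch in data_iter:
            feature, target = batch.text, batch.label
            feature.t_()    # same preprocessing as in train(); skip if the model expects (max_len, batch_size)
            target.sub_(1)
            if args.cuda and torch.cuda.is_available():
                feature, target = feature.cuda(), target.cuda()
            logits = model(feature)
            total_loss += F.cross_entropy(logits, target, reduction='sum').item()
            corrects += (torch.max(logits, 1)[1] == target).sum().item()
            size += batch.batch_size
    model.train()  # restore training mode for the caller
    avg_loss = total_loss / size
    accuracy = 100.0 * corrects / size
    print('\nEvaluation - loss: {:.6f} acc: {:.4f}%({}/{})'.format(avg_loss, accuracy, corrects, size))
    return accuracy


def save(model, save_dir, save_prefix, steps):
    """Write a checkpoint such as <save_dir>/<save_prefix>_steps_<N>.pt."""
    if not os.path.isdir(save_dir):
        os.makedirs(save_dir)
    save_path = os.path.join(save_dir, '{}_steps_{}.pt'.format(save_prefix, steps))
    torch.save(model.state_dict(), save_path)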
# Alternative version of train(): no lr scheduler, gradient clipping added,
# and feature.t_() left commented out, so the model receives (max_len, batch_size) input.
def train(args):
    train_iter, dev_iter = data_processor.load_data(args)  # split data into training and validation sets
    print('Data loading finished')
    model = TextRNN(args)
    if args.cuda:
        model.cuda()
    """
    Q5: Please give optimizer here
    """
    optimizer = torch.optim.Adam(model.parameters())
    steps = 0
    best_acc = 0
    last_step = 0
    model.train()
    for epoch in range(1, args.epoch + 1):
        for batch in train_iter:
            feature, target = batch.text, batch.label
            # t_() would transpose (max_len, batch_size) to (batch_size, max_len)
            with torch.no_grad():
                # feature.t_()
                target.sub_(1)  # shift labels from 1-indexed to 0-indexed
            # print(feature.shape)
            if args.cuda:
                feature, target = feature.cuda(), target.cuda()
            optimizer.zero_grad()
            logits = model(feature)
            # print(logits.shape)
            loss = F.cross_entropy(logits, target)
            loss.backward()
            torch.nn.utils.clip_grad_norm_(model.parameters(), 1)  # clip gradients to stabilize RNN training
            optimizer.step()
            steps += 1
            if steps % args.log_interval == 0:
                # torch.max(logits, 1) returns, for each row, the max value and its column index;
                # [1] selects the index, i.e. the predicted class
                corrects = (torch.max(logits, 1)[1] == target).sum().item()
                train_acc = 100.0 * corrects / batch.batch_size
                sys.stdout.write(
                    '\rBatch[{}] - loss: {:.6f} acc: {:.4f}%({}/{})'.format(
                        steps, loss.item(), train_acc, corrects, batch.batch_size))
            if steps % args.test_interval == 0:
                dev_acc = eval(dev_iter, model, args)
                if dev_acc > best_acc:
                    best_acc = dev_acc
                    last_step = steps
                    if args.save_best:
                        print('Saving best model, acc: {:.4f}%\n'.format(best_acc))
                        save(model, args.save_dir, 'best', steps)
                else:
                    if steps - last_step >= args.early_stopping:
                        print('\nearly stop by {} steps, acc: {:.4f}%'.format(
                            args.early_stopping, best_acc))
                        raise KeyboardInterrupt
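# For reference, the loop reads args.lr, args.cuda, args.epoch, args.log_interval,
# args.test_interval, args.save_best, args.save_dir and args.early_stopping off the args
# object. A minimal sketch of a driver that builds such an object with argparse follows;
# the flag defaults are illustrative assumptions, not the project's real settings, and the
# actual TextRNN model will likely require additional fields (vocab size, embedding dim, ...).
import argparse

import torch

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='TextRNN training (hypothetical driver)')
    parser.add_argument('--lr', type=float, default=0.001)           # learning rate for Adam
    parser.add_argument('--epoch', type=int, default=10)             # passes over train_iter
    parser.add_argument('--log-interval', type=int, default=10)      # steps between console log lines
    parser.add_argument('--test-interval', type=int, default=100)    # steps between dev-set evaluations
    parser.add_argument('--early-stopping', type=int, default=1000)  # steps without improvement before stopping
    parser.add_argument('--save-best', action='store_true')          # checkpoint whenever dev accuracy improves
    parser.add_argument('--save-dir', type=str, default='snapshot')  # checkpoint directory
    parser.add_argument('--cuda', action='store_true')               # move model/tensors to GPU if available
    args = parser.parse_args()
    args.cuda = args.cuda and torch.cuda.is_available()

    try:
        train(args)
    except KeyboardInterrupt:
        print('\nTraining stopped early.')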