best_valid_loss = float('inf')
# PAD_IDX = field_word.vocab.stoi["<pad>"]  # PAD token for word, NOT CHAR

# Build the character-aware ELMo language model and move it to the target device.
model = ELMo(VOCAB_DIM, OUTPUT_DIM, CHAR_EMB_DIM, HID_DIM, PRJ_DIM,
             FILTERS, CHAR_LEN, N_LAYERS).to(DEVICE)

# Initialize weights.
model.init_weights()

print(f'The model has {count_parameters(model):,} trainable parameters')

import time

# criterion = cal_loss
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters())

train_losses = []
test_losses = []

for epoch in range(1, N_EPOCHS + 1):
    # Rebuild the BPTT iterators each epoch so the generators start from the beginning.
    train_iter = gen_bptt_iter(train_data, BATCH_SIZE, SEQ_LEN, DEVICE)
    valid_iter = gen_bptt_iter(valid_data, BATCH_SIZE, SEQ_LEN, DEVICE)

    start_time = time.time()

    train_loss = train(model, train_iter, optimizer, criterion, CLIP)
    valid_loss = evaluate(model, valid_iter, criterion)

    end_time = time.time()
    epoch_mins, epoch_secs = epoch_time(start_time, end_time)

    # Record per-epoch losses and keep track of the best validation loss.
    train_losses.append(train_loss)
    test_losses.append(valid_loss)
    if valid_loss < best_valid_loss:
        best_valid_loss = valid_loss

    print(f'Epoch: {epoch:02} | Time: {epoch_mins}m {epoch_secs}s')
    print(f'\tTrain Loss: {train_loss:.3f} | Val. Loss: {valid_loss:.3f}')
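The cell above relies on helper functions defined elsewhere in the notebook (count_parameters, epoch_time, train, evaluate, gen_bptt_iter). As a rough sketch of what the first two likely look like given how they are called here (these exact definitions are assumptions, not taken from the original source):

def count_parameters(model):
    # Total number of trainable parameters, used in the summary print above.
    return sum(p.numel() for p in model.parameters() if p.requires_grad)

def epoch_time(start_time, end_time):
    # Convert an elapsed wall-clock interval into (minutes, seconds) for logging.
    elapsed = end_time - start_time
    elapsed_mins = int(elapsed / 60)
    elapsed_secs = int(elapsed - elapsed_mins * 60)
    return elapsed_mins, elapsed_secs

train and evaluate are presumably the usual per-epoch loops: iterate over the BPTT batches, compute the cross-entropy loss over the vocabulary, clip gradients to CLIP during training, and return the mean loss for the epoch.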
def train(args):
    paddle.set_device(args.device)
    n_procs = dist.get_world_size()
    rank = dist.get_rank()

    if n_procs > 1:
        dist.init_parallel_env()

    vocab = load_vocab(args.vocab_file, args.max_characters_per_token)

    elmo = ELMo(args.batch_size,
                args.char_embed_dim,
                args.projection_dim,
                vocab.size,
                dropout=args.dropout,
                num_layers=args.num_layers,
                num_highways=args.num_highways,
                char_vocab_size=vocab.char_size)
    if n_procs > 1:
        elmo = paddle.DataParallel(elmo)
    elmo.train()

    global_norm_clip = nn.ClipGradByGlobalNorm(args.max_grad_norm)
    optimizer = paddle.optimizer.Adagrad(learning_rate=args.lr,
                                         parameters=elmo.parameters(),
                                         initial_accumulator_value=1.0,
                                         grad_clip=global_norm_clip)
    elmo_loss = ELMoLoss()

    # Load pre-trained parameters.
    if args.init_from_ckpt:
        weight_state_dict = paddle.load(args.init_from_ckpt + '.pdparams')
        opt_state_dict = paddle.load(args.init_from_ckpt + '.pdopt')
        elmo.set_state_dict(weight_state_dict)
        optimizer.set_state_dict(opt_state_dict)
        print("Loaded checkpoint from %s" % args.init_from_ckpt)

    train_dataset = OneBillionWordDataset(args.train_data_path,
                                          vocab,
                                          args.batch_size,
                                          args.unroll_steps,
                                          n_procs=n_procs,
                                          rank=rank,
                                          mode='train',
                                          shuffle=True,
                                          seed=args.seed)
    # The dataset already yields full batches, so the DataLoader uses batch_size=None.
    train_dataloader = DataLoader(train_dataset, return_list=True, batch_size=None)

    n_tokens_per_batch = args.batch_size * args.unroll_steps * n_procs
    n_steps_per_epoch = int(train_dataset.number_of_tokens / n_tokens_per_batch)
    n_steps_total = args.epochs * n_steps_per_epoch
    print("Training for %s epochs and %s steps" % (args.epochs, n_steps_total))

    total_time = 0.0
    batch_start_time = time.time()
    for step, inputs in enumerate(train_dataloader, start=1):
        # Each batch carries forward and backward token ids plus their next-token targets.
        ids, next_ids, ids_reverse, next_ids_reverse = inputs
        outputs = elmo([ids, ids_reverse])
        loss = elmo_loss(outputs, [next_ids, next_ids_reverse])
        ppl = paddle.exp(loss)
        loss *= args.unroll_steps
        loss.backward()
        optimizer.step()
        optimizer.clear_grad()

        total_time += (time.time() - batch_start_time)
        if step % args.log_freq == 0:
            print("step %d/%d - loss: %.4f - Perplexity: %.4f - %.3fs/step"
                  % (step, n_steps_total, loss.numpy()[0], ppl.numpy()[0],
                     total_time / args.log_freq))
            total_time = 0.0
        if rank == 0 and step % args.save_freq == 0:
            save_params(elmo, optimizer, args.save_dir, step)
        if step == n_steps_total:
            # Training done; save the final checkpoint from rank 0 only.
            if rank == 0:
                save_params(elmo, optimizer, args.save_dir, 'final')
            break
        batch_start_time = time.time()
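The save_params helper called above is not shown. A minimal sketch, assuming it simply writes the model and optimizer state dicts under args.save_dir with the same .pdparams/.pdopt suffixes that the checkpoint-loading branch expects (the file-name pattern below is a guess, not taken from the original source):

import os
import paddle

def save_params(model, optimizer, save_dir, name):
    # Hypothetical sketch: persist model weights and optimizer state so that
    # training can later be resumed via --init_from_ckpt.
    os.makedirs(save_dir, exist_ok=True)
    path = os.path.join(save_dir, "elmo_%s" % name)  # file-name pattern assumed
    paddle.save(model.state_dict(), path + ".pdparams")
    paddle.save(optimizer.state_dict(), path + ".pdopt")

With this assumed pattern, a run would be resumed by passing the checkpoint prefix (the path without its suffix) as --init_from_ckpt.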