reference.append(sents.replace("\n", ""))  # tail of the preceding loop that collects reference sentences, one per line

# Train the generator/critic and evaluate each epoch with forward/backward BLEU-2.
for epoch in range(1, args.epochs + 1):
    g_loss, c_loss = train(epoch)

    # Sample 500 sentences (2 batches of 250) from the generator and decode them.
    data_test = list()
    for i in range(2):
        test_noise = torch.Tensor(
            np.random.normal(0, 1, (250, args.latent_size))).to(args.device)
        test_z = generator(test_noise).data
        new_sent = rollout_test(model_decoder, test_z, tokenizer_decoder,
                                args.max_seq_length, 250, 0, 1)
        data_test.extend(new_sent)

    # Score forward and backward BLEU-2 against a 500-sentence reference sample.
    p_reference = random.sample(reference, 500)
    bleu = calc_blue_parallel_func(p_reference, data_test, 2, 500)
    b_bleu = calc_blue_parallel_func(data_test, p_reference, 2, 500)
    logger.info("Bleu-2:{:0.3f} | B-Bleu-2:{:0.3f}".format(bleu, b_bleu))

    # Keep the checkpoints with the best combined score.
    if (bleu + b_bleu) > best_bleu:
        best_bleu = bleu + b_bleu
        logger.info('* Saving. Best Score:{:0.3f} | Bleu-2:{:0.3f} | B-Bleu-2:{:0.3f}'
                    .format(best_bleu, bleu, b_bleu))
        torch.save(generator.state_dict(),
                   args.output_dir + '/generator_' + str(args.gloabl_step_eval) + '.th')
        torch.save(critic.state_dict(),
                   args.output_dir + '/critic_' + str(args.gloabl_step_eval) + '.th')
reference.append(sents.replace("\n", ""))  # tail of the preceding loop that collects reference sentences, one per line

# Label-conditioned variant: the generator also takes a class label, and a classifier
# checkpoint is saved alongside the generator and critic.
for epoch in range(1, args.epochs + 1):
    g_loss, c_loss = train(epoch)

    # Sample 100 sentences per class (5 classes) from the conditional generator.
    data_test = list()
    test_lab = torch.LongTensor([0]*100 + [1]*100 + [2]*100 + [3]*100 + [4]*100).to(args.device)
    for i in range(5):
        test_noise = torch.Tensor(
            np.random.normal(0, 1, (100, args.latent_size))).to(args.device)
        test_z = generator(test_noise, test_lab[100*i:100*(i+1)]).data
        new_sent = rollout_test(model_decoder, test_z, tokenizer_decoder,
                                args.max_seq_length, 100, 0, 1)
        data_test.extend(new_sent)

    # Prefix each generated sentence with its label, then score forward and backward
    # BLEU-2 against a 500-sentence reference sample.
    p_reference = random.sample(reference, 500)
    data_test = [str(lab) + " " + str(sen) for lab, sen in zip(test_lab.tolist(), data_test)]
    bleu = calc_blue_parallel_func(p_reference, data_test, 2, 500, True)
    b_bleu = calc_blue_parallel_func(data_test, p_reference, 2, 500, True)
    logger.info("Bleu-2:{:0.3f} | B-Bleu-2:{:0.3f}".format(bleu, b_bleu))

    # Keep the checkpoints with the best combined score.
    if (bleu + b_bleu) > best_bleu:
        best_bleu = bleu + b_bleu
        logger.info('* Saving. Best Score:{:0.3f} | Bleu-2:{:0.3f} | B-Bleu-2:{:0.3f}'
                    .format(best_bleu, bleu, b_bleu))
        torch.save(generator.state_dict(),
                   args.output_dir + '/generator_' + str(args.gloabl_step_eval) + '.th')
        torch.save(critic.state_dict(),
                   args.output_dir + '/critic_' + str(args.gloabl_step_eval) + '.th')
        torch.save(classifier.state_dict(),
                   args.output_dir + '/classifier_' + str(args.gloabl_step_eval) + '.th')

# Optionally reload the saved generator and prepare for decoder fine-tuning:
# the decoder is put in train mode while the generator stays frozen in eval mode.
if args.finetune_decoder:
    logger.info("Loading generator")
    generator.load_state_dict(
        torch.load(args.output_dir + '/generator_' + str(args.gloabl_step_eval) + '.th'))
    model_decoder.train()
    generator.eval()
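# For reference, a minimal sketch of the forward/backward BLEU-2 scoring that
# calc_blue_parallel_func is used for above. This is an illustrative stand-in built on
# nltk, not the project's actual (parallelized) implementation; the helper name
# bleu2_score and the whitespace tokenization are assumptions.
from nltk.translate.bleu_score import corpus_bleu, SmoothingFunction

def bleu2_score(references, hypotheses):
    """Corpus BLEU-2: every hypothesis is scored against the full reference pool."""
    ref_tokens = [r.split() for r in references]
    hyp_tokens = [h.split() for h in hypotheses]
    smooth = SmoothingFunction().method1
    # Each hypothesis shares the same list of reference sentences.
    return corpus_bleu([ref_tokens] * len(hyp_tokens), hyp_tokens,
                       weights=(0.5, 0.5), smoothing_function=smooth)

# Mirroring the argument order above:
#   forward BLEU  (quality):  bleu2_score(p_reference, data_test)
#   backward BLEU (coverage): bleu2_score(data_test, p_reference)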