Code Example #1
import torch
from torch.autograd import Variable
from torch.utils.data import DataLoader
from tqdm import tqdm

# ShakespeareModern, Discriminator, and the *_path variables are assumed to be
# provided by the project's own dataset and model modules.
def test(model_config):
	mode = 'test'
	batch_size = 1
	dataset = ShakespeareModern(train_shakespeare_path, test_shakespeare_path, train_modern_path, test_modern_path, mode=mode)
	dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=False)

	# build the discriminator and load the trained checkpoint
	shakespeare_disc = Discriminator(model_config['embedding_size'], model_config['hidden_dim'], len(dataset.vocab)).cuda()
	shakespeare_disc.load_state_dict(torch.load('./shakespeare_disc.pth'))

	shakespeare_disc.eval()

	num_correct = 0
	total_samples = 0

	for idx, (s, s_addn_feats, m, m_addn_feats) in enumerate(tqdm(dataloader)):
		# swap to sequence-first layout before feeding the discriminator
		s = s.transpose(0, 1)
		m = m.transpose(0, 1)

		total_samples += 2  # one Shakespearean and one modern sentence per iteration

		# Shakespearean sentences should be scored as 1
		s = Variable(s).cuda()
		s_output = shakespeare_disc(s, s_addn_feats)

		if round(s_output.item()) == 1:
			num_correct += 1

		# modern sentences should be scored as 0
		m = Variable(m).cuda()
		m_output = shakespeare_disc(m, m_addn_feats)

		if round(m_output.item()) == 0:
			num_correct += 1

	print('Accuracy: {}'.format(num_correct / total_samples))
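
A hypothetical invocation of test() is sketched below; the embedding_size and hidden_dim values are placeholders, not taken from the source, and must match the architecture that produced ./shakespeare_disc.pth.

if __name__ == '__main__':
	# placeholder configuration; adjust to the trained checkpoint's architecture
	model_config = {
		'embedding_size': 300,
		'hidden_dim': 256,
	}
	test(model_config)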
Code Example #2
            fake_norm = fake.data[0].norm()  # norm of the first generated sample (not used further in this snippet)

            # critic score on the detached generated features
            criticD_fake = netD(fake.detach(), batch_att)
            criticD_fake = criticD_fake.mean()
            criticD_fake.backward()
            
            # gradient penalty loss
            gradient_penalty = calc_gradient_penalty(netD, batch_feat, fake.data, batch_att, opts)
            gradient_penalty.backward()

            # tracked for logging; gradients were already accumulated by the backward() calls above
            Wasserstein_D = criticD_real - criticD_fake
            D_cost = criticD_fake - criticD_real + gradient_penalty
            optimizerD.step()
        
        # switch to the generator update: G in train mode, critic in eval mode
        netG.train()
        netD.eval()

        # Train G
        netG.zero_grad()

        noise.normal_(0, 1)  # resample the latent noise in place
        fake = netG(noise, batch_att)
        criticG_fake = netD(fake, batch_att)
        criticG_fake = criticG_fake.mean()
        G_cost = -criticG_fake  # generator maximizes the critic score on its samples
        
        # Classifier loss on generated features, with labels mapped to seen-class indices
        c_errG = cls_criterion(pre_cla(fake), Variable(utils.map_label(batch_l, seenclasses)))

        # Bi-semantic reconstruction loss
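
calc_gradient_penalty is not shown in this excerpt. A minimal sketch of the standard WGAN-GP penalty is given below, assuming netD takes (features, attributes) and that opts carries a penalty coefficient named lambda1; both of those names are assumptions, not taken from the source.

import torch

def calc_gradient_penalty(netD, real_data, fake_data, att, opts):
    # Interpolate between real and generated features.
    alpha = torch.rand(real_data.size(0), 1, device=real_data.device)
    alpha = alpha.expand(real_data.size())
    interpolates = (alpha * real_data + (1 - alpha) * fake_data).detach().requires_grad_(True)

    # Critic score on the interpolated points.
    disc_interpolates = netD(interpolates, att)

    # Gradient of the critic output with respect to the interpolated inputs.
    gradients = torch.autograd.grad(
        outputs=disc_interpolates,
        inputs=interpolates,
        grad_outputs=torch.ones_like(disc_interpolates),
        create_graph=True,
        retain_graph=True,
        only_inputs=True)[0]

    # Penalize deviation of the per-sample gradient norm from 1.
    return ((gradients.norm(2, dim=1) - 1) ** 2).mean() * opts.lambda1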