示例#1
0
# Free the word2vec object to reclaim memory before building the model.
del word2vec

# BERT Model
# Encoder without its own embedding layer; presumably embeddings are supplied
# externally -- TODO confirm against modeling.BertNoEmbed.
model = modeling.BertNoEmbed(vocab=vocab, hidden_size=1024, enc_num_layer=3)
# Resume from the last checkpoint; the file stores the state_dict under 'state'.
model.load_state_dict(torch.load('checkpoint/bert-LanGen-last.pt')['state'])
model.cuda()
# Discriminator: TextCNN classifier with two labels (real vs. generated -- verify).
d_net = modeling.TextCNNClassify(vocab, vec, num_labels=2)
d_net.cuda()
# Separate SGD optimizers for the generator (model) and the discriminator.
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
optimizer_d = torch.optim.SGD(d_net.parameters(), lr=0.01)

# Label smoothing over the full vocabulary; second arg 0 is presumably the
# padding index, 0.1 the smoothing factor -- TODO confirm signature.
label_smoothing = modeling.LabelSmoothing(len(vocab), 0, 0.1)
label_smoothing.cuda()
gan_loss = GANLoss()
gan_loss.cuda()
# Adversarial schedule: generator updates vs. discriminator updates per cycle,
# plus discriminator pre-training rounds.
G_STEP = 1
D_STEP = 3
D_PRE = 5
SAVE_EVERY = 50           # checkpoint interval (units not visible here -- confirm)
PENALTY_EPOCH = -1        # epoch at which a penalty kicks in; -1 likely disables it -- confirm
DRAW_LEARNING_CURVE = False
data = []                 # accumulator filled by later (unseen) code

# Tokenized input
print('Tokenization...')
with open('pair.csv') as PAIR:
    for line in tqdm(PAIR):
        # NOTE(review): naive comma split breaks if text/summary themselves
        # contain commas -- the csv module would be safer. Expected columns:
        # text, summary, <ignored>.
        [text, summary, _] = line.split(',')
        # NOTE(review): these lists are re-created on every iteration; if they
        # are meant to accumulate across lines they should be initialised
        # before the loop -- the rest of the loop body is outside this view.
        texts = []
        summaries = []
示例#2
0
# GAN loss plus image regularisers: an edge-preserving loss and a
# total-variation loss (both project-defined modules).
GAN_loss = GANLoss(opt.gan_type, real_label_val=1.0, fake_label_val=0.0)
edge_loss = edgeV_loss()
tv_loss = TV_loss()
# GPU
# Fail fast if --cuda was requested but no GPU is visible.
if opt.cuda and not torch.cuda.is_available():  # check whether a GPU is available
    raise Exception('No GPU found, please run without --cuda')
print("===> Setting GPU")
if opt.cuda:
    print('cuda_mode:', opt.cuda)
    # Move the networks and every loss module to the GPU so that inputs,
    # parameters, and loss buffers live on the same device.
    generator = generator.cuda()
    discriminator = discriminator.cuda()
    feature_extractor = feature_extractor.cuda()
    content_loss = content_loss.cuda()
    pixel_loss = pixel_loss.cuda()
    GAN_loss = GAN_loss.cuda()
    edge_loss = edge_loss.cuda()
    tv_loss = tv_loss.cuda()

# optimizer
# One Adam optimizer per network, sharing the same base learning rate.
print("===> Setting Optimizer")
Gen_optim = torch.optim.Adam(generator.parameters(), lr=opt.lr)
Dis_optim = torch.optim.Adam(discriminator.parameters(), lr=opt.lr)

# visualizer
# Presumably a visdom-style dashboard under the 'training' environment -- confirm.
train_vis = Visualizer(env='training')


# training
def train(train_dataloader, generator, discriminator, Gen_optim, Dis_optim,
          content_loss, pixel_loss, save_img_dir):
示例#3
0
################################################################################
# Losses: adversarial GAN loss plus L1 and MSE reconstruction criteria.
criterionGAN = GANLoss()
criterionL1 = nn.L1Loss()
criterionMSE = nn.MSELoss()

# setup optimizer
# Adam with a user-supplied beta1 (beta2 fixed at the conventional 0.999).
optimizerG = optim.Adam(netG.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
optimizerD = optim.Adam(netD.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))

# Pre-allocated input/target buffers of shape (batch, channels, 256, 256);
# presumably refilled in-place each iteration by the training loop -- confirm.
real_a = torch.FloatTensor(opt.batchSize, opt.input_nc, 256, 256)
real_b = torch.FloatTensor(opt.batchSize, opt.output_nc, 256, 256)

if opt.cuda:
    # Move networks, criteria, and the data buffers to the GPU.
    netD = netD.cuda()
    netG = netG.cuda()
    criterionGAN = criterionGAN.cuda()
    criterionL1 = criterionL1.cuda()
    criterionMSE = criterionMSE.cuda()
    real_a = real_a.cuda()
    real_b = real_b.cuda()

# NOTE(review): torch.autograd.Variable has been deprecated since PyTorch 0.4;
# plain tensors carry autograd state, so these wrappers are no-ops on modern
# versions and could be dropped.
real_a = Variable(real_a)
real_b = Variable(real_b)

################################################################################
def train(epoch):
    netD.train()
    netG.train()
    total_D_loss = 0
    total_G_loss = 0
    for iteration, batch in enumerate(training_data_loader, 1):