# Example #1
    # NOTE(review): continuation of a `with open(...) as f:` block that starts
    # before this chunk — presumably appends the optimizer description to the
    # experiment's settings file; confirm against the opening statement.
    print(info_optimizer, file=f)
    print('----', file=f)

# Build the loss criteria once, then move everything onto the GPU when requested.
BCE_loss = nn.BCELoss()
CE_loss = nn.CrossEntropyLoss()
MSE_loss = nn.MSELoss()
if gpu_mode:
    d_real_flag, d_fake_flag = d_real_flag.cuda(), d_fake_flag.cuda()
    G.cuda()
    D.cuda()
    # nn.Module.cuda() moves parameters in place and returns self, so these
    # rebindings leave the objects identical to constructing-then-moving.
    BCE_loss, CE_loss, MSE_loss = BCE_loss.cuda(), CE_loss.cuda(), MSE_loss.cuda()

d_loss_fn, g_loss_fn = loss_norm_gp.get_losses_fn('wgan')

# Bookkeeping containers for per-iteration losses and timing, filled during training.
train_hist = {
    key: []
    for key in ('D_loss', 'G_loss', 'info_loss', 'per_epoch_time', 'total_time')
}

#------------------train----------------------
D.train()  # put the discriminator in training mode
print('training start!!')
start_time = time.time()  # wall-clock start for total training time
for i in tqdm.trange(epoch):
    G.train()  # re-enable training mode each epoch (G may be switched to eval elsewhere)
    epoch_start_time = time.time()
    # NOTE(review): the two lines below look like orphaned DataLoader keyword
    # arguments spliced in from another part of the file — as written they are
    # a syntax error inside this loop body. Locate and restore the original
    # call (likely `torch.utils.data.DataLoader(..., pin_memory=..., drop_last=True)`).
    pin_memory=use_gpu,
    drop_last=True)

# model
# Instantiate the GAN pair and move both networks to the target device.
# x_dim=1 for D presumably means 1-channel input — TODO confirm against model.py;
# G takes z_dim-dimensional noise as its input width.
D = model.Discriminator_v1_1(x_dim=1, dim=dim).to(device)
G = model.Generator_v1_1(x_dim=z_dim, dim=dim).to(device)

# Append the generator and discriminator architectures to the experiment's
# settings file, each preceded by a '----' separator line.
with open('./output/%s/setting.txt' % experiment_name, 'a') as f:
    for item in ('----', G, '----', D):
        print(item, file=f)

# gan loss function
# NOTE(review): this overrides an earlier get_losses_fn('wgan') selection in
# this file — confirm the vanilla-GAN loss is the one actually intended.
d_loss_fn, g_loss_fn = loss_norm_gp.get_losses_fn(
    'gan')  #'gan', 'lsgan', 'wgan', 'hinge_v1', 'hinge_v2'

# optimizer
# Adam with beta1=0.5 — the usual DCGAN choice — for the adversarial updates.
d_optimizer = torch.optim.Adam(D.parameters(),
                               lr=d_learning_rate,
                               betas=(0.5, 0.999))
g_optimizer = torch.optim.Adam(G.parameters(),
                               lr=g_learning_rate,
                               betas=(0.5, 0.999))
# Joint optimizer over both networks' parameters — presumably for an
# InfoGAN-style mutual-information term; TODO confirm.
# NOTE(review): lr/betas are hard-coded here (unlike the two optimizers above),
# and `optim` is used instead of `torch.optim` — presumably the same module;
# verify the import and consider unifying the style and the hyperparameters.
info_optimizer = optim.Adam(itertools.chain(G.parameters(), D.parameters()),
                            lr=0.0001,
                            betas=(0.6, 0.95),
                            amsgrad=True)  # both G and D are updated

# =                                    train                                   =