Example #1
    # restore the discriminator weights from a loaded checkpoint (the enclosing
    # resume block is truncated in this excerpt; Example #2 shows the full pattern)
    netD.load_state_dict(ckpt['netD'])
# if FLAGS.netHg:
#     pass
# if FLAGS.netD:
#     pass

# move the networks and the loss to the GPU; cudnn.benchmark autotunes
# convolution algorithms, which speeds things up when input sizes are fixed
if FLAGS.cuda:
    torch.backends.cudnn.benchmark = True
    netHg.cuda()
    netD.cuda()
    criterion.cuda()

# separate optimizers: RMSprop for the hourglass network, Adam for the discriminator
optimHg = torch.optim.RMSprop(netHg.parameters(), lr=FLAGS.lr, alpha=FLAGS.alpha)
optimD = torch.optim.Adam(netD.parameters(), lr=FLAGS.lrD, betas=(0.9, 0.999))

# create a log directory, a TensorBoard summary writer, and a checkpoint
# directory for this run
log_dir = getLogDir(FLAGS.log_root)
sumWriter = SummaryWriter(log_dir)
ckpt_dir = makeCkptDir(log_dir)

# one epoch of training over the dataloader
def run(epoch, iter_start=0):
    global kt, global_step
    pbar = tqdm.tqdm(dataloader, desc='Epoch %02d' % epoch, dynamic_ncols=True)
    pbar_info = tqdm.tqdm(None, bar_format='{bar}{postfix}')  # showing info on the second line
    avg_acc = 0
    for it, sample in enumerate(pbar, start=iter_start):
        global_step += 1
        image, label, image_s = sample
        # Variable is the legacy (pre-0.4) PyTorch autograd wrapper; on newer
        # versions plain tensors can be used directly
        image = Variable(image)
        label = Variable(label)
        image_s = Variable(image_s)
        if FLAGS.cuda:
            # excerpt truncated here; presumably the batch is moved to the GPU
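Both excerpts assume the same surrounding imports and globals, which the page cuts off. A minimal sketch of what they rely on; the SummaryWriter import path is an assumption (tensorboardX was the usual choice in Variable-era code):

import torch
import tqdm
from torch.autograd import Variable
from tensorboardX import SummaryWriter  # assumed; newer code imports from torch.utils.tensorboard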
Example #2
# resume a previous run: reload the network weights, optimizer state, and
# counters from the latest checkpoint in the given experiment directory
if FLAGS.continue_exp:
    log_dir = FLAGS.continue_exp
    ckpt = torch.load(getLatestCkpt(FLAGS.continue_exp))
    netHg.load_state_dict(ckpt['netHg'])
    netD.load_state_dict(ckpt['netD'])
    optimHg.load_state_dict(ckpt['optimHg'])
    optimD.load_state_dict(ckpt['optimD'])
    epoch_init = ckpt['epoch'] + 1
    global_step = ckpt['global_step']
else:
    comment = 'lambda_G{}-gamma{}-kt_lr{}'.format(FLAGS.lambda_G, FLAGS.gamma,
                                                  FLAGS.kt_lr)
    if FLAGS.comment:
        comment += '_' + FLAGS.comment
    log_dir = getLogDir(FLAGS.log_root, comment=comment)
sumWriter = SummaryWriter(log_dir)
ckpt_dir = makeCkptDir(log_dir)
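The kt, FLAGS.gamma, and FLAGS.kt_lr names point to a BEGAN-style balance term that weights the discriminator's fake-sample loss against its real-sample loss. The update itself falls outside this excerpt; a typical form, with loss_real and loss_fake as hypothetical per-batch discriminator losses, would be:

# hypothetical BEGAN-style balance update (not shown in the excerpt)
kt = kt + FLAGS.kt_lr * (FLAGS.gamma * loss_real - loss_fake)
kt = max(0.0, min(1.0, kt))  # keep the balance term in [0, 1]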


# one epoch of training; iter_start offsets the iteration counter when resuming
def train(epoch, iter_start=0):
    global global_step, kt

    netHg.train()
    pbar = tqdm.tqdm(train_loader,
                     desc='Epoch %02d' % epoch,
                     dynamic_ncols=True)
    pbar_info = tqdm.tqdm(bar_format='{bar}{postfix}')  # shows info on a second line below the main bar
    for it, sample in enumerate(pbar, start=iter_start):
        global_step += 1
        if FLAGS.debug:
            # excerpt truncated here
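Neither excerpt shows the loop that drives train(), but the keys loaded in the resume block imply what a checkpoint contains. A hypothetical driver, assuming a FLAGS.nEpochs flag, an epoch_init default of 0 for fresh runs, and a made-up filename pattern:

import os

for epoch in range(epoch_init, FLAGS.nEpochs):
    train(epoch)
    # save the same fields the resume block expects to find
    torch.save({
        'netHg': netHg.state_dict(),
        'netD': netD.state_dict(),
        'optimHg': optimHg.state_dict(),
        'optimD': optimD.state_dict(),
        'epoch': epoch,
        'global_step': global_step,
    }, os.path.join(ckpt_dir, 'ckpt_%02d.pth' % epoch))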