import torch  # needed for torch.no_grad() below; idempotent if already imported above

# --- one optimization step on the current batch ---
train_batch = train_batch.cuda()
ground_truth = ground_truth.cuda()

gen = net(train_batch)
optimizer.zero_grad()
loss = criterion(gen, ground_truth)
loss.backward()
optimizer.step()
print(step, loss.item())

# Manual step-based learning-rate schedule: 1e-4 -> 1e-5 -> 1e-6.
if step < 200000:
    lr = 1e-4
elif step < 400000:
    lr = 1e-5
else:
    lr = 1e-6
for param_group in optimizer.param_groups:
    param_group["lr"] = lr

writer.add_scalar("train/loss", loss.item(), step)

# Periodic evaluation and image logging every 100 steps.
if step % 100 == 0:
    net.eval()
    # BUG FIX: run the eval forward pass without building an autograd graph.
    with torch.no_grad():
        gen = net(train_batch)
        loss = criterion(gen, ground_truth)
    # NOTE(review): this "val" loss is computed on the *training* batch —
    # presumably a real validation batch was intended; confirm upstream.
    writer.add_scalar("val/loss", loss.item(), step)
    # BUG FIX: don't assume the batch holds exactly 32 samples — a smaller
    # (e.g. final) batch would raise IndexError with a hard-coded range(32).
    for i in range(min(32, gen.size(0))):
        G = gen[i].detach().cpu().numpy()            # .data is deprecated; use .detach()
        GT = ground_truth[i].detach().cpu().numpy()
        writer.add_image("train/gen{}.png".format(i), G, step)
        writer.add_image("train/ground_truth{}.png".format(i), GT, step)
    # BUG FIX: restore training mode so dropout/batch-norm behave correctly
    # on all subsequent training steps.
    net.train()

if step % 1000 == 0:
    save_model()
step += 1
import torch  # needed for torch.no_grad() below; idempotent if already imported above

# --- one optimization step on the current batch ---
train_batch = train_batch.cuda()
ground_truth = ground_truth.cuda()

gen = net(train_batch)
optimizer.zero_grad()
loss = criterion(gen, ground_truth)
loss.backward()
optimizer.step()
print(step, loss.item())

# Manual step-based learning-rate schedule: 1e-4 -> 1e-5 -> 1e-6.
if step < 200000:
    lr = 1e-4
elif step < 400000:
    lr = 1e-5
else:
    lr = 1e-6
for param_group in optimizer.param_groups:
    param_group['lr'] = lr

writer.add_scalar('train/loss', loss.item(), step)

# Periodic evaluation and image logging every 100 steps.
if step % 100 == 0:
    net.eval()
    # BUG FIX: run the eval forward pass without building an autograd graph.
    with torch.no_grad():
        gen = net(train_batch)
        loss = criterion(gen, ground_truth)
    # NOTE(review): this "val" loss is computed on the *training* batch —
    # presumably a real validation batch was intended; confirm upstream.
    writer.add_scalar('val/loss', loss.item(), step)
    # BUG FIX: don't assume the batch holds exactly 32 samples — a smaller
    # (e.g. final) batch would raise IndexError with a hard-coded range(32).
    for i in range(min(32, gen.size(0))):
        G = gen[i].detach().cpu().numpy()            # .data is deprecated; use .detach()
        GT = ground_truth[i].detach().cpu().numpy()
        writer.add_image('train/gen{}.png'.format(i), G, step)
        writer.add_image('train/ground_truth{}.png'.format(i), GT, step)
    # BUG FIX: restore training mode so dropout/batch-norm behave correctly
    # on all subsequent training steps.
    net.train()

if step % 1000 == 0:
    save_model()
step += 1