# --- Training-step body (fragment: the enclosing training-loop header is
# outside this view; original indentation was paste-mangled). One step:
# forward, loss, backward, update, manual LR schedule, TensorBoard logging,
# periodic evaluation and checkpointing.

# Forward/backward pass and parameter update for the current batch.
outs = network(training_batch)
adam_optim.zero_grad()
loss = objective(outs, trues)
loss.backward()
adam_optim.step()
print(step_num, loss.item())

# Piecewise-constant learning-rate schedule: 1e-4 -> 1e-5 -> 1e-6.
if step_num < 200000:
    lr = 1e-4
elif step_num < 400000:
    lr = 1e-5
else:
    lr = 1e-6
for params in adam_optim.param_groups:
    params["lr"] = lr

writer.add_scalar("train/loss", loss.item(), step_num)

if step_num % 100 == 0:
    # Periodic evaluation. NOTE(review): this re-uses the *training* batch,
    # so the "val/loss" scalar is not a true validation loss — confirm this
    # is intended. Consider wrapping the forward pass in torch.no_grad().
    network.eval()
    outs = network(training_batch)
    loss = objective(outs, trues)
    writer.add_scalar("val/loss", loss.item(), step_num)
    for i in range(32):  # assumes batch size >= 32 — TODO confirm
        out_drawing = outs[i].cpu().data.numpy()
        true_drawing = trues[i].cpu().data.numpy()
        writer.add_image("train/out_drawing{}.png".format(i),
                         out_drawing, step_num)
        writer.add_image("train/true_drawing{}.png".format(i),
                         true_drawing, step_num)
    # Bug fix: restore training mode so dropout/batch-norm behave correctly
    # on subsequent steps (the original never left eval mode).
    network.train()

if step_num % 1000 == 0:
    save_model()
step_num += 1
# Beispiel #2 ("Example #2") — scraped separator between code snippets from
# the source aggregator; the stray "0" on the next line was a vote count.
        train_batch = train_batch.cuda()
        ground_truth = ground_truth.cuda()
    gen = net(train_batch)
    optimizer.zero_grad()
    loss = criterion(gen, ground_truth)
    loss.backward()
    optimizer.step()
    print(step, loss.item())
    if step < 200000:
        lr = 1e-4
    elif step < 400000:
        lr = 1e-5
    else:
        lr = 1e-6
    for param_group in optimizer.param_groups:
        param_group["lr"] = lr
    writer.add_scalar("train/loss", loss.item(), step)
    if step % 100 == 0:
        net.eval()
        gen = net(train_batch)
        loss = criterion(gen, ground_truth)
        writer.add_scalar("val/loss", loss.item(), step)
        for i in range(32):
            G = gen[i].cpu().data.numpy()
            GT = ground_truth[i].cpu().data.numpy()
            writer.add_image("train/gen{}.png".format(i), G, step)
            writer.add_image("train/ground_truth{}.png".format(i), GT, step)
    if step % 1000 == 0:
        save_model()
    step += 1
        # (Fragment: the opening `if step < 200000:` of this learning-rate
        #  schedule is outside this view — the chain is 1e-4 -> 1e-5 -> 1e-6.)
        lr = 1e-4
    elif step < 400000:
        lr = 1e-5
    else:
        lr = 1e-6
    # Apply the scheduled learning rate to every optimizer parameter group.
    for param_group in optimizer.param_groups:
        param_group["lr"] = lr

    # Constrained mode: shrink the current brush width by `dec_brush_width`
    # every `dec_brush_width_int` steps, clamped at CONSTRAINT_BRUSH_WIDTH.
    if args.constrained and (step + 1) % dec_brush_width_int == 0:
        curr_brush_width -= dec_brush_width
        curr_brush_width = max(curr_brush_width, CONSTRAINT_BRUSH_WIDTH)

    writer.add_scalar("train/loss", loss.item(), step)
    if step % 500 == 0:
        # Periodic evaluation + image logging every 500 steps.
        # NOTE(review): this re-uses the *training* batch (so "val/loss" is
        # not a true validation loss) and the model is never switched back to
        # train() mode afterwards — both look like bugs; confirm with author.
        net.eval()
        gen = net(train_batch)
        loss = criterion(gen, ground_truth)
        writer.add_scalar("val/loss", loss.item(), step)
        for i in range(32):
            # Scale to 0-255 for image logging — presumably the tensors are
            # normalized to [0, 1]; TODO confirm.
            G = gen[i].cpu().data.numpy() * 255
            GT = ground_truth[i].cpu().data.numpy() * 255
            # G = np.array([G,G,G])
            # GT = np.array([GT,GT,GT])
            # writer.add_image("train/img{}.png".format(i), np.transpose(G, (1,2,0)), step)
            # writer.add_image("train/img{}_truth.png".format(i), np.transpose(GT, (1,2,0)), step)
            writer.add_image("train/img{}.png".format(i), G, step)
            writer.add_image("train/img{}_truth.png".format(i), GT, step)
    # Checkpoint every 1000 steps, then advance the step counter.
    if step % 1000 == 0:
        save_model()
    step += 1