Example no. 1
0
def test():
    """Run inference over the demo test set and save inpainted outputs.

    Configuration comes from the module-level ``opt``; result images are
    written to ``demo/output/`` and running metrics are printed.
    """
    opt.device = 'cuda:0'
    opt.data_root = 'demo/input/'   # The location of your testing data
    opt.mask_root = 'demo/mask/'    # The location of your testing data mask
    testset = MyDataLoader(opt)
    print('Test with %d' % (len(testset)))

    model = MyModel()
    model.initialize(opt)
    model.load_networks('places_irregular')     # For irregular mask inpainting
    # model.load_networks('celebahq_center')    # For centering mask inpainting, i.e., 120*120 hole in 256*256 input

    # Running accumulators for the whole-set metrics.
    ssims, psnrs, maes, g_losses = [], [], [], []
    with torch.no_grad():
        for idx, batch in enumerate(testset):
            name = batch['fname'][0]
            model.set_input(batch)
            gt_img, out_img, loss_g = model.optimize_parameters(val=True)
            s, p, m = metrics(gt_img, out_img)
            ssims.append(s)
            psnrs.append(p)
            maes.append(m)
            g_losses.append(loss_g.detach().item())
            # Persist the composited output next to the inputs.
            cv2.imwrite('demo/output/' + name[:-4] + '.png', postprocess(out_img).numpy()[0])
            print('Val (%d/%d) G:%5.4f, S:%4.4f, P:%4.2f, M:%4.4f' % (
                idx + 1, len(testset), np.mean(g_losses), np.mean(ssims), np.mean(psnrs), np.mean(maes)), end='\r')
        print('Val G:%5.4f, S:%4.4f, P:%4.2f, M:%4.4f' %
              (np.mean(g_losses), np.mean(ssims), np.mean(psnrs), np.mean(maes)))
Example no. 2
0
def train():
    """Train the inpainting model, validating at the end of each epoch.

    Configuration comes from the module-level ``opt``; model weights are
    saved after every epoch under the name 'Model_weights'.
    """
    opt.device = 'cuda:0'

    opt.data_root = 'demo/input/'   # The location of your training data
    opt.mask_root = 'demo/mask/'    # The location of your training data mask
    train_set = MyDataLoader(opt)

    opt.data_root = 'demo/input/'   # The location of your validation data
    opt.mask_root = 'demo/mask/'    # The location of your validation data mask
    val_set = MyDataLoader(opt)

    model = MyModel()
    model.initialize(opt)

    print('Train/Val with %d/%d' % (len(train_set), len(val_set)))
    for epoch in range(1, 1000):
        print('Epoch: %d' % epoch)
        epoch_iter = 0
        losses_G, ssim, psnr, mae = [], [], [], []
        for i, data in enumerate(train_set):
            epoch_iter += opt.batchSize
            model.set_input(data)
            I_g, I_o, loss_G = model.optimize_parameters()
            s, p, m = metrics(I_g, I_o)
            ssim.append(s)
            psnr.append(p)
            mae.append(m)
            losses_G.append(loss_G.detach().item())
            print('Tra (%d/%d) G:%5.4f, S:%4.4f, P:%4.2f, M:%4.4f' %
                  (epoch_iter, len(train_set), np.mean(losses_G), np.mean(ssim), np.mean(psnr), np.mean(mae)), end='\r')
            # Validate once the whole training set has been seen.
            # NOTE(review): this only triggers when len(train_set) is an exact
            # multiple of opt.batchSize — TODO confirm with the data loader.
            if epoch_iter == len(train_set):
                val_ssim, val_psnr, val_mae, val_losses_G = [], [], [], []
                with torch.no_grad():
                    # Distinct loop variables (j / val_data) so the outer
                    # training loop's `i` and `data` are not shadowed.
                    for j, val_data in enumerate(val_set):
                        fname = val_data['fname'][0]
                        model.set_input(val_data)
                        I_g, I_o, val_loss_G = model.optimize_parameters(val=True)
                        val_s, val_p, val_m = metrics(I_g, I_o)
                        val_ssim.append(val_s)
                        val_psnr.append(val_p)
                        val_mae.append(val_m)
                        val_losses_G.append(val_loss_G.item())
                        if j + 1 <= 200:  # only dump the first 200 validation outputs
                            cv2.imwrite('./demo/output/' + fname[:-4] + '.png', postprocess(I_o).numpy()[0])
                    print('Val (%d/%d) G:%5.4f, S:%4.4f, P:%4.2f, M:%4.4f' %
                          (epoch_iter, len(train_set), np.mean(val_losses_G), np.mean(val_ssim), np.mean(val_psnr), np.mean(val_mae)))
                # Reset running training metrics after each validation pass.
                losses_G, ssim, psnr, mae = [], [], [], []
        model.save_networks('Model_weights')
Example no. 3
0
def train():
    """Train the inpainting model from a TFRecord dataset.

    Reads batches via ``define_dataset``, preprocesses them with
    ``Preprocess`` and checkpoints the model weights after every epoch.
    """
    opt.device = 'cuda:0'

    if not os.path.exists(opt.checkpoints_dir):
        os.mkdir(opt.checkpoints_dir)

    from dataset_tfrecord import define_dataset
    tfrecord_path = "/content/generator_layers_v2.1_categories.record"
    batch_size = opt.batchSize
    trainset, trainset_length = define_dataset(tfrecord_path,
                                               batch_size,
                                               train=True)
    valset, valset_length = define_dataset(tfrecord_path,
                                           batch_size,
                                           train=False)

    model = MyModel()
    model.initialize(opt)
    dpp = Preprocess()  # data pre-process (dpp)

    print('Train/Val with %d/%d' % (trainset_length, valset_length))
    for epoch in range(1, 1000):
        print('Epoch: %d' % epoch)

        train_iterator = iter(trainset)
        num_iterations = int(trainset_length / batch_size)

        epoch_iter = 0
        losses_G, ssim, psnr, mae = [], [], [], []
        for i in range(num_iterations):
            epoch_iter += opt.batchSize

            data, model_inputs = next(train_iterator)
            inpaint_region = data["inpaint_region"]

            person_cloth = data["person_cloth"]
            # warped_cloth_input = model_inputs["warped_cloth"]     # Not using masked cloth. (person_cloth*inpaint_region)

            data = dpp(person_cloth, inpaint_region)
            try:
                model.set_input(data)
                I_g, I_o, loss_G = model.optimize_parameters()
                s, p, m = metrics(I_g, I_o)
                ssim.append(s)
                psnr.append(p)
                mae.append(m)
                losses_G.append(loss_G.detach().item())
                if i % 100 == 0:
                    print('Tra (%d/%d) G:%5.4f, S:%4.4f, P:%4.2f, M:%4.4f' %
                          (epoch_iter, trainset_length, np.mean(losses_G),
                           np.mean(ssim), np.mean(psnr),
                           np.mean(mae)))  #, end='\r')
                if epoch_iter == trainset_length:
                    # Per-epoch validation is currently disabled; only the
                    # running training statistics are reset here.
                    losses_G, ssim, psnr, mae = [], [], [], []
            except Exception as e:
                # Was a bare `except: pass`, which silently swallowed every
                # error (including KeyboardInterrupt). Catch Exception only
                # and report what actually failed.
                print('Error occurred: %r' % (e,))
        model.save_networks(epoch)
Example no. 4
0
def test():
    """Run inference over the TFRecord test set and save visualisations.

    Writes a 6-panel comparison figure plus individual input/mask/output
    images per sample under ``results/``, dumps per-sample metrics to a
    CSV, and uploads everything to GCS via gsutil.
    """
    opt.device = 'cuda:0'
    result_dir = 'results'
    if not os.path.exists(result_dir):
        os.mkdir(result_dir)

    from dataset_tfrecord import define_dataset
    tfrecord_path = "/content/generator_layers_v2.1_categories.record"
    batch_size = 1
    testset, testset_length = define_dataset(tfrecord_path,
                                             batch_size,
                                             train=False,
                                             test=True)
    dpp = Preprocess()  # data pre-process (dpp)

    print('Test with %d' % (testset_length))

    model = MyModel()
    model.initialize(opt)
    model.load_networks(str(38))  # For irregular mask inpainting

    val_ssim, val_psnr, val_mae, val_losses_G = [], [], [], []
    ids = []

    test_iterator = iter(testset)
    num_iterations = int(testset_length / batch_size)

    def tensor2array(xx):
        """Map a (1, C, H, W) tensor in [-1, 1] to an HxWx3 float array in
        [0, 1], cropped to columns 32:224.

        NOTE(review): implicitly returns None for channel counts other than
        1 or 3 — callers here only pass 1- or 3-channel tensors.
        """
        xx = xx.detach().cpu()[0]
        xx = xx.permute(1, 2, 0).numpy()
        xx = (xx + 1) * 0.5
        xx = np.clip(xx, 0, 1)
        if xx.shape[2] == 1:
            return np.concatenate([xx, xx, xx], -1)[:, 32:32 + 192, :]
        if xx.shape[2] == 3:
            return xx[:, 32:32 + 192, :]

    with torch.no_grad():
        for i in range(num_iterations):
            # if i == 1: break
            try:
                data, model_inputs = next(test_iterator)
            except StopIteration:
                # Was a bare `except:` — only dataset exhaustion should end
                # the loop early; any other error must propagate.
                break
            inpaint_region = data["inpaint_region"]

            person_cloth = data["person_cloth"]
            cloth_no = int(data['clothno'].numpy()[0])
            person_no = int(data['personno'].numpy()[0])

            data = dpp(person_cloth, inpaint_region)

            model.set_input(data)
            I_g, I_o, I_i, val_loss_G, I_raw, L_o, mask = model.optimize_parameters(
                val=True)

            plt.figure(figsize=(12, 10))
            plt.subplot(2, 3, 1)
            plt.imshow(tensor2array(I_i))
            plt.title("Input", fontsize=20)
            plt.subplot(2, 3, 2)
            plt.imshow(tensor2array(mask))
            plt.title("Inpaint Region (M)", fontsize=20)
            plt.subplot(2, 3, 3)
            plt.imshow(tensor2array(I_g))
            plt.title("Ground Truth (GT)", fontsize=20)
            plt.subplot(2, 3, 4)
            plt.imshow(tensor2array(L_o))
            plt.title("LBP Output", fontsize=20)
            plt.subplot(2, 3, 5)
            plt.imshow(tensor2array(I_raw))
            plt.title("Generator Output (GO)", fontsize=20)
            plt.subplot(2, 3, 6)
            plt.imshow(tensor2array(I_o))
            plt.title("GO*M+GT*(1-M)", fontsize=20)
            plt.savefig(f"{result_dir}/{i}_result.jpg")
            # Close the figure explicitly: the original leaked one open
            # figure per iteration, growing memory across the test set.
            plt.close()
            plt.imsave(f"{result_dir}/{i}_input.jpg", tensor2array(I_i))
            plt.imsave(f"{result_dir}/{i}_mask.png", tensor2array(mask))
            plt.imsave(f"{result_dir}/{i}_output.jpg", tensor2array(I_o))

            val_s, val_p, val_m = metrics(I_g, I_o)
            val_ssim.append(val_s)
            val_psnr.append(val_p)
            val_mae.append(val_m)
            val_losses_G.append(val_loss_G.detach().item())

            ids.append(str(cloth_no) + '_' + str(person_no))
    losses = {
        'ssim': val_ssim,
        'val_mae': val_mae,
        'psnr': val_psnr,
        'loss_G': val_losses_G,
        'ids': ids
    }
    import pandas as pd
    csv = pd.DataFrame(losses)
    csv.to_csv(f"{result_dir}/losses.csv")

    cmd = f"gsutil -m cp -r {result_dir} gs://vinit_helper/cloth_inpainting_gan/cloth_inpainting_local_binary_pattern/{opt.checkpoints_dir.split('/')[1]}"
    os.system(cmd)
Example no. 5
0
def train():
    """Debug training run: one epoch, one batch, then print weight deltas.

    Trains on a single batch from the TFRecord dataset and prints one
    convolution kernel of ``netG.dn11`` before/after the update plus its
    gradient, to verify that weights actually change.
    """
    opt.device = 'cuda:0'

    if not os.path.exists(opt.checkpoints_dir):
        os.mkdir(opt.checkpoints_dir)
    log_dir = 'tensorboard'
    writer = SummaryWriter(f"{opt.checkpoints_dir}/{log_dir}")

    from dataset_tfrecord import define_dataset
    tfrecord_path = "/content/generator_layers_v2.2_categories_lbp.record"
    batch_size = opt.batchSize
    trainset, trainset_length = define_dataset(tfrecord_path,
                                               batch_size,
                                               train=True)
    valset, valset_length = define_dataset(tfrecord_path,
                                           batch_size,
                                           train=False)

    model = MyModel()
    model.initialize(opt)
    dpp = Preprocess()  # data pre-process (dpp)

    # Snapshot one kernel before training so we can compare afterwards.
    output_channel_idx = 0
    input_channel_idx = 0
    weights_b4 = copy.deepcopy(
        model.netG.dn11[0].weight[output_channel_idx,
                                  input_channel_idx].detach().cpu())

    from torchsummary import summary
    #     summary(model.netG, (4, 256, 256))

    print('Train/Val with %d/%d' % (trainset_length, valset_length))
    for epoch in range(1, 2):
        print('Epoch: %d' % epoch)

        train_iterator = iter(trainset)
        num_iterations = int(trainset_length / batch_size)

        epoch_iter = 0
        losses_G, ssim, psnr, mae = [], [], [], []
        for i in range(num_iterations):
            if i == 1:
                break  # deliberately stop after a single batch (debug run)
            epoch_iter += opt.batchSize

            data, model_inputs = next(train_iterator)
            inpaint_region = data["inpaint_region"]

            person_cloth = data["person_cloth"]

            data = dpp(person_cloth, inpaint_region)
            try:
                model.set_input(data)
                plt.imsave(
                    'input.png',
                    model.I_i.permute(0, 2, 3, 1)[0].cpu().numpy() * 0.5 + 0.5)
                plt.imsave(
                    'inpaint_gt.png',
                    model.I_g.permute(0, 2, 3, 1)[0].cpu().numpy() * 0.5 + 0.5)
                plt.imsave(
                    'inpaint_region.png',
                    model.mask.permute(0, 2, 3, 1)[0, :, :, 0].cpu().numpy())
                I_g, I_o, loss_G, loss_G_L2, loss_G_GAN, loss_style, loss_perceptual, loss_multi, loss_D_I_o, loss_D_I_g = model.optimize_parameters(
                )
                s, p, m = metrics(I_g, I_o)
                ssim.append(s)
                psnr.append(p)
                mae.append(m)
                losses_G.append(loss_G.detach().item())
                if i % 100 == 0:
                    print(
                        '\n\nTra (%d/%d) G:%5.4f, S:%4.4f, P:%4.2f, M:%4.4f' %
                        (epoch_iter, trainset_length, np.mean(losses_G),
                         np.mean(ssim), np.mean(psnr), np.mean(mae)))
                    # The embedded run of spaces is preserved from the
                    # original (it was a backslash line-continuation inside
                    # the string literal).
                    print('D1_loss_gen: %4.4f | D1_loss_real: %4.4f | D1_G_loss: %4.4f | recon_loss: %4.4f |'
                          '                        vgg_loss: %4.4f | features_loss: %4.4f | style_loss: %4.4f'
                          % (loss_D_I_o.item(), loss_D_I_g.item(),
                             loss_G_GAN.item(), loss_G_L2.item(),
                             loss_perceptual.item(), loss_multi.item(),
                             loss_style.item()))
            except Exception as e:
                # Was a bare `except: pass`, which hid the real failure.
                print('Error: %r' % (e,))

        print('\n\n')
        print('Before the training: ')
        print(weights_b4)
        print('\n')
        print('After training of first batch with batch size 1: ')
        weights_after = model.netG.dn11[0].weight[
            output_channel_idx, input_channel_idx].detach().cpu()
        print(weights_after)
        print('\n\n')
        print('Gradients: ')
        # .grad is None when no backward pass ran (e.g. the batch errored);
        # the original would crash here on the None subscript.
        grad = model.netG.dn11[0].weight.grad
        if grad is None:
            print('No gradients recorded (backward pass did not run).')
        else:
            gradients = grad[
                output_channel_idx, input_channel_idx].detach().cpu()
            print(gradients)
        print('\n\n')