Example #1
def eval():
    model.eval()
    avg_psnr_predicted = 0.0
    for batch in testing_data_loader:
        input, flow_f, flow_b, filename, d_dir = batch[0], batch[1], batch[2], batch[3], batch[4]
        
        with torch.no_grad():
            t_im1 = Variable(input[0]).cuda(gpus_list[0])
            t_im2 = Variable(input[1]).cuda(gpus_list[0])
            t_flow_f = Variable(flow_f).cuda(gpus_list[0]).float()
            t_flow_b = Variable(flow_b).cuda(gpus_list[0]).float()
            
        t0 = time.time()                
        if opt.chop_forward:
            with torch.no_grad():
                pred_l = chop_forward(t_im1, t_im2, t_flow_f, t_flow_b, model)
        else:
            with torch.no_grad():
                _, _, _, pred_l = model(t_im1, t_im2, t_flow_f, t_flow_b, train=False)
            
        t1 = time.time()
        
        print("===> Processing: %s || Timer: %.4f sec." % (d_dir[0]+'/frame10i11.png', (t1 - t0)))
        pred_l = utils.denorm(pred_l[0].cpu().data, vgg=True)
        pred_1 = utils.denorm(t_im1[0].cpu().data, vgg=True)
        pred_2 = utils.denorm(t_im2[0].cpu().data, vgg=True)

        if opt.data_dir == 'ucf101_interp_ours':
            save_img(pred_1, d_dir[0],'frame_00.png', False)
            save_img(pred_l, d_dir[0],'frame_01_gt.png', False)
            save_img(pred_2, d_dir[0],'frame_02.png', False)
        else:
            save_img(pred_1, d_dir[0],'im1.png', False)
            save_img(pred_l, d_dir[0],'im2.png', False)
            save_img(pred_2, d_dir[0],'im3.png', False)
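Several of these examples call utils.norm / utils.denorm with a vgg flag. A minimal sketch of what such helpers typically look like, assuming vgg=True means ImageNet (VGG) mean/std normalization; the exact utils module is repo-specific, so treat these definitions as assumptions:

import torch

# Assumed ImageNet statistics applied when vgg=True.
_MEAN = torch.tensor([0.485, 0.456, 0.406]).view(3, 1, 1)
_STD = torch.tensor([0.229, 0.224, 0.225]).view(3, 1, 1)

def norm(x, vgg=False):
    # Map a [0, 1] CHW image tensor to the network input range.
    if vgg:
        return (x - _MEAN) / _STD
    return x * 2.0 - 1.0  # plain [-1, 1] normalization

def denorm(x, vgg=False):
    # Invert norm(); clamp so the result is a valid image again.
    if vgg:
        return (x * _STD + _MEAN).clamp(0, 1)
    return ((x + 1.0) / 2.0).clamp(0, 1)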
Example #2
    def print_info(self, step, total_steps, pbar):
        current_epoch = (step + 1) / self.train_steps_per_epoch

        if (step + 1) % self.args.info_step == 0:
            elapsed_num = time.time() - self.start_time
            elapsed = str(datetime.timedelta(seconds=elapsed_num))
            pbar.write(
                "Elapse:{:>.12s}, D_Step:{:>6d}/{}, G_Step:{:>6d}/{}, D_loss:{:>.4f}, G_loss:{:>.4f}, G_percep_loss:{:>.4f}, G_adv_loss:{:>.4f}, G_idt_loss:{:>.4f}"
                .format(elapsed, step + 1, total_steps, (step + 1),
                        total_steps, self.d_loss, self.g_loss,
                        self.g_percep_loss, self.g_adv_loss, self.g_idt_loss))

        # sample images
        if (step + 1) % self.args.sample_step == 0:
            for i in range(0, self.real_raw.size(0)):
                save_imgs = torch.cat([
                    denorm(self.real_raw.data)[i:i + 1, :, :, :],
                    denorm(self.fake_exp.data)[i:i + 1, :, :, :],
                    denorm(self.real_exp.data)[i:i + 1, :, :, :]
                ], 3)
                save_image(
                    save_imgs,
                    os.path.join(
                        self.sample_path,
                        '{:s}_{:0>3.2f}_{:0>2d}_realRaw_fakeExp_realExp.png'.
                        format(self.real_raw_name[i], current_epoch, i)))

        # save models
        if (step + 1) % self.model_save_step == 0:
            # Unwrap nn.DataParallel before saving; combining the two
            # conditions avoids a NameError that the original code hit
            # when parallel was requested but only one GPU was visible
            # (checkpoint was never assigned in that branch).
            if self.args.parallel and torch.cuda.device_count() > 1:
                checkpoint = {
                    "G_net": self.G.module.state_dict(),
                    "D_net": self.D.module.state_dict(),
                    "epoch": current_epoch,
                    "g_optimizer": self.g_optimizer.state_dict(),
                    "d_optimizer": self.d_optimizer.state_dict(),
                    "lr_scheduler_g": self.lr_scheduler_g.state_dict(),
                    "lr_scheduler_d": self.lr_scheduler_d.state_dict()
                }
            else:
                checkpoint = {
                    "G_net": self.G.state_dict(),
                    "D_net": self.D.state_dict(),
                    "epoch": current_epoch,
                    "g_optimizer": self.g_optimizer.state_dict(),
                    "d_optimizer": self.d_optimizer.state_dict(),
                    "lr_scheduler_g": self.lr_scheduler_g.state_dict(),
                    "lr_scheduler_d": self.lr_scheduler_d.state_dict()
                }
            torch.save(
                checkpoint,
                os.path.join(
                    self.model_save_path,
                    '{}_{}_{}.pth'.format(self.args.version,
                                          self.args.adv_loss_type,
                                          current_epoch)))

            pbar.write("======= Save model checkpoints into {} ======".format(
                self.model_save_path))
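The checkpoint dictionary written above pairs naturally with a resume routine. A hedged sketch reusing the same keys; ckpt_path is a hypothetical argument, and the attribute names simply mirror those used in print_info:

# ckpt_path is hypothetical; the keys follow the checkpoint saved above.
checkpoint = torch.load(ckpt_path, map_location='cpu')
self.G.load_state_dict(checkpoint['G_net'])
self.D.load_state_dict(checkpoint['D_net'])
self.g_optimizer.load_state_dict(checkpoint['g_optimizer'])
self.d_optimizer.load_state_dict(checkpoint['d_optimizer'])
self.lr_scheduler_g.load_state_dict(checkpoint['lr_scheduler_g'])
self.lr_scheduler_d.load_state_dict(checkpoint['lr_scheduler_d'])
start_epoch = checkpoint['epoch']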
Example #3
def generate_all(data_loader, device, args):
    """Generate Images using DIV2K Validation set"""

    # Single Results Path #
    if not os.path.exists(args.single_results_path):
        os.makedirs(args.single_results_path)

    # Prepare Networks #
    edsr = EDSR(channels=args.channels,
                features=args.dim,
                num_residuals=args.num_residuals,
                scale_factor=args.upscale_factor).to(device)

    # Weight Paths #
    edsr_weight_path = os.path.join(
        args.weights_path, '{}_Epoch_{}.pkl'.format(edsr.__class__.__name__,
                                                    args.num_epochs))
    #rdn_weight_path = os.path.join(args.weights_path, '{}_Epoch_{}.pkl'.format(rdn.__class__.__name__, args.num_epochs))
    #srgan_weight_path = os.path.join(args.weights_path, '{}_Epoch_{}.pkl'.format(srgan.__class__.__name__, args.num_epochs))
    #esrgan_weight_path = os.path.join(args.weights_path, '{}_Epoch_{}.pkl'.format(esrgan.__class__.__name__, args.num_epochs))

    # Load State Dict #
    edsr.load_state_dict(torch.load(edsr_weight_path))
    # rdn.load_state_dict(torch.load(rdn_weight_path))
    # srgan.load_state_dict(torch.load(srgan_weight_path))
    # esrgan.load_state_dict(torch.load(esrgan_weight_path))

    # Up-sampling Network #
    up_sampler = torch.nn.Upsample(scale_factor=args.upscale_factor,
                                   mode='bicubic').to(device)

    for i, (high, low) in enumerate(data_loader):

        # Prepare Data #
        high = high.to(device)
        low = low.to(device)

        # Forward Data to Networks #
        with torch.no_grad():
            bicubic = up_sampler(low.detach())
            generated_edsr = edsr(low.detach())
            # generated_rdn = rdn(low.detach())
            # generated_srgan = srgan(low.detach())
            # generated_esrgan = esrgan(low.detach())

        # Normalize and Save Images #
        save_image(
            denorm(high.data),
            os.path.join(args.single_results_path,
                         'Inference_Samples_%03d_TARGET.png' % (i + 1)))
        save_image(
            denorm(bicubic.data),
            os.path.join(args.single_results_path,
                         'Inference_Samples_%03d_BICUBIC.png' % (i + 1)))
        save_image(
            denorm(generated_edsr.data),
            os.path.join(
                args.single_results_path, 'Inference_Samples_%03d_%s.png' %
                (i + 1, edsr.__class__.__name__)))
Example #4
def train(epoch):
    epoch_loss = 0
    model.train()
    feature_extractor.eval()
    for iteration, batch in enumerate(training_data_loader, 1):
        target, flow_f, flow_b, gt_flow_f, gt_flow_b = (
            batch[0], batch[1], batch[2], batch[3], batch[4])

        if cuda:
            t_flow_f = Variable(flow_f).cuda(gpus_list[0]).float()
            t_flow_b = Variable(flow_b).cuda(gpus_list[0]).float()
            gt_flow_f = Variable(gt_flow_f).cuda(gpus_list[0]).float()
            gt_flow_b = Variable(gt_flow_b).cuda(gpus_list[0]).float()

            t_hr1 = Variable(target[0]).cuda(gpus_list[0])
            t_hr = Variable(target[1]).cuda(gpus_list[0])
            t_hr2 = Variable(target[2]).cuda(gpus_list[0])

        optimizer.zero_grad()
        t0 = time.time()

        _, _, _, pred_l, pred_flow_f, pred_flow_b = model(
            t_hr1, t_hr2, t_flow_f, t_flow_b)

        l_mse_lr = mse_loss_calc(pred_l, t_hr)
        l_mse = l_mse_lr

        l_feat_lr = feat_loss_calc(pred_l, t_hr)
        l_feat = l_feat_lr

        l_flow = criterion(pred_flow_f, gt_flow_f) + criterion(
            pred_flow_b, gt_flow_b)

        #loss = l_flow
        loss = l_mse + 0.1 * l_feat + 0.1 * l_flow
        t1 = time.time()

        # show sample
        pred_l = utils.denorm(pred_l[0][0].cpu().data, vgg=True)
        pred_l1 = utils.denorm(t_hr1[0].cpu().data, vgg=True)
        pred_l2 = utils.denorm(t_hr2[0].cpu().data, vgg=True)
        save_img(pred_l, "lr2")
        save_img(pred_l1, "lr1")
        save_img(pred_l2, "lr3")

        epoch_loss += loss.data
        loss.backward()
        optimizer.step()
        #print(loss_mse_hr.data , loss_mse_lr.data, loss_feat_hr.data, loss_feat_l.data)
        print(
            "===> Epoch[{}]({}/{}): Loss: {:.4f} MSE_LR: {:.4f} FEAT_LR: {:.4f}  Flow: {:.8f} || Timer: {:.4f} sec."
            .format(epoch, iteration, len(training_data_loader), loss.data,
                    l_mse_lr, l_feat_lr, l_flow, (t1 - t0)))
    print("===> Epoch {} Complete: Avg. Loss: {:.4f}".format(
        epoch, epoch_loss / len(training_data_loader)))
Example #5
def loss_function(batch_x, batch_y):
    logits = model(batch_x, training=True)
    denorm_x = denorm(logits, _min, _max)
    denorm_y = denorm(batch_y, _min, _max)
    lossL2 = tf.add_n(model.losses)  # accumulated L2 regularization terms
    return rmse(denorm_x, denorm_y) + lossL2
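Unlike the PyTorch examples, this one passes explicit _min/_max bounds, which suggests min-max scaling rather than mean/std normalization. A minimal sketch under that assumption; both signatures are inferred from the call site, not taken from the project:

import tensorflow as tf

def denorm(x, x_min, x_max):
    # Assumed inverse of min-max scaling: [0, 1] -> [x_min, x_max].
    return x * (x_max - x_min) + x_min

def rmse(y_pred, y_true):
    # Root-mean-square error over all elements.
    return tf.sqrt(tf.reduce_mean(tf.square(y_pred - y_true)))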
Example #6
File: main.py Project: zoq/STARnet
def train(epoch):
    epoch_loss = 0
    model.train()
    feature_extractor.eval()
    for iteration, batch in enumerate(training_data_loader, 1):
        input, target, target_l, flow_f, flow_b = batch[0], batch[1], batch[2], batch[3], batch[4]
        
        if cuda:
            t_im1 = Variable(input[0]).cuda(gpus_list[0])
            t_im2 = Variable(input[1]).cuda(gpus_list[0])
            t_flow_f = Variable(flow_f).cuda(gpus_list[0]).float()
            t_flow_b = Variable(flow_b).cuda(gpus_list[0]).float()
            
            t_target1 = Variable(target[0]).cuda(gpus_list[0])
            t_target = Variable(target[1]).cuda(gpus_list[0])
            t_target2 = Variable(target[2]).cuda(gpus_list[0])
            t_target_l = Variable(target_l).cuda(gpus_list[0])
                
        optimizer.zero_grad()
        t0 = time.time()
        pred_ht, pred_h1, pred_h2, pred_l = model(t_im1, t_im2, t_flow_f, t_flow_b)
                                
        l_mse_ht = mse_loss_calc(pred_ht, t_target)
        l_mse_h1 = mse_loss_calc(pred_h1, t_target1)
        l_mse_h2 = mse_loss_calc(pred_h2, t_target2)
        l_mse_lr = mse_loss_calc(pred_l, t_target_l)        
        l_mse = l_mse_ht + 0.5*l_mse_h1 + 0.5*l_mse_h2 + l_mse_lr
            
        l_feat_ht = feat_loss_calc(pred_ht, t_target)
        l_feat_h1 = feat_loss_calc(pred_h1, t_target1)
        l_feat_h2 = feat_loss_calc(pred_h2, t_target2)
        l_feat_lr = feat_loss_calc(pred_l, t_target_l)    
        l_feat = l_feat_ht + 0.5*l_feat_h1 + 0.5*l_feat_h2 + l_feat_lr
        
        loss = l_mse + 0.1*l_feat
        t1 = time.time()
            
        # show sample
        predictiont = utils.denorm(pred_ht[0][0].cpu().data, vgg=True)
        prediction1 = utils.denorm(pred_h1[0][0].cpu().data, vgg=True)
        prediction2 = utils.denorm(pred_h2[0][0].cpu().data, vgg=True)
        pred_l = utils.denorm(pred_l[0][0].cpu().data, vgg=True)
        save_img(prediction1, "1")
        save_img(predictiont, "2")
        save_img(prediction2, "3")
        save_img(pred_l, "lr")
            
        epoch_loss += loss.data
        loss.backward()
        optimizer.step()

        print("===> Epoch[{}]({}/{}): Loss: {:.4f} MSE_HR: {:.4f} Feat_HR: {:.4f} MSE_LR: {:.4f} || Timer: {:.4f} sec.".format(epoch, iteration, len(training_data_loader), loss.data, l_mse_ht, l_feat_ht, l_mse_lr, (t1 - t0)))
    print("===> Epoch {} Complete: Avg. Loss: {:.4f}".format(epoch, epoch_loss / len(training_data_loader)))
Example #7
    def test(self):
        model_list = glob(
            os.path.join(self.result_dir, self.dataset, 'model', '*.pt'))
        if not len(model_list) == 0:
            model_list.sort()
            iter = int(model_list[-1].split('_')[-1].split('.')[0])
            self.load(os.path.join(self.result_dir, self.dataset, 'model'),
                      iter)
            print(" [*] Load SUCCESS")
        else:
            print(" [*] Load FAILURE")
            return

        self.genA2B.eval()
        self.genB2A.eval()
        for n, (real_A, _) in enumerate(self.testA_loader):
            real_A = real_A.to(self.device)

            fake_A2B, _, fake_A2B_heatmap = self.genA2B(real_A)

            fake_A2B2A, _, fake_A2B2A_heatmap = self.genB2A(fake_A2B)

            fake_A2A, _, fake_A2A_heatmap = self.genB2A(real_A)

            A2B = np.concatenate((
                RGB2BGR(tensor2numpy(denorm(real_A[0]))),
                cam(tensor2numpy(fake_A2A_heatmap[0]), self.img_size),
                RGB2BGR(tensor2numpy(denorm(fake_A2A[0]))),
                cam(tensor2numpy(fake_A2B_heatmap[0]), self.img_size),
                RGB2BGR(tensor2numpy(denorm(fake_A2B[0]))),
                cam(tensor2numpy(fake_A2B2A_heatmap[0]), self.img_size),
                RGB2BGR(tensor2numpy(denorm(fake_A2B2A[0]))),
            ), 0)

            cv2.imwrite(
                os.path.join(self.result_dir, self.dataset, 'test',
                             'A2B_%d.png' % (n + 1)), A2B * 255.0)

        for n, (real_B, _) in enumerate(self.testB_loader):
            real_B = real_B.to(self.device)

            fake_B2A, _, fake_B2A_heatmap = self.genB2A(real_B)

            fake_B2A2B, _, fake_B2A2B_heatmap = self.genA2B(fake_B2A)

            fake_B2B, _, fake_B2B_heatmap = self.genA2B(real_B)

            B2A = np.concatenate((
                RGB2BGR(tensor2numpy(denorm(real_B[0]))),
                cam(tensor2numpy(fake_B2B_heatmap[0]), self.img_size),
                RGB2BGR(tensor2numpy(denorm(fake_B2B[0]))),
                cam(tensor2numpy(fake_B2A_heatmap[0]), self.img_size),
                RGB2BGR(tensor2numpy(denorm(fake_B2A[0]))),
                cam(tensor2numpy(fake_B2A2B_heatmap[0]), self.img_size),
                RGB2BGR(tensor2numpy(denorm(fake_B2A2B[0]))),
            ), 0)

            cv2.imwrite(
                os.path.join(self.result_dir, self.dataset, 'test',
                             'B2A_%d.png' % (n + 1)), B2A * 255.0)
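Example #7 leans on a few small helpers. Minimal sketches, assuming the usual U-GAT-IT conventions (generator outputs in [-1, 1], CHW float tensors); treat these as illustrations rather than the project's exact code:

import cv2

def denorm(x):
    # [-1, 1] -> [0, 1]
    return x * 0.5 + 0.5

def tensor2numpy(x):
    # CHW tensor -> HWC numpy array
    return x.detach().cpu().numpy().transpose(1, 2, 0)

def RGB2BGR(x):
    # OpenCV expects BGR channel order for imwrite
    return cv2.cvtColor(x, cv2.COLOR_RGB2BGR)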
Example #8
def inference():

    # Inference Path #
    make_dirs(config.inference_path)

    # Prepare Data Loader #
    test_loader_selfie, test_loader_anime = get_selfie2anime_loader('test', config.batch_size)

    # Prepare Generator #
    G_A2B = Generator(image_size=config.crop_size, num_blocks=config.num_blocks).to(device)

    G_A2B.load_state_dict(torch.load(os.path.join(config.weights_path, 'U-GAT-IT_G_A2B_Epoch_{}.pkl'.format(config.num_epochs))))

    # Inference #
    print("U-GAT-IT | Generating Selfie2Anime images started...")
    with torch.no_grad():
        for i, (selfie, anime) in enumerate(zip(test_loader_selfie, test_loader_anime)):

            # Prepare Data #
            real_A = selfie.to(device)

            # Generate Fake Images #
            fake_B = G_A2B(real_A)[0]

            # Save Images (Selfie -> Anime) #
            result = torch.cat((real_A, fake_B), dim=0)
            save_image(denorm(result.data),
                       os.path.join(config.inference_path, 'U-GAT-IT_Selfie2Anime_Results_%03d.png' % (i + 1))
                       )

    # Make a GIF file #
    make_gifs_test("U-GAT-IT", "Selfie2Anime", config.inference_path)
Example #9
def interpolate(model,
                device,
                latent_dim,
                hair_classes,
                eye_classes,
                samples=10):
    z1 = torch.randn(1, latent_dim).to(device)
    h1 = torch.zeros(1, hair_classes).to(device)
    e1 = torch.zeros(1, eye_classes).to(device)
    h1[0][np.random.randint(hair_classes)] = 1
    e1[0][np.random.randint(eye_classes)] = 1
    c1 = torch.cat((h1, e1), 1)

    z2 = torch.randn(1, latent_dim).to(device)
    h2 = torch.zeros(1, hair_classes).to(device)
    e2 = torch.zeros(1, eye_classes).to(device)
    h2[0][np.random.randint(hair_classes)] = 1
    e2[0][np.random.randint(eye_classes)] = 1
    c2 = torch.cat((h2, e2), 1)

    z_diff = z2 - z1
    c_diff = c2 - c1
    z_step = z_diff / (samples + 1)
    c_step = c_diff / (samples + 1)

    img_list = []
    for i in range(0, samples + 2):
        z = z1 + z_step * i
        c = c1 + c_step * i
        img_list.append(model(z, c))
    output = torch.cat(img_list, 0)
    save_image(utils.denorm(output),
               '{}/interpolation_{}.png'.format(args.sample_dir, args.index),
               nrow=samples + 2)
Example #10
def generate_faces():

    # Test Path #
    make_dirs(config.inference_path)

    # Prepare Generator #
    G = Generator().to(device)
    G.load_state_dict(
        torch.load(
            os.path.join(
                config.weights_path,
                'Face_Generator_Epoch_{}.pkl'.format(config.num_epochs))))
    G.eval()

    # Start Generating Faces #
    count = 1

    while True:

        # Prepare Fixed Noise and Generator #
        noise = torch.randn(config.batch_size, config.noise_dim).to(device)
        generated = G(noise)

        for i in range(config.batch_size):
            save_image(
                denorm(generated[i].data),
                os.path.join(config.inference_path,
                             "Generated_CelebA_Faces_{}.png".format(count)),
            )
            count += 1

        if count > config.limit:
            print("Generating fake CelebA faces is finished.")
            break
Example #11
def main():
    latent_dim = 200
    hair_classes, eye_classes, face_classes, glasses_classes = 6, 4, 3, 2
    num_classes = hair_classes + eye_classes + face_classes + glasses_classes

    G_path = './checkpoint/ACGAN_generator_FID166.ckpt'
    prev_state = torch.load(G_path, map_location=device)

    G = Generator(latent_dim=latent_dim, class_dim=num_classes).to(device)
    G.load_state_dict(prev_state['model'])
    G = G.eval()

    # root_dir = '../sample_test/sample_fid_testing_labels.txt'
    root_dir = args.label_dir
    transform = Transform.Compose([Transform.ToTensor(),
                                   Transform.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])

    dataset = Anime(root_dir=root_dir, transform=transform, batch_size=64)

    for i in tqdm(range(dataset.length())):
        # print("Step: ", i)
        hair_tags, eye_tags, face_tags, glasses_tags = dataset.get_item(i)
        hair_tags = hair_tags.to(device)
        eye_tags = eye_tags.to(device)
        face_tags = face_tags.to(device)
        glasses_tags = glasses_tags.to(device)

        z = torch.randn(1, latent_dim).to(device)
        fake_tag = torch.cat((hair_tags, eye_tags, face_tags, glasses_tags)).unsqueeze(0).float().to(device)
        fake_img = G(z, fake_tag).to(device)
        save_image(utils.denorm(fake_img), '{}/{}.png'.format(args.output_dir, i))
Example #12
def main():
    latent_dim = 200
    device = args.device
    hair_classes, eye_classes, face_classes, glasses_classes = 6, 4, 3, 2
    num_classes = hair_classes + eye_classes + face_classes + glasses_classes

    # G_path = '{}/ACGAN-batch_size-[64]-steps-[{}]/G_{}.ckpt'.format(args.model_dir, args.steps, args.n_ckpt)
    G_path = '{}/G_{}.ckpt'.format(args.model_dir, args.n_ckpt)
    prev_state = torch.load(G_path)

    G = Generator(latent_dim=latent_dim, class_dim=num_classes).to(device)
    G.load_state_dict(prev_state['model'])
    G = G.eval()

    out_path = '{}/fid_gen_steps{}_with_G_{}'.format(args.out_dir, args.steps,
                                                     args.n_ckpt)
    if not os.path.exists(out_path):
        os.makedirs(out_path)

    z = torch.randn(1, latent_dim).to(device)  # fix noise

    img_i = 0
    with open('./sample_test/sample_fid_testing_labels.txt') as f:
        lines = f.readlines()
        for line in lines[2:]:
            label = list(map(int, line.split(' ')))
            fake_tag = torch.tensor(label).unsqueeze(0).float().to(device)
            fake_img = G(z, fake_tag).to(device)
            save_image(
                utils.denorm(fake_img),
                '{}/{}.png'.format(out_path, img_i))
            print('{}.png saved'.format(img_i))
            img_i += 1
Example #13
File: trainer.py Project: PanXiebit/my_dpc
    def __init__(self, args, model, criterion):
        self.args = args
        self.model = model
        self.criterion = criterion
        self.use_cuda = torch.cuda.is_available()

        if self.use_cuda:
            self.model = self.model.to("cuda")
            self.criterion = self.criterion.to("cuda")

        # optimizer
        if args.train_what == 'last':  # fine-tune: freeze the ResNet backbone, train only the remaining layers
            for name, param in model.module.resnet.named_parameters():
                param.requires_grad = False
        else:
            pass  # train all layers

        # print('\n===========Check Grad============')
        # for name, param in model.named_parameters():
        #     print(name, param.requires_grad)
        # print('=================================\n')

        params = list(
            filter(lambda p: p.requires_grad, self.model.parameters()))
        self.optimizer = torch.optim.Adam(params,
                                          lr=args.lr,
                                          weight_decay=args.wd)

        print('| num. module params: {} (num. trained: {})'.format(
            sum(p.numel() for p in self.model.parameters()),
            sum(p.numel() for p in params),
        ))

        self.de_normalize = denorm()
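Note that denorm() is instantiated and stored here, implying a callable transform rather than the plain function seen elsewhere. A plausible sketch under that assumption, with ImageNet statistics assumed:

import torch
import torch.nn as nn

class denorm(nn.Module):
    # Assumed module form: undoes ImageNet mean/std normalization.
    def __init__(self, mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)):
        super().__init__()
        self.register_buffer('mean', torch.tensor(mean).view(1, 3, 1, 1))
        self.register_buffer('std', torch.tensor(std).view(1, 3, 1, 1))

    def forward(self, x):
        return x * self.std + self.mean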
Example #14
def eval():
    model.eval()
    for batch in testing_data_loader:
        input, name = batch[0], batch[2]
        input[0] = utils.norm(input[0], vgg=True)

        with torch.no_grad():
            input = Variable(input)

        if cuda:
            input = input.cuda(gpus_list[0])

        t0 = time.time()
        if opt.chop_forward:
            with torch.no_grad():
                prediction = chop_forward(input, model, opt.upscale_factor)
        else:
            if opt.self_ensemble:
                with torch.no_grad():
                    prediction = x8_forward(input, model)
            else:
                with torch.no_grad():
                    prediction = model(input)
        t1 = time.time()
        print("===> Processing: %s || Timer: %.4f sec." % (name[0], (t1 - t0)))
        prediction = utils.denorm(prediction.data[0], vgg=True)
        save_img(prediction.cpu(), name[0])
Example #15
    def train(self):
        """overrode default base function for custom function
             - logs total duration of training in seconds
         """
        tik = time.time()
        for it in trange(self.config.num_iter):
            g_loss, d_loss, d_loss1, d_loss2, d_loss3 = self.train_step()
            if it % self.config.save_iter == 0:
                self.model.save(self.sess)
            if it % self.config.sample_iter == 0:
                images = self.sess.run([self.model.sample_image])

                for i, image in enumerate(images[0]):
                    image = denorm(np.squeeze(image))
                    sample_path = os.path.join(
                        self.config.sample_dir,
                        '{}-{}-sample.jpg'.format(i, it))
                    skimage.io.imsave(sample_path, image)

            if it % 100 == 0:
                summaries_dict = {}
                summaries_dict['g_loss'] = g_loss
                summaries_dict['d_loss'] = d_loss
                summaries_dict['d_real_loss'] = d_loss1
                summaries_dict['d_wrong_loss'] = d_loss2
                summaries_dict['d_fake_loss'] = d_loss3
                self.logger.summarize(it, summaries_dict=summaries_dict)

        tok = time.time()
        logging.info('Duration: {} seconds'.format(tok - tik))
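Example #15 saves with skimage.io.imsave, so its denorm presumably returns a uint8 numpy image. A minimal sketch under that assumption:

import numpy as np

def denorm(image):
    # Assumed mapping: [-1, 1] float array -> [0, 255] uint8 image.
    image = (image + 1.0) / 2.0
    return (np.clip(image, 0.0, 1.0) * 255.0).astype(np.uint8)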
Example #16
File: sample.py Project: yqGANs/LOGAN
def generate_images(out_dir, G, n_images, config, device):
    im_batch_size = config['n_classes']
    z_bound = config['trunc_z']
    if z_bound > 0.0:
        print(f'Truncating z to (-{z_bound}, {z_bound})')

    for i_batch in range(0, n_images, im_batch_size):
        with torch.no_grad():
            if z_bound > 0.0:
                z = trunc_trick(im_batch_size, G.dim_z,
                                bound=z_bound).to(device)
            else:
                z = torch.randn(im_batch_size, G.dim_z, device=device)
            y = torch.arange(im_batch_size).to(device)
            images = G(z, G.shared(y)).float().cpu()

        if i_batch + im_batch_size > n_images:
            n_last_images = n_images - i_batch
            print(f'Taking only {n_last_images} images from the last batch...')
            images = images[:n_last_images]

        for i_image, image in enumerate(images):
            fname = os.path.join(out_dir, f'image_{i_batch+i_image:05d}.png')
            image = utils.denorm(image)
            torchvision.utils.save_image(image, fname)
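trunc_trick is not shown here; the name and the bound argument suggest the BigGAN-style truncation trick. A self-contained sketch of that idea (resample out-of-bound latent entries), not necessarily this project's implementation:

import torch

def trunc_trick(batch_size, dim_z, bound=1.0):
    z = torch.randn(batch_size, dim_z)
    # Resample entries until every value lies inside (-bound, bound).
    while True:
        mask = z.abs() > bound
        if not mask.any():
            return z
        z[mask] = torch.randn(int(mask.sum().item()))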
Example #17
    def logging(self, step):
        self.loss = {}
        self.images = {}
        self.loss['D/Total'] = self.d_loss
        self.loss['G/Total'] = self.g_loss
        self.loss['G/adv_loss'] = self.g_adv_loss
        self.loss['G/percep_loss'] = self.g_percep_loss
        self.loss['G/idt_loss'] = self.g_idt_loss

        self.images['Train_realExpIdt/realExp_realExpIdt'] = torch.cat([
            denorm(self.real_exp.cpu()),
            denorm(self.real_exp_idt.detach().cpu())
        ], 3)
        self.images['Train_compare/realRaw_fakeExp_realExp'] = torch.cat([
            denorm(self.real_raw.cpu()),
            denorm(self.fake_exp.detach().cpu()),
            denorm(self.real_exp.cpu())
        ], 3)
        self.images['Train_fakeExp/fakeExp'] = denorm(
            self.fake_exp.detach().cpu())
        self.images['Train_fakeExpStore/fakeExpStore'] = denorm(
            self.fake_exp_store.detach().cpu())

        if (step + 1) % self.args.log_step == 0:
            if self.args.use_tensorboard:
                for tag, value in self.loss.items():
                    self.logger.scalar_summary(tag, value, step + 1)
                for tag, image in self.images.items():
                    self.logger.images_summary(tag, image, step + 1)
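The logger here exposes scalar_summary and images_summary. A minimal sketch of a compatible logger backed by torch.utils.tensorboard; the original project may well use a different backend, so this is only an assumption about the interface:

from torch.utils.tensorboard import SummaryWriter

class Logger:
    def __init__(self, log_dir):
        self.writer = SummaryWriter(log_dir)

    def scalar_summary(self, tag, value, step):
        self.writer.add_scalar(tag, value, step)

    def images_summary(self, tag, images, step):
        # images: NCHW tensor in [0, 1], as produced by denorm above.
        self.writer.add_images(tag, images, step)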
Example #18
def main():
    f = open("./sample_test/sample_human_testing_labels.txt", 'r')
    num_of_samples = f.readline()
    print(f.readline())
    tags_list = []
    #results=[]
    for line in f:
        #print(line)
        #print(line.split())
        line = line.split()
        #print(line[0])
        tags = list(map(int, line[:]))
        #        print(tags)
        #print(len(tags))
        #print(np.asarray(tags))
        tags_list.append(torch.from_numpy(np.asarray(tags)))


#        print(tags_list)
#        input()
    f.close()
    #print(len(tags_list)) #correct
    #input()
    if not os.path.exists(args.sample_dir):
        os.mkdir(args.sample_dir)
    latent_dim = 100
    hair_classes = 6
    eye_classes = 4
    face_classes = 3
    glass_classes = 2
    batch_size = 1

    device = 'cuda'
    G_path = '{}/G_50000.ckpt'.format(args.model_dir)
    # load the trained generator checkpoint

    G = ACGAN_split.Generator(latent_dim=latent_dim,
                              class_dim=hair_classes + eye_classes +
                              face_classes + glass_classes)
    prev_state = torch.load(G_path)
    G.load_state_dict(prev_state['model'])
    G = G.eval()
    G = G.to(device)
    num = 0
    for tag in tags_list:
        # sample a fresh noise vector for each label row
        z = torch.randn(latent_dim).unsqueeze(0).to(device)
        tag = tag.unsqueeze(0).to(device).type(torch.float32)
        img = G(z, tag)
        save_image(utils.denorm(img), '{}/{}.png'.format(args.sample_dir, num))
        num += 1
    print(num)
Example #19
def inference():

    # Inference Path #
    make_dirs(config.inference_path)

    # Prepare Data Loader #
    val_loader = get_edges2handbags_loader('val', config.val_batch_size)

    # Prepare Generator #
    G = Generator(z_dim=config.z_dim).to(device)
    G.load_state_dict(
        torch.load(
            os.path.join(
                config.weights_path,
                'BicycleGAN_Generator_Epoch_{}.pkl'.format(
                    config.num_epochs))))
    G.eval()

    # Fixed Noise #
    fixed_noise = torch.randn(config.test_size, config.num_images,
                              config.z_dim).to(device)

    # Test #
    print("BiCycleGAN | Generating Edges2Handbags Images started...")
    for iters, (sketch, ground_truth) in enumerate(val_loader):

        # Prepare Data #
        N = sketch.size(0)
        sketch = sketch.to(device)
        results = torch.FloatTensor(N * (1 + config.num_images), 3,
                                    config.crop_size, config.crop_size)

        # Generate Fake Images #
        for i in range(N):
            results[i * (1 + config.num_images)] = sketch[i].data

            for j in range(config.num_images):
                image = sketch[i].unsqueeze(dim=0)
                noise_to_generator = fixed_noise[i, j, :].unsqueeze(dim=0)

                out = G(image, noise_to_generator)
                results[i * (1 + config.num_images) + j + 1] = out

            # Save Images #
            save_image(
                denorm(results.data),
                os.path.join(
                    config.inference_path,
                    'BicycleGAN_Edges2Handbags_Results_%03d.png' %
                    (iters + 1)),
                nrow=(1 + config.num_images),
            )

    make_gifs_test("BicycleGAN", config.inference_path)
Example #20
def generate_celeba_synthesize(sfs_net_model,
                               dl,
                               train_epoch_num=0,
                               use_cuda=False,
                               out_folder=None,
                               wandb=None):

    # debugging flag to dump image
    fix_bix_dump = 0
    recon_loss = nn.L1Loss()

    if use_cuda:
        recon_loss = recon_loss.cuda()

    tloss = 0  # Total loss
    rloss = 0  # Reconstruction loss

    for bix, data in enumerate(dl):
        face = data
        if use_cuda:
            face = face.cuda()

        # predicted_face == reconstruction
        predicted_normal, predicted_albedo, predicted_sh, predicted_shading, predicted_face = sfs_net_model(
            face)

        # save predictions in log folder
        file_name = out_folder + str(train_epoch_num) + '_' + str(bix)
        # log images
        predicted_normal = denorm(predicted_normal)
        save_image(predicted_normal, path=file_name + '_normal.png')
        save_image(predicted_albedo, path=file_name + '_albedo.png')
        save_image(predicted_shading, path=file_name + '_shading.png')
        save_image(predicted_face, path=file_name + '_recon.png')
        save_image(face, path=file_name + '_face.png')
        np.savetxt(file_name + '_light.txt',
                   predicted_sh.cpu().detach().numpy(),
                   delimiter='\t')

        # Loss computation
        # Reconstruction loss
        total_loss = recon_loss(predicted_face, face)

        # Logging for display and debugging purposes
        tloss += total_loss.item()

    len_dl = len(dl)

    f = open(out_folder + 'readme.txt', 'w')
    f.write('Average Reconstruction Loss: ' + str(tloss / len_dl))
    f.close()

    # return average loss over dataset
    return tloss / len_dl
Example #21
def save_test_images(opt, preds, batch_dict, path, index):
    preds = preds.cpu().detach()
    if opt.dataset == 'hurricane':
        gt = batch_dict['orignal_data_to_predict'].cpu().detach()
    else:
        gt = batch_dict['data_to_predict'].cpu().detach()

    b, t, c, h, w = gt.shape
    
    if opt.input_norm:
        preds = utils.denorm(preds)
        gt = utils.denorm(gt)
    
    os.makedirs(os.path.join(path, 'pred'), exist_ok=True)
    os.makedirs(os.path.join(path, 'gt'), exist_ok=True)
    
    for i in range(b):
        for j in range(t):
            save_image(preds[i, j, ...], os.path.join(path, 'pred', f"pred_{index + i:03d}_{j:03d}.png"))
            save_image(gt[i, j, ...], os.path.join(path, 'gt', f"gt_{index + i:03d}_{j:03d}.png"))
Example #22
def inference():

    # Inference Path #
    make_dirs(config.inference_path)

    # Prepare Data Loader #
    val_loader = get_edges2shoes_loader(purpose='val',
                                        batch_size=config.val_batch_size)

    # Prepare Generator #
    G_A2B = Generator().to(device)
    G_B2A = Generator().to(device)

    G_A2B.load_state_dict(
        torch.load(
            os.path.join(
                config.weights_path,
                'DiscoGAN_Generator_A2B_Epoch_{}.pkl'.format(
                    config.num_epochs))))
    G_B2A.load_state_dict(
        torch.load(
            os.path.join(
                config.weights_path,
                'DiscoGAN_Generator_B2A_Epoch_{}.pkl'.format(
                    config.num_epochs))))

    # Test #
    print("DiscoGAN | Generating Edges2Shoes images started...")
    for i, (real_A, real_B) in enumerate(val_loader):

        # Prepare Data #
        real_A = real_A.to(device)
        real_B = real_B.to(device)

        # Generate Fake Images #
        fake_B = G_A2B(real_A)
        fake_A = G_B2A(real_B)

        # Generated Reconstructed Images #
        fake_ABA = G_B2A(fake_B)
        fake_BAB = G_A2B(fake_A)

        # Save Images #
        result = torch.cat(
            (real_A, fake_A, fake_BAB, real_B, fake_B, fake_ABA), dim=0)
        save_image(denorm(result.data),
                   os.path.join(
                       config.inference_path,
                       'DiscoGAN_Edges2Shoes_Results_%03d.png' % (i + 1)),
                   nrow=8,
                   normalize=True)

    # Make a GIF file #
    make_gifs_test("DiscoGAN", config.inference_path)
Example #23
    def test_change(self):
        model_list = os.listdir(
            os.path.join(self.result_dir, self.dataset, 'model'))
        if not len(model_list) == 0:
            model_list.sort()
            iter = int(model_list[-1].split('/')[-1])
            self.load(os.path.join(self.result_dir, self.dataset, 'model'),
                      iter)
            print("[*] Load SUCCESS")
        else:
            print("[*] Load FAILURE")
            return

        self.genA2B.eval()
        self.genB2A.eval()
        for n, (real_A, fname) in enumerate(self.testA_loader()):
            real_A = np.array([real_A[0].reshape(3, 256,
                                                 256)]).astype("float32")
            real_A = to_variable(real_A)
            fake_A2B, _, _ = self.genA2B(real_A)

            A2B = RGB2BGR(tensor2numpy(denorm(fake_A2B[0])))

            cv2.imwrite(
                os.path.join(
                    self.result_dir, self.dataset, 'test', 'testA2B',
                    '%s_fake.%s' %
                    (fname.split('.')[0], fname.split('.')[-1])), A2B * 255.0)

        for n, (real_B, fname) in enumerate(self.testB_loader()):
            real_B = np.array([real_B[0].reshape(3, 256,
                                                 256)]).astype("float32")
            real_B = to_variable(real_B)
            fake_B2A, _, _ = self.genB2A(real_B)

            B2A = RGB2BGR(tensor2numpy(denorm(fake_B2A[0])))

            cv2.imwrite(
                os.path.join(
                    self.result_dir, self.dataset, 'test', 'testB2A',
                    '%s_fake.%s' %
                    (fname.split('.')[0], fname.split('.')[-1])), B2A * 255.0)
Example #24
    def save_images(engine):
        if needs_save:
            if engine.state.epoch % config.save.save_epoch_interval == 0:
                image = norm(engine.state.batch['image'])

                with torch.no_grad():
                    z, _, _ = E(image)
                    x_r = D(z)
                    x_p = D(fixed_z)

                image = denorm(image).detach().cpu()
                x_r = denorm(x_r).detach().cpu()
                x_p = denorm(x_p).detach().cpu()

                image = image[:config.save.n_save_images, ...]
                x_r = x_r[:config.save.n_save_images, ...]
                x_p = x_p[:config.save.n_save_images, ...]

                save_path = os.path.join(
                    output_dir, 'result_{}.png'.format(engine.state.epoch))
                save_image(torch.cat([image, x_r, x_p]).data, save_path)
Example #25
def generate_images(model, label, output_dir, device):
    all_tag = torch.FloatTensor(label).to(device)  # one condition vector per label row
    for i in tqdm(range(len(label))):
        z = torch.randn(1, 100).to(device)
        tag = all_tag[i].unsqueeze(0)
        output = model(z, tag)
        save_image(utils.denorm(output),
                   os.path.join(output_dir, '{}.png'.format(i)))
Example #26
    def test_single(self, img_fn):
        # networks
        self.G = Generator(num_channels=self.num_channels,
                           base_filter=64,
                           num_residuals=16)

        if self.gpu_mode:
            self.G.cuda()

        # load model
        self.load_model()

        # load data
        img = Image.open(img_fn).convert('RGB')

        if self.num_channels == 1:
            img = img.convert('YCbCr')
            img_y, img_cb, img_cr = img.split()

            input = ToTensor()(img_y)
            y_ = Variable(utils.norm(input.unsqueeze(1), vgg=True))
        else:
            input = ToTensor()(img).view(1, -1, img.height, img.width)
            y_ = Variable(utils.norm(input, vgg=True))

        if self.gpu_mode:
            y_ = y_.cuda()

        # prediction
        self.G.eval()
        recon_img = self.G(y_)
        recon_img = utils.denorm(recon_img.cpu().data[0].clamp(0, 1), vgg=True)
        recon_img = ToPILImage()(recon_img)

        if self.num_channels == 1:
            # merge color channels with super-resolved Y-channel
            recon_y = recon_img
            recon_cb = img_cb.resize(recon_y.size, Image.BICUBIC)
            recon_cr = img_cr.resize(recon_y.size, Image.BICUBIC)
            recon_img = Image.merge(
                'YCbCr', [recon_y, recon_cb, recon_cr]).convert('RGB')

        # save img
        result_dir = os.path.join(self.save_dir, 'test_result')
        if not os.path.exists(result_dir):
            os.makedirs(result_dir)
        save_fn = result_dir + '/SR_result.png'
        recon_img.save(save_fn)

        print('Single test result image is saved.')
        return recon_img
Example #27
    def test(self):
        nrow = 8
        n_samples = nrow * nrow
        with torch.no_grad():
            for i, (x_real, noise, label) in enumerate(tqdm(self.test_loader)):
                if i == 10: break
                x_real = x_real.to(self.device)
                noise = noise.to(self.device)
                label = label.to(self.device)
                # profile FLOPs once, on the first batch only
                if i == 0:
                    from thop import profile
                    flops, params = profile(self.G,
                                            inputs=(noise[0].unsqueeze(0),
                                                    label[0].unsqueeze(0)))
                    print(
                        '======================================================================='
                    )
                    print('FLOPS: {:.2f} B, Params.: {:.1f} M'.format(
                        flops / 10**9, params / 10**6))
                    print(
                        '======================================================================='
                    )
                # recon
                x_fake = self.G(noise, label)
                comparison = torch.cat([x_real[:nrow], x_fake[:nrow].float()])
                sample_path = os.path.join(self.result_dir,
                                           '{}-rec.png'.format(i + 1))
                save_image(utils.denorm(comparison.cpu()), sample_path)

                # sample
                noise2 = torch.FloatTensor(utils.truncated_normal(n_samples*self.z_dim)) \
                     .view(n_samples, self.z_dim).to(self.device)
                label = label[:nrow].repeat(nrow)
                x_sample = self.G(noise2, label)
                sample_path = os.path.join(self.result_dir,
                                           '{}-sample.png'.format(i + 1))
                save_image(utils.denorm(x_sample), sample_path, nrow=nrow)
Example #28
def project_on_generator(
        G: Generator,
        pix2pix: networks.UnetGenerator,
        target_image: np.ndarray,
        E: Encoder,
        dcgan_img_size: int = 64,
        pix2pix_img_size: int = 128) -> Tuple[np.ndarray, torch.Tensor]:
    """Projects the input image onto the manifold span by the GAN. It operates as follows:
    1. reshape and normalize the image
    2. run the encoder to obtain a latent vector
    3. run the DCGAN generator to obtain a low resolution image
    4. run the Pix2Pix model to obtain a high resolution image
    
    Arguments:
        G {Generator} -- DCGAN generator
        pix2pix {networks.UnetGenerator} -- Low resolution to high resolution Pix2Pix model
        target_image {np.ndarray} -- The image to project
        E {Encoder} -- The DCGAN encoder
    
    Keyword Arguments:
        dcgan_img_size {int} -- Low resolution image size (default: {64})
        pix2pix_img_size {int} -- High resolution image size (default: {128})
    
    Returns:
        Tuple[np.ndarray, torch.Tensor] -- The projected high resolution image and the latent vector that was used to generate it.
    """
    # reshape and normalize image
    target_image = torch.Tensor(target_image).cuda().reshape(
        1, 3, pix2pix_img_size, pix2pix_img_size)
    target_image = F.interpolate(target_image,
                                 scale_factor=dcgan_img_size /
                                 pix2pix_img_size,
                                 mode='bilinear')
    target_image = target_image.clamp(min=0)
    target_image = target_image / target_image.max()
    target_image = (target_image - 0.5) / 0.5

    # Run dcgan
    z = E(target_image)
    dcgan_image = G(z)

    # run pix2pix
    pix_input = F.interpolate(dcgan_image,
                              scale_factor=pix2pix_img_size / dcgan_img_size,
                              mode='bilinear')
    pix_outputs = pix2pix(pix_input)
    out_image = utils.denorm(pix_outputs.detach()).clamp(
        0, 1).cpu().numpy().reshape(3, -1, 1)
    return out_image, z
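A hedged usage sketch for project_on_generator: the pretrained G, E, and pix2pix models are assumed to already exist (and CUDA to be available, as the function moves the image to the GPU); the random array is only a stand-in for a real 128x128 RGB image:

import numpy as np

target = np.random.rand(128, 128, 3).astype(np.float32)  # stand-in image
projected, z = project_on_generator(G, pix2pix, target, E)
print(projected.shape, z.shape)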
Example #29
def generateUsingHairEye(model, device, hairClasses, eyeClasses, lDim, hColor, eColor):
    vecSize=64
    htag = torch.zeros(vecSize, hairClasses).to(device)
    etag = torch.zeros(vecSize, eyeClasses).to(device)
    hairLabelIndex = hair_dict[hColor]
    eyeLabelIndex = eye_dict[eColor]
    for i in range(vecSize):
        htag[i][hairLabelIndex] = 1
        etag[i][eyeLabelIndex] = 1
    
    fulltag = torch.cat((htag, etag), 1)
    z = torch.randn(vecSize, lDim).to(device)
    
    output = model(z, fulltag)
    save_image(utils.denorm(output), '../generated/{} hair {} eyes.png'.format(hairClassesList[hairLabelIndex], eyeClassesList[eyeLabelIndex]))
Example #30
def make_save_sequence(opt, batch_dict, res):
    """ 4 cases: (interp, extrap) | (regular, irregular) """
    
    b, t, c, h, w = batch_dict['observed_data'].size()

    # Filter out / Select by mask
    if opt.irregular:
        observed_mask = batch_dict["observed_mask"]
        mask_predicted_data = batch_dict["mask_predicted_data"]
        selected_timesteps = int(observed_mask[0].sum())

        if opt.dataset in ['hurricane']:
            # boolean mask indexing (bool() is the non-deprecated form of byte())
            batch_dict['observed_data'] = batch_dict['observed_data'][observed_mask.squeeze(-1).bool(), ...].view(b, selected_timesteps, c, h, w)
            batch_dict['data_to_predict'] = batch_dict['data_to_predict'][mask_predicted_data.squeeze(-1).bool(), ...].view(b, selected_timesteps, c, h, w)
        else:
            batch_dict['observed_data'] = batch_dict['observed_data'] * observed_mask.unsqueeze(-1).unsqueeze(-1)
            batch_dict['data_to_predict'] = batch_dict['data_to_predict'] * mask_predicted_data.unsqueeze(-1).unsqueeze(-1)
        
    # Make sequence to save
    pred = res['pred_y'].cpu().detach()
    
    if opt.extrap:
        inputs = batch_dict['observed_data'].cpu().detach()
        gt_to_predict = batch_dict['data_to_predict'].cpu().detach()
        gt = torch.cat([inputs, gt_to_predict], dim=1)
    else:
        gt = batch_dict['data_to_predict'].cpu().detach()

    time_steps = None

    if opt.input_norm:
        gt = utils.denorm(gt)
        pred = utils.denorm(pred)
    
    return gt, pred, time_steps
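A hedged sketch of how Examples #21 and #30 might be used together in an evaluation loop; opt, batch_dict, and res (a dict with a 'pred_y' entry, as read by make_save_sequence) are assumed to come from the surrounding model code:

# All names besides the two functions above are assumed context.
gt, pred, _ = make_save_sequence(opt, batch_dict, res)
save_test_images(opt, res['pred_y'], batch_dict, path='./results', index=0)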