Example #1
def main(args):
    os.makedirs(args.outputs, exist_ok=True)
    generator = get_derivable_generator(args.gan_model, args.inversion_type,
                                        args)
    loss = get_loss(args.loss_type, args)
    generator.cuda()
    loss.cuda()
    inversion = get_inversion(args.optimization, args)
    image_list = image_files(args.target_images)
    frameSize = MODEL_POOL[args.gan_model]['resolution']

    for i, images in enumerate(split_to_batches(image_list, 1)):
        print('%d: Inverting %d images :' % (i + 1, 1), end='')
        pt_image_str = '%s\n'
        print(pt_image_str % tuple(images))

        image_name_list = []
        image_tensor_list = []
        for image in images:
            image_name_list.append(os.path.split(image)[1])
            image_tensor_list.append(_add_batch_one(load_as_tensor(image)))
        y_gt = _sigmoid_to_tanh(torch.cat(image_tensor_list, dim=0)).cuda()
        # Invert
        latent_estimates, history = inversion.invert(generator,
                                                     y_gt,
                                                     loss,
                                                     batch_size=1,
                                                     video=args.video)
        # Get Images
        y_estimate_list = torch.split(
            torch.clamp(_tanh_to_sigmoid(generator(latent_estimates)),
                        min=0., max=1.).cpu(),
            1, dim=0)
        # Save
        for img_id, image in enumerate(images):
            y_estimate_pil = Tensor2PIL(y_estimate_list[img_id])
            y_estimate_pil.save(
                os.path.join(args.outputs, image_name_list[img_id]))

            # Create video
            if args.video:
                print('Create GAN-Inversion video.')
                video = cv2.VideoWriter(filename=os.path.join(
                    args.outputs,
                    '%s_inversion.avi' % image_name_list[img_id]),
                                        fourcc=cv2.VideoWriter_fourcc(*'MJPG'),
                                        fps=args.fps,
                                        frameSize=(frameSize, frameSize))
                print('Save frames.')
                for i, sample in enumerate(history):
                    image = generator(sample)
                    image_cv2 = convert_array_to_images(
                        image.detach().cpu().numpy())[0][:, :, ::-1]
                    video.write(image_cv2)
                video.release()
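
The examples on this page lean on a handful of small helpers whose definitions are not shown: _add_batch_one, _sigmoid_to_tanh, and _tanh_to_sigmoid. A minimal sketch consistent with how they are used (an assumption, not the repository's actual code):

import torch

def _add_batch_one(tensor):
    # prepend a batch dimension: (C, H, W) -> (1, C, H, W)
    return tensor.unsqueeze(0)

def _sigmoid_to_tanh(x):
    # map [0, 1] image values into the generator's [-1, 1] range
    return x * 2.0 - 1.0

def _tanh_to_sigmoid(x):
    # map [-1, 1] generator output back to [0, 1] for saving
    return (x + 1.0) / 2.0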
Example #2
def report_reconstruction(images, gen, disc, Cs, gen_block_ids, disc_block_ids,
                          file_name):
    test_img = read_images_from_pathes(images, return_cuda=True)
    test_img_reshape = torch.from_numpy(
        resize_images(test_img.detach().cpu().numpy(), resize=256))
    test_img_rec = batch_run(2,
                             test_img * 2 - 1.0,
                             easy_forward,
                             gen,
                             disc,
                             Cs,
                             gen_block_ids,
                             disc_block_ids,
                             is_detach=True).detach()
    test_img_rec = torch.clamp(_tanh_to_sigmoid(test_img_rec), 0.0,
                               1.0).detach()
    test_img_rec = torch.from_numpy(
        resize_images(test_img_rec.detach().cpu().numpy(), resize=256))
    save_img = torch.cat([test_img_reshape, test_img_rec], dim=3).cpu()
    torchvision.utils.save_image(save_img, file_name, nrow=4, padding=0)
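
The torch.cat(..., dim=3) call above concatenates along the width axis, so each row of the saved grid shows a ground-truth image next to its reconstruction. A toy illustration with dummy tensors:

import torch

gt = torch.zeros(4, 3, 256, 256)     # ground-truth batch (N, C, H, W)
rec = torch.ones(4, 3, 256, 256)     # reconstructions of the same batch
pairs = torch.cat([gt, rec], dim=3)  # (4, 3, 256, 512): GT | reconstruction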
Example #3
def main(args):
    os.makedirs(args.outputs, exist_ok=True)
    generator = get_derivable_generator(args.gan_model, args.inversion_type,
                                        args)  # generator
    loss = get_loss(args.loss_type, args)
    sr_loss = SR_loss(loss, args.down, args.factor)  # how the SR loss is computed
    # to cuda
    generator.cuda()
    loss.cuda()
    inversion = get_inversion(args.optimization, args)
    image_list = image_files(args.target_images)
    frameSize = MODEL_POOL[args.gan_model]['resolution']

    for i, images in enumerate(split_to_batches(image_list, 1)):
        print('%d: Super-resolving %d images ' % (i + 1, 1), end='')
        pt_image_str = '%s\n'
        print(pt_image_str % tuple(images))

        image_name_list = []
        image_tensor_list = []
        for image in images:
            image_name_list.append(os.path.split(image)[1])
            # image = _add_batch_one(load_as_tensor(image))
            image = convert2target(_add_batch_one(load_as_tensor(image)),
                                   'nearest')  # resize so inputs of more resolutions can be handled
            image_tensor_list.append(image)
            # print("add..: ", _add_batch_one(load_as_tensor(image)).size())      # torch.Size([1, 3, 64, 64])

        y_gt = _sigmoid_to_tanh(torch.cat(image_tensor_list, dim=0)).cuda()
        # Invert
        latent_estimates, history = inversion.invert(generator,
                                                     y_gt,
                                                     sr_loss,
                                                     batch_size=BATCH_SIZE,
                                                     video=args.video)
        # Get Images
        # Feed the latent_estimates refined by the optimizer back into the
        # generator to produce images, then split off the batch dimension
        y_estimate_list = torch.split(
            torch.clamp(_tanh_to_sigmoid(generator(latent_estimates)),
                        min=0., max=1.).cpu(),
            1, dim=0)
        # Save results
        for img_id, image in enumerate(images):
            # up_nn, up_bic, down = downsample_images(image_tensor_list[img_id], factor=args.factor, mode=args.down)
            # y_nn_pil = Tensor2PIL(up_nn)        # the low-resolution image
            y_estimate_pil = Tensor2PIL(y_estimate_list[img_id])
            y_estimate_pil.save(
                os.path.join(
                    os.path.join(args.outputs,
                                 '%s.png' % image_name_list[img_id][:-4])))
            #y_nn_pil.save(os.path.join(os.path.join(args.outputs, '%s-nn.png' % image_name_list[img_id][:-4])))
            # Create video
            if args.video:
                print('Create GAN-Inversion video.')
                video = cv2.VideoWriter(filename=os.path.join(
                    args.outputs, '%s_sr.avi' % image_name_list[img_id][:-4]),
                                        fourcc=cv2.VideoWriter_fourcc(*'MJPG'),
                                        fps=args.fps,
                                        frameSize=(frameSize, frameSize))
                print('Save frames.')
                for i, sample in enumerate(history):
                    # generate an image from history (the latent estimates saved during optimization)
                    image = generator(sample)
                    image_cv2 = convert_array_to_images(
                        image.detach().cpu().numpy())[0][:, :, ::-1]
                    video.write(image_cv2)
                video.release()
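
SR_loss presumably wraps the base loss so that the generator's high-resolution output is downsampled (per args.down and args.factor) before being compared with the low-resolution target. A minimal sketch of that idea, under that assumption:

import torch
import torch.nn.functional as F

def sr_mse(estimate, lr_target, factor=8, mode='bicubic'):
    # downsample the high-res GAN output to the target resolution,
    # then penalize the difference in the low-resolution domain
    down = F.interpolate(estimate, scale_factor=1.0 / factor, mode=mode)
    return torch.mean((down - lr_target) ** 2)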
Example #4
def main(args):
    os.makedirs(args.outputs, exist_ok=True)
    out_dir, exp_name = create_experiments_directory(args, args.exp_id)
    print(out_dir)
    print(exp_name)
    generator = get_derivable_generator(args.gan_model, args.inversion_type,
                                        args)
    generator.cuda()

    if args.target_images.endswith('.png') or args.target_images.endswith(
            '.jpg'):
        image_list = os.path.abspath(args.target_images)
        image_list = [image_list]
    else:
        image_list = image_files(args.target_images)
    frameSize = MODEL_POOL[args.gan_model]['resolution']
    n_blocks = generator.n_blocks
    print('There are %d blocks in this generator.' % n_blocks)  # 19 for pggan

    latent_space = generator.PGGAN_LATENT[args.layer]
    print('The latent space is ', latent_space)

    with open(args.matrix_dir, 'rb') as file_in:
        matrix = pkl.load(file_in)
        print('Load matrix successfully.')
        print('Matrix shape ', matrix.shape)

    matrix2 = thrC(matrix, args.alpha)
    predict, _ = post_proC(matrix2, args.n_subs, args.d_subs, args.power)
    print(predict)
    p_sum = [sum(predict == k) for k in range(1, args.cluster_numbers + 1, 1)]
    p_sum = np.array(p_sum)
    p_sort = np.argsort(p_sum)[::-1]
    print(p_sum)
    predict_new = predict.copy()
    for i in range(1, args.cluster_numbers + 1, 1):
        predict_new[predict == (p_sort[i - 1] + 1)] = i
    predict = predict_new.copy()
    p_sum = [sum(predict == k) for k in range(1, args.cluster_numbers + 1, 1)]
    print(predict)
    print(p_sum)

    # preview the images
    gan_type, image_type = args.gan_model.split("_")
    print('The gan type is %s, and the image type is %s' %
          (gan_type, image_type))
    test_image_dir = os.path.join('./bin/', gan_type, image_type)
    print(test_image_dir)

    files = os.listdir(test_image_dir)
    test_zs = []
    for i in range(len(files)):
        if files[i].endswith('.pkl'):
            with open(os.path.join(test_image_dir, files[i]), 'rb') as file_in:
                test_zs.append(pkl.load(file_in))
    test_zs = torch.from_numpy(
        np.concatenate(test_zs, axis=0).astype(np.float32)).cuda()
    print('Load all testing zs, shape is ', test_zs.size())

    image_number = 3
    sel_idx = np.random.choice(test_zs.shape[0],
                               size=[image_number],
                               replace=False)
    F = generator([test_zs[sel_idx]], which_block=args.layer, pre_model=True)
    features = F.detach().cpu().numpy()

    predict_masks = []
    for i in range(1, args.cluster_numbers + 1, 1):
        mask = torch.from_numpy((predict == i).astype(np.float32)).cuda()
        predict_masks += [mask.reshape((1, -1, 1, 1))]

    for i, images in enumerate(split_to_batches(image_list, 1)):
        # input_size = generator.PGGAN_LATENT[args.layer + 1]
        # print("We are making ", input_size, ". ")
        print('%d: Inverting %d images :' % (i + 1, 1), end='')
        pt_image_str = '%s\n'
        print(pt_image_str % tuple(images))
        image_name_only = images[0].split(".")[0]
        image_name_only = image_name_only.split("/")[-1]
        print(image_name_only)

        image_name_list = []
        image_tensor_list = []
        for image in images:
            image_name_list.append(os.path.split(image)[1])
            image_tensor_list.append(_add_batch_one(load_as_tensor(image)))
        print("image_name_list", image_name_list)
        print("image_tensor_list, [", image_tensor_list[0].size(), "]")

        y_image = _sigmoid_to_tanh(torch.cat(image_tensor_list, dim=0)).cuda()
        print('image size is ', y_image.size())

        z_estimate = generator.init_value(batch_size=1,
                                          which_layer=0,
                                          init=args.init_type,
                                          z_numbers=args.cluster_numbers *
                                          args.code_per_cluster)
        base_estimate = generator.init_value(batch_size=1,
                                             which_layer=0,
                                             init=args.init_type,
                                             z_numbers=1)

        if args.optimization == 'GD':
            z_optimizer = torch.optim.SGD(z_estimate + base_estimate,
                                          lr=args.lr)
        elif args.optimization == 'Adam':
            z_optimizer = torch.optim.Adam(z_estimate + base_estimate,
                                           lr=args.lr)
        else:
            raise NotImplementedError(
                'We don\'t support this type of optimization.')

        for iter in range(args.iterations):
            for estimate in z_estimate:
                estimate.requires_grad = True
            for estimate in base_estimate:
                estimate.requires_grad = True

            features = generator([
                z_estimate[0].reshape(
                    [args.cluster_numbers * args.code_per_cluster, 512, 1, 1])
            ],
                                 which_block=args.layer,
                                 pre_model=True)
            base_feature = generator(
                [base_estimate[0].reshape([1, 512, 1, 1])],
                which_block=args.layer,
                pre_model=True)

            for t in range(args.cluster_numbers * args.code_per_cluster):
                mask = predict_masks[t // args.code_per_cluster]
                if t == 0:
                    f_mix = features[t].view(*(1, ) + latent_space) * mask
                else:
                    f_mix = f_mix + features[t].view(*(1, ) + latent_space) * mask

            f_mix = f_mix + base_feature
            y_estimate = generator([f_mix],
                                   which_block=args.layer,
                                   post_model=True)
            y_raw_estimate = generator([base_feature],
                                       which_block=args.layer,
                                       post_model=True)
            z_optimizer.zero_grad()
            loss = 0.01 * torch.mean(torch.pow(
                y_estimate - y_image, 2.0)) + torch.mean(
                    torch.pow(y_raw_estimate - y_image, 2.0))
            loss.backward()
            z_optimizer.step()

            if iter % args.report_value == 0:
                print('Iter %d, layer %d, loss = %.4f.' %
                      (iter, args.layer, float(loss.item())))

            if iter % args.report_image == 0:
                print('Saving the images.')
                y_estimate_pil = Tensor2PIL(
                    torch.clamp(_tanh_to_sigmoid(y_estimate.detach().cpu()),
                                min=0.0,
                                max=1.0))
                y_estimate_pil.save(
                    os.path.join(
                        out_dir,
                        image_name_only + "_estimate_iter%d.png" % iter))
                y_estimate_pil = Tensor2PIL(
                    torch.clamp(_tanh_to_sigmoid(
                        y_raw_estimate.detach().cpu()),
                                min=0.0,
                                max=1.0))
                y_estimate_pil.save(
                    os.path.join(
                        out_dir,
                        image_name_only + "_raw_estimate_iter%d.png" % iter))
                # add bias added output picture.
                # save all the codes
                codes = []
                for code_idx in range(args.cluster_numbers *
                                      args.code_per_cluster):
                    code_f = generator([z_estimate[0][code_idx]],
                                       which_block=args.layer + 1,
                                       pre_model=True)
                    code_y = generator([code_f],
                                       which_block=args.layer + 1,
                                       post_model=True).detach().cpu()
                    codes.append(
                        torch.clamp(_tanh_to_sigmoid(code_y), min=0, max=1))
                codes = torch.cat(codes, dim=0).detach().cpu()
                torchvision.utils.save_image(
                    codes,
                    os.path.join(
                        out_dir,
                        image_name_only + '_codes_iter%d.png' % (iter)),
                    nrow=(args.cluster_numbers * args.code_per_cluster) // 2)

            if iter % args.report_model == 0:
                print('Save the models')
                save_dict = {
                    "z": z_estimate[0].detach().cpu().numpy(),
                    'matrix': matrix,
                    'layer': args.layer,
                    'predict': predict
                }
                with open(
                        os.path.join(
                            out_dir, 'save_dict_iter_%d_layer_%d.pkl' %
                            (iter, args.layer)), 'wb') as file_out:
                    pkl.dump(save_dict, file_out)
                print('Save the models OK!')
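
Example #4 blends features from several latent codes using channel masks derived from the clustering: each mask selects the channels assigned to one cluster, so together the masks partition the channel axis. A toy illustration (shapes are assumptions):

import torch

predict = torch.tensor([1, 1, 2, 2])  # cluster label per channel
masks = [(predict == k).float().reshape(1, -1, 1, 1) for k in (1, 2)]
f1 = torch.randn(1, 4, 8, 8)
f2 = torch.randn(1, 4, 8, 8)
mixed = f1 * masks[0] + f2 * masks[1]  # channels 0-1 from f1, channels 2-3 from f2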
Example #5
    def invert(self,
               generator,
               gt_image,
               loss_function,
               batch_size=1,
               video=True,
               init=(),
               z_iterations=3000,
               args=None,
               out_dir=None,
               image_name=None):
        input_size_list = generator.input_size()
        if len(init) == 0:
            if generator.init is False:
                latent_estimate = []
                for input_size in input_size_list:
                    if self.init_type == 'Zero':
                        print("Zero")
                        latent_estimate.append(
                            torch.zeros((batch_size, ) + input_size).cuda())
                    elif self.init_type == 'Normal':
                        latent_estimate.append(
                            torch.randn((batch_size, ) + input_size).cuda())
            else:
                latent_estimate = list(generator.init_value(batch_size))
        else:
            assert len(init) == len(
                input_size_list), 'Please check the number of init value'
            latent_estimate = init

        for latent in latent_estimate:
            latent.requires_grad = True  # 19 PGGAN - 0: 1   4 - 18: 1e-8 x 10 = 1e-6
        print(latent_estimate[0].requires_grad)
        optimizer = self.optimizer(latent_estimate, lr=self.lr)

        history = []
        # Opt
        for i in tqdm(range(z_iterations)):
            y_estimate = generator(latent_estimate)
            if args is not None and i % args.report_image == 0:
                if out_dir is not None:
                    y_estimate_pil = Tensor2PIL(
                        torch.clamp(_tanh_to_sigmoid(
                            y_estimate.detach().cpu()),
                                    min=0.0,
                                    max=1.0))
                    y_estimate_pil.save(
                        os.path.join(
                            out_dir,
                            image_name.split('.')[0] + "_" + str(i) +
                            "_layer_" + str(args.composing_layer) + ".png"))
                    zs = latent_estimate[0].detach().cpu().numpy()
                    ps = latent_estimate[1].detach().cpu().numpy()
                    save_dict = {'zs': zs, 'ps': ps}
                    with open(
                            os.path.join(
                                out_dir,
                                image_name.split('.')[0] + "_" + str(i) +
                                "_layer_" + str(args.composing_layer) +
                                ".pkl"), 'wb') as file_out:
                        pkl.dump(save_dict, file_out)

            optimizer.zero_grad()
            loss = loss_function(y_estimate, gt_image)
            loss.backward()
            optimizer.step()
            if video:
                history.append(copy.deepcopy(latent_estimate))

        return latent_estimate, history
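
A hedged sketch of how this invert method might be called; the inversion object, generator, target tensor, and loss follow the conventions of the other examples, while out_dir and image_name are placeholders:

latent_estimates, history = inversion.invert(generator,
                                             y_gt,
                                             loss,
                                             batch_size=1,
                                             video=True,
                                             z_iterations=3000,
                                             args=args,
                                             out_dir='outputs',
                                             image_name='face.png')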
Example #6
def post_process_image(input_img, resize=256):
    test_img_rec = torch.clamp(_tanh_to_sigmoid(input_img), 0.0, 1.0).detach()
    test_img_rec = torch.from_numpy(
        resize_images(test_img_rec.cpu().numpy(), resize=resize))
    return test_img_rec
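
A hypothetical usage of post_process_image, assuming a generator whose output lies in [-1, 1] (generator and latent_estimates are assumptions):

import torch
import torchvision

with torch.no_grad():
    raw = generator(latent_estimates)      # generator output in [-1, 1]
img = post_process_image(raw, resize=256)  # values in [0, 1], resized to 256
torchvision.utils.save_image(img, 'reconstruction.png', nrow=4)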
Example #7
def main(args):
    os.makedirs(args.outputs + '/input', exist_ok=True)
    os.makedirs(args.outputs + '/GT', exist_ok=True)
    os.makedirs(args.outputs + '/mGANoutput', exist_ok=True)
    with open(args.outputs + '/mGANargs.txt', 'w') as f:
        json.dump(args.__dict__, f, indent=2)
    generator = get_derivable_generator(args.gan_model, args.inversion_type,
                                        args)
    loss = get_loss(args.loss_type, args)
    mask = parsing_mask('code/mganprior/masks/' + args.mask).cuda()
    mask_cpu = parsing_mask('code/mganprior/masks/' + args.mask)
    crop_loss = masked_loss(loss, mask)
    generator.cuda()
    loss.cuda()
    inversion = get_inversion(args.optimization, args)
    image_list = image_files(args.target_images)
    if len(image_list) > 300:
        print('Limiting the image set to 300.')
        image_list = image_list[:300]
    frameSize = MODEL_POOL[args.gan_model]['resolution']

    start_time = time.time()
    for i, images in enumerate(split_to_batches(image_list, 1)):
        print('%d: Processing %d images :' % (i, 1), end='')
        pt_image_str = '%s\n'
        print(pt_image_str % tuple(images))

        image_name_list = []
        image_tensor_list = []
        for image in images:
            image_name_list.append(os.path.split(image)[1])
            image_tensor_list.append(_add_batch_one(load_as_tensor(image)))
        y_gt = _sigmoid_to_tanh(torch.cat(image_tensor_list, dim=0)).cuda()
        # Invert
        if args.varmask:
            os.makedirs(args.outputs + '/mask', exist_ok=True)
            mask_cpu = get_var_mask(y_gt.shape[-2:], args.min_p, args.max_p,
                                    args.width_mean, args.width_var)
            mask = mask_cpu.cuda()
            save_image(mask,
                       os.path.join(args.outputs + '/mask/%d%s' % (i, '.png')))
            crop_loss = masked_loss(loss, mask)

        latent_estimates, history = inversion.invert(generator,
                                                     y_gt,
                                                     crop_loss,
                                                     batch_size=1,
                                                     video=args.video)
        # Get Images
        y_estimate_list = torch.split(
            torch.clamp(_tanh_to_sigmoid(generator(latent_estimates)),
                        min=0., max=1.).cpu(),
            1, dim=0)
        # Save
        for img_id, image in enumerate(images):
            y_RGB = Tensor2PIL(image_tensor_list[img_id])
            y_RGB.save(args.outputs + '/GT/%d%s' %
                       (i, image_name_list[img_id][-4:]))

            y_gt_pil = Tensor2PIL(
                mask_images(image_tensor_list[img_id], mask_cpu))
            y_estimate_pil = Tensor2PIL(y_estimate_list[img_id])
            y_estimate_pil.save(
                os.path.join(args.outputs + '/mGANoutput/%d%s' %
                             (i, image_name_list[img_id][-4:])))

            y_gt_pil.save(
                os.path.join(args.outputs + '/input/%d%s' %
                             (i, image_name_list[img_id][-4:])))
            # Create video
            if args.video:
                print('Create GAN-Inversion video.')
                video = cv2.VideoWriter(filename=os.path.join(
                    args.outputs, '%s_inpainting_%s.avi' %
                    (image_name_list[img_id][:-4], os.path.split(
                        args.mask[:-4])[1])),
                                        fourcc=cv2.VideoWriter_fourcc(*'MJPG'),
                                        fps=args.fps,
                                        frameSize=(frameSize, frameSize))
                print('Save frames.')
                for i, sample in enumerate(history):
                    image = generator(sample)
                    image_cv2 = convert_array_to_images(
                        image.detach().cpu().numpy())[0][:, :, ::-1]
                    video.write(image_cv2)
                video.release()

    print(f'{(time.time()-start_time)/60:.2f}', 'minutes taken in total;',
          f'{(time.time()-start_time)/60/len(image_list):.2f}', 'per image.')
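
The masked_loss wrapper used above presumably restricts the reconstruction error to the visible pixels, so the GAN prior is free to fill in the masked region. A minimal sketch of that idea (assumed semantics, not the repository's implementation):

import torch

def masked_mse(estimate, target, mask):
    # mask is 1 where pixels are known and 0 where they are missing;
    # only the visible region contributes to the loss
    return torch.mean(mask * (estimate - target) ** 2)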
Example #8
def main(args):
    os.makedirs(args.outputs, exist_ok=True)
    out_dir, exp_name = create_experiments_directory(args, args.exp_id)
    print(out_dir)
    print(exp_name)
    generator = get_derivable_generator(args.gan_model, args.inversion_type, args)
    generator.cuda()

    print('There are %d blocks in this generator.' % generator.n_blocks)  # 19 for pggan
    latent_space = generator.PGGAN_LATENT[args.layer]
    print('The latent space is ', latent_space)

    Matrix = torch.ones([latent_space[0], latent_space[0]], dtype=torch.float32).cuda() * 0.0001
    Matrix.requires_grad = True

    gan_type, image_type = args.gan_model.split("_")
    print('The gan type is %s, and the image type is %s' % (gan_type, image_type))
    test_image_dir = os.path.join('./bin/', gan_type, image_type)
    print(test_image_dir)

    if args.optimization == 'GD':
        optimizer = torch.optim.SGD([Matrix], lr=args.lr)
    elif args.optimization == 'Adam':
        optimizer = torch.optim.Adam([Matrix], lr=args.lr)
    else:
        raise NotImplementedError('Not implemented.')

    files = os.listdir(test_image_dir)
    test_zs = []
    for i in range(len(files)):
        if files[i].endswith('.pkl'):
            with open(os.path.join(test_image_dir, files[i]), 'rb') as file_in:
                test_zs.append(pkl.load(file_in))
    test_zs = torch.from_numpy(np.concatenate(test_zs, axis=0).astype(np.float32)).cuda()
    print(test_zs.size())

    losses_dict = {'total_loss': [], 'feature_loss': [], 'data_loss': [], 'sparse_loss': []}

    for iter in range(args.iterations):
        if args.beta0 > 0:
            Z = torch.randn([args.batch_size, 512, 1, 1], dtype=torch.float32).cuda()
            F = generator([Z], which_block=args.layer, pre_model=True)
            F_reshape = F.transpose(0, 1).reshape((latent_space[0], -1))
            F_rec = torch.matmul(Matrix * (1. - torch.eye(n=latent_space[0], m=latent_space[0]).cuda()), F_reshape).\
                reshape((latent_space[0], args.batch_size,) + tuple(latent_space[1:])).transpose(0, 1)
            X_rec = generator([F_rec], which_block=args.layer, post_model=True)
            X = generator([F], which_block=args.layer, post_model=True)

            optimizer.zero_grad()
            if args.sparse_type == 'L2':
                sparse_loss = torch.mean(torch.pow(Matrix * (1. - torch.eye(n=latent_space[0], m=latent_space[0]).cuda()), 2.0))
            elif args.sparse_type == 'L1':
                sparse_loss = torch.mean(torch.abs(Matrix * (1. - torch.eye(n=latent_space[0], m=latent_space[0]).cuda())))
            else:
                raise NotImplementedError('Type not implemented.')

            feature_loss = torch.mean(torch.pow(F - F_rec, 2.0))
            data_loss = torch.mean(torch.pow(X - X_rec, 2.0))
            loss = feature_loss + args.beta0 * data_loss + args.beta1 * sparse_loss
            loss.backward()
            optimizer.step()

        elif args.beta0 == 0:
            Z = torch.randn([args.batch_size, 512, 1, 1], dtype=torch.float32).cuda()
            F = generator([Z], which_block=args.layer, pre_model=True)
            F_reshape = F.transpose(0, 1).reshape((latent_space[0], -1))
            F_rec = torch.matmul(Matrix * (1. - torch.eye(n=latent_space[0], m=latent_space[0]).cuda()), F_reshape). \
                reshape((latent_space[0], args.batch_size,) + tuple(latent_space[1:])).transpose(0, 1)

            optimizer.zero_grad()
            if args.sparse_type == 'L2':
                sparse_loss = torch.mean(
                    torch.pow(Matrix * (1. - torch.eye(n=latent_space[0], m=latent_space[0]).cuda()), 2.0))
            elif args.sparse_type == 'L1':
                sparse_loss = torch.mean(
                    torch.abs(Matrix * (1. - torch.eye(n=latent_space[0], m=latent_space[0]).cuda())))
            else:
                raise NotImplementedError('Type not implemented.')
            feature_loss = torch.mean(torch.pow(F - F_rec, 2.0))
            loss = feature_loss + args.beta1 * sparse_loss
            loss.backward()
            optimizer.step()
        else:
            raise NotImplementedError()

        if iter % args.report_value == 0:
            if args.beta0 == 0:
                print('Iter %d, Layer %d, loss=%.6f, f_loss=%.6f, sparse_loss=%.6f' %
                      (iter, args.layer, float(loss.item()), float(feature_loss.item()), float(sparse_loss.item())))
            else:
                print('Iter %d, Layer %d, loss=%.6f, f_loss=%.6f, x_loss=%.6f, sparse_loss=%.6f' %
                      (iter, args.layer, float(loss.item()), float(feature_loss.item()),
                       float(data_loss.item()), float(sparse_loss.item())))
                losses_dict['total_loss'].append(float(loss.item()))
                losses_dict['feature_loss'].append(float(feature_loss.item()))
                losses_dict['data_loss'].append(float(data_loss.item()))
                losses_dict['sparse_loss'].append(float(sparse_loss.item()))

        if iter % args.report_image == 0:
            # save reconstruction images.
            if args.beta0 == 0:
                X_rec = generator([F_rec], which_block=args.layer, post_model=True).detach()
                X = generator([F], which_block=args.layer, post_model=True).detach()

            test_feature = generator([test_zs[:4]], which_block=args.layer, pre_model=True).detach()
            test_feature_reshape = test_feature.transpose(0, 1).reshape((latent_space[0], -1))
            test_feature_rec = torch.matmul(Matrix * (1. - torch.eye(n=latent_space[0], m=latent_space[0]).cuda()),
                                            test_feature_reshape). \
                reshape((latent_space[0], args.batch_size,) + tuple(latent_space[1:])).transpose(0, 1)
            test_rec = generator([test_feature_rec], which_block=args.layer, post_model=True).detach()
            test_images = generator([test_feature], which_block=args.layer, post_model=True).detach()
            image_number = min(64, args.batch_size)
            Xs = torch.clamp(_tanh_to_sigmoid(torch.cat([test_rec,
                                                         test_images], dim=0).detach().cpu()), min=0.0, max=1.0)
            torchvision.utils.save_image(Xs, os.path.join(out_dir, 'ReconstructionImages_%d.png' % iter), nrow=4)

            # visualize the affinity matrix.
            Matrix_abs = torch.relu(Matrix)
            S_val = Matrix_abs.detach().cpu().numpy()
            S_val = thrC(S_val.copy().T, args.alpha).T

            #S_val_norm = S_val / np.sum(S_val, axis=1, keepdims=True)
            #S_val_norm = S_val_norm / np.sum(S_val_norm, axis=0, keepdims=True)
            #S_val_visual = np.clip(S_val_norm, 0, args.times * 1/512)
            #S_val_visual = S_val_visual / np.max(S_val_visual, axis=1, keepdims=True)
            #plt.figure()
            #plt.imshow(S_val_visual, cmap='Oranges')
            #plt.savefig(os.path.join(out_dir, 'thrd_matrix_iter_%d.png' % iter))

            predict, L_val = post_proC(S_val, args.n_subs, args.d_subs, args.power)

            p_sum = [sum(predict == k) for k in range(1, args.n_subs+1, 1)]
            p_sum = np.array(p_sum)
            print(p_sum)
            p_sort = np.argsort(p_sum)[::-1]
            predict_new = predict.copy()
            for i in range(1, args.n_subs+1, 1):
                predict_new[predict == (p_sort[i - 1] + 1)] = i
            predict = predict_new.copy()
            p_sum = [sum(predict == k) for k in range(1, args.n_subs+1, 1)]
            print(predict)
            print(p_sum)

            S_val_blockized = switch_matrix(S_val.copy(), predict.copy())
            # S_val_blockized = np.clip(S_val_blockized, 0, S_val_blockized.mean() * args.times)
            torchvision.utils.save_image(torch.from_numpy(S_val_blockized + S_val_blockized.T),
                                         os.path.join(out_dir, 'SwitchedMatrix%d.png' % iter), nrow=1,
                                         normalize=True)
            sel_idx = np.random.choice(test_zs.shape[0], size=[image_number], replace=False)
            F = generator([test_zs[sel_idx]], which_block=args.layer, pre_model=True).detach()
            features = F.detach().cpu().numpy()

            for class_i in range(1, args.n_subs+1, 1):
                ex_images = []
                for ii in range(image_number):
                    ex_rows = []
                    for jj in range(image_number):
                        f_a = features[ii].copy()
                        f_b = features[jj].copy()
                        f_a[predict == class_i] = f_b[predict == class_i]
                        f_a = f_a.reshape((1, ) + latent_space)
                        ex_rows.append(f_a)
                    ex_rows = np.concatenate(ex_rows, axis=0).astype(np.float32)
                    ex_rows = torch.from_numpy(ex_rows).cuda()
                    ex_ys = generator([ex_rows], which_block=args.layer, post_model=True).detach()
                    ex_ys = torch.clamp(_tanh_to_sigmoid(ex_ys), min=0.0, max=1.0)
                    ex_images.append(ex_ys)
                ex_images = torch.cat(ex_images, dim=0)
                torchvision.utils.save_image(ex_images.detach().cpu(), os.path.join(out_dir,
                                                                                    'layer_%d_class_%d_iter_%d.png' %
                                                                                    (args.layer, class_i, iter)),
                                             nrow=image_number)
                # the subspace mask of class_i, save it.
                subspace_i = predict == class_i
                with open(os.path.join(out_dir, 'subspace_mask_layer%d_iter%d_class%d.pkl' %
                                                (args.layer, iter, class_i)), 'wb') as file_out:
                    pkl.dump(subspace_i, file_out)
                    print('Save layer %d iter %d class %d, out_dir=%s.' % (args.layer, iter, class_i, out_dir))

        if iter % args.report_model == 0:
            with open(os.path.join(out_dir, 'value%d_layer%d.pkl'%(iter, args.layer)), 'wb') as file_out:
                pkl.dump(Matrix.detach().cpu().numpy(), file_out)
                print('Save S.')

            with open(os.path.join(out_dir, 'loss_dict%d_layer%d.pkl'%(iter, args.layer)), 'wb') as file_out:
                pkl.dump(losses_dict, file_out)
                print('Save dict.')
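
The recurring expression Matrix * (1. - torch.eye(...)) in Example #8 zeroes the diagonal of the affinity matrix, so no feature channel is allowed to reconstruct itself from itself. A small illustration:

import torch

M = torch.full((4, 4), 0.5)
off_diag = M * (1. - torch.eye(4))  # diagonal forced to zero
# each row now expresses one channel as a combination of the *other* channels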
Example #9
def run(args):
    os.makedirs(args.outputs, exist_ok=True)  # create the output directory; skip if it already exists
    # generator
    generator = get_derivable_generator(args.gan_model, args.inversion_type,
                                        args)
    loss = get_loss(args.loss_type, args)  # loss function
    generator.cuda()  # PyTorch modules must be moved onto the GPU manually
    loss.cuda()
    inversion = get_inversion(args.optimization, args)
    image_list = image_files(args.target_images)  # collect the input image paths
    frameSize = MODEL_POOL[args.gan_model]['resolution']  # resolution of the model's output

    # process the images batch by batch
    for i, images in enumerate(split_to_batches(image_list, 1)):
        print('%d: Inverting %d images :' % (i + 1, 1), end='')
        # pt_image_str = '%s\n'
        print('%s\n' % tuple(images))

        image_name_list = []
        image_tensor_list = []
        for image in images:
            image_name_list.append(os.path.split(image)[1])
            image_tensor_list.append(_add_batch_one(load_as_tensor(image)))
        # torch.cat(tensors, dim=0, out=None) → Tensor
        # tensors (sequence of Tensors) – any python sequence of tensors of the same type. Non-empty tensors provided must have the same shape, except in the cat dimension.
        # dim (int, optional) – the dimension over which the tensors are concatenated
        # out (Tensor, optional) – the output tensor.
        y_gt = _sigmoid_to_tanh(
            torch.cat(image_tensor_list,
                      dim=0)).cuda()  # concatenate all tensors along dim 0 and map values to [-1, 1]
        # Inverse mapping: generate the image tensor
        latent_estimates, history = inversion.invert(generator,
                                                     y_gt,
                                                     loss,
                                                     batch_size=BATCH_SIZE,
                                                     video=args.video)
        # Map the range from [-1, 1] to [0, 1]; torch.clamp() further guarantees values lie in [0, 1]
        y_estimate_list = torch.split(
            torch.clamp(_tanh_to_sigmoid(generator(latent_estimates)),
                        min=0., max=1.).cpu(),
            1, dim=0)
        # Save
        for img_id, image in enumerate(images):
            y_estimate_pil = Tensor2PIL(
                y_estimate_list[img_id])  # convert from tensor to PIL image, then save
            y_estimate_pil.save(
                os.path.join(args.outputs, image_name_list[img_id]))

            # Create video
            if args.video:
                print('Create GAN-Inversion video.')
                video = cv2.VideoWriter(filename=os.path.join(
                    args.outputs,
                    '%s_inversion.avi' % image_name_list[img_id]),
                                        fourcc=cv2.VideoWriter_fourcc(*'MJPG'),
                                        fps=args.fps,
                                        frameSize=(frameSize, frameSize))
                print('Save frames.')
                for i, sample in enumerate(history):
                    image = generator(sample)
                    image_cv2 = convert_array_to_images(
                        image.detach().cpu().numpy())[0][:, :, ::-1]
                    video.write(image_cv2)
                video.release()
Example #10
def main(args):
    os.makedirs(args.outputs, exist_ok=True)
    generator = get_derivable_generator(args.gan_model, args.inversion_type,
                                        args)
    loss = get_loss(args.loss_type, args)
    cor_loss = Color_loss(loss)
    generator.cuda()
    loss.cuda()
    inversion = get_inversion(args.optimization, args)
    image_list = image_files(args.target_images)
    frameSize = MODEL_POOL[args.gan_model]['resolution']

    for i, images in enumerate(split_to_batches(image_list, 1)):
        print('%d: Processing %d images :' % (i + 1, 1), end='')
        pt_image_str = '%s\n'
        print(pt_image_str % tuple(images))

        image_name_list = []
        image_tensor_list = []
        for image in images:
            image_name_list.append(os.path.split(image)[1])
            image_tensor_list.append(_add_batch_one(load_as_tensor(image)))
        y_gt = _sigmoid_to_tanh(torch.cat(image_tensor_list, dim=0)).cuda()
        # Invert
        latent_estimates, history = inversion.invert(generator,
                                                     y_gt,
                                                     cor_loss,
                                                     batch_size=1,
                                                     video=args.video)
        # Get Images
        y_estimate_list = torch.split(
            torch.clamp(_tanh_to_sigmoid(generator(latent_estimates)),
                        min=0., max=1.).cpu(),
            1, dim=0)
        # Save
        for img_id, image in enumerate(images):
            up_gray = colorization_images(image_tensor_list[img_id])
            y_gray_pil = Tensor2PIL(up_gray, mode='L')
            y_gray_pil.save(
                os.path.join(args.outputs,
                             '%s-%s.png' % (image_name_list[img_id], 'gray')))

            Y_gt = Tensor2PIL(image_tensor_list[img_id],
                              mode='RGB').convert('YCbCr')
            y_estimate_pil = Tensor2PIL(y_estimate_list[img_id],
                                        mode='RGB').convert('YCbCr')

            _, Cb, Cr = y_estimate_pil.split()
            Y, _, _ = Y_gt.split()
            y_colorization = Image.merge('YCbCr', (Y, Cb, Cr))
            y_colorization.convert('RGB').save(
                os.path.join(
                    args.outputs, '%s-%d.png' %
                    (image_name_list[img_id], math.floor(time.time()))))
            # Create video
            if args.video:
                print('Create GAN-Inversion video.')
                video = cv2.VideoWriter(filename=os.path.join(
                    args.outputs,
                    '%s_inversion.avi' % image_name_list[img_id]),
                                        fourcc=cv2.VideoWriter_fourcc(*'MJPG'),
                                        fps=args.fps,
                                        frameSize=(frameSize, frameSize))
                print('Save frames.')
                for i, sample in enumerate(history):
                    image = torch.clamp(_tanh_to_sigmoid(generator(sample)),
                                        min=0.,
                                        max=1.).cpu()
                    image_pil = Tensor2PIL(image, mode='RGB').convert('YCbCr')
                    _, Cb, Cr = image_pil.split()
                    y_colorization = Image.merge('YCbCr',
                                                 (Y, Cb, Cr)).convert('RGB')
                    image_cv2 = cv2.cvtColor(np.asarray(y_colorization),
                                             cv2.COLOR_RGB2BGR)
                    # image_cv2 = cv2.cvtColor(np.asarray(image_pil.convert('RGB')), cv2.COLOR_RGB2BGR)
                    video.write(image_cv2)
                video.release()
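
The colorization trick in Example #10 reduces to a few lines of PIL: keep the luminance (Y) channel of the ground-truth input and borrow the chrominance (Cb, Cr) channels from the GAN reconstruction. A standalone sketch with placeholder file names:

from PIL import Image

gt_ycc = Image.open('input.png').convert('YCbCr')
est_ycc = Image.open('gan_output.png').convert('YCbCr')
Y, _, _ = gt_ycc.split()
_, Cb, Cr = est_ycc.split()
Image.merge('YCbCr', (Y, Cb, Cr)).convert('RGB').save('colorized.png')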
Example #11
def main(args):
    os.makedirs(args.outputs + '/input', exist_ok=True)
    os.makedirs(args.outputs + '/GT', exist_ok=True)
    os.makedirs(args.outputs + '/mGANoutput', exist_ok=True)
    with open(args.outputs + '/mGANargs.txt', 'w') as f:
        json.dump(args.__dict__, f, indent=2)
    generator = get_derivable_generator(args.gan_model, args.inversion_type, args)
    loss = get_loss(args.loss_type, args)
    cor_loss = Color_loss(loss)
    generator.cuda()
    loss.cuda()
    inversion = get_inversion(args.optimization, args)
    image_list = image_files(args.target_images)
    if len(image_list) > 300:
        print('Limiting the image set to 300.')
        image_list = image_list[:300]
    frameSize = MODEL_POOL[args.gan_model]['resolution']

    start_time = time.time()
    for i, images in enumerate(split_to_batches(image_list, 1)):
        print('%d: Processing %d images :' % (i, 1), end='')
        pt_image_str = '%s\n'
        print(pt_image_str % tuple(images))

        image_name_list = []
        image_tensor_list = []
        for image in images:
            image_name_list.append(os.path.split(image)[1])
            image_tensor_list.append(_add_batch_one(load_as_tensor(image)))
        y_gt = _sigmoid_to_tanh(torch.cat(image_tensor_list, dim=0)).cuda()
        # Invert
        latent_estimates, history = inversion.invert(generator, y_gt, cor_loss, batch_size=1, video=args.video)
        # Get Images
        y_estimate_list = torch.split(torch.clamp(_tanh_to_sigmoid(generator(latent_estimates)), min=0., max=1.).cpu(), 1, dim=0)
        # Save
        for img_id, image in enumerate(images):
            up_gray = colorization_images(image_tensor_list[img_id])
            y_gray_pil = Tensor2PIL(up_gray, mode='L')
            y_gray_pil.save(args.outputs + '/input/%d%s' % (i, image_name_list[img_id][-4:]))
            
            y_RGB = Tensor2PIL(image_tensor_list[img_id])
            y_RGB.save(args.outputs + '/GT/%d%s' % (i, image_name_list[img_id][-4:]))
            
            Y_gt = Tensor2PIL(image_tensor_list[img_id], mode='RGB').convert('YCbCr')
            y_estimate_pil = Tensor2PIL(y_estimate_list[img_id], mode='RGB').convert('YCbCr')

            _, Cb, Cr = y_estimate_pil.split()
            Y, _, _ = Y_gt.split()
            y_colorization = Image.merge('YCbCr', (Y, Cb, Cr))
            y_colorization.convert('RGB').save(args.outputs + '/mGANoutput/%d%s' % (i, image_name_list[img_id][-4:]))
            # Create video
            if args.video:
                print('Create GAN-Inversion video.')
                video = cv2.VideoWriter(
                    filename=os.path.join(args.outputs, '%s_inversion.avi' % image_name_list[img_id]),
                    fourcc=cv2.VideoWriter_fourcc(*'MJPG'),
                    fps=args.fps,
                    frameSize=(frameSize, frameSize))
                print('Save frames.')
                for i, sample in enumerate(history):
                    image = torch.clamp(_tanh_to_sigmoid(generator(sample)), min=0., max=1.).cpu()
                    image_pil = Tensor2PIL(image, mode='RGB').convert('YCbCr')
                    _, Cb, Cr = image_pil.split()
                    y_colorization = Image.merge('YCbCr', (Y, Cb, Cr)).convert('RGB')
                    image_cv2 = cv2.cvtColor(np.asarray(y_colorization), cv2.COLOR_RGB2BGR)
                    # image_cv2 = cv2.cvtColor(np.asarray(image_pil.convert('RGB')), cv2.COLOR_RGB2BGR)
                    video.write(image_cv2)
                video.release()

    print(f'{(time.time()-start_time)/60:.2f}', 'minutes taken in total;',
          f'{(time.time()-start_time)/60/len(image_list):.2f}', 'per image.')