Example #1
    def configure_datasets(self):
        # Guard against dataset types not registered in data_configs
        if self.opts.dataset_type not in data_configs.DATASETS.keys():
            raise Exception('{} is not a valid dataset_type'.format(
                self.opts.dataset_type))
        print('Loading dataset for {}'.format(self.opts.dataset_type))
        dataset_args = data_configs.DATASETS[self.opts.dataset_type]
        transforms_dict = dataset_args['transforms'](
            self.opts).get_transforms()
        # Source/target roots and transforms come from the dataset config
        train_dataset_celeba = ImagesDataset(
            source_root=dataset_args['train_source_root'],
            target_root=dataset_args['train_target_root'],
            source_transform=transforms_dict['transform_source'],
            target_transform=transforms_dict['transform_gt_train'],
            opts=self.opts)
        test_dataset_celeba = ImagesDataset(
            source_root=dataset_args['test_source_root'],
            target_root=dataset_args['test_target_root'],
            source_transform=transforms_dict['transform_source'],
            target_transform=transforms_dict['transform_test'],
            opts=self.opts)
        train_dataset = train_dataset_celeba
        test_dataset = test_dataset_celeba
        print("Number of psp_training samples: {}".format(len(train_dataset)))
        print("Number of test samples: {}".format(len(test_dataset)))
        return train_dataset, test_dataset
Example #2
    def configure_datasets(self):
        if self.opts.dataset_type not in data_configs.DATASETS.keys():
            raise Exception(
                "{} is not a valid dataset_type".format(self.opts.dataset_type)
            )
        print("Loading dataset for {}".format(self.opts.dataset_type))
        dataset_args = data_configs.DATASETS[self.opts.dataset_type]
        transforms_dict = dataset_args["transforms"](
            self.opts
        ).get_transforms()

        train_latents_root = None
        test_latents_root = None
        self.labels_path = None

        if "labels" in dataset_args.keys():
            self.labels_path = dataset_args["labels"]

        if "train_latents_root" in dataset_args.keys():
            train_latents_root = dataset_args["train_latents_root"]

        if "test_latents_root" in dataset_args.keys():
            test_latents_root = dataset_args["test_latents_root"]

        train_dataset_celeba = ImagesDataset(
            source_root=dataset_args["train_source_root"],
            target_root=dataset_args["train_target_root"],
            source_transform=transforms_dict["transform_source"],
            target_transform=transforms_dict["transform_gt_train"],
            latents_root=train_latents_root,
            labels_path=self.labels_path,
            opts=self.opts,
        )

        test_dataset_celeba = ImagesDataset(
            source_root=dataset_args["test_source_root"],
            target_root=dataset_args["test_target_root"],
            source_transform=transforms_dict["transform_source"],
            target_transform=transforms_dict["transform_test"],
            latents_root=test_latents_root,
            opts=self.opts,
        )
        train_dataset = train_dataset_celeba
        test_dataset = test_dataset_celeba
        print("Number of training samples: {}".format(len(train_dataset)))
        print("Number of test samples: {}".format(len(test_dataset)))
        return train_dataset, test_dataset
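Example #2 additionally reads optional 'labels' and latents-root keys from the dataset config. A hypothetical data_configs.DATASETS entry covering all of the keys these functions consume could look as follows; the entry name, transforms class, and paths are placeholders, not values from the listing:

    DATASETS = {
        'my_dataset_encode': {
            'transforms': transforms_config.EncodeTransforms,  # assumed transforms class
            'train_source_root': '/path/to/train/images',
            'train_target_root': '/path/to/train/images',
            'test_source_root': '/path/to/test/images',
            'test_target_root': '/path/to/test/images',
            # Optional keys, only read when present:
            'labels': '/path/to/labels.npy',
            'train_latents_root': '/path/to/train/latents',
            'test_latents_root': '/path/to/test/latents',
        },
    }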
Example #3
    def configure_datasets(self):
        if self.opts.dataset_type not in data_configs.DATASETS.keys():
            raise Exception(f'{self.opts.dataset_type} is not a valid dataset_type')
        print(f'Loading dataset for {self.opts.dataset_type}')
        dataset_args = data_configs.DATASETS[self.opts.dataset_type]
        transforms_dict = dataset_args['transforms'](
            self.opts).get_transforms()
        train_dataset = ImagesDataset(
            source_root=dataset_args['train_source_root'],
            target_root=dataset_args['train_target_root'],
            source_transform=transforms_dict['transform_source'],
            target_transform=transforms_dict['transform_gt_train'],
            opts=self.opts)
        test_dataset = ImagesDataset(
            source_root=dataset_args['test_source_root'],
            target_root=dataset_args['test_target_root'],
            source_transform=transforms_dict['transform_source'],
            target_transform=transforms_dict['transform_test'],
            opts=self.opts)
        print(f"Number of training samples: {len(train_dataset)}")
        print(f"Number of test samples: {len(test_dataset)}")
        return train_dataset, test_dataset
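All three variants return a (train_dataset, test_dataset) pair. A minimal usage sketch, assuming a coach object exposing this method and opts attributes for batch sizes and worker counts (the names below are illustrative, not taken from the listing):

    from torch.utils.data import DataLoader

    train_dataset, test_dataset = coach.configure_datasets()
    train_dataloader = DataLoader(train_dataset,
                                  batch_size=coach.opts.batch_size,
                                  shuffle=True,
                                  num_workers=int(coach.opts.workers),
                                  drop_last=True)
    test_dataloader = DataLoader(test_dataset,
                                 batch_size=coach.opts.test_batch_size,
                                 shuffle=False,
                                 num_workers=int(coach.opts.test_workers),
                                 drop_last=True)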
Example #4
    parser.add_argument("ckpt",
                        metavar="CHECKPOINT",
                        help="path to the model checkpoints")

    args = parser.parse_args()
    print(args)

    net, opts = setup_model(args.ckpt, device)
    dataset_args = data_configs.DATASETS[opts.dataset_type]
    transforms_dict = dataset_args['transforms'](opts).get_transforms()

    images_directory = dataset_args[
        'test_source_root'] if args.images_dir is None else args.images_dir
    test_dataset = ImagesDataset(
        source_root=images_directory,
        target_root=images_directory,
        source_transform=transforms_dict['transform_source'],
        target_transform=transforms_dict['transform_test'],
        opts=opts)

    data_loader = DataLoader(test_dataset,
                             batch_size=args.batch,
                             shuffle=False,
                             num_workers=2,
                             drop_last=True)

    print(f'dataset length: {len(test_dataset)}')

    # In the following example, we use an InterfaceGAN-based editing direction to compute the LEC metric.
    # Adapt the example to your own domain and needs.
    direction = torch.load('../editings/interfacegan_directions/age.pt').to(
        device)
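To make the ending of Example #4 concrete, here is a minimal sketch of an LEC-style consistency score using the loaded direction: edit the inversion, generate, re-invert, and check that undoing the edit recovers the original latents. The net.encoder/net.decoder calls and the editing strength alpha are assumptions about the model API, not code from the source:

    alpha = 3.0  # illustrative editing strength (assumed)
    scores = []
    with torch.no_grad():
        for x, _ in data_loader:
            x = x.to(device).float()
            w = net.encoder(x)                # invert images to latent codes
            edited, _ = net.decoder([w + alpha * direction],
                                    input_is_latent=True)  # generate edited images
            w_back = net.encoder(edited)      # re-invert the edited images
            # Consistent editing: undoing the edit should recover `w`
            diff = w_back - alpha * direction - w
            scores.append(diff.norm(dim=-1).mean().item())
    print(f'LEC: {sum(scores) / len(scores):.4f}')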