def get_train_dataloader(args, patches):
    """Build the training and validation DataLoaders over all source domains.

    For every dataset named in ``args.source`` the train/val split listed in
    ``txt_lists/<name>_train.txt`` is loaded, wrapped in jigsaw datasets, and
    the per-domain pieces are concatenated into a single loader each.

    Returns:
        (train_loader, val_loader) tuple of ``torch.utils.data.DataLoader``.
    """
    source_names = args.source
    assert isinstance(source_names, list)
    img_transformer, tile_transformer = get_train_transformers(args)
    per_source_cap = args.limit_source
    train_parts, val_parts = [], []
    for dname in source_names:
        name_train, name_val, labels_train, labels_val = get_split_dataset_info(
            join(dirname(__file__), 'txt_lists', '%s_train.txt' % dname),
            args.val_size)
        part = JigsawDataset(name_train, labels_train, patches=patches,
                             img_transformer=img_transformer,
                             tile_transformer=tile_transformer,
                             jig_classes=args.jigsaw_n_classes,
                             bias_whole_image=args.bias_whole_image)
        # Optionally cap each source's contribution to the training set.
        if per_source_cap:
            part = Subset(part, per_source_cap)
        train_parts.append(part)
        # The validation portion was carved out of the train split above.
        val_parts.append(JigsawTestDataset(name_val, labels_val,
                                           img_transformer=get_val_transformer(args),
                                           patches=patches,
                                           jig_classes=args.jigsaw_n_classes))
    train_loader = torch.utils.data.DataLoader(ConcatDataset(train_parts),
                                               batch_size=args.batch_size,
                                               shuffle=True, num_workers=4,
                                               pin_memory=True, drop_last=True)
    val_loader = torch.utils.data.DataLoader(ConcatDataset(val_parts),
                                             batch_size=args.batch_size,
                                             shuffle=False, num_workers=4,
                                             pin_memory=True, drop_last=False)
    return train_loader, val_loader
def get_target_jigsaw_loader(args):
    """Build a shuffled jigsaw DataLoader over the target domain's train list.

    The whole file list is used for training (``val_size=0``); stylized and
    vanilla variants of the dataset live in different txt_lists subfolders.
    """
    img_transformer, tile_transformer = get_train_transformers(args)
    # Stylized lists sit under an extra per-target subdirectory.
    if args.stylized:
        list_path = join(dirname(__file__), 'txt_lists',
                         'Stylized' + args.dataset,
                         "{}_target".format(args.target),
                         '%s_train.txt' % args.target)
    else:
        list_path = join(dirname(__file__), 'txt_lists',
                         'Vanilla' + args.dataset,
                         '%s_train.txt' % args.target)
    name_train, _, labels_train, _ = get_split_dataset_info(list_path, 0)
    dataset = JigsawDataset(name_train, labels_train, patches=False,
                            img_transformer=img_transformer,
                            tile_transformer=tile_transformer,
                            jig_classes=args.jigsaw_n_classes,
                            bias_whole_image=args.bias_whole_image,
                            grid_size=args.grid_size)
    return torch.utils.data.DataLoader(dataset, batch_size=args.batch_size,
                                       shuffle=True, num_workers=4,
                                       pin_memory=True, drop_last=True)
def get_jigsaw_val_dataloader(args, patches=False):
    """Build a non-shuffled jigsaw DataLoader over the target test list.

    The whole-image transform only resizes; tensor conversion plus ImageNet
    normalization happen per tile in the tile transform.
    """
    names, labels = _dataset_info(
        join(dirname(__file__), 'txt_lists', '%s_test.txt' % args.target))
    img_transformer = transforms.Compose(
        [transforms.Resize((args.image_size, args.image_size))])
    tile_transformer = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])
    val_dataset = JigsawDataset(names, labels, patches=patches,
                                img_transformer=img_transformer,
                                tile_transformer=tile_transformer,
                                jig_classes=args.jigsaw_n_classes,
                                bias_whole_image=args.bias_whole_image)
    # Optionally evaluate on only the first limit_target samples.
    if args.limit_target and len(val_dataset) > args.limit_target:
        val_dataset = Subset(val_dataset, args.limit_target)
        print("Using %d subset of val dataset" % args.limit_target)
    return torch.utils.data.DataLoader(ConcatDataset([val_dataset]),
                                       batch_size=args.batch_size,
                                       shuffle=False, num_workers=4,
                                       pin_memory=True, drop_last=False)
def get_train_dataloader(args):
    """Build plain (non-jigsaw-configured) train/val DataLoaders over args.source.

    NOTE(review): this redefines ``get_train_dataloader`` from the variant
    above with a different signature — looks like alternate versions pasted
    into one file; confirm which definition callers actually resolve.

    If a source name is one of the digit benchmarks, a dedicated digit loader
    is returned immediately — any remaining sources in the list are ignored
    (original behavior, preserved).
    """
    source_names = args.source
    assert isinstance(source_names, list)
    img_transformer = get_train_transformers(args)
    per_source_cap = args.limit_source
    train_parts, val_parts = [], []
    for dname in source_names:
        if dname in digits_datasets:
            # Early exit: digit datasets bypass the txt-list pipeline entirely.
            return get_digital_train_dataloader(args, dname)
        name_train, name_val, labels_train, labels_val = get_split_dataset_info(
            join(dirname(__file__), 'txt_lists', '%s_train.txt' % dname),
            args.val_size)
        part = JigsawDataset(name_train, labels_train,
                             img_transformer=img_transformer)
        if per_source_cap:
            part = Subset(part, per_source_cap)
        train_parts.append(part)
        val_parts.append(JigsawDataset(name_val, labels_val,
                                       img_transformer=get_val_transformer(args)))
    loader = torch.utils.data.DataLoader(ConcatDataset(train_parts),
                                         batch_size=args.batch_size,
                                         shuffle=True, num_workers=4,
                                         pin_memory=True, drop_last=True)
    val_loader = torch.utils.data.DataLoader(ConcatDataset(val_parts),
                                             batch_size=args.batch_size,
                                             shuffle=False, num_workers=4,
                                             pin_memory=True, drop_last=False)
    return loader, val_loader
def get_target_dataloader(args, dname):
    """Build a non-shuffled evaluation DataLoader for one target dataset.

    Reads ``txt_lists/<dname>_test.txt``, applies the validation transform,
    and optionally truncates to ``args.limit_target`` samples.
    """
    names, labels = _dataset_info(
        join(dirname(__file__), 'txt_lists', '%s_test.txt' % dname))
    val_dataset = JigsawDataset(names, labels,
                                img_transformer=get_val_transformer(args))
    if args.limit_target and len(val_dataset) > args.limit_target:
        val_dataset = Subset(val_dataset, args.limit_target)
        print("Using %d subset of val dataset" % args.limit_target)
    dataset = ConcatDataset([val_dataset])
    print("Load %s, size: %d" % (dname, len(dataset)))
    return torch.utils.data.DataLoader(dataset, batch_size=args.batch_size,
                                       shuffle=False, num_workers=4,
                                       pin_memory=True, drop_last=False)
def get_train_dataloader(args, patches):
    """Build jigsaw train/val DataLoaders, with stylized/vanilla list selection.

    NOTE(review): third redefinition of ``get_train_dataloader`` in this file
    — only the last definition wins at import time; confirm intent.

    For each source domain the train list is read from either the Stylized or
    Vanilla txt_lists subtree. Validation uses the jigsaw train-style dataset
    when ``args.jig_only`` is set, otherwise the test-style dataset with the
    validation transform.
    """
    source_names = args.source
    assert isinstance(source_names, list)
    img_transformer, tile_transformer = get_train_transformers(args)
    per_source_cap = args.limit_source
    train_parts, val_parts = [], []
    for dname in source_names:
        # Stylized lists live under an extra per-target subdirectory.
        if args.stylized:
            list_path = join(dirname(__file__), 'txt_lists',
                             'Stylized' + args.dataset,
                             "{}_target".format(args.target),
                             '%s_train.txt' % dname)
        else:
            list_path = join(dirname(__file__), 'txt_lists',
                             'Vanilla' + args.dataset,
                             '%s_train.txt' % dname)
        name_train, name_val, labels_train, labels_val = get_split_dataset_info(
            list_path, args.val_size)
        part = JigsawDataset(name_train, labels_train, patches=patches,
                             img_transformer=img_transformer,
                             tile_transformer=tile_transformer,
                             jig_classes=args.jigsaw_n_classes,
                             bias_whole_image=args.bias_whole_image,
                             grid_size=args.grid_size)
        if per_source_cap:
            part = Subset(part, per_source_cap)
        train_parts.append(part)
        if args.jig_only:
            # Validate with the same jigsaw pipeline used for training.
            val_parts.append(JigsawDataset(name_val, labels_val, patches=patches,
                                           img_transformer=img_transformer,
                                           tile_transformer=tile_transformer,
                                           jig_classes=args.jigsaw_n_classes,
                                           bias_whole_image=args.bias_whole_image,
                                           grid_size=args.grid_size))
        else:
            val_parts.append(JigsawTestDataset(name_val, labels_val,
                                               img_transformer=get_val_transformer(args),
                                               patches=patches,
                                               jig_classes=args.jigsaw_n_classes))
    loader = torch.utils.data.DataLoader(ConcatDataset(train_parts),
                                         batch_size=args.batch_size,
                                         shuffle=True, num_workers=4,
                                         pin_memory=True, drop_last=True)
    val_loader = torch.utils.data.DataLoader(ConcatDataset(val_parts),
                                             batch_size=args.batch_size,
                                             shuffle=False, num_workers=4,
                                             pin_memory=True, drop_last=False)
    return loader, val_loader