def multilabel_idxcount_v2_val(args):
    """Build the validation dataset whose (idx, count) labels become a KL target distribution.

    FIXME: this name is redefined later in this file with a multihot target
    transform; at import time the later definition shadows this one, making
    this variant dead code. One of the two should be renamed — this one
    matches the KL transform used by ``multilabel_idxcount_v2_train``.

    Args:
        args: namespace providing ``val_file`` (pickled image records),
            ``data_dir`` (image root directory) and ``num_classes``.

    Returns:
        An ``ImageRelLists`` dataset over the validation split.
    """
    image_information = loadpickle(args.val_file)
    dataset = ImageRelLists(
        image_paths=image_information,
        image_root=args.data_dir,
        transform=get_val_simple_transform(),
        target_transform=multilabelidxcount2KL(args.num_classes))
    return dataset
def deepsentiment_s_test(args):
    """Return the single-source DeepSentiment test dataset (raw labels, no target transform).

    Args:
        args: namespace providing ``test_file`` (pickled image records) and
            ``data_dir`` (image root directory).
    """
    records = loadpickle(args.test_file)
    return ImageRelLists(image_paths=records,
                         image_root=args.data_dir,
                         transform=get_val_simple_transform(),
                         target_transform=None)
def deepsentiment_m_val(args):
    """Return the multi-source DeepSentiment validation dataset for source ``args.ind``.

    Args:
        args: namespace providing ``val_files`` / ``data_dirs`` (parallel lists
            indexed by ``ind``) and ``ind`` (which source to load).
    """
    source_idx = args.ind
    records = loadpickle(args.val_files[source_idx])
    return ImageRelLists(image_paths=records,
                         image_root=args.data_dirs[source_idx],
                         transform=get_val_simple_transform(),
                         target_transform=None)
def multilabel_idxcount_v2_val(args):
    """Return the validation dataset whose (idx, count) labels become a multihot vector.

    NOTE(review): this name is defined twice in this file (an earlier variant
    uses a KL target transform); this later definition is the one in effect.

    Args:
        args: namespace providing ``val_file`` (pickled image records),
            ``data_dir`` (image root directory) and ``num_classes``.
    """
    records = loadpickle(args.val_file)
    to_multihot = multilabelidxcount2multihot(args.num_classes)
    return ImageRelLists(image_paths=records,
                         image_root=args.data_dir,
                         transform=get_val_simple_transform(),
                         target_transform=to_multihot)
def multilabel_idxcount_v2_train(args):
    """Return the training dataset whose (idx, count) labels become a KL target distribution.

    Args:
        args: namespace providing ``train_file`` (pickled image records),
            ``data_dir`` (image root directory) and ``num_classes``.
    """
    records = loadpickle(args.train_file)
    to_kl = multilabelidxcount2KL(args.num_classes)
    return ImageRelLists(image_paths=records,
                         image_root=args.data_dir,
                         transform=get_train_fix_size_transform(),
                         target_transform=to_kl)
def singlelabel_v2_val(args):
    """Return the single-label validation dataset (labels passed through untransformed).

    FIXME: marker carried over from the original implementation.

    Args:
        args: namespace providing ``val_file`` (pickled image records) and
            ``data_dir`` (image root directory).
    """
    records = loadpickle(args.val_file)
    return ImageRelLists(image_paths=records,
                         image_root=args.data_dir,
                         transform=get_val_simple_transform(),
                         target_transform=None)
def singlelabel_test(args, annotation_file, data_dir):
    """Return the single-label test dataset from a split-templated annotation path.

    FIXME: marker carried over from the original implementation.

    Args:
        args: unused here; kept for signature compatibility with sibling loaders.
        annotation_file: path template with a ``{}`` placeholder for the split name.
        data_dir: image root directory.
    """
    test_annotations = annotation_file.format('test')
    records = loadpickle(test_annotations)
    return ImageRelLists(image_paths=records,
                         image_root=data_dir,
                         transform=get_val_simple_transform(),
                         target_transform=None)
def simple_multilabel_val(args):
    """Return the validation dataset with the simple multilabel target transform.

    FIXME: marker carried over from the original implementation.

    Args:
        args: namespace providing ``val_file`` (pickled image records) and
            ``data_dir`` (image root directory).
    """
    records = loadpickle(args.val_file)
    return ImageRelLists(image_paths=records,
                         image_root=args.data_dir,
                         transform=get_val_simple_transform(),
                         target_transform=simple_multitrans())
def multilabel_v2_val(args):
    """Return the validation dataset whose label index lists become multihot vectors.

    FIXME: marker carried over from the original implementation.

    Args:
        args: namespace providing ``val_file`` (pickled image records),
            ``data_dir`` (image root directory) and ``num_classes``.
    """
    records = loadpickle(args.val_file)
    to_multihot = multilabel2multihot(args.num_classes)
    return ImageRelLists(image_paths=records,
                         image_root=args.data_dir,
                         transform=get_val_simple_transform(),
                         target_transform=to_multihot)
def multilabel_BCE_test(args, annotation_file, data_dir):
    """Return the multilabel BCE test dataset from a split-templated annotation path.

    FIXME: marker carried over from the original implementation.

    Args:
        args: namespace providing ``num_classes``.
        annotation_file: path template with a ``{}`` placeholder for the split name.
        data_dir: image root directory.
    """
    test_annotations = annotation_file.format('test')
    records = loadpickle(test_annotations)
    to_target = multilabel2multi1(args.num_classes)
    return ImageRelLists(image_paths=records,
                         image_root=data_dir,
                         transform=get_val_simple_transform(),
                         target_transform=to_target)