def create_test_data_loader(args, name, dataset):
    train_transformer = T.Compose([
        T.RectScale(args.height, args.width),
        T.ToTensor(),
        # note: these means differ slightly from the standard ImageNet values
        # ([0.485, 0.456, 0.406]) used elsewhere in this file
        T.Normalize(mean=[0.486, 0.459, 0.408], std=[0.229, 0.224, 0.225])
    ])
    test_transformer = T.Compose([
        T.RectScale(args.height, args.width),
        T.ToTensor(),
        T.Normalize(mean=[0.486, 0.459, 0.408], std=[0.229, 0.224, 0.225])
    ])
    train_loader = DataLoader(
        Preprocessor(dataset.train,
                     root=os.path.join(dataset.images_dir, dataset.train_path),
                     transform=train_transformer),
        batch_size=args.batch_size, num_workers=args.workers,
        shuffle=False, pin_memory=True, drop_last=False)
    query_loader = DataLoader(
        Preprocessor(dataset.query,
                     root=os.path.join(dataset.images_dir, dataset.query_path),
                     transform=test_transformer),
        batch_size=args.batch_size * 4, num_workers=args.workers,
        shuffle=False, pin_memory=True, drop_last=False)
    gallery_loader = DataLoader(
        Preprocessor(dataset.gallery,
                     root=os.path.join(dataset.images_dir, dataset.gallery_path),
                     transform=test_transformer),
        batch_size=args.batch_size * 4, num_workers=args.workers,
        shuffle=False, pin_memory=True, drop_last=False)
    print('{} dataset has been loaded.'.format(name))
    return train_loader, query_loader, gallery_loader
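# Hypothetical usage sketch (not part of the original source): the argument values,
# the dataset name, and the datasets.create factory call are assumptions; the helper
# above only needs an argparse-style namespace plus a dataset object exposing
# images_dir, train_path, query_path and gallery_path.
if __name__ == '__main__':
    import argparse
    args = argparse.Namespace(height=256, width=128, batch_size=64, workers=4)
    dataset = datasets.create('market1501', 'data/market1501')
    train_loader, query_loader, gallery_loader = create_test_data_loader(args, 'Market-1501', dataset)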
def __init__(self, dataset, name, root=None, with_pose=False, pose_root=None,
             is_test=False, test_root=None, pid_imgs=None, height=256, width=128,
             pose_aug='no', transform=None):
    super(Preprocessor, self).__init__()
    self.dataset = dataset
    self.root = root
    self.with_pose = with_pose
    self.pose_root = pose_root
    self.is_test = is_test
    self.test_root = test_root
    self.pid_imgs = pid_imgs
    self.height = height
    self.width = width
    self.pose_aug = pose_aug
    self.name = name
    normalizer = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                      std=[0.229, 0.224, 0.225])
    if transform is None:
        self.transform = transforms.Compose([
            transforms.RectScale(height, width),
            transforms.RandomSizedEarser(),
            # transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            normalizer,
        ])
    else:
        self.transform = transform
    self.transform_p = transforms.Compose([
        transforms.RectScale(height, width),
        transforms.ToTensor(),
        normalizer,
    ])
def get_dataloader(dataset, data_dir, training=False, height=256, width=128,
                   batch_size=64, workers=1):
    normalizer = T.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225])
    # normalizer = T.Normalize(mean=[0.1307, 0.1307, 0.1307],
    #                          std=[0.3081, 0.3081, 0.3081])
    if training:
        transformer = T.Compose([
            T.RectScale(config.input_size + 32, config.input_size + 32),
            torchvision.transforms.RandomCrop(config.input_size),
            torchvision.transforms.RandomHorizontalFlip(),
            # RandomRotation(degrees=20),
            T.ToTensor(),
            normalizer,
        ])
    else:
        transformer = T.Compose([
            T.RectScale(config.input_size, config.input_size),
            T.ToTensor(),
            normalizer,
        ])
    data_loader = DataLoader(
        Preprocessor(dataset, root=data_dir, transform=transformer),
        batch_size=batch_size, num_workers=workers,
        shuffle=training, pin_memory=True, drop_last=training)
    return data_loader
def get_data(name, data_dir, height, width, batch_size, workers, pose_aug, skip, rate, eraser):
    root = osp.join(data_dir, name)
    dataset = datasets.create(name, root)
    video_dict = None
    if osp.isfile(osp.join(root, 'video.json')):
        video_dict = read_json(osp.join(root, 'video.json'))
    if eraser:
        train_transformer = T.Compose([
            T.RectScale(height, width),
            T.RandomSizedEarser(),
            T.ToTensor(),
            T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ])
    else:
        train_transformer = T.Compose([
            T.RectScale(height, width),
            T.ToTensor(),
            T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ])
    # use the combined trainval set for training by default
    train_loader = DataLoader(
        Preprocessor(dataset.trainval, name, root=dataset.images_dir,
                     with_pose=True, pose_root=dataset.poses_dir,
                     pid_imgs=dataset.trainval_query, height=height, width=width,
                     pose_aug=pose_aug, transform=train_transformer),
        sampler=RandomTripletSampler(dataset.trainval, video_dict=video_dict,
                                     skip_frames=skip, inter_rate=rate),
        batch_size=batch_size, num_workers=workers, pin_memory=False)
    test_transformer = T.Compose([
        T.RectScale(height, width),
        T.ToTensor(),
        T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])
    test_loader = DataLoader(
        Preprocessor(list(set(dataset.query) | set(dataset.gallery)), name,
                     root=dataset.images_dir, transform=test_transformer),
        batch_size=batch_size, num_workers=workers,
        shuffle=False, pin_memory=False)
    return dataset, train_loader, test_loader
def get_data(name, data_dir, height, width, batch_size, workers):
    root = osp.join(data_dir, name)
    root = data_dir  # the dataset root is taken directly from data_dir
    dataset = datasets.create(name, root)
    normalizer = T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    num_classes = dataset.num_train_ids
    train_transformer = T.Compose([
        T.RectScale(height, width),
        T.RandomHorizontalFlip(),
        T.ToTensor(),
        normalizer,
    ])
    test_transformer = T.Compose([
        T.RectScale(height, width),
        T.ToTensor(),
        normalizer,
    ])
    train_loader = DataLoader(
        Preprocessor(dataset.train,
                     root=osp.join(dataset.images_dir, dataset.train_path),
                     transform=train_transformer, random_mask=False),
        batch_size=batch_size, num_workers=workers,
        shuffle=True, pin_memory=True, drop_last=True)
    query_loader = DataLoader(
        Preprocessor(dataset.query,
                     root=osp.join(dataset.images_dir, dataset.query_path),
                     transform=test_transformer),
        batch_size=batch_size, num_workers=workers,
        shuffle=False, pin_memory=True)
    gallery_loader = DataLoader(
        Preprocessor(dataset.gallery,
                     root=osp.join(dataset.images_dir, dataset.gallery_path),
                     transform=test_transformer),
        batch_size=batch_size, num_workers=workers,
        shuffle=False, pin_memory=True)
    return dataset, num_classes, train_loader, query_loader, gallery_loader
def get_data(name, split_id, data_dir, height, width, batch_size, num_instances,
             workers, combine_trainval):
    root = osp.join(data_dir, name)
    dataset = datasets.create(name, root, split_id=split_id)
    normalizer = T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    train_set = dataset.trainval if combine_trainval else dataset.train
    num_classes = (dataset.num_trainval_ids if combine_trainval
                   else dataset.num_train_ids)
    train_transformer = T.Compose([
        T.RectScale(height, width),
        T.RandomSizedEarser(),
        T.RandomHorizontalFlip(),
        T.ToTensor(),
        normalizer,
    ])
    test_transformer = T.Compose([
        T.RectScale(height, width),
        T.ToTensor(),
        normalizer,
    ])
    train_loader = DataLoader(
        Preprocessor(train_set, root=dataset.images_dir, transform=train_transformer),
        batch_size=batch_size, num_workers=workers,
        sampler=RandomMultipleGallerySampler(train_set, num_instances),
        pin_memory=True, drop_last=True)
    val_loader = DataLoader(
        Preprocessor(dataset.val, root=dataset.images_dir, transform=test_transformer),
        batch_size=batch_size, num_workers=workers,
        shuffle=False, pin_memory=True)
    test_loader = DataLoader(
        Preprocessor(list(set(dataset.query) | set(dataset.gallery)),
                     root=dataset.images_dir, transform=test_transformer),
        batch_size=batch_size, num_workers=workers,
        shuffle=False, pin_memory=True)
    return dataset, num_classes, train_loader, val_loader, test_loader
def get_data(name, data_dir, height, width, ratio, batch_size, workers, num_instances=8):
    root = osp.join(data_dir, name)
    root = data_dir
    dataset = datasets.create(name, root)
    normalizer = T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    listnormalizer = T.ListNormalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    num_classes = dataset.num_train_ids + 1  # plus one more label for the zero-padded feature
    train_transformer = T.Compose([
        T.RectScale(height, width),
        T.RandomHorizontalFlip(),
        T.RandomVerticalCropCont(height, width),
        T.ListToTensor(),
        listnormalizer,
    ])
    test_transformer = T.Compose([
        T.RectScale(height, width),
        T.ToTensor(),
        normalizer,
    ])
    query_transformer = T.Compose([
        T.ContVerticalCropDiscret(height, width, ratio),
        T.ToTensor(),
        normalizer,
    ])
    train_loader = DataLoader(
        Preprocessor(dataset.train,
                     root=osp.join(dataset.images_dir, dataset.train_path),
                     transform=train_transformer),
        batch_size=batch_size, num_workers=workers,
        sampler=RandomIdentitySampler(dataset.train, num_instances),
        # shuffle=True,
        pin_memory=True, drop_last=True)
    query_loader = DataLoader(
        Preprocessor(dataset.query,
                     root=osp.join(dataset.images_dir, dataset.query_path),
                     transform=query_transformer),
        batch_size=batch_size, num_workers=workers,
        shuffle=False, pin_memory=True)
    gallery_loader = DataLoader(
        Preprocessor(dataset.gallery,
                     root=osp.join(dataset.images_dir, dataset.gallery_path),
                     transform=test_transformer),
        batch_size=batch_size, num_workers=workers,
        shuffle=False, pin_memory=True)
    return dataset, num_classes, train_loader, query_loader, gallery_loader
def get_data(name, split_id, data_dir, height, width, crop_height, crop_width,
             batch_size, caffe_sampler=False, workers=4):
    root = osp.join(data_dir, name)
    dataset = datasets.create(name, root, split_id=split_id)
    train_set = dataset.trainval
    num_classes = dataset.num_trainval_ids
    # transforms
    train_transformer = T.Compose([
        T.RectScale(height, width),
        # T.CenterCrop((crop_height, crop_width)),
        T.RandomHorizontalFlip(),
        T.ToTensor(),
        T.RGB_to_BGR(),
        T.NormalizeBy(255),
    ])
    test_transformer = T.Compose([
        T.RectScale(height, width),
        # T.CenterCrop((crop_height, crop_width)),
        T.ToTensor(),
        T.RGB_to_BGR(),
        T.NormalizeBy(255),
    ])
    # dataloaders
    if caffe_sampler:
        sampler = caffeSampler(train_set, name, batch_size=batch_size, root=dataset.images_dir)
    else:
        sampler = RandomIdentitySampler(train_set, 10)  # TODO
    train_loader = DataLoader(
        Preprocessor(train_set, root=dataset.images_dir, transform=train_transformer),
        batch_size=batch_size, num_workers=workers, sampler=sampler,
        pin_memory=True, drop_last=True)
    test_loader = DataLoader(
        Preprocessor(list(set(dataset.query) | set(dataset.gallery)),
                     root=dataset.images_dir, transform=test_transformer),
        batch_size=batch_size, num_workers=workers,
        shuffle=False, pin_memory=True)
    return dataset, num_classes, train_loader, test_loader
def get_data(dataset_name, split_id, data_dir, batch_size, workers, num_instances,
             combine_trainval=False):
    root = osp.join(data_dir, dataset_name)
    dataset = get_dataset(dataset_name, root, split_id=split_id, num_val=1, download=True)
    normalizer = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                      std=[0.229, 0.224, 0.225])
    train_set = dataset.trainval if combine_trainval else dataset.train
    num_classes = (dataset.num_trainval_ids if combine_trainval
                   else dataset.num_train_ids)
    train_processor = Preprocessor(train_set, root=dataset.images_dir,
                                   transform=transforms.Compose([
                                       transforms.RandomSizedRectCrop(256, 128),
                                       transforms.RandomHorizontalFlip(),
                                       transforms.ToTensor(),
                                       normalizer,
                                   ]))
    if num_instances > 0:
        train_loader = DataLoader(
            train_processor, batch_size=batch_size, num_workers=workers,
            sampler=RandomIdentitySampler(train_set, num_instances),
            pin_memory=True)
    else:
        train_loader = DataLoader(
            train_processor, batch_size=batch_size, num_workers=workers,
            shuffle=True, pin_memory=True)
    val_loader = DataLoader(
        Preprocessor(dataset.val, root=dataset.images_dir,
                     transform=transforms.Compose([
                         transforms.RectScale(256, 128),
                         transforms.ToTensor(),
                         normalizer,
                     ])),
        batch_size=batch_size, num_workers=workers,
        shuffle=False, pin_memory=True)
    test_loader = DataLoader(
        Preprocessor(list(set(dataset.query) | set(dataset.gallery)),
                     root=dataset.images_dir,
                     transform=transforms.Compose([
                         transforms.RectScale(256, 128),
                         transforms.ToTensor(),
                         normalizer,
                     ])),
        batch_size=batch_size, num_workers=workers,
        shuffle=False, pin_memory=True)
    return dataset, num_classes, train_loader, val_loader, test_loader
def get_data(split_id, data_dir, height, width, batchSize, workers, combine_trainval,
             train_list, val_list, query_list, gallery_list):
    root = data_dir
    normalizer = T.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225])  # ImageNet RGB statistics
    train_set = train_list + val_list if combine_trainval else train_list  # a list
    train_transformer = T.Compose([
        T.RectScale(height, width),
        T.RandomHorizontalFlip(),
        T.ToTensor(),   # [0, 255] to [0.0, 1.0]
        normalizer,     # normalize each channel of the input
    ])
    test_transformer = T.Compose([
        T.RectScale(height, width),
        T.ToTensor(),
        normalizer,
    ])
    train_loader = DataLoader(
        Preprocessor(train_set, root=root, transform=train_transformer),
        batch_size=batchSize, num_workers=workers,
        sampler=RandomSampler(train_set),
        # shuffle=True,
        pin_memory=True, drop_last=True)
    val_loader = DataLoader(
        Preprocessor(val_list, root=root, transform=test_transformer),
        batch_size=batchSize, num_workers=workers,
        shuffle=False, pin_memory=True)
    test_loader = DataLoader(
        Preprocessor(list(set(query_list) | set(gallery_list)),
                     root=root, transform=test_transformer),
        batch_size=batchSize, num_workers=workers,
        shuffle=False, pin_memory=True)
    return train_loader, val_loader, test_loader
def get_data(name, split_id, data_dir, height, width, batch_size, workers, combine_trainval):
    root = osp.join(data_dir, name)
    dataset = datasets.create(name, root, split_id=split_id)
    normalizer = T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    train_set = dataset.trainval if combine_trainval else dataset.train
    num_classes = (dataset.num_trainval_ids if combine_trainval
                   else dataset.num_train_ids)
    train_transformer = T.Compose([
        T.RandomSizedRectCrop(height, width),
        T.RandomHorizontalFlip(),
        T.ToTensor(),
        normalizer,
    ])
    test_transformer = T.Compose([
        T.RectScale(height, width),
        T.ToTensor(),
        normalizer,
    ])
    train_loader = DataLoader(
        Preprocessor(train_set, root=dataset.images_dir, transform=train_transformer),
        batch_size=batch_size, num_workers=workers,
        shuffle=True, pin_memory=True, drop_last=True)
    val_loader = DataLoader(
        Preprocessor(dataset.val, root=dataset.images_dir, transform=test_transformer),
        batch_size=batch_size, num_workers=workers,
        shuffle=False, pin_memory=True)
    test_loader = DataLoader(
        Preprocessor(list(set(dataset.query) | set(dataset.gallery)),
                     root=dataset.images_dir, transform=test_transformer),
        batch_size=batch_size, num_workers=workers,
        shuffle=False, pin_memory=True)
    # inverse-frequency class weights for the identity classifier
    pid_train = np.array(list(pid for _, pid, _ in train_set))
    class_weight = np.array([(pid_train == i).sum() for i in range(num_classes)])
    assert np.all(class_weight != 0)
    class_weight = pid_train.shape[0] / num_classes / class_weight
    class_weight = torch.Tensor(class_weight).cuda()
    return dataset, num_classes, class_weight, train_loader, val_loader, test_loader
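# Toy illustration (numbers are assumptions, not from the original code) of the
# inverse-frequency class weights computed above: with 6 training images spread over
# 3 identities appearing [3, 2, 1] times, each weight is N / (num_classes * count),
# so under-represented identities get proportionally larger weights.
import numpy as np
counts = np.array([3, 2, 1], dtype=np.float64)
weights = counts.sum() / len(counts) / counts   # -> [0.667, 1.0, 2.0]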
def get_data2(args):
    (name, split_id, data_dir, height, width, batch_size,
     num_instances, workers, combine_trainval) = (
        args.dataset, args.split, args.data_dir, args.height, args.width,
        args.batch_size, args.num_instances, args.workers, args.combine_trainval,
    )
    pin_memory = args.pin_mem
    name_val = args.dataset_val or args.dataset
    npy = args.has_npy
    rand_ratio = args.random_ratio
    dataset_train = datasets.CUB2('train')
    dataset_test = datasets.CUB2('test')
    normalizer = T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    num_classes = np.unique(dataset_train.train_labels).shape[0]
    train_transformer = T.Compose([
        T.ToPILImage(),
        T.RandomCropFlip(height, width, area=args.area),
        T.ToTensor(),
        normalizer,
    ])
    test_transformer = T.Compose([
        T.ToPILImage(),
        T.RectScale(height, width),
        T.ToTensor(),
        normalizer,
    ])
def get_data(name, data_dir, height, width, batch_size, workers, pose_aug):
    root = osp.join(data_dir, name)
    dataset = datasets.create(name, root)
    # use the combined trainval set for training by default
    train_loader = DataLoader(
        Preprocessor(dataset.trainval, root=dataset.images_dir,
                     with_pose=True, pose_root=dataset.poses_dir,
                     pid_imgs=dataset.trainval_query,
                     height=height, width=width, pose_aug=pose_aug),
        sampler=RandomPairSampler(dataset.trainval, neg_pos_ratio=3),
        batch_size=batch_size, num_workers=workers, pin_memory=False)
    test_transformer = T.Compose([
        T.RectScale(height, width),
        T.ToTensor(),
        T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])
    test_loader = DataLoader(
        Preprocessor(list(set(dataset.query) | set(dataset.gallery)),
                     root=dataset.images_dir, transform=test_transformer),
        batch_size=batch_size, num_workers=workers,
        shuffle=False, pin_memory=False)
    return dataset, train_loader, test_loader
def get_real_test_data(query_dir, gallery_dir, target_height, target_width, batch_size, workers):
    normalizer = T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    test_transformer = T.Compose([
        T.RectScale(target_height, target_width),
        T.ToTensor(),
        normalizer,
    ])
    query_loader = DataLoader(
        Flip_Preprocessor(data_dir=query_dir, is_flip=False, transform=test_transformer),
        batch_size=batch_size, num_workers=workers,
        shuffle=False, pin_memory=True)
    gallery_loader = DataLoader(
        Flip_Preprocessor(data_dir=gallery_dir, is_flip=False, transform=test_transformer),
        batch_size=batch_size, num_workers=workers,
        shuffle=False, pin_memory=True)
    return query_loader, gallery_loader
def get_data(data_dir, big_height, big_width, target_height, target_width,
             batch_size, workers, is_train=True):
    normalizer = T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    train_transformer = T.Compose([
        T.ResizeRandomCrop(big_height, big_width, target_height, target_width),
        T.ToTensor(),
        normalizer,
    ])
    test_transformer = T.Compose([
        T.RectScale(target_height, target_width),
        T.ToTensor(),
        normalizer,
    ])
    if is_train:
        transformer = train_transformer
    else:
        transformer = test_transformer
    data_loader = DataLoader(
        Direct_Preprocessor(data_dir=data_dir, transform=transformer, is_train=is_train),
        batch_size=batch_size, num_workers=workers,
        shuffle=is_train, pin_memory=True)
    return data_loader
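# Hypothetical usage sketch (directory paths and sizes are assumptions): the same
# get_data helper builds an augmented, shuffled training loader and a deterministic
# evaluation loader depending on is_train.
train_loader = get_data('data/train_images', big_height=288, big_width=144,
                        target_height=256, target_width=128, batch_size=64,
                        workers=4, is_train=True)
eval_loader = get_data('data/test_images', 288, 144, 256, 128, 64, 4, is_train=False)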
def get_loader(data, root, height=256, width=128, batch_size=32, workers=0, training=False):
    normalizer = T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    if training:
        transformer = T.Compose([
            T.RandomSizedRectCrop(height, width),  # randomly crop and rescale the image
            T.RandomHorizontalFlip(),              # random horizontal flip with probability 0.5 (augmentation)
            T.ToTensor(),                          # convert the image to a torch tensor
            normalizer,
        ])
    else:
        transformer = T.Compose([
            T.RectScale(height, width),            # rescale the image
            T.ToTensor(),
            normalizer,
        ])
        batch_size = batch_size * 8                # larger batches for evaluation
    data_loader = DataLoader(
        Preprocessor(data, root=root, transform=transformer),
        batch_size=batch_size, num_workers=workers,
        shuffle=training, pin_memory=True)
    return data_loader
def get_dataloader(dataset, data_dir, training=False, height=256, width=128,
                   batch_size=64, workers=1):
    normalizer = T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    if training:
        transformer = T.Compose([
            T.RandomSizedRectCrop(height, width),
            T.RandomHorizontalFlip(),
            T.ToTensor(),
            normalizer,
        ])
    else:
        transformer = T.Compose([
            T.RectScale(height, width),
            T.ToTensor(),
            normalizer,
        ])
    data_loader = DataLoader(
        Preprocessor(dataset, root=data_dir, transform=transformer),
        batch_size=batch_size, num_workers=workers,
        shuffle=training, pin_memory=True, drop_last=training)
    return data_loader
def get_dataloader(self, dataset, training=False):
    normalizer = T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    if training:
        transformer = T.Compose([
            T.RandomSizedRectCrop(self.data_height, self.data_width),
            T.RandomHorizontalFlip(),
            T.ToTensor(),
            normalizer,
        ])
        batch_size = self.batch_size
    else:
        transformer = T.Compose([
            T.RectScale(self.data_height, self.data_width),
            T.ToTensor(),
            normalizer,
        ])
        batch_size = self.eval_bs
    data_loader = DataLoader(
        Preprocessor(dataset, root=self.data_dir, transform=transformer,
                     is_training=training, max_frames=self.max_frames),
        batch_size=batch_size, num_workers=self.data_workers,
        shuffle=training, pin_memory=True, drop_last=training)
    current_status = "Training" if training else "Test"
    print("create dataloader for {} with batch_size {}".format(current_status, batch_size))
    return data_loader
def test(dataset, net, perturbation, args, evaluator, epoch, name, saveRank=False):
    print(">> Evaluating network on test datasets...")
    net = net.cuda()
    net.eval()
    normalize = T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    mean = torch.Tensor(normalize.mean).view(1, 3, 1, 1).cuda()
    std = torch.Tensor(normalize.std).view(1, 3, 1, 1).cuda()
    test_transformer = T.Compose([
        T.RectScale(args.height, args.width),
        T.ToTensor()
    ])
    query_loader = DataLoader(
        Preprocessor(dataset.query, name, root=dataset.images_dir, transform=test_transformer),
        batch_size=args.batch_size, num_workers=0, shuffle=False, pin_memory=True)
    gallery_loader = DataLoader(
        Preprocessor(dataset.gallery, name, root=dataset.images_dir, transform=test_transformer),
        batch_size=args.batch_size, num_workers=8, shuffle=False, pin_memory=True)
    qFeats, gFeats, testQImage, noiseQImage, qnames, gnames = [], [], [], [], [], []
    qCams, gCams = [], []
    with torch.no_grad():
        for batch_index, (inputs, qname, _, qCam) in enumerate(query_loader):
            inputs = inputs.cuda()
            perturbed_input = perturbation(inputs)
            successful_diffs = ((perturbed_input - inputs) * 3 + 0.5).clamp(0, 1)
            if len(testQImage) < 8:
                testQImage.append(perturbed_input[0, ...])
                noiseQImage.append(successful_diffs[0, ...])
            perturbed_input = torch.clamp(perturbed_input, 0, 1)
            # normalize before the forward pass
            norm_perturbed_input = (perturbed_input - mean) / std
            perturbed_feature = net(norm_perturbed_input)[0]
            qFeats.append(perturbed_feature)
            qnames.extend(qname)
            qCams.append(qCam.cuda())
        qFeats = torch.cat(qFeats, 0)
        for (inputs, gname, _, gCam) in gallery_loader:
            # normalize gallery images
            inputs = inputs.cuda()
            inputs = (inputs - mean) / std
            gFeats.append(net(inputs)[0])
            gnames.extend(gname)
            gCams.append(gCam.cuda())
        gFeats = torch.cat(gFeats, 0)
        gCams, qCams = torch.cat(gCams).view(1, -1), torch.cat(qCams).view(-1, 1)
    distMat = calDist(qFeats, gFeats)
    # evaluate on the test set
    s = evaluator.evaMat(distMat, dataset.query, dataset.gallery)
    return testQImage, noiseQImage, s
def __init__(self, config):
    self.model_path = config.args['model_path']
    self.width = config.args['width']
    self.height = config.args['height']
    self.model = init_model(self.model_path)
    self.normalizer = T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    self.test_transformer = T.Compose([
        T.RectScale(self.height, self.width),
        T.ToTensor(),
        self.normalizer,
    ])
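# Hypothetical single-image helper (an assumed extension, not in the original class):
# shows how the test_transformer built in __init__ above could be applied at inference
# time; assumes self.model accepts a (1, 3, H, W) float tensor.
def extract_feature(self, image_path):
    import torch
    from PIL import Image
    img = Image.open(image_path).convert('RGB')
    tensor = self.test_transformer(img).unsqueeze(0)  # add a batch dimension: (1, 3, H, W)
    with torch.no_grad():
        return self.model(tensor)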
def get_data(name, split_id, data_dir, height, width, batch_size, num_instances,
             workers, combine_trainval):
    root = osp.join(data_dir, name)
    dataset = datasets.create(name, root, split_id=split_id)
    normalizer = T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    # load train_set, query_set, gallery_set
    mt_train_set = dataset.train
    mt_num_classes = dataset.num_train_tids_sub
    query_set = dataset.query
    gallery_set = dataset.gallery
    train_transformer = T.Compose([
        T.RandomSizedRectCrop(height, width),
        T.RandomHorizontalFlip(),
        T.ToTensor(),
        normalizer
    ])
    test_transformer = T.Compose([
        T.RectScale(height, width),
        T.ToTensor(),
        normalizer,
    ])
    # random identity sampling
    mt_train_set = flatten_dataset(mt_train_set)
    num_task = len(mt_num_classes)  # num_task equals the camera number; each camera is a task
    mt_train_loader = DataLoader(
        Preprocessor_Image(mt_train_set, root=dataset.dataset_dir, transform=train_transformer),
        batch_size=batch_size, num_workers=workers,
        sampler=RandomIdentitySampler(mt_train_set, num_instances, num_task),  # differs from the softmax-loss setup
        pin_memory=True, drop_last=True)
    query_set = flatten_dataset(query_set)
    gallery_set = flatten_dataset(gallery_set)
    test_set = list(set(query_set) | set(gallery_set))
    test_loader = DataLoader(
        Preprocessor_Image(test_set, root=dataset.dataset_dir, transform=test_transformer),
        batch_size=batch_size, num_workers=workers,
        shuffle=False, pin_memory=True)
    return mt_train_loader, mt_num_classes, test_loader, query_set, gallery_set
def get_data(name, split_id, data_dir, height, width, batch_size, workers,
             combine_trainval, np_ratio):
    root = osp.join(data_dir, name)
    dataset = datasets.create(name, root, split_id=split_id)
    normalizer = T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    train_set = dataset.trainval if combine_trainval else dataset.train
    train_transformer = T.Compose([
        T.RandomSizedRectCrop(height, width),
        T.RandomSizedEarser(),
        T.RandomHorizontalFlip(),
        T.ToTensor(),
        normalizer,
    ])
    test_transformer = T.Compose([
        T.RectScale(height, width),
        T.ToTensor(),
        normalizer,
    ])
    train_loader = DataLoader(
        Preprocessor(train_set, root=dataset.images_dir, transform=train_transformer),
        sampler=RandomPairSampler(train_set, neg_pos_ratio=np_ratio),
        batch_size=batch_size, num_workers=workers, pin_memory=False)
    val_loader = DataLoader(
        Preprocessor(dataset.val, root=dataset.images_dir, transform=test_transformer),
        batch_size=batch_size, num_workers=0,
        shuffle=False, pin_memory=False)
    test_loader = DataLoader(
        Preprocessor(list(set(dataset.query) | set(dataset.gallery)),
                     root=dataset.images_dir, transform=test_transformer),
        batch_size=batch_size, num_workers=0,
        shuffle=False, pin_memory=False)
    return dataset, train_loader, val_loader, test_loader
def get_dataloader(self, dataset, training=False):
    normalizer = T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    if training:
        transformer = T.Compose([
            T.RandomSizedRectCrop(self.data_height, self.data_width),  # e.g. data_height=256, data_width=128
            T.RandomHorizontalFlip(),  # random horizontal flip
            T.ToTensor(),              # convert a PIL Image or numpy.ndarray to a tensor
            normalizer,
        ])
        batch_size = self.batch_size   # e.g. 16
    else:
        transformer = T.Compose([
            T.RectScale(self.data_height, self.data_width),  # rescale to a fixed rectangle
            T.ToTensor(),
            normalizer,
        ])
        batch_size = self.eval_bs      # e.g. 64
    data_dir = self.data_dir           # e.g. dataset_all.image_dir
    data_loader = DataLoader(
        Preprocessor(dataset, root=data_dir,
                     num_samples=self.frames_per_video,
                     transform=transformer, is_training=training,
                     max_frames=self.max_frames),
        batch_size=batch_size, num_workers=self.data_workers,
        shuffle=training, pin_memory=True, drop_last=training)
    current_status = "Training" if training else "Testing"
    print("Create dataloader for {} with batch_size {}".format(current_status, batch_size))
    return data_loader
def get_data(name, split_id, data_dir, big_height, big_width, target_height,
             target_width, batch_size, num_instances, workers, combine_trainval):
    root = osp.join(data_dir, name)
    dataset = datasets.create(name, root, split_id=split_id, download=True)
    normalizer = T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    train_set = dataset.trainval if combine_trainval else dataset.train
    num_classes = (dataset.num_trainval_ids if combine_trainval
                   else dataset.num_train_ids)
    train_transformer = T.Compose([
        T.ResizeRandomCrop(big_height, big_width, target_height, target_width),
        T.RandomHorizontalFlip(),
        T.ToTensor(),
        normalizer,
        T.RandomErasing(0.5),
    ])
    test_transformer = T.Compose([
        T.RectScale(target_height, target_width),
        T.ToTensor(),
        normalizer,
    ])
    train_loader = DataLoader(
        Attribute_Preprocessor(train_set, root=dataset.images_dir, transform=train_transformer),
        batch_size=batch_size, num_workers=workers,
        sampler=RandomIdentityAttributeSampler(train_set, num_instances),
        pin_memory=True, drop_last=True)
    test_loader = DataLoader(
        Attribute_Preprocessor(list(set(dataset.query) | set(dataset.gallery)),
                               root=dataset.images_dir, transform=test_transformer),
        batch_size=batch_size, num_workers=workers,
        shuffle=False, pin_memory=True)
    return dataset, num_classes, train_loader, test_loader
def get_data(name, split_id, data_dir, height, width, batch_size, workers, combine_trainval):
    root = os.path.join(data_dir, name)
    dataset = datasets.create(name, root, split_id=split_id)
    train_set = dataset.trainval if combine_trainval else dataset.train
    num_classes = (dataset.num_trainval_ids if combine_trainval
                   else dataset.num_train_ids)
    train_transformer = t.Compose([
        t.RandomSizedRectCrop(height, width),
        t.RandomHorizontalFlip(),
        ColorHistograms()
    ])
    test_transformer = t.Compose([
        t.RectScale(height, width),
        ColorHistograms()
    ])
    train_loader = DataLoader(
        Preprocessor(train_set, root=dataset.images_dir, transform=train_transformer),
        batch_size=batch_size, num_workers=workers,
        shuffle=True, pin_memory=True, drop_last=True)
    val_loader = DataLoader(
        Preprocessor(dataset.val, root=dataset.images_dir, transform=test_transformer),
        batch_size=batch_size, num_workers=workers,
        shuffle=False, pin_memory=True)
    test_loader = DataLoader(
        Preprocessor(list(set(dataset.query) | set(dataset.gallery)),
                     root=dataset.images_dir, transform=test_transformer),
        batch_size=batch_size, num_workers=workers,
        shuffle=False, pin_memory=True)
    return dataset, num_classes, train_loader, val_loader, test_loader
def get_data(sourceName, mteName, split_id, data_dir, height, width, batch_size,
             workers, combine, num_instances=8):
    root = osp.join(data_dir, sourceName)
    rootMte = osp.join(data_dir, mteName)
    sourceSet = datasets.create(sourceName, root, num_val=0.1, split_id=split_id)
    mteSet = datasets.create(mteName, rootMte, num_val=0.1, split_id=split_id)
    num_classes = sourceSet.num_trainval_ids if combine else sourceSet.num_train_ids
    class_meta = mteSet.num_trainval_ids if combine else mteSet.num_train_ids
    normalizer = T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    test_transformer = T.Compose([
        T.RectScale(height, width),
        T.ToTensor(),
        normalizer,
    ])
    defen_train_transformer = T.Compose([
        Resize((height, width)),
        T.RandomSizedRectCrop(height, width),
        T.RandomHorizontalFlip(),
        T.ToTensor(),
        normalizer,
        T.RandomErasing(probability=0.5, sh=0.2, r1=0.3)
    ])
    meta_train_loader = DataLoader(
        Preprocessor(sourceSet.trainval, root=sourceSet.images_dir,
                     transform=defen_train_transformer),
        batch_size=batch_size, num_workers=workers,
        sampler=RandomIdentitySampler(sourceSet.trainval, num_instances),
        pin_memory=True, drop_last=True)
    meta_test_loader = DataLoader(
        Preprocessor(mteSet.trainval, root=mteSet.images_dir,
                     transform=defen_train_transformer),
        batch_size=batch_size, num_workers=workers,
        sampler=RandomIdentitySampler(mteSet.trainval, num_instances),
        pin_memory=True, drop_last=True)
    sc_test_loader = DataLoader(
        Preprocessor(list(set(sourceSet.query) | set(sourceSet.gallery)),
                     root=sourceSet.images_dir, transform=test_transformer),
        batch_size=batch_size, num_workers=workers,
        shuffle=False, pin_memory=True)
    return sourceSet, mteSet, num_classes, meta_train_loader, meta_test_loader, sc_test_loader, class_meta
def main(argv):
    # parse arguments
    parser = argparse.ArgumentParser(description='test part bilinear network')
    parser.add_argument('--exp-dir', type=str, default='logs/market1501/exp1')
    parser.add_argument('--target-epoch', type=int, default=750)
    parser.add_argument('--gpus', type=str, default='0')
    args = parser.parse_args(argv)

    # settings
    exp_dir = args.exp_dir
    target_epoch = args.target_epoch
    batch_size = 50
    gpu_ids = args.gpus
    set_paths('paths')
    os.environ['CUDA_VISIBLE_DEVICES'] = gpu_ids
    args = json.load(open(osp.join(exp_dir, "args.json"), "r"))

    # load data
    t = T.Compose([
        T.RectScale(args['height'], args['width']),
        T.CenterCrop((args['crop_height'], args['crop_width'])),
        T.ToTensor(),
        T.RGB_to_BGR(),
        T.NormalizeBy(255),
    ])
    dataset = datasets.create(args['dataset'], 'data/{}'.format(args['dataset']))
    dataset_ = Preprocessor(list(set(dataset.query) | set(dataset.gallery)),
                            root=dataset.images_dir, transform=t)
    dataloader = DataLoader(dataset_, batch_size=batch_size, shuffle=False)

    # load model
    model = models.create(args['arch'], dilation=args['dilation'],
                          use_relu=args['use_relu'], initialize=False).cuda()
    weight_file = osp.join(exp_dir, 'epoch_{}.pth.tar'.format(target_epoch))
    model.load(load_checkpoint(weight_file))
    model.eval()

    # evaluate
    evaluator = Evaluator(model)
    evaluator.evaluate(dataloader, dataset.query, dataset.gallery)
def get_data(args):
    (name, split_id, data_dir, height, width, batch_size,
     num_instances, workers, combine_trainval) = (
        args.dataset, args.split, args.data_dir, args.height, args.width,
        args.batch_size, args.num_instances, args.workers, args.combine_trainval,
    )
    pin_memory = args.pin_mem
    name_val = args.dataset_val or args.dataset
    npy = args.has_npy
    rand_ratio = args.random_ratio
    if isinstance(name, list):
        dataset = datasets.creates(name, split_id=split_id)
    else:
        dataset = datasets.create(name, split_id=split_id)
    normalizer = T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    test_transformer = T.Compose([
        T.RectScale(height, width),
        T.ToTensor(),
        normalizer,
    ])
    test_loader = DataLoader(
        Preprocessor(dataset.query, transform=test_transformer, has_npy=npy),
        batch_size=batch_size,  # * 2
        num_workers=workers, shuffle=False, pin_memory=False)
    return test_loader
def get_data(name, data_dir, height, width, ratios, batch_size, workers):
    root = osp.join(data_dir, name)
    root = data_dir
    dataset = datasets.create(name, root)
    normalizer = T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    num_classes = dataset.num_train_ids + 1  # plus one more label for the zero-padded feature
    test_transformer = T.Compose([
        T.RectScale(height, width),
        T.ToTensor(),
        normalizer,
    ])
    # one query transformer (and loader) per crop ratio
    query_transformers = []
    for ratio in ratios:
        query_transformers.append(T.Compose([
            T.ContVerticalCropDiscret(height, width, ratio),
            T.ToTensor(),
            normalizer,
        ]))
    query_loaders = []
    for query_transformer in query_transformers:
        query_loaders.append(DataLoader(
            Preprocessor(dataset.query,
                         root=osp.join(dataset.images_dir, dataset.query_path),
                         transform=query_transformer),
            batch_size=batch_size, num_workers=workers,
            shuffle=False, pin_memory=True))
    gallery_loader = DataLoader(
        Preprocessor(dataset.gallery,
                     root=osp.join(dataset.images_dir, dataset.gallery_path),
                     transform=test_transformer),
        batch_size=batch_size, num_workers=workers,
        shuffle=False, pin_memory=True)
    return dataset, num_classes, query_loaders, gallery_loader
def evaluate(args):
    # settings
    exp_dir = './logs/{}/{}'.format(args.dataset, args.exp)
    target_epoch = args.epoch
    batch_size = args.batchsize
    gpu_ids = args.gpus
    set_paths('paths')
    os.environ['CUDA_VISIBLE_DEVICES'] = gpu_ids
    args = json.load(open(osp.join(exp_dir, "args.json"), "r"))

    # load data
    t = T.Compose([
        T.RectScale(args['height'], args['width']),
        T.CenterCrop((args['crop_height'], args['crop_width'])),
        T.ToTensor(),
        T.RGB_to_BGR(),
        T.NormalizeBy(255),
    ])
    dataset = datasets.create(args['dataset'], 'data/{}'.format(args['dataset']))
    dataset_ = Preprocessor(list(set(dataset.query) | set(dataset.gallery)),
                            root=dataset.images_dir, transform=t)
    dataloader = DataLoader(dataset_, batch_size=batch_size, shuffle=False)

    # load model
    model = models.create(args['arch'], dilation=args['dilation'],
                          use_relu=args['use_relu'], initialize=False).cuda()
    weight_file = osp.join(exp_dir, 'epoch_{}.pth.tar'.format(target_epoch))
    model.load(load_checkpoint(weight_file))
    model.eval()

    # evaluate
    evaluator = Evaluator(model)
    evaluator.evaluate(dataloader, dataset.query, dataset.gallery)