def get_data(name, data_dir, height, width, batch_size, workers, pose_aug):
    root = osp.join(data_dir, name)
    dataset = datasets.create(name, root)

    # Use the combined trainval set for training by default; the Preprocessor
    # additionally loads pose annotations from dataset.poses_dir.
    train_loader = DataLoader(
        Preprocessor(dataset.trainval, root=dataset.images_dir,
                     with_pose=True, pose_root=dataset.poses_dir,
                     pid_imgs=dataset.trainval_query,
                     height=height, width=width, pose_aug=pose_aug),
        sampler=RandomPairSampler(dataset.trainval, neg_pos_ratio=3),
        batch_size=batch_size, num_workers=workers, pin_memory=False)

    test_transformer = T.Compose([
        T.RectScale(height, width),
        T.ToTensor(),
        T.Normalize(mean=[0.485, 0.456, 0.406],
                    std=[0.229, 0.224, 0.225]),
    ])

    test_loader = DataLoader(
        Preprocessor(list(set(dataset.query) | set(dataset.gallery)),
                     root=dataset.images_dir, transform=test_transformer),
        batch_size=batch_size, num_workers=workers,
        shuffle=False, pin_memory=False)

    return dataset, train_loader, test_loader
def get_data(name, split_id, data_dir, height, width, batch_size, workers,
             combine_trainval, np_ratio):
    root = osp.join(data_dir, name)
    dataset = datasets.create(name, root, split_id=split_id)

    normalizer = T.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225])

    train_set = dataset.trainval if combine_trainval else dataset.train

    train_transformer = T.Compose([
        T.RandomSizedRectCrop(height, width),
        T.RandomSizedEarser(),
        T.RandomHorizontalFlip(),
        T.ToTensor(),
        normalizer,
    ])

    test_transformer = T.Compose([
        T.RectScale(height, width),
        T.ToTensor(),
        normalizer,
    ])

    train_loader = DataLoader(
        Preprocessor(train_set, root=dataset.images_dir,
                     transform=train_transformer),
        sampler=RandomPairSampler(train_set, neg_pos_ratio=np_ratio),
        batch_size=batch_size, num_workers=workers, pin_memory=False)

    val_loader = DataLoader(
        Preprocessor(dataset.val, root=dataset.images_dir,
                     transform=test_transformer),
        batch_size=batch_size, num_workers=0,
        shuffle=False, pin_memory=False)

    test_loader = DataLoader(
        Preprocessor(list(set(dataset.query) | set(dataset.gallery)),
                     root=dataset.images_dir, transform=test_transformer),
        batch_size=batch_size, num_workers=0,
        shuffle=False, pin_memory=False)

    return dataset, train_loader, val_loader, test_loader
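# Hypothetical usage sketch, not part of the original code: it assumes the
# get_data above is importable from this project, that 'viper' is a registered
# dataset name for datasets.create, and that the image data lives under ./data.
if __name__ == '__main__':
    dataset, train_loader, val_loader, test_loader = get_data(
        name='viper', split_id=0, data_dir='./data',
        height=256, width=128, batch_size=64, workers=4,
        combine_trainval=True, np_ratio=3)
    # Number of batches per epoch for the pair-sampled training set and the
    # fixed val/test splits.
    print(len(train_loader), len(val_loader), len(test_loader))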
def test_random(self):
    dataset = VIPeR('/tmp/open-reid/viper', split_id=0, num_val=100,
                    download=True)
    sampler = RandomPairSampler(dataset.train, neg_pos_ratio=1)
    n = len(dataset.train)
    # With neg_pos_ratio=1, each anchor yields one positive and one negative pair.
    self.assertEqual(len(sampler), n * 2)
    sampler_iter = iter(sampler)
    for _ in range(n):
        # Positive pair: same identity, different image files.
        i, j = next(sampler_iter)
        self.assertEqual(dataset.train[i][1], dataset.train[j][1])
        self.assertNotEqual(dataset.train[i][0], dataset.train[j][0])
        # Negative pair: different identities.
        i, j = next(sampler_iter)
        self.assertNotEqual(dataset.train[i][1], dataset.train[j][1])
def get_data(name, split_id, data_dir, height, width, batch_size, workers,
             combine_trainval, np_ratio, model, instance_mode, eraser):
    root = osp.join(data_dir, name)
    dataset = datasets.create(name, root, split_id=split_id)

    normalizer = T.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225])

    train_set = dataset.trainval if combine_trainval else dataset.train

    if eraser:
        train_transformer = T.Compose([
            T.RandomSizedRectCrop(height, width),
            T.RandomSizedEarser(),
            T.RandomHorizontalFlip(),
            T.ToTensor(),
            normalizer,
        ])
    else:
        train_transformer = T.Compose([
            T.RandomSizedRectCrop(height, width),
            T.RandomHorizontalFlip(),
            T.ToTensor(),
            normalizer,
        ])

    test_transformer = T.Compose([
        T.RectScale(height, width),
        T.ToTensor(),
        normalizer,
    ])

    if model == 'Single':
        # Frame-level metadata guides triplet sampling when video.json exists.
        video_dict = None
        if osp.isfile(osp.join(root, 'video.json')):
            video_dict = read_json(osp.join(root, 'video.json'))
        sampler = RandomTripletSampler(train_set, video_dict=video_dict,
                                       skip_frames=10, inter_rate=0.9,
                                       inst_sample=instance_mode)
    elif model == 'Siamese':
        sampler = RandomPairSampler(train_set, neg_pos_ratio=np_ratio)
    else:
        raise ValueError('unrecognized model: {}'.format(model))

    train_loader = DataLoader(
        Preprocessor(train_set, name, root=dataset.images_dir,
                     transform=train_transformer),
        sampler=sampler, batch_size=batch_size,
        num_workers=workers, pin_memory=False)

    val_loader = DataLoader(
        Preprocessor(dataset.val, name, root=dataset.images_dir,
                     transform=test_transformer),
        batch_size=batch_size, num_workers=workers,
        shuffle=False, pin_memory=False)

    test_loader = DataLoader(
        Preprocessor(list(set(dataset.query) | set(dataset.gallery)),
                     name, root=dataset.images_dir,
                     transform=test_transformer),
        batch_size=batch_size, num_workers=workers,
        shuffle=False, pin_memory=False)

    return dataset, train_loader, val_loader, test_loader
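# Hypothetical usage sketch, not part of the original code: it assumes this
# get_data variant and its samplers are importable, that 'market1501' is a
# registered dataset name, and that instance_mode is a boolean flag (its exact
# type is not shown above). It only illustrates how the 'model' argument
# selects the sampler: 'Siamese' draws positive/negative pairs at np_ratio,
# while 'Single' draws triplets, optionally guided by video.json in the root.
if __name__ == '__main__':
    for mode in ('Siamese', 'Single'):
        dataset, train_loader, val_loader, test_loader = get_data(
            name='market1501', split_id=0, data_dir='./data',
            height=256, width=128, batch_size=64, workers=4,
            combine_trainval=True, np_ratio=3, model=mode,
            instance_mode=False, eraser=True)
        print(mode, len(train_loader))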