def process_dir(self, dir_path, file_path, if_test=False):
    """Parse an annotation file and load samples (with contour maps) into memory.

    Each line of ``file_path`` is ``"<relative_image_path> <person_id>"``.

    Args:
        dir_path (str): root directory the relative image paths are joined to.
        file_path (str): annotation file, one ``"rel_path pid"`` pair per line.
        if_test (bool): when False, person ids are relabeled to a contiguous
            ``0..N-1`` range for training; when True, raw ids are kept.

    Returns:
        list: tuples ``(img_path, pid, camid, img, contour_img)``.
    """
    # Fix: close the file deterministically instead of leaking the handle.
    with open(file_path, 'r') as f:
        datalist = f.read().splitlines()

    # First pass: collect distinct person ids so training labels can be
    # remapped to a contiguous range.
    pid_container = set()
    for item in datalist:
        _, pid = item.split()
        pid_container.add(int(pid))
    pid2label = {pid: label for label, pid in enumerate(pid_container)}

    data = []
    for item in datalist:
        img_rel_path, pid = item.split()
        img_path = osp.join(dir_path, img_rel_path)
        img = read_image(img_path, True)
        # Contour maps mirror the rgb directory layout.
        contour_path = img_path.replace('/rgb/', '/contour/')
        contour_img = read_image(contour_path)
        pid = int(pid)
        # Each sample gets a unique, monotonically increasing camera id
        # drawn from the shared instance counter.
        camid = self.cam_var
        self.cam_var += 1
        if not if_test:
            pid = pid2label[pid]  # relabel only for training
        # load data into memory
        data.append((img_path, pid, camid, img, contour_img))
    return data
def process_dir(self, dir_path, file_path, if_test=False):
    """Parse an annotation file and load samples, filtering rare identities.

    Each line of ``file_path`` is ``"<relative_image_path> <person_id>"``.
    Images live under ``<dir_path>/rgb/<train|test>/``; the camera id is
    parsed from the filename.

    Args:
        dir_path (str): dataset root; ``rgb/train`` or ``rgb/test`` is appended.
        file_path (str): annotation file, one ``"rel_path pid"`` pair per line.
        if_test (bool): when False, only identities with at least
            ``self.at_least_num`` samples are kept and their ids are relabeled
            to ``0..N-1``; when True, all samples are kept with raw ids.

    Returns:
        list: tuples ``(img_path, pid, camid, img, contour_img)``.
    """
    # Fix: close the file deterministically instead of leaking the handle.
    with open(file_path, 'r') as f:
        datalist = f.read().splitlines()

    # Specify data dir
    sub_dir = 'test' if if_test else 'train'
    dir_path = osp.join(dir_path, 'rgb', sub_dir)

    # Count samples per person id.
    pid_sample_cnts = dict()
    for item in datalist:
        _, pid = item.split()
        pid = int(pid)
        pid_sample_cnts[pid] = pid_sample_cnts.get(pid, 0) + 1

    # Keep only identities with enough samples, then relabel to 0..N-1.
    pid_container = set()
    for pid, cnt in pid_sample_cnts.items():
        if cnt >= self.at_least_num:
            pid_container.add(pid)
    pid2label = {pid: label for label, pid in enumerate(pid_container)}

    data = []
    for item in datalist:
        img_rel_path, pid = item.split()
        pid = int(pid)
        # Camera id is the third '_'-separated token of the filename;
        # index starts from 0.
        camid = int(osp.basename(img_rel_path).split('_')[2]) - 1
        img_path = osp.join(dir_path, img_rel_path)
        img = read_image(img_path, True)
        # Contour maps mirror the rgb directory layout.
        contour_path = img_path.replace('/rgb/', '/contour/')
        contour_img = read_image(contour_path)
        if not if_test:
            # Training: drop identities below the sample-count threshold.
            if pid in pid2label:
                data.append(
                    (img_path, pid2label[pid], camid, img, contour_img))
        else:
            data.append((img_path, pid, camid, img, contour_img))
    return data
def process_dir(self, dir_path, file_path, if_test=False):
    """Parse a 3-field annotation file and load samples with cloth labels.

    Each line of ``file_path`` is
    ``"<relative_image_path> <person_id> <camera_id>"``; the cloth id is
    parsed from the filename. Samples whose contour map cannot be read are
    skipped (best-effort loading).

    Args:
        dir_path (str): root directory the relative image paths are joined to.
        file_path (str): annotation file, three whitespace-separated fields
            per line.
        if_test (bool): when False, only identities with at least
            ``self.at_least_num`` samples are kept and their ids are relabeled
            to ``0..N-1``; when True, all samples are kept with raw ids.

    Returns:
        list: tuples ``(img_path, pid, camid, clothid, img, contour_img)``.
    """
    # Fix: close the file deterministically instead of leaking the handle.
    with open(file_path, 'r') as f:
        datalist = f.read().splitlines()

    # Count samples per person id.
    pid_sample_cnts = dict()
    for item in datalist:
        _, pid, _ = item.split()
        pid = int(pid)
        pid_sample_cnts[pid] = pid_sample_cnts.get(pid, 0) + 1

    # Keep only identities with enough samples, then relabel to 0..N-1.
    pid_container = set()
    for pid, cnt in pid_sample_cnts.items():
        if cnt >= self.at_least_num:
            pid_container.add(pid)
    pid2label = {pid: label for label, pid in enumerate(pid_container)}

    data = []
    for item in datalist:
        img_rel_path, pid, camid = item.split()
        pid, camid = int(pid), int(camid)
        # Cloth id is the second '_'-separated token of the filename.
        clothid = int(osp.basename(img_rel_path).split('_')[1])
        img_path = osp.join(dir_path, img_rel_path)
        img = read_image(img_path, True)
        # Contour maps mirror the rgb layout but are stored as .jpg.
        contour_path = img_path.replace('/rgb/', '/contour/').replace(
            '.png', '.jpg')
        try:
            contour_img = read_image(contour_path)
        except Exception:
            # Deliberate best-effort: skip samples with a missing or
            # unreadable contour map rather than aborting the whole load.
            continue
        if not if_test:
            # Training: drop identities below the sample-count threshold.
            if pid in pid2label:
                data.append(
                    (img_path, pid2label[pid], camid, clothid, img,
                     contour_img))
        else:
            data.append(
                (img_path, pid, camid, clothid, img, contour_img))
    return data
def process_dir(self, dir_path, file_path, if_test=False):
    """Parse an annotation file, mapping rooms to camera ids.

    Each line of ``file_path`` is ``"<relative_image_path> <person_id>"``.
    The camera id comes from the ``ROOM2CAMID`` lookup: for test data the
    room name is the grandparent directory of the image; for training data
    it is the filename prefix before the first ``'_'``. The training split
    is randomly subsampled to half its size.

    Args:
        dir_path (str): root directory the relative image paths are joined to.
        file_path (str): annotation file, one ``"rel_path pid"`` pair per line.
        if_test (bool): when False, ids are relabeled to ``0..N-1`` and the
            result is randomly halved; when True, raw ids and all samples
            are kept.

    Returns:
        list: tuples ``(img_path, pid, camid, img, contour_img)``.
    """
    # Fix: close the file deterministically instead of leaking the handle.
    with open(file_path, 'r') as f:
        datalist = f.read().splitlines()

    # First pass: collect distinct person ids for contiguous relabeling.
    pid_container = set()
    for item in datalist:
        _, pid = item.split()
        pid_container.add(int(pid))
    pid2label = {pid: label for label, pid in enumerate(pid_container)}

    data = []
    for item in datalist:
        img_rel_path, pid = item.split()
        img_path = osp.join(dir_path, img_rel_path)
        img = read_image(img_path, True)
        # Contour maps mirror the rgb directory layout.
        contour_path = img_path.replace('/rgb/', '/contour/')
        contour_img = read_image(contour_path)
        pid = int(pid)
        if if_test:
            # Test layout: room name is the grandparent directory.
            camid = ROOM2CAMID[img_path.split('/')[-3]]
        else:
            # Train layout: room name prefixes the filename before '_'.
            cam = img_path.split('/')[-1].split('_')[0]
            camid = ROOM2CAMID[cam]
        # ROOM2CAMID is expected to map to exactly three cameras.
        assert 0 <= camid <= 2
        if not if_test:
            pid = pid2label[pid]
        # load data into memory
        data.append((img_path, pid, camid, img, contour_img))

    if not if_test:
        # Randomly keep half the training samples (fixed subsample ratio).
        dataset_len = len(data)
        sample_factor = 0.5
        sample_num = int(sample_factor * dataset_len)
        random.shuffle(data)
        data = data[:sample_num]
    return data
def __getitem__(self, index):
    """Sample ``self.seq_len`` frames from the tracklet at ``index``.

    Args:
        index (int): tracklet index into ``self.data``, whose entries are
            ``(img_paths, pid, camid)``.

    Returns:
        tuple: ``(imgs, pid, camid)`` where ``imgs`` is a stacked tensor of
        shape ``(seq_len, ...)`` (or ``(num_imgs, ...)`` for the ``'all'``
        sampling method).

    Raises:
        ValueError: if ``self.sample_method`` is not one of
        ``'random'``, ``'evenly'``, ``'all'``.
    """
    img_paths, pid, camid = self.data[index]
    num_imgs = len(img_paths)

    if self.sample_method == 'random':
        # Randomly samples seq_len images from a tracklet of length
        # num_imgs; if num_imgs is smaller than seq_len, replicates images.
        indices = np.arange(num_imgs)
        replace = num_imgs < self.seq_len
        indices = np.random.choice(
            indices, size=self.seq_len, replace=replace
        )
        # Sort indices to keep temporal order (comment out to be
        # order-agnostic).
        indices = np.sort(indices)
    elif self.sample_method == 'evenly':
        # Evenly samples seq_len images from a tracklet.
        if num_imgs >= self.seq_len:
            # Trim so the stride divides evenly, then use an *integer*
            # step: np.arange with a float step can yield an off-by-one
            # element count due to floating-point rounding.
            num_imgs -= num_imgs % self.seq_len
            indices = np.arange(0, num_imgs, num_imgs // self.seq_len)
        else:
            # Tracklet shorter than seq_len: replicate the last image
            # until the seq_len requirement is satisfied.
            indices = np.arange(0, num_imgs)
            num_pads = self.seq_len - num_imgs
            indices = np.concatenate(
                [
                    indices,
                    np.full(num_pads, num_imgs - 1, dtype=np.int32)
                ]
            )
        assert len(indices) == self.seq_len
    elif self.sample_method == 'all':
        # Samples all images in a tracklet; batch_size must be set to 1.
        indices = np.arange(num_imgs)
    else:
        raise ValueError(
            'Unknown sample method: {}'.format(self.sample_method)
        )

    imgs = []
    for idx in indices:
        img_path = img_paths[int(idx)]
        img = util.read_image(img_path)
        if self.transform is not None:
            img = self.transform(img)
        img = img.unsqueeze(0)  # img must be torch.Tensor
        imgs.append(img)
    imgs = torch.cat(imgs, dim=0)
    return imgs, pid, camid
def __getitem__(self, index):
    """Load one sample and apply the optional transform.

    Args:
        index (int): position into ``self.data``, whose entries are
            ``(img_path, pid, camid)``.

    Returns:
        tuple: ``(img, pid, camid, img_path)``.
    """
    img_path, pid, camid = self.data[index]
    img = util.read_image(img_path)
    transform = self.transform
    img = transform(img) if transform is not None else img
    return img, pid, camid, img_path