def __getitem__(self, index):
    """Load one sample: image, per-joint heatmaps, center map and bounding box.

    Returns (img, heatmap, centermap, img_path, 0, box); the literal 0 is a
    placeholder that keeps the tuple shape consistent with sibling datasets.
    """
    img_path = self.img_list[index]
    # Read and resize to the fixed 368x368 network input size.
    img = np.array(cv2.resize(cv2.imread(img_path), (368, 368)), dtype=np.float32)
    kpt = self.kpt_list[index]
    center = self.center_list[index]
    scale = self.scale_list[index]

    # Data augmentation: the transformer jointly warps image, keypoints and center.
    img, kpt, center = self.transformer(img, kpt, center, scale)
    height, width, _ = img.shape

    # limbsMap = getLimbs(img, kpt, height, width, self.stride, self.bodyParts, 25, 1)

    # FIX: the bounding box must be computed from the *transformed* keypoints
    # (kpt), not from the untouched self.kpt_list[index]; otherwise the box is
    # misaligned with the augmented image.
    box = getBoundingBox(img, kpt, height, width, self.stride)

    # One channel per joint plus channel 0 for background, at stride-reduced
    # resolution (368 / stride, e.g. 46x46).
    heatmap = np.zeros((int(height / self.stride), int(width / self.stride),
                        int(len(kpt) + 1)), dtype=np.float32)
    for i in range(len(kpt)):
        # Scale keypoint coordinates from input to heatmap resolution.
        x = int(kpt[i][0]) * 1.0 / self.stride
        y = int(kpt[i][1]) * 1.0 / self.stride
        heat_map = guassian_kernel(size_h=int(height / self.stride),
                                   size_w=int(width / self.stride),
                                   center_x=x, center_y=y,
                                   sigma=self.sigma)
        # Clamp the kernel to [0, 1] and zero out the negligible tail.
        heat_map[heat_map > 1] = 1
        heat_map[heat_map < 0.0099] = 0
        heatmap[:, :, i + 1] = heat_map
    # Background channel is the complement of the strongest joint response.
    heatmap[:, :, 0] = 1.0 - np.max(heatmap[:, :, 1:], axis=2)

    # Full-resolution center map marking the person position.
    centermap = np.zeros((height, width, 1), dtype=np.float32)
    center_map = guassian_kernel(size_h=height, size_w=width,
                                 center_x=center[0], center_y=center[1],
                                 sigma=3)
    center_map[center_map > 1] = 1
    center_map[center_map < 0.0099] = 0
    centermap[:, :, 0] = center_map

    # Normalize pixels to roughly [-0.5, 0.5] and convert targets to tensors.
    img = Mytransforms.normalize(Mytransforms.to_tensor(img),
                                 [128.0, 128.0, 128.0],
                                 [256.0, 256.0, 256.0])
    heatmap = Mytransforms.to_tensor(heatmap)
    centermap = Mytransforms.to_tensor(centermap)
    # limbsMap = Mytransforms.to_tensor(limbsMap)
    box = Mytransforms.to_tensor(box)

    return img, heatmap, centermap, img_path, 0, box
def __getitem__(self, index):
    """Load an annotated sample, resize to 368x368 and build training targets.

    Walks backwards from ``index`` until a sample whose label .png exists is
    found (assumes at least one earlier valid sample; index - 1 may wrap to
    the end of the list, which is harmless for training).
    Returns (img, heatmap, centermap, img_path).
    """
    variable = self.anno[self.img_List[index]]
    # Skip samples whose label file is missing on disk.
    while not os.path.isfile(self.labels_dir + variable['img_paths'][:-4] + '.png'):
        index = index - 1
        variable = self.anno[self.img_List[index]]

    img_path = self.images_dir + variable['img_paths']

    # BBox was added to the labels by the authors to perform additional
    # training and testing, as referred in the paper.
    # Intentionally left as comment since it is not part of the dataset.
    # bbox = np.load(self.labels_dir + "BBOX/" + variable['img_paths'][:-4] + '.npy')

    points = torch.Tensor(variable['joint_self'])
    center = torch.Tensor(variable['objpos'])
    scale = variable['scale_provided']

    # MPII-style adjustment: nudge the center down and enlarge the scale so
    # the whole person fits the crop. Single person only.
    if center[0] != -1:
        center[1] = center[1] + 15 * scale
        scale = scale * 1.25

    img = cv2.imread(img_path)

    # img, upperLeft, bottomRight, points, center = crop(img, points, center, scale, [self.height, self.width])
    kpt = points
    # img, kpt, center = self.transformer(img, points, center)

    # Resize to the fixed network input, rescaling keypoints accordingly.
    # NOTE: kpt aliases `points`, so the tensor is scaled in place.
    if img.shape[0] != 368 or img.shape[1] != 368:
        kpt[:, 0] = kpt[:, 0] * (368 / img.shape[1])
        kpt[:, 1] = kpt[:, 1] * (368 / img.shape[0])
        img = cv2.resize(img, (368, 368))
    height, width, _ = img.shape

    # One channel per joint plus background channel 0, at stride resolution.
    heatmap = np.zeros((int(height / self.stride), int(width / self.stride),
                        int(len(kpt) + 1)), dtype=np.float32)
    for i in range(len(kpt)):
        # resize from 368 to 46
        x = int(kpt[i][0]) * 1.0 / self.stride
        y = int(kpt[i][1]) * 1.0 / self.stride
        heat_map = guassian_kernel(size_h=int(height / self.stride),
                                   size_w=int(width / self.stride),
                                   center_x=x, center_y=y,
                                   sigma=self.sigma)
        # Clamp to [0, 1] and zero out the negligible tail.
        heat_map[heat_map > 1] = 1
        heat_map[heat_map < 0.0099] = 0
        heatmap[:, :, i + 1] = heat_map
    heatmap[:, :, 0] = 1.0 - np.max(heatmap[:, :, 1:], axis=2)  # for background

    # Center map at stride resolution (unlike sibling datasets that build it
    # at full resolution).
    centermap = np.zeros((int(height / self.stride), int(width / self.stride), 1),
                         dtype=np.float32)
    center_map = guassian_kernel(size_h=int(height / self.stride),
                                 size_w=int(width / self.stride),
                                 center_x=int(center[0] / self.stride),
                                 center_y=int(center[1] / self.stride),
                                 sigma=3)
    center_map[center_map > 1] = 1
    center_map[center_map < 0.0099] = 0
    centermap[:, :, 0] = center_map

    # FIX: removed unused locals (`scale_factor`, `nParts`) and a redundant
    # second cv2.imread of the same image whose result was never used.
    img = Mytransforms.normalize(Mytransforms.to_tensor(img),
                                 [128.0, 128.0, 128.0],
                                 [256.0, 256.0, 256.0])
    heatmap = Mytransforms.to_tensor(heatmap)
    centermap = Mytransforms.to_tensor(centermap)

    return img, heatmap, centermap, img_path
def __getitem__(self, index):
    """Load a frame, crop the bottom-right 368x368 region and build targets.

    Returns (img, heatmap, centermap, img_path, 0, box); the 0 keeps the
    tuple shape consistent with sibling datasets.
    """
    im = cv2.imread(self.img_List[index])
    if im is None:
        # Unreadable frame: report it and fall back to the previous one.
        print(self.img_List[index])
        im = cv2.imread(self.img_List[index - 1])
    img = np.array(im, dtype=np.float32)

    # FIX: copy the keypoints before shifting them. The original aliased
    # self.kps[index], so every pass over a sample shifted the *stored*
    # annotations again (assumes numpy-array annotations — TODO confirm).
    kps = np.copy(self.kps[index])

    # Keep the bottom-right 368x368 crop and shift keypoints into it.
    shift = [img.shape[1] - 368, img.shape[0] - 368]
    img = img[-368:, -368:, :]
    kps[:, 0] = kps[:, 0] - shift[0]
    kps[:, 1] = kps[:, 1] - shift[1]

    center = {}
    center[0] = [img.shape[0] / 2, img.shape[1] / 2]

    # expand dataset
    # if self.is_train == "Train":
    #     img, kps, center = self.transform(img, kps, center)

    height, width, _ = img.shape
    kps[kps < 0] = 0  # clamp joints pushed outside the crop

    # limbsMap = getLimbs(img, kpt, height, width, 8, self.bodyParts, self.parts_num, 1)
    box = getBoundingBox(img, kps, height, width, 8)

    # One channel per joint plus background channel 0, at stride-8 resolution.
    heatmap = np.zeros((46, 46, int(len(kps) + 1)), dtype=np.float32)
    for i in range(len(kps)):
        # resize from 368 to 46
        x = int(kps[i][0]) * 1.0 / 8
        y = int(kps[i][1]) * 1.0 / 8
        heat_map = guassian_kernel(size_h=368 / 8, size_w=368 / 8,
                                   center_x=x, center_y=y, sigma=self.sigma)
        heat_map[heat_map > 1] = 1
        heat_map[heat_map < 0.0099] = 0
        heatmap[:, :, i + 1] = heat_map
    heatmap[:, :, 0] = 1.0 - np.max(heatmap[:, :, 1:], axis=2)  # for background

    # Full-resolution center map; the crop center is fixed at (184, 184).
    centermap = np.zeros((height, width, 1), dtype=np.float32)
    center_map = guassian_kernel(size_h=height, size_w=width,
                                 center_x=184, center_y=184, sigma=3)
    center_map[center_map > 1] = 1
    center_map[center_map < 0.0099] = 0
    centermap[:, :, 0] = center_map

    img = Mytransforms.normalize(Mytransforms.to_tensor(img),
                                 [128.0, 128.0, 128.0],
                                 [256.0, 256.0, 256.0])
    heatmap = Mytransforms.to_tensor(heatmap)
    centermap = Mytransforms.to_tensor(centermap)
    # limbsMap = Mytransforms.to_tensor(limbsMap)
    box = Mytransforms.to_tensor(box)

    # FIX: removed an unreachable second return statement that followed this one.
    return img, heatmap, centermap, self.img_List[index], 0, box
def __getitem__(self, index):
    """Load a frame and its .mat keypoint labels; build heatmap targets.

    Returns (img, heatmap, centermap, img_path, 0, box); the 0 keeps the
    tuple shape consistent with sibling datasets.
    """
    im = cv2.imread(self.img_List[index])
    if im is None:
        # Unreadable frame: report it and fall back to the previous one.
        print(self.img_List[index])
        im = cv2.imread(self.img_List[index - 1])
    img = np.array(im, dtype=np.float32)

    # Derive the label path by string surgery on the image path:
    # .../images/<seq>/<frame>.<ext> -> .../labels/<seq>.mat
    prefix_length = self.img_List[index].rfind('images')
    sufix_start = self.img_List[index].rfind('/')
    frame_number = int(self.img_List[index][sufix_start + 1:-4])
    label_file = self.img_List[index][:prefix_length] + "labels" + \
        self.img_List[index][prefix_length + 6:sufix_start + 1] + \
        self.img_List[index][prefix_length + 11:sufix_start] + ".mat"

    # FIX: load the .mat file once instead of twice per sample.
    labels = scipy.io.loadmat(label_file)['arr']
    if labels[frame_number] is None:
        # Missing annotation: fall back to the previous frame.
        print(self.img_List[index])
        kps2 = labels[frame_number - 1]
        im = cv2.imread(self.img_List[index - 1])
        # FIX: rebuild img from the fallback frame; the original re-read `im`
        # here but kept using the stale `img` built above.
        img = np.array(im, dtype=np.float32)
    else:
        kps2 = labels[frame_number]

    # Remove Knee, ankle and foot detections (not visible in the image)
    kps2 = np.delete(kps2, [26, 27, 28, 29, 30, 31, 34, 35, 36, 37, 38, 39],
                     axis=0)

    # Flat (x0, y0, x1, y1, ...) label vector -> (parts_num, 2) array.
    kpt = np.zeros((self.parts_num, 2))
    for i in range(0, self.parts_num):
        kpt[i, 0] = kps2[2 * i]
        kpt[i, 1] = kps2[2 * i + 1]

    # expand dataset
    # if self.is_train == "Train":
    #     img, kpt, center = self.transform(img, kpt, center)

    height, width, _ = img.shape
    kpt[kpt < 0] = 0  # clamp joints outside the image

    # limbsMap = getLimbs(img, kpt, height, width, 8, self.bodyParts, self.parts_num, 1)
    box = getBoundingBox(img, kpt, height, width, 8)

    # One channel per joint plus background channel 0, at stride-8 resolution.
    heatmap = np.zeros((int(height / 8), int(width / 8), int(len(kpt) + 1)),
                       dtype=np.float32)
    for i in range(len(kpt)):
        # resize from 368 to 46
        x = int(kpt[i][0]) * 1.0 / 8
        y = int(kpt[i][1]) * 1.0 / 8
        heat_map = guassian_kernel(size_h=height / 8, size_w=width / 8,
                                   center_x=x, center_y=y, sigma=self.sigma)
        heat_map[heat_map > 1] = 1
        heat_map[heat_map < 0.0099] = 0
        heatmap[:, :, i + 1] = heat_map
    heatmap[:, :, 0] = 1.0 - np.max(heatmap[:, :, 1:], axis=2)  # for background

    # Full-resolution center map; the center is fixed at (184, 184).
    centermap = np.zeros((height, width, 1), dtype=np.float32)
    center_map = guassian_kernel(size_h=height, size_w=width,
                                 center_x=184, center_y=184, sigma=3)
    center_map[center_map > 1] = 1
    center_map[center_map < 0.0099] = 0
    centermap[:, :, 0] = center_map

    # FIX: removed unused locals `kps` and `center` from the original.
    img = Mytransforms.normalize(Mytransforms.to_tensor(img),
                                 [128.0, 128.0, 128.0],
                                 [256.0, 256.0, 256.0])
    heatmap = Mytransforms.to_tensor(heatmap)
    centermap = Mytransforms.to_tensor(centermap)
    # limbsMap = Mytransforms.to_tensor(limbsMap)
    box = Mytransforms.to_tensor(box)
    return img, heatmap, centermap, self.img_List[index], 0, box
def __getitem__(self, index):
    """Load a sample with segmentation labels and a person bounding box.

    Pixels outside the bounding box that contains the annotated person
    center are whited out, so only the target person remains visible.
    Returns (img, heatmap, centermap, img_path, orig_img, segmented, box).
    """
    variable = self.anno[self.img_List[index]]
    # Skip samples whose label file is missing on disk.
    while not os.path.isfile(self.labels_dir + variable['img_paths'][:-4] + '.png'):
        index = index - 1
        variable = self.anno[self.img_List[index]]

    img_path = self.images_dir + variable['img_paths']
    segmented = cv2.imread(self.labels_dir + "segmented/" +
                           variable['img_paths'][:-4] + '.png')
    bbox = np.load(self.labels_dir + "BBOX/" + variable['img_paths'][:-4] + '.npy')

    points = torch.Tensor(variable['joint_self'])
    center = torch.Tensor(variable['objpos'])
    scale = variable['scale_provided']

    # MPII-style adjustment: nudge the center down and enlarge the scale so
    # the whole person fits the crop. Single person only.
    if center[0] != -1:
        center[1] = center[1] + 15 * scale
        scale = scale * 1.25

    img = cv2.imread(img_path)

    # Find the bounding box containing the annotated center and white out
    # everything outside it (one box per person; first match wins).
    box = np.zeros((2, 2))
    for i in range(bbox.shape[0]):
        if center[0] > bbox[i, 0] and center[0] < bbox[i, 2] and\
           center[1] > bbox[i, 1] and center[1] < bbox[i, 3]:
            upperLeft = bbox[i, 0:2].astype(int)
            bottomRight = bbox[i, -2:].astype(int)
            box = bbox[i, :]
            img[:, 0:upperLeft[0], :] = np.ones(
                img[:, 0:upperLeft[0], :].shape) * 255
            img[0:upperLeft[1], :, :] = np.ones(
                img[0:upperLeft[1], :, :].shape) * 255
            img[:, bottomRight[0]:, :] = np.ones(
                img[:, bottomRight[0]:, :].shape) * 255
            img[bottomRight[1]:, :, :] = np.ones(
                img[bottomRight[1]:, :, :].shape) * 255
            break

    # img, upperLeft, bottomRight, points, center = crop(img, points, center, scale, [self.height, self.width])
    kpt = points
    # img, kpt, center = self.transformer(img, points, center)

    # Resize to the fixed network input, rescaling keypoints accordingly.
    # NOTE: kpt aliases `points`, so the tensor is scaled in place.
    if img.shape[0] != 368 or img.shape[1] != 368:
        kpt[:, 0] = kpt[:, 0] * (368 / img.shape[1])
        kpt[:, 1] = kpt[:, 1] * (368 / img.shape[0])
        img = cv2.resize(img, (368, 368))
    height, width, _ = img.shape

    # One channel per joint plus background channel 0, at stride resolution.
    heatmap = np.zeros((int(height / self.stride), int(width / self.stride),
                        int(len(kpt) + 1)), dtype=np.float32)
    for i in range(len(kpt)):
        # resize from 368 to 46
        x = int(kpt[i][0]) * 1.0 / self.stride
        y = int(kpt[i][1]) * 1.0 / self.stride
        heat_map = guassian_kernel(size_h=int(height / self.stride),
                                   size_w=int(width / self.stride),
                                   center_x=x, center_y=y, sigma=self.sigma)
        heat_map[heat_map > 1] = 1
        heat_map[heat_map < 0.0099] = 0
        heatmap[:, :, i + 1] = heat_map
    heatmap[:, :, 0] = 1.0 - np.max(heatmap[:, :, 1:], axis=2)  # for background

    # Center map at stride resolution.
    centermap = np.zeros((int(height / self.stride), int(width / self.stride), 1),
                         dtype=np.float32)
    center_map = guassian_kernel(size_h=int(height / self.stride),
                                 size_w=int(width / self.stride),
                                 center_x=int(center[0] / self.stride),
                                 center_y=int(center[1] / self.stride),
                                 sigma=3)
    center_map[center_map > 1] = 1
    center_map[center_map < 0.0099] = 0
    centermap[:, :, 0] = center_map

    # Unmasked image at original resolution, returned alongside the
    # masked/resized network input.
    orig_img = cv2.imread(img_path)

    # FIX: removed unused locals (`scale_factor`, `nParts`) from the original.
    img = Mytransforms.normalize(Mytransforms.to_tensor(img),
                                 [128.0, 128.0, 128.0],
                                 [256.0, 256.0, 256.0])
    heatmap = Mytransforms.to_tensor(heatmap)
    centermap = Mytransforms.to_tensor(centermap)
    segmented = Mytransforms.to_tensor(segmented)
    # cv2.imwrite("/home/bm3768/Desktop/Pose/Posezilla/samples/2.png",segmented)
    return img, heatmap, centermap, img_path, orig_img, segmented, box
def __getitem__(self, index):
    """Load a multi-person PoseTrack sample and build summed heatmap targets.

    Builds one (H/stride, W/stride, 18) heatmap stack per person (17 joints
    + background), then sums the stacks across people into a single heatmap.
    Returns (img, heatmap, centermap, items, 0, box), where `items` is the
    absolute image path and 0 is a placeholder.

    NOTE(review): the dataset root is a hard-coded absolute path — this only
    works on the original author's machine.
    """
    if self.is_train:
        items = "/home/bm3768/Desktop/Pose/dataset/PoseTrack/" + self.train_list[
            index]
    else:
        items = "/home/bm3768/Desktop/Pose/dataset/PoseTrack/" + self.val_list[
            index]
    im = cv2.imread(items)
    if im is None:
        # Unreadable frame: report it (no fallback here; img build below
        # would fail on a None image).
        print(items)
    img = np.array(im, dtype=np.float32)
    # presumably kps is (num_people, 51) with 17 (x, y, visibility) triplets
    # per person — TODO confirm against the annotation loader.
    kps = np.asarray(self.keypoints[index])
    center = {}
    center[0] = [img.shape[0] / 2, img.shape[1] / 2]
    # print("kps ", kps.shape)

    # Reshape each person's flat keypoint vector into (17, 3).
    kpt = np.zeros((kps.shape[0], 17, 3))
    for i in range(kps.shape[0]):
        points = np.reshape(kps[i], (17, 3))
        kpt[i] = points
    # Flatten all people into one (num_people * 17, 3) list because the
    # transformer operates on a single keypoint array.
    kpts = np.zeros((kpt.shape[0] * 17, 3))
    for i in range(kpt.shape[0]):
        kpts[17 * i:17 * (i + 1), :] = kpt[i, :, :]
    # print("Image ", img.shape)
    # print("Kpt ", kpt.shape)
    # print("Kpts ", kpts.shape)
    # print("Center ", center)

    # Data augmentation on the flattened keypoints, then scatter the
    # transformed coordinates back into the per-person array.
    img, kpts, center = self.transformer(img, kpts, center)
    for i in range(kpt.shape[0]):
        kpt[i, :, :] = kpts[17 * i:17 * (i + 1), :]
    # kpt = torch.Tensor(kpt)
    # print("Image ", img.shape)
    # print("Kpt ", kpt.shape)
    # print("Center ", center)

    height, width, _ = img.shape

    # kpt = np.zeros((17,3))
    # for i in range(kpts.shape[0]):
    #     kpt = kpt + kpts[i,:,:]
    # print(kpt[:,2])
    # np.clip(kpt[:,2],0,1,kpt[:,2])
    # print(kpt[:,2])

    box = getBoundingBox(img, kpt, height, width, self.stride)

    # Per-person heatmap stacks: (people, H/stride, W/stride, 17 joints + bg).
    heatmaps = np.zeros(
        (kpt.shape[0], int(height / self.stride), int(
            width / self.stride), int(kpt.shape[1] + 1)),
        dtype=np.float32)
    for i in range(kpt.shape[0]):
        for j in range(kpt.shape[1]):
            # resize from 368 to 46
            x = int(kpt[i, j, 0]) * 1.0 / self.stride
            y = int(kpt[i, j, 1]) * 1.0 / self.stride
            heat_map = guassian_kernel(size_h=height / self.stride,
                                       size_w=width / self.stride,
                                       center_x=x, center_y=y,
                                       sigma=self.sigma)
            # Clamp to [0, 1] and zero out the negligible tail.
            heat_map[heat_map > 1] = 1
            heat_map[heat_map < 0.0099] = 0
            heatmaps[i, :, :, j + 1] = heat_map
        # Per-person background: complement of that person's strongest joint.
        heatmaps[i, :, :, 0] = 1.0 - np.max(heatmaps[i, :, :, 1:], axis=2)  # for background
    # print(heatmaps.shape)
    # heatmap = np.zeros((int(height/self.stride), int(width/self.stride), int(kpt.shape[1]+1)), dtype=np.float32)

    # Sum across people into one map. NOTE(review): summing also sums the
    # background channels, so channel 0 can exceed 1 with multiple people —
    # presumably accepted by the loss; verify against training code.
    heatmap = np.sum(heatmaps, axis=0)
    # print(heatmap.shape)

    # Full-resolution center map at the (possibly transformed) image center.
    centermap = np.zeros((height, width, 1), dtype=np.float32)
    center_map = guassian_kernel(size_h=height, size_w=width,
                                 center_x=center[0][0], center_y=center[0][1],
                                 sigma=3)
    center_map[center_map > 1] = 1
    center_map[center_map < 0.0099] = 0
    centermap[:, :, 0] = center_map

    img = Mytransforms.normalize(Mytransforms.to_tensor(img),
                                 [128.0, 128.0, 128.0],
                                 [256.0, 256.0, 256.0])
    heatmap = Mytransforms.to_tensor(heatmap)
    centermap = Mytransforms.to_tensor(centermap)
    box = Mytransforms.to_tensor(box)
    return img, heatmap, centermap, items, 0, box