def __getitem__(self, index):
    image_path = self.images[index]
    kps = self.kp2ds[index].copy()
    box = self.boxs[index]

    # jitter the crop with a random scale drawn per call
    scale = np.random.rand(4) * (self.scale_range[1] - self.scale_range[0]) + self.scale_range[0]
    image, kps = cut_image(image_path, kps, scale, box[0], box[1])

    # resize the crop to the network input size and rescale the keypoints with it
    ratio = 1.0 * args.crop_size / image.shape[0]
    kps[:, :2] *= ratio
    dst_image = cv2.resize(image, (args.crop_size, args.crop_size), interpolation=cv2.INTER_CUBIC)

    # random horizontal flip for augmentation
    if self.use_flip and random.random() <= self.flip_prob:
        dst_image, kps = flip_image(dst_image, kps)

    # normalize kp to [-1, 1]
    ratio = 1.0 / args.crop_size
    kps[:, :2] = 2.0 * kps[:, :2] * ratio - 1.0

    return {
        'image': torch.tensor(
            convert_image_by_pixformat_normalize(dst_image, self.pix_format, self.normalize)).float(),
        'kp_2d': torch.tensor(kps).float(),
        'image_name': self.images[index],
        'data_set': 'lsp'
    }
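# Usage sketch (an assumption, not part of the repo): the dict returned by
# __getitem__ above is collated into batched tensors by a standard DataLoader.
# `dataset` stands in for an instance of the (unnamed) class defining the method.
def _demo_lsp_loader(dataset):
    from torch.utils.data import DataLoader
    loader = DataLoader(dataset, batch_size=32, shuffle=True)
    for batch in loader:
        images = batch['image']    # float tensor, (B, C, crop_size, crop_size)
        kp_2d = batch['kp_2d']     # float tensor, (B, num_kps, 3), x/y in [-1, 1]
        break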
def __getitem__(self, index):
    imgPath = self.images[index]
    keyps = self.keyps[index].copy()
    bbox = self.boxes[index]

    # jitter the crop with a random scale drawn per call
    scale = np.random.rand(4) * (self.scaleRange[1] - self.scaleRange[0]) + self.scaleRange[0]
    bboxImg, bboxKps = util.cut_image(imgPath, keyps, scale, bbox[0], bbox[1])

    # resize the crop to the network input size and rescale the keypoints with it
    ratio = 1.0 * self.cropSize / bboxImg.shape[0]
    bboxKps[:, :2] *= ratio
    resImg = cv2.resize(bboxImg, (self.cropSize, self.cropSize), interpolation=cv2.INTER_CUBIC)

    # normalize kp to [-1, 1]
    ratio = 1.0 / self.cropSize
    bboxKps[:, :2] = 2.0 * bboxKps[:, :2] * ratio - 1.0

    return {
        "image": torch.tensor(
            util.convert_image_by_pixformat_normalize(resImg, self.pixFormat, self.normalize)).float(),
        "kp_2d": torch.tensor(bboxKps).float(),
        "image_name": imgPath,
        "data_set": "COCO 2017"
    }
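# A minimal sketch of the keypoint normalization used by these loaders and its
# inverse, assuming pixel coordinates in [0, crop_size]. Mapping
# x -> 2 * x / crop_size - 1 sends 0 to -1 and crop_size to +1; the inverse
# recovers pixel coordinates. Helper names here are illustrative only.
import numpy as np

def normalize_kps(kps_px, crop_size):
    kps = kps_px.copy().astype(np.float64)
    kps[:, :2] = 2.0 * kps[:, :2] / crop_size - 1.0
    return kps

def denormalize_kps(kps_norm, crop_size):
    kps = kps_norm.copy().astype(np.float64)
    kps[:, :2] = (kps[:, :2] + 1.0) * 0.5 * crop_size
    return kps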
def __getitem__(self, index):
    image_path = self.images[index]
    kps = self.kp2ds[index].copy()
    pose = self.poses[index].copy()
    shape = self.betas[index].copy()
    dst_image = cv2.imread(image_path)

    # random horizontal flip; the SMPL pose must be mirrored along with the image
    if self.use_flip and random.random() <= self.flip_prob:
        dst_image, kps = flip_image(dst_image, kps)
        pose = reflect_pose(pose)

    # normalize kp to [-1, 1]
    ratio = 1.0 / args.crop_size
    kps[:, :2] = 2.0 * kps[:, :2] * ratio - 1.0

    return {
        'image': torch.tensor(
            convert_image_by_pixformat_normalize(dst_image, self.pix_format, self.normalize)).float(),
        'kp_2d': torch.tensor(kps).float(),
        'pose': torch.tensor(pose).float(),
        'shape': torch.tensor(shape).float(),
        'image_name': self.images[index],
        'data_set': 'up_3d_evaluation'
    }
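# A hedged sketch of what a pose-mirroring helper like reflect_pose(pose) does;
# this is an assumption based on standard SMPL conventions, not the repo's
# implementation. A horizontal image flip reflects the body about the x plane,
# which negates the y and z components of every per-joint axis-angle 3-vector
# and swaps each left joint with its right counterpart in the kinematic tree.
import numpy as np

# assumed left/right joint index pairs for the standard 24-joint SMPL ordering
_SMPL_LR_PAIRS = [(1, 2), (4, 5), (7, 8), (10, 11), (13, 14),
                  (16, 17), (18, 19), (20, 21), (22, 23)]

def reflect_pose_sketch(pose):
    joints = pose.reshape(-1, 3).copy()   # (24, 3) axis-angle per joint
    for l, r in _SMPL_LR_PAIRS:
        joints[[l, r]] = joints[[r, l]]   # swap left/right joints
    joints[:, 1:] *= -1.0                 # negate y and z axis-angle components
    return joints.reshape(-1)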
def __getitem__(self, index):
    imgPath = self.images[index]
    keyps2d = self.keyps2d[index].copy()
    # remove head and neck points
    keyps2d = keyps2d[0:12, :]
    bbox = self.boxes[index]
    keyps3d = self.keyps3d[index].copy()
    keyps3d = keyps3d[0:12, :]

    """# plot points on image and save
    imgVis = cv2.imread(imgPath)
    for r in range(keyps2d.shape[0]):
        currX = int(keyps2d[r, 0])
        currY = int(keyps2d[r, 1])
        cv2.circle(imgVis, (currX, currY), 2, (0, 0, 255), 4)
        cv2.putText(imgVis, str(r), (currX, currY), cv2.FONT_HERSHEY_SIMPLEX, 0.2, (255, 255, 255))
    cv2.imwrite("/home/BaseCode/trainedModels/5/tmpFull.png", imgVis)"""

    #scale = np.random.rand(4) * (self.scaleRange[1] - self.scaleRange[0]) + self.scaleRange[0]
    scale = 1
    bboxImg, bboxKps = util.cut_image(imgPath, keyps2d, scale, bbox[0], bbox[1])

    # rescale keypoints with the resize: x follows image width, y follows image height
    bboxKps = bboxKps[:, 0:2]
    bboxKps[:, 0] *= (1.0 * self.cropSize / bboxImg.shape[1])
    bboxKps[:, 1] *= (1.0 * self.cropSize / bboxImg.shape[0])
    resImg = cv2.resize(bboxImg, (self.cropSize, self.cropSize), interpolation=cv2.INTER_CUBIC)

    # normalize kp to [-1, 1]
    bboxKps = (2.0 * bboxKps * 1.0 / self.cropSize) - 1.0

    shape, pose = self.smplShapeParams[index], self.smplPoseParams[index]
    #theta = np.concatenate((np.zeros(3), pose, shape), axis=0)
    theta = np.concatenate((pose, shape), axis=0)

    return {
        "image": torch.from_numpy(
            util.convert_image_by_pixformat_normalize(resImg, self.pixFormat, self.normalize)).float(),
        "kp_2d": torch.from_numpy(bboxKps).float(),
        "kp_3d": torch.from_numpy(keyps3d).float(),
        "theta": torch.from_numpy(theta).float(),
        "image_name": self.images[index],
        "w_smpl": 1.0,
        "w_3d": 1.0,
        "data_set": "Human3.6M"
    }
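# A small sanity sketch of the theta layout (assuming standard SMPL
# dimensions): pose is 72-dim (24 joints x 3 axis-angle) and shape is 10-dim,
# so theta above is 82-dim. The hum3.6m loader below prepends a 3-dim zero
# placeholder for the global translation, giving an 85-dim theta instead.
import numpy as np

pose = np.zeros(72)
shape = np.zeros(10)
assert np.concatenate((pose, shape), axis=0).shape == (82,)
assert np.concatenate((np.zeros(3), pose, shape), axis=0).shape == (85,)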
def __getitem__(self, index):
    image_path = self.images[index]
    kps = self.kp2ds[index].copy()
    box = self.boxs[index]
    kp_3d = self.kp3ds[index].copy()

    # jitter the crop with a random scale drawn per call
    scale = np.random.rand(4) * (self.scale_range[1] - self.scale_range[0]) + self.scale_range[0]
    image, kps = cut_image(image_path, kps, scale, box[0], box[1])

    # resize the crop to the network input size and rescale the keypoints with it
    ratio = 1.0 * args.crop_size / image.shape[0]
    kps[:, :2] *= ratio
    dst_image = cv2.resize(image, (args.crop_size, args.crop_size), interpolation=cv2.INTER_CUBIC)

    trival, shape, pose = np.zeros(3), self.shapes[index], self.poses[index]

    # random horizontal flip; SMPL pose and 3D joints must be mirrored with the image
    if self.use_flip and random.random() <= self.flip_prob:
        dst_image, kps = flip_image(dst_image, kps)
        pose = reflect_pose(pose)
        kp_3d = reflect_lsp_kp(kp_3d)

    # normalize kp to [-1, 1]
    ratio = 1.0 / args.crop_size
    kps[:, :2] = 2.0 * kps[:, :2] * ratio - 1.0

    # theta = [translation placeholder (3), pose (72), shape (10)]
    theta = np.concatenate((trival, pose, shape), axis=0)

    return {
        'image': torch.from_numpy(
            convert_image_by_pixformat_normalize(dst_image, self.pix_format, self.normalize)).float(),
        'kp_2d': torch.from_numpy(kps).float(),
        'kp_3d': torch.from_numpy(kp_3d).float(),
        'theta': torch.from_numpy(theta).float(),
        'image_name': self.images[index],
        'w_smpl': 1.0,
        'w_3d': 1.0,
        'data_set': 'hum3.6m'
    }
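# A hedged sketch of what an image/keypoint flip helper like flip_image does;
# this is an assumption, not the repo's implementation: mirror the image
# columns and remap x as x' = width - 1 - x. Left/right keypoint rows must
# also be swapped, which the repo's helpers are assumed to handle.
import cv2
import numpy as np

def flip_image_sketch(image, kps):
    flipped = cv2.flip(image, 1)                  # flip around the vertical axis
    kps = kps.copy()
    kps[:, 0] = image.shape[1] - 1 - kps[:, 0]    # mirror x coordinates
    return flipped, kps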
def __getitem__(self, index):
    rgbImg = cv2.imread(self.rgbImages[index])
    keyps2d = self.keyps2d[index].copy()

    """# plot points on image and save
    imgVis = rgbImg
    for r in range(len(keyps2d[0])):
        currX = int(keyps2d[0][r])
        currY = int(keyps2d[1][r])
        cv2.circle(imgVis, (currX, currY), 2, (0, 0, 255), 4)
    cv2.imwrite("/mnt/data/tmp/tmp/tmpKeyps1.png", imgVis)"""

    # rescale 2D keypoints with the resize (row 0 holds x, row 1 holds y),
    # then normalize to [0, 1]
    keyps2d[0] = [x * (1.0 * self.cropSize / rgbImg.shape[1]) for x in keyps2d[0]]
    keyps2d[1] = [x * (1.0 * self.cropSize / rgbImg.shape[0]) for x in keyps2d[1]]
    keyps2d /= self.cropSize
    keyps2d = np.asarray(keyps2d)

    resImg = cv2.resize(rgbImg, (self.cropSize, self.cropSize), interpolation=cv2.INTER_CUBIC)
    resImg = util.convert_image_by_pixformat_normalize(resImg, self.pixFormat, self.normalize)

    # load the depth map, normalize it, and replicate it across three channels
    depthImg = self.normalizeDepth(np.load(self.depthImages[index]))
    #depthImg = cv2.resize(depthImg, (self.cropSize, self.cropSize), interpolation=cv2.INTER_NEAREST)
    depthImg = cv2.resize(depthImg, (self.cropSize, self.cropSize))
    depthImgCh = np.zeros((3, depthImg.shape[0], depthImg.shape[1]))
    depthImgCh[0, :, :] = depthImg
    depthImgCh[1, :, :] = depthImg
    depthImgCh[2, :, :] = depthImg

    keyps3d = np.asarray(self.keyps3d[index].copy())
    keyps3dw = keyps3d.copy()

    shape, pose = self.smplShapeParams[index].copy(), self.smplPoseParams[index].copy()

    # build the camera extrinsics from the SURREAL camera location and rotate
    # the global body orientation by the recorded z-rotation
    currCamLoc = self.camLoc[index].copy()
    currExtrinsic, currR, currT = util.getSurrealExtrinsic(
        np.expand_dims(np.transpose(currCamLoc), axis=1))
    currZRot = self.zrot[index].copy()
    RzBody = np.array(((math.cos(currZRot), -math.sin(currZRot), 0),
                       (math.sin(currZRot), math.cos(currZRot), 0),
                       (0, 0, 1)))
    pose[0:3] = util.rotateBodyForVisSurreal(RzBody, pose[0:3])

    # swap l/r pose, flip y/z
    pose = self.swapRightLeftPose(pose)
    pose[1] = -pose[1]
    pose[2] = -pose[2]

    theta = np.concatenate((pose, shape), axis=0)

    # swap keypoints l/r
    keyps2d = self.swapRightLeftJoints(keyps2d)
    keyps3d = self.swapRightLeftJoints(keyps3d)
    keyps3dw = keyps3d.copy()

    # pelvis is only recomputed in camera space; keep it defined either way
    pelvis = np.zeros(3)
    if self.cameraSpace:
        # move 3d joints to camera space and center them on the pelvis
        keyps3d = np.concatenate(
            [keyps3d.transpose(), np.ones((keyps3d.shape[1], 1))], axis=1).transpose()
        keyps3d = np.dot(currExtrinsic, keyps3d)
        pelvis = keyps3d[:, 0].copy()
        keyps3d -= np.expand_dims(pelvis, 1)

    currGender = self.gender[index].copy()
    currGender = currGender[0]

    return {
        "rgbImage": torch.from_numpy(resImg).float(),
        "depthImage": torch.from_numpy(depthImgCh).float(),
        "kp_2d": torch.from_numpy(keyps2d).float(),
        "kp_3d": torch.from_numpy(keyps3d).float(),
        "pelvis": torch.from_numpy(np.expand_dims(pelvis, 1)).float(),
        "kp_3d_world": torch.from_numpy(np.asarray(keyps3dw)).float(),
        "theta": torch.from_numpy(theta).float(),
        "zrot": torch.from_numpy(np.asarray(currZRot)).float(),
        "camLoc": torch.from_numpy(np.asarray(currCamLoc)).float(),
        "extrinsic": torch.from_numpy(np.asarray(currExtrinsic)).float(),
        "intrinsic": torch.from_numpy(np.asarray(cfg.surrealIntrinsic)).float(),
        "rgbImageName": self.rgbImages[index],
        "depthImageName": self.depthImages[index],
        "gender": currGender,
        "data_set": "Surreal"
    }
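# A minimal sketch of the world-to-camera transform used above, assuming a
# 3x4 extrinsic [R|t] and joints stored as a (3, N) array: append a row of
# ones to get homogeneous coordinates, multiply, then subtract the pelvis
# (joint 0) so the 3D target is root-relative.
import numpy as np

def world_to_camera_sketch(joints_w, extrinsic):
    ones = np.ones((1, joints_w.shape[1]))
    joints_h = np.concatenate([joints_w, ones], axis=0)   # (4, N) homogeneous
    joints_c = extrinsic @ joints_h                       # (3, N) camera space
    pelvis = joints_c[:, 0].copy()
    return joints_c - pelvis[:, None], pelvis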