def get_mugshot(self, dataset, scene, user, person_uid):
    """Build a list of cropped "mugshot" images for one annotated object.

    Fetches the annotations for `person_uid` and, for every annotation
    with keypoints, crops the frame image around the keypoints:
      - PT datasets: crop the box spanned by the first two 2-D keypoints.
      - Otherwise (AIK-style): project the first 3-D keypoint (nose —
        per the original comment) into each of 12 cameras and crop a
        200x200 px window around it, clipped to the image bounds.

    Returns a (True, images, 200) tuple, where `images` is a list of
    {"image": <binary>} dicts produced by self.img2binary.
    """
    # Get 10 annotation of the object uid
    result = annotationManager.get_annnotations_by_object(
        dataset, scene, user, person_uid)
    images = []  # Final cropped images
    for r in result:
        # Only annotations that actually carry keypoints for the first object
        if 'objects' in r and r['objects'][0]['keypoints'] != []:
            if dataset.is_pt():
                points = r['objects'][0]['keypoints']
                f = Frame(r['frame'], scene, dataset)
                frame_result = frameManager.get_frame(f)
                if frame_result != 'Error':
                    # assumes points[0]/points[1] are (x, y) corners of the
                    # crop box — TODO confirm against the PT annotation format
                    kpX, kpY = points[0]
                    kpX2, kpY2 = points[1]
                    img = cv2.imread(frame_result.path)
                    crop_img = img[kpY:kpY2, kpX:kpX2]
                    images.append({"image": self.img2binary(crop_img)})
            else:
                kps3d = r['objects'][0]['keypoints'][0]  # Nose 3d point
                # Check annotations in all 12 cameras
                for camera in range(12):
                    # Check camera parameters and frame path
                    f = Frame(r['frame'], camera, dataset)
                    frame_result = frameManager.get_frame(f)
                    if frame_result != 'Error':
                        # Obtain 2d keypoints for corresponding camera
                        kps2d = self.project_3D_points_to_camera(
                            kps3d, frame_result.camera_parameters)[0]
                        kpX, kpY = int(kps2d[0]), int(kps2d[1])
                        # Read img, make mugshot 200px and add to final images
                        img = cv2.imread(frame_result.path)
                        # Only crop when the projected point lands inside the image
                        if kpX >= 0 and kpX <= img.shape[
                                1] and kpY >= 0 and kpY <= img.shape[0]:
                            # 100 px on each side, clamped to the image borders
                            kpY_min, kpY_max = max(kpY - 100, 0), min(
                                kpY + 100, img.shape[0])
                            kpX_min, kpX_max = max(kpX - 100, 0), min(
                                kpX + 100, img.shape[1])
                            crop_img = img[kpY_min:kpY_max, kpX_min:kpX_max]
                            images.append(
                                {"image": self.img2binary(crop_img)})
    return True, images, 200
def obtain_3d_points_AIK(self, annotation):
    """Triangulate the 2-D keypoints of an annotation's first object
    into 3-D points.

    For every keypoint, gathers its 2-D observations together with the
    camera parameters of each observing camera, then triangulates via
    aikService.triangulate_2D_points.

    Returns:
        (keypoints3d, False) on success, where keypoints3d holds one
        3-D point (as a list) per keypoint, or [] for keypoints with no
        observations.
        (annotation.objects, True) if any keypoint has exactly one
        observation — a single view cannot be triangulated.
    """
    keypoints2d = annotation.objects[0].keypoints
    keypoints3d = []  # New 3d kps

    # Triangulate all keypoints of object
    for kp in keypoints2d:
        # Keypoints and camera parameters to triangulate
        keypoints_triangulate = []
        camera_params_triangulate = []

        # Add existing points to triangulate (skip empty observations).
        # zip keeps points and cameras aligned without index bookkeeping.
        for point, camera in zip(kp["points"], kp["cameras"]):
            if point:  # only if it's not empty
                f = Frame(annotation.frame, camera, annotation.dataset)
                frame = frameManager.get_frame(f)
                keypoints_triangulate.append(point)
                camera_params_triangulate.append(frame.camera_parameters)

        if not keypoints_triangulate:
            # If 0 points, leave the keypoint empty.
            # (The original also appended [] to the two local lists here,
            # which was dead code — they are rebuilt each iteration.)
            keypoints3d.append([])
        elif len(keypoints_triangulate) < 2:
            # Error if only 1 point — triangulation needs >= 2 views
            return annotation.objects, True
        else:
            # Triangulate using all available points
            point3d = aikService.triangulate_2D_points(
                keypoints_triangulate, camera_params_triangulate)
            keypoints3d.append(point3d.tolist())  # Store 3d point

    # Return calculated 3d keypoints for the object
    return keypoints3d, False
def get_frames(self, video):
    """Return all Frame objects stored for `video` in its dataset.

    Returns the string 'Error' if the database lookup fails.
    """
    try:
        query = {"dataset": video.dataset.name, "video": video.name}
        # Iterate inside the try: the pymongo cursor is lazy, so db
        # errors can surface while consuming it, not only on find().
        cursor = self.collection.find(query, {"_id": 0})
        return [Frame.from_json(doc) for doc in cursor]
    except errors.PyMongoError:
        log.exception('Error finding frames in db')
        return 'Error'
def get_video_frames(self, video, start_frame, end_frame):
    """Collect frame info dicts for `video` between start_frame and
    end_frame (inclusive).

    Returns a (True, imgs, 200) tuple where each entry of `imgs` is
    {'frame': number, 'video': video, 'image': path}.
    """
    imgs = []
    for frame_number in range(start_frame, end_frame + 1):
        # Get frame
        f = Frame(frame_number, video.name, video.dataset)
        frame_result = frameManager.get_frame(f)
        # Guard against the 'Error' sentinel (as every other call site
        # in this file does) — the original dereferenced .number on it,
        # raising AttributeError for frames missing from the db.
        if frame_result != 'Error':
            imgs.append({
                'frame': frame_result.number,
                'video': frame_result.video,
                'image': frame_result.path
            })
    return True, imgs, 200
def get_frame_by_ID(self, frame_id, dataset):
    """Look up a single frame by its frame_id within `dataset`.

    Returns the Frame, or the string 'Error' when the frame is not
    found or the database lookup fails.
    """
    query = {"dataset": dataset.name, "frame_id": frame_id}
    try:
        document = self.collection.find_one(query, {"_id": 0})
        if document is None:
            return 'Error'
        return Frame.from_json(document)
    except errors.PyMongoError:
        log.exception('Error finding frame in db')
        return 'Error'
def get_frame(self, frame):
    """Look up a frame by (dataset, video, number).

    Returns the Frame, or the string 'Error' when the frame is not
    found or the database lookup fails.
    """
    query = {
        "dataset": frame.dataset.name,
        "video": frame.video,
        "number": frame.number
    }
    try:
        document = self.collection.find_one(query, {"_id": 0})
        if document is None:
            return 'Error'
        return Frame.from_json(document)
    except errors.PyMongoError:
        log.exception('Error finding frame in db')
        return 'Error'
def add_frames_PT(self, dataset, frames):
    """Create one frame document per VIDEO frame from the PoseTrack
    annotation entries in `frames` (which may cover only some frames).

    Entries in `frames` are consumed in order; video frames with no
    matching entry get an empty placeholder document whose path is
    derived from the directory of a known entry.

    Returns True when every frame was stored, False on the first
    create_frame failure.
    """
    # Frame number encoded in the first entry's file name, e.g. 000042.jpg
    init_frame_number = int(
        os.path.splitext(os.path.split(frames[0]["file_name"])[-1])[0])
    n_frames = ptService.safely_read_dictionary(frames[0], "nframes")
    index = 0  # Next unconsumed entry of `frames`

    for frame_number in range(
            0, n_frames):  # For every frame in VIDEO (not JSON FILE)
        # Number of the next annotated entry, if any are left (guard:
        # the original indexed frames[index] unconditionally and raised
        # IndexError once all entries were consumed).
        if index < len(frames):
            frame_object_number = int(
                os.path.splitext(
                    os.path.split(frames[index]["file_name"])[-1])[0])
        else:
            frame_object_number = None

        if (frame_number + init_frame_number) == frame_object_number:
            # There is data to add: reformat the MATCHED entry to insert
            # into the db. (The original copied frames[frame_number],
            # which is wrong whenever annotated entries are sparse —
            # frames[index] is the entry whose number was just checked.)
            frame = dict(frames[index])
            index += 1  # Advance index
            frame["number"] = frame_number + init_frame_number
            frame["dataset"] = dataset.name
            frame["video"] = ptService.safely_read_dictionary(
                frame, "vid_id")
            ptService.safely_delete_dictionary_key(frame, "vid_id")
            frame["path"] = os.path.join(
                dataset.STORAGE_DIR, dataset.name + "/" +
                ptService.safely_read_dictionary(frame, "file_name"))
            ptService.safely_delete_dictionary_key(frame, "file_name")
            # If it has no ignore regions, store it so we know later.
            frame["has_ignore_regions"] = ptService.safely_read_dictionary(
                frame, "ignore_regions_x") is not None
            ptService.safely_delete_dictionary_key(frame, "ignore_regions_x")
            ptService.safely_delete_dictionary_key(frame, "ignore_regions_y")
        else:
            # If no data, initialize an empty placeholder document
            frame = dict()
            frame["number"] = frame_number + init_frame_number
            frame["dataset"] = dataset.name
            frame["video"] = ptService.safely_read_dictionary(
                frames[0], "vid_id")
            # Clamp the entry index so a directory can still be derived
            # after the last annotated entry has been consumed.
            entry = frames[min(index, len(frames) - 1)]
            dirpath = os.path.join(
                dataset.STORAGE_DIR, dataset.name + "/" +
                os.path.split(entry["file_name"])[-2])
            frame["path"] = os.path.join(
                dirpath, str(frame_number).zfill(6) + ".jpg")
            frame["has_ignore_regions"] = False

        f = Frame.from_json(frame)
        result = frameManager.create_frame(f)
        # create_frame's failure sentinel is 'Error' (see add_frame_AIK);
        # the original compared against lowercase 'error', which never
        # matched, so failures were silently ignored.
        if result == 'Error':
            return False
    return True
def add_frame_AIK(self, dataset):
    """Store one frame document per (frame, camera) pair of an AIK
    dataset, including the camera parameters.

    Returns True on success, False as soon as create_frame fails.
    """
    # Load dataset
    aik = AIK(dataset.dir)
    for frame_number in aik.valid_frames:
        paths, cameras = aik.get_frame(frame_number, return_paths=True)
        for cam_index, cam in enumerate(cameras):
            # Frame directory: join the dataset dir with the relative path
            frame_path = os.path.join(dataset.dir, paths[cam_index])
            # Build the frame (number, camera, dataset, path, camera
            # parameters) and store it in the db
            frame_obj = Frame(frame_number, cam_index, dataset, frame_path,
                              json.loads(cam.to_json()))
            if frameManager.create_frame(frame_obj) == 'Error':
                return False
    return True
def project_to_camera(self, start_frame, end_frame, camera_name, dataset,
                      points_array):
    """Project per-frame 3-D points onto one camera for a frame range.

    `points_array` is a JSON string with one list of points per frame
    in [start_frame, end_frame]; empty entries yield [] in the output.

    Returns (True, final_points, 200).
    """
    # Convert the points json to a Python list
    points_array = json.loads(points_array)
    final_points = []
    for offset, frame_number in enumerate(range(start_frame, end_frame + 1)):
        points = points_array[offset]
        if not points:
            final_points.append([])
            continue
        # Camera parameters for this frame/camera/dataset
        frame = frameManager.get_frame(
            Frame(frame_number, camera_name, dataset))
        # Project the points and collect them
        final_points.append(
            self.project_3D_points_to_camera(points,
                                             frame.camera_parameters))
    return True, final_points, 200