def calibrate_from_initialization(img, mask, A_init, R_init, T_init,
                                  edge_sfactor=0.5, visualize=False):
    """Refine an initial camera calibration by aligning the synthetic field
    template against image edges via a distance-transform objective.

    Fix vs. previous version: removed the unconditional
    ``plt.imshow(...)``/``plt.show()`` debug calls that blocked execution on
    every call and ignored the ``visualize`` flag (visualization is already
    handled by the ``visualize`` branch below).

    Args:
        img: input image array of shape (h, w, 3).
        mask: binary mask of regions to exclude from edge matching
            (e.g. players); dilated before being applied.
        A_init: initial 3x3 intrinsics matrix.
        R_init: initial rotation.
        T_init: initial translation.
        edge_sfactor: scale factor applied to the image before edge
            detection (detection at reduced scale, then upsampled back).
        visualize: if True, project the refined field points onto the
            image and display the result.

    Returns:
        Tuple ``(A, R, T, field3d)``: refined calibration and the 3D field
        points used by the optimizer.
    """
    h, w = img.shape[:2]

    # Edge detection at reduced scale, resized back to full resolution,
    # then thinned/binarized with Canny (result in {0, 1}).
    edges = image_utils.robust_edge_detection(
        cv2.resize(img, None, fx=edge_sfactor, fy=edge_sfactor))
    edges = cv2.resize(edges, None, fx=1. / edge_sfactor, fy=1. / edge_sfactor)
    edges = cv2.Canny(edges.astype(np.uint8) * 255, 100, 200) / 255.0

    # Suppress edges inside the (dilated) exclusion mask.
    mask = cv2.dilate(mask, np.ones((25, 25), dtype=np.uint8))
    edges = edges * (1 - mask)

    # Distance transform: each pixel holds the L2 distance to the
    # nearest detected edge pixel — the data term for the optimizer.
    dist_transf = cv2.distanceTransform(
        (1 - edges).astype(np.uint8), cv2.DIST_L2, 0)

    cam_init = cam_utils.Camera('tmp', A_init, R_init, T_init, h, w)
    template, field_mask = draw_utils.draw_field(cam_init)

    # Lift the synthetic template's lit pixels to 3D points on the
    # ground plane using the initial camera.
    II, JJ = (template > 0).nonzero()
    synth_field2d = np.array([[JJ, II]]).T[:, :, 0]
    field3d = cam_utils.plane_points_to_3d(synth_field2d, cam_init)

    A, R, T = _calibrate_camera_dist_transf(
        A_init, R_init, T_init, dist_transf, field3d)

    if visualize:
        cam_res = cam_utils.Camera('tmp', A, R, T, h, w)
        field2d, __ = cam_res.project(field3d)
        io.imshow(img, points=field2d)

    return A, R, T, field3d
def dump_video(self, vidtype, scale=4, mot_tracks=None, one_color=True):
    """Render an annotated MP4 of the sequence to ``self.path_to_dataset``.

    Args:
        vidtype: one of ``'calib'``, ``'poses'``, ``'detections'``,
            ``'tracks'`` — selects the overlay drawn on each frame.
        scale: integer downscale factor for the output video resolution.
        mot_tracks: MOT-style track array; required when
            ``vidtype == 'tracks'``. Assumed layout per row:
            frame (1-based), track id, x, y, w, h — TODO confirm
            against the tracker output format.
        one_color: forwarded to the skeleton drawer for the 'poses' mode.
    """
    if vidtype not in ['calib', 'poses', 'detections', 'tracks']:
        raise Exception('Uknown video format')
    if vidtype == 'tracks' and mot_tracks is None:
        raise Exception('No MOT tracks provided')

    glog.info('Dumping {0} video'.format(vidtype))
    fourcc = cv2.VideoWriter_fourcc(*'MP4V')
    out_file = join(self.path_to_dataset, '{0}.mp4'.format(vidtype))
    # Output video at 20 fps, downscaled by `scale`.
    out = cv2.VideoWriter(out_file, fourcc, 20.0,
                          (self.shape[1] // scale, self.shape[0] // scale))
    font = cv2.FONT_HERSHEY_SIMPLEX
    cmap = matplotlib.cm.get_cmap('hsv')

    # Highest track id, used to normalize track ids into [0, 1] for cmap.
    if mot_tracks is not None:
        n_tracks = max(np.unique(mot_tracks[:, 1]))

    for i, basename in enumerate(tqdm(self.frame_basenames)):
        img = self.get_frame(i, dtype=np.uint8)

        if vidtype == 'poses':
            # Pose
            poses = self.poses[basename]
            draw_utils.draw_skeleton_on_image(img, poses, cmap,
                                              one_color=one_color)

        if vidtype == 'calib':
            # Calib: draw the field lines in blue over the frame.
            cam = cam_utils.Camera('tmp', self.calib[basename]['A'],
                                   self.calib[basename]['R'],
                                   self.calib[basename]['T'],
                                   self.shape[0], self.shape[1])
            canvas, mask = draw_utils.draw_field(cam)
            # Thicken the field lines so they are visible after downscaling.
            canvas = cv2.dilate(
                canvas.astype(np.uint8),
                np.ones((15, 15), dtype=np.uint8)).astype(float)
            img = img * (1 - canvas)[:, :, None] + np.dstack(
                (canvas * 255, np.zeros_like(canvas), np.zeros_like(canvas)))
        elif vidtype == 'detections':
            # Detection: player boxes in red, ball boxes in green.
            bbox = self.bbox[basename].astype(np.int32)
            if self.ball[basename] is not None:
                ball = self.ball[basename].astype(np.int32)
            else:
                ball = np.zeros((0, 4), dtype=np.int32)
            for j in range(bbox.shape[0]):
                cv2.rectangle(img, (bbox[j, 0], bbox[j, 1]),
                              (bbox[j, 2], bbox[j, 3]), (255, 0, 0), 10)
            for j in range(ball.shape[0]):
                cv2.rectangle(img, (ball[j, 0], ball[j, 1]),
                              (ball[j, 2], ball[j, 3]), (0, 255, 0), 10)
        elif vidtype == 'tracks':
            # Tracks: boxes colored per track id, with the id printed.
            # MOT frame indices are 1-based, hence the -1.
            cur_id = mot_tracks[:, 0] - 1 == i
            current_boxes = mot_tracks[cur_id, :]
            for j in range(current_boxes.shape[0]):
                track_id, x, y, w, h = current_boxes[j, 1:6]
                clr = cmap(track_id / float(n_tracks))
                cv2.rectangle(img, (int(x), int(y)),
                              (int(x + w), int(y + h)),
                              (clr[0] * 255, clr[1] * 255, clr[2] * 255), 10)
                cv2.putText(img, str(int(track_id)), (int(x), int(y)),
                            font, 2, (255, 255, 255), 2, cv2.LINE_AA)

        img = cv2.resize(img, (self.shape[1] // scale,
                               self.shape[0] // scale))
        # VideoWriter expects BGR; frames are handled as RGB here.
        out.write(np.uint8(img[:, :, (2, 1, 0)]))

    # Release everything if job is finished
    out.release()
    cv2.destroyAllWindows()
def visualize_calibration(cam1, cam2, trajectory_3d):
    """Interactive PyQt/OpenGL viewer for a two-camera calibration.

    Shows, for each camera: its frustum, its coordinate axes, and the 3D
    field points lifted from its synthetic field template. Optionally also
    plots a 3D trajectory (e.g. the ball).

    Args:
        cam1, cam2: dicts with keys 'A', 'R', 'T', 'H', 'W' describing
            each camera.
        trajectory_3d: (N, 3) array of 3D points, or None to skip.
    """
    # PyQt visualization
    view = gl.GLViewWidget()
    view.show()

    cam1_obj = cam_utils.Camera(name='cam1', A=np.array(cam1['A']),
                                R=np.array(cam1['R']), T=np.array(cam1['T']),
                                h=cam1['H'], w=cam1['W'])
    cam2_obj = cam_utils.Camera(name='cam2', A=np.array(cam2['A']),
                                R=np.array(cam2['R']), T=np.array(cam2['T']),
                                h=cam2['H'], w=cam2['W'])
    cam_list = [cam1_obj, cam2_obj]

    # Get field 3d points
    field3d_list = []
    for i in range(2):
        # Field 3d points: lift the lit template pixels to the ground plane.
        template, field_mask = draw_utils.draw_field(cam_list[i])
        II, JJ = (template > 0).nonzero()
        synth_field2d = np.array([[JJ, II]]).T[:, :, 0]
        field3d = cam_utils.plane_points_to_3d(synth_field2d, cam_list[i])
        field3d_list.append(field3d)

        # Frustum: camera-local frustum geometry transformed into the
        # OpenGL world frame of this camera.
        frustum_edges, frustum_vertices, frustum_faces = \
            draw_utils.get_frustum_params(size=1.5)
        # One face color per camera: yellow for cam1, cyan for cam2.
        frustum_faces_colors = [
            np.array([[255, 255, 0, 128]
                      for i in range(frustum_faces.shape[0])]),
            np.array([[0, 255, 255, 128]
                      for i in range(frustum_faces.shape[0])])
        ]
        frustum_vertices = transf_utils.transform_3d(
            m=cam_list[i].to_opengl()[0], pts=frustum_vertices)
        # print("CV to GL : ", cam_utils.opencv_to_opengl(A=cam_list[i].A, R=cam_list[i].R, T=cam_list[i].T, h=cam_list[i].height, w=cam_list[i].width)[0].T)
        # print("GL : ", cam_list[i].to_opengl()[0])
        # exit()

        # Per-camera axes, drawn in the camera's frame.
        axis, color = draw_utils.get_3d_axis(size=10)
        for j in range(len(axis)):
            axis[j] = transf_utils.transform_3d(
                m=cam_list[i].to_opengl()[0], pts=axis[j])
            axis_draw = gl.GLLinePlotItem(pos=axis[j], width=3,
                                          antialias=False, color=color[j])
            view.addItem(axis_draw)

        frustum_draw = gl.GLMeshItem(vertexes=frustum_vertices,
                                     faces=frustum_faces,
                                     faceColors=frustum_faces_colors[i],
                                     drawEdges=True,
                                     edgeColor=(0, 0, 255, 2))
        view.addItem(frustum_draw)

    # Add field 3d points
    field3d_ = np.concatenate(field3d_list)
    # Flip Y and Z: OpenCV -> OpenGL axis convention — TODO confirm.
    field3d_[:, [1, 2]] *= -1
    field3d_sp = gl.GLScatterPlotItem()
    field3d_sp.setData(pos=field3d_, size=2)
    view.addItem(field3d_sp)

    # Add field axis (world origin axes)
    axis, color = draw_utils.get_3d_axis(size=25)
    for j in range(len(axis)):
        axis_draw = gl.GLLinePlotItem(pos=axis[j], width=3,
                                      antialias=False, color=color[j])
        view.addItem(axis_draw)

    # Add trajectory_3d if exists
    if trajectory_3d is not None:
        trajectory_3d_sp = gl.GLScatterPlotItem()
        trajectory_3d_sp.setData(pos=trajectory_3d, size=5)
        view.addItem(trajectory_3d_sp)

    # Run Qt event loop unless already inside an interactive session.
    if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):
        QtGui.QApplication.instance().exec_()
def calibrate_from_initialization(img, mask, A_init, R_init, T_init,
                                  edge_sfactor=0.5, visualize=False,
                                  improved_method=False,
                                  validate_pixels=False):
    """Refine an initial camera calibration by aligning the synthetic field
    template against image edges via a distance-transform objective.

    Improvements vs. previous version: the hard-coded local flags
    ``improved_method = 0`` / ``validate_pixels = 0`` are now keyword
    parameters (same defaults, so callers are unaffected), and the tail of
    the pipeline — which was duplicated verbatim in both branches — is
    executed once after the branch-specific edge extraction.

    Args:
        img: input image array of shape (h, w, 3).
        mask: binary mask of regions to exclude from edge matching
            (e.g. players).
        A_init: initial 3x3 intrinsics matrix.
        R_init: initial rotation.
        T_init: initial translation.
        edge_sfactor: scale factor applied before edge detection
            (classic method only).
        visualize: if True, project the refined field points onto the
            image and display the result.
        improved_method: if True, use gradient/field-green based line
            extraction instead of the classic edge-detection pipeline.
        validate_pixels: if True (improved method only), additionally
            check that segmented line pixels lie on a white line — slow.

    Returns:
        Tuple ``(A, R, T, field3d)``: refined calibration and the 3D field
        points used by the optimizer.
    """
    h, w = img.shape[:2]

    if improved_method:
        # Field-marking extraction from image gradients and field color.
        G_mag, G_direction = get_image_gradients(img)
        field_green = get_field_green(img)
        edges = get_field_markings(G_mag, field_green, mask)
        if validate_pixels:
            edges = validate_white_line(img, G_mag, G_direction, edges,
                                        field_green)
    else:
        # Classic pipeline: edge detection at reduced scale, upsampled,
        # thinned with Canny, then masked.
        edges = image_utils.robust_edge_detection(
            cv2.resize(img, None, fx=edge_sfactor, fy=edge_sfactor))
        edges = cv2.resize(edges, None, fx=1. / edge_sfactor,
                           fy=1. / edge_sfactor)
        edges = cv2.Canny(edges.astype(np.uint8) * 255, 100, 200) / 255.0
        mask = cv2.dilate(mask, np.ones((25, 25), dtype=np.uint8))
        edges = edges * (1 - mask)

    # Shared tail (previously duplicated in both branches).
    # Distance transform: each pixel holds the L2 distance to the
    # nearest edge pixel — the data term for the optimizer.
    dist_transf = cv2.distanceTransform(
        (1 - edges).astype(np.uint8), cv2.DIST_L2, 0)

    cam_init = cam_utils.Camera('tmp', A_init, R_init, T_init, h, w)
    template, field_mask = draw_utils.draw_field(cam_init)

    # Lift the synthetic template's lit pixels to 3D points on the
    # ground plane using the initial camera.
    II, JJ = (template > 0).nonzero()
    synth_field2d = np.array([[JJ, II]]).T[:, :, 0]
    field3d = cam_utils.plane_points_to_3d(synth_field2d, cam_init)

    A, R, T = _calibrate_camera_dist_transf(
        A_init, R_init, T_init, dist_transf, field3d)

    if visualize:
        cam_res = cam_utils.Camera('tmp', A, R, T, h, w)
        field2d, __ = cam_res.project(field3d)
        io.imshow(img, points=field2d)

    return A, R, T, field3d