def multiply_tables(table1, table2):
    """Compose the poses of two broadcastable tables elementwise.

    The resulting entry is valid only where both inputs are valid.
    """
    assert can_broadcast(table1._shape, table2._shape),\
      (f"multiply_tables: table shapes must broadcast "
       f"{table1._shape} vs {table2._shape}")

    composed = table1.poses @ table2.poses
    both_valid = table1.valid & table2.valid
    return Table.create(poses=composed, valid=both_valid)
def project_cameras(cameras, local_points):
    """Project each camera's local 3D points to image coordinates.

    Validity flags are carried through unchanged from `local_points`.
    """
    projected = np.stack([
        cam.project(pts)
        for cam, pts in zip(cameras, local_points.points)
    ])
    return Table.create(points=projected, valid=local_points.valid)
def annotate_image(workspace, calibration, layer, state, options):
    """Build a QGraphicsScene of the current frame image plus a marker overlay.

    layer selects the overlay: "detections" (raw detected points),
    "reprojection" (calibrated reprojections with inlier flags), or
    "detected_poses" (board points projected through per-board pose estimates).

    Raises ValueError for an unknown layer name.
    """
    image = state.image
    scene = QtWidgets.QGraphicsScene()
    pixmap = QPixmap(qt_image(image))
    scene.addPixmap(pixmap)

    detections = workspace.point_table._index[state.camera, state.frame]

    if layer == "detections":
        for board, color, points in zip(workspace.boards,
                                        workspace.board_colors,
                                        detections._sequence(0)):
            add_point_markers(scene, points, board, color, options)
    elif layer == "reprojection":
        # Reprojection overlay is only meaningful after calibration has run.
        assert calibration is not None
        projected = calibration.projected._index[state.camera, state.frame]
        inliers = calibration.inliers[state.camera, state.frame]
        valid_boards = calibration.pose_estimates.board.valid

        add_reprojections(scene, detections, projected, inliers,
                          workspace.boards, valid_boards, options)
    elif layer == "detected_poses":
        board_poses = workspace.pose_table._index[state.camera, state.frame]
        camera = workspace.initialisation.cameras[state.camera]

        for board, pose, color in zip(workspace.boards,
                                      board_poses._sequence(0),
                                      workspace.board_colors):
            if pose.valid:
                # np.bool was removed in NumPy 1.24 — use the builtin bool dtype.
                projected = Table.create(
                    points=camera.project(board.points, pose.poses),
                    valid=np.ones(board.points.shape[0], dtype=bool))
                add_point_markers(scene, projected, board, color, options)
    else:
        # An explicit raise (rather than `assert False`) still fires under -O,
        # instead of silently returning a scene with no overlay.
        raise ValueError(f"unknown layer {layer}")

    # Extend the scene rect well beyond the image so markers near the
    # borders remain pannable/visible.
    h, w, *_ = image.shape
    scene.setSceneRect(-w, -h, 3 * w, 3 * h)
    return scene
def reprojection_statistics(error, valid, inlier, axis=None):
    """Summarise reprojection errors, reduced over `axis`.

    error: per-point error magnitudes; valid/inlier: boolean masks of the
    same shape.  Returns a Table with counts (detected, outliers), mse/rms,
    and the quantiles (min, lower_q, median, upper_q, max) of the valid
    errors.
    """
    n = valid.sum(axis=axis)
    # Guard the division where a slice has no detected points.
    mse = np.square(error).sum(axis=axis) / np.maximum(n, 1)
    # Outliers are points that were detected but rejected by calibration.
    outliers = (valid & ~inlier).sum(axis=axis)

    quantiles = masked_quantile(error, valid, [0, 0.25, 0.5, 0.75, 1.0],
                                axis=axis)
    # Renamed from min/max to avoid shadowing the builtins.
    q_min, lower_q, median, upper_q, q_max = quantiles
    return Table.create(detected=n, outliers=outliers,
                        mse=mse, rms=np.sqrt(mse),
                        min=q_min, lower_q=lower_q, median=median,
                        upper_q=upper_q, max=q_max)
def initialise_poses(pose_table, camera_poses=None):
    """Initialise camera, board and rig ("times") poses from a pose table.

    If camera_poses is supplied, it overrides the estimated camera poses
    (after logging a comparison report against the estimate).
    Returns struct(times, camera, board).
    """
    # Find relative transforms between cameras and rig poses
    camera = estimate_relative_poses(pose_table, axis=0)

    if camera_poses is not None:
        info("Camera initialisation vs. supplied calibration")
        report_poses("camera", camera_poses, camera.poses)
        # np.bool was removed in NumPy 1.24 — use the builtin bool dtype.
        camera = Table.create(
            poses=camera_poses,
            valid=np.ones(camera_poses.shape[0], dtype=bool))

    board = estimate_relative_poses_inv(pose_table, axis=2)

    # solve for the rig transforms cam @ rig @ board = pose
    # first take inverse of both sides by board pose
    # cam @ rig = board_relative = pose @ board^-1
    board_relative = multiply_tables(pose_table,
                                     expand(inverse(board), [0, 1]))

    # solve for unknown rig
    expanded = broadcast_to(expand(camera, [1, 2]), board_relative)
    times = relative_between_n(expanded, board_relative, axis=1, inv=True)

    return struct(times=times, camera=camera, board=board)
def end_table(self):
    """Return the end poses of this interval set as a Table."""
    return Table.create(valid=self.valid, poses=self.pose_end)
def start_table(self):
    """Return the start poses of this interval set as a Table."""
    return Table.create(valid=self.valid, poses=self.pose_start)
def transform_points(pose_table, board_points):
    """Apply each pose to the corresponding (broadcastable) board points.

    A transformed point is valid only where both the pose and the point are.
    """
    assert can_broadcast(pose_table._shape, board_points._shape)

    transformed = matrix.transform_homog(t=pose_table.poses,
                                         points=board_points.points)
    return Table.create(points=transformed,
                        valid=pose_table.valid & board_points.valid)
def fill_poses(pose_dict, n):
    """Densify a sparse {index: pose} mapping into an n-entry pose table.

    Slots without an entry are filled with the 4x4 identity and flagged
    invalid in the returned Table.
    """
    keys = sorted(pose_dict)
    known_poses = np.array([pose_dict[k] for k in keys])

    poses, mask = fill_sparse_tile(n, known_poses, keys, np.eye(4))
    return Table.create(poses=poses, valid=mask)