def project_cameras(cameras, local_points):
    """Project per-camera local 3D points into image coordinates.

    Validity flags are passed through unchanged from `local_points`.
    """
    projections = []
    for cam, pts in zip(cameras, local_points.points):
        projections.append(cam.project(pts))
    return Table.create(points=np.stack(projections), valid=local_points.valid)
def multiply_tables(table1, table2):
    """Compose two pose tables element-wise.

    Poses are combined by matrix product; an entry is valid only when both
    inputs are valid. Table shapes must be broadcast-compatible.
    """
    assert can_broadcast(table1._shape, table2._shape), (
        f"multiply_tables: table shapes must broadcast "
        f"{table1._shape} vs {table2._shape}")

    composed = table1.poses @ table2.poses
    both_valid = table1.valid & table2.valid
    return Table.create(poses=composed, valid=both_valid)
def relative_between_n(table1, table2, axis=0, inv=False):
    """Estimate relative transforms between corresponding slices of two
    pose tables along `axis`; `inv` selects the inverse-direction estimator.
    """
    estimator = relative_between_inv if inv else relative_between
    slice_pairs = zip(table1._sequence(axis), table2._sequence(axis))
    return Table.stack([estimator(a, b) for a, b in slice_pairs])
def mean_robust_n(pose_table, axis=0):
    """Robust mean pose of each slice along `axis`.

    Slices containing no valid poses yield `invalid_pose`.
    """
    def robust_mean(poses):
        if np.any(poses.valid):
            return valid_pose(matrix.mean_robust(poses.poses[poses.valid]))
        return invalid_pose

    return Table.stack([robust_mean(p) for p in pose_table._sequence(axis)])
def detect(self, image):
    """Detect grid corners in `image` and refine them to sub-pixel accuracy.

    Returns `empty_detection` when the grid is not found. Corner ids are
    derived as marker_id * 4 + corner_index (corner index cycles 0..3).
    """
    detections = self.grid.compute_observation(image)
    if not detections.success:
        return empty_detection

    corner_detections = [
        struct(ids=marker_id * 4 + k % 4, corners=corner)
        for k, (marker_id, corner) in enumerate(
            zip(detections.ids, detections.image_points))
    ]
    return subpix_corners(image, Table.stack(corner_detections), self.subpix_region)
def stack_boards(boards):
    """Stack boards of differing point counts into one table.

    Shorter boards are zero-padded to the largest board's size, with the
    padded entries flagged invalid.
    """
    max_points = max(board.num_points for board in boards)

    def padded(board):
        pts = board.adjusted_points.astype(np.float64)
        pad_rows = max_points - pts.shape[0]
        return struct(
            points=np.pad(pts, [(0, pad_rows), (0, 0)]),
            valid=np.arange(max_points) < board.num_points)

    return Table.stack([padded(board) for board in boards])
def annotate_image(workspace, calibration, layer, state, options):
    """Build a QGraphicsScene of the camera image with a calibration overlay.

    `layer` selects the overlay: "detections" (detected corner markers per
    board), "reprojection" (calibrated reprojections vs. detections, requires
    `calibration`), or "detected_poses" (points projected from per-board pose
    estimates). Any other value is an error.
    """
    image = state.image
    scene = QtWidgets.QGraphicsScene()
    pixmap = QPixmap(qt_image(image))
    scene.addPixmap(pixmap)

    detections = workspace.point_table._index[state.camera, state.frame]

    if layer == "detections":
        for board, color, points in zip(
                workspace.boards, workspace.board_colors, detections._sequence(0)):
            add_point_markers(scene, points, board, color, options)

    elif layer == "reprojection":
        assert calibration is not None
        projected = calibration.projected._index[state.camera, state.frame]
        inliers = calibration.inliers[state.camera, state.frame]
        valid_boards = calibration.pose_estimates.board.valid
        add_reprojections(scene, detections, projected, inliers,
                          workspace.boards, valid_boards, options)

    elif layer == "detected_poses":
        board_poses = workspace.pose_table._index[state.camera, state.frame]
        camera = workspace.initialisation.cameras[state.camera]
        for board, pose, color in zip(
                workspace.boards, board_poses._sequence(0), workspace.board_colors):
            if pose.valid:
                # dtype=bool: the np.bool alias was removed in NumPy 1.24,
                # so the previous dtype=np.bool raised AttributeError.
                projected = Table.create(
                    points=camera.project(board.points, pose.poses),
                    valid=np.ones(board.points.shape[0], dtype=bool))
                add_point_markers(scene, projected, board, color, options)
    else:
        assert False, f"unknown layer {layer}"

    # Extend the scene rect by a full image width/height on each side so
    # overlay items drawn outside the image stay reachable when panning.
    h, w, *_ = image.shape
    scene.setSceneRect(-w, -h, 3 * w, 3 * h)
    return scene
def reprojection_statistics(error, valid, inlier, axis=None):
    """Summarise reprojection errors over `axis`.

    Returns a Table with: detected count, outlier count (valid but not
    inlier), mse/rms (mse guarded against divide-by-zero when nothing is
    detected), and error quantiles (min, lower/median/upper quartile, max)
    computed over valid entries only.
    """
    n = valid.sum(axis=axis)
    # np.maximum(n, 1) avoids division by zero for slices with no detections.
    mse = np.square(error).sum(axis=axis) / np.maximum(n, 1)
    outliers = (valid & ~inlier).sum(axis=axis)

    quantiles = masked_quantile(error, valid, [0, 0.25, 0.5, 0.75, 1.0], axis=axis)
    # Renamed from min/max to avoid shadowing the builtins.
    q_min, lower_q, median, upper_q, q_max = quantiles

    return Table.create(
        detected=n, outliers=outliers,
        mse=mse, rms=np.sqrt(mse),
        min=q_min, lower_q=lower_q, median=median,
        upper_q=upper_q, max=q_max)
def initialise_poses(pose_table, camera_poses=None):
    """Initialise camera, board, and per-frame rig poses from a pose table.

    When `camera_poses` is supplied it overrides the estimated camera poses
    (after reporting the difference against the estimate).

    Returns a struct with fields `times` (rig poses per frame), `camera`,
    and `board`.
    """
    # Find relative transforms between cameras and rig poses
    camera = estimate_relative_poses(pose_table, axis=0)

    if camera_poses is not None:
        info("Camera initialisation vs. supplied calibration")
        report_poses("camera", camera_poses, camera.poses)
        # dtype=bool: the np.bool alias was removed in NumPy 1.24,
        # so the previous dtype=np.bool raised AttributeError.
        camera = Table.create(
            poses=camera_poses,
            valid=np.ones(camera_poses.shape[0], dtype=bool))

    board = estimate_relative_poses_inv(pose_table, axis=2)

    # solve for the rig transforms cam @ rig @ board = pose
    # first take inverse of both sides by board pose
    # cam @ rig = board_relative = pose @ board^-1
    board_relative = multiply_tables(pose_table, expand(inverse(board), [0, 1]))

    # solve for unknown rig
    expanded = broadcast_to(expand(camera, [1, 2]), board_relative)
    times = relative_between_n(expanded, board_relative, axis=1, inv=True)

    return struct(times=times, camera=camera, board=board)
def end_table(self):
    """Pose table built from the end-time poses and the validity mask."""
    poses, mask = self.pose_end, self.valid
    return Table.create(poses=poses, valid=mask)
def start_table(self):
    """Pose table built from the start-time poses and the validity mask."""
    poses, mask = self.pose_start, self.valid
    return Table.create(poses=poses, valid=mask)
def make_nd_table(items, n):
    """Recursively stack nested sequences of tables into an n-dimensional Table."""
    if n <= 1:
        return Table.stack(items)
    return Table.stack([make_nd_table(sub, n - 1) for sub in items])
def transform_points(pose_table, board_points):
    """Transform board points by the table's poses (homogeneous transform).

    A result point is valid only where both the pose and the point are valid;
    the two table shapes must be broadcast-compatible.
    """
    assert can_broadcast(pose_table._shape, board_points._shape)

    transformed = matrix.transform_homog(
        t=pose_table.poses, points=board_points.points)
    return Table.create(
        points=transformed,
        valid=pose_table.valid & board_points.valid)
def fill_poses(pose_dict, n):
    """Expand a sparse {index: pose} mapping into a dense table of n poses.

    Missing indices are filled with the identity matrix and flagged invalid.
    """
    known_ids = sorted(pose_dict)
    known_poses = np.array([pose_dict[k] for k in known_ids])
    values, mask = fill_sparse_tile(n, known_poses, known_ids, np.eye(4))
    return Table.create(poses=values, valid=mask)