def free_space_ray_trace(self, pc, origin, resolution, voxel_offset=0):
    '''Ray traces from origin to each point in pc and returns the free space
    traversed along the way as a point cloud of voxel centres.'''
    # This is essentially independent of the VoxelMap; put elsewhere?
    res = resolution
    # number of ray samples to leave untouched in front of each hit point
    v_off = int(np.ceil(voxel_offset * (res / self.get_resolution())))
    free_pts_OL = occupiedlist.OccupiedList(res, maxvoxels=3e6)
    count = 0
    for point in pc.points:
        ray_vox_hits, range2, dir_unit, all_ray_pts = self.ray_trace_pts(
            origin, point, return_extras=True)
        # drop the samples at the far end of the ray so the occupied voxels
        # (plus an optional safety margin) are not marked as free
        all_ray_pts = all_ray_pts.points[:-(v_off + 1)]
        all_ray_vox = occupiedlist.pointstovoxels(all_ray_pts, res)
        all_ray_vox_ol = occupiedlist.OccupiedList(1)
        all_ray_vox_ol.addpoints(all_ray_vox)
        ray_vox_hits_ol = occupiedlist.OccupiedList(1)
        ray_vox_hits_ol.addpoints(ray_vox_hits.points)
        # free voxels are those traversed by the ray but not hit
        free_ray_vox_ol = all_ray_vox_ol - ray_vox_hits_ol
        free_pts_OL.addpoints(free_ray_vox_ol.getpoints() * res)
        count += 1
        #if np.remainder(count / len(pc) * 100, 1) == 0:
        #    print count / len(pc) * 100, '%'
    return pointcloud.PointCloud(free_pts_OL.getpoints())
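# Usage sketch for free_space_ray_trace (illustrative only; `vmap` is a
# hypothetical, already-populated VoxelMap instance, `scan` a
# pointcloud.PointCloud and `origin_xyz` the sensor origin for that scan):
#
#   free_pc = vmap.free_space_ray_trace(scan, origin_xyz, resolution=0.1,
#                                       voxel_offset=2)
#   print len(free_pc.points), "free-space voxel centres"
#
# A non-zero voxel_offset appears to leave roughly that many output-resolution
# voxels untraced at the hit end of each ray, which guards against marking the
# observed surface itself as free.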
def generate_freespace(self, resolution, pose=None, minR=0.5, maxR=5):
    '''Generates an occupied list consisting of the observed free space at
    the specified resolution. This is useful for detecting object removal
    from a map.'''
    assert resolution >= self.get_resolution()
    if self._sensor_pts is None:
        sensor_model_ol = _build_Kinect_sensor_model(
            resolution, minR=minR, maxR=maxR)  # for simulated data
        self._sensor_pts = sensor_model_ol.getpoints()
        print "Sensor model points:", len(self._sensor_pts)
    #vis = visualiser.robotvisualiser.Visualiser(npts=1e7)
    #vis.addmappts(self._sensor_pts)
    base_ol = self.mrol.getfinest()
    freespace_ol = occupiedlist.OccupiedList(resolution, maxvoxels=5e6)
    # loop through the trajectory of poses, ray tracing from each pose and
    # adding voxels to the free space occupied voxel list
    if pose is None:
        for pose in self.trajectory:
            print pose
            inliers, outliers = base_ol.segment_ovl(pose, self._sensor_pts)
            # TODO some means of not adding points that are further away than
            # the intersection (doesn't work very well without this!)
            # Julian suggests a masking operation whilst stepping through the
            # layers of the model in X
            freespace_ol.addpoints(outliers)
            freespace_ol.removepoints(inliers)
    else:
        inliers, outliers = base_ol.segment_ovl(pose, self._sensor_pts)
        # TODO some means of not adding points that are further away than
        # the intersection
        freespace_ol.addpoints(outliers)
        freespace_ol.removepoints(inliers)
    return freespace_ol
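# Usage sketch for generate_freespace (illustrative; `vmap` is a hypothetical
# VoxelMap that already holds a map and a trajectory of poses):
#
#   freespace_ol = vmap.generate_freespace(resolution=0.2, minR=0.5, maxR=5)
#   free_voxel_centres = freespace_ol.getpoints()
#
# Voxels that are occupied in an older map but fall inside this free-space
# list are candidates for objects that have since been removed.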
def _build_Kinect_sensor_model(resolution, minR=0.5, maxR=5):
    '''Builds an occupiedlist sensor model aligned with the x-axis at the
    origin for free space determination.'''
    fov = np.radians(75)
    #hres = 640
    #vres = 480
    hres = 320
    vres = 240
    dtheta = fov / hres  # angular increment per pixel (currently unused)
    # perpendicular distance of the camera image plane in pixel space
    A = 0.5 * hres / np.tan(0.5 * fov)
    cols = np.arange(hres) - hres * 0.5
    rows = np.arange(vres) - vres * 0.5
    rows.shape = -1, 1
    # normalise by A so you can step in the x-axis
    rows /= A
    cols /= A
    Y = np.tile(cols, (vres, 1)).ravel()
    Z = np.tile(rows, (1, hres)).ravel()
    sensor_model_ol = occupiedlist.OccupiedList(resolution, maxvoxels=2e6)
    # this way voxels that have multiple rays passing through them are not
    # tested multiple times.
    xyzs = np.empty((vres * hres, 3))
    for X in np.arange(minR, maxR, resolution):
        # TODO this is an approximation; calculate the angles properly
        xyzs[:, 0] = X
        xyzs[:, 1] = Y * X
        xyzs[:, 2] = Z * X
        sensor_model_ol.addpoints(xyzs)
        print X
    return sensor_model_ol
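# Geometry note for _build_Kinect_sensor_model (an informal check):
# A = 0.5 * hres / tan(0.5 * fov) is the pinhole focal length in pixels for a
# horizontal field of view `fov`. Dividing the centred pixel coordinates by A
# turns them into tangents of the ray angles, so a pixel (col, row) at depth X
# along the x-axis maps to (X, X * col / A, X * row / A). For example, with
# fov = 75 degrees and hres = 320, A is about 208.5, and the edge column
# col = 160 gives Y = X * 160 / 208.5, roughly 0.77 * X, i.e. tan(37.5
# degrees) as expected.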
def generate_freespace2(self, pc=None, pose=None, resolution=None, minR=0.5, maxR=5):
    '''Uses the existing ray-trace method to get around the current
    limitations of the above implementation. Generates an occupied list
    consisting of the observed free space at the specified resolution. This
    is useful for detecting object removal from a map.'''
    if resolution is None:
        res = self.get_resolution()
    else:
        res = resolution
    assert res >= self.get_resolution()
    free_space_ol = occupiedlist.OccupiedList(res, maxvoxels=2e6)
    if pc is None and pose is None:
        # generate from the map using the stored pose list
        if self._sensor_pts is None:
            sensor_model_ol = _build_Kinect_sensor_model(res, minR=minR, maxR=maxR)
            self._sensor_pts = pointcloud.PointCloud(
                sensor_model_ol.getpoints())
            print "Sensor model points:", len(self._sensor_pts)
        base_ol = self.mrol.getfinest()
        for pose in self.trajectory:
            print pose
            inliers, outliers = base_ol.segment_ovl(
                pose, self._sensor_pts.points)
            inlier_pc = pointcloud.PointCloud(inliers)
            free_pts = self.free_space_ray_trace(
                inlier_pc, pose.getTuple()[:3], self.get_resolution())
            free_space_ol.addpoints(free_pts.points)
    else:
        assert pc is not None and pose is not None, \
            "Both pc and pose must be specified"
        # resampling to align with the grid. bad hack, but makes ray_cast easier.
        voxs = occupiedlist.pointstovoxels(pc.points, res)
        voxs = quantizer.uniquerows(voxs, 0)
        X = occupiedlist.voxelstopoints(voxs, res)
        X_pc = pointcloud.PointCloud(X)
        free_pts = self.free_space_ray_trace(
            X_pc, pose.getTuple()[:3], self.get_resolution())
        free_space_ol.addpoints(free_pts.points)
    return free_space_ol
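# Usage sketch for generate_freespace2 in its single-scan mode (illustrative;
# `scan_pc` is a hypothetical pointcloud.PointCloud and `scan_pose` a poseutil
# pose for the sensor origin):
#
#   free_ol = vmap.generate_freespace2(pc=scan_pc, pose=scan_pose,
#                                      resolution=0.2)
#
# Calling it with no arguments instead replays the stored trajectory against
# the Kinect sensor model, as in generate_freespace above.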
def __init__(self, resolution, levels=3):
    # coarse to fine resolutions
    self.factor = 2  # multiplier between the resolutions; might have to be an integer?
    resolutions = (pow(self.factor, np.arange(levels)) * resolution)[::-1]
    self.mapvoxels = []  # list of occupied voxel lists
    self.maptimes = []
    for res in resolutions:
        ol = occupiedlist.OccupiedList(res)
        self.mapvoxels.append(ol)
        #self.maptimes.append([]) # TODO
    self.keyframe = set()
    self.feature_range = 8.0
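# Worked example of the resolution pyramid (a sketch; the enclosing
# multi-resolution class is constructed with the finest resolution): with
# resolution=0.02 and levels=3,
#   pow(2, np.arange(3)) * 0.02  ->  [0.02, 0.04, 0.08]
# which reversed gives [0.08, 0.04, 0.02], so mapvoxels[0] is the coarsest
# grid and mapvoxels[-1] the finest.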
def add_points(self, xyzs, pose, timestamp=None):
    '''Adds the pointcloud xyzs to the map, transforming them by the given
    pose if required. Returns the overlap of the new points against the
    existing map.'''
    xyzs = xyzs.points
    xyzs, return_pose = poseutil.check_and_transform_points(xyzs, pose)
    self.trajectory.append(return_pose)
    if self._rv is not None:
        finest = self.mrol.getfinest()
        # determine the unique new voxels, convert them to points and add
        # them to the map visualisation
        ol = occupiedlist.OccupiedList(finest.resolution)
        ol.addpoints(xyzs)
        ol = ol - finest
        self._rv.addmappts(ol.getpoints())
        #self._rv.setminmax(np.min(self.get_points().points[:,2]),
        #                   np.max(self.get_points().points[:,2]))
        self._rv.setrobotpose(return_pose.getMatrix())
        self._rv.addtrajectorypoint(return_pose.getTuple()[0:3])
        if self._rv_changes:
            inliers, outliers = self.mrol.getfinest().segment_ovl(None, xyzs)
            self._rv.setrightpts(inliers)
            self._rv.setleftpts(outliers)
    self.mrol.addpoints(xyzs, timestamp=timestamp)
    return self.get_overlap(return_pose.getTuple(), xyzs)
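# Usage sketch for add_points (illustrative; names are hypothetical): each new
# scan is transformed by its pose, appended to the trajectory, pushed into the
# multi-resolution occupied lists and, if a visualiser is attached, drawn
# incrementally.
#
#   overlap = vmap.add_points(scan_pc, scan_pose, timestamp=t)
#   print "overlap with existing map:", overlap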