def get_current_bin_content_cloud(self, bin_letter, cur_camera_R, cur_camera_t, colorful=True, fit=False, threshold=0.02):
    '''
    Return a point cloud (colored Nx4 if colorful, else Nx3) of the current
    bin content in the global frame, with the shelf model subtracted out.

    bin_letter - which bin to use when loading the saved shelf model/xform.
    cur_camera_R, cur_camera_t - current camera rotation (flat, 9 elements)
        and translation in the world frame.
    fit - if True, run an extra round of ICP to align the model shelf to the
        current scene before subtraction.
    threshold - distance (meters) within which a scene point is considered
        part of the shelf model and removed.
    '''
    color, scene_cloud, depth_uv, _ = self.read_once(unit='meter', Nx3_cloud=False)
    if colorful:
        scene_cloud = colorize_point_cloud(scene_cloud, color, depth_uv).reshape(-1, 4)
    else:
        scene_cloud = scene_cloud.reshape(-1, 3)
    # drop pixels with no depth reading (z == 0), then thin out the cloud
    keep_idxs = np.where(scene_cloud[:, 2].flatten() != 0)[0]
    scene_cloud = scene_cloud[keep_idxs, :]
    scene_cloud = scene_cloud[::downsample_rate, :]
    scene_cloud_xyz = scene_cloud[:, 0:3]
    # incoming rotation appears to be stored column-major (reshape then .T);
    # TODO(review): confirm against the caller's convention
    cur_camera_R = np.array(cur_camera_R).reshape(3, 3).T
    cur_camera_t = np.array(cur_camera_t)
    # transform the scene cloud from camera frame into the world frame
    scene_cloud_xyz = scene_cloud_xyz.dot(cur_camera_R.T) + cur_camera_t
    scene_cloud[:, 0:3] = scene_cloud_xyz
    scene_tree = KDTree(scene_cloud_xyz)
    # load the saved (empty) shelf model and bring it into the world frame
    model_cloud = self.load_model_bin_cloud(bin_letter, downsample=True)
    model_xform_R, model_xform_t = self.load_R_t(self.get_bin_viewing_camera_xform_path(bin_letter), nparray=True)
    model_cloud = model_cloud.dot(model_xform_R.T) + model_xform_t  # apply saved transformation
    assert (self.shelf_perturb_R is not None) and (self.shelf_perturb_t is not None), 'Shelf perturbation has not been calibrated'
    model_cloud = model_cloud.dot(self.shelf_perturb_R.T) + self.shelf_perturb_t  # apply perturbation transformation found by ICP
    if fit:
        # optional extra ICP refinement of the model against the live scene
        R, t = icp.match(model_cloud, scene_tree)
        model_cloud = model_cloud.dot(R.T) + t
    # keep only scene points farther than `threshold` from any model point
    keep_idxs = self.subtract_model(model_cloud, scene_tree=scene_tree, threshold=threshold)
    content_cloud = scene_cloud[keep_idxs, :]
    return content_cloud
def get_current_content_cloud(self, cur_camera_R, cur_camera_t, filename, colorful=True, threshold=0.02, fit=False, tolist=False, index=0):
    '''
    Subtract a pre-saved background cloud (loaded via `filename`) from the
    current scene and return the remaining content cloud in the world frame.

    Returns an Nx4 cloud if colorful else Nx3; converted to a Python list
    when tolist is True.  8/8/16: shares its pipeline with
    get_current_bin_content_cloud and get_current_tote_content_cloud.
    '''
    color, live_cloud, depth_uv, _ = self.read_once(limb=None, unit='meter', Nx3_cloud=False, index=index)
    if colorful:
        live_cloud = colorize_point_cloud(live_cloud, color, depth_uv).reshape(-1, 4)
    else:
        live_cloud = live_cloud.reshape(-1, 3)
    # discard zero-depth pixels and thin the cloud
    valid = np.where(live_cloud[:, 2].flatten() != 0)[0]
    live_cloud = live_cloud[valid, :][::downsample_rate, :]
    # map camera-frame coordinates into the world frame
    world_R = np.array(cur_camera_R).reshape(3, 3).T
    world_t = np.array(cur_camera_t)
    xyz = live_cloud[:, 0:3].dot(world_R.T) + world_t
    live_cloud[:, 0:3] = xyz
    if len(xyz) <= 1:
        # effectively empty scene; nothing to subtract
        return xyz
    scene_tree = KDTree(xyz)
    # load the saved background model and bring it into the world frame
    background = self.load_model_cloud(self.get_standard_cloud_path(filename), downsample=True)
    saved_R, saved_t = self.load_R_t(self.get_standard_viewing_camera_xform_path(filename), nparray=True)
    background = background.dot(saved_R.T) + saved_t  # saved viewing transform
    assert (self.shelf_perturb_R is not None) and (self.shelf_perturb_t is not None), 'Shelf perturbation has not been calibrated'
    background = background.dot(self.shelf_perturb_R.T) + self.shelf_perturb_t  # ICP-calibrated perturbation
    if fit:
        # refine the background alignment against the live scene
        R, t = icp.match(background, scene_tree)
        background = background.dot(R.T) + t
    survivors = self.subtract_model(background, scene_tree=scene_tree, threshold=threshold)
    result = live_cloud[survivors, :]
    return result.tolist() if tolist else result
def get_current_point_cloud(self, cur_camera_R, cur_camera_t, colorful=False, tolist=True):
    '''
    Physical pre-condition: the camera is already at the desired viewing pose.

    Return the cloud the camera currently perceives, transformed into the
    world frame.  With colorful=False the result is Nx3 (x, y, z in meters);
    with colorful=True it is Nx4, the last column being the RGB color packed
    PCL-style into a single float (decode with pcl_float_to_rgb).  Zero-depth
    pixels are removed.  Returns None on any data-retrieval error.
    '''
    color, cloud, depth_uv, _ = self.read_once(unit='meter', Nx3_cloud=False)
    if cloud is None:
        return None
    if colorful:
        if color is None or depth_uv is None:
            return None
        cloud = colorize_point_cloud(cloud, color, depth_uv)
    # flatten h x w x ch into N x ch, then drop pixels with no depth
    n_channels = cloud.shape[-1]
    flat = cloud.reshape(-1, n_channels)
    flat = flat[np.where(flat[:, 2].flatten() != 0)[0], :]
    # camera pose -> world-frame transform
    world_R = np.array(cur_camera_R).reshape(3, 3).T
    world_t = np.array(cur_camera_t)
    flat[:, 0:3] = flat[:, 0:3].dot(world_R.T) + world_t
    return flat.tolist() if tolist else flat
def get_current_bin_content_cloud(self, bin_letter, cur_camera_R, cur_camera_t, limb, colorful=True, fit=False, threshold=0.01, crop=False, bin_bound=None, perturb_xform=None):
    '''
    Return a point cloud (colored Nx4 if colorful, else Nx3) of the current
    bin content in the global frame, with the shelf removed.

    bin_letter, limb - select which saved shelf model / transform to load.
    cur_camera_R, cur_camera_t - current camera pose in the world frame.
    fit - if True, run an additional round of ICP on the model shelf.
    threshold - subtraction distance in meters.
    crop - if True, additionally crop the result to bin_bound using
        perturb_xform (defaults to the calibrated shelf perturbation).
    '''
    color, live_cloud, depth_uv, _ = self.read_once(limb, unit='meter', Nx3_cloud=False)
    if colorful:
        live_cloud = colorize_point_cloud(live_cloud, color, depth_uv).reshape(-1, 4)
    else:
        live_cloud = live_cloud.reshape(-1, 3)
    # remove zero-depth pixels, then thin the cloud
    valid = np.where(live_cloud[:, 2].flatten() != 0)[0]
    live_cloud = live_cloud[valid, :][::downsample_rate, :]
    # bring the scene into the world frame
    world_R = np.array(cur_camera_R).reshape(3, 3).T
    world_t = np.array(cur_camera_t)
    xyz = live_cloud[:, 0:3].dot(world_R.T) + world_t
    live_cloud[:, 0:3] = xyz
    scene_tree = KDTree(xyz)
    # load the saved (empty) shelf model and bring it into the world frame
    shelf_model = self.load_model_bin_cloud(bin_letter, limb, downsample=True)
    saved_R, saved_t = self.load_R_t(self.get_bin_viewing_camera_xform_path(bin_letter, limb), nparray=True)
    shelf_model = shelf_model.dot(saved_R.T) + saved_t  # saved viewing transform
    assert (self.shelf_perturb_R is not None) and (self.shelf_perturb_t is not None), 'Shelf perturbation has not been calibrated'
    shelf_model = shelf_model.dot(self.shelf_perturb_R.T) + self.shelf_perturb_t  # ICP-calibrated perturbation
    if fit:
        # extra ICP refinement of the shelf model against the live scene
        R, t = icp.match(shelf_model, scene_tree)
        shelf_model = shelf_model.dot(R.T) + t
    survivors = self.subtract_model(shelf_model, scene_tree=scene_tree, threshold=threshold)
    bin_content_cloud = live_cloud[survivors, :]
    if crop:
        if perturb_xform is None:
            # default crop transform: the calibrated shelf perturbation
            perturb_xform = (list(self.shelf_perturb_R.T.flat), list(self.shelf_perturb_t.flat))
        bin_content_cloud = self.crop_cloud(bin_content_cloud, bin_bound, perturb_xform)
    return bin_content_cloud
def get_current_tote_content_cloud(self, cur_camera_R, cur_camera_t, limb, colorful=True, threshold=0.02, fit=False):
    '''
    Subtract the saved tote model from the current point cloud of the tote
    (with objects in it) and return the remaining cloud in the global frame.

    cur_camera_R, cur_camera_t - current camera pose in the world frame.
    limb - selects which saved tote model / transform to load.
    fit - if True, first ICP-align the model to the scene (takes some time).
    threshold - subtraction distance in meters.

    After subtraction, isolated outlier points are removed in three passes of
    increasing strictness (>=3, >=4, >=5 neighbors within 3 cm).
    '''
    color, scene_cloud, depth_uv, _ = self.read_once(limb, unit='meter', Nx3_cloud=False)
    if colorful:
        scene_cloud = colorize_point_cloud(scene_cloud, color, depth_uv).reshape(-1, 4)
    else:
        scene_cloud = scene_cloud.reshape(-1, 3)
    # drop zero-depth pixels, then thin the cloud
    keep_idxs = np.where(scene_cloud[:, 2].flatten() != 0)[0]
    scene_cloud = scene_cloud[keep_idxs, :]
    scene_cloud = scene_cloud[::downsample_rate, :]
    scene_cloud_xyz = scene_cloud[:, 0:3]
    model_cloud = self.load_model_tote_cloud(limb, downsample=True)
    assert model_cloud is not None, 'Error loading tote model'
    model_xform_R, model_xform_t = self.load_R_t(self.get_tote_viewing_camera_xform_path(limb), nparray=True)
    # NOTE(review): the sibling bin/content methods apply model_xform_R.T here;
    # confirm the missing transpose is intentional for the tote model.
    model_cloud = model_cloud.dot(model_xform_R) + model_xform_t
    # transform the scene cloud into the world frame
    cur_camera_R = np.array(cur_camera_R).reshape(3, 3).T
    cur_camera_t = np.array(cur_camera_t)
    scene_cloud_xyz = scene_cloud_xyz.dot(cur_camera_R.T) + cur_camera_t
    scene_cloud[:, 0:3] = scene_cloud_xyz
    print("Making scene cloud with %i points" % scene_cloud.shape[0])
    scene_tree = KDTree(scene_cloud_xyz)
    print("Done")
    if fit:
        R, t = icp.match(model_cloud, scene_tree)
        model_cloud = model_cloud.dot(R.T) + t
    keep_idxs = self.subtract_model(model_cloud, scene_tree=scene_tree, threshold=threshold)
    content_cloud = scene_cloud[keep_idxs, :]
    # Iteratively strip isolated outliers: keep only points with at least
    # min_neighbors neighbors within 3 cm, tightening the bar each pass.
    for min_neighbors in (3, 4, 5):
        print("Making content cloud with %i points" % content_cloud.shape[0])
        content_tree = KDTree(content_cloud[:, 0:3])
        print("Done. Querying neighbors within 0.03 m")
        idxs = content_tree.query_ball_point(content_cloud[:, 0:3], 0.03)
        # a list (not a lazy map) so np.where works on both Py2 and Py3
        unisolated_flag = [len(x) >= min_neighbors for x in idxs]
        content_cloud = content_cloud[np.where(unisolated_flag)[0], :]
        print("Clean cloud has %i points" % content_cloud.shape[0])
    return content_cloud