# Fragment from an image-capture script; runs inside a loop over sensor frames indexed by i.
segmask = segmask.resize(rescale_factor, interp='nearest')

# save segmask
segmask.save(os.path.join(save_dir, 'segmask_%d.png' % (i)))

# rescale images
if rescale_factor != 1.0:
    color = color.resize(rescale_factor)
    depth = depth.resize(rescale_factor, interp='nearest')

# save images
color.save(os.path.join(save_dir, 'color_%d.png' % (i)))
depth.save(os.path.join(save_dir, 'depth_%d.npy' % (i)))
if ir is not None:
    ir.save(os.path.join(save_dir, 'ir_%d.npy' % (i)))

if vis:
    from visualization import Visualizer2D as vis2d
    num_plots = 3 if workspace is not None else 2
    vis2d.figure()
    vis2d.subplot(1, num_plots, 1)
    vis2d.imshow(color)
    vis2d.subplot(1, num_plots, 2)
    vis2d.imshow(depth)
    if workspace is not None:
        vis2d.subplot(1, num_plots, 3)
        vis2d.imshow(segmask)
    vis2d.show()

sensor.stop()
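The snippet above resizes the segmask with interp='nearest'. A minimal standalone sketch (plain numpy/scipy, not the perception library) of why nearest-neighbor is the right choice for label images:

# Bilinear interpolation invents fractional label values at object
# boundaries; nearest-neighbor keeps the mask strictly binary.
import numpy as np
import scipy.ndimage as snd

seg = np.zeros((8, 8), dtype=np.uint8)
seg[2:6, 2:6] = 255  # toy binary segmask

half_nearest = snd.zoom(seg, 0.5, order=0)  # order=0 -> nearest neighbor
half_linear = snd.zoom(seg, 0.5, order=1)   # order=1 -> bilinear

assert set(np.unique(half_nearest)) <= {0, 255}  # still binary
print(np.unique(half_linear))  # may contain blended, non-binary values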
def _sample_suction_points(self, depth_im, camera_intr, num_samples,
                           segmask=None, visualize=False,
                           constraint_fn=None):
    """Sample a set of 2D suction point candidates from a depth image by
    choosing points on an object surface uniformly at random
    and then sampling around the surface normal.

    Parameters
    ----------
    depth_im : :obj:`perception.DepthImage`
        depth image to sample from
    camera_intr : :obj:`perception.CameraIntrinsics`
        intrinsics of the camera that captured the images
    num_samples : int
        number of grasps to sample
    segmask : :obj:`perception.BinaryImage`
        binary image segmenting out the object of interest
    visualize : bool
        whether or not to show intermediate samples (for debugging)
    constraint_fn : :obj:`GraspConstraintFn`
        constraint function to apply to grasps

    Returns
    -------
    :obj:`list` of :obj:`SuctionPoint2D`
        list of 2D suction point candidates
    """
    # smooth the depth image and mask out the segmented object
    filter_start = time()
    if self._depth_gaussian_sigma > 0:
        depth_im_mask = depth_im.apply(snf.gaussian_filter,
                                       sigma=self._depth_gaussian_sigma)
    else:
        depth_im_mask = depth_im.copy()
    if segmask is not None:
        depth_im_mask = depth_im.mask_binary(segmask)
    self._logger.debug('Filtering took %.3f sec' % (time() - filter_start))

    if visualize:
        vis.figure()
        vis.subplot(1, 2, 1)
        vis.imshow(depth_im)
        vis.subplot(1, 2, 2)
        vis.imshow(depth_im_mask)
        vis.show()

    # project to get the point cloud
    cloud_start = time()
    point_cloud_im = camera_intr.deproject_to_image(depth_im_mask)
    normal_cloud_im = point_cloud_im.normal_cloud_im()
    nonzero_px = depth_im_mask.nonzero_pixels()
    num_nonzero_px = nonzero_px.shape[0]
    if num_nonzero_px == 0:
        return []
    self._logger.debug('Normal cloud took %.3f sec' % (time() - cloud_start))

    # randomly sample points and add to image
    sample_start = time()
    suction_points = []
    k = 0
    sample_size = min(self._max_num_samples, num_nonzero_px)
    indices = np.random.choice(num_nonzero_px,
                               size=sample_size,
                               replace=False)
    while k < sample_size and len(suction_points) < num_samples:
        # sample a point uniformly at random
        ind = indices[k]
        center_px = np.array([nonzero_px[ind, 1], nonzero_px[ind, 0]])
        center = Point(center_px, frame=camera_intr.frame)
        axis = -normal_cloud_im[center.y, center.x]
        depth = point_cloud_im[center.y, center.x][2]

        # update number of tries
        k += 1

        # check center px dist from boundary
        if center_px[0] < self._min_dist_from_boundary or \
                center_px[1] < self._min_dist_from_boundary or \
                center_px[1] > depth_im.height - self._min_dist_from_boundary or \
                center_px[0] > depth_im.width - self._min_dist_from_boundary:
            continue

        # perturb depth
        delta_depth = self._depth_rv.rvs(size=1)[0]
        depth = depth + delta_depth

        # keep if the angle between the camera optical axis and the
        # suction direction is less than a threshold
        dot = max(min(axis.dot(np.array([0, 0, 1])), 1.0), -1.0)
        psi = np.arccos(dot)
        if psi < self._max_suction_dir_optical_axis_angle:
            # create candidate grasp
            candidate = SuctionPoint2D(center,
                                       axis,
                                       depth,
                                       camera_intr=camera_intr)

            # check constraint satisfaction
            if constraint_fn is None or constraint_fn(candidate):
                if visualize:
                    vis.figure()
                    vis.imshow(depth_im)
                    vis.scatter(center.x, center.y)
                    vis.show()

                suction_points.append(candidate)
    self._logger.debug('Loop took %.3f sec' % (time() - sample_start))
    return suction_points
def _sample_suction_points(self, depth_im, camera_intr, num_samples,
                           segmask=None, visualize=False,
                           constraint_fn=None):
    """Sample a set of 2D suction point candidates from a depth image by
    choosing points on an object surface uniformly at random
    and then sampling around the surface normal.

    Parameters
    ----------
    depth_im : :obj:`perception.DepthImage`
        depth image to sample from
    camera_intr : :obj:`perception.CameraIntrinsics`
        intrinsics of the camera that captured the images
    num_samples : int
        number of grasps to sample
    segmask : :obj:`perception.BinaryImage`
        binary image segmenting out the object of interest
    visualize : bool
        whether or not to show intermediate samples (for debugging)
    constraint_fn : :obj:`GraspConstraintFn`
        constraint function to apply to grasps

    Returns
    -------
    :obj:`list` of :obj:`SuctionPoint2D`
        list of 2D suction point candidates
    """
    # smooth the depth image and mask out the segmented object
    filter_start = time()
    if self._depth_gaussian_sigma > 0:
        depth_im_mask = depth_im.apply(snf.gaussian_filter,
                                       sigma=self._depth_gaussian_sigma)
    else:
        depth_im_mask = depth_im.copy()
    if segmask is not None:
        depth_im_mask = depth_im.mask_binary(segmask)
    self._logger.debug('Filtering took %.3f sec' % (time() - filter_start))

    if visualize:
        vis.figure()
        vis.subplot(1, 2, 1)
        vis.imshow(depth_im)
        vis.subplot(1, 2, 2)
        vis.imshow(depth_im_mask)
        vis.show()

    # project to get the point cloud
    cloud_start = time()
    point_cloud_im = camera_intr.deproject_to_image(depth_im_mask)
    normal_cloud_im = point_cloud_im.normal_cloud_im()
    nonzero_px = depth_im_mask.nonzero_pixels()
    num_nonzero_px = nonzero_px.shape[0]
    if num_nonzero_px == 0:
        return []
    self._logger.debug('Normal cloud took %.3f sec' % (time() - cloud_start))

    # randomly sample points and add to image
    sample_start = time()
    suction_points = []
    k = 0
    sample_size = min(self._max_num_samples, num_nonzero_px)
    indices = np.random.choice(num_nonzero_px,
                               size=sample_size,
                               replace=False)
    while k < sample_size and len(suction_points) < num_samples:
        # sample a point uniformly at random
        ind = indices[k]
        center_px = np.array([nonzero_px[ind, 1], nonzero_px[ind, 0]])
        center = Point(center_px, frame=camera_intr.frame)
        axis = -normal_cloud_im[center.y, center.x]
        depth = point_cloud_im[center.y, center.x][2]
        orientation = 2 * np.pi * np.random.rand()

        # update number of tries
        k += 1

        # skip bad axes
        if np.linalg.norm(axis) == 0:
            continue

        # rotation matrix
        x_axis = axis
        y_axis = np.array([axis[1], -axis[0], 0])
        if np.linalg.norm(y_axis) == 0:
            y_axis = np.array([1, 0, 0])
        y_axis = y_axis / np.linalg.norm(y_axis)
        z_axis = np.cross(x_axis, y_axis)
        R = np.array([x_axis, y_axis, z_axis]).T
        R_orig = np.copy(R)
        R = R.dot(RigidTransform.x_axis_rotation(orientation))
        t = point_cloud_im[center.y, center.x]
        pose = RigidTransform(rotation=R,
                              translation=t,
                              from_frame='grasp',
                              to_frame=camera_intr.frame)

        # check center px dist from boundary
        if center_px[0] < self._min_dist_from_boundary or \
                center_px[1] < self._min_dist_from_boundary or \
                center_px[1] > depth_im.height - self._min_dist_from_boundary or \
                center_px[0] > depth_im.width - self._min_dist_from_boundary:
            continue

        # keep if the angle between the camera optical axis and the
        # suction direction is less than a threshold
        dot = max(min(axis.dot(np.array([0, 0, 1])), 1.0), -1.0)
        psi = np.arccos(dot)
        if psi < self._max_suction_dir_optical_axis_angle:
            # create candidate grasp
            candidate = MultiSuctionPoint2D(pose, camera_intr=camera_intr)

            # check constraint satisfaction
            if constraint_fn is None or constraint_fn(candidate):
                if visualize:
                    vis.figure()
                    vis.imshow(depth_im)
                    vis.scatter(center.x, center.y)
                    vis.show()

                suction_points.append(candidate)
    self._logger.debug('Loop took %.3f sec' % (time() - sample_start))
    return suction_points
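As a sanity check on the rotation-matrix construction in the sampler above, here is a minimal numpy sketch (the function name is illustrative, not from this codebase) that builds the same frame and verifies orthonormality:

# Build an orthonormal frame with the suction axis as the x-column, then
# spin it about x by a sampled orientation, mirroring the sampler's logic.
import numpy as np

def frame_from_axis(axis, orientation):
    x_axis = axis / np.linalg.norm(axis)
    y_axis = np.array([axis[1], -axis[0], 0.])
    if np.linalg.norm(y_axis) == 0:
        y_axis = np.array([1., 0., 0.])
    y_axis = y_axis / np.linalg.norm(y_axis)
    z_axis = np.cross(x_axis, y_axis)
    R = np.array([x_axis, y_axis, z_axis]).T

    # rotation about the x (approach) axis, matching x_axis_rotation
    c, s = np.cos(orientation), np.sin(orientation)
    R_x = np.array([[1, 0, 0], [0, c, -s], [0, s, c]])
    return R.dot(R_x)

R = frame_from_axis(np.array([0., 0., -1.]), 2 * np.pi * np.random.rand())
assert np.allclose(R.T.dot(R), np.eye(3))  # orthonormal by construction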
def run_experiment(): """ Run the experiment """ if not config['robot_off']: rospy.loginfo('Initializing YuMi') robot, subscriber, left_arm, right_arm, home_pose = init_robot(config) # create ROS CVBridge cv_bridge = CvBridge() # wait for Grasp Planning Service and create Service Proxy rospy.wait_for_service('plan_gqcnn_grasp') plan_grasp = rospy.ServiceProxy('plan_gqcnn_grasp', GQCNNGraspPlanner) # get camera intrinsics camera_intrinsics = sensor.ir_intrinsics # setup experiment logger experiment_logger = GraspIsolatedObjectExperimentLogger(config['experiment_dir'], config['supervisor'], camera_intrinsics, T_camera_world, '/home/autolab/Workspace/vishal_working/catkin_ws/src/gqcnn/cfg/ros_nodes/yumi_control_node.yaml', planner_type=config['planner_type']) logging.info('Saving experiment to %s' %(experiment_logger.experiment_path)) object_keys = config['test_object_keys'] trial_number = 1 re_try = False logging.info('Beginning experiment') while True: if not re_try: experiment_logger.start_trial() obj = np.random.choice(object_keys, size=1)[0] else: re_try = False rospy.loginfo('Please place object: ' + obj + ' on the workspace.') raw_input("Press ENTER when ready ...") # start the next trial rospy.loginfo('Trial %d' % (trial_number)) # get the images from the sensor color_image, depth_image, _ = sensor.frames() # log some trial info experiment_logger.update_trial_attribute('trial_num', trial_number) experiment_logger.update_trial_attribute('color_im', color_image) experiment_logger.update_trial_attribute('depth_im', depth_image) # inpaint to remove holes inpainted_color_image = color_image.inpaint(rescale_factor=config['inpaint_rescale_factor']) inpainted_depth_image = depth_image.inpaint(rescale_factor=config['inpaint_rescale_factor']) detector = RgbdDetectorFactory.detector('point_cloud_box') detection = detector.detect(inpainted_color_image, inpainted_depth_image, detector_cfg, camera_intrinsics, T_camera_world, vis_foreground=False, vis_segmentation=False )[0] if config['vis']['vis_detector_output']: vis.figure() vis.subplot(1,2,1) vis.imshow(detection.color_thumbnail) vis.subplot(1,2,2) vis.imshow(detection.depth_thumbnail) vis.show() boundingBox = BoundingBox() boundingBox.minY = detection.bounding_box.min_pt[0] boundingBox.minX = detection.bounding_box.min_pt[1] boundingBox.maxY = detection.bounding_box.max_pt[0] boundingBox.maxX = detection.bounding_box.max_pt[1] try: start_time = time.time() planned_grasp_data = plan_grasp(inpainted_color_image.rosmsg, inpainted_depth_image.rosmsg, camera_intrinsics.rosmsg, boundingBox) grasp_plan_time = time.time() - start_time lift_gripper_width, T_gripper_world = process_GQCNNGrasp(planned_grasp_data, robot, left_arm, right_arm, subscriber, home_pose, config) # get human label human_input = raw_input('Grasp success, or grasp failure? [s/f] ') while human_input.lower() != 's' and human_input.lower() != 'f': logging.info('Did not understand input. Please answer \'s\' or \'f\'') human_input = raw_input('Grasp success, or grasp failure? 
[s/f] ') if human_input.lower() == 's': experiment_logger.update_trial_attribute('human_label', 1) else: experiment_logger.update_trial_attribute('human_label', 0) # log result experiment_logger.update_trial_attribute('gripper_pose', T_gripper_world) experiment_logger.update_trial_attribute('planning_time', grasp_plan_time) experiment_logger.update_trial_attribute('gripper_width', lift_gripper_width) experiment_logger.update_trial_attribute('found_grasp', 1) experiment_logger.update_trial_attribute('completed', True) experiment_logger.update_trial_attribute('object_key', obj) trial_number += 1 except rospy.ServiceException as e: rospy.logerr("Service call failed: \n %s" % e) experiment_logger.update_trial_attribute('found_grasp', 0) experiment_logger.update_trial_attribute('completed', True) experiment_logger.update_trial_attribute('object_key', obj) trial_number += 1 except (YuMiCommException, YuMiControlException) as yce: rospy.logerr(str(yce)) if sensor is not None: sensor.stop() if robot is not None: robot.stop() if subscriber is not None and subscriber._started: subscriber.stop() rospy.loginfo("Re-trying") re_try = True robot, subscriber, left_arm, right_arm, home_pose = init_robot(config)
def _sample_antipodal_grasps(self, depth_im, camera_intr, num_samples,
                             segmask=None, visualize=False,
                             constraint_fn=None):
    """Sample a set of 2D grasp candidates from a depth image by finding
    depth edges, then uniformly sampling point pairs and keeping only
    antipodal grasps with width less than the maximum allowable.

    Parameters
    ----------
    depth_im : :obj:`perception.DepthImage`
        depth image to sample from
    camera_intr : :obj:`perception.CameraIntrinsics`
        intrinsics of the camera that captured the images
    num_samples : int
        number of grasps to sample
    segmask : :obj:`perception.BinaryImage`
        binary image segmenting out the object of interest
    visualize : bool
        whether or not to show intermediate samples (for debugging)
    constraint_fn : :obj:`GraspConstraintFn`
        constraint function to apply to grasps

    Returns
    -------
    :obj:`list` of :obj:`Grasp2D`
        list of 2D grasp candidates
    """
    # compute edge pixels
    edge_start = time()
    depth_im = depth_im.apply(snf.gaussian_filter,
                              sigma=self._depth_grad_gaussian_sigma)
    scale_factor = self._rescale_factor
    depth_im_downsampled = depth_im.resize(scale_factor)
    depth_im_threshed = depth_im_downsampled.threshold_gradients(
        self._depth_grad_thresh)
    edge_pixels = (1.0 / scale_factor) * depth_im_threshed.zero_pixels()
    edge_pixels = edge_pixels.astype(np.int16)

    depth_im_mask = depth_im.copy()
    if segmask is not None:
        edge_pixels = np.array(
            [p for p in edge_pixels if np.any(segmask[p[0], p[1]] > 0)])
        depth_im_mask = depth_im.mask_binary(segmask)

    # re-threshold edges if there are too few
    if edge_pixels.shape[0] < self._min_num_edge_pixels:
        self._logger.info('Too few edge pixels!')
        depth_im_threshed = depth_im.threshold_gradients(
            self._depth_grad_thresh)
        edge_pixels = depth_im_threshed.zero_pixels()
        edge_pixels = edge_pixels.astype(np.int16)
        depth_im_mask = depth_im.copy()
        if segmask is not None:
            edge_pixels = np.array([
                p for p in edge_pixels if np.any(segmask[p[0], p[1]] > 0)
            ])
            depth_im_mask = depth_im.mask_binary(segmask)

    num_pixels = edge_pixels.shape[0]
    self._logger.debug('Depth edge detection took %.3f sec' %
                       (time() - edge_start))
    self._logger.debug('Found %d edge pixels' % (num_pixels))

    # compute point cloud
    point_cloud_im = camera_intr.deproject_to_image(depth_im_mask)

    # compute max depth
    depth_data = depth_im_mask.data[depth_im_mask.data > 0]
    if depth_data.shape[0] == 0:
        return []

    min_depth = np.min(depth_data) + self._min_depth_offset
    max_depth = np.max(depth_data) + self._max_depth_offset

    # compute surface normals
    normal_start = time()
    edge_normals = self._surface_normals(depth_im, edge_pixels)
    self._logger.debug('Normal computation took %.3f sec' %
                       (time() - normal_start))

    if visualize:
        edge_pixels = edge_pixels[::2, :]
        edge_normals = edge_normals[::2, :]

        vis.figure()
        vis.subplot(1, 3, 1)
        vis.imshow(depth_im)
        if num_pixels > 0:
            vis.scatter(edge_pixels[:, 1], edge_pixels[:, 0], s=2, c='b')

            X = [pix[1] for pix in edge_pixels]
            Y = [pix[0] for pix in edge_pixels]
            U = [3 * pix[1] for pix in edge_normals]
            V = [-3 * pix[0] for pix in edge_normals]
            plt.quiver(X, Y, U, V,
                       units='x',
                       scale=0.25,
                       width=0.5,
                       zorder=2,
                       color='r')
        vis.title('Edge pixels and normals')

        vis.subplot(1, 3, 2)
        vis.imshow(depth_im_threshed)
        vis.title('Edge map')

        vis.subplot(1, 3, 3)
        vis.imshow(segmask)
        vis.title('Segmask')
        vis.show()

    # exit if no edge pixels
    if num_pixels == 0:
        return []

    # form set of valid candidate point pairs
    pruning_start = time()
    max_grasp_width_px = Grasp2D(Point(np.zeros(2)),
                                 0.0,
                                 min_depth,
                                 width=self._gripper_width,
                                 camera_intr=camera_intr).width_px
    normal_ip = edge_normals.dot(edge_normals.T)
    dists = ssd.squareform(ssd.pdist(edge_pixels))
    valid_indices = np.where(
        (normal_ip < -np.cos(np.arctan(self._friction_coef)))
        & (dists < max_grasp_width_px) & (dists > 0.0))
    valid_indices = np.c_[valid_indices[0], valid_indices[1]]
    self._logger.debug('Normal pruning %.3f sec' %
                       (time() - pruning_start))

    # return if no valid pairs
    num_pairs = valid_indices.shape[0]
    if num_pairs == 0:
        return []

    # prune out grasps
    contact_points1 = edge_pixels[valid_indices[:, 0], :]
    contact_points2 = edge_pixels[valid_indices[:, 1], :]
    contact_normals1 = edge_normals[valid_indices[:, 0], :]
    contact_normals2 = edge_normals[valid_indices[:, 1], :]
    v = contact_points1 - contact_points2
    v_norm = np.linalg.norm(v, axis=1)
    v = v / np.tile(v_norm[:, np.newaxis], [1, 2])
    ip1 = np.sum(contact_normals1 * v, axis=1)
    ip2 = np.sum(contact_normals2 * (-v), axis=1)
    ip1[ip1 > 1.0] = 1.0
    ip1[ip1 < -1.0] = -1.0
    ip2[ip2 > 1.0] = 1.0
    ip2[ip2 < -1.0] = -1.0
    beta1 = np.arccos(ip1)
    beta2 = np.arccos(ip2)
    alpha = np.arctan(self._friction_coef)
    antipodal_indices = np.where((beta1 < alpha) & (beta2 < alpha))[0]

    # return if no antipodal pairs
    num_pairs = antipodal_indices.shape[0]
    if num_pairs == 0:
        return []

    sample_size = min(self._max_rejection_samples, num_pairs)
    grasp_indices = np.random.choice(antipodal_indices,
                                     size=sample_size,
                                     replace=False)
    self._logger.debug('Grasp comp took %.3f sec' % (time() - pruning_start))

    # compute grasps
    sample_start = time()
    k = 0
    grasps = []
    while k < sample_size and len(grasps) < num_samples:
        grasp_ind = grasp_indices[k]
        p1 = contact_points1[grasp_ind, :]
        p2 = contact_points2[grasp_ind, :]
        n1 = contact_normals1[grasp_ind, :]
        n2 = contact_normals2[grasp_ind, :]
        width = np.linalg.norm(p1 - p2)
        k += 1

        # compute center and axis
        grasp_center = (p1 + p2) / 2
        grasp_axis = p2 - p1
        grasp_axis = grasp_axis / np.linalg.norm(grasp_axis)
        grasp_theta = np.pi / 2
        if grasp_axis[1] != 0:
            grasp_theta = np.arctan2(grasp_axis[0], grasp_axis[1])
        grasp_center_pt = Point(
            np.array([grasp_center[1], grasp_center[0]]))

        # compute grasp points in 3D
        x1 = point_cloud_im[p1[0], p1[1]]
        x2 = point_cloud_im[p2[0], p2[1]]
        if np.linalg.norm(x2 - x1) > self._gripper_width:
            continue

        # perturb
        if self._grasp_center_sigma > 0.0:
            grasp_center_pt = grasp_center_pt + ss.multivariate_normal.rvs(
                cov=self._grasp_center_sigma * np.diag(np.ones(2)))
        if self._grasp_angle_sigma > 0.0:
            grasp_theta = grasp_theta + ss.norm.rvs(
                scale=self._grasp_angle_sigma)

        # check center px dist from boundary
        if grasp_center[0] < self._min_dist_from_boundary or \
                grasp_center[1] < self._min_dist_from_boundary or \
                grasp_center[0] > depth_im.height - self._min_dist_from_boundary or \
                grasp_center[1] > depth_im.width - self._min_dist_from_boundary:
            continue

        # sample depths
        for i in range(self._depth_samples_per_grasp):
            # get depth in the neighborhood of the center pixel
            depth_win = depth_im.data[grasp_center[0] -
                                      self._h:grasp_center[0] + self._h,
                                      grasp_center[1] -
                                      self._w:grasp_center[1] + self._w]
            center_depth = np.min(depth_win)
            if center_depth == 0 or np.isnan(center_depth):
                continue

            # sample depth between the min and max
            min_depth = center_depth + self._min_depth_offset
            max_depth = center_depth + self._max_depth_offset
            sample_depth = min_depth + (max_depth -
                                        min_depth) * np.random.rand()
            candidate_grasp = Grasp2D(grasp_center_pt,
                                      grasp_theta,
                                      sample_depth,
                                      width=self._gripper_width,
                                      camera_intr=camera_intr,
                                      contact_points=[p1, p2],
                                      contact_normals=[n1, n2])

            if visualize:
                vis.figure()
                vis.imshow(depth_im)
                vis.grasp(candidate_grasp)
                vis.scatter(p1[1], p1[0], c='b', s=25)
                vis.scatter(p2[1], p2[0], c='b', s=25)
                vis.show()

            grasps.append(candidate_grasp)
    # return sampled grasps
    self._logger.debug('Loop took %.3f sec' % (time() - sample_start))
    return grasps
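The antipodality test above reduces to a friction-cone check per contact: a pair is kept when the angle between each normal and the line of action is below arctan(mu). A toy, self-contained numpy version (is_antipodal is a hypothetical helper, not from this codebase):

import numpy as np

def is_antipodal(p1, n1, p2, n2, friction_coef):
    # line of action from p2 to p1, as in the sampler's v vector
    v = (p1 - p2).astype(float)
    v = v / np.linalg.norm(v)
    beta1 = np.arccos(np.clip(n1.dot(v), -1.0, 1.0))
    beta2 = np.arccos(np.clip(n2.dot(-v), -1.0, 1.0))
    alpha = np.arctan(friction_coef)
    return beta1 < alpha and beta2 < alpha

# vertically opposed contacts with anti-parallel normals pass the test
p1, n1 = np.array([0., 10.]), np.array([0., 1.])
p2, n2 = np.array([0., 0.]), np.array([0., -1.])
print(is_antipodal(p1, n1, p2, n2, friction_coef=0.5))  # True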
def fast_grid_search(pc, indices, model, shadow, img_file):
    length, width, height = shadow.extents
    split_size = max(length, width)
    pc_data, ind = get_pc_data(pc, indices)
    maxes = np.max(pc_data, axis=0)
    mins = np.min(pc_data, axis=0)
    bin_base = mins[2]
    plane_normal = model[0:3]

    di_temp = ci.project_to_image(pc)
    vis2d.figure()
    vis2d.imshow(di_temp)
    vis2d.show()

    plane_data = pc.data.T[indices]
    #all_indices = np.where([(plane_data[::,2] > 0.795) & (plane_data[::,2] < 0.862)])
    #all_indices = np.where((plane_data[::,1] < 0.16) & (plane_data[::,1] > -0.24) & (plane_data[::,0] > -0.3) & (plane_data[::,0] < 0.24))[0]
    #plane_data = plane_data[all_indices]
    plane_pc = PointCloud(plane_data.T, pc.frame)
    di = ci.project_to_image(plane_pc)
    bi = di.to_binary()

    scene = Scene()
    camera = VirtualCamera(ci, cp)
    scene.camera = camera

    # Get shadow depth img.
    shadow_obj = SceneObject(shadow)
    scene.add_object('shadow', shadow_obj)
    orig_tow = shadow_obj.T_obj_world

    scores = np.zeros((int(np.round((maxes[0] - mins[0]) / split_size)),
                       int(np.round((maxes[1] - mins[1]) / split_size))))
    for i in range(int(np.round((maxes[0] - mins[0]) / split_size))):
        x = mins[0] + i * split_size
        for j in range(int(np.round((maxes[1] - mins[1]) / split_size))):
            y = mins[1] + j * split_size
            for tow in transforms(pc, pc_data, shadow, x, y,
                                  x + split_size, y + split_size, 8):
                shadow_obj.T_obj_world = tow
                scores[i][j] = under_shadow(pc, pc_data, indices, model,
                                            shadow, x, x + split_size, y,
                                            y + split_size, scene, bi)
                shadow_obj.T_obj_world = orig_tow

    print("\nScores: \n" + str(scores))
    best = best_cell(scores)
    print("\nBest Cell: " + str(best) + ", with score = " +
          str(scores[best[0]][best[1]]))

    #-------
    # Visualize best placement
    vis3d.figure()
    x = mins[0] + best[0] * split_size
    y = mins[1] + best[1] * split_size
    cell_indices = np.where((x < pc_data[:, 0])
                            & (pc_data[:, 0] < x + split_size)
                            & (y < pc_data[:, 1])
                            & (pc_data[:, 1] < y + split_size))[0]
    points = pc_data[cell_indices]
    rest = pc_data[np.setdiff1d(np.arange(len(pc_data)), cell_indices)]
    vis3d.points(points, color=(0, 1, 1))
    vis3d.points(rest, color=(1, 0, 1))
    vis3d.show()
    os.path.join(config["calib_dir"], sensor_frame, tf_filename))

# Setup sensor.
sensor = RgbdSensorFactory.sensor(sensor_type, config["sensor"])
sensor.start()
camera_intr = sensor.ir_intrinsics

# Read images.
color_im, depth_im, _ = sensor.frames()
color_im = color_im.inpaint(rescale_factor=inpaint_rescale_factor)
depth_im = depth_im.inpaint(rescale_factor=inpaint_rescale_factor)
rgbd_im = RgbdImage.from_color_and_depth(color_im, depth_im)

# Sample grasps.
grasp_sampler = AntipodalDepthImageGraspSampler(sample_config, gripper_width)
grasps = grasp_sampler.sample(rgbd_im,
                              camera_intr,
                              num_grasp_samples,
                              segmask=None,
                              seed=100,
                              visualize=visualize_sampling)

# Visualize.
vis.figure()
vis.imshow(depth_im)
for grasp in grasps:
    vis.grasp(grasp, scale=1.5, show_center=False, show_axis=True)
vis.title("Sampled grasps")
vis.show()
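The sampler above works on a point cloud deprojected from the depth image; deproject_to_image relies on the standard pinhole model. A minimal numpy sketch with placeholder intrinsics (fx, fy, cx, cy are illustrative, not values from this project):

import numpy as np

fx, fy, cx, cy = 525.0, 525.0, 319.5, 239.5  # placeholder intrinsics

def deproject_pixel(u, v, z):
    # pixel (u, v) with depth z -> 3D point in the camera frame
    x = (u - cx) * z / fx
    y = (v - cy) * z / fy
    return np.array([x, y, z])

print(deproject_pixel(320, 240, 0.75))  # ~[0.0007, 0.0007, 0.75]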
def _plan_grasp(self, color_im, depth_im, camera_intr,
                bounding_box=None, segmask=None):
    """Grasp planner request handler.

    Parameters
    ----------
    req : :obj:`ROS ServiceRequest`
        ROS `ServiceRequest` for grasp planner service.
    """
    rospy.loginfo("Planning Grasp")

    # Inpaint images.
    color_im = color_im.inpaint(
        rescale_factor=self.cfg["inpaint_rescale_factor"])
    depth_im = depth_im.inpaint(
        rescale_factor=self.cfg["inpaint_rescale_factor"])

    # Init segmask.
    if segmask is None:
        segmask = BinaryImage(255 *
                              np.ones(depth_im.shape).astype(np.uint8),
                              frame=color_im.frame)

    # Visualize.
    if self.cfg["vis"]["color_image"]:
        vis.imshow(color_im)
        vis.show()
    if self.cfg["vis"]["depth_image"]:
        vis.imshow(depth_im)
        vis.show()
    if self.cfg["vis"]["segmask"] and segmask is not None:
        vis.imshow(segmask)
        vis.show()

    # Aggregate color and depth images into a single
    # BerkeleyAutomation/perception `RgbdImage`.
    rgbd_im = RgbdImage.from_color_and_depth(color_im, depth_im)

    # Mask bounding box.
    if bounding_box is not None:
        # Calc bb parameters.
        min_x = bounding_box.minX
        min_y = bounding_box.minY
        max_x = bounding_box.maxX
        max_y = bounding_box.maxY

        # Contain box to image->don't let it exceed image height/width
        # bounds.
        if min_x < 0:
            min_x = 0
        if min_y < 0:
            min_y = 0
        if max_x > rgbd_im.width:
            max_x = rgbd_im.width
        if max_y > rgbd_im.height:
            max_y = rgbd_im.height

        # Mask.
        bb_segmask_arr = np.zeros([rgbd_im.height, rgbd_im.width])
        bb_segmask_arr[min_y:max_y, min_x:max_x] = 255
        bb_segmask = BinaryImage(bb_segmask_arr.astype(np.uint8),
                                 segmask.frame)
        segmask = segmask.mask_binary(bb_segmask)

    # Visualize.
    if self.cfg["vis"]["rgbd_state"]:
        masked_rgbd_im = rgbd_im.mask_binary(segmask)
        vis.figure()
        vis.subplot(1, 2, 1)
        vis.imshow(masked_rgbd_im.color)
        vis.subplot(1, 2, 2)
        vis.imshow(masked_rgbd_im.depth)
        vis.show()

    # Create an `RgbdImageState` with the cropped `RgbdImage` and
    # `CameraIntrinsics`.
    rgbd_state = RgbdImageState(rgbd_im, camera_intr, segmask=segmask)

    # Execute policy.
    try:
        return self.execute_policy(rgbd_state, self.grasping_policy,
                                   self.grasp_pose_publisher,
                                   camera_intr.frame)
    except NoValidGraspsException:
        rospy.logerr(
            ("While executing policy found no valid grasps from sampled"
             " antipodal point pairs. Aborting Policy!"))
        raise rospy.ServiceException(
            ("While executing policy found no valid grasps from sampled"
             " antipodal point pairs. Aborting Policy!"))
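The bounding-box masking step above amounts to intersecting the segmask with a box mask. A standalone numpy illustration (plain arrays instead of the perception BinaryImage type):

import numpy as np

h, w = 480, 640
segmask = 255 * np.ones((h, w), dtype=np.uint8)  # trivial full-image segmask

# a box that spills outside the image gets clipped first
min_x, min_y, max_x, max_y = -10, 50, 700, 400
min_x, min_y = max(min_x, 0), max(min_y, 0)
max_x, max_y = min(max_x, w), min(max_y, h)

bb_mask = np.zeros((h, w), dtype=np.uint8)
bb_mask[min_y:max_y, min_x:max_x] = 255

masked = np.where(bb_mask > 0, segmask, 0)  # analogous to segmask.mask_binary
assert masked[60, 10] == 255 and masked[450, 10] == 0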
def generate_gqcnn_dataset(dataset_path, database, target_object_keys,
                           env_rv_params, gripper_name, config):
    """ Generates a GQ-CNN TensorDataset for training models with new
    grippers, quality metrics, objects, and cameras.

    Parameters
    ----------
    dataset_path : str
        path to save the dataset to
    database : :obj:`Hdf5Database`
        Dex-Net database containing the 3D meshes, grasps, and grasp metrics
    target_object_keys : :obj:`OrderedDict`
        dictionary mapping dataset names to target objects (either 'all' or a list of specific object keys)
    env_rv_params : :obj:`OrderedDict`
        parameters of the camera and object random variables used in sampling (see meshpy.UniformPlanarWorksurfaceImageRandomVariable for more info)
    gripper_name : str
        name of the gripper to use
    config : :obj:`autolab_core.YamlConfig`
        other parameters for dataset generation

    Notes
    -----
    Required parameters of config are specified in Other Parameters

    Other Parameters
    ----------------
    images_per_stable_pose : int
        number of object and camera poses to sample for each stable pose
    stable_pose_min_p : float
        minimum probability of occurrence for a stable pose to be used in data generation (used to prune bad stable poses)
    gqcnn/crop_width : int
        width, in pixels, of crop region around each grasp center, before resize (changes the size of the region seen by the GQ-CNN)
    gqcnn/crop_height : int
        height, in pixels, of crop region around each grasp center, before resize (changes the size of the region seen by the GQ-CNN)
    gqcnn/final_width : int
        width, in pixels, of final transformed grasp image for input to the GQ-CNN (defaults to 32)
    gqcnn/final_height : int
        height, in pixels, of final transformed grasp image for input to the GQ-CNN (defaults to 32)
    table_alignment/max_approach_table_angle : float
        max angle between the grasp axis and the table normal when the grasp approach is maximally aligned with the table normal
    table_alignment/max_approach_offset : float
        max deviation from perpendicular approach direction to use in grasp collision checking
    table_alignment/num_approach_offset_samples : int
        number of approach samples to use in collision checking
    collision_checking/table_offset : float
        max allowable interpenetration between the gripper and table to be considered collision free
    collision_checking/table_mesh_filename : str
        path to a table mesh for collision checking (default data/meshes/table.obj)
    collision_checking/approach_dist : float
        distance, in meters, between the approach pose and final grasp pose along the grasp axis
    collision_checking/delta_approach : float
        amount, in meters, to discretize the straight-line path from the gripper approach pose to the final grasp pose
    tensors/datapoints_per_file : int
        number of datapoints to store in each unique tensor file on disk
    tensors/fields : :obj:`dict`
        dictionary mapping field names to dictionaries specifying the data type, height, width, and number of channels for each tensor
    debug : bool
        True (or 1) if the random seed should be set to enforce deterministic behavior, False (0) otherwise
    vis/candidate_grasps : bool
        True (or 1) if the collision free candidate grasps should be displayed in 3D (for debugging)
    vis/rendered_images : bool
        True (or 1) if the rendered images for each stable pose should be displayed (for debugging)
    vis/grasp_images : bool
        True (or 1) if the transformed grasp images should be displayed (for debugging)
    """
    # read data gen params
    output_dir = dataset_path
    gripper = RobotGripper.load(gripper_name)
    image_samples_per_stable_pose = config['images_per_stable_pose']
    stable_pose_min_p = config['stable_pose_min_p']

    # read gqcnn params
    gqcnn_params = config['gqcnn']
    im_crop_height = gqcnn_params['crop_height']
    im_crop_width = gqcnn_params['crop_width']
    im_final_height = gqcnn_params['final_height']
    im_final_width = gqcnn_params['final_width']
    cx_crop = float(im_crop_width) / 2
    cy_crop = float(im_crop_height) / 2

    # open database
    dataset_names = target_object_keys.keys()
    datasets = [database.dataset(dn) for dn in dataset_names]
    if CREATE_SUBSET:
        datasets = [
            dataset.subset(INDEX_START, INDEX_END) for dataset in datasets
        ]

    # set target objects
    for dataset in datasets:
        if target_object_keys[dataset.name] == 'all':
            target_object_keys[dataset.name] = dataset.object_keys

    # setup grasp params
    table_alignment_params = config['table_alignment']
    min_grasp_approach_offset = -np.deg2rad(
        table_alignment_params['max_approach_offset'])
    max_grasp_approach_offset = np.deg2rad(
        table_alignment_params['max_approach_offset'])
    max_grasp_approach_table_angle = np.deg2rad(
        table_alignment_params['max_approach_table_angle'])
    num_grasp_approach_samples = table_alignment_params[
        'num_approach_offset_samples']

    phi_offsets = []
    if max_grasp_approach_offset == min_grasp_approach_offset:
        phi_inc = 1
    elif num_grasp_approach_samples == 1:
        phi_inc = max_grasp_approach_offset - min_grasp_approach_offset + 1
    else:
        phi_inc = (max_grasp_approach_offset - min_grasp_approach_offset) / (
            num_grasp_approach_samples - 1)
    phi = min_grasp_approach_offset
    while phi <= max_grasp_approach_offset:
        phi_offsets.append(phi)
        phi += phi_inc

    # setup collision checking
    coll_check_params = config['collision_checking']
    approach_dist = coll_check_params['approach_dist']
    delta_approach = coll_check_params['delta_approach']
    table_offset = coll_check_params['table_offset']

    table_mesh_filename = coll_check_params['table_mesh_filename']
    if not os.path.isabs(table_mesh_filename):
        table_mesh_filename = os.path.join(
            os.path.dirname(os.path.realpath(__file__)), '..',
            table_mesh_filename)
    table_mesh = ObjFile(table_mesh_filename).read()

    # set tensor dataset config
    tensor_config = config['tensors']
    tensor_config['fields']['depth_ims_tf_table']['height'] = im_final_height
    tensor_config['fields']['depth_ims_tf_table']['width'] = im_final_width
    tensor_config['fields']['obj_masks']['height'] = im_final_height
    tensor_config['fields']['obj_masks']['width'] = im_final_width

    # add available metrics (assuming same are computed for all objects)
    metric_names = []
    dataset = datasets[0]
    obj_keys = dataset.object_keys
    if len(obj_keys) == 0:
        raise ValueError('No valid objects in dataset %s' % (dataset.name))
    obj = dataset[obj_keys[0]]
    grasps = dataset.grasps(obj.key, gripper=gripper.name)
    grasp_metrics = dataset.grasp_metrics(obj.key,
                                          grasps,
                                          gripper=gripper.name)
    metric_names = grasp_metrics[grasp_metrics.keys()[0]].keys()
    for metric_name in metric_names:
        tensor_config['fields'][metric_name] = {}
        tensor_config['fields'][metric_name]['dtype'] = 'float32'

    # init tensor dataset
    tensor_dataset = TensorDataset(output_dir, tensor_config)
    tensor_datapoint = tensor_dataset.datapoint_template

    # setup log file
    experiment_log_filename = os.path.join(output_dir,
                                           'dataset_generation.log')
    formatter = logging.Formatter('%(asctime)s %(levelname)s: %(message)s')
    hdlr = logging.FileHandler(experiment_log_filename)
    hdlr.setFormatter(formatter)
    logging.getLogger().addHandler(hdlr)
    root_logger = logging.getLogger()

    # copy config
    out_config_filename = os.path.join(output_dir, 'dataset_generation.json')
    ordered_dict_config = collections.OrderedDict()
    for key in config.keys():
        ordered_dict_config[key] = config[key]
    with open(out_config_filename, 'w') as outfile:
        json.dump(ordered_dict_config, outfile)

    # 1. Precompute the set of valid grasps for each stable pose:
    #    i) Perpendicular to the table
    #    ii) Collision-free along the approach direction

    # load grasps if they already exist
    grasp_cache_filename = os.path.join(output_dir, CACHE_FILENAME)
    if os.path.exists(grasp_cache_filename):
        logging.info('Loading grasp candidates from file')
        candidate_grasps_dict = pkl.load(open(grasp_cache_filename, 'rb'))
    # otherwise re-compute by reading from the database and enforcing constraints
    else:
        # create grasps dict
        candidate_grasps_dict = {}

        # loop through datasets and objects
        for dataset in datasets:
            logging.info('Reading dataset %s' % (dataset.name))
            for obj in dataset:
                if obj.key not in target_object_keys[dataset.name]:
                    continue

                # init candidate grasp storage
                candidate_grasps_dict[obj.key] = {}

                # setup collision checker
                collision_checker = GraspCollisionChecker(gripper)
                collision_checker.set_graspable_object(obj)

                # read in the stable poses of the mesh
                stable_poses = dataset.stable_poses(obj.key)
                for i, stable_pose in enumerate(stable_poses):
                    # render images if stable pose is valid
                    if stable_pose.p > stable_pose_min_p:
                        candidate_grasps_dict[obj.key][stable_pose.id] = []

                        # setup table in collision checker
                        T_obj_stp = stable_pose.T_obj_table.as_frames(
                            'obj', 'stp')
                        T_obj_table = obj.mesh.get_T_surface_obj(
                            T_obj_stp,
                            delta=table_offset).as_frames('obj', 'table')
                        T_table_obj = T_obj_table.inverse()
                        collision_checker.set_table(table_mesh_filename,
                                                    T_table_obj)

                        # read grasp and metrics
                        grasps = dataset.grasps(obj.key,
                                                gripper=gripper.name)
                        logging.info(
                            'Aligning %d grasps for object %s in stable %s' %
                            (len(grasps), obj.key, stable_pose.id))

                        # align grasps with the table
                        aligned_grasps = [
                            grasp.perpendicular_table(stable_pose)
                            for grasp in grasps
                        ]

                        # check grasp validity
                        logging.info(
                            'Checking collisions for %d grasps for object %s in stable %s'
                            % (len(grasps), obj.key, stable_pose.id))
                        for aligned_grasp in aligned_grasps:
                            # check angle with table plane and skip unaligned grasps
                            _, grasp_approach_table_angle, _ = aligned_grasp.grasp_angles_from_stp_z(
                                stable_pose)
                            perpendicular_table = (
                                np.abs(grasp_approach_table_angle) <
                                max_grasp_approach_table_angle)
                            if not perpendicular_table:
                                continue

                            # check whether any valid approach directions are collision free
                            collision_free = False
                            for phi_offset in phi_offsets:
                                rotated_grasp = aligned_grasp.grasp_y_axis_offset(
                                    phi_offset)
                                collides = collision_checker.collides_along_approach(
                                    rotated_grasp, approach_dist,
                                    delta_approach)
                                if not collides:
                                    collision_free = True
                                    break

                            # store if aligned to table
                            candidate_grasps_dict[obj.key][
                                stable_pose.id].append(
                                    GraspInfo(aligned_grasp, collision_free))

                            # visualize if specified
                            if collision_free and config['vis'][
                                    'candidate_grasps']:
                                logging.info('Grasp %d' % (aligned_grasp.id))
                                vis.figure()
                                vis.gripper_on_object(gripper, aligned_grasp,
                                                      obj,
                                                      stable_pose.T_obj_world)
                                vis.show()

        # save to file
        logging.info('Saving to file')
        pkl.dump(candidate_grasps_dict, open(grasp_cache_filename, 'wb'))

    # 2. Render a dataset of images and associate the gripper pose with
    #    image coordinates for each grasp in the Dex-Net database

    # setup variables
    obj_category_map = {}
    pose_category_map = {}

    cur_pose_label = 0
    cur_obj_label = 0
    cur_image_label = 0

    # render images for each stable pose of each object in the dataset
    render_modes = [RenderMode.SEGMASK, RenderMode.DEPTH_SCENE]
    for dataset in datasets:
        logging.info('Generating data for dataset %s' % (dataset.name))

        # iterate through all object keys
        object_keys = dataset.object_keys
        for obj_key in object_keys:
            obj = dataset[obj_key]
            obj.mesh.rescale(RESCALING_FACTOR)
            if obj.key not in target_object_keys[dataset.name]:
                continue

            # read in the stable poses of the mesh
            stable_poses = dataset.stable_poses(obj.key)

            if SAVE_ONE_OBJECT:
                # Save object mesh
                savefile = ObjFile("./data/meshes/dexnet/" + obj.key + ".obj")
                savefile.write(obj.mesh)
                # Save stable poses
                save_stp = StablePoseFile("./data/meshes/dexnet/" + obj.key +
                                          ".stp")
                save_stp.write(stable_poses)
                candidate_grasp_info = candidate_grasps_dict[obj.key][
                    stable_poses[0].id]
                print("Stable pose id:", stable_poses[0].id)
                candidate_grasps = [g.grasp for g in candidate_grasp_info]
                # Save candidate grasp info
                pkl.dump(
                    candidate_grasps_dict[obj.key],
                    open("./data/meshes/dexnet/" + obj.key + ".pkl", 'wb'))
                # Save grasp metrics
                grasp_metrics = dataset.grasp_metrics(obj.key,
                                                      candidate_grasps,
                                                      gripper=gripper.name)
                write_metrics = json.dumps(grasp_metrics)
                f = open("./data/meshes/dexnet/" + obj.key + ".json", "w")
                f.write(write_metrics)
                f.close()

            for i, stable_pose in enumerate(stable_poses):
                # render images if stable pose is valid
                if stable_pose.p > stable_pose_min_p:
                    # log progress
                    logging.info('Rendering images for object %s in %s' %
                                 (obj.key, stable_pose.id))

                    # add to category maps
                    if obj.key not in obj_category_map.keys():
                        obj_category_map[obj.key] = cur_obj_label
                    pose_category_map['%s_%s' %
                                      (obj.key,
                                       stable_pose.id)] = cur_pose_label

                    # read in candidate grasps and metrics
                    candidate_grasp_info = candidate_grasps_dict[obj.key][
                        stable_pose.id]
                    candidate_grasps = [g.grasp for g in candidate_grasp_info]
                    grasp_metrics = dataset.grasp_metrics(
                        obj.key, candidate_grasps, gripper=gripper.name)

                    # compute object pose relative to the table
                    T_obj_stp = stable_pose.T_obj_table.as_frames(
                        'obj', 'stp')
                    T_obj_stp = obj.mesh.get_T_surface_obj(T_obj_stp)

                    # sample images from random variable
                    T_table_obj = RigidTransform(from_frame='table',
                                                 to_frame='obj')
                    scene_objs = {
                        'table': SceneObject(table_mesh, T_table_obj)
                    }
                    urv = UniformPlanarWorksurfaceImageRandomVariable(
                        obj.mesh,
                        render_modes,
                        'camera',
                        env_rv_params,
                        stable_pose=stable_pose,
                        scene_objs=scene_objs)

                    render_start = time.time()
                    render_samples = urv.rvs(
                        size=image_samples_per_stable_pose)
                    render_stop = time.time()
                    logging.info('Rendering images took %.3f sec' %
                                 (render_stop - render_start))

                    # visualize
                    if config['vis']['rendered_images']:
                        d = int(
                            np.ceil(np.sqrt(image_samples_per_stable_pose)))

                        # segmask
                        vis2d.figure()
                        for j, render_sample in enumerate(render_samples):
                            vis2d.subplot(d, d, j + 1)
                            vis2d.imshow(render_sample.renders[
                                RenderMode.SEGMASK].image)

                        # depth table
                        vis2d.figure()
                        for j, render_sample in enumerate(render_samples):
                            vis2d.subplot(d, d, j + 1)
                            vis2d.imshow(render_sample.renders[
                                RenderMode.DEPTH_SCENE].image)
                        vis2d.show()

                    if False:
                        # binary
                        fig = plt.figure(figsize=(8, 8))
                        fig.suptitle('SEGMASK')
                        for j, render_sample in enumerate(render_samples):
                            plt.subplot(d, d, j + 1)
                            plt.imshow(render_sample.renders[
                                RenderMode.SEGMASK].image.data)

                        # depth table
                        fig = plt.figure(figsize=(8, 8))
                        fig.suptitle('DEPTH SCENE')
                        for j, render_sample in enumerate(render_samples):
                            plt.subplot(d, d, j + 1)
                            plt.imshow(render_sample.renders[
                                RenderMode.DEPTH_SCENE].image.data)
                        plt.show()

                    # tally total amount of data
                    num_grasps = len(candidate_grasps)
                    num_images = image_samples_per_stable_pose
                    num_save = num_images * num_grasps
                    logging.info('Saving %d datapoints' % (num_save))

                    # for each candidate grasp on the object compute the
                    # projection of the grasp into image space
                    for render_sample in render_samples:
                        # read images
                        binary_im = render_sample.renders[
                            RenderMode.SEGMASK].image
                        depth_im_table = render_sample.renders[
                            RenderMode.DEPTH_SCENE].image

                        # read camera params
                        T_stp_camera = render_sample.camera.object_to_camera_pose
                        shifted_camera_intr = render_sample.camera.camera_intr

                        # read pixel offsets
                        cx = depth_im_table.center[1]
                        cy = depth_im_table.center[0]

                        # compute intrinsics for virtual camera of the final
                        # cropped and rescaled images
                        camera_intr_scale = float(im_final_height) / float(
                            im_crop_height)
                        cropped_camera_intr = shifted_camera_intr.crop(
                            im_crop_height, im_crop_width, cy, cx)
                        final_camera_intr = cropped_camera_intr.resize(
                            camera_intr_scale)

                        # create a thumbnail for each grasp
                        for grasp_info in candidate_grasp_info:
                            # read info
                            grasp = grasp_info.grasp
                            collision_free = grasp_info.collision_free

                            # get the gripper pose
                            T_obj_camera = T_stp_camera * T_obj_stp.as_frames(
                                'obj', T_stp_camera.from_frame)
                            grasp_2d = grasp.project_camera(
                                T_obj_camera, shifted_camera_intr)

                            # center images on the grasp, rotate to image x axis
                            dx = cx - grasp_2d.center.x
                            dy = cy - grasp_2d.center.y
                            translation = np.array([dy, dx])

                            binary_im_tf = binary_im.transform(
                                translation, grasp_2d.angle)
                            depth_im_tf_table = depth_im_table.transform(
                                translation, grasp_2d.angle)

                            # crop to image size
                            binary_im_tf = binary_im_tf.crop(
                                im_crop_height, im_crop_width)
                            depth_im_tf_table = depth_im_tf_table.crop(
                                im_crop_height, im_crop_width)

                            # resize to image size
                            binary_im_tf = binary_im_tf.resize(
                                (im_final_height, im_final_width),
                                interp='nearest')
                            depth_im_tf_table = depth_im_tf_table.resize(
                                (im_final_height, im_final_width))

                            # visualize the transformed images
                            if config['vis']['grasp_images']:
                                # plot 2D grasp image
                                grasp_center = Point(
                                    depth_im_tf_table.center,
                                    frame=final_camera_intr.frame)
                                tf_grasp_2d = Grasp2D(
                                    grasp_center,
                                    0,
                                    grasp_2d.depth,
                                    width=gripper.max_width,
                                    camera_intr=final_camera_intr)

                                vis2d.figure()
                                vis2d.subplot(2, 2, 1)
                                vis2d.imshow(binary_im)
                                vis2d.grasp(grasp_2d)
                                vis2d.subplot(2, 2, 2)
                                vis2d.imshow(depth_im_table)
                                vis2d.grasp(grasp_2d)
                                vis2d.subplot(2, 2, 3)
                                vis2d.imshow(binary_im_tf)
                                vis2d.grasp(tf_grasp_2d)
                                vis2d.subplot(2, 2, 4)
                                vis2d.imshow(depth_im_tf_table)
                                vis2d.grasp(tf_grasp_2d)
                                vis2d.title('Coll Free? %d' %
                                            (grasp_info.collision_free))
                                vis2d.show()

                                if False:
                                    plt.figure(figsize=(8, 8))
                                    plt.subplot(2, 2, 1)
                                    plt.imshow(binary_im.data)
                                    plt.title('Binary Image')
                                    plt.subplot(2, 2, 2)
                                    plt.imshow(depth_im_table.data)
                                    plt.subplot(2, 2, 3)
                                    plt.imshow(binary_im_tf.data)
                                    plt.subplot(2, 2, 4)
                                    plt.imshow(depth_im_tf_table.data)
                                    plt.title('Coll Free? %d' %
                                              (grasp_info.collision_free))
                                    plt.show()
                                    plt.close()

                                    # plot 3D visualization
                                    fig = plt.figure()
                                    ax = fig.add_subplot(
                                        111,
                                        projection='3d',
                                        transform=T_obj_camera)
                                    object_vertices = obj.mesh.trimesh.vertices
                                    table_vertices = table_mesh.trimesh.vertices
                                    # ax.plot_trisurf(table_vertices[:,0],table_vertices[:,1],triangles=table_mesh.trimesh.faces,Z=table_vertices[:,2],color='g')
                                    ax.plot_trisurf(
                                        object_vertices[:, 0],
                                        object_vertices[:, 1],
                                        triangles=obj.mesh.trimesh.faces,
                                        Z=object_vertices[:, 2],
                                        color='b')
                                    # Axes3D(fig,rect=(-0.2,-0.2,0.4,0.4),transform=T_obj_camera)
                                    plt.show()

                                if False:
                                    vis.figure()
                                    T_obj_world = vis.mesh_stable_pose(
                                        obj.mesh.trimesh,
                                        stable_pose.T_obj_world,
                                        style='surface',
                                        dim=0.5)
                                    vis.gripper(gripper,
                                                grasp,
                                                T_obj_world,
                                                color=(0.3, 0.3, 0.3))
                                    vis.show()

                            # form hand pose array
                            hand_pose = np.r_[grasp_2d.center.y,
                                              grasp_2d.center.x,
                                              grasp_2d.depth,
                                              grasp_2d.angle,
                                              grasp_2d.center.y -
                                              shifted_camera_intr.cy,
                                              grasp_2d.center.x -
                                              shifted_camera_intr.cx,
                                              grasp_2d.width_px]

                            # store to data buffers
                            tensor_datapoint[
                                'depth_ims_tf_table'] = depth_im_tf_table.raw_data
                            tensor_datapoint[
                                'obj_masks'] = binary_im_tf.raw_data
                            tensor_datapoint['hand_poses'] = hand_pose
                            tensor_datapoint[
                                'collision_free'] = collision_free
                            tensor_datapoint['obj_labels'] = cur_obj_label
                            tensor_datapoint['pose_labels'] = cur_pose_label
                            tensor_datapoint[
                                'image_labels'] = cur_image_label

                            for metric_name, metric_val in grasp_metrics[
                                    grasp.id].iteritems():
                                coll_free_metric = (
                                    1 * collision_free) * metric_val
                                tensor_datapoint[
                                    metric_name] = coll_free_metric
                            tensor_dataset.add(tensor_datapoint)

                            # update image label
                            cur_image_label += 1

                    # update pose label
                    cur_pose_label += 1

                    # force clean up
                    gc.collect()

            # update object label
            cur_obj_label += 1

            # force clean up
            gc.collect()

    # save last file
    tensor_dataset.flush()

    # save category mappings
    obj_cat_filename = os.path.join(output_dir, 'object_category_map.json')
    json.dump(obj_category_map, open(obj_cat_filename, 'w'))
    pose_cat_filename = os.path.join(output_dir, 'pose_category_map.json')
    json.dump(pose_category_map, open(pose_cat_filename, 'w'))
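The grasp-image transform used during dataset generation (translate the grasp center to the image center, rotate so the grasp axis aligns with the image x axis, then crop) can be sketched with plain scipy. This is an approximation of the perception library's transform, not the exact implementation, and the function name is illustrative:

import numpy as np
import scipy.ndimage as snd

def transform_crop(im, grasp_center, grasp_angle_rad, crop_h, crop_w):
    # translate the grasp center to the image center
    cy, cx = (im.shape[0] - 1) / 2., (im.shape[1] - 1) / 2.
    shifted = snd.shift(im, (cy - grasp_center[0], cx - grasp_center[1]),
                        order=1)
    # rotate about the center so the grasp axis lies along image x
    rotated = snd.rotate(shifted, np.rad2deg(grasp_angle_rad),
                         reshape=False, order=1)
    # crop a window around the center
    y0 = int(cy - crop_h / 2)
    x0 = int(cx - crop_w / 2)
    return rotated[y0:y0 + crop_h, x0:x0 + crop_w]

depth = np.random.rand(96, 96).astype(np.float32)
patch = transform_crop(depth, grasp_center=(40, 55),
                       grasp_angle_rad=np.pi / 6, crop_h=32, crop_w=32)
print(patch.shape)  # (32, 32)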
def plan_grasp(self, req):
    """ Grasp planner request handler.

    Parameters
    ----------
    req : :obj:`ROS ServiceRequest`
        ROS ServiceRequest for grasp planner service
    """
    rospy.loginfo('Planning Grasp')

    # set min dimensions
    pad = max(
        math.ceil(
            np.sqrt(2) *
            (float(self.cfg['policy']['metric']['crop_width']) / 2)),
        math.ceil(
            np.sqrt(2) *
            (float(self.cfg['policy']['metric']['crop_height']) / 2)))
    min_width = 2 * pad + self.cfg['policy']['metric']['crop_width']
    min_height = 2 * pad + self.cfg['policy']['metric']['crop_height']

    # get the raw depth and color images as ROS Image objects
    raw_color = req.color_image
    raw_depth = req.depth_image
    segmask = None
    raw_segmask = req.segmask

    # get the raw camera info as ROS CameraInfo object
    raw_camera_info = req.camera_info

    # get the bounding box as a custom ROS BoundingBox msg
    bounding_box = req.bounding_box

    # wrap the camera info in a perception CameraIntrinsics object
    camera_intrinsics = CameraIntrinsics(
        raw_camera_info.header.frame_id, raw_camera_info.K[0],
        raw_camera_info.K[4], raw_camera_info.K[2], raw_camera_info.K[5],
        raw_camera_info.K[1], raw_camera_info.height,
        raw_camera_info.width)

    # create wrapped perception RGB and depth images by unpacking the
    # ROS images using CVBridge
    try:
        color_image = ColorImage(self.cv_bridge.imgmsg_to_cv2(
            raw_color, "rgb8"),
                                 frame=camera_intrinsics.frame)
        depth_image = DepthImage(self.cv_bridge.imgmsg_to_cv2(
            raw_depth, desired_encoding="passthrough"),
                                 frame=camera_intrinsics.frame)
        segmask = BinaryImage(self.cv_bridge.imgmsg_to_cv2(
            raw_segmask, desired_encoding="passthrough"),
                              frame=camera_intrinsics.frame)
    except CvBridgeError as cv_bridge_exception:
        rospy.logerr(cv_bridge_exception)

    # check image sizes
    if color_image.height != depth_image.height or \
            color_image.width != depth_image.width:
        msg = ('Color image and depth image must be the same shape! '
               'Color is %d x %d but depth is %d x %d' %
               (color_image.height, color_image.width,
                depth_image.height, depth_image.width))
        rospy.logerr(msg)
        raise rospy.ServiceException(msg)

    if color_image.height < min_height or color_image.width < min_width:
        msg = ('Color image is too small! Must be at least %d x %d '
               'resolution but the requested image is only %d x %d' %
               (min_height, min_width, color_image.height,
                color_image.width))
        rospy.logerr(msg)
        raise rospy.ServiceException(msg)

    # inpaint images
    color_image = color_image.inpaint(
        rescale_factor=self.cfg['inpaint_rescale_factor'])
    depth_image = depth_image.inpaint(
        rescale_factor=self.cfg['inpaint_rescale_factor'])

    # visualize
    if self.cfg['vis']['color_image']:
        vis.imshow(color_image)
        vis.show()
    if self.cfg['vis']['depth_image']:
        vis.imshow(depth_image)
        vis.show()
    if self.cfg['vis']['segmask'] and segmask is not None:
        vis.imshow(segmask)
        vis.show()

    # aggregate color and depth images into a single perception RgbdImage
    rgbd_image = RgbdImage.from_color_and_depth(color_image, depth_image)

    # calc crop parameters
    minX = bounding_box.minX - pad
    minY = bounding_box.minY - pad
    maxX = bounding_box.maxX + pad
    maxY = bounding_box.maxY + pad

    # contain box to image->don't let it exceed image height/width bounds
    if minX < 0:
        minX = 0
    if minY < 0:
        minY = 0
    if maxX > rgbd_image.width:
        maxX = rgbd_image.width
    if maxY > rgbd_image.height:
        maxY = rgbd_image.height

    centroidX = (maxX + minX) / 2
    centroidY = (maxY + minY) / 2

    # compute width and height
    width = maxX - minX
    height = maxY - minY

    # crop camera intrinsics and rgbd image
    cropped_camera_intrinsics = camera_intrinsics.crop(
        height, width, centroidY, centroidX)
    cropped_rgbd_image = rgbd_image.crop(height, width, centroidY,
                                         centroidX)
    cropped_segmask = None
    if segmask is not None:
        cropped_segmask = segmask.crop(height, width, centroidY,
                                       centroidX)

    # visualize
    if self.cfg['vis']['cropped_rgbd_image']:
        vis.imshow(cropped_rgbd_image)
        vis.show()

    # create an RgbdImageState with the cropped RgbdImage and CameraIntrinsics
    image_state = RgbdImageState(cropped_rgbd_image,
                                 cropped_camera_intrinsics,
                                 segmask=cropped_segmask)

    # execute policy
    try:
        return self.execute_policy(image_state, self.grasping_policy,
                                   self.grasp_pose_publisher,
                                   cropped_camera_intrinsics.frame)
    except NoValidGraspsException:
        rospy.logerr(
            'While executing policy found no valid grasps from sampled '
            'antipodal point pairs. Aborting Policy!')
        raise rospy.ServiceException(
            'While executing policy found no valid grasps from sampled '
            'antipodal point pairs. Aborting Policy!')
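The pad computed at the top of plan_grasp is the clearance a crop window needs so it can be rotated arbitrarily without leaving the image: a window of side s rotated by 45 degrees spans sqrt(2) * s, so each side needs sqrt(2) * s/2 of margin. A quick check with placeholder crop sizes (not values from any shipped config):

import math

crop_width, crop_height = 96, 96  # placeholder config values
pad = max(math.ceil(math.sqrt(2) * (crop_width / 2.)),
          math.ceil(math.sqrt(2) * (crop_height / 2.)))
min_width = 2 * pad + crop_width
min_height = 2 * pad + crop_height
print(pad, min_width, min_height)  # 68 232 232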
def sample_grasp(self, env, mesh_obj, vis=True):
    # Setup sensor.
    #camera_intr = CameraIntrinsics.load(camera_intr_filename)

    # Read images: get a depth image from the camera.
    inpaint_rescale_factor = 1.0
    segmask_filename = None

    _, center, extents, obj_xquat, bbox_xpos = self.get_bbox_properties(
        env, mesh_obj)
    camera_pos = copy.deepcopy(center)
    camera_pos[2] += 0.5 * extents[2] + 0.2

    env.set_camera(camera_pos,
                   np.array([0.7071068, 0, 0, -0.7071068]),
                   camera_name=f"ext_camera_0")
    rgb, depth = env.render_from_camera(
        int(self.config.camera_img_height),
        int(self.config.camera_img_width),
        camera_name=f"ext_camera_0")

    # enforce zoom out
    from scipy.interpolate import interp2d

    center_x = self.config.camera_img_height / 2 + 1
    center_y = self.config.camera_img_width / 2 + 1
    img_height = self.config.camera_img_height
    img_width = self.config.camera_img_width
    xdense = np.linspace(0, img_height - 1, img_height)
    ydense = np.linspace(0, img_width - 1, img_width)
    xintr = (xdense - center_x) * (1.0 / self.rescale_factor) + center_x
    yintr = (ydense - center_y) * (1.0 / self.rescale_factor) + center_y
    xintr[xintr < 0] = 0
    xintr[xintr > (img_height - 1)] = img_height - 1
    yintr[yintr < 0] = 0
    yintr[yintr > (img_width - 1)] = img_width - 1

    fr = interp2d(xdense, ydense, rgb[:, :, 0], kind="linear")
    rgb_r_new = fr(xintr, yintr)
    fg = interp2d(xdense, ydense, rgb[:, :, 1], kind="linear")
    rgb_g_new = fg(xintr, yintr)
    fb = interp2d(xdense, ydense, rgb[:, :, 2], kind="linear")
    rgb_b_new = fb(xintr, yintr)
    rgb_new = np.stack([rgb_r_new, rgb_g_new, rgb_b_new], axis=2)

    fd = interp2d(xdense, ydense, depth, kind="linear")
    depth_new = fd(xintr, yintr)

    # visualize the interpolation
    #import imageio
    #imageio.imwrite(f"tmp/rgb_{self.iter_id}.png", rgb)
    #imageio.imwrite(f"tmp/rgb2_{self.iter_id}.png", rgb_new)
    #imageio.imwrite(f"tmp/depth_{self.iter_id}.png", depth)
    #imageio.imwrite(f"tmp/depth2_{self.iter_id}.png", depth_new)

    rgb = rgb_new
    depth = depth_new
    depth = depth * self.rescale_factor

    # rgb: 128 x 128 x 3
    # depth: 128 x 128 x 1
    scaled_camera_fov_y = self.config.camera_fov_y
    aspect = 1
    scaled_fovx = 2 * np.arctan(
        np.tan(np.deg2rad(scaled_camera_fov_y) * 0.5) * aspect)
    scaled_fovx = np.rad2deg(scaled_fovx)
    scaled_fovy = scaled_camera_fov_y

    cx = self.config.camera_img_width * 0.5
    cy = self.config.camera_img_height * 0.5
    scaled_fx = cx / np.tan(np.deg2rad(
        scaled_fovx / 2.)) * (self.rescale_factor)
    scaled_fy = cy / np.tan(np.deg2rad(
        scaled_fovy / 2.)) * (self.rescale_factor)

    camera_intr = CameraIntrinsics(frame='phoxi',
                                   fx=scaled_fx,
                                   fy=scaled_fy,
                                   cx=self.config.camera_img_width * 0.5,
                                   cy=self.config.camera_img_height * 0.5,
                                   height=self.config.camera_img_height,
                                   width=self.config.camera_img_width)
    depth_im = DepthImage(depth, frame=camera_intr.frame)
    color_im = ColorImage(np.zeros([depth_im.height, depth_im.width,
                                    3]).astype(np.uint8),
                          frame=camera_intr.frame)

    # Optionally read a segmask.
    valid_px_mask = depth_im.invalid_pixel_mask().inverse()
    segmask = valid_px_mask

    # Inpaint.
    depth_im = depth_im.inpaint(rescale_factor=inpaint_rescale_factor)

    # Create state.
    rgbd_im = RgbdImage.from_color_and_depth(color_im, depth_im)
    state = RgbdImageState(rgbd_im, camera_intr, segmask=segmask)

    # Query policy.
    policy_start = time.time()
    action = self.policy(state)
    print("Planning took %.3f sec" % (time.time() - policy_start))

    # numpy array with 2 values
    grasp_center = action.grasp.center._data[:, 0]  # (width, depth)
    grasp_depth = action.grasp.depth * (1 / self.rescale_factor)
    grasp_angle = action.grasp.angle  # np.pi*0.3

    if self.config.data_collection_mode:
        self.current_grasp = action.grasp

        depth_im = state.rgbd_im.depth
        scale = 1.0
        depth_im_scaled = depth_im.resize(scale)
        translation = scale * np.array([
            depth_im.center[0] - grasp_center[1],
            depth_im.center[1] - grasp_center[0]
        ])
        im_tf = depth_im_scaled
        im_tf = depth_im_scaled.transform(translation, grasp_angle)
        im_tf = im_tf.crop(self.gqcnn_image_size, self.gqcnn_image_size)

        # get the patch
        self.current_patch = im_tf.raw_data

    XYZ_origin, gripper_quat = self.compute_grasp_pts_from_grasp_sample(
        grasp_center, grasp_depth, grasp_angle, env)

    return XYZ_origin[:, 0], gripper_quat

    # Vis final grasp (unreachable: kept from the original, after the return).
    if vis:
        from visualization import Visualizer2D as vis
        vis.figure(size=(10, 10))
        vis.imshow(rgbd_im.depth,
                   vmin=self.policy_config["vis"]["vmin"],
                   vmax=self.policy_config["vis"]["vmax"])
        vis.grasp(action.grasp, scale=2.5, show_center=False,
                  show_axis=True)
        vis.title("Planned grasp at depth {0:.3f}m with Q={1:.3f}".format(
            action.grasp.depth, action.q_value))
        vis.show(f"tmp/grasp2_{mesh_obj.name}_{self.iter_id}.png")

        vis.figure(size=(10, 10))
        vis.imshow(im_tf,
                   vmin=self.policy_config["vis"]["vmin"],
                   vmax=self.policy_config["vis"]["vmax"])
        vis.show(f"tmp/cropd_{mesh_obj.name}_{self.iter_id}.png")

        import ipdb
        ipdb.set_trace()

        return XYZ_origin[:, 0], gripper_quat

    import ipdb
    ipdb.set_trace()
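sample_grasp recovers focal lengths from the simulator's field of view via the pinhole relation fx = (W/2) / tan(fovx/2), deriving the horizontal fov from the vertical one through the aspect ratio. A standalone sketch with illustrative values:

import numpy as np

img_w, img_h, fov_y_deg, aspect = 128, 128, 45.0, 1.0
fov_x = np.rad2deg(
    2 * np.arctan(np.tan(np.deg2rad(fov_y_deg) * 0.5) * aspect))

cx, cy = img_w * 0.5, img_h * 0.5
fx = cx / np.tan(np.deg2rad(fov_x / 2.))
fy = cy / np.tan(np.deg2rad(fov_y_deg / 2.))
print(fov_x, fx, fy)  # 45.0, ~154.5, ~154.5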
def run_experiment(): """ Run the experiment """ print("run_experiment") rospy.loginfo('Wait for the service...') # wait for Grasp Planning Service and create Service Proxy rospy.wait_for_service('grasping_policy') plan_grasp = rospy.ServiceProxy('grasping_policy', GQCNNGraspPlanner) # create ROS CVBridge # cv_bridge = CvBridge() # get camera intrinsics # camera_intrinsics = sensor.color_intrinsics camera_intrinsics = sensor.ir_intrinsics rospy.loginfo('camera_intrinsics: {}'.format(camera_intrinsics.rosmsg)) rospy.loginfo('Beginning experiment') # get the images from the sensor color_image, depth_image, _ = sensor.frames() # inpaint to remove holes inpainted_color_image = color_image.inpaint( rescale_factor=config['inpaint_rescale_factor']) inpainted_depth_image = depth_image.inpaint( rescale_factor=config['inpaint_rescale_factor']) print("inpainted_color_image: shape", inpainted_depth_image.shape) detector_cfg['image_width'] = inpainted_depth_image.width // 3 detector_cfg['image_height'] = inpainted_depth_image.height // 3 detector = RgbdDetectorFactory.detector('point_cloud_box') rospy.loginfo("Detect bbox") detection = detector.detect(inpainted_color_image, inpainted_depth_image, detector_cfg, camera_intrinsics, T_camera_world, vis_foreground=False, vis_segmentation=False)[0] if config['vis']['vis_detector_output']: vis.figure() vis.subplot(1, 2, 1) vis.imshow(detection.color_thumbnail) vis.subplot(1, 2, 2) vis.imshow(detection.depth_thumbnail) vis.show() boundingBox = BoundingBox() boundingBox.minY = detection.bounding_box.min_pt[0] boundingBox.minX = detection.bounding_box.min_pt[1] boundingBox.maxY = detection.bounding_box.max_pt[0] boundingBox.maxX = detection.bounding_box.max_pt[1] try: rospy.loginfo("Start planning") start_time = time.time() planned_grasp_data = plan_grasp(inpainted_color_image.rosmsg, inpainted_depth_image.rosmsg, camera_intrinsics.rosmsg, boundingBox, None) grasp_plan_time = time.time() - start_time rospy.loginfo("Planning time: {}".format(grasp_plan_time)) rospy.loginfo("planned_grasp_data:\n{}".format( planned_grasp_data.grasp.pose)) grasp_camera_pose = planned_grasp_data.grasp # create Stamped ROS Transform camera_world_transform = TransformStamped() camera_world_transform.header.stamp = rospy.Time.now() camera_world_transform.header.frame_id = T_camera_world.from_frame camera_world_transform.child_frame_id = T_camera_world.to_frame camera_world_transform.transform.translation.x = T_camera_world.translation[ 0] camera_world_transform.transform.translation.y = T_camera_world.translation[ 1] camera_world_transform.transform.translation.z = T_camera_world.translation[ 2] q = T_camera_world.quaternion camera_world_transform.transform.rotation.x = q[1] camera_world_transform.transform.rotation.y = q[2] camera_world_transform.transform.rotation.z = q[3] camera_world_transform.transform.rotation.w = q[0] grasp_world_pose = tf2_geometry_msgs.do_transform_pose( grasp_camera_pose, camera_world_transform) rospy.loginfo( "World CS planned_grasp_data:\n{}".format(grasp_world_pose)) except rospy.ServiceException as e: rospy.logerr("Service call failed: \n %s" % e)
    def quality(self, state, actions, params=None):
        """Given a suction point, compute a score based on a best-fit 3D plane
        of the neighboring points.

        Parameters
        ----------
        state : :obj:`RgbdImageState`
            An RgbdImageState instance that encapsulates rgbd_im, camera_intr,
            segmask, fully_observed.
        actions : :obj:`list` of :obj:`SuctionPoint2D`
            Suction grasps in image space, each of which encapsulates center,
            approach direction, depth, camera_intr.
        params : dict
            Stores params used in computing suction quality.

        Returns
        -------
        :obj:`numpy.ndarray`
            Array of the quality for each grasp
        """
        qualities = []

        # deproject points
        point_cloud_image = state.camera_intr.deproject_to_image(
            state.rgbd_im.depth)

        # compute negative SSE from the best-fit plane for each grasp
        for i, action in enumerate(actions):
            if not isinstance(action, SuctionPoint2D):
                raise ValueError(
                    'This function can only be used to evaluate suction quality'
                )

            # x, y go into matrix A and z into vector b
            points = self._points_in_window(point_cloud_image,
                                            action,
                                            segmask=state.segmask)
            A, b = self._points_to_matrices(points)
            # vector w with a bias term represents a best-fit plane
            w = self._action_to_plane(point_cloud_image, action)
            sse = self._sum_of_squared_residuals(w, A, b)

            if params is not None and params['vis']['plane']:
                from visualization import Visualizer2D as vis2d
                from visualization import Visualizer3D as vis3d
                mid_i = A.shape[0] // 2  # integer index for the window center
                pred_z = A.dot(w)
                p0 = np.array([A[mid_i, 0], A[mid_i, 1], pred_z[mid_i]])
                n = np.array([w[0], w[1], -1])
                n = n / np.linalg.norm(n)
                tx = np.array([n[1], -n[0], 0])
                tx = tx / np.linalg.norm(tx)
                ty = np.cross(n, tx)
                R = np.array([tx, ty, n]).T

                c = state.camera_intr.deproject_pixel(action.depth,
                                                      action.center)
                d = Point(c.data - 0.01 * action.axis, frame=c.frame)

                T_table_world = RigidTransform(rotation=R,
                                               translation=p0,
                                               from_frame='patch',
                                               to_frame='world')

                vis3d.figure()
                vis3d.points(point_cloud_image.to_point_cloud(),
                             scale=0.0025,
                             subsample=10,
                             random=True,
                             color=(0, 0, 1))
                vis3d.points(PointCloud(points.T),
                             scale=0.0025,
                             color=(1, 0, 0))
                vis3d.points(c, scale=0.005, color=(1, 1, 0))
                vis3d.points(d, scale=0.005, color=(1, 1, 0))
                vis3d.table(T_table_world, dim=0.01)
                vis3d.show()

                vis2d.figure()
                vis2d.imshow(state.rgbd_im.depth)
                vis2d.scatter(action.center.x, action.center.y, s=50, c='b')
                vis2d.show()

            # evaluate how well the best-fit plane describes all points in
            # the window
            quality = np.exp(-sse)
            qualities.append(quality)

        return np.array(qualities)
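# A minimal numpy sketch of the plane-fit scoring above, under the assumption
# that _points_to_matrices builds rows A = [x, y, 1] and b = z, and that the
# score is exp(-SSE) of the least-squares plane. Standalone illustration; the
# name plane_fit_quality is hypothetical, not a method of the class above.
import numpy as np

def plane_fit_quality(points):
    """points: Nx3 array of 3D points in the suction window."""
    A = np.c_[points[:, 0], points[:, 1], np.ones(points.shape[0])]
    b = points[:, 2]
    # best-fit plane z = w[0]*x + w[1]*y + w[2] in the least-squares sense
    w, *_ = np.linalg.lstsq(A, b, rcond=None)
    sse = np.sum((A.dot(w) - b) ** 2)
    return np.exp(-sse)  # near 1 for planar patches, near 0 otherwise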
    def _action(self, state):
        """ Plans the grasp with the highest probability of success on
        the given RGB-D image.

        Parameters
        ----------
        state : :obj:`RgbdImageState`
            image to plan grasps on

        Returns
        -------
        :obj:`GraspAction`
            grasp to execute
        """
        # check valid input
        if not isinstance(state, RgbdImageState):
            raise ValueError('Must provide an RGB-D image state.')

        # parse state
        rgbd_im = state.rgbd_im
        camera_intr = state.camera_intr
        segmask = state.segmask

        # sample grasps
        grasps = self._grasp_sampler.sample(
            rgbd_im,
            camera_intr,
            self._num_grasp_samples,
            segmask=segmask,
            visualize=self.config['vis']['grasp_sampling'],
            seed=None)
        num_grasps = len(grasps)
        if num_grasps == 0:
            logging.warning('No valid grasps could be found')
            raise NoValidGraspsException()

        # compute grasp quality
        compute_start = time()
        q_values = self._grasp_quality_fn(state, grasps, params=self._config)
        logging.debug('Grasp evaluation took %.3f sec' %
                      (time() - compute_start))

        if self.config['vis']['grasp_candidates']:
            # display each grasp on the original image, colored by predicted
            # success; guard the denominator in case all grasps score equally
            q_spread = np.max(q_values) - np.min(q_values)
            norm_q_values = (q_values - np.min(q_values)) / max(q_spread, 1e-8)
            vis.figure(size=(FIGSIZE, FIGSIZE))
            vis.imshow(rgbd_im.depth,
                       vmin=self.config['vis']['vmin'],
                       vmax=self.config['vis']['vmax'])
            for grasp, q in zip(grasps, norm_q_values):
                vis.grasp(grasp,
                          scale=1.0,
                          grasp_center_size=10,
                          grasp_center_thickness=2.5,
                          jaw_width=2.5,
                          show_center=False,
                          show_axis=True,
                          color=plt.cm.RdYlGn(q))
            vis.title('Sampled grasps')
            self.show('grasp_candidates.png')

        # select grasp
        index = self.select(grasps, q_values)
        grasp = grasps[index]
        q_value = q_values[index]
        if self.config['vis']['grasp_plan']:
            vis.figure()
            vis.imshow(rgbd_im.depth,
                       vmin=self.config['vis']['vmin'],
                       vmax=self.config['vis']['vmax'])
            vis.grasp(grasp, scale=2.0, show_axis=True)
            vis.title('Best Grasp: d=%.3f, q=%.3f' % (grasp.depth, q_value))
            vis.show()

        return GraspAction(grasp, q_value, state.rgbd_im.depth)
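# A hedged usage sketch for the policy above, following the perception / gqcnn
# conventions (ColorImage, DepthImage, RgbdImage, RgbdImageState). The file
# paths are placeholders, the RgbdImageState import location varies across
# gqcnn versions, and `policy` stands for an instance of the policy class
# above built elsewhere from its YAML config; calling it invokes _action via
# the base Policy __call__.
from perception import CameraIntrinsics, ColorImage, DepthImage, RgbdImage
from gqcnn import RgbdImageState  # import path differs across gqcnn versions

camera_intr = CameraIntrinsics.load('data/calib/primesense.intr')
color_im = ColorImage.open('data/examples/color_0.png',
                           frame=camera_intr.frame)
depth_im = DepthImage.open('data/examples/depth_0.npy',
                           frame=camera_intr.frame)
rgbd_im = RgbdImage.from_color_and_depth(color_im, depth_im)
state = RgbdImageState(rgbd_im, camera_intr)
action = policy(state)  # GraspAction with .grasp and .q_value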
    def execute_policy(self, rgbd_image_state, resetting=False):
        """ Executes a grasping policy on an `RgbdImageState`.

        Parameters
        ----------
        rgbd_image_state: type `gqcnn.RgbdImageState`
            The :py:class:`gqcnn.RgbdImageState` that encapsulates the depth
            and color image along with camera intrinsics.

        Returns
        -------
        :obj:`numpy.ndarray`
            The grasping pose as [x, y, z, qx, qy, qz, qw] in the world
            frame, or None if no valid grasp was found.
        """
        policy_start = time.time()
        if not self.grasping_policy:
            self._get_grasp_policy()
        try:
            grasping_action = self.grasping_policy(rgbd_image_state)
        except Exception:
            vis.figure(size=(10, 10))
            vis.imshow(self.rgbd_im.color, vmin=0, vmax=255)
            vis.title("No Valid Grasp, Task Finished")
            vis.show()
            # without a valid grasp there is no pose to compute; the original
            # bare except fell through and raised a NameError below
            return None
        self.logger.info("Planning took %.3f sec" %
                         (time.time() - policy_start))

        # Fold the grasp angle w.r.t. the x-axis of the camera frame into
        # (-90, 90] degrees, exploiting the 180-degree symmetry of a
        # parallel-jaw grasp axis
        angle_wrt_x = grasping_action.grasp.angle
        angle_degree = angle_wrt_x * 180 / np.pi
        if angle_degree <= -270:
            angle_degree += 360
        elif -270 < angle_degree <= -90:
            angle_degree += 180
        elif 90 < angle_degree <= 270:
            angle_degree -= 180
        elif 270 < angle_degree <= 360:
            angle_degree -= 360
        angle_wrt_x = angle_degree * np.pi / 180

        if resetting:
            angle_wrt_x += np.pi / 2
            # Translation of the grasping point w.r.t. the camera frame
            grasping_translation = np.array([
                grasping_action.grasp.pose().translation[0] * -1,
                grasping_action.grasp.pose().translation[1],
                grasping_action.grasp.pose().translation[2] * -1
            ])
            # Rotation matrix from world frame to camera frame: a rotation of
            # pi about x composed with a rotation of pi about z
            world_to_cam_rotation = np.dot(
                np.array([[1, 0, 0], [0, np.cos(np.pi), -np.sin(np.pi)],
                          [0, np.sin(np.pi), np.cos(np.pi)]]),
                np.array([[np.cos(np.pi), -np.sin(np.pi), 0],
                          [np.sin(np.pi), np.cos(np.pi), 0], [0, 0, 1]]))
            # Rotation matrix from camera frame to gripper frame
            cam_to_gripper_rotation = np.array(
                [[np.cos(angle_wrt_x), -np.sin(angle_wrt_x), 0],
                 [np.sin(angle_wrt_x), np.cos(angle_wrt_x), 0],
                 [0, 0, 1]])
        else:
            # Translation of the grasping point w.r.t. the camera frame
            grasping_translation = np.array([
                grasping_action.grasp.pose().translation[1],
                grasping_action.grasp.pose().translation[0],
                grasping_action.grasp.pose().translation[2]
            ]) * -1
            # Rotation matrix from world frame to camera frame: a rotation of
            # pi about x composed with a rotation of pi/2 about z
            world_to_cam_rotation = np.dot(
                np.array([[1, 0, 0], [0, np.cos(np.pi), -np.sin(np.pi)],
                          [0, np.sin(np.pi), np.cos(np.pi)]]),
                np.array([[np.cos(np.pi / 2), -np.sin(np.pi / 2), 0],
                          [np.sin(np.pi / 2), np.cos(np.pi / 2), 0],
                          [0, 0, 1]]))
            # Rotation matrix from camera frame to gripper frame
            cam_to_gripper_rotation = np.dot(
                np.array([[np.cos(angle_wrt_x), -np.sin(angle_wrt_x), 0],
                          [np.sin(angle_wrt_x), np.cos(angle_wrt_x), 0],
                          [0, 0, 1]]),
                np.array([[np.cos(np.pi / 2), -np.sin(np.pi / 2), 0],
                          [np.sin(np.pi / 2), np.cos(np.pi / 2), 0],
                          [0, 0, 1]]))

        world_to_gripper_rotation = np.dot(world_to_cam_rotation,
                                           cam_to_gripper_rotation)
        # from_rotation_matrix returns a quaternion with w, x, y, z fields;
        # reorder into the xyzw convention used downstream
        quat = from_rotation_matrix(world_to_gripper_rotation)
        grasping_quaternion = np.array([quat.x, quat.y, quat.z, quat.w])

        grasping_pose = np.hstack((grasping_translation, grasping_quaternion))

        vis.figure(size=(10, 10))
        vis.imshow(self.rgbd_im.color, vmin=0, vmax=255)
        vis.grasp(grasping_action.grasp,
                  scale=2.5,
                  show_center=False,
                  show_axis=True)
        vis.title("Planned grasp at depth {0:.3f}m \n".format(
            grasping_action.grasp.depth) +
                  'grasping pose {}'.format(grasping_pose))
        vis.show()
        return grasping_pose
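# The branch cascade above folds the grasp angle into roughly (-90, 90]
# degrees. A compact modular-arithmetic sketch that is equivalent up to the
# degenerate +/-90-degree boundary (harmless given the 180-degree symmetry of
# a parallel-jaw axis); fold_grasp_angle is an illustrative name only.
import numpy as np

def fold_grasp_angle(angle_rad):
    deg = np.rad2deg(angle_rad)
    folded = ((deg + 90.0) % 180.0) - 90.0  # maps into [-90, 90)
    return np.deg2rad(folded)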
    def _action(self, state):
        """ Plans the grasp with the highest probability of success on
        the given RGB-D image.

        Parameters
        ----------
        state : :obj:`RgbdImageState`
            image to plan grasps on

        Returns
        -------
        :obj:`GraspAction`
            grasp to execute
        """
        # check valid input
        if not isinstance(state, RgbdImageState):
            raise ValueError('Must provide an RGB-D image state.')

        # parse state
        seed_set_start = time()
        rgbd_im = state.rgbd_im
        depth_im = rgbd_im.depth
        camera_intr = state.camera_intr
        segmask = state.segmask
        point_cloud_im = camera_intr.deproject_to_image(depth_im)
        normal_cloud_im = point_cloud_im.normal_cloud_im()

        # sample grasps
        grasps = self._grasp_sampler.sample(
            rgbd_im,
            camera_intr,
            self._num_seed_samples,
            segmask=segmask,
            visualize=self.config['vis']['grasp_sampling'],
            seed=self._seed)

        num_grasps = len(grasps)
        if num_grasps == 0:
            logging.warning('No valid grasps could be found')
            raise NoValidGraspsException()

        grasp_type = 'parallel_jaw'
        if isinstance(grasps[0], SuctionPoint2D):
            grasp_type = 'suction'

        logging.info('Sampled %d grasps' % (len(grasps)))
        logging.info('Computing the seed set took %.3f sec' %
                     (time() - seed_set_start))

        # iteratively refit and sample
        for j in range(self._num_iters):
            logging.info('CEM iter %d' % (j))

            # predict grasps
            predict_start = time()
            q_values = self._grasp_quality_fn(state, grasps,
                                              params=self._config)
            logging.info('Prediction took %.3f sec' %
                         (time() - predict_start))

            # sort grasps by predicted quality; sorted() is used because in
            # Python 3 zip returns an iterator with no sort method
            resample_start = time()
            q_values_and_indices = sorted(zip(q_values,
                                              np.arange(num_grasps)),
                                          key=lambda x: x[0],
                                          reverse=True)

            if self.config['vis']['grasp_candidates']:
                # display each grasp on the original image, colored by
                # predicted success
                norm_q_values = q_values
                vis.figure(size=(FIGSIZE, FIGSIZE))
                vis.imshow(rgbd_im.depth,
                           vmin=self.config['vis']['vmin'],
                           vmax=self.config['vis']['vmax'])
                for grasp, q in zip(grasps, norm_q_values):
                    vis.grasp(grasp,
                              scale=2.0,
                              jaw_width=2.0,
                              show_center=False,
                              show_axis=True,
                              color=plt.cm.RdYlGn(q))
                vis.title('Sampled grasps iter %d' % (j))
                filename = None
                if self._logging_dir is not None:
                    filename = os.path.join(self._logging_dir,
                                            'cem_iter_%d.png' % (j))
                vis.show(filename)

            # fit elite set
            elite_start = time()
            num_refit = max(int(np.ceil(self._gmm_refit_p * num_grasps)), 1)
            elite_q_values = [i[0] for i in q_values_and_indices[:num_refit]]
            elite_grasp_indices = [
                i[1] for i in q_values_and_indices[:num_refit]
            ]
            elite_grasps = [grasps[i] for i in elite_grasp_indices]
            elite_grasp_arr = np.array([g.feature_vec for g in elite_grasps])

            if self.config['vis']['elite_grasps']:
                # display each elite grasp on the original image, colored by
                # predicted success
                norm_q_values = (elite_q_values - np.min(elite_q_values)) / (
                    np.max(elite_q_values) - np.min(elite_q_values))
                vis.figure(size=(FIGSIZE, FIGSIZE))
                vis.imshow(rgbd_im.depth,
                           vmin=self.config['vis']['vmin'],
                           vmax=self.config['vis']['vmax'])
                for grasp, q in zip(elite_grasps, norm_q_values):
                    vis.grasp(grasp,
                              scale=1.5,
                              show_center=False,
                              show_axis=True,
                              color=plt.cm.RdYlGn(q))
                vis.title('Elite grasps iter %d' % (j))
                filename = None
                if self._logging_dir is not None:
                    filename = os.path.join(self._logging_dir,
                                            'elite_set_iter_%d.png' % (j))
                vis.show(filename)

            # standardize the elite set so each feature dimension has zero
            # mean and unit variance before fitting the GMM
            elite_grasp_mean = np.mean(elite_grasp_arr, axis=0)
            elite_grasp_std = np.std(elite_grasp_arr, axis=0)
            elite_grasp_std[elite_grasp_std == 0] = 1e-6
            elite_grasp_arr = (elite_grasp_arr -
                               elite_grasp_mean) / elite_grasp_std
            logging.info('Elite set computation took %.3f sec' %
                         (time() - elite_start))
            # fit a GMM to the top samples
            num_components = max(
                int(np.ceil(self._gmm_component_frac * num_refit)), 1)
            uniform_weights = (1.0 / num_components) * np.ones(num_components)
            gmm = GaussianMixture(n_components=num_components,
                                  weights_init=uniform_weights,
                                  reg_covar=self._gmm_reg_covar)
            train_start = time()
            gmm.fit(elite_grasp_arr)
            logging.info('GMM fitting with %d components took %.3f sec' %
                         (num_components, time() - train_start))

            # sample the next grasps
            grasps = []
            loop_start = time()
            num_tries = 0
            while (len(grasps) < self._num_gmm_samples
                   and num_tries < self._max_resamples_per_iteration):
                # sample from the GMM and undo the standardization
                sample_start = time()
                grasp_vecs, _ = gmm.sample(n_samples=self._num_gmm_samples)
                grasp_vecs = elite_grasp_std * grasp_vecs + elite_grasp_mean
                logging.info('GMM sampling took %.3f sec' %
                             (time() - sample_start))

                # convert features to grasps and store if in segmask
                for k, grasp_vec in enumerate(grasp_vecs):
                    feature_start = time()
                    if grasp_type == 'parallel_jaw':
                        # form grasp object
                        grasp = Grasp2D.from_feature_vec(
                            grasp_vec,
                            width=self._gripper_width,
                            camera_intr=camera_intr)
                    elif grasp_type == 'suction':
                        # read depth and approach axis
                        u = int(min(max(grasp_vec[1], 0),
                                    depth_im.height - 1))
                        v = int(min(max(grasp_vec[0], 0),
                                    depth_im.width - 1))
                        grasp_depth = depth_im[u, v]

                        # approach axis
                        grasp_axis = -normal_cloud_im[u, v]

                        # form grasp object
                        grasp = SuctionPoint2D.from_feature_vec(
                            grasp_vec,
                            camera_intr=camera_intr,
                            depth=grasp_depth,
                            axis=grasp_axis)
                    logging.debug('Feature vec took %.5f sec' %
                                  (time() - feature_start))

                    bounds_start = time()
                    # check in bounds
                    if state.segmask is None or \
                        (grasp.center.y >= 0 and
                         grasp.center.y < state.segmask.height and
                         grasp.center.x >= 0 and
                         grasp.center.x < state.segmask.width and
                         np.any(state.segmask[int(grasp.center.y),
                                              int(grasp.center.x)] != 0) and
                         grasp.approach_angle < self._max_approach_angle):
                        # check validity according to filters
                        grasps.append(grasp)
                    logging.debug('Bounds took %.5f sec' %
                                  (time() - bounds_start))
                    num_tries += 1

            # check num grasps
            num_grasps = len(grasps)
            if num_grasps == 0:
                logging.warning('No valid grasps could be found')
                raise NoValidGraspsException()

            logging.info('Resample loop took %.3f sec' %
                         (time() - loop_start))
            logging.info('Resampling took %.3f sec' %
                         (time() - resample_start))

        # predict final set of grasps
        predict_start = time()
        q_values = self._grasp_quality_fn(state, grasps, params=self._config)
        logging.info('Final prediction took %.3f sec' %
                     (time() - predict_start))

        if self.config['vis']['grasp_candidates']:
            # display each grasp on the original image, colored by predicted
            # success
            norm_q_values = q_values
            vis.figure(size=(FIGSIZE, FIGSIZE))
            vis.imshow(rgbd_im.depth,
                       vmin=self.config['vis']['vmin'],
                       vmax=self.config['vis']['vmax'])
            for grasp, q in zip(grasps, norm_q_values):
                vis.grasp(grasp,
                          scale=2.0,
                          jaw_width=2.0,
                          show_center=False,
                          show_axis=True,
                          color=plt.cm.RdYlGn(q))
            vis.title('Final sampled grasps')
            filename = None
            if self._logging_dir is not None:
                filename = os.path.join(self._logging_dir, 'final_grasps.png')
            vis.show(filename)

        # select grasp
        index = self.select(grasps, q_values)
        grasp = grasps[index]
        q_value = q_values[index]
        if self.config['vis']['grasp_plan']:
            vis.figure()
            vis.imshow(rgbd_im.depth,
                       vmin=self.config['vis']['vmin'],
                       vmax=self.config['vis']['vmax'])
            vis.grasp(grasp,
                      scale=5.0,
                      show_center=False,
                      show_axis=True,
                      jaw_width=1.0,
                      grasp_axis_width=0.2)
            vis.title('Best Grasp: d=%.3f, q=%.3f' % (grasp.depth, q_value))
            filename = None
            if self._logging_dir is not None:
                filename = os.path.join(self._logging_dir,
                                        'planned_grasp.png')
            vis.show(filename)

        # form return image
        image = state.rgbd_im.depth
        if isinstance(self._grasp_quality_fn, GQCnnQualityFunction):
            image_arr, _ = self._grasp_quality_fn.grasps_to_tensors([grasp],
                                                                    state)
            image = DepthImage(image_arr[0, ...], frame=state.rgbd_im.frame)

        # return action
        action = GraspAction(grasp, q_value, image)
        return action
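# A minimal numpy sketch of the cross-entropy-method loop implemented above:
# score samples, keep the elite fraction, refit the sampling distribution,
# resample. The real policy fits a GMM over grasp feature vectors; this toy
# version uses a single Gaussian over a scalar and the name cem_maximize is
# illustrative only.
import numpy as np

def cem_maximize(score_fn, num_iters=3, num_samples=100, elite_frac=0.25,
                 mu=0.0, sigma=1.0, seed=0):
    rng = np.random.default_rng(seed)
    for _ in range(num_iters):
        samples = rng.normal(mu, sigma, size=num_samples)
        scores = score_fn(samples)
        num_elite = max(int(elite_frac * num_samples), 1)
        elite = samples[np.argsort(scores)[::-1][:num_elite]]
        # refit the Gaussian to the elite set (small floor keeps sigma > 0)
        mu, sigma = elite.mean(), elite.std() + 1e-6
    samples = rng.normal(mu, sigma, size=num_samples)
    return samples[np.argmax(score_fn(samples))]

# e.g. cem_maximize(lambda x: -(x - 2.0) ** 2) converges near x = 2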
    else:
        vis2d.subplot(1, 3, 1)
        vis2d.imshow(state.rgbd_im.color)
        vis2d.title("COLOR")
        vis2d.subplot(1, 3, 2)
        vis2d.imshow(state.rgbd_im.depth,
                     vmin=policy_config["vis"]["vmin"],
                     vmax=policy_config["vis"]["vmax"])
        vis2d.title("DEPTH")
        vis2d.subplot(1, 3, 3)
        vis2d.imshow(state.segmask)
        vis2d.title("SEGMASK")
    filename = None
    if output_dir is not None:
        filename = os.path.join(output_dir, "input_images.png")
    vis2d.show(filename)

    # Query policy.
    policy_start = time.time()
    action = policy(state)
    logger.info("Planning took %.3f sec" % (time.time() - policy_start))

    # Vis final grasp.
    if policy_config["vis"]["final_grasp"]:
        vis2d.figure(size=(10, 10))
        vis2d.subplot(1, 2, 1)
        vis2d.imshow(state.rgbd_im.depth,
                     vmin=policy_config["vis"]["vmin"],
                     vmax=policy_config["vis"]["vmax"])
        vis2d.grasp(original_action.grasp,
                    scale=policy_config["vis"]["grasp_scale"],