def fast_grid_search(pc, indices, model, shadow, img_file):
    length, width, height = shadow.extents
    split_size = max(length, width)
    pc_data, ind = get_pc_data(pc, indices)
    maxes = np.max(pc_data, axis=0)
    mins = np.min(pc_data, axis=0)
    bin_base = mins[2]
    plane_normal = model[0:3]

    di_temp = ci.project_to_image(pc)
    vis2d.figure()
    vis2d.imshow(di_temp)
    vis2d.show()

    plane_data = pc.data.T[indices]
    #all_indices = np.where([(plane_data[::,2] > 0.795) & (plane_data[::,2] < 0.862)])
    #all_indices = np.where((plane_data[::,1] < 0.16) & (plane_data[::,1] > -0.24) & (plane_data[::,0] > -0.3) & (plane_data[::,0] < 0.24))[0]
    #plane_data = plane_data[all_indices]
    plane_pc = PointCloud(plane_data.T, pc.frame)
    di = ci.project_to_image(plane_pc)
    bi = di.to_binary()

    scene = Scene()
    camera = VirtualCamera(ci, cp)
    scene.camera = camera

    # Get shadow depth img.
    shadow_obj = SceneObject(shadow)
    scene.add_object('shadow', shadow_obj)
    orig_tow = shadow_obj.T_obj_world

    num_x = int(np.round((maxes[0] - mins[0]) / split_size))
    num_y = int(np.round((maxes[1] - mins[1]) / split_size))
    scores = np.zeros((num_x, num_y))
    for i in range(num_x):
        x = mins[0] + i * split_size
        for j in range(num_y):
            y = mins[1] + j * split_size
            for tow in transforms(pc, pc_data, shadow, x, y, x + split_size, y + split_size, 8):
                shadow_obj.T_obj_world = tow
                scores[i][j] = under_shadow(pc, pc_data, indices, model, shadow,
                                            x, x + split_size, y, y + split_size, scene, bi)
                shadow_obj.T_obj_world = orig_tow

    print("\nScores: \n" + str(scores))
    best = best_cell(scores)
    print("\nBest Cell: " + str(best) + ", with score = " + str(scores[best[0]][best[1]]))

    # ------- Visualize best placement -------
    vis3d.figure()
    x = mins[0] + best[0] * split_size
    y = mins[1] + best[1] * split_size
    cell_indices = np.where((x < pc_data[:, 0]) & (pc_data[:, 0] < x + split_size)
                            & (y < pc_data[:, 1]) & (pc_data[:, 1] < y + split_size))[0]
    points = pc_data[cell_indices]
    rest = pc_data[np.setdiff1d(np.arange(len(pc_data)), cell_indices)]
    vis3d.points(points, color=(0, 1, 1))
    vis3d.points(rest, color=(1, 0, 1))
    vis3d.show()
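
# `best_cell` is used above but not defined in this excerpt. A minimal sketch
# of what it plausibly does, assuming `scores` is a 2D numpy array: return the
# (i, j) index of the highest-scoring grid cell. The name and behavior are
# inferred from usage, not taken from the original source.
import numpy as np

def best_cell(scores):
    """Return the (row, col) index of the maximum entry of `scores`."""
    return np.unravel_index(np.argmax(scores), scores.shape)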
def quality(self, state, actions, params=None):
    """Given a suction point, compute a score based on a best-fit 3D plane
    of the neighboring points.

    Parameters
    ----------
    state : :obj:`RgbdImageState`
        An RgbdImageState instance that encapsulates rgbd_im, camera_intr,
        segmask, fully_observed.
    actions : :obj:`list` of :obj:`SuctionPoint2D`
        Suction grasps in image space that encapsulate center, approach
        direction, depth, camera_intr.
    params : dict
        Stores params used in computing suction quality.

    Returns
    -------
    :obj:`numpy.ndarray`
        Array of the quality for each grasp.
    """
    qualities = []

    # deproject points
    point_cloud_image = state.camera_intr.deproject_to_image(state.rgbd_im.depth)

    # compute negative SSE from the best-fit plane for each grasp
    for i, action in enumerate(actions):
        if not isinstance(action, SuctionPoint2D):
            raise ValueError('This function can only be used to evaluate suction quality')

        points = self._points_in_window(point_cloud_image, action, segmask=state.segmask)
        A, b = self._points_to_matrices(points)  # x,y in matrix A and z in vector b
        w = self._action_to_plane(point_cloud_image, action)  # vector w with a bias term represents a best-fit plane
        sse = self._sum_of_squared_residuals(w, A, b)

        if params is not None and params['vis']['plane']:
            from visualization import Visualizer2D as vis2d
            from visualization import Visualizer3D as vis3d
            mid_i = A.shape[0] // 2
            pred_z = A.dot(w)
            p0 = np.array([A[mid_i, 0], A[mid_i, 1], pred_z[mid_i]])
            n = np.array([w[0], w[1], -1])
            n = n / np.linalg.norm(n)
            tx = np.array([n[1], -n[0], 0])
            tx = tx / np.linalg.norm(tx)
            ty = np.cross(n, tx)
            R = np.array([tx, ty, n]).T

            c = state.camera_intr.deproject_pixel(action.depth, action.center)
            d = Point(c.data - 0.01 * action.axis, frame=c.frame)

            T_table_world = RigidTransform(rotation=R, translation=p0,
                                           from_frame='patch', to_frame='world')

            vis3d.figure()
            vis3d.points(point_cloud_image.to_point_cloud(), scale=0.0025,
                         subsample=10, random=True, color=(0, 0, 1))
            vis3d.points(PointCloud(points.T), scale=0.0025, color=(1, 0, 0))
            vis3d.points(c, scale=0.005, color=(1, 1, 0))
            vis3d.points(d, scale=0.005, color=(1, 1, 0))
            vis3d.table(T_table_world, dim=0.01)
            vis3d.show()

            vis2d.figure()
            vis2d.imshow(state.rgbd_im.depth)
            vis2d.scatter(action.center.x, action.center.y, s=50, c='b')
            vis2d.show()

        # evaluate how well the best-fit plane describes all points in the window
        quality = np.exp(-sse)
        qualities.append(quality)

    return np.array(qualities)
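
# A minimal sketch of the plane-fit math the helpers above rely on, assuming
# `points` is an N x 3 array of (x, y, z) samples. The names `fit_plane` and
# `plane_sse` are illustrative; they are not the actual `_points_to_matrices`
# or `_sum_of_squared_residuals` implementations.
import numpy as np

def fit_plane(points):
    """Least-squares fit of z = w[0]*x + w[1]*y + w[2] to the points."""
    A = np.c_[points[:, :2], np.ones(points.shape[0])]  # columns [x, y, 1]
    b = points[:, 2]
    w, _, _, _ = np.linalg.lstsq(A, b, rcond=None)
    return A, b, w

def plane_sse(w, A, b):
    """Sum of squared residuals of the plane fit."""
    r = A.dot(w) - b
    return r.dot(r)

# Quality as above: a perfectly planar window gives exp(-0) = 1, and quality
# decays toward 0 as the residuals grow.
# quality = np.exp(-plane_sse(w, A, b))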
    vis3d.points(point_cloud_filtered, scale=0.001, color=(0, 0, 1), subsample=5)
    vis3d.show()

# convert to depth image
project_start = time.time()
point_cloud_cam = T_camera_world.inverse() * point_cloud_filtered
depth_im_filtered = small_camera_intr.project_to_image(point_cloud_cam)
noise_mask = depth_im_filtered.to_binary()
logging.info("Project took %.3f sec" % (time.time() - project_start))
depth_im_filtered = depth_im_filtered.inpaint(0.5)

filter_stop = time.time()
logging.info("Filtering took %.3f sec" % (filter_stop - filter_start))

if vis_final_images:
    vis2d.figure()
    vis2d.subplot(2, 2, 1)
    vis2d.imshow(depth_im)
    vis2d.title("Orig")
    vis2d.subplot(2, 2, 2)
    vis2d.imshow(depth_im_orig)
    vis2d.title("Inpainted")
    vis2d.subplot(2, 2, 3)
    vis2d.imshow(noise_mask)
    vis2d.title("Mask")
    vis2d.subplot(2, 2, 4)
    vis2d.imshow(depth_im_filtered)
    vis2d.title("Filtered")
    vis2d.show()
sensor.start()
camera_intr = sensor.ir_intrinsics

n = 15
frame_rates = []
for i in range(n):
    logging.info("Reading frame %d of %d" % (i + 1, n))
    read_start = time.time()
    color_im, depth_im, _ = sensor.frames()
    read_stop = time.time()
    frame_rates.append(1.0 / (read_stop - read_start))
logging.info("Avg fps: %.3f" % (np.mean(frame_rates)))

color_im = color_im.inpaint(rescale_factor=0.5)
depth_im = depth_im.inpaint(rescale_factor=0.5)

point_cloud = camera_intr.deproject(depth_im)
vis3d.figure()
vis3d.points(point_cloud, subsample=15)
vis3d.show()

vis.figure()
vis.subplot(1, 2, 1)
vis.imshow(color_im)
vis.subplot(1, 2, 2)
vis.imshow(depth_im)
vis.show()

sensor.stop()
# optionally read a segmask
segmask = None
if segmask_filename is not None:
    segmask = BinaryImage.open(segmask_filename)
valid_px_mask = depth_im.invalid_pixel_mask().inverse()
if segmask is None:
    segmask = valid_px_mask
else:
    segmask = segmask.mask_binary(valid_px_mask)

# inpaint
depth_im = depth_im.inpaint(rescale_factor=inpaint_rescale_factor)

if 'input_images' in policy_config['vis'] and policy_config['vis']['input_images']:
    vis.figure(size=(10, 10))
    num_plot = 1
    if segmask is not None:
        num_plot = 2
    vis.subplot(1, num_plot, 1)
    vis.imshow(depth_im)
    if segmask is not None:
        vis.subplot(1, num_plot, 2)
        vis.imshow(segmask)
    vis.show()

# create state
rgbd_im = RgbdImage.from_color_and_depth(color_im, depth_im)
state = RgbdImageState(rgbd_im, camera_intr, segmask=segmask)

# set input sizes for fully-convolutional policy
    raise ValueError('Must provide mesh in Wavefront .OBJ format!')

orig_mesh = ObjFile(mesh_filename).read()
mesh = orig_mesh.subdivide(min_tri_length=0.01)
mesh.compute_vertex_normals()
stable_poses = mesh.stable_poses()

if vis_normals:
    vis3d.figure()
    vis3d.mesh(mesh)
    vis3d.normals(NormalCloud(mesh.normals.T), PointCloud(mesh.vertices.T), subsample=10)
    vis3d.show()

d = utils.sqrt_ceil(len(stable_poses))
vis.figure(size=(16, 16))

table_mesh = ObjFile('data/meshes/table.obj').read()
table_mesh = table_mesh.subdivide()
table_mesh.compute_vertex_normals()
table_mat_props = MaterialProperties(color=(0, 255, 0), ambient=0.5, diffuse=1.0,
                                     specular=1, shininess=0)

for k, stable_pose in enumerate(stable_poses):
    logging.info('Rendering stable pose %d' % (k))

    # set resting pose
    T_obj_world = mesh.get_T_surface_obj(
# Read config.
config = YamlConfig(config_filename)
policy_config = config["policy"]

# Load test case.
state_path = os.path.join(test_case_path, "state")
action_path = os.path.join(test_case_path, "action")
state = RgbdImageState.load(state_path)
original_action = ParallelJawGrasp.load(action_path)

# Init policy.
policy = CrossEntropyRobustGraspingPolicy(policy_config)

if policy_config["vis"]["input_images"]:
    vis2d.figure()
    if state.segmask is None:
        vis2d.subplot(1, 2, 1)
        vis2d.imshow(state.rgbd_im.color)
        vis2d.title("COLOR")
        vis2d.subplot(1, 2, 2)
        vis2d.imshow(state.rgbd_im.depth,
                     vmin=policy_config["vis"]["vmin"],
                     vmax=policy_config["vis"]["vmax"])
        vis2d.title("DEPTH")
    else:
        vis2d.subplot(1, 3, 1)
        vis2d.imshow(state.rgbd_im.color)
        vis2d.title("COLOR")
        vis2d.subplot(1, 3, 2)
        vis2d.imshow(state.rgbd_im.depth,
def run_experiment():
    """Run the experiment."""
    if not config['robot_off']:
        rospy.loginfo('Initializing YuMi')
        robot, subscriber, left_arm, right_arm, home_pose = init_robot(config)

    # create ROS CVBridge
    cv_bridge = CvBridge()

    # wait for Grasp Planning Service and create Service Proxy
    rospy.wait_for_service('plan_gqcnn_grasp')
    plan_grasp = rospy.ServiceProxy('plan_gqcnn_grasp', GQCNNGraspPlanner)

    # get camera intrinsics
    camera_intrinsics = sensor.ir_intrinsics

    # setup experiment logger
    experiment_logger = GraspIsolatedObjectExperimentLogger(
        config['experiment_dir'],
        config['supervisor'],
        camera_intrinsics,
        T_camera_world,
        '/home/autolab/Workspace/vishal_working/catkin_ws/src/gqcnn/cfg/ros_nodes/yumi_control_node.yaml',
        planner_type=config['planner_type'])
    logging.info('Saving experiment to %s' % (experiment_logger.experiment_path))

    object_keys = config['test_object_keys']
    trial_number = 1
    re_try = False

    logging.info('Beginning experiment')
    while True:
        if not re_try:
            experiment_logger.start_trial()
            obj = np.random.choice(object_keys, size=1)[0]
        else:
            re_try = False

        rospy.loginfo('Please place object: ' + obj + ' on the workspace.')
        raw_input("Press ENTER when ready ...")

        # start the next trial
        rospy.loginfo('Trial %d' % (trial_number))

        # get the images from the sensor
        color_image, depth_image, _ = sensor.frames()

        # log some trial info
        experiment_logger.update_trial_attribute('trial_num', trial_number)
        experiment_logger.update_trial_attribute('color_im', color_image)
        experiment_logger.update_trial_attribute('depth_im', depth_image)

        # inpaint to remove holes
        inpainted_color_image = color_image.inpaint(
            rescale_factor=config['inpaint_rescale_factor'])
        inpainted_depth_image = depth_image.inpaint(
            rescale_factor=config['inpaint_rescale_factor'])

        detector = RgbdDetectorFactory.detector('point_cloud_box')
        detection = detector.detect(inpainted_color_image, inpainted_depth_image,
                                    detector_cfg, camera_intrinsics, T_camera_world,
                                    vis_foreground=False, vis_segmentation=False)[0]

        if config['vis']['vis_detector_output']:
            vis.figure()
            vis.subplot(1, 2, 1)
            vis.imshow(detection.color_thumbnail)
            vis.subplot(1, 2, 2)
            vis.imshow(detection.depth_thumbnail)
            vis.show()

        bounding_box = BoundingBox()
        bounding_box.minY = detection.bounding_box.min_pt[0]
        bounding_box.minX = detection.bounding_box.min_pt[1]
        bounding_box.maxY = detection.bounding_box.max_pt[0]
        bounding_box.maxX = detection.bounding_box.max_pt[1]

        try:
            start_time = time.time()
            planned_grasp_data = plan_grasp(inpainted_color_image.rosmsg,
                                            inpainted_depth_image.rosmsg,
                                            camera_intrinsics.rosmsg,
                                            bounding_box)
            grasp_plan_time = time.time() - start_time

            lift_gripper_width, T_gripper_world = process_GQCNNGrasp(
                planned_grasp_data, robot, left_arm, right_arm, subscriber,
                home_pose, config)

            # get human label
            human_input = raw_input('Grasp success, or grasp failure? [s/f] ')
            while human_input.lower() != 's' and human_input.lower() != 'f':
                logging.info('Did not understand input. Please answer \'s\' or \'f\'')
                human_input = raw_input('Grasp success, or grasp failure? [s/f] ')
            if human_input.lower() == 's':
                experiment_logger.update_trial_attribute('human_label', 1)
            else:
                experiment_logger.update_trial_attribute('human_label', 0)

            # log result
            experiment_logger.update_trial_attribute('gripper_pose', T_gripper_world)
            experiment_logger.update_trial_attribute('planning_time', grasp_plan_time)
            experiment_logger.update_trial_attribute('gripper_width', lift_gripper_width)
            experiment_logger.update_trial_attribute('found_grasp', 1)
            experiment_logger.update_trial_attribute('completed', True)
            experiment_logger.update_trial_attribute('object_key', obj)
            trial_number += 1
        except rospy.ServiceException as e:
            rospy.logerr("Service call failed: \n %s" % e)
            experiment_logger.update_trial_attribute('found_grasp', 0)
            experiment_logger.update_trial_attribute('completed', True)
            experiment_logger.update_trial_attribute('object_key', obj)
            trial_number += 1
        except (YuMiCommException, YuMiControlException) as yce:
            rospy.logerr(str(yce))
            if sensor is not None:
                sensor.stop()
            if robot is not None:
                robot.stop()
            if subscriber is not None and subscriber._started:
                subscriber.stop()
            rospy.loginfo("Re-trying")
            re_try = True
            robot, subscriber, left_arm, right_arm, home_pose = init_robot(config)
def generate_gqcnn_dataset(dataset_path, database, target_object_keys,
                           env_rv_params, gripper_name, config):
    """Generate a GQ-CNN TensorDataset for training models with new grippers,
    quality metrics, objects, and cameras.

    Parameters
    ----------
    dataset_path : str
        Path to save the dataset to.
    database : :obj:`Hdf5Database`
        Dex-Net database containing the 3D meshes, grasps, and grasp metrics.
    target_object_keys : :obj:`OrderedDict`
        Dictionary mapping dataset names to target objects (either 'all' or
        a list of specific object keys).
    env_rv_params : :obj:`OrderedDict`
        Parameters of the camera and object random variables used in sampling
        (see meshpy.UniformPlanarWorksurfaceImageRandomVariable for more info).
    gripper_name : str
        Name of the gripper to use.
    config : :obj:`autolab_core.YamlConfig`
        Other parameters for dataset generation.

    Notes
    -----
    Required parameters of config are specified in Other Parameters.

    Other Parameters
    ----------------
    images_per_stable_pose : int
        Number of object and camera poses to sample for each stable pose.
    stable_pose_min_p : float
        Minimum probability of occurrence for a stable pose to be used in data
        generation (used to prune bad stable poses).
    gqcnn/crop_width : int
        Width, in pixels, of the crop region around each grasp center, before
        resize (changes the size of the region seen by the GQ-CNN).
    gqcnn/crop_height : int
        Height, in pixels, of the crop region around each grasp center, before
        resize (changes the size of the region seen by the GQ-CNN).
    gqcnn/final_width : int
        Width, in pixels, of the final transformed grasp image for input to
        the GQ-CNN (defaults to 32).
    gqcnn/final_height : int
        Height, in pixels, of the final transformed grasp image for input to
        the GQ-CNN (defaults to 32).
    table_alignment/max_approach_table_angle : float
        Max angle between the grasp axis and the table normal when the grasp
        approach is maximally aligned with the table normal.
    table_alignment/max_approach_offset : float
        Max deviation from the perpendicular approach direction to use in
        grasp collision checking.
    table_alignment/num_approach_offset_samples : int
        Number of approach samples to use in collision checking.
    collision_checking/table_offset : float
        Max allowable interpenetration between the gripper and table to be
        considered collision free.
    collision_checking/table_mesh_filename : str
        Path to a table mesh for collision checking (default
        data/meshes/table.obj).
    collision_checking/approach_dist : float
        Distance, in meters, between the approach pose and final grasp pose
        along the grasp axis.
    collision_checking/delta_approach : float
        Amount, in meters, to discretize the straight-line path from the
        gripper approach pose to the final grasp pose.
    tensors/datapoints_per_file : int
        Number of datapoints to store in each unique tensor file on disk.
    tensors/fields : :obj:`dict`
        Dictionary mapping field names to dictionaries specifying the data
        type, height, width, and number of channels for each tensor.
    debug : bool
        True (or 1) if the random seed should be set to enforce deterministic
        behavior, False (0) otherwise.
    vis/candidate_grasps : bool
        True (or 1) if the collision-free candidate grasps should be displayed
        in 3D (for debugging).
    vis/rendered_images : bool
        True (or 1) if the rendered images for each stable pose should be
        displayed (for debugging).
    vis/grasp_images : bool
        True (or 1) if the transformed grasp images should be displayed (for
        debugging).
    """
    # read data gen params
    output_dir = dataset_path
    gripper = RobotGripper.load(gripper_name)
    image_samples_per_stable_pose = config['images_per_stable_pose']
    stable_pose_min_p = config['stable_pose_min_p']

    # read gqcnn params
    gqcnn_params = config['gqcnn']
    im_crop_height = gqcnn_params['crop_height']
    im_crop_width = gqcnn_params['crop_width']
    im_final_height = gqcnn_params['final_height']
    im_final_width = gqcnn_params['final_width']
    cx_crop = float(im_crop_width) / 2
    cy_crop = float(im_crop_height) / 2

    # open database
    dataset_names = target_object_keys.keys()
    datasets = [database.dataset(dn) for dn in dataset_names]

    # set target objects
    for dataset in datasets:
        if target_object_keys[dataset.name] == 'all':
            target_object_keys[dataset.name] = dataset.object_keys

    # setup grasp params
    table_alignment_params = config['table_alignment']
    min_grasp_approach_offset = -np.deg2rad(table_alignment_params['max_approach_offset'])
    max_grasp_approach_offset = np.deg2rad(table_alignment_params['max_approach_offset'])
    max_grasp_approach_table_angle = np.deg2rad(table_alignment_params['max_approach_table_angle'])
    num_grasp_approach_samples = table_alignment_params['num_approach_offset_samples']

    phi_offsets = []
    if max_grasp_approach_offset == min_grasp_approach_offset:
        phi_inc = 1
    elif num_grasp_approach_samples == 1:
        phi_inc = max_grasp_approach_offset - min_grasp_approach_offset + 1
    else:
        phi_inc = (max_grasp_approach_offset - min_grasp_approach_offset) / (num_grasp_approach_samples - 1)
    phi = min_grasp_approach_offset
    while phi <= max_grasp_approach_offset:
        phi_offsets.append(phi)
        phi += phi_inc

    # setup collision checking
    coll_check_params = config['collision_checking']
    approach_dist = coll_check_params['approach_dist']
    delta_approach = coll_check_params['delta_approach']
    table_offset = coll_check_params['table_offset']
    table_mesh_filename = coll_check_params['table_mesh_filename']
    if not os.path.isabs(table_mesh_filename):
        table_mesh_filename = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                                           '..', table_mesh_filename)
    table_mesh = ObjFile(table_mesh_filename).read()

    # set tensor dataset config
    tensor_config = config['tensors']
    tensor_config['fields']['depth_ims_tf_table']['height'] = im_final_height
    tensor_config['fields']['depth_ims_tf_table']['width'] = im_final_width
    tensor_config['fields']['color_ims_tf_table']['height'] = im_final_height
    tensor_config['fields']['color_ims_tf_table']['width'] = im_final_width
    tensor_config['fields']['obj_masks']['height'] = im_final_height
    tensor_config['fields']['obj_masks']['width'] = im_final_width

    # add available metrics (assuming same are computed for all objects)
    metric_names = []
    dataset = datasets[0]
    obj_keys = dataset.object_keys
    if len(obj_keys) == 0:
        raise ValueError('No valid objects in dataset %s' % (dataset.name))
    obj = dataset[obj_keys[0]]
    grasps = dataset.grasps(obj.key, gripper=gripper.name)
    grasp_metrics = dataset.grasp_metrics(obj.key, grasps, gripper=gripper.name)
    metric_names = grasp_metrics[grasp_metrics.keys()[0]].keys()
    for metric_name in metric_names:
        tensor_config['fields'][metric_name] = {}
        tensor_config['fields'][metric_name]['dtype'] = 'float32'

    # init tensor dataset
    tensor_dataset = TensorDataset(output_dir, tensor_config)
    tensor_datapoint = tensor_dataset.datapoint_template

    # setup log file
    experiment_log_filename = os.path.join(output_dir, 'dataset_generation.log')
    formatter = logging.Formatter('%(asctime)s %(levelname)s: %(message)s')
    hdlr = logging.FileHandler(experiment_log_filename)
    hdlr.setFormatter(formatter)
    logging.getLogger().addHandler(hdlr)
    root_logger = logging.getLogger()

    # copy config
    out_config_filename = os.path.join(output_dir, 'dataset_generation.json')
    ordered_dict_config = collections.OrderedDict()
    for key in config.keys():
        ordered_dict_config[key] = config[key]
    with open(out_config_filename, 'w') as outfile:
        json.dump(ordered_dict_config, outfile)

    # collectImage = collect_image.CollectImage(MESH_PATH)

    # 1. Precompute the set of valid grasps for each stable pose:
    #    i) Perpendicular to the table
    #    ii) Collision-free along the approach direction

    # load grasps if they already exist
    grasp_cache_filename = os.path.join(output_dir, CACHE_FILENAME)
    if os.path.exists(grasp_cache_filename):
        logging.info('Loading grasp candidates from file')
        candidate_grasps_dict = pkl.load(open(grasp_cache_filename, 'rb'))
    # otherwise re-compute by reading from the database and enforcing constraints
    else:
        # create grasps dict
        candidate_grasps_dict = {}

        # loop through datasets and objects
        for dataset in datasets:
            logging.info('Reading dataset %s' % (dataset.name))
            for obj in dataset:
                if obj.key not in target_object_keys[dataset.name]:
                    continue
                obj.mesh.trimesh.export(MESH_PATH)
                collectImage.load_new_object()

                # init candidate grasp storage
                candidate_grasps_dict[obj.key] = {}
                # collision_checker.set_graspable_object(obj)

                # read in the stable poses of the mesh
                stable_poses = dataset.stable_poses(obj.key)
                for i, stable_pose in enumerate(stable_poses):
                    # render images if stable pose is valid
                    if stable_pose.p > stable_pose_min_p:
                        candidate_grasps_dict[obj.key][stable_pose.id] = []

                        # setup table in collision checker
                        # T_obj_stp = stable_pose.T_obj_table.as_frames('obj', 'stp')
                        # T_obj_table = obj.mesh.get_T_surface_obj(T_obj_stp, delta=table_offset).as_frames('obj', 'table')
                        # T_table_obj = T_obj_table.inverse()
                        # collision_checker.set_table(table_mesh_filename, T_table_obj)

                        # read grasps and metrics
                        grasps = dataset.grasps(obj.key, gripper=gripper.name)
                        logging.info('Aligning %d grasps for object %s in stable %s'
                                     % (len(grasps), obj.key, stable_pose.id))

                        T_obj_stp = stable_pose.T_obj_table.as_frames('obj', 'stp')
                        T_obj_stp = obj.mesh.get_T_surface_obj(T_obj_stp)
                        pose_matrix = np.eye(4, 4)
                        pose_matrix[:3, :3] = T_obj_stp.rotation
                        pose_matrix[:3, 3] = T_obj_stp.translation
                        collectImage.drop_object(pose_matrix)

                        # align grasps with the table
                        aligned_grasps = [grasp.perpendicular_table(stable_pose)
                                          for grasp in grasps]

                        # check grasp validity
                        logging.info('Checking collisions for %d grasps for object %s in stable %s'
                                     % (len(grasps), obj.key, stable_pose.id))
                        for aligned_grasp in aligned_grasps:
                            # check angle with table plane and skip unaligned grasps
                            _, grasp_approach_table_angle, _ = aligned_grasp.grasp_angles_from_stp_z(stable_pose)
                            perpendicular_table = (np.abs(grasp_approach_table_angle)
                                                   < max_grasp_approach_table_angle)
                            if not perpendicular_table:
                                continue

                            # check whether the aligned grasp is collision free and if it is successful
                            gripper_pose = np.eye(4, 4)
                            center_world = np.matmul(pose_matrix,
                                                     [aligned_grasp.center[0],
                                                      aligned_grasp.center[1],
                                                      aligned_grasp.center[2], 1])
                            axis_world = np.matmul(pose_matrix,
                                                   [aligned_grasp.axis_[0],
                                                    aligned_grasp.axis_[1],
                                                    aligned_grasp.axis_[2], 1])
                            gripper_angle = math.atan2(axis_world[1], axis_world[0])
                            gripper_pose[:3, 3] = center_world[:3]
                            cos_angle = math.cos(gripper_angle)
                            sin_angle = math.sin(gripper_angle)
                            gripper_pose[0, 0] = cos_angle
                            gripper_pose[0, 1] = -sin_angle
                            gripper_pose[1, 0] = sin_angle
                            gripper_pose[1, 1] = cos_angle
                            collision_free = not collectImage.check_collision(gripper_pose)
                            # collision_free = True

                            # store if aligned to table
                            candidate_grasps_dict[obj.key][stable_pose.id].append(
                                GraspInfo(aligned_grasp, collision_free))

                            # visualize if specified
                            if collision_free and config['vis']['candidate_grasps']:
                                logging.info('Grasp %d' % (aligned_grasp.id))
                                vis.figure()
                                vis.gripper_on_object(gripper, aligned_grasp, obj,
                                                      stable_pose.T_obj_world)
                                vis.show()

        # save to file
        logging.info('Saving to file')
        pkl.dump(candidate_grasps_dict, open(grasp_cache_filename, 'wb'))

    # collectImage.stop()

    # 2. Render a dataset of images and associate the gripper pose with
    #    image coordinates for each grasp in the Dex-Net database

    # setup variables
    obj_category_map = {}
    pose_category_map = {}

    cur_pose_label = 0
    cur_obj_label = 0
    cur_image_label = 0

    # render images for each stable pose of each object in the dataset
    render_modes = [RenderMode.SEGMASK, RenderMode.DEPTH_SCENE, RenderMode.COLOR]
    for dataset in datasets:
        logging.info('Generating data for dataset %s' % (dataset.name))

        # iterate through all object keys
        object_keys = dataset.object_keys
        for obj_key in object_keys:
            obj = dataset[obj_key]
            if obj.key not in target_object_keys[dataset.name]:
                continue
            obj.mesh.trimesh.export(MESH_PATH)
            # collectImage.load_new_object()

            # read in the stable poses of the mesh
            stable_poses = dataset.stable_poses(obj.key)
            for i, stable_pose in enumerate(stable_poses):
                # render images if stable pose is valid
                if stable_pose.p > stable_pose_min_p:
                    # log progress
                    logging.info('Rendering images for object %s in %s'
                                 % (obj.key, stable_pose.id))

                    # add to category maps
                    if obj.key not in obj_category_map.keys():
                        obj_category_map[obj.key] = cur_obj_label
                    pose_category_map['%s_%s' % (obj.key, stable_pose.id)] = cur_pose_label

                    # read in candidate grasps and metrics
                    candidate_grasp_info = candidate_grasps_dict[obj.key][stable_pose.id]
                    candidate_grasps = [g.grasp for g in candidate_grasp_info]
                    grasp_metrics = dataset.grasp_metrics(obj.key, candidate_grasps,
                                                          gripper=gripper.name)

                    # compute object pose relative to the table
                    T_obj_stp = stable_pose.T_obj_table.as_frames('obj', 'stp')
                    T_obj_stp = obj.mesh.get_T_surface_obj(T_obj_stp)

                    t_stable_pose = np.array([0, 0, -stable_pose.r.dot(stable_pose.x0)[2]])
                    T_stable_pose = RigidTransform(rotation=stable_pose.r,
                                                   translation=t_stable_pose,
                                                   from_frame='obj', to_frame='stp')
                    # pose_matrix = np.eye(4, 4)
                    # pose_matrix[:3, :3] = T_stable_pose.rotation
                    # pose_matrix[:3, 3] = T_stable_pose.translation
                    # collectImage.drop_object(pose_matrix)
                    collectImage = None

                    # sample images from random variable
                    T_table_obj = RigidTransform(from_frame='table', to_frame='obj')
                    scene_objs = {'table': SceneObject(table_mesh, T_table_obj)}
                    urv = UniformPlanarWorksurfaceImageRandomVariable(obj.mesh,
                                                                      render_modes,
                                                                      'camera',
                                                                      env_rv_params,
                                                                      stable_pose=stable_pose,
                                                                      scene_objs=scene_objs,
                                                                      sim=collectImage)

                    render_start = time.time()
                    render_samples = urv.rvs(size=image_samples_per_stable_pose)
                    render_stop = time.time()
                    logging.info('Rendering images took %.3f sec'
                                 % (render_stop - render_start))

                    # visualize
                    if config['vis']['rendered_images']:
                        d = int(np.ceil(np.sqrt(image_samples_per_stable_pose)))

                        # binary
                        vis2d.figure()
                        for j, render_sample in enumerate(render_samples):
                            vis2d.subplot(d, d, j + 1)
                            vis2d.imshow(render_sample.renders[RenderMode.SEGMASK].image)

                        # depth table
                        vis2d.figure()
                        for j, render_sample in enumerate(render_samples):
                            vis2d.subplot(d, d, j + 1)
                            vis2d.imshow(render_sample.renders[RenderMode.DEPTH_SCENE].image)
                        vis2d.show()

                    # tally total amount of data
                    num_grasps = len(candidate_grasps)
                    num_images = image_samples_per_stable_pose
                    num_save = num_images * num_grasps
                    logging.info('Saving %d datapoints' % (num_save))
                    # for each candidate grasp on the object, compute the projection
                    # of the grasp into image space
                    for render_sample in render_samples:
                        # read images
                        binary_im = render_sample.renders[RenderMode.SEGMASK].image
                        depth_im_table = render_sample.renders[RenderMode.DEPTH_SCENE].image
                        color_im_table = render_sample.renders[RenderMode.COLOR].image

                        # read camera params
                        T_stp_camera = render_sample.camera.object_to_camera_pose
                        shifted_camera_intr = render_sample.camera.camera_intr

                        # read pixel offsets
                        cx = depth_im_table.center[1]
                        cy = depth_im_table.center[0]

                        # compute intrinsics for virtual camera of the final
                        # cropped and rescaled images
                        camera_intr_scale = float(im_final_height) / float(im_crop_height)
                        cropped_camera_intr = shifted_camera_intr.crop(im_crop_height,
                                                                       im_crop_width, cy, cx)
                        final_camera_intr = cropped_camera_intr.resize(camera_intr_scale)

                        # create a thumbnail for each grasp
                        for grasp_info in candidate_grasp_info:
                            # read info
                            grasp = grasp_info.grasp
                            collision_free = grasp_info.collision_free

                            # get the gripper pose
                            T_obj_camera = T_stp_camera * T_obj_stp.as_frames('obj', T_stp_camera.from_frame)
                            grasp_2d = grasp.project_camera(T_obj_camera, shifted_camera_intr)

                            # center images on the grasp, rotate to the image x axis
                            dx = cx - grasp_2d.center.x
                            dy = cy - grasp_2d.center.y
                            translation = np.array([dy, dx])

                            binary_im_tf = binary_im.transform(translation, grasp_2d.angle)
                            depth_im_tf_table = depth_im_table.transform(translation, grasp_2d.angle)
                            color_im_tf_table = color_im_table.transform(translation, grasp_2d.angle)

                            # crop to image size
                            binary_im_tf = binary_im_tf.crop(im_crop_height, im_crop_width)
                            depth_im_tf_table = depth_im_tf_table.crop(im_crop_height, im_crop_width)
                            color_im_tf_table = color_im_tf_table.crop(im_crop_height, im_crop_width)

                            # resize to final image size
                            binary_im_tf = binary_im_tf.resize((im_final_height, im_final_width),
                                                               interp='nearest')
                            depth_im_tf_table = depth_im_tf_table.resize((im_final_height, im_final_width))
                            color_im_tf_table = color_im_tf_table.resize((im_final_height, im_final_width))

                            # visualize the transformed images
                            if config['vis']['grasp_images']:
                                grasp_center = Point(depth_im_tf_table.center,
                                                     frame=final_camera_intr.frame)
                                tf_grasp_2d = Grasp2D(grasp_center, 0, grasp_2d.depth,
                                                      width=gripper.max_width,
                                                      camera_intr=final_camera_intr)

                                # plot 2D grasp image
                                vis2d.figure()
                                vis2d.subplot(2, 2, 1)
                                vis2d.imshow(binary_im)
                                vis2d.grasp(grasp_2d)
                                vis2d.subplot(2, 2, 2)
                                vis2d.imshow(depth_im_table)
                                vis2d.grasp(grasp_2d)
                                vis2d.subplot(2, 2, 3)
                                vis2d.imshow(binary_im_tf)
                                vis2d.grasp(tf_grasp_2d)
                                vis2d.subplot(2, 2, 4)
                                vis2d.imshow(depth_im_tf_table)
                                vis2d.grasp(tf_grasp_2d)
                                vis2d.title('Coll Free? %d' % (grasp_info.collision_free))
                                vis2d.show()

                                # plot 3D visualization
                                vis.figure()
                                T_obj_world = vis.mesh_stable_pose(obj.mesh.trimesh,
                                                                   stable_pose.T_obj_world,
                                                                   style='surface', dim=0.5)
                                vis.gripper(gripper, grasp, T_obj_world, color=(0.3, 0.3, 0.3))
                                vis.show()

                            # form hand pose array
                            hand_pose = np.r_[grasp_2d.center.y,
                                              grasp_2d.center.x,
                                              grasp_2d.depth,
                                              grasp_2d.angle,
                                              grasp_2d.center.y - shifted_camera_intr.cy,
                                              grasp_2d.center.x - shifted_camera_intr.cx,
                                              grasp_2d.width_px]

                            # store to data buffers
                            tensor_datapoint['depth_ims_tf_table'] = depth_im_tf_table.raw_data
                            tensor_datapoint['color_ims_tf_table'] = color_im_tf_table.raw_data
                            tensor_datapoint['obj_masks'] = binary_im_tf.raw_data
                            tensor_datapoint['hand_poses'] = hand_pose
                            tensor_datapoint['collision_free'] = collision_free
                            tensor_datapoint['obj_labels'] = cur_obj_label
                            tensor_datapoint['pose_labels'] = cur_pose_label
                            tensor_datapoint['image_labels'] = cur_image_label

                            for metric_name, metric_val in grasp_metrics[grasp.id].iteritems():
                                coll_free_metric = (1 * collision_free) * metric_val
                                tensor_datapoint[metric_name] = coll_free_metric
                            tensor_dataset.add(tensor_datapoint)

                        # update image label
                        cur_image_label += 1

                    # update pose label
                    cur_pose_label += 1

                    # force clean up
                    gc.collect()

            # update object label
            cur_obj_label += 1

            # force clean up
            gc.collect()

    # stop simulation
    # collectImage.stop()

    # save last file
    tensor_dataset.flush()

    # save category mappings
    obj_cat_filename = os.path.join(output_dir, 'object_category_map.json')
    json.dump(obj_category_map, open(obj_cat_filename, 'w'))
    pose_cat_filename = os.path.join(output_dir, 'pose_category_map.json')
    json.dump(pose_category_map, open(pose_cat_filename, 'w'))
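
# A small standalone illustration of the approach-offset discretization used
# in generate_gqcnn_dataset above: `num_approach_offset_samples` angles are
# spaced evenly, inclusive of both endpoints, over
# [-max_approach_offset, +max_approach_offset]. The parameter values below
# are made up for illustration.
import numpy as np

max_offset = np.deg2rad(5.0)   # hypothetical max_approach_offset of 5 degrees
num_samples = 5                # hypothetical num_approach_offset_samples
phi_offsets = np.linspace(-max_offset, max_offset, num_samples)
print(np.rad2deg(phi_offsets))  # [-5.  -2.5  0.   2.5  5. ]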
def _plan_grasp(self, color_im, depth_im, camera_intr, bounding_box=None, segmask=None):
    """Grasp planner request handler.

    Parameters
    ----------
    req : :obj:`ROS ServiceRequest`
        ROS ServiceRequest for grasp planner service.
    """
    rospy.loginfo('Planning Grasp')

    # inpaint images
    color_im = color_im.inpaint(rescale_factor=self.cfg['inpaint_rescale_factor'])
    depth_im = depth_im.inpaint(rescale_factor=self.cfg['inpaint_rescale_factor'])

    # init segmask
    if segmask is None:
        segmask = BinaryImage(255 * np.ones(depth_im.shape).astype(np.uint8),
                              frame=color_im.frame)

    # visualize
    if self.cfg['vis']['color_image']:
        vis.imshow(color_im)
        vis.show()
    if self.cfg['vis']['depth_image']:
        vis.imshow(depth_im)
        vis.show()
    if self.cfg['vis']['segmask'] and segmask is not None:
        vis.imshow(segmask)
        vis.show()

    # aggregate color and depth images into a single perception RgbdImage
    rgbd_im = RgbdImage.from_color_and_depth(color_im, depth_im)

    # mask bounding box
    if bounding_box is not None:
        # calc bb parameters
        min_x = bounding_box.minX
        min_y = bounding_box.minY
        max_x = bounding_box.maxX
        max_y = bounding_box.maxY

        # contain box to image -> don't let it exceed image height/width bounds
        if min_x < 0:
            min_x = 0
        if min_y < 0:
            min_y = 0
        if max_x > rgbd_im.width:
            max_x = rgbd_im.width
        if max_y > rgbd_im.height:
            max_y = rgbd_im.height

        # mask
        bb_segmask_arr = np.zeros([rgbd_im.height, rgbd_im.width])
        bb_segmask_arr[min_y:max_y, min_x:max_x] = 255
        bb_segmask = BinaryImage(bb_segmask_arr.astype(np.uint8), segmask.frame)
        segmask = segmask.mask_binary(bb_segmask)

    # visualize
    if self.cfg['vis']['rgbd_state']:
        masked_rgbd_im = rgbd_im.mask_binary(segmask)
        vis.figure()
        vis.subplot(1, 2, 1)
        vis.imshow(masked_rgbd_im.color)
        vis.subplot(1, 2, 2)
        vis.imshow(masked_rgbd_im.depth)
        vis.show()

    # create an RgbdImageState with the cropped RgbdImage and CameraIntrinsics
    rgbd_state = RgbdImageState(rgbd_im, camera_intr, segmask=segmask)

    # execute policy
    try:
        return self.execute_policy(rgbd_state, self.grasping_policy,
                                   self.grasp_pose_publisher, camera_intr.frame)
    except NoValidGraspsException:
        rospy.logerr('While executing policy found no valid grasps from sampled'
                     ' antipodal point pairs. Aborting Policy!')
        raise rospy.ServiceException('While executing policy found no valid grasps'
                                     ' from sampled antipodal point pairs.'
                                     ' Aborting Policy!')
def _plot(self, model_dir, model_output_dir, train_result, val_result):
    """Plot analysis curves."""
    self.logger.info("Plotting")

    _, model_name = os.path.split(model_output_dir)

    # Set params.
    colors = ["g", "b", "c", "y", "m", "r"]
    styles = ["-", "--", "-.", ":", "-"]

    # PR, ROC.
    vis2d.clf()
    train_result.precision_recall_curve(plot=True, line_width=self.line_width,
                                        color=colors[0], style=styles[0], label="TRAIN")
    val_result.precision_recall_curve(plot=True, line_width=self.line_width,
                                      color=colors[1], style=styles[1], label="VAL")
    vis2d.title("Precision Recall Curves", fontsize=self.font_size)
    handles, labels = vis2d.gca().get_legend_handles_labels()
    vis2d.legend(handles, labels, loc="best")
    figname = os.path.join(model_output_dir, "precision_recall.png")
    vis2d.savefig(figname, dpi=self.dpi)

    vis2d.clf()
    train_result.roc_curve(plot=True, line_width=self.line_width,
                           color=colors[0], style=styles[0], label="TRAIN")
    val_result.roc_curve(plot=True, line_width=self.line_width,
                         color=colors[1], style=styles[1], label="VAL")
    vis2d.title("Receiver Operating Characteristic", fontsize=self.font_size)
    handles, labels = vis2d.gca().get_legend_handles_labels()
    vis2d.legend(handles, labels, loc="best")
    figname = os.path.join(model_output_dir, "roc.png")
    vis2d.savefig(figname, dpi=self.dpi)

    # Plot histogram of prediction errors.
    num_bins = min(self.num_bins, train_result.num_datapoints)

    # Train positives.
    pos_ind = np.where(train_result.labels == 1)[0]
    diffs = np.abs(train_result.labels[pos_ind] - train_result.pred_probs[pos_ind])
    vis2d.figure()
    utils.histogram(diffs, num_bins, bounds=(0, 1), normalized=False, plot=True)
    vis2d.title("Error on Positive Training Examples", fontsize=self.font_size)
    vis2d.xlabel("Abs Prediction Error", fontsize=self.font_size)
    vis2d.ylabel("Count", fontsize=self.font_size)
    figname = os.path.join(model_output_dir, "pos_train_errors_histogram.png")
    vis2d.savefig(figname, dpi=self.dpi)

    # Train negatives.
    neg_ind = np.where(train_result.labels == 0)[0]
    diffs = np.abs(train_result.labels[neg_ind] - train_result.pred_probs[neg_ind])
    vis2d.figure()
    utils.histogram(diffs, num_bins, bounds=(0, 1), normalized=False, plot=True)
    vis2d.title("Error on Negative Training Examples", fontsize=self.font_size)
    vis2d.xlabel("Abs Prediction Error", fontsize=self.font_size)
    vis2d.ylabel("Count", fontsize=self.font_size)
    figname = os.path.join(model_output_dir, "neg_train_errors_histogram.png")
    vis2d.savefig(figname, dpi=self.dpi)

    # Histogram of validation errors.
    num_bins = min(self.num_bins, val_result.num_datapoints)

    # Val positives.
    pos_ind = np.where(val_result.labels == 1)[0]
    diffs = np.abs(val_result.labels[pos_ind] - val_result.pred_probs[pos_ind])
    vis2d.figure()
    utils.histogram(diffs, num_bins, bounds=(0, 1), normalized=False, plot=True)
    vis2d.title("Error on Positive Validation Examples", fontsize=self.font_size)
    vis2d.xlabel("Abs Prediction Error", fontsize=self.font_size)
    vis2d.ylabel("Count", fontsize=self.font_size)
    figname = os.path.join(model_output_dir, "pos_val_errors_histogram.png")
    vis2d.savefig(figname, dpi=self.dpi)

    # Val negatives.
    neg_ind = np.where(val_result.labels == 0)[0]
    diffs = np.abs(val_result.labels[neg_ind] - val_result.pred_probs[neg_ind])
    vis2d.figure()
    utils.histogram(diffs, num_bins, bounds=(0, 1), normalized=False, plot=True)
    vis2d.title("Error on Negative Validation Examples", fontsize=self.font_size)
    vis2d.xlabel("Abs Prediction Error", fontsize=self.font_size)
    vis2d.ylabel("Count", fontsize=self.font_size)
    figname = os.path.join(model_output_dir, "neg_val_errors_histogram.png")
    vis2d.savefig(figname, dpi=self.dpi)

    # Losses.
    try:
        train_errors_filename = os.path.join(model_dir, GQCNNFilenames.TRAIN_ERRORS)
        val_errors_filename = os.path.join(model_dir, GQCNNFilenames.VAL_ERRORS)
        val_iters_filename = os.path.join(model_dir, GQCNNFilenames.VAL_ITERS)
        pct_pos_val_filename = os.path.join(model_dir, GQCNNFilenames.PCT_POS_VAL)
        train_losses_filename = os.path.join(model_dir, GQCNNFilenames.TRAIN_LOSSES)

        raw_train_errors = np.load(train_errors_filename)
        val_errors = np.load(val_errors_filename)
        val_iters = np.load(val_iters_filename)
        pct_pos_val = float(val_errors[0])
        if os.path.exists(pct_pos_val_filename):
            pct_pos_val = 100.0 * np.load(pct_pos_val_filename)
        raw_train_losses = np.load(train_losses_filename)

        val_errors = np.r_[pct_pos_val, val_errors]
        val_iters = np.r_[0, val_iters]

        # Window the training error.
        i = 0
        train_errors = []
        train_losses = []
        train_iters = []
        while i < raw_train_errors.shape[0]:
            train_errors.append(np.mean(raw_train_errors[i:i + WINDOW]))
            train_losses.append(np.mean(raw_train_losses[i:i + WINDOW]))
            train_iters.append(i)
            i += WINDOW
        train_errors = np.array(train_errors)
        train_losses = np.array(train_losses)
        train_iters = np.array(train_iters)

        init_val_error = val_errors[0]
        norm_train_errors = train_errors / init_val_error
        norm_val_errors = val_errors / init_val_error
        norm_final_val_error = val_result.error_rate / val_errors[0]
        if pct_pos_val > 0:
            norm_final_val_error = val_result.error_rate / pct_pos_val

        vis2d.clf()
        vis2d.plot(train_iters, train_errors, linewidth=self.line_width, color="b")
        vis2d.plot(val_iters, val_errors, linewidth=self.line_width, color="g")
        vis2d.ylim(0, 100)
        vis2d.legend(("TRAIN (Minibatch)", "VAL"), fontsize=self.font_size, loc="best")
        vis2d.xlabel("Iteration", fontsize=self.font_size)
        vis2d.ylabel("Error Rate", fontsize=self.font_size)
        vis2d.title("Error Rate vs Training Iteration", fontsize=self.font_size)
        figname = os.path.join(model_output_dir, "training_error_rates.png")
        vis2d.savefig(figname, dpi=self.dpi)

        vis2d.clf()
        vis2d.plot(train_iters, norm_train_errors, linewidth=4, color="b")
        vis2d.plot(val_iters, norm_val_errors, linewidth=4, color="g")
        vis2d.ylim(0, 2.0)
        vis2d.legend(("TRAIN (Minibatch)", "VAL"), fontsize=self.font_size, loc="best")
        vis2d.xlabel("Iteration", fontsize=self.font_size)
        vis2d.ylabel("Normalized Error Rate", fontsize=self.font_size)
        vis2d.title("Normalized Error Rate vs Training Iteration", fontsize=self.font_size)
        figname = os.path.join(model_output_dir, "training_norm_error_rates.png")
        vis2d.savefig(figname, dpi=self.dpi)

        train_losses[train_losses > MAX_LOSS] = MAX_LOSS  # Cap losses.
        vis2d.clf()
        vis2d.plot(train_iters, train_losses, linewidth=self.line_width, color="b")
        vis2d.ylim(0, 2.0)
        vis2d.xlabel("Iteration", fontsize=self.font_size)
        vis2d.ylabel("Loss", fontsize=self.font_size)
        vis2d.title("Training Loss vs Iteration", fontsize=self.font_size)
        figname = os.path.join(model_output_dir, "training_losses.png")
        vis2d.savefig(figname, dpi=self.dpi)

        # Log.
        self.logger.info("TRAIN")
        self.logger.info("Original error: %.3f" % (train_errors[0]))
        self.logger.info("Final error: %.3f" % (train_result.error_rate))
        self.logger.info("Orig loss: %.3f" % (train_losses[0]))
        self.logger.info("Final loss: %.3f" % (train_losses[-1]))

        self.logger.info("VAL")
        self.logger.info("Original error: %.3f" % (pct_pos_val))
        self.logger.info("Final error: %.3f" % (val_result.error_rate))
        self.logger.info("Normalized error: %.3f" % (norm_final_val_error))

        return (train_errors[0], train_result.error_rate, train_losses[0],
                train_losses[-1], pct_pos_val, val_result.error_rate,
                norm_final_val_error)
    except Exception as e:
        self.logger.error("Failed to plot training curves!\n" + str(e))
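
# A tiny standalone illustration of the windowing step above: raw
# per-minibatch errors are averaged over non-overlapping chunks of WINDOW
# iterations to smooth the training curves. WINDOW=100 and the random data
# are assumed values for illustration only.
import numpy as np

WINDOW = 100
raw = np.random.rand(1000)  # stand-in for raw_train_errors
iters = np.arange(0, raw.shape[0], WINDOW)
smoothed = np.array([np.mean(raw[i:i + WINDOW]) for i in iters])
# smoothed[k] is the mean error over iterations [k*WINDOW, (k+1)*WINDOW).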
def _run_prediction_single_model(self, model_dir, model_output_dir, dataset_config):
    """Analyze the performance of a single model."""
    # Read in model config.
    model_config_filename = os.path.join(model_dir, GQCNNFilenames.SAVED_CFG)
    with open(model_config_filename) as data_file:
        model_config = json.load(data_file)

    # Load model.
    self.logger.info("Loading model %s" % (model_dir))
    log_file = None
    for handler in self.logger.handlers:
        if isinstance(handler, logging.FileHandler):
            log_file = handler.baseFilename
    gqcnn = get_gqcnn_model(verbose=self.verbose).load(model_dir, verbose=self.verbose,
                                                       log_file=log_file)
    gqcnn.open_session()
    gripper_mode = gqcnn.gripper_mode
    angular_bins = gqcnn.angular_bins

    # Read params from the config.
    if dataset_config is None:
        dataset_dir = model_config["dataset_dir"]
        split_name = model_config["split_name"]
        image_field_name = model_config["image_field_name"]
        pose_field_name = model_config["pose_field_name"]
        metric_name = model_config["target_metric_name"]
        metric_thresh = model_config["metric_thresh"]
    else:
        dataset_dir = dataset_config["dataset_dir"]
        split_name = dataset_config["split_name"]
        image_field_name = dataset_config["image_field_name"]
        pose_field_name = dataset_config["pose_field_name"]
        metric_name = dataset_config["target_metric_name"]
        metric_thresh = dataset_config["metric_thresh"]
        gripper_mode = dataset_config["gripper_mode"]

    self.logger.info("Loading dataset %s" % (dataset_dir))
    dataset = TensorDataset.open(dataset_dir)
    train_indices, val_indices, _ = dataset.split(split_name)

    # Visualize conv filters.
    conv1_filters = gqcnn.filters
    num_filt = conv1_filters.shape[3]
    d = utils.sqrt_ceil(num_filt)
    vis2d.clf()
    for k in range(num_filt):
        filt = conv1_filters[:, :, 0, k]
        vis2d.subplot(d, d, k + 1)
        vis2d.imshow(DepthImage(filt))
    figname = os.path.join(model_output_dir, "conv1_filters.pdf")
    vis2d.savefig(figname, dpi=self.dpi)

    # Aggregate training and validation true labels and predicted
    # probabilities.
    all_predictions = []
    if angular_bins > 0:
        all_predictions_raw = []
    all_labels = []
    for i in range(dataset.num_tensors):
        # Log progress.
        if i % self.log_rate == 0:
            self.logger.info("Predicting tensor %d of %d" % (i + 1, dataset.num_tensors))

        # Read in data.
        image_arr = dataset.tensor(image_field_name, i).arr
        pose_arr = read_pose_data(dataset.tensor(pose_field_name, i).arr, gripper_mode)
        metric_arr = dataset.tensor(metric_name, i).arr
        label_arr = 1 * (metric_arr > metric_thresh)
        label_arr = label_arr.astype(np.uint8)

        if angular_bins > 0:
            # Form mask to extract predictions from ground-truth angular bins.
            raw_poses = dataset.tensor(pose_field_name, i).arr
            angles = raw_poses[:, 3]
            neg_ind = np.where(angles < 0)
            # TODO(vsatish): These should use the max angle instead.
            angles = np.abs(angles) % GeneralConstants.PI
            angles[neg_ind] *= -1
            g_90 = np.where(angles > (GeneralConstants.PI / 2))
            l_neg_90 = np.where(angles < (-1 * (GeneralConstants.PI / 2)))
            angles[g_90] -= GeneralConstants.PI
            angles[l_neg_90] += GeneralConstants.PI
            # TODO(vsatish): Fix this along with the others.
            angles *= -1  # Hack to fix the reversed angle convention.
            angles += (GeneralConstants.PI / 2)
            pred_mask = np.zeros((raw_poses.shape[0], angular_bins * 2), dtype=bool)
            bin_width = GeneralConstants.PI / angular_bins
            for j in range(angles.shape[0]):
                pred_mask[j, int((angles[j] // bin_width) * 2)] = True
                pred_mask[j, int((angles[j] // bin_width) * 2 + 1)] = True

        # Predict with GQ-CNN.
        predictions = gqcnn.predict(image_arr, pose_arr)
        if angular_bins > 0:
            raw_predictions = np.array(predictions)
            predictions = predictions[pred_mask].reshape((-1, 2))

        # Aggregate.
        all_predictions.extend(predictions[:, 1].tolist())
        if angular_bins > 0:
            all_predictions_raw.extend(raw_predictions.tolist())
        all_labels.extend(label_arr.tolist())

    # Close session.
    gqcnn.close_session()

    # Create arrays.
    all_predictions = np.array(all_predictions)
    all_labels = np.array(all_labels)
    train_predictions = all_predictions[train_indices]
    val_predictions = all_predictions[val_indices]
    train_labels = all_labels[train_indices]
    val_labels = all_labels[val_indices]
    if angular_bins > 0:
        all_predictions_raw = np.array(all_predictions_raw)
        train_predictions_raw = all_predictions_raw[train_indices]
        val_predictions_raw = all_predictions_raw[val_indices]

    # Aggregate results.
    train_result = BinaryClassificationResult(train_predictions, train_labels)
    val_result = BinaryClassificationResult(val_predictions, val_labels)
    train_result.save(os.path.join(model_output_dir, "train_result.cres"))
    val_result.save(os.path.join(model_output_dir, "val_result.cres"))

    # Get stats, plot curves.
    self.logger.info("Model %s training error rate: %.3f"
                     % (model_dir, train_result.error_rate))
    self.logger.info("Model %s validation error rate: %.3f"
                     % (model_dir, val_result.error_rate))
    self.logger.info("Model %s training loss: %.3f"
                     % (model_dir, train_result.cross_entropy_loss))
    self.logger.info("Model %s validation loss: %.3f"
                     % (model_dir, val_result.cross_entropy_loss))

    # Save images.
    vis2d.figure()
    example_dir = os.path.join(model_output_dir, "examples")
    if not os.path.exists(example_dir):
        os.mkdir(example_dir)

    # Train.
    self.logger.info("Saving training examples")
    train_example_dir = os.path.join(example_dir, "train")
    if not os.path.exists(train_example_dir):
        os.mkdir(train_example_dir)

    # Train TP.
    true_positive_indices = train_result.true_positive_indices
    np.random.shuffle(true_positive_indices)
    true_positive_indices = true_positive_indices[:self.num_vis]
    for i, j in enumerate(true_positive_indices):
        k = train_indices[j]
        datapoint = dataset.datapoint(k, field_names=[image_field_name, pose_field_name])
        vis2d.clf()
        if angular_bins > 0:
            self._plot_grasp(datapoint, image_field_name, pose_field_name, gripper_mode,
                             angular_preds=train_predictions_raw[j])
        else:
            self._plot_grasp(datapoint, image_field_name, pose_field_name, gripper_mode)
        vis2d.title("Datapoint %d: Pred: %.3f Label: %.3f"
                    % (k, train_result.pred_probs[j], train_result.labels[j]),
                    fontsize=self.font_size)
        vis2d.savefig(os.path.join(train_example_dir, "true_positive_%03d.png" % (i)))

    # Train FP.
    false_positive_indices = train_result.false_positive_indices
    np.random.shuffle(false_positive_indices)
    false_positive_indices = false_positive_indices[:self.num_vis]
    for i, j in enumerate(false_positive_indices):
        k = train_indices[j]
        datapoint = dataset.datapoint(k, field_names=[image_field_name, pose_field_name])
        vis2d.clf()
        if angular_bins > 0:
            self._plot_grasp(datapoint, image_field_name, pose_field_name, gripper_mode,
                             angular_preds=train_predictions_raw[j])
        else:
            self._plot_grasp(datapoint, image_field_name, pose_field_name, gripper_mode)
        vis2d.title("Datapoint %d: Pred: %.3f Label: %.3f"
                    % (k, train_result.pred_probs[j], train_result.labels[j]),
                    fontsize=self.font_size)
        vis2d.savefig(os.path.join(train_example_dir, "false_positive_%03d.png" % (i)))

    # Train TN.
    true_negative_indices = train_result.true_negative_indices
    np.random.shuffle(true_negative_indices)
    true_negative_indices = true_negative_indices[:self.num_vis]
    for i, j in enumerate(true_negative_indices):
        k = train_indices[j]
        datapoint = dataset.datapoint(k, field_names=[image_field_name, pose_field_name])
        vis2d.clf()
        if angular_bins > 0:
            self._plot_grasp(datapoint, image_field_name, pose_field_name, gripper_mode,
                             angular_preds=train_predictions_raw[j])
        else:
            self._plot_grasp(datapoint, image_field_name, pose_field_name, gripper_mode)
        vis2d.title("Datapoint %d: Pred: %.3f Label: %.3f"
                    % (k, train_result.pred_probs[j], train_result.labels[j]),
                    fontsize=self.font_size)
        vis2d.savefig(os.path.join(train_example_dir, "true_negative_%03d.png" % (i)))

    # Train FN.
    false_negative_indices = train_result.false_negative_indices
    np.random.shuffle(false_negative_indices)
    false_negative_indices = false_negative_indices[:self.num_vis]
    for i, j in enumerate(false_negative_indices):
        k = train_indices[j]
        datapoint = dataset.datapoint(k, field_names=[image_field_name, pose_field_name])
        vis2d.clf()
        if angular_bins > 0:
            self._plot_grasp(datapoint, image_field_name, pose_field_name, gripper_mode,
                             angular_preds=train_predictions_raw[j])
        else:
            self._plot_grasp(datapoint, image_field_name, pose_field_name, gripper_mode)
        vis2d.title("Datapoint %d: Pred: %.3f Label: %.3f"
                    % (k, train_result.pred_probs[j], train_result.labels[j]),
                    fontsize=self.font_size)
        vis2d.savefig(os.path.join(train_example_dir, "false_negative_%03d.png" % (i)))

    # Val.
    self.logger.info("Saving validation examples")
    val_example_dir = os.path.join(example_dir, "val")
    if not os.path.exists(val_example_dir):
        os.mkdir(val_example_dir)

    # Val TP.
    true_positive_indices = val_result.true_positive_indices
    np.random.shuffle(true_positive_indices)
    true_positive_indices = true_positive_indices[:self.num_vis]
    for i, j in enumerate(true_positive_indices):
        k = val_indices[j]
        datapoint = dataset.datapoint(k, field_names=[image_field_name, pose_field_name])
        vis2d.clf()
        if angular_bins > 0:
            self._plot_grasp(datapoint, image_field_name, pose_field_name, gripper_mode,
                             angular_preds=val_predictions_raw[j])
        else:
            self._plot_grasp(datapoint, image_field_name, pose_field_name, gripper_mode)
        vis2d.title("Datapoint %d: Pred: %.3f Label: %.3f"
                    % (k, val_result.pred_probs[j], val_result.labels[j]),
                    fontsize=self.font_size)
        vis2d.savefig(os.path.join(val_example_dir, "true_positive_%03d.png" % (i)))

    # Val FP.
    false_positive_indices = val_result.false_positive_indices
    np.random.shuffle(false_positive_indices)
    false_positive_indices = false_positive_indices[:self.num_vis]
    for i, j in enumerate(false_positive_indices):
        k = val_indices[j]
        datapoint = dataset.datapoint(k, field_names=[image_field_name, pose_field_name])
        vis2d.clf()
        if angular_bins > 0:
            self._plot_grasp(datapoint, image_field_name, pose_field_name, gripper_mode,
                             angular_preds=val_predictions_raw[j])
        else:
            self._plot_grasp(datapoint, image_field_name, pose_field_name, gripper_mode)
        vis2d.title("Datapoint %d: Pred: %.3f Label: %.3f"
                    % (k, val_result.pred_probs[j], val_result.labels[j]),
                    fontsize=self.font_size)
        vis2d.savefig(os.path.join(val_example_dir, "false_positive_%03d.png" % (i)))

    # Val TN.
    true_negative_indices = val_result.true_negative_indices
    np.random.shuffle(true_negative_indices)
    true_negative_indices = true_negative_indices[:self.num_vis]
    for i, j in enumerate(true_negative_indices):
        k = val_indices[j]
        datapoint = dataset.datapoint(k, field_names=[image_field_name, pose_field_name])
        vis2d.clf()
        if angular_bins > 0:
            self._plot_grasp(datapoint, image_field_name, pose_field_name, gripper_mode,
                             angular_preds=val_predictions_raw[j])
        else:
            self._plot_grasp(datapoint, image_field_name, pose_field_name, gripper_mode)
        vis2d.title("Datapoint %d: Pred: %.3f Label: %.3f"
                    % (k, val_result.pred_probs[j], val_result.labels[j]),
                    fontsize=self.font_size)
        vis2d.savefig(os.path.join(val_example_dir, "true_negative_%03d.png" % (i)))

    # Val FN.
    false_negative_indices = val_result.false_negative_indices
    np.random.shuffle(false_negative_indices)
    false_negative_indices = false_negative_indices[:self.num_vis]
    for i, j in enumerate(false_negative_indices):
        k = val_indices[j]
        datapoint = dataset.datapoint(k, field_names=[image_field_name, pose_field_name])
        vis2d.clf()
        if angular_bins > 0:
            self._plot_grasp(datapoint, image_field_name, pose_field_name, gripper_mode,
                             angular_preds=val_predictions_raw[j])
        else:
            self._plot_grasp(datapoint, image_field_name, pose_field_name, gripper_mode)
        vis2d.title("Datapoint %d: Pred: %.3f Label: %.3f"
                    % (k, val_result.pred_probs[j], val_result.labels[j]),
                    fontsize=self.font_size)
        vis2d.savefig(os.path.join(val_example_dir, "false_negative_%03d.png" % (i)))

    # Save summary stats.
    train_summary_stats = {
        "error_rate": train_result.error_rate,
        "ap_score": train_result.ap_score,
        "auc_score": train_result.auc_score,
        "loss": train_result.cross_entropy_loss
    }
    train_stats_filename = os.path.join(model_output_dir, "train_stats.json")
    json.dump(train_summary_stats, open(train_stats_filename, "w"),
              indent=JSON_INDENT, sort_keys=True)

    val_summary_stats = {
        "error_rate": val_result.error_rate,
        "ap_score": val_result.ap_score,
        "auc_score": val_result.auc_score,
        "loss": val_result.cross_entropy_loss
    }
    val_stats_filename = os.path.join(model_output_dir, "val_stats.json")
    json.dump(val_summary_stats, open(val_stats_filename, "w"),
              indent=JSON_INDENT, sort_keys=True)

    return train_result, val_result
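
# A small standalone illustration of the angular-bin mask built above: a
# GQ-CNN trained with angular_bins > 0 outputs two logits per bin, and the
# mask picks out the (failure, success) pair for the bin containing each
# grasp's angle. The values below are made up for illustration.
import numpy as np

angular_bins = 4
bin_width = np.pi / angular_bins
angles = np.array([0.1, 1.2, 2.8])  # angles already shifted into [0, pi)
pred_mask = np.zeros((angles.shape[0], angular_bins * 2), dtype=bool)
for j in range(angles.shape[0]):
    b = int(angles[j] // bin_width)
    pred_mask[j, 2 * b] = True       # failure logit for that bin
    pred_mask[j, 2 * b + 1] = True   # success logit for that bin
# predictions[pred_mask].reshape((-1, 2)) then yields one
# (p_failure, p_success) row per grasp.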
def _sample_suction_points(self, depth_im, camera_intr, num_samples,
                           segmask=None, visualize=False):
    """Sample a set of 2D suction point candidates from a depth image by
    choosing points on an object surface uniformly at random and then
    sampling around the surface normal.

    Parameters
    ----------
    depth_im : :obj:`perception.DepthImage`
        Depth image to sample from.
    camera_intr : :obj:`perception.CameraIntrinsics`
        Intrinsics of the camera that captured the images.
    num_samples : int
        Number of grasps to sample.
    segmask : :obj:`perception.BinaryImage`
        Binary image segmenting out the object of interest.
    visualize : bool
        Whether or not to show intermediate samples (for debugging).

    Returns
    -------
    :obj:`list` of :obj:`SuctionPoint2D`
        List of 2D suction point candidates.
    """
    # mask out the object of interest
    filter_start = time()
    depth_im_mask = depth_im.copy()
    if segmask is not None:
        depth_im_mask = depth_im.mask_binary(segmask)
    logging.debug('Filtering took %.3f sec' % (time() - filter_start))

    if visualize:
        vis.figure()
        vis.subplot(1, 2, 1)
        vis.imshow(depth_im)
        vis.subplot(1, 2, 2)
        vis.imshow(depth_im_mask)
        vis.show()

    # project to get the point cloud
    cloud_start = time()
    point_cloud_im = camera_intr.deproject_to_image(depth_im_mask)
    normal_cloud_im = point_cloud_im.normal_cloud_im()
    nonzero_px = depth_im_mask.nonzero_pixels()
    num_nonzero_px = nonzero_px.shape[0]
    if num_nonzero_px == 0:
        return []
    logging.debug('Normal cloud took %.3f sec' % (time() - cloud_start))

    # randomly sample points and add to image
    sample_start = time()
    suction_points = []
    k = 0
    sample_size = min(self._max_num_samples, num_nonzero_px)
    indices = np.random.choice(num_nonzero_px, size=sample_size, replace=False)
    while k < sample_size and len(suction_points) < num_samples:
        # sample a point uniformly at random
        ind = indices[k]
        center_px = np.array([nonzero_px[ind, 1], nonzero_px[ind, 0]])
        center = Point(center_px, frame=camera_intr.frame)
        axis = -normal_cloud_im[center.y, center.x]
        depth = point_cloud_im[center.y, center.x][2]

        # update number of tries
        k += 1

        # check center px dist from boundary
        if center_px[0] < self._min_dist_from_boundary or \
           center_px[1] < self._min_dist_from_boundary or \
           center_px[1] > depth_im.height - self._min_dist_from_boundary or \
           center_px[0] > depth_im.width - self._min_dist_from_boundary:
            continue

        # perturb depth
        delta_depth = self._depth_rv.rvs(size=1)[0]
        depth = depth + delta_depth

        # keep if the angle between the camera optical axis and the suction
        # direction is less than a threshold
        dot = max(min(axis.dot(np.array([0, 0, 1])), 1.0), -1.0)
        psi = np.arccos(dot)
        if psi < self._max_suction_dir_optical_axis_angle:
            # check distance to ensure sample diversity
            candidate = SuctionPoint2D(center, axis, depth, camera_intr=camera_intr)

            if visualize:
                vis.figure()
                vis.imshow(depth_im)
                vis.scatter(center.x, center.y)
                vis.show()

            suction_points.append(candidate)
    logging.debug('Loop took %.3f sec' % (time() - sample_start))
    return suction_points
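
# A small standalone check mirroring the optical-axis test above: a suction
# axis is kept only if it deviates from the camera's +z axis by less than a
# threshold. The threshold value below is assumed for illustration.
import numpy as np

max_angle = np.deg2rad(30.0)  # hypothetical threshold
axis = np.array([0.1, 0.0, 0.99])
axis = axis / np.linalg.norm(axis)
dot = np.clip(axis.dot(np.array([0.0, 0.0, 1.0])), -1.0, 1.0)
psi = np.arccos(dot)          # angle to the optical axis
print(psi < max_angle)        # True: candidate kept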
def _sample_antipodal_grasps(self,
                             depth_im,
                             camera_intr,
                             num_samples,
                             segmask=None,
                             visualize=False):
    """Sample a set of 2D grasp candidates from a depth image by finding
    depth edges, then uniformly sampling point pairs and keeping only
    antipodal grasps with width less than the maximum allowable.

    Parameters
    ----------
    depth_im : :obj:`perception.DepthImage`
        Depth image to sample from.
    camera_intr : :obj:`perception.CameraIntrinsics`
        Intrinsics of the camera that captured the images.
    num_samples : int
        Number of grasps to sample.
    segmask : :obj:`perception.BinaryImage`
        Binary image segmenting out the object of interest.
    visualize : bool
        Whether or not to show intermediate samples (for debugging).

    Returns
    -------
    :obj:`list` of :obj:`Grasp2D`
        List of 2D grasp candidates.
    """
    # Compute edge pixels from thresholded depth gradients.
    edge_start = time()
    depth_im = depth_im.apply(snf.gaussian_filter,
                              sigma=self._depth_grad_gaussian_sigma)
    scale_factor = self._rescale_factor
    depth_im_downsampled = depth_im.resize(scale_factor)
    depth_im_threshed = depth_im_downsampled.threshold_gradients(
        self._depth_grad_thresh)
    edge_pixels = (1.0 / scale_factor) * depth_im_threshed.zero_pixels()
    edge_pixels = edge_pixels.astype(np.int16)
    depth_im_mask = depth_im.copy()
    if segmask is not None:
        edge_pixels = np.array(
            [p for p in edge_pixels if np.any(segmask[p[0], p[1]] > 0)])
        depth_im_mask = depth_im.mask_binary(segmask)

    # Re-threshold at full resolution if downsampling left too few edges.
    if edge_pixels.shape[0] < self._min_num_edge_pixels:
        logging.info('Too few edge pixels!')
        depth_im_threshed = depth_im.threshold_gradients(
            self._depth_grad_thresh)
        edge_pixels = depth_im_threshed.zero_pixels()
        edge_pixels = edge_pixels.astype(np.int16)
        depth_im_mask = depth_im.copy()
        if segmask is not None:
            edge_pixels = np.array(
                [p for p in edge_pixels if np.any(segmask[p[0], p[1]] > 0)])
            depth_im_mask = depth_im.mask_binary(segmask)

    num_pixels = edge_pixels.shape[0]
    logging.debug('Depth edge detection took %.3f sec' %
                  (time() - edge_start))
    logging.debug('Found %d edge pixels' % (num_pixels))

    # Exit if no edge pixels were found.
    if num_pixels == 0:
        return []

    # Compute the valid depth range over the masked image.
    min_depth = np.min(
        depth_im_mask.data[depth_im_mask.data > 0]) + self._min_depth_offset
    max_depth = np.max(
        depth_im_mask.data[depth_im_mask.data > 0]) + self._max_depth_offset

    # Compute surface normals at the edge pixels.
    normal_start = time()
    edge_normals = self._surface_normals(depth_im, edge_pixels)
    logging.debug('Normal computation took %.3f sec' %
                  (time() - normal_start))

    if visualize:
        vis.figure()
        vis.subplot(1, 3, 1)
        vis.imshow(depth_im)
        if num_pixels > 0:
            vis.scatter(edge_pixels[:, 1], edge_pixels[:, 0], s=10, c='b')
        X = [pix[1] for pix in edge_pixels]
        Y = [pix[0] for pix in edge_pixels]
        U = [10 * n[1] for n in edge_normals]
        V = [-10 * n[0] for n in edge_normals]
        plt.quiver(X, Y, U, V, units='x', scale=0.5, zorder=2, color='g')
        vis.title('Edge pixels and normals')
        vis.subplot(1, 3, 2)
        vis.imshow(depth_im_threshed)
        vis.title('Edge map')
        vis.subplot(1, 3, 3)
        vis.imshow(segmask)
        vis.title('Segmask')
        vis.show()

    # Form the set of valid candidate point pairs: roughly opposed normals
    # and pixel distance within the gripper width.
    pruning_start = time()
    max_grasp_width_px = Grasp2D(Point(np.zeros(2)),
                                 0.0,
                                 min_depth,
                                 width=self._gripper_width,
                                 camera_intr=camera_intr).width_px
    normal_ip = edge_normals.dot(edge_normals.T)
    dists = ssd.squareform(ssd.pdist(edge_pixels))
    valid_indices = np.where(
        (normal_ip < -np.cos(np.arctan(self._friction_coef)))
        & (dists < max_grasp_width_px) & (dists > 0.0))
    valid_indices = np.c_[valid_indices[0], valid_indices[1]]
    logging.debug('Normal pruning %.3f sec' % (time() - pruning_start))

    # Return early if no valid pairs survive the pruning.
    num_pairs = valid_indices.shape[0]
    if num_pairs == 0:
        return []

    # Prune out non-antipodal grasps: both contact normals must lie within
    # the friction cone half-angle of the grasp axis.
    contact_points1 = edge_pixels[valid_indices[:, 0], :]
    contact_points2 = edge_pixels[valid_indices[:, 1], :]
    contact_normals1 = edge_normals[valid_indices[:, 0], :]
    contact_normals2 = edge_normals[valid_indices[:, 1], :]
    v = contact_points1 - contact_points2
    v_norm = np.linalg.norm(v, axis=1)
    v = v / np.tile(v_norm[:, np.newaxis], [1, 2])
    ip1 = np.sum(contact_normals1 * v, axis=1)
    ip2 = np.sum(contact_normals2 * (-v), axis=1)
    ip1 = np.clip(ip1, -1.0, 1.0)
    ip2 = np.clip(ip2, -1.0, 1.0)
    beta1 = np.arccos(ip1)
    beta2 = np.arccos(ip2)
    alpha = np.arctan(self._friction_coef)
    antipodal_indices = np.where((beta1 < alpha) & (beta2 < alpha))[0]

    # Return early if no antipodal pairs remain.
    num_pairs = antipodal_indices.shape[0]
    if num_pairs == 0:
        return []

    sample_size = min(self._max_rejection_samples, num_pairs)
    grasp_indices = np.random.choice(antipodal_indices,
                                     size=sample_size,
                                     replace=False)
    logging.debug('Grasp comp took %.3f sec' % (time() - pruning_start))

    # Compute grasps from the sampled antipodal pairs.
    sample_start = time()
    k = 0
    grasps = []
    while k < sample_size and len(grasps) < num_samples:
        grasp_ind = grasp_indices[k]
        p1 = contact_points1[grasp_ind, :]
        p2 = contact_points2[grasp_ind, :]
        n1 = contact_normals1[grasp_ind, :]
        n2 = contact_normals2[grasp_ind, :]
        width = np.linalg.norm(p1 - p2)
        k += 1

        # Compute the grasp center and axis. Cast the center to int so it
        # can index the depth image below.
        grasp_center = ((p1 + p2) / 2).astype(np.int64)
        grasp_axis = p2 - p1
        grasp_axis = grasp_axis / np.linalg.norm(grasp_axis)
        grasp_theta = np.pi / 2
        if grasp_axis[1] != 0:
            grasp_theta = np.arctan(grasp_axis[0] / grasp_axis[1])
        grasp_center_pt = Point(
            np.array([grasp_center[1], grasp_center[0]]))

        # Reject grasps whose center is too close to the image boundary.
        if grasp_center[0] < self._min_dist_from_boundary or \
           grasp_center[1] < self._min_dist_from_boundary or \
           grasp_center[0] > depth_im.height - self._min_dist_from_boundary or \
           grasp_center[1] > depth_im.width - self._min_dist_from_boundary:
            continue

        # Sample depths.
        for i in range(self._depth_samples_per_grasp):
            # Get the minimum depth in a window around the center pixel.
            depth_win = depth_im.data[grasp_center[0] -
                                      self._h:grasp_center[0] + self._h,
                                      grasp_center[1] -
                                      self._w:grasp_center[1] + self._w]
            center_depth = np.min(depth_win)
            if center_depth == 0 or np.isnan(center_depth):
                continue

            # Sample a depth uniformly between the offset min and max.
            min_depth = center_depth + self._min_depth_offset
            max_depth = center_depth + self._max_depth_offset
            sample_depth = min_depth + (max_depth -
                                        min_depth) * np.random.rand()
            candidate_grasp = Grasp2D(grasp_center_pt,
                                      grasp_theta,
                                      sample_depth,
                                      width=self._gripper_width,
                                      camera_intr=camera_intr,
                                      contact_points=[p1, p2],
                                      contact_normals=[n1, n2])

            if visualize:
                vis.figure()
                vis.imshow(depth_im)
                vis.grasp(candidate_grasp)
                vis.scatter(p1[1], p1[0], c='b', s=25)
                vis.scatter(p2[1], p2[0], c='b', s=25)
                vis.show()
            grasps.append(candidate_grasp)

    # Return sampled grasps.
    logging.debug('Loop took %.3f sec' % (time() - sample_start))
    return grasps
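To make the antipodality test above concrete: a contact pair is kept only when both surface normals lie within the friction cone half-angle alpha = arctan(mu) of the grasp axis. Here is a standalone sketch on a single hand-picked contact pair; the contact pixels, normals, and friction coefficient are made-up values for illustration only.

import numpy as np

# Sketch of the antipodality test for one contact pair (assumed values).
mu = 0.5                                  # assumed friction coefficient
alpha = np.arctan(mu)                     # friction cone half-angle
p1 = np.array([40.0, 100.0])              # contact pixels (row, col)
p2 = np.array([40.0, 140.0])
n1 = np.array([0.0, -1.0])                # outward normal at p1
n2 = np.array([0.0, 1.0])                 # outward normal at p2

v = (p1 - p2) / np.linalg.norm(p1 - p2)   # unit grasp axis from p2 to p1
beta1 = np.arccos(np.clip(n1.dot(v), -1.0, 1.0))
beta2 = np.arccos(np.clip(n2.dot(-v), -1.0, 1.0))
print('beta1=%.2f rad, beta2=%.2f rad, antipodal=%s'
      % (beta1, beta2, beta1 < alpha and beta2 < alpha))

With these values both angles are zero, so the pair passes; tilting either normal past alpha (about 0.46 rad for mu = 0.5) makes the test fail.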
def _plan_grasp(self,
                color_im,
                depth_im,
                camera_intr,
                bounding_box=None,
                segmask=None):
    """Plan a grasp from a color image, a depth image, and the camera
    intrinsics, optionally restricted to a bounding box and/or segmask.

    Parameters
    ----------
    color_im : :obj:`perception.ColorImage`
        Color image to plan grasps on.
    depth_im : :obj:`perception.DepthImage`
        Depth image to plan grasps on.
    camera_intr : :obj:`perception.CameraIntrinsics`
        Intrinsics of the camera that captured the images.
    bounding_box : :obj:`BoundingBox`
        Optional box with minX/minY/maxX/maxY attributes restricting the
        region of interest.
    segmask : :obj:`perception.BinaryImage`
        Optional binary image segmenting out the object of interest.
    """
    rospy.loginfo("Planning Grasp")

    # Inpaint images.
    color_im = color_im.inpaint(
        rescale_factor=self.cfg["inpaint_rescale_factor"])
    depth_im = depth_im.inpaint(
        rescale_factor=self.cfg["inpaint_rescale_factor"])

    # Init segmask.
    if segmask is None:
        segmask = BinaryImage(255 *
                              np.ones(depth_im.shape).astype(np.uint8),
                              frame=color_im.frame)

    # Visualize.
    if self.cfg["vis"]["color_image"]:
        vis.imshow(color_im)
        vis.show()
    if self.cfg["vis"]["depth_image"]:
        vis.imshow(depth_im)
        vis.show()
    if self.cfg["vis"]["segmask"] and segmask is not None:
        vis.imshow(segmask)
        vis.show()

    # Aggregate color and depth images into a single
    # BerkeleyAutomation/perception `RgbdImage`.
    rgbd_im = RgbdImage.from_color_and_depth(color_im, depth_im)

    # Mask bounding box.
    if bounding_box is not None:
        # Calc bb parameters.
        min_x = bounding_box.minX
        min_y = bounding_box.minY
        max_x = bounding_box.maxX
        max_y = bounding_box.maxY

        # Contain the box to the image; don't let it exceed the image
        # height/width bounds.
        if min_x < 0:
            min_x = 0
        if min_y < 0:
            min_y = 0
        if max_x > rgbd_im.width:
            max_x = rgbd_im.width
        if max_y > rgbd_im.height:
            max_y = rgbd_im.height

        # Rasterize the box into a binary mask and intersect it with the
        # segmask.
        bb_segmask_arr = np.zeros([rgbd_im.height, rgbd_im.width])
        bb_segmask_arr[min_y:max_y, min_x:max_x] = 255
        bb_segmask = BinaryImage(bb_segmask_arr.astype(np.uint8),
                                 segmask.frame)
        segmask = segmask.mask_binary(bb_segmask)

    # Visualize.
    if self.cfg["vis"]["rgbd_state"]:
        masked_rgbd_im = rgbd_im.mask_binary(segmask)
        vis.figure()
        vis.subplot(1, 2, 1)
        vis.imshow(masked_rgbd_im.color)
        vis.subplot(1, 2, 2)
        vis.imshow(masked_rgbd_im.depth)
        vis.show()

    # Create an `RgbdImageState` with the cropped `RgbdImage` and
    # `CameraIntrinsics`.
    rgbd_state = RgbdImageState(rgbd_im, camera_intr, segmask=segmask)

    # Execute policy.
    try:
        return self.execute_policy(rgbd_state, self.grasping_policy,
                                   self.grasp_pose_publisher,
                                   camera_intr.frame)
    except NoValidGraspsException:
        rospy.logerr(("While executing policy found no valid grasps from"
                      " sampled antipodal point pairs. Aborting Policy!"))
        raise rospy.ServiceException(
            ("While executing policy found no valid grasps from sampled"
             " antipodal point pairs. Aborting Policy!"))
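The bounding-box branch above reduces to two steps: clamp the box to the image bounds, then rasterize it into a binary mask that is intersected with the segmask. A minimal sketch of just that clamping-and-masking step, with assumed image dimensions and box corners:

import numpy as np

# Sketch of the bounding-box masking step (assumed size and corners).
height, width = 480, 640
min_x, min_y, max_x, max_y = -20, 50, 700, 300   # box partly off-image

# Contain the box to the image bounds.
min_x, min_y = max(min_x, 0), max(min_y, 0)
max_x, max_y = min(max_x, width), min(max_y, height)

# Pixels inside the clamped box become foreground (255).
bb_mask = np.zeros((height, width), dtype=np.uint8)
bb_mask[min_y:max_y, min_x:max_x] = 255
print('%d foreground pixels' % np.count_nonzero(bb_mask))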