def _plot_grasp(self,
                datapoint,
                image_field_name,
                pose_field_name,
                gripper_mode,
                angular_preds=None):
    """Plots a single grasp represented as a datapoint."""
    image = DepthImage(datapoint[image_field_name][:, :, 0])
    depth = datapoint[pose_field_name][2]
    width = 0
    grasps = []
    if gripper_mode == GripperMode.PARALLEL_JAW or \
            gripper_mode == GripperMode.LEGACY_PARALLEL_JAW:
        if angular_preds is not None:
            # two logits per angular bin -> integer division
            num_bins = angular_preds.shape[0] // 2
            bin_width = GeneralConstants.PI / num_bins
            for i in range(num_bins):
                bin_cent_ang = i * bin_width + bin_width / 2
                grasps.append(
                    Grasp2D(center=image.center,
                            angle=GeneralConstants.PI / 2 - bin_cent_ang,
                            depth=depth,
                            width=0.0))
            grasps.append(
                Grasp2D(center=image.center,
                        angle=datapoint[pose_field_name][3],
                        depth=depth,
                        width=0.0))
        else:
            grasps.append(
                Grasp2D(center=image.center,
                        angle=0,
                        depth=depth,
                        width=0.0))
        width = datapoint[pose_field_name][-1]
    else:
        grasps.append(
            SuctionPoint2D(center=image.center, axis=[1, 0, 0], depth=depth))
    vis2d.imshow(image)
    for i, grasp in enumerate(grasps[:-1]):
        vis2d.grasp(grasp,
                    width=width,
                    color=plt.cm.RdYlGn(angular_preds[i * 2 + 1]))
    vis2d.grasp(grasps[-1], width=width, color='b')
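# The loop above converts the network's angular output into bin-center
# angles: two logits per bin over [0, pi), mapped into the image frame as
# pi/2 - angle. A minimal standalone sketch of that mapping (the helper
# name is illustrative, not part of the original API):
def _example_bin_center_angles(num_angular_preds):
    """Return the image-frame angle of each angular bin center."""
    import numpy as np
    num_bins = num_angular_preds // 2  # two logits (reject/accept) per bin
    bin_width = np.pi / num_bins
    bin_centers = np.arange(num_bins) * bin_width + bin_width / 2
    return np.pi / 2 - bin_centers  # same convention as _plot_grasp above

# e.g. _example_bin_center_angles(32) -> 16 angles spanning (-pi/2, pi/2)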
def _get_actions(self, preds, ind, images, depths, camera_intr, num_actions):
    """Generate the actions to be returned."""
    actions = []
    ang_bin_width = math.pi / preds.shape[-1]
    for i in range(num_actions):
        im_idx = ind[i, 0]
        h_idx = ind[i, 1]
        w_idx = ind[i, 2]
        ang_idx = ind[i, 3]
        center = Point(
            np.asarray([
                w_idx * self._gqcnn_stride + self._gqcnn_recep_w / 2,
                h_idx * self._gqcnn_stride + self._gqcnn_recep_h / 2
            ]))
        ang = math.pi / 2 - (ang_idx * ang_bin_width + ang_bin_width / 2)
        depth = depths[im_idx, 0]
        grasp = Grasp2D(center,
                        ang,
                        depth,
                        width=self._gripper_width,
                        camera_intr=camera_intr)
        grasp_action = GraspAction(grasp,
                                   preds[im_idx, h_idx, w_idx, ang_idx],
                                   DepthImage(images[im_idx]))
        actions.append(grasp_action)
    return actions
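# _get_actions maps argmax indices on the fully-convolutional output grid
# back to pixel coordinates using the network stride and receptive field.
# A minimal sketch of that index-to-pixel mapping; the default stride and
# receptive-field values are assumptions for illustration only:
def _example_feature_idx_to_pixel(h_idx, w_idx, stride=4, recep=96):
    """Map an output-grid cell to the center pixel of its receptive field."""
    import numpy as np
    u = w_idx * stride + recep / 2  # x (column) coordinate
    v = h_idx * stride + recep / 2  # y (row) coordinate
    return np.array([u, v])

# e.g. _example_feature_idx_to_pixel(10, 20) -> array([128., 88.])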
def project_camera(self, T_obj_camera, camera_intr):
    """Project a grasp for a given gripper into the camera specified by a
    set of intrinsics.

    Parameters
    ----------
    T_obj_camera : :obj:`autolab_core.RigidTransform`
        rigid transformation from the object frame to the camera frame
    camera_intr : :obj:`perception.CameraIntrinsics`
        intrinsics of the camera to use

    Returns
    -------
    :obj:`Grasp2D`
        projection of the grasp into image space
    """
    # compute pose of grasp in camera frame
    T_grasp_camera = T_obj_camera * self.T_grasp_obj
    y_axis_camera = T_grasp_camera.y_axis[:2]
    if np.linalg.norm(y_axis_camera) > 0:
        y_axis_camera = y_axis_camera / np.linalg.norm(y_axis_camera)

    # compute grasp axis rotation in image space
    rot_z = np.arccos(y_axis_camera[0])
    if y_axis_camera[1] < 0:
        rot_z = -rot_z
    while rot_z < 0:
        rot_z += 2 * np.pi
    while rot_z > 2 * np.pi:
        rot_z -= 2 * np.pi

    # compute grasp center in image space
    t_grasp_camera = T_grasp_camera.translation
    p_grasp_camera = Point(t_grasp_camera, frame=camera_intr.frame)
    u_grasp_camera = camera_intr.project(p_grasp_camera)
    d_grasp_camera = t_grasp_camera[2]
    return Grasp2D(u_grasp_camera,
                   rot_z,
                   d_grasp_camera,
                   width=self.open_width,
                   camera_intr=camera_intr)
def project(self, camera_intr, T_camera_world, gripper_width=0.05):
    """Project a grasp expressed in the world frame into image space."""
    # compute pose of grasp in camera frame
    T_grasp_camera = T_camera_world.inverse() * self.T_grasp_world
    y_axis_camera = T_grasp_camera.y_axis[:2]
    if np.linalg.norm(y_axis_camera) > 0:
        y_axis_camera = y_axis_camera / np.linalg.norm(y_axis_camera)

    # compute grasp axis rotation in image space
    rot_grasp_camera = np.arccos(y_axis_camera[0])
    if y_axis_camera[1] < 0:
        rot_grasp_camera = -rot_grasp_camera
    while rot_grasp_camera < 0:
        rot_grasp_camera += 2 * np.pi
    while rot_grasp_camera > 2 * np.pi:
        rot_grasp_camera -= 2 * np.pi

    # compute grasp center in image space
    t_grasp_camera = T_grasp_camera.translation
    p_grasp_camera = Point(t_grasp_camera, frame=camera_intr.frame)
    u_grasp_camera = camera_intr.project(p_grasp_camera)
    d_grasp_camera = t_grasp_camera[2]
    return Grasp2D(u_grasp_camera,
                   rot_grasp_camera,
                   d_grasp_camera,
                   width=gripper_width,
                   camera_intr=camera_intr)
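# Both projection methods above recover the in-plane grasp rotation from
# the grasp y-axis expressed in the camera frame: arccos of the x component
# with a sign flip, wrapped into [0, 2*pi). That is equivalent to a single
# atan2, as in this minimal sketch (helper name is illustrative):
def _example_image_plane_rotation(y_axis_camera_2d):
    """In-plane rotation of a projected grasp axis, in [0, 2*pi)."""
    import numpy as np
    y = np.asarray(y_axis_camera_2d, dtype=float)
    norm = np.linalg.norm(y)
    if norm > 0:
        y = y / norm
    return np.arctan2(y[1], y[0]) % (2 * np.pi)

# e.g. _example_image_plane_rotation([-1.0, 0.0]) -> ~pi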
    segmask = BinaryImage.open(segmask_filename, frame=camera_intr.frame)
    grasp_resp = plan_grasp_segmask(color_im.rosmsg, depth_im.rosmsg,
                                    camera_intr.rosmsg, segmask.rosmsg)
else:
    grasp_resp = plan_grasp(color_im.rosmsg, depth_im.rosmsg,
                            camera_intr.rosmsg)
grasp = grasp_resp.grasp

# convert to a grasp action
grasp_type = grasp.grasp_type
if grasp_type == GQCNNGrasp.PARALLEL_JAW:
    center = Point(np.array([grasp.center_px[0], grasp.center_px[1]]),
                   frame=camera_intr.frame)
    grasp_2d = Grasp2D(center,
                       grasp.angle,
                       grasp.depth,
                       width=gripper_width,
                       camera_intr=camera_intr)
elif grasp_type == GQCNNGrasp.SUCTION:
    center = Point(np.array([grasp.center_px[0], grasp.center_px[1]]),
                   frame=camera_intr.frame)
    grasp_2d = SuctionPoint2D(center,
                              np.array([0, 0, 1]),
                              grasp.depth,
                              camera_intr=camera_intr)
else:
    raise ValueError('Grasp type %d not recognized!' % (grasp_type))
try:
    thumbnail = DepthImage(cv_bridge.imgmsg_to_cv2(
        grasp.thumbnail, desired_encoding="passthrough"),
                           frame=camera_intr.frame)
def _sample_antipodal_grasps(self,
                             depth_im,
                             camera_intr,
                             num_samples,
                             segmask=None,
                             visualize=False,
                             constraint_fn=None):
    """Sample a set of 2D grasp candidates from a depth image by finding
    depth edges, then uniformly sampling point pairs and keeping only
    antipodal grasps with width less than the maximum allowable.

    Parameters
    ----------
    depth_im : :obj:`perception.DepthImage`
        depth image to sample from
    camera_intr : :obj:`perception.CameraIntrinsics`
        intrinsics of the camera that captured the images
    num_samples : int
        number of grasps to sample
    segmask : :obj:`perception.BinaryImage`
        binary image segmenting out the object of interest
    visualize : bool
        whether or not to show intermediate samples (for debugging)
    constraint_fn : :obj:`GraspConstraintFn`
        constraint function to apply to grasps

    Returns
    -------
    :obj:`list` of :obj:`Grasp2D`
        list of 2D grasp candidates
    """
    # compute edge pixels
    edge_start = time()
    depth_im = depth_im.apply(snf.gaussian_filter,
                              sigma=self._depth_grad_gaussian_sigma)
    scale_factor = self._rescale_factor
    depth_im_downsampled = depth_im.resize(scale_factor)
    depth_im_threshed = depth_im_downsampled.threshold_gradients(
        self._depth_grad_thresh)
    edge_pixels = (1.0 / scale_factor) * depth_im_threshed.zero_pixels()
    edge_pixels = edge_pixels.astype(np.int16)
    depth_im_mask = depth_im.copy()
    if segmask is not None:
        edge_pixels = np.array(
            [p for p in edge_pixels if np.any(segmask[p[0], p[1]] > 0)])
        depth_im_mask = depth_im.mask_binary(segmask)

    # re-threshold edges if there are too few
    if edge_pixels.shape[0] < self._min_num_edge_pixels:
        self._logger.info('Too few edge pixels!')
        depth_im_threshed = depth_im.threshold_gradients(
            self._depth_grad_thresh)
        edge_pixels = depth_im_threshed.zero_pixels()
        edge_pixels = edge_pixels.astype(np.int16)
        depth_im_mask = depth_im.copy()
        if segmask is not None:
            edge_pixels = np.array(
                [p for p in edge_pixels if np.any(segmask[p[0], p[1]] > 0)])
            depth_im_mask = depth_im.mask_binary(segmask)

    num_pixels = edge_pixels.shape[0]
    self._logger.debug('Depth edge detection took %.3f sec' %
                       (time() - edge_start))
    self._logger.debug('Found %d edge pixels' % (num_pixels))

    # compute point cloud
    point_cloud_im = camera_intr.deproject_to_image(depth_im_mask)

    # compute max depth
    depth_data = depth_im_mask.data[depth_im_mask.data > 0]
    if depth_data.shape[0] == 0:
        return []
    min_depth = np.min(depth_data) + self._min_depth_offset
    max_depth = np.max(depth_data) + self._max_depth_offset

    # compute surface normals
    normal_start = time()
    edge_normals = self._surface_normals(depth_im, edge_pixels)
    self._logger.debug('Normal computation took %.3f sec' %
                       (time() - normal_start))

    if visualize:
        edge_pixels = edge_pixels[::2, :]
        edge_normals = edge_normals[::2, :]
        vis.figure()
        vis.subplot(1, 3, 1)
        vis.imshow(depth_im)
        if num_pixels > 0:
            vis.scatter(edge_pixels[:, 1], edge_pixels[:, 0], s=2, c='b')
            X = [pix[1] for pix in edge_pixels]
            Y = [pix[0] for pix in edge_pixels]
            U = [3 * pix[1] for pix in edge_normals]
            V = [-3 * pix[0] for pix in edge_normals]
            plt.quiver(X, Y, U, V,
                       units='x',
                       scale=0.25,
                       width=0.5,
                       zorder=2,
                       color='r')
        vis.title('Edge pixels and normals')
        vis.subplot(1, 3, 2)
        vis.imshow(depth_im_threshed)
        vis.title('Edge map')
        vis.subplot(1, 3, 3)
        vis.imshow(segmask)
        vis.title('Segmask')
        vis.show()

    # exit if no edge pixels
    if num_pixels == 0:
        return []

    # form set of valid candidate point pairs
    pruning_start = time()
    max_grasp_width_px = Grasp2D(Point(np.zeros(2)),
                                 0.0,
                                 min_depth,
                                 width=self._gripper_width,
                                 camera_intr=camera_intr).width_px
    normal_ip = edge_normals.dot(edge_normals.T)
    dists = ssd.squareform(ssd.pdist(edge_pixels))
    valid_indices = np.where(
        (normal_ip < -np.cos(np.arctan(self._friction_coef)))
        & (dists < max_grasp_width_px) & (dists > 0.0))
    valid_indices = np.c_[valid_indices[0], valid_indices[1]]
    self._logger.debug('Normal pruning %.3f sec' %
                       (time() - pruning_start))

    # return if no valid pairs
    num_pairs = valid_indices.shape[0]
    if num_pairs == 0:
        return []

    # prune out grasps
    contact_points1 = edge_pixels[valid_indices[:, 0], :]
    contact_points2 = edge_pixels[valid_indices[:, 1], :]
    contact_normals1 = edge_normals[valid_indices[:, 0], :]
    contact_normals2 = edge_normals[valid_indices[:, 1], :]
    v = contact_points1 - contact_points2
    v_norm = np.linalg.norm(v, axis=1)
    v = v / np.tile(v_norm[:, np.newaxis], [1, 2])
    ip1 = np.sum(contact_normals1 * v, axis=1)
    ip2 = np.sum(contact_normals2 * (-v), axis=1)
    ip1 = np.clip(ip1, -1.0, 1.0)
    ip2 = np.clip(ip2, -1.0, 1.0)
    beta1 = np.arccos(ip1)
    beta2 = np.arccos(ip2)
    alpha = np.arctan(self._friction_coef)
    antipodal_indices = np.where((beta1 < alpha) & (beta2 < alpha))[0]

    # return if no antipodal pairs
    num_pairs = antipodal_indices.shape[0]
    if num_pairs == 0:
        return []

    sample_size = min(self._max_rejection_samples, num_pairs)
    grasp_indices = np.random.choice(antipodal_indices,
                                     size=sample_size,
                                     replace=False)
    self._logger.debug('Grasp comp took %.3f sec' %
                       (time() - pruning_start))

    # compute grasps
    sample_start = time()
    k = 0
    grasps = []
    while k < sample_size and len(grasps) < num_samples:
        grasp_ind = grasp_indices[k]
        p1 = contact_points1[grasp_ind, :]
        p2 = contact_points2[grasp_ind, :]
        n1 = contact_normals1[grasp_ind, :]
        n2 = contact_normals2[grasp_ind, :]
        width = np.linalg.norm(p1 - p2)
        k += 1

        # compute center and axis
        # integer division so the depth-window indices below stay ints
        grasp_center = (p1 + p2) // 2
        grasp_axis = p2 - p1
        grasp_axis = grasp_axis / np.linalg.norm(grasp_axis)
        grasp_theta = np.pi / 2
        if grasp_axis[1] != 0:
            grasp_theta = np.arctan2(grasp_axis[0], grasp_axis[1])
        grasp_center_pt = Point(
            np.array([grasp_center[1], grasp_center[0]]))

        # compute grasp points in 3D
        x1 = point_cloud_im[p1[0], p1[1]]
        x2 = point_cloud_im[p2[0], p2[1]]
        if np.linalg.norm(x2 - x1) > self._gripper_width:
            continue

        # perturb
        if self._grasp_center_sigma > 0.0:
            grasp_center_pt = grasp_center_pt + ss.multivariate_normal.rvs(
                cov=self._grasp_center_sigma * np.diag(np.ones(2)))
        if self._grasp_angle_sigma > 0.0:
            grasp_theta = grasp_theta + ss.norm.rvs(
                scale=self._grasp_angle_sigma)

        # check center px dist from boundary
        if grasp_center[0] < self._min_dist_from_boundary or \
                grasp_center[1] < self._min_dist_from_boundary or \
                grasp_center[0] > depth_im.height - self._min_dist_from_boundary or \
                grasp_center[1] > depth_im.width - self._min_dist_from_boundary:
            continue

        # sample depths
        for i in range(self._depth_samples_per_grasp):
            # get depth in the neighborhood of the center pixel
            depth_win = depth_im.data[grasp_center[0] -
                                      self._h:grasp_center[0] + self._h,
                                      grasp_center[1] -
                                      self._w:grasp_center[1] + self._w]
            center_depth = np.min(depth_win)
            if center_depth == 0 or np.isnan(center_depth):
                continue

            # sample depth between the min and max
            min_depth = center_depth + self._min_depth_offset
            max_depth = center_depth + self._max_depth_offset
            sample_depth = min_depth + (max_depth -
                                        min_depth) * np.random.rand()
            candidate_grasp = Grasp2D(grasp_center_pt,
                                      grasp_theta,
                                      sample_depth,
                                      width=self._gripper_width,
                                      camera_intr=camera_intr,
                                      contact_points=[p1, p2],
                                      contact_normals=[n1, n2])

            if visualize:
                vis.figure()
                vis.imshow(depth_im)
                vis.grasp(candidate_grasp)
                vis.scatter(p1[1], p1[0], c='b', s=25)
                vis.scatter(p2[1], p2[0], c='b', s=25)
                vis.show()

            grasps.append(candidate_grasp)

    # return sampled grasps
    self._logger.debug('Loop took %.3f sec' % (time() - sample_start))
    return grasps
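# The pruning step above keeps a contact pair only when both surface
# normals lie inside the friction cone of half-angle alpha = arctan(mu)
# about the line joining the contacts (beta1 < alpha and beta2 < alpha).
# A minimal scalar sketch of that antipodality test (helper name is
# illustrative, not part of the original API):
def _example_is_antipodal(p1, p2, n1, n2, friction_coef):
    """Check the friction-cone condition for one contact pair."""
    import numpy as np
    v = np.asarray(p1, dtype=float) - np.asarray(p2, dtype=float)
    v = v / np.linalg.norm(v)
    ip1 = np.clip(np.dot(n1, v), -1.0, 1.0)
    ip2 = np.clip(np.dot(n2, -v), -1.0, 1.0)
    alpha = np.arctan(friction_coef)
    return np.arccos(ip1) < alpha and np.arccos(ip2) < alpha

# e.g. opposing contacts with aligned normals pass for any mu > 0:
# _example_is_antipodal([0, 10], [0, 0], [0, 1], [0, -1], 0.5) -> True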
def generate_gqcnn_dataset(dataset_path, database, target_object_keys,
                           env_rv_params, gripper_name, config):
    """Generates a GQ-CNN TensorDataset for training models with new
    grippers, quality metrics, objects, and cameras.

    Parameters
    ----------
    dataset_path : str
        path to save the dataset to
    database : :obj:`Hdf5Database`
        Dex-Net database containing the 3D meshes, grasps, and grasp metrics
    target_object_keys : :obj:`OrderedDict`
        dictionary mapping dataset names to target objects (either 'all' or
        a list of specific object keys)
    env_rv_params : :obj:`OrderedDict`
        parameters of the camera and object random variables used in
        sampling (see meshpy.UniformPlanarWorksurfaceImageRandomVariable
        for more info)
    gripper_name : str
        name of the gripper to use
    config : :obj:`autolab_core.YamlConfig`
        other parameters for dataset generation

    Notes
    -----
    Required parameters of config are specified in Other Parameters

    Other Parameters
    ----------------
    images_per_stable_pose : int
        number of object and camera poses to sample for each stable pose
    stable_pose_min_p : float
        minimum probability of occurrence for a stable pose to be used in
        data generation (used to prune bad stable poses)
    gqcnn/crop_width : int
        width, in pixels, of crop region around each grasp center, before
        resize (changes the size of the region seen by the GQ-CNN)
    gqcnn/crop_height : int
        height, in pixels, of crop region around each grasp center, before
        resize (changes the size of the region seen by the GQ-CNN)
    gqcnn/final_width : int
        width, in pixels, of final transformed grasp image for input to the
        GQ-CNN (defaults to 32)
    gqcnn/final_height : int
        height, in pixels, of final transformed grasp image for input to
        the GQ-CNN (defaults to 32)
    table_alignment/max_approach_table_angle : float
        max angle between the grasp axis and the table normal when the
        grasp approach is maximally aligned with the table normal
    table_alignment/max_approach_offset : float
        max deviation from perpendicular approach direction to use in
        grasp collision checking
    table_alignment/num_approach_offset_samples : int
        number of approach samples to use in collision checking
    collision_checking/table_offset : float
        max allowable interpenetration between the gripper and table to be
        considered collision free
    collision_checking/table_mesh_filename : str
        path to a table mesh for collision checking (default
        data/meshes/table.obj)
    collision_checking/approach_dist : float
        distance, in meters, between the approach pose and final grasp
        pose along the grasp axis
    collision_checking/delta_approach : float
        amount, in meters, to discretize the straight-line path from the
        gripper approach pose to the final grasp pose
    tensors/datapoints_per_file : int
        number of datapoints to store in each unique tensor file on disk
    tensors/fields : :obj:`dict`
        dictionary mapping field names to dictionaries specifying the data
        type, height, width, and number of channels for each tensor
    debug : bool
        True (or 1) if the random seed should be set to enforce
        deterministic behavior, False (0) otherwise
    vis/candidate_grasps : bool
        True (or 1) if the collision free candidate grasps should be
        displayed in 3D (for debugging)
    vis/rendered_images : bool
        True (or 1) if the rendered images for each stable pose should be
        displayed (for debugging)
    vis/grasp_images : bool
        True (or 1) if the transformed grasp images should be displayed
        (for debugging)
    """
    # read data gen params
    output_dir = dataset_path
    gripper = RobotGripper.load(gripper_name)
    image_samples_per_stable_pose = config['images_per_stable_pose']
    stable_pose_min_p = config['stable_pose_min_p']
    # read gqcnn params
    gqcnn_params = config['gqcnn']
    im_crop_height = gqcnn_params['crop_height']
    im_crop_width = gqcnn_params['crop_width']
    im_final_height = gqcnn_params['final_height']
    im_final_width = gqcnn_params['final_width']
    cx_crop = float(im_crop_width) / 2
    cy_crop = float(im_crop_height) / 2

    # open database
    dataset_names = target_object_keys.keys()
    datasets = [database.dataset(dn) for dn in dataset_names]
    if CREATE_SUBSET:
        datasets = [
            dataset.subset(INDEX_START, INDEX_END) for dataset in datasets
        ]

    # set target objects
    for dataset in datasets:
        if target_object_keys[dataset.name] == 'all':
            target_object_keys[dataset.name] = dataset.object_keys

    # setup grasp params
    table_alignment_params = config['table_alignment']
    min_grasp_approach_offset = -np.deg2rad(
        table_alignment_params['max_approach_offset'])
    max_grasp_approach_offset = np.deg2rad(
        table_alignment_params['max_approach_offset'])
    max_grasp_approach_table_angle = np.deg2rad(
        table_alignment_params['max_approach_table_angle'])
    num_grasp_approach_samples = table_alignment_params[
        'num_approach_offset_samples']

    phi_offsets = []
    if max_grasp_approach_offset == min_grasp_approach_offset:
        phi_inc = 1
    elif num_grasp_approach_samples == 1:
        phi_inc = max_grasp_approach_offset - min_grasp_approach_offset + 1
    else:
        phi_inc = (max_grasp_approach_offset - min_grasp_approach_offset) \
            / (num_grasp_approach_samples - 1)
    phi = min_grasp_approach_offset
    while phi <= max_grasp_approach_offset:
        phi_offsets.append(phi)
        phi += phi_inc

    # setup collision checking
    coll_check_params = config['collision_checking']
    approach_dist = coll_check_params['approach_dist']
    delta_approach = coll_check_params['delta_approach']
    table_offset = coll_check_params['table_offset']
    table_mesh_filename = coll_check_params['table_mesh_filename']
    if not os.path.isabs(table_mesh_filename):
        table_mesh_filename = os.path.join(
            os.path.dirname(os.path.realpath(__file__)), '..',
            table_mesh_filename)
    table_mesh = ObjFile(table_mesh_filename).read()

    # set tensor dataset config
    tensor_config = config['tensors']
    tensor_config['fields']['depth_ims_tf_table']['height'] = im_final_height
    tensor_config['fields']['depth_ims_tf_table']['width'] = im_final_width
    tensor_config['fields']['obj_masks']['height'] = im_final_height
    tensor_config['fields']['obj_masks']['width'] = im_final_width

    # add available metrics (assuming same are computed for all objects)
    metric_names = []
    dataset = datasets[0]
    obj_keys = dataset.object_keys
    if len(obj_keys) == 0:
        raise ValueError('No valid objects in dataset %s' % (dataset.name))
    obj = dataset[obj_keys[0]]
    grasps = dataset.grasps(obj.key, gripper=gripper.name)
    grasp_metrics = dataset.grasp_metrics(obj.key,
                                          grasps,
                                          gripper=gripper.name)
    # list() so that indexing also works under Python 3
    metric_names = grasp_metrics[list(grasp_metrics.keys())[0]].keys()
    for metric_name in metric_names:
        tensor_config['fields'][metric_name] = {}
        tensor_config['fields'][metric_name]['dtype'] = 'float32'

    # init tensor dataset
    tensor_dataset = TensorDataset(output_dir, tensor_config)
    tensor_datapoint = tensor_dataset.datapoint_template

    # setup log file
    experiment_log_filename = os.path.join(output_dir,
                                           'dataset_generation.log')
    formatter = logging.Formatter('%(asctime)s %(levelname)s: %(message)s')
    hdlr = logging.FileHandler(experiment_log_filename)
    hdlr.setFormatter(formatter)
    logging.getLogger().addHandler(hdlr)
    root_logger = logging.getLogger()

    # copy config
    out_config_filename = os.path.join(output_dir, 'dataset_generation.json')
    ordered_dict_config = collections.OrderedDict()
    for key in config.keys():
        ordered_dict_config[key] = config[key]
    with open(out_config_filename, 'w') as outfile:
        json.dump(ordered_dict_config, outfile)

    # 1. Precompute the set of valid grasps for each stable pose:
    #    i) Perpendicular to the table
    #    ii) Collision-free along the approach direction

    # load grasps if they already exist
    grasp_cache_filename = os.path.join(output_dir, CACHE_FILENAME)
    if os.path.exists(grasp_cache_filename):
        logging.info('Loading grasp candidates from file')
        candidate_grasps_dict = pkl.load(open(grasp_cache_filename, 'rb'))
    # otherwise re-compute by reading from the database and enforcing
    # constraints
    else:
        # create grasps dict
        candidate_grasps_dict = {}

        # loop through datasets and objects
        for dataset in datasets:
            logging.info('Reading dataset %s' % (dataset.name))
            for obj in dataset:
                if obj.key not in target_object_keys[dataset.name]:
                    continue

                # init candidate grasp storage
                candidate_grasps_dict[obj.key] = {}

                # setup collision checker
                collision_checker = GraspCollisionChecker(gripper)
                collision_checker.set_graspable_object(obj)

                # read in the stable poses of the mesh
                stable_poses = dataset.stable_poses(obj.key)
                for i, stable_pose in enumerate(stable_poses):
                    # render images if stable pose is valid
                    if stable_pose.p > stable_pose_min_p:
                        candidate_grasps_dict[obj.key][stable_pose.id] = []

                        # setup table in collision checker
                        T_obj_stp = stable_pose.T_obj_table.as_frames(
                            'obj', 'stp')
                        T_obj_table = obj.mesh.get_T_surface_obj(
                            T_obj_stp,
                            delta=table_offset).as_frames('obj', 'table')
                        T_table_obj = T_obj_table.inverse()
                        collision_checker.set_table(table_mesh_filename,
                                                    T_table_obj)

                        # read grasps and metrics
                        grasps = dataset.grasps(obj.key,
                                                gripper=gripper.name)
                        logging.info(
                            'Aligning %d grasps for object %s in stable pose %s'
                            % (len(grasps), obj.key, stable_pose.id))

                        # align grasps with the table
                        aligned_grasps = [
                            grasp.perpendicular_table(stable_pose)
                            for grasp in grasps
                        ]

                        # check grasp validity
                        logging.info(
                            'Checking collisions for %d grasps for object %s in stable pose %s'
                            % (len(grasps), obj.key, stable_pose.id))
                        for aligned_grasp in aligned_grasps:
                            # check angle with table plane and skip
                            # unaligned grasps
                            _, grasp_approach_table_angle, _ = \
                                aligned_grasp.grasp_angles_from_stp_z(
                                    stable_pose)
                            perpendicular_table = (
                                np.abs(grasp_approach_table_angle) <
                                max_grasp_approach_table_angle)
                            if not perpendicular_table:
                                continue

                            # check whether any valid approach directions
                            # are collision free
                            collision_free = False
                            for phi_offset in phi_offsets:
                                rotated_grasp = \
                                    aligned_grasp.grasp_y_axis_offset(
                                        phi_offset)
                                collides = \
                                    collision_checker.collides_along_approach(
                                        rotated_grasp, approach_dist,
                                        delta_approach)
                                if not collides:
                                    collision_free = True
                                    break

                            # store if aligned to table
                            candidate_grasps_dict[obj.key][
                                stable_pose.id].append(
                                    GraspInfo(aligned_grasp, collision_free))

                            # visualize if specified
                            if collision_free and \
                                    config['vis']['candidate_grasps']:
                                logging.info('Grasp %d' % (aligned_grasp.id))
                                vis.figure()
                                vis.gripper_on_object(gripper, aligned_grasp,
                                                      obj,
                                                      stable_pose.T_obj_world)
                                vis.show()

        # save to file
        logging.info('Saving to file')
        pkl.dump(candidate_grasps_dict, open(grasp_cache_filename, 'wb'))
    # 2. Render a dataset of images and associate the gripper pose with
    #    image coordinates for each grasp in the Dex-Net database

    # setup variables
    obj_category_map = {}
    pose_category_map = {}

    cur_pose_label = 0
    cur_obj_label = 0
    cur_image_label = 0

    # render images for each stable pose of each object in the dataset
    render_modes = [RenderMode.SEGMASK, RenderMode.DEPTH_SCENE]
    for dataset in datasets:
        logging.info('Generating data for dataset %s' % (dataset.name))

        # iterate through all object keys
        object_keys = dataset.object_keys
        for obj_key in object_keys:
            obj = dataset[obj_key]
            obj.mesh.rescale(RESCALING_FACTOR)
            if obj.key not in target_object_keys[dataset.name]:
                continue

            # read in the stable poses of the mesh
            stable_poses = dataset.stable_poses(obj.key)

            if SAVE_ONE_OBJECT:
                # save object mesh
                savefile = ObjFile("./data/meshes/dexnet/" + obj.key +
                                   ".obj")
                savefile.write(obj.mesh)

                # save stable poses
                save_stp = StablePoseFile("./data/meshes/dexnet/" +
                                          obj.key + ".stp")
                save_stp.write(stable_poses)

                candidate_grasp_info = candidate_grasps_dict[obj.key][
                    stable_poses[0].id]
                print("Stable pose id:", stable_poses[0].id)
                candidate_grasps = [g.grasp for g in candidate_grasp_info]

                # save candidate grasp info
                pkl.dump(
                    candidate_grasps_dict[obj.key],
                    open("./data/meshes/dexnet/" + obj.key + ".pkl", 'wb'))

                # save grasp metrics
                grasp_metrics = dataset.grasp_metrics(obj.key,
                                                      candidate_grasps,
                                                      gripper=gripper.name)
                write_metrics = json.dumps(grasp_metrics)
                f = open("./data/meshes/dexnet/" + obj.key + ".json", "w")
                f.write(write_metrics)
                f.close()

            for i, stable_pose in enumerate(stable_poses):
                # render images if stable pose is valid
                if stable_pose.p > stable_pose_min_p:
                    # log progress
                    logging.info('Rendering images for object %s in %s' %
                                 (obj.key, stable_pose.id))

                    # add to category maps
                    if obj.key not in obj_category_map.keys():
                        obj_category_map[obj.key] = cur_obj_label
                    pose_category_map['%s_%s' %
                                      (obj.key,
                                       stable_pose.id)] = cur_pose_label

                    # read in candidate grasps and metrics
                    candidate_grasp_info = candidate_grasps_dict[obj.key][
                        stable_pose.id]
                    candidate_grasps = [
                        g.grasp for g in candidate_grasp_info
                    ]
                    grasp_metrics = dataset.grasp_metrics(
                        obj.key, candidate_grasps, gripper=gripper.name)

                    # compute object pose relative to the table
                    T_obj_stp = stable_pose.T_obj_table.as_frames(
                        'obj', 'stp')
                    T_obj_stp = obj.mesh.get_T_surface_obj(T_obj_stp)

                    # sample images from random variable
                    T_table_obj = RigidTransform(from_frame='table',
                                                 to_frame='obj')
                    scene_objs = {
                        'table': SceneObject(table_mesh, T_table_obj)
                    }
                    urv = UniformPlanarWorksurfaceImageRandomVariable(
                        obj.mesh,
                        render_modes,
                        'camera',
                        env_rv_params,
                        stable_pose=stable_pose,
                        scene_objs=scene_objs)

                    render_start = time.time()
                    render_samples = urv.rvs(
                        size=image_samples_per_stable_pose)
                    render_stop = time.time()
                    logging.info('Rendering images took %.3f sec' %
                                 (render_stop - render_start))

                    # visualize
                    if config['vis']['rendered_images']:
                        d = int(
                            np.ceil(np.sqrt(image_samples_per_stable_pose)))

                        # segmask
                        vis2d.figure()
                        for j, render_sample in enumerate(render_samples):
                            vis2d.subplot(d, d, j + 1)
                            vis2d.imshow(render_sample.renders[
                                RenderMode.SEGMASK].image)

                        # depth table
                        vis2d.figure()
                        for j, render_sample in enumerate(render_samples):
                            vis2d.subplot(d, d, j + 1)
                            vis2d.imshow(render_sample.renders[
                                RenderMode.DEPTH_SCENE].image)
                        vis2d.show()

                    # disabled debug visualization
                    if False:
                        # binary
                        fig = plt.figure(figsize=(8, 8))
                        fig.suptitle('SEGMASK')
                        for j, render_sample in enumerate(render_samples):
                            plt.subplot(d, d, j + 1)
                            plt.imshow(render_sample.renders[
                                RenderMode.SEGMASK].image.data)

                        # depth table
                        fig = plt.figure(figsize=(8, 8))
                        fig.suptitle('DEPTH SCENE')
                        for j, render_sample in enumerate(render_samples):
                            plt.subplot(d, d, j + 1)
                            plt.imshow(render_sample.renders[
                                RenderMode.DEPTH_SCENE].image.data)
                        plt.show()

                    # tally total amount of data
                    num_grasps = len(candidate_grasps)
                    num_images = image_samples_per_stable_pose
                    num_save = num_images * num_grasps
                    logging.info('Saving %d datapoints' % (num_save))

                    # for each candidate grasp on the object compute the
                    # projection of the grasp into image space
                    for render_sample in render_samples:
                        # read images
                        binary_im = render_sample.renders[
                            RenderMode.SEGMASK].image
                        depth_im_table = render_sample.renders[
                            RenderMode.DEPTH_SCENE].image

                        # read camera params
                        T_stp_camera = \
                            render_sample.camera.object_to_camera_pose
                        shifted_camera_intr = \
                            render_sample.camera.camera_intr

                        # read pixel offsets
                        cx = depth_im_table.center[1]
                        cy = depth_im_table.center[0]

                        # compute intrinsics for virtual camera of the
                        # final cropped and rescaled images
                        camera_intr_scale = float(im_final_height) / float(
                            im_crop_height)
                        cropped_camera_intr = shifted_camera_intr.crop(
                            im_crop_height, im_crop_width, cy, cx)
                        final_camera_intr = cropped_camera_intr.resize(
                            camera_intr_scale)

                        # create a thumbnail for each grasp
                        for grasp_info in candidate_grasp_info:
                            # read info
                            grasp = grasp_info.grasp
                            collision_free = grasp_info.collision_free

                            # get the gripper pose
                            T_obj_camera = T_stp_camera * \
                                T_obj_stp.as_frames(
                                    'obj', T_stp_camera.from_frame)
                            grasp_2d = grasp.project_camera(
                                T_obj_camera, shifted_camera_intr)

                            # center images on the grasp, rotate to image
                            # x axis
                            dx = cx - grasp_2d.center.x
                            dy = cy - grasp_2d.center.y
                            translation = np.array([dy, dx])

                            binary_im_tf = binary_im.transform(
                                translation, grasp_2d.angle)
                            depth_im_tf_table = depth_im_table.transform(
                                translation, grasp_2d.angle)

                            # crop to image size
                            binary_im_tf = binary_im_tf.crop(
                                im_crop_height, im_crop_width)
                            depth_im_tf_table = depth_im_tf_table.crop(
                                im_crop_height, im_crop_width)

                            # resize to final image size
                            binary_im_tf = binary_im_tf.resize(
                                (im_final_height, im_final_width),
                                interp='nearest')
                            depth_im_tf_table = depth_im_tf_table.resize(
                                (im_final_height, im_final_width))

                            # visualize the transformed images
                            if config['vis']['grasp_images']:
                                # plot 2D grasp image
                                grasp_center = Point(
                                    depth_im_tf_table.center,
                                    frame=final_camera_intr.frame)
                                tf_grasp_2d = Grasp2D(
                                    grasp_center,
                                    0,
                                    grasp_2d.depth,
                                    width=gripper.max_width,
                                    camera_intr=final_camera_intr)

                                vis2d.figure()
                                vis2d.subplot(2, 2, 1)
                                vis2d.imshow(binary_im)
                                vis2d.grasp(grasp_2d)
                                vis2d.subplot(2, 2, 2)
                                vis2d.imshow(depth_im_table)
                                vis2d.grasp(grasp_2d)
                                vis2d.subplot(2, 2, 3)
                                vis2d.imshow(binary_im_tf)
                                vis2d.grasp(tf_grasp_2d)
                                vis2d.subplot(2, 2, 4)
                                vis2d.imshow(depth_im_tf_table)
                                vis2d.grasp(tf_grasp_2d)
                                vis2d.title('Coll Free? %d' %
                                            (grasp_info.collision_free))
                                vis2d.show()

                            # disabled debug visualization
                            if False:
                                plt.figure(figsize=(8, 8))
                                plt.subplot(2, 2, 1)
                                plt.imshow(binary_im.data)
                                plt.title('Binary Image')
                                plt.subplot(2, 2, 2)
                                plt.imshow(depth_im_table.data)
                                plt.subplot(2, 2, 3)
                                plt.imshow(binary_im_tf.data)
                                plt.subplot(2, 2, 4)
                                plt.imshow(depth_im_tf_table.data)
                                plt.title('Coll Free? %d' %
                                          (grasp_info.collision_free))
                                plt.show()
                                plt.close()

                                # plot 3D visualization (the original also
                                # passed transform=T_obj_camera to
                                # add_subplot, which matplotlib does not
                                # accept, so it is dropped here)
                                fig = plt.figure()
                                ax = fig.add_subplot(111, projection='3d')
                                object_vertices = obj.mesh.trimesh.vertices
                                table_vertices = table_mesh.trimesh.vertices
                                # ax.plot_trisurf(
                                #     table_vertices[:, 0],
                                #     table_vertices[:, 1],
                                #     triangles=table_mesh.trimesh.faces,
                                #     Z=table_vertices[:, 2],
                                #     color='g')
                                ax.plot_trisurf(
                                    object_vertices[:, 0],
                                    object_vertices[:, 1],
                                    triangles=obj.mesh.trimesh.faces,
                                    Z=object_vertices[:, 2],
                                    color='b')
                                plt.show()

                            # disabled debug visualization
                            if False:
                                vis.figure()
                                T_obj_world = vis.mesh_stable_pose(
                                    obj.mesh.trimesh,
                                    stable_pose.T_obj_world,
                                    style='surface',
                                    dim=0.5)
                                vis.gripper(gripper,
                                            grasp,
                                            T_obj_world,
                                            color=(0.3, 0.3, 0.3))
                                vis.show()

                            # form hand pose array
                            hand_pose = np.r_[grasp_2d.center.y,
                                              grasp_2d.center.x,
                                              grasp_2d.depth,
                                              grasp_2d.angle,
                                              grasp_2d.center.y -
                                              shifted_camera_intr.cy,
                                              grasp_2d.center.x -
                                              shifted_camera_intr.cx,
                                              grasp_2d.width_px]

                            # store to data buffers
                            tensor_datapoint['depth_ims_tf_table'] = \
                                depth_im_tf_table.raw_data
                            tensor_datapoint['obj_masks'] = \
                                binary_im_tf.raw_data
                            tensor_datapoint['hand_poses'] = hand_pose
                            tensor_datapoint['collision_free'] = \
                                collision_free
                            tensor_datapoint['obj_labels'] = cur_obj_label
                            tensor_datapoint['pose_labels'] = \
                                cur_pose_label
                            tensor_datapoint['image_labels'] = \
                                cur_image_label

                            # items() for Python 3 (was iteritems())
                            for metric_name, metric_val in grasp_metrics[
                                    grasp.id].items():
                                coll_free_metric = \
                                    (1 * collision_free) * metric_val
                                tensor_datapoint[metric_name] = \
                                    coll_free_metric
                            tensor_dataset.add(tensor_datapoint)

                        # update image label
                        cur_image_label += 1

                    # update pose label
                    cur_pose_label += 1

                    # force clean up
                    gc.collect()

            # update object label
            cur_obj_label += 1

            # force clean up
            gc.collect()

    # save last file
    tensor_dataset.flush()

    # save category mappings
    obj_cat_filename = os.path.join(output_dir, 'object_category_map.json')
    json.dump(obj_category_map, open(obj_cat_filename, 'w'))
    pose_cat_filename = os.path.join(output_dir, 'pose_category_map.json')
    json.dump(pose_category_map, open(pose_cat_filename, 'w'))
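# A minimal usage sketch for generate_gqcnn_dataset. The paths, dataset
# name, and gripper name are illustrative; Hdf5Database is assumed to be
# importable in this script's environment and to take the database path as
# its first argument, and config['env_rv_params'] is assumed to hold the
# environment random-variable parameters:
def _example_generate_dataset():
    """Hedged usage sketch for generate_gqcnn_dataset."""
    from collections import OrderedDict
    from autolab_core import YamlConfig

    config = YamlConfig('cfg/tools/generate_gqcnn_dataset.yaml')
    database = Hdf5Database('data/example_database.hdf5')
    target_object_keys = OrderedDict([('mini_dexnet', 'all')])
    generate_gqcnn_dataset('data/training/example_dataset', database,
                           target_object_keys, config['env_rv_params'],
                           'yumi_metal_spline', config)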
y = idx % 200
q_value = pos_pred_n[x, y]
grasp_angle = 0.5 * math.atan2(sin_pred_n[x, y], cos_pred_n[x, y])
grasp_depth = depth_im.raw_data[x, y] + 0.03

# rescale the 200x200 prediction-grid coordinates about the center (100)
coords_y = ((y - 100) * 2) + 100
coords_x = ((x - 100) * 2) + 100
grasp = Grasp2D(Point(np.array([coords_y, coords_x])),
                grasp_angle,
                grasp_depth,
                width=50,
                camera_intr=camera_intr)

# grasp_angle_im = np.zeros((200, 200))
# for xx in range(200):
#     for yy in range(200):
#         grasp_angle_im[xx, yy] = 0.5 * math.atan2(sin_pred_n[xx, yy],
#                                                   cos_pred_n[xx, yy])
# axis = np.array([np.cos(grasp_angle_im[x, y]),
#                  np.sin(grasp_angle_im[x, y])])
# center = [y, x]
# g1p = center - (axis * 10)  # start location of grasp jaw 1
# g2p = center + (axis * 10)  # start location of grasp jaw 2
# plt.figure(figsize=(14, 4))
# plt.subplot(131)
# plt.title("DEPTH")
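# The grasp angle above is decoded from per-pixel sin(2*theta) and
# cos(2*theta) predictions: theta = 0.5 * atan2(sin, cos), which resolves
# the pi-periodicity of parallel-jaw grasp angles. A minimal sketch
# (helper name is illustrative):
def _example_decode_grasp_angle(sin_pred, cos_pred, x, y):
    """Recover the grasp angle at pixel (x, y) from sin/cos channels."""
    import math
    return 0.5 * math.atan2(sin_pred[x, y], cos_pred[x, y])

# e.g. with sin_pred[x, y] = sin(2 * 0.3) and cos_pred[x, y] = cos(2 * 0.3),
# _example_decode_grasp_angle returns ~0.3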
def visualize_tensor_dataset(dataset, config):
    """Visualizes a TensorDataset.

    Parameters
    ----------
    dataset : :obj:`TensorDataset`
        dataset to visualize
    config : :obj:`autolab_core.YamlConfig`
        parameters for visualization

    Notes
    -----
    Required parameters of config are specified in Other Parameters

    Other Parameters
    ----------------
    field_name : str
        name of the field in the TensorDataset to visualize (defaults to
        depth_ims_tf_table, which is a single view point cloud of the
        object on a table)
    field_type : str
        type of image that the field name corresponds to (defaults to
        depth, can also be `segmask` if using the field `object_masks`)
    print_fields : :obj:`list` of str
        names of additional fields to print to the command line
    filter : :obj:`dict` mapping str to :obj:`dict`
        constraints that all displayed datapoints must satisfy (supports
        any univariate field name as a key and numeric thresholds)
    gripper_width_px : float
        width of the gripper to plot in pixels
    font_size : int
        size of font on the rendered images
    """
    # shuffle the tensor indices
    indices = dataset.datapoint_indices
    print(len(indices))
    np.random.shuffle(indices)

    # read config
    field_name = config['field_name']
    field_type = config['field_type']
    font_size = config['font_size']
    print_fields = config['print_fields']
    gripper_width_px = config['gripper_width_px']

    num = 0
    for i, ind in enumerate(indices):
        datapoint = dataset[ind]

        # img_2 = datapoint['depth_ims_tf']  # not rotated, segmented
        img_1 = datapoint['depth_ims_tf_table_96']
        print(img_1)
        # img_3 = datapoint['depth_ims_raw']
        # img_4 = datapoint['depth_ims_raw_table']

        img_1 = DepthImage(img_1)
        # img_2 = DepthImage(img_2)
        # img_3 = DepthImage(img_3)
        # img_4 = DepthImage(img_4)

        # data = datapoint[field_name]
        # if field_type == RenderMode.SEGMASK:
        #     image = BinaryImage(data)
        # elif field_type == RenderMode.DEPTH:
        #     image = DepthImage(data)
        # else:
        #     raise ValueError('Field type %s not supported!' % (field_type))

        # skip_datapoint = False
        # for f, filter_cfg in config['filter'].items():
        #     data = datapoint[f]
        #     if 'greater_than' in filter_cfg.keys() and \
        #             data < filter_cfg['greater_than']:
        #         skip_datapoint = True
        #         break
        #     elif 'less_than' in filter_cfg.keys() and \
        #             data > filter_cfg['less_than']:
        #         skip_datapoint = True
        #         break
        # if skip_datapoint:
        #     continue

        # logging.info('DATAPOINT %d' % (num))
        # for f in print_fields:
        #     data = datapoint[f]
        #     # if f == "force_closure":
        #     #     logging.info('DATAPOINT %d. Force closure %f' % (num, data))
        #     coll_free_count[data] += 1
        #     logging.info('Field %s:' % (f))
        #     print(data)

        grasp_2d = Grasp2D(
            Point(np.r_[[
                datapoint['hand_configurations'][1],
                datapoint['hand_configurations'][0]
            ]]), datapoint['hand_configurations'][2])
        # grasp_2d = Grasp2D(Point(image.center), 0,
        #                    datapoint['hand_poses'][2])

        vis2d.figure()
        vis2d.imshow(img_1)
        vis2d.grasp(grasp_2d, width=gripper_width_px)
        # if field_type == RenderMode.RGBD:
        #     vis2d.subplot(1, 2, 1)
        #     vis2d.imshow(image.color)
        #     vis2d.grasp(grasp_2d, width=gripper_width_px)
        #     vis2d.subplot(1, 2, 2)
        #     vis2d.imshow(image.depth)
        #     vis2d.grasp(grasp_2d, width=gripper_width_px)
        # elif field_type == RenderMode.GD:
        #     vis2d.subplot(1, 2, 1)
        #     vis2d.imshow(image.gray)
        #     vis2d.grasp(grasp_2d, width=gripper_width_px)
        #     vis2d.subplot(1, 2, 2)
        #     vis2d.imshow(image.depth)
        #     vis2d.grasp(grasp_2d, width=gripper_width_px)
        # else:
        #     vis2d.imshow(image)
        #     vis2d.grasp(grasp_2d, width=gripper_width_px)
        vis2d.title('Datapoint %d: %s.' % (ind, field_type))
        vis2d.show()

        num += 1
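# A minimal usage sketch for visualize_tensor_dataset, assuming a
# serialized TensorDataset on disk (path is illustrative) and that
# TensorDataset.open loads a dataset directory, as in autolab_core; the
# config is a dict-like object with the fields documented in the docstring:
def _example_visualize_dataset():
    """Hedged usage sketch for visualize_tensor_dataset."""
    dataset = TensorDataset.open('data/training/example_dataset')
    config = {
        'field_name': 'depth_ims_tf_table',
        'field_type': 'depth',
        'font_size': 12,
        'print_fields': [],
        'filter': {},
        'gripper_width_px': 30,
    }
    visualize_tensor_dataset(dataset, config)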