Example #1
    def plan_grasp(self,
                   depth,
                   rgb,
                   resetting=False,
                   camera_intr=None,
                   segmask=None):
        """
        Computes possible grasps.
        Parameters
        ----------
        depth: type `numpy`
            depth image
        rgb: type `numpy`
            rgb image
        camera_intr: type `perception.CameraIntrinsics`
            Intrinsic camera object.
        segmask: type `perception.BinaryImage`
            Binary segmask of detected object
        Returns
        -------
        type `GQCNNGrasp`
            Computed optimal grasp.
        """
        if "FC" in self.model:
            assert segmask is not None, \
                "Fully-Convolutional policy expects a segmask."
        if camera_intr is None:
            camera_intr_filename = os.path.join(
                os.path.dirname(os.path.realpath(__file__)),
                "gqcnn/data/calib/primesense/primesense.intr")
            camera_intr = CameraIntrinsics.load(camera_intr_filename)

        depth_im = DepthImage(depth, frame=camera_intr.frame)
        color_im = ColorImage(rgb, frame=camera_intr.frame)

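        # Restrict grasp sampling to pixels with valid depth readings.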
        valid_px_mask = depth_im.invalid_pixel_mask().inverse()
        if segmask is None:
            segmask = valid_px_mask
        else:
            segmask = segmask.mask_binary(valid_px_mask)

        # Inpaint.
        depth_im = depth_im.inpaint(
            rescale_factor=self.config["inpaint_rescale_factor"])
        # Aggregate color and depth images into a single
        # BerkeleyAutomation/perception `RgbdImage`.
        self.rgbd_im = RgbdImage.from_color_and_depth(color_im, depth_im)
        # Create an `RgbdImageState` with the `RgbdImage` and `CameraIntrinsics`.
        state = RgbdImageState(self.rgbd_im, camera_intr, segmask=segmask)

        # Set input sizes for fully-convolutional policy.
        if "FC" in self.model:
            self.policy_config["metric"]["fully_conv_gqcnn_config"][
                "im_height"] = depth_im.shape[0]
            self.policy_config["metric"]["fully_conv_gqcnn_config"][
                "im_width"] = depth_im.shape[1]

        return self.execute_policy(state, resetting)

    def get_state(self, depth, segmask):
        # Read images.
        depth_im = DepthImage(depth, frame=self.camera_intr.frame)
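        # The grasping policy consumes depth only; an all-zero color image
        # is enough to satisfy the RgbdImage interface.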
        color_im = ColorImage(np.zeros([depth_im.height, depth_im.width,
                                        3]).astype(np.uint8),
                              frame=self.camera_intr.frame)
        segmask = BinaryImage(segmask.astype(np.uint8) * 255,
                              frame=self.camera_intr.frame)

        # Inpaint.
        depth_im = depth_im.inpaint(rescale_factor=self.inpaint_rescale_factor)

        # Create state.
        rgbd_im = RgbdImage.from_color_and_depth(color_im, depth_im)
        state = RgbdImageState(rgbd_im, self.camera_intr, segmask=segmask)
        return state, rgbd_im
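
A minimal usage sketch for the two methods above (hypothetical: the planner class name, its construction, and the image files are placeholders, not part of the original):

    import numpy as np

    # Hypothetical driver; GraspPlanner and the .npy paths are illustrative only.
    planner = GraspPlanner()                # assumed to set self.model, self.config, ...
    depth = np.load("depth_0.npy")          # HxW float depth image, in meters
    rgb = np.load("rgb_0.npy")              # HxWx3 uint8 color image
    grasp = planner.plan_grasp(depth, rgb)  # returns a GQCNNGrasp per the docstring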
Example #3
def run_gqcnn(depth, seg_mask):
    best_angle = 0
    best_point = [0, 0]
    best_dist = 0
    # Wrap the raw arrays in perception image types.
    depth_im = DepthImage(depth.astype("float32"), frame=camera_intr.frame)
    color_im = ColorImage(np.zeros([imageWidth, imageHeight,
                                    3]).astype(np.uint8),
                          frame=camera_intr.frame)
    segmask = BinaryImage(seg_mask)
    rgbd_im = RgbdImage.from_color_and_depth(color_im, depth_im)
    state_gqcnn = RgbdImageState(rgbd_im, camera_intr, segmask=segmask)
    policy_start = time.time()
    try:
        action = policy(state_gqcnn)
        logger.info("Planning took %.3f sec" % (time.time() - policy_start))
        best_point = [action.grasp.center[0], action.grasp.center[1]]
        best_angle = np.degrees(float(action.grasp.angle))
    except Exception as inst:
        print(inst)

    return best_angle, best_point, best_dist
Example #4
def run_gqcnn(depth, seg_mask):
    best_angle = 0
    best_point = [0, 0]
    best_dist = 0
    depth_im = DepthImage(depth.astype("float32"), frame=camera_intr.frame)
    color_im = ColorImage(np.zeros([imageWidth, imageHeight,
                                    3]).astype(np.uint8),
                          frame=camera_intr.frame)
    segmask = BinaryImage(seg_mask)
    rgbd_im = RgbdImage.from_color_and_depth(color_im, depth_im)
    state_gqcnn = RgbdImageState(rgbd_im, camera_intr, segmask=segmask)
    policy_start = time.time()
    q_value = -1
    try:
        action = policy(state_gqcnn)
        best_point = [action.grasp.center[0], action.grasp.center[1]]
        best_angle = np.degrees(float(action.grasp.angle))
        q_value = action.q_value
        print("inner q-value:", q_value)

    except Exception as inst:
        print(inst)

    return q_value
Example #5
    if "input_images" in policy_config["vis"] and policy_config["vis"][
            "input_images"]:
        vis.figure(size=(10, 10))
        num_plot = 1
        if segmask is not None:
            num_plot = 2
        vis.subplot(1, num_plot, 1)
        vis.imshow(depth_im)
        if segmask is not None:
            vis.subplot(1, num_plot, 2)
            vis.imshow(segmask)
        vis.show()

    # Create state.
    rgbd_im = RgbdImage.from_color_and_depth(color_im, depth_im)
    state = RgbdImageState(rgbd_im, camera_intr, segmask=segmask)

    # Set input sizes for fully-convolutional policy.
    if fully_conv:
        policy_config["metric"]["fully_conv_gqcnn_config"][
            "im_height"] = depth_im.shape[0]
        policy_config["metric"]["fully_conv_gqcnn_config"][
            "im_width"] = depth_im.shape[1]

    # Init policy.
    if fully_conv:
        # TODO(vsatish): We should really be doing this in some factory policy.
        if policy_config["type"] == "fully_conv_suction":
            policy = FullyConvolutionalGraspingPolicySuction(policy_config)
        elif policy_config["type"] == "fully_conv_pj":
            policy = FullyConvolutionalGraspingPolicyParallelJaw(policy_config)
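
The snippet breaks off inside the policy-init branch; in the gqcnn example scripts the non-fully-convolutional case typically falls through to the sampling-based policies. A sketch of that fallback, assuming the same `policy_config` and the standard gqcnn policy classes (`RobustGraspingPolicy`, `CrossEntropyRobustGraspingPolicy`):

    else:
        # Fall back to the sampling-based policies used for standard
        # (non-fully-convolutional) GQ-CNN models.
        policy_type = "cem"
        if "type" in policy_config:
            policy_type = policy_config["type"]
        if policy_type == "ranking":
            policy = RobustGraspingPolicy(policy_config)
        elif policy_type == "cem":
            policy = CrossEntropyRobustGraspingPolicy(policy_config)
        else:
            raise ValueError("Invalid policy type: %s" % policy_type)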
Example #6
    def _plan_grasp(self,
                    color_im,
                    depth_im,
                    camera_intr,
                    bounding_box=None,
                    segmask=None):
        """Grasp planner request handler.

        Parameters
        ---------
        req: :obj:`ROS ServiceRequest`
            ROS `ServiceRequest` for grasp planner service.
        """
        rospy.loginfo("Planning Grasp")

        # Inpaint images.
        color_im = color_im.inpaint(
            rescale_factor=self.cfg["inpaint_rescale_factor"])
        depth_im = depth_im.inpaint(
            rescale_factor=self.cfg["inpaint_rescale_factor"])

        # Init segmask.
        if segmask is None:
            segmask = BinaryImage(255 *
                                  np.ones(depth_im.shape).astype(np.uint8),
                                  frame=color_im.frame)

        # Visualize.
        if self.cfg["vis"]["color_image"]:
            vis.imshow(color_im)
            vis.show()
        if self.cfg["vis"]["depth_image"]:
            vis.imshow(depth_im)
            vis.show()
        if self.cfg["vis"]["segmask"] and segmask is not None:
            vis.imshow(segmask)
            vis.show()

        # Aggregate color and depth images into a single
        # BerkeleyAutomation/perception `RgbdImage`.
        rgbd_im = RgbdImage.from_color_and_depth(color_im, depth_im)

        # Mask bounding box.
        if bounding_box is not None:
            # Calc bb parameters.
            min_x = bounding_box.minX
            min_y = bounding_box.minY
            max_x = bounding_box.maxX
            max_y = bounding_box.maxY

            # Clamp the box to the image so it doesn't exceed the image
            # height/width bounds.
            if min_x < 0:
                min_x = 0
            if min_y < 0:
                min_y = 0
            if max_x > rgbd_im.width:
                max_x = rgbd_im.width
            if max_y > rgbd_im.height:
                max_y = rgbd_im.height

            # Mask.
            bb_segmask_arr = np.zeros([rgbd_im.height, rgbd_im.width])
            bb_segmask_arr[min_y:max_y, min_x:max_x] = 255
            bb_segmask = BinaryImage(bb_segmask_arr.astype(np.uint8),
                                     segmask.frame)
            segmask = segmask.mask_binary(bb_segmask)

        # Visualize.
        if self.cfg["vis"]["rgbd_state"]:
            masked_rgbd_im = rgbd_im.mask_binary(segmask)
            vis.figure()
            vis.subplot(1, 2, 1)
            vis.imshow(masked_rgbd_im.color)
            vis.subplot(1, 2, 2)
            vis.imshow(masked_rgbd_im.depth)
            vis.show()

        # Create an `RgbdImageState` with the cropped `RgbdImage` and
        # `CameraIntrinsics`.
        rgbd_state = RgbdImageState(rgbd_im, camera_intr, segmask=segmask)

        # Execute policy.
        try:
            return self.execute_policy(rgbd_state, self.grasping_policy,
                                       self.grasp_pose_publisher,
                                       camera_intr.frame)
        except NoValidGraspsException:
            rospy.logerr(
                ("While executing policy found no valid grasps from sampled"
                 " antipodal point pairs. Aborting Policy!"))
            raise rospy.ServiceException(
                ("While executing policy found no valid grasps from sampled"
                 " antipodal point pairs. Aborting Policy!"))
Example #7
#%%
img = rosco.rgb
d = rosco.depth
#%%
plt.imshow(img)
#%%
plt.imshow(d)
#print(img)
#print(d)
#%%
color_im = ColorImage(img.astype(np.uint8), encoding="bgr8", frame='pcl')
depth_im = DepthImage(d.astype(np.float32), frame='pcl')
color_im = color_im.inpaint(rescale_factor=cfg['inpaint_rescale_factor'])
depth_im = depth_im.inpaint(rescale_factor=cfg['inpaint_rescale_factor'])
rgbd_im = RgbdImage.from_color_and_depth(color_im, depth_im)
rgbd_state = RgbdImageState(rgbd_im, camera_int)
#%%
grasp = grasp_policy(rgbd_state)
#%%
img2 = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img2 = cv2.circle(img2, (int(grasp.grasp.center.x), int(grasp.grasp.center.y)),
                  2, (255, 0, 0), 3)
plt.imshow(img2)

#%%
# Depth is a single-channel float image; convert it to 3 channels for display
# before drawing the grasp-center marker.
img3 = cv2.cvtColor(d.astype(np.float32), cv2.COLOR_GRAY2RGB)
img3 = cv2.circle(img3, (int(grasp.grasp.center.x), int(grasp.grasp.center.y)),
                  2, (255, 0, 0), 3)
plt.imshow(img3)
#%%
vis.figure(size=(16, 16))
vis.imshow(rgbd_im.color, vmin=0.5, vmax=2.5)
vis.grasp(grasp.grasp, scale=2.0, jaw_width=2.0, show_center=True,
          show_axis=True, color=plt.cm.RdYlBu(.1))
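
To lift the planar grasp to a 3D point, the grasp center can be deprojected at the predicted grasp depth; a minimal sketch, assuming `grasp.grasp` is a gqcnn `Grasp2D` whose `center` frame matches the `camera_int` intrinsics used above:

#%%
# Deproject the 2D grasp center at the predicted grasp depth (camera frame).
center_3d = camera_int.deproject_pixel(grasp.grasp.depth, grasp.grasp.center)
print(center_3d.data)  # [x, y, z] in meters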
Example #8
                              cy=intr.cy,
                              height=intr.height,
                              width=intr.width)
#camera_int.save('mySensor.intr')
#%%
cfg = YamlConfig('cfg/examples/replication/dex-net_2.0.yaml')
grasp_policy = CrossEntropyRobustGraspingPolicy(cfg['policy'])

#%%
#depth_im.threshold(front_thresh=0.00, rear_thresh=75.0)
color_im = color_im.inpaint(rescale_factor=cfg['inpaint_rescale_factor'])
depth_im = depth_im.inpaint(rescale_factor=cfg['inpaint_rescale_factor'])
depth_im.save('my_im_heoo9.npy')
depth_im.save('my_im_heoo9.png')
rgbd_im = RgbdImage.from_color_and_depth(color_im, depth_im)
rgbd_state = RgbdImageState(rgbd_im, cam.color_intrinsics)

#%%
cam.stop()
grasp = grasp_policy(rgbd_state)

#%%
#print(grasp.grasp.center.x)
#print(grasp.grasp.center.y)

#%%
vis.figure(size=(16, 16))
vis.imshow(rgbd_im.color, vmin=0.5, vmax=2.5)
vis.grasp(grasp.grasp,
          scale=2.0,
          jaw_width=2.0,
Example #9
    def plan_grasp(self, camera_data, n_candidates=1):
        """Grasp planner.

        Parameters
        ---------
        req: :obj:`ROS ServiceRequest`
            ROS `ServiceRequest` for grasp planner service.
        """

        self._camera_data = camera_data

        # --- Inpaint images --- #
        color_im = camera_data.rgb_img.inpaint(
            rescale_factor=self.cfg["inpaint_rescale_factor"])

        depth_im = camera_data.depth_img.inpaint(
            rescale_factor=self.cfg["inpaint_rescale_factor"])

        # --- Init segmask --- #
        if camera_data.seg_img is None:
            segmask = BinaryImage(255 *
                                  np.ones(depth_im.shape).astype(np.uint8),
                                  frame=color_im.frame)
        else:
            segmask = camera_data.seg_img

        # --- Aggregate color and depth images into a single
        # BerkeleyAutomation/perception `RgbdImage` --- #
        rgbd_im = RgbdImage.from_color_and_depth(color_im, depth_im)

        # --- Mask bounding box --- #
        if camera_data.bounding_box is not None:
            # Calc bb parameters.
            min_x = camera_data.bounding_box['min_x']
            min_y = camera_data.bounding_box['min_y']
            max_x = camera_data.bounding_box['max_x']
            max_y = camera_data.bounding_box['max_y']

            # Clamp the box to the image so it doesn't exceed the image
            # height/width bounds.
            if min_x < 0:
                min_x = 0
            if min_y < 0:
                min_y = 0
            if max_x > rgbd_im.width:
                max_x = rgbd_im.width
            if max_y > rgbd_im.height:
                max_y = rgbd_im.height

            # Mask.
            bb_segmask_arr = np.zeros([rgbd_im.height, rgbd_im.width])
            bb_segmask_arr[min_y:max_y, min_x:max_x] = 255
            bb_segmask = BinaryImage(bb_segmask_arr.astype(np.uint8),
                                     segmask.frame)
            segmask = segmask.mask_binary(bb_segmask)

        # --- Create an `RgbdImageState` with the cropped `RgbdImage` and
        # `CameraIntrinsics` --- #
        rgbd_state = RgbdImageState(rgbd_im, camera_data.intrinsic_params,
                                    segmask=segmask)

        # --- Execute policy --- #
        try:
            grasps_and_predictions = self.execute_policy(
                rgbd_state, self.grasping_policy,
                camera_data.intrinsic_params.frame, n_candidates)

            self._dexnet_gp = grasps_and_predictions

            # --- Project planar grasps to 3D space --- #
            grasps_6d = []
            for gp in grasps_and_predictions:

                # my method
                pose_6d = self.transform_grasp_to_6D(
                    gp[0], camera_data.intrinsic_params)
                pos = pose_6d[:3, 3]
                rot = pose_6d[:3, :3]
                grasp_6d = Grasp6D(position=pos, rotation=rot,
                                   width=gp[0].width, score=gp[1],
                                   ref_frame=camera_data.intrinsic_params.frame)

                grasps_6d.append(grasp_6d)

                # dexnet method --> needs autolab_core installed as catkin package
                # 6D_gp = gp[0].pose()

            self.grasp_poses = grasps_6d
            self.best_grasp = grasps_6d[0]

            self.visualize()

            return True

        except NoValidGraspsException:
            warnings.warn("While executing policy found no valid grasps from "
                          "sampled antipodal point pairs!")

            return False
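
For reference, a sketch of the "dexnet method" mentioned in the commented lines of the loop above, assuming `gp[0]` is a gqcnn `Grasp2D` whose `pose()` returns an `autolab_core` `RigidTransform` (an assumption consistent with that comment):

    # Hypothetical expansion of the commented "dexnet method" above.
    T_grasp_camera = gp[0].pose()     # grasp pose in the camera frame
    pos = T_grasp_camera.translation  # 3-vector position
    rot = T_grasp_camera.rotation     # 3x3 rotation matrix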
Example #10
def run_gqcnn():
    global depth_im
    global color_im
    global segmask
    global policy
    global prev_q_value
    global prev_depth
    global toggle
    global t
    global x
    global y
    global z
    global best_angle
    global depth_raw
    global num_find_loc
    global state
    global loc_count
    global no_find
    global center
    global q_value
    global angle
    global no_valid_grasp_count
    global no_valid_move_y
    global depth_frame
    global frames
    no_valid_grasp_count = 0
    time.sleep(3)
    depth_frame = frames.get_depth_frame()
    depth_intrin = depth_frame.profile.as_video_stream_profile().intrinsics
    best_angle = 0
    x = 0.0
    y = 0.5
    action = None
    num_find_loc = 0
    while True:
        try:
            #depth_im = depth_im.inpaint(rescale_factor=inpaint_rescale_factor)
            if "input_images" in policy_config["vis"] and policy_config["vis"][
                    "input_images"]:
                plt.pause(0.001)
                #pass
            # Create state.
            rgbd_im = RgbdImage.from_color_and_depth(color_im, depth_im)
            state_gqcnn = RgbdImageState(rgbd_im, camera_intr, segmask=segmask)

            # Query policy.
            policy_start = time.time()
            try:
                action = policy(state_gqcnn)
                logger.info("Planning took %.3f sec" %
                            (time.time() - policy_start))
                no_valid_grasp_count = 0
            except Exception as inst:
                print(inst)
                no_valid_grasp_count += 1
                time.sleep(0.3)
                continue  # No action was produced; skip the stale result.

            if policy_config["vis"]["final_grasp"]:
                center[0] = action.grasp.center[0]
                center[1] = action.grasp.center[1]
                depth = depth_frame.get_distance(int(center[0]),
                                                 int(center[1]))
                #depth_point = rs.rs2_deproject_pixel_to_point(depth_intrin, [int(center[0]), int(center[1])], depth)
                #print(depth_point)

                q_value = action.q_value
                angle = np.degrees(float(action.grasp.angle))
                x, y, z = convert_depth_frame_to_pointcloud(
                    int(center[0]), int(center[1]), depth)
                print("center : \t" + str(action.grasp.center))
                print("angle : \t" + str(action.grasp.angle))
                print("Depth : \t" + str(action.grasp.depth))

                print("\n\n\n\n\n\n\nQ_value : \t" + str(action.q_value))
                if (prev_q_value < action.q_value):
                    prev_q_value = action.q_value
                    prev_depth = action.grasp.depth
                    best_center[0] = action.grasp.center[0]
                    best_center[1] = action.grasp.center[1]
                    depth = depth_frame.get_distance(int(best_center[0]),
                                                     int(best_center[1]))

                    #depth_point = rs.rs2_deproject_pixel_to_point(depth_intrin, [int(best_center[0]), int(best_center[1])], depth)
                    #print(depth_point)
                    #convert_data_ = convert_2d_2d(int(best_center[0]),int(best_center[1]))
                    #convert_data=convert_2d_3d(int(convert_data_[0]),int(convert_data_[1]))
                    x, y, z = convert_depth_frame_to_pointcloud(
                        int(best_center[0]), int(best_center[1]), depth)

                    best_angle = np.degrees(float(action.grasp.angle))

                    print("gqcnn_best_center : \t" + str(x) + "," + str(y))
                    print("best_angle : \t" + str(best_angle))
                    print("\n\n\n\n\n\n\nbest_Q_value : \t" +
                          str(action.q_value))
                    print("XYZ : \t" + str(x) + "\t" + str(y) + "\t" + str(z))

                num_find_loc = num_find_loc + 1
                if num_find_loc > 5:
                    prev_q_value = 0
                    x = 0
                    y = 0
                    z = 0
                    num_find_loc = 0
                    best_angle = 0
                    best_center[0] = 0
                    best_center[1] = 0
                time.sleep(0.01)
        except Exception as inst:
            print(inst)
            time.sleep(1)
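
The commented-out `rs2_deproject_pixel_to_point` calls above are the librealsense equivalent of `convert_depth_frame_to_pointcloud`; a minimal sketch, assuming `depth_intrin` from the stream profile as computed at the top of the function:

import pyrealsense2 as rs

# depth is the meters value from depth_frame.get_distance() at the grasp pixel.
point_3d = rs.rs2_deproject_pixel_to_point(
    depth_intrin, [int(center[0]), int(center[1])], depth)
x, y, z = point_3d  # camera-frame coordinates, in meters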
Example #11
    def _plan_grasp(self,
                    color_im,
                    depth_im,
                    camera_intr,
                    bounding_box=None,
                    segmask=None):
        """ Grasp planner request handler .
        
        Parameters
        ---------
        req: :obj:`ROS ServiceRequest`
            ROS ServiceRequest for grasp planner service
        """
        rospy.loginfo('Planning Grasp')

        # inpaint images
        color_im = color_im.inpaint(
            rescale_factor=self.cfg['inpaint_rescale_factor'])
        depth_im = depth_im.inpaint(
            rescale_factor=self.cfg['inpaint_rescale_factor'])

        # init segmask
        if segmask is None:
            segmask = BinaryImage(255 *
                                  np.ones(depth_im.shape).astype(np.uint8),
                                  frame=color_im.frame)

        # visualize
        if self.cfg['vis']['color_image']:
            vis.imshow(color_im)
            vis.show()
        if self.cfg['vis']['depth_image']:
            vis.imshow(depth_im)
            vis.show()
        if self.cfg['vis']['segmask'] and segmask is not None:
            vis.imshow(segmask)
            vis.show()

        # aggregate color and depth images into a single perception RgbdImage
        rgbd_im = RgbdImage.from_color_and_depth(color_im, depth_im)

        # mask bounding box
        if bounding_box is not None:
            # calc bb parameters
            min_x = bounding_box.minX
            min_y = bounding_box.minY
            max_x = bounding_box.maxX
            max_y = bounding_box.maxY

            # clamp the box to the image so it doesn't exceed height/width bounds
            if min_x < 0:
                min_x = 0
            if min_y < 0:
                min_y = 0
            if max_x > rgbd_im.width:
                max_x = rgbd_im.width
            if max_y > rgbd_im.height:
                max_y = rgbd_im.height

            # mask
            bb_segmask_arr = np.zeros([rgbd_im.height, rgbd_im.width])
            bb_segmask_arr[min_y:max_y, min_x:max_x] = 255
            bb_segmask = BinaryImage(bb_segmask_arr.astype(np.uint8),
                                     segmask.frame)
            segmask = segmask.mask_binary(bb_segmask)

        # visualize
        if self.cfg['vis']['rgbd_state']:
            masked_rgbd_im = rgbd_im.mask_binary(segmask)
            vis.figure()
            vis.subplot(1, 2, 1)
            vis.imshow(masked_rgbd_im.color)
            vis.subplot(1, 2, 2)
            vis.imshow(masked_rgbd_im.depth)
            vis.show()

        # create an RGBDImageState with the cropped RGBDImage and CameraIntrinsics
        rgbd_state = RgbdImageState(rgbd_im, camera_intr, segmask=segmask)

        # execute policy
        try:
            return self.execute_policy(rgbd_state, self.grasping_policy,
                                       self.grasp_pose_publisher,
                                       camera_intr.frame)
        except NoValidGraspsException:
            rospy.logerr(
                'While executing policy found no valid grasps from sampled antipodal point pairs. Aborting Policy!'
            )
            raise rospy.ServiceException(
                'While executing policy found no valid grasps from sampled antipodal point pairs. Aborting Policy!'
            )
Example #12
    def sample_grasp(self, env, mesh_obj, vis=True):
        # Setup sensor.
        #camera_intr = CameraIntrinsics.load(camera_intr_filename)

        # Read images.
        # get depth image from camera

        inpaint_rescale_factor = 1.0
        segmask_filename = None

        _, center, extents, obj_xquat, bbox_xpos = self.get_bbox_properties(
            env, mesh_obj)
        camera_pos = copy.deepcopy(center)
        camera_pos[2] += 0.5 * extents[2] + 0.2

        env.set_camera(camera_pos,
                       np.array([0.7071068, 0, 0, -0.7071068]),
                       camera_name="ext_camera_0")
        rgb, depth = env.render_from_camera(int(self.config.camera_img_height),
                                            int(self.config.camera_img_width),
                                            camera_name="ext_camera_0")
        # enforce zoom out

        from scipy.interpolate import interp2d
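        # NOTE: scipy.interpolate.interp2d is deprecated in recent SciPy
        # releases; RegularGridInterpolator is the suggested replacement.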

        center_x = self.config.camera_img_height / 2 + 1
        center_y = self.config.camera_img_width / 2 + 1
        img_height = self.config.camera_img_height
        img_width = self.config.camera_img_width
        xdense = np.linspace(0, img_height - 1, img_height)
        ydense = np.linspace(0, img_width - 1, img_width)
        xintr = (xdense - center_x) * (1.0 / self.rescale_factor) + center_x
        yintr = (ydense - center_y) * (1.0 / self.rescale_factor) + center_y
        xintr[xintr < 0] = 0
        xintr[xintr > (img_height - 1)] = img_height - 1
        yintr[yintr < 0] = 0
        yintr[yintr > (img_width - 1)] = img_width - 1

        fr = interp2d(xdense, ydense, rgb[:, :, 0], kind="linear")
        rgb_r_new = fr(xintr, yintr)
        fg = interp2d(xdense, ydense, rgb[:, :, 1], kind="linear")
        rgb_g_new = fg(xintr, yintr)
        fb = interp2d(xdense, ydense, rgb[:, :, 2], kind="linear")
        rgb_b_new = fb(xintr, yintr)
        rgb_new = np.stack([rgb_r_new, rgb_g_new, rgb_b_new], axis=2)

        fd = interp2d(xdense, ydense, depth, kind="linear")
        depth_new = fd(xintr, yintr)

        #from skimage.transform import resize
        #rgb22, depth2 = env.render_from_camera(int(self.config.camera_img_height) , int(self.config.camera_img_width), camera_name=f"ext_camera_0")

        #import ipdb; ipdb.set_trace()

        # visualize the interpolation
        #import imageio
        #imageio.imwrite(f"tmp/rgb_{self.iter_id}.png", rgb)
        #imageio.imwrite(f"tmp/rgb2_{self.iter_id}.png", rgb_new)
        #imageio.imwrite(f"tmp/depth_{self.iter_id}.png", depth)
        #imageio.imwrite(f"tmp/depth2_{self.iter_id}.png", depth_new)
        #import ipdb; ipdb.set_trace()

        rgb = rgb_new
        depth = depth_new

        depth = depth * self.rescale_factor

        # rgb: 128 x 128 x 1
        # depth: 128 x 128 x 1
        scaled_camera_fov_y = self.config.camera_fov_y
        aspect = 1
        scaled_fovx = 2 * np.arctan(
            np.tan(np.deg2rad(scaled_camera_fov_y) * 0.5) * aspect)
        scaled_fovx = np.rad2deg(scaled_fovx)
        scaled_fovy = scaled_camera_fov_y

        cx = self.config.camera_img_width * 0.5
        cy = self.config.camera_img_height * 0.5
        scaled_fx = cx / np.tan(np.deg2rad(
            scaled_fovx / 2.)) * (self.rescale_factor)
        scaled_fy = cy / np.tan(np.deg2rad(
            scaled_fovy / 2.)) * (self.rescale_factor)

        camera_intr = CameraIntrinsics(frame='phoxi',
                                       fx=scaled_fx,
                                       fy=scaled_fy,
                                       cx=self.config.camera_img_width * 0.5,
                                       cy=self.config.camera_img_height * 0.5,
                                       height=self.config.camera_img_height,
                                       width=self.config.camera_img_width)

        depth_im = DepthImage(depth, frame=camera_intr.frame)
        color_im = ColorImage(np.zeros([depth_im.height, depth_im.width,
                                        3]).astype(np.uint8),
                              frame=camera_intr.frame)

        # Optionally read a segmask.

        valid_px_mask = depth_im.invalid_pixel_mask().inverse()
        segmask = valid_px_mask

        # Inpaint.
        depth_im = depth_im.inpaint(rescale_factor=inpaint_rescale_factor)

        # Create state.
        rgbd_im = RgbdImage.from_color_and_depth(color_im, depth_im)
        state = RgbdImageState(rgbd_im, camera_intr, segmask=segmask)

        # Query policy.
        policy_start = time.time()
        action = self.policy(state)
        print("Planning took %.3f sec" % (time.time() - policy_start))
        # Grasp center as a length-2 pixel-coordinate array (u, v).
        grasp_center = action.grasp.center._data[:, 0]
        # Undo the depth rescaling applied before planning.
        grasp_depth = action.grasp.depth * (1 / self.rescale_factor)
        grasp_angle = action.grasp.angle

        if self.config.data_collection_mode:

            self.current_grasp = action.grasp

            depth_im = state.rgbd_im.depth
            scale = 1.0
            depth_im_scaled = depth_im.resize(scale)
            translation = scale * np.array([
                depth_im.center[0] - grasp_center[1],
                depth_im.center[1] - grasp_center[0]
            ])
            im_tf = depth_im_scaled
            im_tf = depth_im_scaled.transform(translation, grasp_angle)
            im_tf = im_tf.crop(self.gqcnn_image_size, self.gqcnn_image_size)

            # get the patch
            self.current_patch = im_tf.raw_data

        XYZ_origin, gripper_quat = self.compute_grasp_pts_from_grasp_sample(
            grasp_center, grasp_depth, grasp_angle, env)

        # Vis final grasp.
        if vis:
            from visualization import Visualizer2D as vis2d
            vis2d.figure(size=(10, 10))
            vis2d.imshow(rgbd_im.depth,
                         vmin=self.policy_config["vis"]["vmin"],
                         vmax=self.policy_config["vis"]["vmax"])
            vis2d.grasp(action.grasp,
                        scale=2.5,
                        show_center=False,
                        show_axis=True)
            vis2d.title("Planned grasp at depth {0:.3f}m with Q={1:.3f}".format(
                action.grasp.depth, action.q_value))
            vis2d.show(f"tmp/grasp2_{mesh_obj.name}_{self.iter_id}.png")

            if self.config.data_collection_mode:
                # The cropped grasp patch only exists in data-collection mode.
                vis2d.figure(size=(10, 10))
                vis2d.imshow(im_tf,
                             vmin=self.policy_config["vis"]["vmin"],
                             vmax=self.policy_config["vis"]["vmax"])
                vis2d.show(f"tmp/cropd_{mesh_obj.name}_{self.iter_id}.png")

        return XYZ_origin[:, 0], gripper_quat
Example #13
def run_gqcnn():
    global depth_im
    global color_im
    global segmask
    global policy
    global prev_q_value
    global prev_depth
    global toggle
    global t
    global x
    global y
    global z
    global best_angle
    global depth_raw
    global num_find_loc
    global state
    global loc_count
    global no_find
    global center
    global q_value
    global angle
    global no_valid_grasp_count
    global no_valid_move_y
    no_valid_grasp_count = 0
    num_find_loc = 0
    best_angle = 0
    x = 0.0
    y = 0.5
    z = 0
    time.sleep(5)
    while True:
        try:
            print("GQCNN IS RUNNING")
            #depth_im = depth_im.inpaint(rescale_factor=inpaint_rescale_factor)
            if "input_images" in policy_config["vis"] and policy_config["vis"][
                    "input_images"]:
                plt.pause(0.001)
                #pass
            # Create state.
            print("GQCNN IS RUNNING2")
            rgbd_im = RgbdImage.from_color_and_depth(color_im, depth_im)
            state_gqcnn = RgbdImageState(rgbd_im, camera_intr, segmask=segmask)

            # Query policy.
            policy_start = time.time()
            try:
                action = policy(state_gqcnn)
                logger.info("Planning took %.3f sec" %
                            (time.time() - policy_start))
                no_valid_grasp_count = 0
            except Exception as inst:
                print(inst)
                no_valid_grasp_count += 1
                time.sleep(0.3)
                continue  # No action was produced; skip this iteration.
            print("GQCNN IS RUNNING3")
            if policy_config["vis"]["final_grasp"]:
                print("GQCNN IS RUNNING4")
                center[0] = action.grasp.center[0]
                center[1] = action.grasp.center[1]
                q_value = action.q_value
                angle = np.degrees(float(action.grasp.angle))
                print("center : \t" + str(action.grasp.center))
                print("angle : \t" + str(action.grasp.angle))
                print("Depth : \t" + str(action.grasp.depth))
                print("XYZ : \t" + str(x) + " , " + str(y) + " , " + str(z))
                print("\n\n\n\n\n\n\nQ_value : \t" + str(action.q_value))
                if (prev_q_value < action.q_value):
                    prev_q_value = action.q_value
                    prev_depth = action.grasp.depth
                    best_center[0] = action.grasp.center[0]
                    best_center[1] = action.grasp.center[1]

                    best_angle = np.degrees(float(action.grasp.angle))

                    print("gqcnn_best_center : \t" + str(x) + "," + str(y))
                    print("best_angle : \t" + str(best_angle))
                    print("\n\n\n\n\n\n\nbest_Q_value : \t" +
                          str(action.q_value))
                    print("XYZ : \t" + str(x) + str(y) + str(z))
                num_find_loc = num_find_loc + 1
                if num_find_loc > 5:
                    prev_q_value = 0
                    x = 0
                    y = 0
                    z = 0
                    num_find_loc = 0
                    best_angle = 0
                    best_center[0] = 0
                    best_center[1] = 0
                time.sleep(0.01)
        except Exception as inst:
            print(inst)
            time.sleep(1)
Example #14
def run_gqcnn():
    global depth_im
    global color_im
    global segmask
    global policy
    global im3
    global ax3
    global prev_q_value
    global prev_depth
    global toggle
    global t
    global x
    global y
    global z
    global best_angle
    global depth_raw
    global im1
    global ax1
    global num_find_loc
    global state
    global loc_count
    global no_find
    global center
    global q_value
    global angle
    global no_valid_grasp_count
    global no_valid_move_y
    no_valid_grasp_count = 0

    best_angle = 0
    x = 0.0
    y = 0.5
    while True:
        try:
            print("GQCNN IS RUNNING")
            #depth_im = depth_im.inpaint(rescale_factor=inpaint_rescale_factor)
            if "input_images" in policy_config["vis"] and policy_config["vis"][
                    "input_images"]:
                im1.set_data(depth_im)
                plt.pause(0.001)
                #pass
            # Create state.
            print("GQCNN IS RUNNING2")
            rgbd_im = RgbdImage.from_color_and_depth(color_im, depth_im)
            state_gqcnn = RgbdImageState(rgbd_im, camera_intr, segmask=segmask)

            # Query policy.
            policy_start = time.time()
            try:
                action = policy(state_gqcnn)
                logger.info("Planning took %.3f sec" %
                            (time.time() - policy_start))
                no_valid_grasp_count = 0
            except Exception:
                no_valid_grasp_count += 1
                time.sleep(0.3)
                continue  # No action was produced; skip this iteration.
            # Vis final grasp.
            if policy_config["vis"]["final_grasp"]:
                # vis.imshow(segmask,
                #           vmin=policy_config["vis"]["vmin"],
                #           vmax=policy_config["vis"]["vmax"])
                #im3.set_data(rgbd_im.depth)

                center[0] = action.grasp.center[0] + 96
                center[1] = action.grasp.center[1] + 96
                q_value = action.q_value
                angle = np.degrees(float(action.grasp.angle))
                print("center : \t" + str(action.grasp.center))
                print("angle : \t" + str(action.grasp.angle))
                print("Depth : \t" + str(action.grasp.depth))
                if (prev_q_value < action.q_value):

                    #vis.grasp(action.grasp, scale=1, show_center=True, show_axis=True)
                    #vis.title("Planned grasp at depth {0:.3f}m with Q={1:.3f}".format(
                    #    action.grasp.depth, action.q_value))
                    prev_q_value = action.q_value
                    prev_depth = action.grasp.depth
                    best_center[0] = action.grasp.center[0] + 96
                    best_center[1] = action.grasp.center[1] + 96
                    convert_data = convert_2d_3d(int(best_center[0]),
                                                 int(best_center[1]))
                    x = -1 * convert_data[0] / 1000 + no_valid_move_y
                    y = (-1 * convert_data[1] / 1000) + 0.41
                    z = convert_data[2] / 1000

                    # x=-(best_center[0]-592)*0.00065625 +0.00#592
                    # y=-(best_center[1]-361)*0.000673611+0.46   #361
                    best_angle = np.degrees(float(action.grasp.angle))

                    print("gqcnn_best_center : \t" + str(x) + "," + str(y))
                    print("best_angle : \t" + str(best_angle))
                    print("Q_value : \t" + str(action.q_value))
                num_find_loc = num_find_loc + 1

                # vis.show()
                #plt.show()
                time.sleep(0.3)
                #plt.close()
        except Exception:
            time.sleep(1)