    def squeeze_cloud(self, msg):
        cloud = orh.rospc_to_o3dpc(msg, remove_nans=True)
        cloud = cloud.voxel_down_sample(voxel_size=self.filter_size)
        cloud = orh.apply_pass_through_filter(cloud, self.ROI['x'],
                                              self.ROI['y'], self.ROI['z'])
        cloud = orh.o3dpc_to_rospc(cloud, frame_id=msg.header.frame_id)
        self.cloud_pub.publish(cloud)
    def callback(self, msg):

        # convert ros PointCloud2 msg to an open3d point cloud
        cloud = orh.rospc_to_o3dpc(msg, remove_nans=True)
        cloud = cloud.voxel_down_sample(voxel_size=self.voxel_size)
        cloud = orh.apply_pass_through_filter(cloud, self.ROI['x'],
                                              self.ROI['y'], self.ROI['z'])
        cloud = orh.o3dpc_to_rospc(cloud, frame_id=msg.header.frame_id)
        self.point_pub.publish(cloud)

        rospy.loginfo_once("Published the result as a topic. Check /filt_point2")
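
    # For reference, a minimal sketch of the same filter chain on a plain
    # Open3D cloud with no ROS conversions. The assumed self.ROI layout
    # (a dict of per-axis [min, max] bounds) is inferred from the calls
    # above, not confirmed by this snippet.
    def _filter_cloud_o3d(self, cloud_o3d):
        cloud_o3d = cloud_o3d.voxel_down_sample(voxel_size=self.voxel_size)
        pts = np.asarray(cloud_o3d.points)
        keep = np.ones(len(pts), dtype=bool)
        for axis, key in enumerate(("x", "y", "z")):
            lo, hi = self.ROI[key]
            keep &= (pts[:, axis] >= lo) & (pts[:, axis] <= hi)
        return cloud_o3d.select_by_index(np.flatnonzero(keep))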
    def inference(self, rgb, pc_msg):
        ## 1. Get rgb and point cloud
        start_time = time.time()
        camera_header = rgb.header
        rgb = self.bridge.imgmsg_to_cv2(rgb, desired_encoding='bgr8')
        (x1, x2, y1, y2) = self.params["roi"]
        rgb_img = cv2.resize(np.uint8(rgb)[y1:y2, x1:x2],
                             (self.params["width"], self.params["height"]),
                             interpolation=cv2.INTER_LINEAR)
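        # NOTE: self.params["roi"] is ordered (x1, x2, y1, y2), i.e. column
        # bounds before row bounds, matching the [y1:y2, x1:x2] crop above.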
        cloud_cam = orh.rospc_to_o3dpc(pc_msg, remove_nans=True)

        ## 2. Run centermask with client
        su.sendall_image(self.cm_sock, rgb_img)
        vis_img = su.recvall_image(self.cm_sock)
        pred_masks = su.recvall_pickle(
            self.cm_sock)  # [n_detect, 720, 1280] binary masks
        pred_boxes = su.recvall_pickle(self.cm_sock)  # (x1, y1, x2, y2)
        pred_classes = su.recvall_pickle(
            self.cm_sock)  # class ids in [0, num_categories)
        pred_scores = su.recvall_pickle(
            self.cm_sock)  # a vector of n_detect confidence scores
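        # The recv order above must mirror the server's send order exactly:
        # image, masks, boxes, classes, scores.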

        ## 3. Detect 2d pose
        poses_2d, vis_img = self.detect_2d_pose(rgb_img, vis_img, pred_classes,
                                                pred_boxes, pred_scores,
                                                pred_masks)
        # sort poses_2d by score, highest first
        if len(poses_2d) > 0:
            poses_2d = sorted(poses_2d, key=lambda k: k['score'], reverse=True)
            x1, x2, y1, y2 = poses_2d[0]["bbox"]
            vis_img = cv2.putText(vis_img, "Best Grasp",
                                  (int(x1) - 15, int(y2) + 20),
                                  cv2.FONT_HERSHEY_SIMPLEX, 0.7, (13, 13, 255),
                                  2, cv2.LINE_AA)
            vis_img = cv2.rectangle(vis_img, (int(x1), int(y1)),
                                    (int(x2), int(y2)), (13, 13, 255), 3)
        self.vis_is_pub.publish(self.bridge.cv2_to_imgmsg(vis_img))

        ## 4. Convert 2d pose to 3d pose
        cloud_cam.estimate_normals()
        header = camera_header
        header.frame_id = "map"
        pose_3d_array = self.convert_2d_to_3d_pose(header, poses_2d, cloud_cam)
        self.pose_array_pub.publish(pose_3d_array)
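
For reference, convert_2d_to_3d_pose is not shown in this example. The sketch
below illustrates one common way to lift a 2D detection to 3D: back-project
the pixel through pinhole intrinsics at a measured depth. The function name
and the intrinsics fx, fy, cx, cy are illustrative assumptions, not the API
used above.

import numpy as np

# Hypothetical helper: back-project pixel (u, v) at depth d (metres) into a
# camera-frame 3D point using pinhole intrinsics fx, fy, cx, cy.
def pixel_to_3d(u, v, d, fx, fy, cx, cy):
    x = (u - cx) * d / fx
    y = (v - cy) * d / fy
    return np.array([x, y, d])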
Example #4
    pub = rospy.Publisher("/pose_estimation/filtered_pointcloud",
                          sensor_msgs.msg.PointCloud2,
                          queue_size=1)
    scene_pcd = o3d.geometry.PointCloud()

    tfBuffer = tf2_ros.Buffer()
    listener = tf2_ros.TransformListener(tfBuffer)
    rate = rospy.Rate(5.0)
    time_start = time.time()
    dt_time = 0.0

    while not rospy.is_shutdown() and dt_time < 29.0:
        rate.sleep()
        # update elapsed time so the 29 s timeout can actually trigger
        dt_time = time.time() - time_start
        data = rospy.wait_for_message("/camera/depth/color/points",
                                      sensor_msgs.msg.PointCloud2)
        scene_pcd = orh.rospc_to_o3dpc(data, remove_nans=True)

        detected_roi = []
        for i in range(4):
            aruco_id_frame = "depth_aruco_id_" + str(i + 1)
            tf_transform = tfBuffer.lookup_transform(
                "camera_depth_optical_frame", aruco_id_frame, rospy.Time(0),
                rospy.Duration(0.5))
            detected_roi.append((tf_transform.transform.translation.x,
                                 tf_transform.transform.translation.y,
                                 tf_transform.transform.translation.z))
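        # The four ArUco frames are assumed to mark the corners of the work
        # volume; their translations bound an axis-aligned ROI in the depth
        # optical frame.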
        size_aruco = 0.04
        detected_roi = np.array(detected_roi)
        x_min, y_min, z_min = np.amin(detected_roi, axis=0)
        x_min_idx, y_min_idx, z_min_idx = np.argmin(detected_roi, axis=0)
        x_max, y_max, z_max = np.amax(detected_roi, axis=0)
        pose.append([
            0.1296740834731827, 0.6340179359143318, 0.460, -1.1278920483609907,
            2.826591377957774, -0.39990085541033094
        ])
        pose.append([0.40802, 0.51430, 0.480, 2.618, -1.057, -0.155])
        pose.append([
            0.487492080419456, 0.30558932581303366, 0.460, 3.137, 1.283, -0.185
        ])
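        # Each entry is a UR tool pose [x, y, z, rx, ry, rz]: position in
        # metres plus an axis-angle rotation vector, the format expected by
        # rtde_c.moveL below.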

    print(":: Reconstructing the scene.")
    start_time = time.time()
    for i in range(nb_views):
        view_frames.append("view" + str(i) + "_frame")
        rtde_c.moveL(pose[i], 0.2, 0.2)
        time.sleep(2)
        scene_pcd = orh.rospc_to_o3dpc(rospy.wait_for_message(
            "/pose_estimation/filtered_pointcloud",
            sensor_msgs.msg.PointCloud2),
                                       remove_nans=True)
        scene_view.append(scene_pcd)
        tf_transform_base_view = tfBuffer.lookup_transform(
            base_frame, depth_frame, rospy.Time())
        tf_views_frames.append(tf_transform_base_view)
        broadcaster_scene_view_frame(static_broadcaster, view_frames[i],
                                     tf_transform_base_view)
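        # Save each raw view to disk; the per-view frames broadcast above
        # preserve the camera pose needed to stitch the views later.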
        o3d.io.write_point_cloud(
            "/home/carlos/Desktop/results/scene_view" + str(i) + ".pcd",
            scene_view[i])
        data_color = rospy.wait_for_message("/camera/color/image_raw",
                                            sensor_msgs.msg.Image)
        color_image = bridge.imgmsg_to_cv2(data_color,
                                           desired_encoding="passthrough")
        cv2.imwrite(