Example #1
def main(args):
    # set logging
    logging.getLogger().setLevel(logging.INFO)
    rospy.init_node("ensenso_reader", anonymous=True)

    num_frames = 10
    sensor = RgbdSensorFactory("ensenso", cfg={"frame": "ensenso"})
    sensor.start()

    total_time = 0
    for i in range(num_frames):
        if i > 0:
            start_time = time.time()

        _, depth_im, _ = sensor.frames()

        if i > 0:
            total_time += time.time() - start_time
            print("Frame %d" % (i))
            print("Avg FPS: %.5f" % (float(i) / total_time))

    depth_im = sensor.median_depth_img(num_img=5)
    point_cloud = sensor.ir_intrinsics.deproject(depth_im)
    point_cloud.remove_zero_points()

    sensor.stop()

    vis2d.figure()
    vis2d.imshow(depth_im)
    vis2d.title("Ensenso - Raw")
    vis2d.show()

    vis3d.figure()
    vis3d.points(point_cloud, random=True, subsample=10, scale=0.0025)
    vis3d.show()
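All of these snippets omit their import blocks. A minimal sketch of the imports Example #1 appears to rely on (assumed layout for the Berkeley AutoLab perception and visualization packages):

import logging
import time

import rospy

from perception import RgbdSensorFactory  # assumed import path for the sensor factory
from visualization import Visualizer2D as vis2d
from visualization import Visualizer3D as vis3d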
Example #2
def test_GQCNN():

    config = YamlConfig(
        '/home/baymax/catkin_ws/src/jaco_manipulation/cfg/grasp_test.yaml')

    # create rgbd sensor
    rospy.loginfo('Creating RGBD Sensor')
    sensor_cfg = config['sensor_cfg']
    sensor_type = sensor_cfg['type']
    sensor = RgbdSensorFactory.sensor(sensor_type, sensor_cfg)
    sensor.start()
    rospy.loginfo('Sensor Running')

    # setup safe termination
    def handler(signum, frame):
        rospy.loginfo('caught CTRL+C, exiting...')
        if sensor is not None:
            sensor.stop()
        if robot is not None:
            robot.stop()
        if subscriber is not None and subscriber._started:
            subscriber.stop()
        exit(0)

    signal.signal(signal.SIGINT, handler)

    planner = GQCNNPlanner(sensor, config)
    # rospy.sleep(10)
    planner.get_grasp_plan("cup")
Example #3
def main(args):
    # from visualization import Visualizer2D as vis2d
    # from visualization import Visualizer3D as vis3d
    import matplotlib.pyplot as vis2d

    # set logging
    logging.getLogger().setLevel(logging.DEBUG)
    rospy.init_node("kinect_reader", anonymous=True)

    num_frames = 5
    sensor_cfg = {
        "quality": Kinect2BridgedQuality.HD,
        "frame": "kinect2_rgb_optical_frame",
    }
    sensor_type = "bridged_kinect2"
    sensor = RgbdSensorFactory.sensor(sensor_type, sensor_cfg)
    sensor.start()

    def handler(signum, frame):
        rospy.loginfo("caught CTRL+C, exiting...")
        if sensor is not None:
            sensor.stop()
        exit(0)

    signal.signal(signal.SIGINT, handler)

    total_time = 0
    for i in range(num_frames):
        if i > 0:
            start_time = time.time()

        _, depth_im, _ = sensor.frames()

        if i > 0:
            total_time += time.time() - start_time
            logging.info("Frame %d" % (i))
            logging.info("Avg FPS: %.5f" % (float(i) / total_time))

    depth_im = sensor.median_depth_img(num_img=5)
    color_im, depth_im, _ = sensor.frames()

    sensor.stop()

    vis2d.figure()
    vis2d.subplot("211")
    vis2d.imshow(depth_im.data)
    vis2d.title("Kinect - depth Raw")

    vis2d.subplot("212")
    vis2d.imshow(color_im.data)
    vis2d.title("kinect color")
    vis2d.show()
Example #4
def main():
    logging.getLogger().setLevel(logging.INFO)

    # parse args
    parser = argparse.ArgumentParser(description='Register a webcam to the Photoneo PhoXi')
    parser.add_argument('--config_filename', type=str, default='cfg/tools/colorize_phoxi.yaml', help='filename of a YAML configuration for registration')
    args = parser.parse_args()
    config_filename = args.config_filename
    config = YamlConfig(config_filename)

    sensor_data = config['sensors']
    phoxi_config = sensor_data['phoxi']
    phoxi_config['frame'] = 'phoxi'

    # Initialize ROS node
    rospy.init_node('colorize_phoxi', anonymous=True)
    logging.getLogger().addHandler(rl.RosStreamHandler())

    # Get PhoXi sensor set up
    phoxi = RgbdSensorFactory.sensor(phoxi_config['type'], phoxi_config)
    phoxi.start()

    # Capture PhoXi and webcam images
    phoxi_color_im, phoxi_depth_im, _ = phoxi.frames()

    # vis2d.figure()
    # vis2d.subplot(121)
    # vis2d.imshow(phoxi_color_im)
    # vis2d.subplot(122)
    # vis2d.imshow(phoxi_depth_im)
    # vis2d.show()

    phoxi_pc = phoxi.ir_intrinsics.deproject(phoxi_depth_im)
    colors = phoxi_color_im.data.reshape((phoxi_color_im.shape[0] * phoxi_color_im.shape[1], phoxi_color_im.shape[2])) / 255.0
    vis3d.figure()
    vis3d.points(phoxi_pc.data.T[::3], color=colors[::3], scale=0.001)
    vis3d.show()

    # Export to PLY file
    vertices = phoxi.ir_intrinsics.deproject(phoxi_depth_im).data.T
    colors = phoxi_color_im.data.reshape(phoxi_color_im.data.shape[0] * phoxi_color_im.data.shape[1], phoxi_color_im.data.shape[2])
    with open('pcloud.ply', 'w') as f:
        f.write('ply\nformat ascii 1.0\nelement vertex {}\nproperty float x\nproperty float y\nproperty float z\nproperty uchar red\n'.format(len(vertices)) +
                'property uchar green\nproperty uchar blue\nend_header\n')
        for v, c in zip(vertices, colors):
            f.write('{} {} {} {} {} {}\n'.format(v[0], v[1], v[2], c[0], c[1], c[2]))
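Note that the element vertex count written into the PLY header must match the number of point rows written below it, and the color values here are kept as raw uint8 (0-255), as the uchar properties in the header require; the 0-1 normalized copy computed earlier is only used for the vis3d display.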
Example #5
def main():
    ids = discover_cams()
    assert ids, "[!] No camera detected."

    cfg = {}
    cfg["cam_id"] = ids[0]
    cfg["filter_depth"] = True
    cfg["frame"] = "realsense_overhead"

    sensor = RgbdSensorFactory.sensor("realsense", cfg)
    sensor.start()
    camera_intr = sensor.color_intrinsics
    color_im, depth_im, _ = sensor.frames()
    sensor.stop()

    print("intrinsics matrix: {}".format(camera_intr.K))

    _, axes = plt.subplots(1, 2)
    for ax, im in zip(axes, [color_im.data, depth_im.data]):
        ax.imshow(im)
        ax.axis("off")
    plt.show()
Example #6
    T_cb_world = RigidTransform.load(config['chessboard_tf'])

    # initialize node
    rospy.init_node('register_camera', anonymous=True)

    # get camera sensor object
    for sensor_frame, sensor_data in config['sensors'].items():
        sensor_config = sensor_data['sensor_config']
        registration_config = sensor_data['registration_config'].copy()
        registration_config.update(config['chessboard_registration'])

        # open sensor
        try:
            sensor_type = sensor_config['type']
            sensor_config['frame'] = sensor_frame
            sensor = RgbdSensorFactory.sensor(sensor_type, sensor_config)
            logging.info('Starting sensor')
            sensor.start()
            ir_intrinsics = sensor.ir_intrinsics
            logging.info('Sensor initialized')

            # register
            reg_result = CameraChessboardRegistration.register(sensor, registration_config)
            T_camera_world = T_cb_world * reg_result.T_camera_cb

            logging.info('Final Result for sensor %s' %(sensor_frame))
            logging.info('Rotation: ')
            logging.info(T_camera_world.rotation)
            logging.info('Quaternion: ')
            logging.info(T_camera_world.quaternion)
            logging.info('Translation: ')
            logging.info(T_camera_world.translation)
Example #7
    if not os.path.exists(output_dir):
        os.mkdir(output_dir)

    # read config
    config = YamlConfig(config_filename)
    sensor_type = config['sensor']['type']
    sensor_frame = config['sensor']['frame']

    # read camera calib
    tf_filename = '%s_to_world.tf' % (sensor_frame)
    T_camera_world = RigidTransform.load(
        os.path.join(config['calib_dir'], sensor_frame, tf_filename))
    T_camera_world.save(os.path.join(output_dir, tf_filename))

    # setup sensor
    sensor = RgbdSensorFactory.sensor(sensor_type, config['sensor'])

    # start the sensor
    sensor.start()
    color_intrinsics = sensor.color_intrinsics
    ir_intrinsics = sensor.ir_intrinsics
    color_intrinsics.save(
        os.path.join(output_dir, '%s_color.intr' % (sensor.frame)))
    ir_intrinsics.save(os.path.join(output_dir, '%s_ir.intr' % (sensor.frame)))

    # get raw images
    for i in range(config['num_images']):
        logging.info('Capturing image %d' % (i))
        color, depth, ir = sensor.frames()
        color.save(os.path.join(output_dir, 'color_%d.png' % (i)))
        depth.save(os.path.join(output_dir, 'depth_%d.npy' % (i)))
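The intrinsics and images written above can be read back later; a minimal sketch, assuming index 0 and the same output_dir and sensor_frame as above:

import os

import numpy as np
from perception import DepthImage  # assumed; newer releases export the image classes from autolab_core

# Reload the first depth capture written by the loop in Example #7.
depth_data = np.load(os.path.join(output_dir, 'depth_%d.npy' % (0)))
depth_im = DepthImage(depth_data, frame=sensor_frame)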
Example #8
def register_ensenso(config):
    # set parameters
    average = True
    add_to_buffer = True
    clear_buffer = False
    decode = True
    grid_spacing = -1

    # read parameters
    num_patterns = config["num_patterns"]
    max_tries = config["max_tries"]

    # load tf pattern to world
    T_pattern_world = RigidTransform.load(config["pattern_tf"])

    # initialize sensor
    sensor_frame = config["sensor_frame"]
    sensor_config = {"frame": sensor_frame}
    logging.info("Creating sensor")
    sensor = RgbdSensorFactory.sensor("ensenso", sensor_config)

    # initialize node
    rospy.init_node("register_ensenso", anonymous=True)
    rospy.wait_for_service("/%s/collect_pattern" % (sensor_frame))
    rospy.wait_for_service("/%s/estimate_pattern_pose" % (sensor_frame))

    # start sensor
    print("Starting sensor")
    sensor.start()
    time.sleep(1)
    print("Sensor initialized")

    # perform registration
    try:
        print("Collecting patterns")
        num_detected = 0
        i = 0
        while num_detected < num_patterns and i < max_tries:
            collect_pattern = rospy.ServiceProxy(
                "/%s/collect_pattern" % (sensor_frame), CollectPattern)
            resp = collect_pattern(add_to_buffer, clear_buffer, decode,
                                   grid_spacing)
            if resp.success:
                print("Detected pattern %d" % (num_detected))
                num_detected += 1
            i += 1

        # Raise only if we ran out of tries before detecting enough patterns.
        if num_detected < num_patterns:
            raise ValueError("Failed to detect calibration pattern!")

        print("Estimating pattern pose")
        estimate_pattern_pose = rospy.ServiceProxy(
            "/%s/estimate_pattern_pose" % (sensor_frame), EstimatePatternPose)
        resp = estimate_pattern_pose(average)

        q_pattern_camera = np.array([
            resp.pose.orientation.w,
            resp.pose.orientation.x,
            resp.pose.orientation.y,
            resp.pose.orientation.z,
        ])
        t_pattern_camera = np.array(
            [resp.pose.position.x, resp.pose.position.y, resp.pose.position.z])
        T_pattern_camera = RigidTransform(
            rotation=q_pattern_camera,
            translation=t_pattern_camera,
            from_frame="pattern",
            to_frame=sensor_frame,
        )
    except rospy.ServiceException as e:
        print("Service call failed: %s" % (str(e)))

    # compute final transformation
    T_camera_world = T_pattern_world * T_pattern_camera.inverse()

    # save final transformation
    output_dir = os.path.join(config["calib_dir"], sensor_frame)
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    pose_filename = os.path.join(output_dir, "%s_to_world.tf" % (sensor_frame))
    T_camera_world.save(pose_filename)
    intr_filename = os.path.join(output_dir, "%s.intr" % (sensor_frame))
    sensor.ir_intrinsics.save(intr_filename)

    # log final transformation
    print("Final Result for sensor %s" % (sensor_frame))
    print("Rotation: ")
    print(T_camera_world.rotation)
    print("Quaternion: ")
    print(T_camera_world.quaternion)
    print("Translation: ")
    print(T_camera_world.translation)

    # stop sensor
    sensor.stop()

    # move robot to calib pattern center
    if config["use_robot"]:
        # create robot pose relative to target point
        target_pt_camera = Point(T_pattern_camera.translation, sensor_frame)
        target_pt_world = T_camera_world * target_pt_camera
        R_gripper_world = np.array([[1.0, 0, 0], [0, -1.0, 0], [0, 0, -1.0]])
        t_gripper_world = np.array([
            target_pt_world.x + config["gripper_offset_x"],
            target_pt_world.y + config["gripper_offset_y"],
            target_pt_world.z + config["gripper_offset_z"],
        ])
        T_gripper_world = RigidTransform(
            rotation=R_gripper_world,
            translation=t_gripper_world,
            from_frame="gripper",
            to_frame="world",
        )

        # move robot to pose
        y = YuMiRobot(tcp=YMC.TCP_SUCTION_STIFF)
        y.reset_home()
        time.sleep(1)

        T_lift = RigidTransform(translation=(0, 0, 0.05),
                                from_frame="world",
                                to_frame="world")
        T_gripper_world_lift = T_lift * T_gripper_world

        y.right.goto_pose(T_gripper_world_lift)
        y.right.goto_pose(T_gripper_world)

        # wait for human measurement
        keyboard_input("Take measurement. Hit [ENTER] when done")
        y.right.goto_pose(T_gripper_world_lift)
        y.reset_home()
        y.stop()
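The registration above relies on autolab_core's frame-checked transform composition. A minimal sketch with made-up poses (identity rotations, hypothetical translations) showing how T_camera_world = T_pattern_world * T_pattern_camera.inverse() resolves its frames:

import numpy as np
from autolab_core import RigidTransform

# Hypothetical poses purely for illustrating the frame algebra.
T_pattern_world = RigidTransform(rotation=np.eye(3),
                                 translation=np.array([0.5, 0.0, 0.0]),
                                 from_frame="pattern", to_frame="world")
T_pattern_camera = RigidTransform(rotation=np.eye(3),
                                  translation=np.array([0.0, 0.0, 0.7]),
                                  from_frame="pattern", to_frame="ensenso")

# pattern->world composed with camera->pattern yields camera->world.
T_camera_world = T_pattern_world * T_pattern_camera.inverse()
assert T_camera_world.from_frame == "ensenso"
assert T_camera_world.to_frame == "world"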
Example #9
    def __init__(self):
        super(PickAndDropProgram, self).__init__()

        # First initialize `moveit_commander`_ and a `rospy`_ node:
        moveit_commander.roscpp_initialize(sys.argv)
        rospy.init_node('pick_and_drop_program', anonymous=True)

        filepath = rospy.get_param('~goal_joint_states')
        goal_joint_states = joint_state_filereader.read(filepath)
        self.goal_names = ["home", "near_box", "near_drop", "drop"]
        for name in self.goal_names:
            assert name in goal_joint_states, \
                "Joint state name '{}' is not found".format(name)

        # Declare suctionpad topics
        self.pub_to = rospy.Publisher('toArduino', String, queue_size=100)
        self.pub_from = rospy.Publisher('fromArduino', String, queue_size=100)

        # Vision config
        config = YamlConfig(rospy.get_param('~config'))

        # Instantiate a `RobotCommander`_ object. This object is the outer-level interface to
        # the robot:
        robot = moveit_commander.RobotCommander()

        # Instantiate a `PlanningSceneInterface`_ object.  This object is an interface
        # to the world surrounding the robot:
        scene = moveit_commander.PlanningSceneInterface()

        # Instantiate a `MoveGroupCommander`_ object.  This object is an interface
        # to one group of joints.  In this case the group is the joints in the UR5
        # arm so we set ``group_name = manipulator``. If you are using a different robot,
        # you should change this value to the name of your robot arm planning group.
        group_name = "manipulator"  # See .srdf file to get available group names
        group = moveit_commander.MoveGroupCommander(group_name)

        # We create a `DisplayTrajectory`_ publisher which is used later to publish
        # trajectories for RViz to visualize:
        # display_trajectory_publisher = rospy.Publisher('/move_group/display_planned_path',
        #                                                moveit_msgs.msg.DisplayTrajectory,
        #                                                queue_size=20)

        # We can get the name of the reference frame for this robot:
        planning_frame = group.get_planning_frame()
        print("============ Reference frame: %s" % planning_frame)

        # We can also print the name of the end-effector link for this group:
        eef_link = group.get_end_effector_link()
        print("============ End effector: %s" % eef_link)

        # We can get a list of all the groups in the robot:
        group_names = robot.get_group_names()
        print("============ Robot Groups:", robot.get_group_names())

        # Sometimes for debugging it is useful to print the entire state of the
        # robot:
        print("============ Printing robot state")
        print(robot.get_current_state())
        print("")

        # Setup vision sensor
        print("============ Setup vision sensor")
        # create rgbd sensor
        rospy.loginfo('Creating RGBD Sensor')
        sensor_cfg = config['sensor_cfg']
        sensor_type = sensor_cfg['type']
        self.sensor = RgbdSensorFactory.sensor(sensor_type, sensor_cfg)
        self.sensor.start()
        rospy.loginfo('Sensor Running')

        rospy.loginfo('Loading T_camera_world')
        tf_camera_world = RigidTransform.load(
            rospy.get_param('~world_camera_tf'))
        rospy.loginfo('tf_camera_world: {}'.format(tf_camera_world))

        # Setup client for grasp pose service
        rospy.loginfo('Setup client for grasp pose service')
        rospy.wait_for_service('grasping_policy')
        self.plan_grasp_client = rospy.ServiceProxy('grasping_policy',
                                                    GQCNNGraspPlanner)

        self.transform_broadcaster = tf2_ros.TransformBroadcaster()

        # Misc variables
        self.robot = robot
        self.scene = scene
        self.group = group
        self.group_names = group_names
        self.config = config
        self.tf_camera_world = tf_camera_world
        self.goal_joint_states = goal_joint_states
Example #10
    config = YamlConfig('/home/autolab/Workspace/vishal_working/catkin_ws/src/gqcnn/cfg/ros_nodes/yumi_control_node.yaml')

    rospy.loginfo('Loading Gripper')
    gripper = RobotGripper.load('yumi_metal_spline')

    rospy.loginfo('Loading T_camera_world')
    T_camera_world = RigidTransform.load('/home/autolab/Public/alan/calib/primesense_overhead/primesense_overhead_to_world.tf')

    detector_cfg = config['detector']

    # create rgbd sensor
    rospy.loginfo('Creating RGBD Sensor')
    sensor_cfg = config['sensor_cfg']
    sensor_type = sensor_cfg['type']
    sensor = RgbdSensorFactory.sensor(sensor_type, sensor_cfg)
    sensor.start()
    rospy.loginfo('Sensor Running')

    # setup safe termination
    def handler(signum, frame):
        logging.info('caught CTRL+C, exiting...')        
        if sensor is not None:
            sensor.stop()
        if robot is not None:
            robot.stop()
        if subscriber is not None and subscriber._started:
            subscriber.stop()            
        exit(0)
    signal.signal(signal.SIGINT, handler)
    
Example #11
    config = YamlConfig(config_filename)
    sensor_type = config["sensor"]["type"]
    sensor_frame = config["sensor"]["frame"]
    num_grasp_samples = config["num_grasp_samples"]
    gripper_width = config["gripper_width"]
    inpaint_rescale_factor = config["inpaint_rescale_factor"]
    visualize_sampling = config["visualize_sampling"]
    sample_config = config["sampling"]

    # Read camera calib.
    tf_filename = "%s_to_world.tf" % (sensor_frame)
    T_camera_world = RigidTransform.load(
        os.path.join(config["calib_dir"], sensor_frame, tf_filename))

    # Setup sensor.
    sensor = RgbdSensorFactory.sensor(sensor_type, config["sensor"])
    sensor.start()
    camera_intr = sensor.ir_intrinsics

    # Read images.
    color_im, depth_im, _ = sensor.frames()
    color_im = color_im.inpaint(rescale_factor=inpaint_rescale_factor)
    depth_im = depth_im.inpaint(rescale_factor=inpaint_rescale_factor)
    rgbd_im = RgbdImage.from_color_and_depth(color_im, depth_im)

    # Sample grasps.
    grasp_sampler = AntipodalDepthImageGraspSampler(sample_config,
                                                    gripper_width)
    grasps = grasp_sampler.sample(rgbd_im,
                                  camera_intr,
                                  num_grasp_samples,
Example #12
    def test_virtual(self, height=100, width=100):
        # Generate folder of color and depth images
        if not os.path.exists(IM_FILEROOT):
            os.makedirs(IM_FILEROOT)
        cam_intr = CameraIntrinsics(
            "a",
            fx=0.0,
            fy=0.0,
            cx=0.0,
            cy=0.0,
            skew=0.0,
            height=100,
            width=100,
        )
        cam_intr.save(os.path.join(IM_FILEROOT, "a.intr"))
        color_data = (255 * np.random.rand(10, height, width, 3)).astype(
            np.uint8)
        depth_data = np.random.rand(10, height, width).astype(np.float32)
        for i in range(10):
            im = ColorImage(color_data[i], frame="a")
            im.save(os.path.join(IM_FILEROOT, "color_{:d}.png".format(i)))

            im = DepthImage(depth_data[i], frame="a")
            im.save(os.path.join(IM_FILEROOT, "depth_{:d}.npy".format(i)))

        # Create virtual camera
        virtual_cam = RgbdSensorFactory.sensor("virtual",
                                               cfg={
                                                   "image_dir": IM_FILEROOT,
                                                   "frame": "a"
                                               })
        self.assertTrue(
            virtual_cam.path_to_images == IM_FILEROOT,
            msg="img path changed after init",
        )

        # Start virtual camera and read frames
        virtual_cam.start()
        self.assertTrue(virtual_cam.is_running,
                        msg="camera not running after start")
        for i in range(10):
            color, depth = virtual_cam.frames()
            self.assertTrue(
                np.all(color.data == color_data[i]),
                msg="color data for img {:d} changed".format(i),
            )
            self.assertTrue(
                color.frame == virtual_cam.frame,
                msg="frame mismatch between color im and camera",
            )
            self.assertTrue(
                np.all(depth.data == depth_data[i]),
                msg="depth data for img {:d} changed".format(i),
            )
            self.assertTrue(
                depth.frame == virtual_cam.frame,
                msg="frame mismatch between depth im and camera",
            )

        # Make sure camera is stopped
        virtual_cam.stop()
        self.assertFalse(virtual_cam.is_running,
                         msg="camera running after stop")

        # Cleanup images
        for i in range(10):
            os.remove(os.path.join(IM_FILEROOT, "color_{:d}.png".format(i)))
            os.remove(os.path.join(IM_FILEROOT, "depth_{:d}.npy".format(i)))
        os.remove(os.path.join(IM_FILEROOT, "a.intr"))
        os.rmdir(IM_FILEROOT)
Example #13
        work_bin = yaml_obj_loader('bin')
        work_bin.pose = bin_tf
        phoxi = yaml_obj_loader('phoxi')
        phoxi.pose = phoxi_tf
        
        # Create scene for each camera for rendering sim
        depth_scene = create_scene(phoxi, [work_bin])

        # Create ICP objects
        feature_matcher = PointToPlaneFeatureMatcher()
        solver = PointToPlaneICPSolver()

        # Start physical depth and color cameras
        logging.info('Creating Phoxi sensor')
        sensor_config = {'frame': 'phoxi', 'device_name': 1703005, 'size': 'small'}
        phoxi_sensor = RgbdSensorFactory.sensor('phoxi', sensor_config)
        logging.info('Starting Phoxi sensor')
        phoxi_sensor.start()
        logging.info('Phoxi sensor initialized')

        # Create box for filtering point cloud
        min_pt = np.array([0.25, -0.12, 0])
        max_pt = np.array([0.5, 0.12, 0.25])
        mask_box = Box(min_pt, max_pt, 'world')

        env = GraspingEnv(config, config['vis'])
        dataset = get_dataset(config, args)
        datapoint = dataset.datapoint_template

        obj_id, obj_id_to_key = 0, {}
        while not rospy.is_shutdown():