Code Example #1
    def __init__(self, phoxi_config, webcam_config, calib_dir, frame='phoxi'):
        """Initialize a webcam-colorized PhoXi sensor.

        Parameters
        ----------
        phoxi_config : dict
            Config for the PhoXi camera.
        webcam_config : dict
            Config for the webcam.
        calib_dir : str
            Directory containing the 'phoxi' and 'webcam' subdirectories with
            the T_phoxi_world and T_webcam_world transform files.
        frame : str
            A name for the frame in which images are returned.
        """
        self._frame = frame
        phoxi_to_world_fn = os.path.join(calib_dir, 'phoxi', 'phoxi_to_world.tf')
        webcam_to_world_fn = os.path.join(calib_dir, 'webcam', 'webcam_to_world.tf')
        self._T_phoxi_world = RigidTransform.load(phoxi_to_world_fn)
        self._T_webcam_world = RigidTransform.load(webcam_to_world_fn)
        self._phoxi = PhoXiSensor(**phoxi_config)
        self._webcam = WebcamSensor(**webcam_config)
        self._camera_intr = self._phoxi.ir_intrinsics
        self._running = False
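
A minimal sketch of the calibration-file convention this constructor relies on: each camera's transform lives at '<calib_dir>/<frame>/<frame>_to_world.tf' and is read back with RigidTransform.load. The helper name and example path below are hypothetical.

import os
from autolab_core import RigidTransform

def load_T_cam_world(calib_dir, frame):
    # e.g. '<calib_dir>/phoxi/phoxi_to_world.tf', matching the constructor above
    tf_path = os.path.join(calib_dir, frame, '%s_to_world.tf' % frame)
    return RigidTransform.load(tf_path)

# hypothetical usage:
# T_phoxi_world = load_T_cam_world('/path/to/calib', 'phoxi')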
Code Example #2
File: gripper.py  Project: BlueFoxCN/foxarm
    def load(gripper_name, gripper_dir):
        """ Load the gripper specified by gripper_name.

        Parameters
        ----------
        gripper_name : :obj:`str`
            name of the gripper to load
        gripper_dir : :obj:`str`
            directory where the gripper files are stored

        Returns
        -------
        :obj:`RobotGripper`
            loaded gripper object
        """
        mesh_filename = os.path.join(gripper_dir, gripper_name,
                                     GRIPPER_MESH_FILENAME)
        mesh = trimesh.load(mesh_filename)

        with open(os.path.join(gripper_dir, gripper_name,
                               GRIPPER_PARAMS_FILENAME), 'r') as f:
            params = json.load(f)

        T_mesh_gripper = RigidTransform.load(
            os.path.join(gripper_dir, gripper_name, T_MESH_GRIPPER_FILENAME))
        T_grasp_gripper = RigidTransform.load(
            os.path.join(gripper_dir, gripper_name, T_GRASP_GRIPPER_FILENAME))
        return RobotGripper(gripper_name, mesh, mesh_filename, params,
                            T_mesh_gripper, T_grasp_gripper)
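
A hedged usage sketch for the static loader above: per the code, it expects a '<gripper_dir>/<gripper_name>/' directory holding the gripper mesh, the params JSON, and the two .tf files. The gripper name and path are illustrative only.

# hypothetical usage (illustrative names only)
gripper = RobotGripper.load('yumi_metal_spline', '/path/to/grippers')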
Code Example #3
 def setup_perception(self):
     self.cfg = YamlConfig("april_tag_pick_place_azure_kinect_cfg.yaml")
     self.T_camera_world = RigidTransform.load(
         self.cfg['T_k4a_franka_path'])
     self.sensor = Kinect2SensorFactory.sensor(
         'bridged', self.cfg)  # Kinect sensor object
     prefix = ""
     self.sensor.topic_image_color = prefix + self.sensor.topic_image_color
     self.sensor.topic_image_depth = prefix + self.sensor.topic_image_depth
     self.sensor.topic_info_camera = prefix + self.sensor.topic_info_camera
     self.sensor.start()
     self.april = AprilTagDetector(self.cfg['april_tag'])
     # intr = sensor.color_intrinsics #original
     overhead_intr = CameraIntrinsics('k4a',
                                      fx=970.4990844726562,
                                      cx=1025.4967041015625,
                                      fy=970.1990966796875,
                                      cy=777.769775390625,
                                      height=1536,
                                      width=2048)  #fx fy cx cy overhead
     frontdown_intr = CameraIntrinsics('k4a',
                                       fx=611.9021606445312,
                                       cx=637.0317993164062,
                                       fy=611.779968261718,
                                       cy=369.051239013671,
                                       height=1536,
                                       width=2048)  #fx fy cx cy frontdown
     self.intr = overhead_intr
Code Example #4
    def get_T_cam_world(self, from_frame, to_frame, config_path='./'):
        """ get transformation from camera frame to world frame"""

        transform_filename = config_path + "/" + from_frame + '_' + to_frame + '.tf'
        if os.path.exists(transform_filename):
            return RigidTransform.load(transform_filename)
        time = 0
        trans = None
        qat = None

        self.tl = tf.TransformListener()
        while not rospy.is_shutdown():
            try:
                time = self.tl.getLatestCommonTime(to_frame, from_frame)
                (trans, qat) = self.tl.lookupTransform(to_frame, from_frame,
                                                       time)
                break
            except (tf.Exception, tf.LookupException, tf.ConnectivityException,
                    tf.ExtrapolationException):
                print('try again')
                continue
        RT = RigidTransform()

        #print qat
        qat_wxyz = [qat[-1], qat[0], qat[1], qat[2]]

        #rot = RT.rotation_from_quaternion(qat_wxyz)
        #print trans
        #print rot

        #return RigidTransform(rot, trans, from_frame, to_frame)
        ans = RigidTransform(translation=trans,
                             rotation=qat_wxyz,
                             from_frame=from_frame,
                             to_frame=to_frame)
        ans.save(transform_filename)
        return ans
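
The reordering above is needed because ROS tf returns quaternions as (x, y, z, w) while autolab_core's RigidTransform expects (w, x, y, z). A minimal sketch of that conversion and of the save/load round trip used for caching; the translation, quaternion, and file path are placeholder values.

from autolab_core import RigidTransform

trans = [0.5, 0.0, 0.3]            # placeholder translation from tf
qat_xyzw = [0.0, 0.0, 0.0, 1.0]    # placeholder ROS quaternion (identity)
qat_wxyz = [qat_xyzw[-1], qat_xyzw[0], qat_xyzw[1], qat_xyzw[2]]

T = RigidTransform(rotation=qat_wxyz, translation=trans,
                   from_frame='camera', to_frame='world')
T.save('camera_world.tf')                           # cache to disk
T_cached = RigidTransform.load('camera_world.tf')   # later calls reuse the file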
Code Example #5
            previous_grasp = None
            raw_input("Press ENTER to resume")


if __name__ == '__main__':

    # initialize the ROS node
    rospy.init_node('Baxter_Control_Node')

    # load detector config
    detector_cfg = YamlConfig(CFG_PATH +
                              'baxter_control_node.yaml')['detector']

    # load camera tf and intrinsics
    rospy.loginfo('Loading T_camera_world')
    T_camera_world = RigidTransform.load(CFG_PATH + 'kinect_to_world.tf')
    rospy.loginfo("Loading camera intrinsics")
    raw_camera_info = rospy.wait_for_message('/camera/rgb/camera_info',
                                             CameraInfo)
    camera_intrinsics = perception.CameraIntrinsics(
        raw_camera_info.header.frame_id, raw_camera_info.K[0],
        raw_camera_info.K[4], raw_camera_info.K[2], raw_camera_info.K[5],
        raw_camera_info.K[1], raw_camera_info.height, raw_camera_info.width)

    # initalize image processing objects
    cv_bridge = CvBridge()
    boundingBox = BoundingBox(BOUNDBOX_MIN_X, BOUNDBOX_MIN_Y, BOUNDBOX_MAX_X,
                              BOUNDBOX_MAX_Y)

    # wait for Grasp Planning Service and create Service Proxy
    rospy.loginfo("Waiting for planner node")
Code Example #6
                        help='path to configuration file to use')
    args = parser.parse_args()
    output_dir = args.output_dir
    config_filename = args.config_filename

    if not os.path.exists(output_dir):
        os.mkdir(output_dir)

    # read config
    config = YamlConfig(config_filename)
    sensor_type = config['sensor']['type']
    sensor_frame = config['sensor']['frame']

    # read camera calib
    tf_filename = '%s_to_world.tf' % (sensor_frame)
    T_camera_world = RigidTransform.load(
        os.path.join(config['calib_dir'], sensor_frame, tf_filename))
    T_camera_world.save(os.path.join(output_dir, tf_filename))

    # setup sensor
    sensor = RgbdSensorFactory.sensor(sensor_type, config['sensor'])

    # start the sensor
    sensor.start()
    color_intrinsics = sensor.color_intrinsics
    ir_intrinsics = sensor.ir_intrinsics
    color_intrinsics.save(
        os.path.join(output_dir, '%s_color.intr' % (sensor.frame)))
    ir_intrinsics.save(os.path.join(output_dir, '%s_ir.intr' % (sensor.frame)))

    # get raw images
    for i in range(config['num_images']):
Code Example #7
    def sample(self):
        """ Samples a state from the space
        Returns
        -------
        :obj:`HeapState`
            state of the object pile
        """

        # Start physics engine
        self._physics_engine.start()

        # setup workspace
        workspace_obj_states = []
        workspace_objs = self._config['workspace']['objects']
        for work_key, work_config in list(workspace_objs.items()):

            # make paths absolute
            mesh_filename = work_config['mesh_filename']
            pose_filename = work_config['pose_filename']

            if not os.path.isabs(mesh_filename):
                mesh_filename = os.path.join(
                    os.path.dirname(os.path.realpath(__file__)), '..',
                    mesh_filename)
            if not os.path.isabs(pose_filename):
                pose_filename = os.path.join(
                    os.path.dirname(os.path.realpath(__file__)), '..',
                    pose_filename)

            # load mesh
            mesh = trimesh.load_mesh(mesh_filename)
            mesh.density = self.obj_density
            pose = RigidTransform.load(pose_filename)
            workspace_obj = ObjectState(work_key, mesh, pose)
            self._physics_engine.add(workspace_obj, static=True)
            workspace_obj_states.append(workspace_obj)

        # sample state
        train = True
        if np.random.rand() > self._train_pct:
            train = False
            sample_keys = self.test_keys
            self._logger.info('Sampling from test')
        else:
            sample_keys = self.train_keys
            self._logger.info('Sampling from train')

        total_num_objs = len(sample_keys)

        # sample object ids
        num_objs = min(self.num_objs_rv.rvs(size=1)[0], total_num_objs - 1) + 1
        num_objs = min(num_objs, self.max_objs)
        num_objs = max(num_objs, self.min_objs)
        obj_inds = np.random.permutation(np.arange(total_num_objs))

        # log
        self._logger.info('Sampling %d objects' % (num_objs))

        # sample pile center
        heap_center = self.heap_center_space.sample()
        t_heap_world = np.array([heap_center[0], heap_center[1], 0])
        self._logger.debug('Sampled pile location: %.3f %.3f' %
                           (t_heap_world[0], t_heap_world[1]))

        # sample object, center of mass, pose
        objs_in_heap = []
        total_drops = 0
        while total_drops < total_num_objs and len(objs_in_heap) < num_objs:
            obj_key = sample_keys[total_drops]
            obj_mesh = trimesh.load_mesh(self.mesh_filenames[obj_key])
            obj_mesh.density = self.obj_density
            obj = ObjectState(obj_key, obj_mesh)
            _, radius = trimesh.nsphere.minimum_nsphere(obj.mesh)
            if 2 * radius > self.max_obj_diam:
                self._logger.warning('Obj too big, skipping .....')
                total_drops += 1
                continue

            # sample center of mass
            delta_com = self.delta_com_rv.rvs(size=1)
            center_of_mass = obj.mesh.center_mass + delta_com
            obj.mesh.center_mass = center_of_mass

            # sample obj drop pose
            obj_orientation = self.obj_orientation_space.sample()
            az = obj_orientation[0]
            elev = obj_orientation[1]
            T_obj_table = RigidTransform.sph_coords_to_pose(az,
                                                            elev).as_frames(
                                                                'obj', 'world')

            # sample object planar pose
            obj_planar_pose = self.obj_planar_pose_space.sample()
            theta = obj_planar_pose[2]
            R_table_world = RigidTransform.z_axis_rotation(theta)
            R_obj_drop_world = R_table_world.dot(T_obj_table.rotation)
            t_obj_drop_heap = np.array(
                [obj_planar_pose[0], obj_planar_pose[1], self.drop_height])
            t_obj_drop_world = t_obj_drop_heap + t_heap_world
            obj.pose = RigidTransform(rotation=R_obj_drop_world,
                                      translation=t_obj_drop_world,
                                      from_frame='obj',
                                      to_frame='world')

            self._physics_engine.add(obj)
            try:
                v, w = self._physics_engine.get_velocity(obj.key)
            except:
                self._logger.warning(
                    'Could not get base velocity for object %s. Skipping ...' %
                    (obj_key))
                self._physics_engine.remove(obj.key)
                total_drops += 1
                continue

            objs_in_heap.append(obj)

            # setup until approx zero velocity
            wait = time.time()
            objects_in_motion = True
            num_steps = 0
            while objects_in_motion and num_steps < self.max_settle_steps:

                # step simulation
                self._physics_engine.step()

                # check velocities
                max_mag_v = 0
                max_mag_w = 0
                objs_to_remove = set()
                for o in objs_in_heap:
                    try:
                        v, w = self._physics_engine.get_velocity(o.key)
                    except:
                        self._logger.warning(
                            'Could not get base velocity for object %s. Skipping ...'
                            % (o.key))
                        objs_to_remove.add(o)
                        continue
                    mag_v = np.linalg.norm(v)
                    mag_w = np.linalg.norm(w)
                    if mag_v > max_mag_v:
                        max_mag_v = mag_v
                    if mag_w > max_mag_w:
                        max_mag_w = mag_w

                # Remove invalid objects
                for o in objs_to_remove:
                    self._physics_engine.remove(o.key)
                    objs_in_heap.remove(o)

                # check objs in motion
                if max_mag_v < self.mag_v_thresh and max_mag_w < self.mag_w_thresh:
                    objects_in_motion = False

                num_steps += 1

            # read out object poses
            objs_to_remove = set()
            for o in objs_in_heap:
                obj_pose = self._physics_engine.get_pose(o.key)
                o.pose = obj_pose.copy()

                # remove the object if its outside of the workspace
                if not self.in_workspace(obj_pose):
                    self._logger.warning(
                        'Object {} fell out of the workspace!'.format(o.key))
                    objs_to_remove.add(o)

            # remove invalid objects
            for o in objs_to_remove:
                self._physics_engine.remove(o.key)
                objs_in_heap.remove(o)

            total_drops += 1
            self._logger.debug('Waiting for zero velocity took %.3f sec' %
                               (time.time() - wait))

        # Stop physics engine
        self._physics_engine.stop()

        # add metadata for heap state and return it
        metadata = {'split': TRAIN_ID}
        if not train:
            metadata['split'] = TEST_ID

        return HeapState(workspace_obj_states, objs_in_heap, metadata=metadata)
Code Example #8
    parser.add_argument('object_name')
    args = parser.parse_args()

    config_filename = 'cfg/tools/register_object.yaml'
    config = YamlConfig(config_filename)

    sensor_frame = config['sensor']['frame_name']
    sensor_type = config['sensor']['type']
    sensor_config = config['sensor']

    object_path = os.path.join(config['objects_dir'], args.object_name)
    obj_cb_transform_file_path = os.path.join(
        object_path, 'T_cb_{0}.tf'.format(args.object_name))

    # load T_cb_obj
    T_cb_obj = RigidTransform.load(obj_cb_transform_file_path)

    # load T_world_cam
    T_world_cam_path = os.path.join(config['calib_dir'], sensor_frame,
                                    '{0}_to_world.tf'.format(sensor_frame))
    T_world_cam = RigidTransform.load(T_world_cam_path)

    # open sensor
    sensor_type = sensor_config['type']
    sensor_config['frame'] = sensor_frame
    sensor = RgbdSensorFactory.sensor(sensor_type, sensor_config)
    logging.info('Starting sensor')
    sensor.start()
    ir_intrinsics = sensor.ir_intrinsics
    logging.info('Sensor initialized')
Code Example #9
#!/usr/bin/env python
import rospy
import tf2_ros
from geometry_msgs.msg import TransformStamped
from autolab_core import RigidTransform

if __name__ == '__main__':
    # initialize ROS node
    rospy.init_node('camera_to_world_tf_broadcaster')
    rospy.loginfo('Camera to World TF Broadcaster Initialized')

    # load RigidTransform
    rospy.loginfo('Loading T_camera_world')
    T_camera_world = RigidTransform.load(
        '/home/autolab/Public/alan/calib/primesense_overhead/primesense_overhead_to_world.tf'
    )

    # create broadcaster
    transform_broadcaster = tf2_ros.TransformBroadcaster()

    # create Stamped ROS Transform
    camera_world_transform = TransformStamped()
    camera_world_transform.header.stamp = rospy.Time.now()
    camera_world_transform.header.frame_id = 'primesense_overhead_rgb_optical_frame'
    camera_world_transform.child_frame_id = 'world'

    camera_world_transform.transform.translation.x = T_camera_world.translation[
        0]
    camera_world_transform.transform.translation.y = T_camera_world.translation[
        1]
    camera_world_transform.transform.translation.z = T_camera_world.translation[
        2]
Code Example #10
MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
"""

import rospy
import tf2_ros
from geometry_msgs.msg import TransformStamped
from autolab_core import RigidTransform

if __name__ == '__main__':
    # initialize ROS node
    rospy.init_node('camera_to_world_tf_broadcaster')
    rospy.loginfo('Camera to World TF Broadcaster Initialized')

    # load RigidTransform
    rospy.loginfo('Loading T_camera_world')
    T_camera_world = RigidTransform.load(
        '../data/calib/kinect/kinect_to_world.tf')

    # create broadcaster
    transform_broadcaster = tf2_ros.TransformBroadcaster()

    # create Stamped ROS Transform
    camera_world_transform = TransformStamped()
    camera_world_transform.header.stamp = rospy.Time.now()
    camera_world_transform.header.frame_id = T_camera_world.to_frame
    camera_world_transform.child_frame_id = T_camera_world.from_frame

    camera_world_transform.transform.translation.x = T_camera_world.translation[
        0]
    camera_world_transform.transform.translation.y = T_camera_world.translation[
        1]
    camera_world_transform.transform.translation.z = T_camera_world.translation[
        2]
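
The snippet is cut off here; a hedged sketch of the remaining steps such a broadcaster typically performs: filling the rotation from the transform's quaternion (autolab_core uses (w, x, y, z) ordering) and publishing in a loop. The 10 Hz rate mirrors the broadcaster in Code Example #19.

    q_wxyz = T_camera_world.quaternion
    camera_world_transform.transform.rotation.w = q_wxyz[0]
    camera_world_transform.transform.rotation.x = q_wxyz[1]
    camera_world_transform.transform.rotation.y = q_wxyz[2]
    camera_world_transform.transform.rotation.z = q_wxyz[3]

    # broadcast at a fixed rate until shutdown
    rate = rospy.Rate(10.0)
    while not rospy.is_shutdown():
        camera_world_transform.header.stamp = rospy.Time.now()
        transform_broadcaster.sendTransform(camera_world_transform)
        rate.sleep()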
Code Example #11
if __name__ == '__main__':
    logging.getLogger().setLevel(logging.INFO)
    args = parse_args()
    rospy.init_node('test_toppling', anonymous=True)

    config = YamlConfig(args.config_filename)
    config['model']['load'] = 1
    policy = TestTopplePolicy(config, use_sensitivity=True)

    if config['debug']:
        random.seed(SEED)
        np.random.seed(SEED)

    # Start webcam stream and get webcam tf
    phoxi_tf = RigidTransform.load(os.path.join('/nfs/diskstation/calib', 'phoxi', 'phoxi_to_world.tf'))
    bin_tf = RigidTransform(translation=np.array([0.387500, -0.003000, -0.0025]), 
                            rotation=np.array([[-0.024541, -0.999699, 0.01000],
                                               [0.999699, -0.024541, 0.000000],
                                               [0.000000, 0.000000, 1.000000]]), 
                            from_frame='bin', to_frame='world')
    
    # Load all python objects
    basedir = os.path.join(os.path.dirname(__file__), '..', '..', 'ambidex', 'tests', 'cfg')
    yaml_obj_loader = YamlObjLoader(basedir)

    phys_robot = yaml_obj_loader('physical_yumi')
    try:
    # if True:
        work_bin = yaml_obj_loader('bin')
        work_bin.pose = bin_tf
Code Example #12
    logging.getLogger().setLevel(logging.INFO)

    # parse args
    parser = argparse.ArgumentParser(
        description="Register a webcam to the robot")
    parser.add_argument(
        "--config_filename",
        type=str,
        default="cfg/tools/register_webcam.yaml",
        help="filename of a YAML configuration for registration",
    )
    args = parser.parse_args()
    config_filename = args.config_filename
    config = YamlConfig(config_filename)

    T_cb_world = RigidTransform.load(config["chessboard_tf"])

    # Get camera sensor object
    for sensor_frame, sensor_data in config["sensors"].iteritems():
        logging.info("Registering {}".format(sensor_frame))
        sensor_config = sensor_data["sensor_config"]
        reg_cfg = sensor_data["registration_config"].copy()
        reg_cfg.update(config["chessboard_registration"])

        try:
            # Open sensor
            sensor_type = sensor_config["type"]
            sensor_config["frame"] = sensor_frame
            logging.info("Creating sensor")
            sensor = RgbdSensorFactory.sensor(sensor_type, sensor_config)
            logging.info("Starting sensor")
Code Example #13
from perception import DepthImage, CameraIntrinsics, RenderMode
from visualization import Visualizer2D as vis2d, Visualizer3D as vis3d
from meshrender import Scene, MaterialProperties, AmbientLight, PointLight, SceneObject, VirtualCamera
"""
Given an object mesh file and a depth image of a bin, this library infers the best placement location for that object in the bin.
@author: Ruta Joshi
"""

# Globals
#ci_file = '/nfs/diskstation/projects/dex-net/placing/datasets/real/sample_ims_05_22/camera_intrinsics.intr'
ci_file = '/nfs/diskstation/projects/dex-net/placing/datasets/sim/sim_06_22/image_dataset/depth_ims/intrinsics_000002.intr'
ci = CameraIntrinsics.load(ci_file)

cp_file = '/nfs/diskstation/projects/dex-net/placing/datasets/sim/sim_06_22/image_dataset/depth_ims/pose_000002.tf'
cp = RigidTransform.load(cp_file)
""" 1. Given depth image of bin, retrieve largest planar surface """


@profile
def largest_planar_surface(filename):
    # Load the image as a numpy array and the camera intrinsics
    image = np.load(filename)
    # Create and deproject a depth image of the data using the camera intrinsics
    di = DepthImage(image, frame=ci.frame)
    di = di.inpaint()
    pc = ci.deproject(di)
    # Make a PCL type point cloud from the image
    p = pcl.PointCloud(pc.data.T.astype(np.float32))
    # Make a segmenter and segment the point cloud.
    seg = p.make_segmenter()
Code Example #14
def register_ensenso(config):
    # set parameters
    average = True
    add_to_buffer = True
    clear_buffer = False
    decode = True
    grid_spacing = -1

    # read parameters
    num_patterns = config['num_patterns']
    max_tries = config['max_tries']
    
    # load tf pattern to world
    T_pattern_world = RigidTransform.load(config['pattern_tf'])
    
    # initialize sensor
    sensor_frame = config['sensor_frame']
    sensor = EnsensoSensor(sensor_frame)

    # initialize node
    rospy.init_node('register_ensenso', anonymous=True)
    rospy.wait_for_service('/%s/collect_pattern' %(sensor_frame))
    rospy.wait_for_service('/%s/estimate_pattern_pose' %(sensor_frame))
    
    # start sensor
    print('Starting sensor')
    sensor.start()
    time.sleep(1)
    print('Sensor initialized')
    
    # perform registration
    try:
        print('Collecting patterns')
        num_detected = 0
        i = 0
        while num_detected < num_patterns and i < max_tries:
            collect_pattern = rospy.ServiceProxy('/%s/collect_pattern' %(sensor_frame), CollectPattern)
            resp = collect_pattern(add_to_buffer,
                                   clear_buffer,
                                   decode,
                                   grid_spacing)
            if resp.success:
                print('Detected pattern %d' %(num_detected))
                num_detected += 1
            i += 1

        if i == max_tries:
            raise ValueError('Failed to detect calibration pattern!')
            
        print('Estimating pattern pose')
        estimate_pattern_pose = rospy.ServiceProxy('/%s/estimate_pattern_pose' %(sensor_frame), EstimatePatternPose)
        resp = estimate_pattern_pose(average)

        q_pattern_camera = np.array([resp.pose.orientation.w,
                                     resp.pose.orientation.x,
                                     resp.pose.orientation.y,
                                     resp.pose.orientation.z])
        t_pattern_camera = np.array([resp.pose.position.x,
                                     resp.pose.position.y,
                                     resp.pose.position.z])
        T_pattern_camera = RigidTransform(rotation=q_pattern_camera,
                                          translation=t_pattern_camera,
                                          from_frame='pattern',
                                          to_frame=sensor_frame)
    except rospy.ServiceException as e:
        print('Service call failed: %s' %(str(e)))

    # compute final transformation
    T_camera_world = T_pattern_world * T_pattern_camera.inverse()
    
    # save final transformation
    output_dir = os.path.join(config['calib_dir'], sensor_frame)
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    pose_filename = os.path.join(output_dir, '%s_to_world.tf' %(sensor_frame))
    T_camera_world.save(pose_filename)
    intr_filename = os.path.join(output_dir, '%s.intr' %(sensor_frame))
    sensor.ir_intrinsics.save(intr_filename)

    # log final transformation
    print('Final Result for sensor %s' %(sensor_frame))
    print('Rotation: ')
    print(T_camera_world.rotation)
    print('Quaternion: ')
    print(T_camera_world.quaternion)
    print('Translation: ')
    print(T_camera_world.translation)

    # stop sensor
    sensor.stop()

    # move robot to calib pattern center
    if config['use_robot']:  
        # create robot pose relative to target point
        target_pt_camera = Point(T_pattern_camera.translation, sensor_frame)
        target_pt_world = T_camera_world * target_pt_camera
        R_gripper_world = np.array([[1.0, 0, 0],
                                    [0, -1.0, 0],
                                    [0, 0, -1.0]])
        t_gripper_world = np.array([target_pt_world.x + config['gripper_offset_x'],
                                    target_pt_world.y + config['gripper_offset_y'],
                                    target_pt_world.z + config['gripper_offset_z']])
        T_gripper_world = RigidTransform(rotation=R_gripper_world,
                                         translation=t_gripper_world,
                                         from_frame='gripper',
                                         to_frame='world')

        # move robot to pose
        y = YuMiRobot(tcp=YMC.TCP_SUCTION_STIFF)
        y.reset_home()
        time.sleep(1)

        T_lift = RigidTransform(translation=(0,0,0.05), from_frame='world', to_frame='world')
        T_gripper_world_lift = T_lift * T_gripper_world

        y.right.goto_pose(T_gripper_world_lift)
        y.right.goto_pose(T_gripper_world)
        
        # wait for human measurement
        yesno = raw_input('Take measurement. Hit [ENTER] when done')
        y.right.goto_pose(T_gripper_world_lift)
        y.reset_home()
        y.stop()
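
A short sketch of the frame algebra in the registration step above, assuming autolab_core's convention that T_a_b maps frame 'a' into frame 'b'. The transforms here are identity placeholders (RigidTransform with no rotation or translation is assumed to default to the identity); only the frame bookkeeping is being illustrated, and 'ensenso' stands in for sensor_frame.

from autolab_core import RigidTransform

# placeholder transforms with the same frames as the registration code above
T_pattern_world = RigidTransform(from_frame='pattern', to_frame='world')
T_pattern_camera = RigidTransform(from_frame='pattern', to_frame='ensenso')

# (pattern -> world) composed with (camera -> pattern) gives (camera -> world)
T_camera_world = T_pattern_world * T_pattern_camera.inverse()
print(T_camera_world.from_frame, T_camera_world.to_frame)  # 'ensenso', 'world'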
Code Example #15
    plt.pause(GUI_PAUSE)

    # read workspace bounds
    workspace_box = Box(
        np.array(workspace_config["min_pt"]),
        np.array(workspace_config["max_pt"]),
        frame="world",
    )

    # read workspace objects
    workspace_objects = {}
    for obj_key, obj_config in workspace_config["objects"].iteritems():
        mesh_filename = obj_config["mesh_filename"]
        pose_filename = obj_config["pose_filename"]
        obj_mesh = trimesh.load_mesh(mesh_filename)
        obj_pose = RigidTransform.load(pose_filename)
        obj_mat_props = MaterialProperties(smooth=True, wireframe=False)
        scene_obj = SceneObject(obj_mesh, obj_pose, obj_mat_props)
        workspace_objects[obj_key] = scene_obj

    # setup each sensor
    datasets = {}
    sensors = {}
    sensor_poses = {}
    camera_intrs = {}
    workspace_ims = {}
    for sensor_name, sensor_config in sensor_configs.iteritems():
        # read params
        sensor_type = sensor_config["type"]
        sensor_frame = sensor_config["frame"]
Code Example #16
    parser = argparse.ArgumentParser(description='Filter a set of images')
    parser.add_argument('image_dir',
                        type=str,
                        help='location to read the images from')
    parser.add_argument('frame', type=str, help='frame of images')
    args = parser.parse_args()
    image_dir = args.image_dir
    frame = args.frame

    # sensor
    sensor = VirtualSensor(image_dir)
    sensor.start()
    camera_intr = sensor.ir_intrinsics

    # read tf
    T_camera_world = RigidTransform.load(
        os.path.join(image_dir, '%s_to_world.tf' % (frame)))

    # read images
    color_im, depth_im, _ = sensor.frames()

    # inpaint original image
    depth_im_filtered = depth_im.copy()
    depth_im_orig = depth_im.inpaint(rescale_factor)

    # timing
    filter_start = time.time()

    small_depth_im = depth_im.resize(rescale_factor, interp='nearest')
    small_camera_intr = camera_intr.resize(rescale_factor)

    # convert to point cloud in world coords
Code Example #17
    config = YamlConfig(config_filename)
    inpaint_rescale_factor = config["inpaint_rescale_factor"]
    policy_config = config["policy"]

    # Make relative paths absolute.
    if model_dir is not None:
        policy_config["metric"]["gqcnn_model"] = model_dir
    if "gqcnn_model" in policy_config["metric"] and not os.path.isabs(
            policy_config["metric"]["gqcnn_model"]):
        policy_config["metric"]["gqcnn_model"] = os.path.join(
            os.path.dirname(os.path.realpath(__file__)), "..",
            policy_config["metric"]["gqcnn_model"])

    # Setup sensor.
    camera_intr = CameraIntrinsics.load(camera_intr_filename)
    T_camera_world = RigidTransform.load(camera_pose_filename)

    # Read images.
    depth_data = np.load(depth_im_filename)
    depth_data = depth_data.astype(np.float32) / 1000.0
    depth_im = DepthImage(depth_data, frame=camera_intr.frame)
    color_im = ColorImage(np.zeros([depth_im.height, depth_im.width,
                                    3]).astype(np.uint8),
                          frame=camera_intr.frame)

    # Optionally read a segmask.
    mask = np.zeros((camera_intr.height, camera_intr.width, 1), dtype=np.uint8)
    c = np.array([165, 460, 500, 135])
    r = np.array([165, 165, 370, 370])
    rr, cc = skimage.draw.polygon(r, c, shape=mask.shape)
    mask[rr, cc, 0] = 255
Code Example #18
    def __init__(self):
        super(PickAndDropProgram, self).__init__()

        # First initialize `moveit_commander`_ and a `rospy`_ node:
        moveit_commander.roscpp_initialize(sys.argv)
        rospy.init_node('pick_and_drop_program', anonymous=True)

        filepath = rospy.get_param('~goal_joint_states')
        goal_joint_states = joint_state_filereader.read(filepath)
        self.goal_names = ["home", "near_box", "near_drop", "drop"]
        for name in self.goal_names:
            assert name in goal_joint_states, \
                "Joint state name '{}' is not found".format(name)

        # Declare suctionpad topics
        self.pub_to = rospy.Publisher('toArduino', String, queue_size=100)
        self.pub_from = rospy.Publisher('fromArduino', String, queue_size=100)

        # Vision config
        config = YamlConfig(rospy.get_param('~config'))

        # Instantiate a `RobotCommander`_ object. This object is the outer-level interface to
        # the robot:
        robot = moveit_commander.RobotCommander()

        # Instantiate a `PlanningSceneInterface`_ object.  This object is an interface
        # to the world surrounding the robot:
        scene = moveit_commander.PlanningSceneInterface()

        # Instantiate a `MoveGroupCommander`_ object.  This object is an interface
        # to one group of joints.  In this case the group is the joints in the UR5
        # arm so we set ``group_name = manipulator``. If you are using a different robot,
        # you should change this value to the name of your robot arm planning group.
        group_name = "manipulator"  # See .srdf file to get available group names
        group = moveit_commander.MoveGroupCommander(group_name)

        # We create a `DisplayTrajectory`_ publisher which is used later to publish
        # trajectories for RViz to visualize:
        # display_trajectory_publisher = rospy.Publisher('/move_group/display_planned_path',
        #                                                moveit_msgs.msg.DisplayTrajectory,
        #                                                queue_size=20)

        # We can get the name of the reference frame for this robot:
        planning_frame = group.get_planning_frame()
        print("============ Reference frame: %s" % planning_frame)

        # We can also print the name of the end-effector link for this group:
        eef_link = group.get_end_effector_link()
        print("============ End effector: %s" % eef_link)

        # We can get a list of all the groups in the robot:
        group_names = robot.get_group_names()
        print("============ Robot Groups:", robot.get_group_names())

        # Sometimes for debugging it is useful to print the entire state of the
        # robot:
        print("============ Printing robot state")
        print(robot.get_current_state())
        print("")

        # Setup vision sensor
        print("============ Setup vision sensor")
        # create rgbd sensor
        rospy.loginfo('Creating RGBD Sensor')
        sensor_cfg = config['sensor_cfg']
        sensor_type = sensor_cfg['type']
        self.sensor = RgbdSensorFactory.sensor(sensor_type, sensor_cfg)
        self.sensor.start()
        rospy.loginfo('Sensor Running')

        rospy.loginfo('Loading T_camera_world')
        tf_camera_world = RigidTransform.load(
            rospy.get_param('~world_camera_tf'))
        rospy.loginfo('tf_camera_world: {}'.format(tf_camera_world))

        # Setup client for grasp pose service
        rospy.loginfo('Setup client for grasp pose service')
        rospy.wait_for_service('grasping_policy')
        self.plan_grasp_client = rospy.ServiceProxy('grasping_policy',
                                                    GQCNNGraspPlanner)

        self.transform_broadcaster = tf2_ros.TransformBroadcaster()

        # Misc variables
        self.robot = robot
        self.scene = scene
        self.group = group
        self.group_names = group_names
        self.config = config
        self.tf_camera_world = tf_camera_world
        self.goal_joint_states = goal_joint_states
Code Example #19
HEREUNDER IS PROVIDED "AS IS". REGENTS HAS NO OBLIGATION TO PROVIDE
MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
"""
import rospy
import tf2_ros
from geometry_msgs.msg import TransformStamped
from autolab_core import RigidTransform

if __name__ == '__main__':
    # initialize ROS node
    rospy.init_node('camera_to_world_tf_broadcaster')
    rospy.loginfo('Camera to World TF Broadcaster Initialized')

    # load RigidTransform
    rospy.loginfo('Loading T_camera_world')
    T_camera_world = RigidTransform.load(rospy.get_param('~world_camera_tf'))
    T_camera_world = T_camera_world.inverse()

    print("T_camera_world: {}".format(T_camera_world))
    print("T_camera_world.from_frame: {}".format(T_camera_world.from_frame))
    print("T_camera_world.to_frame: {}".format(T_camera_world.to_frame))
    print("T_camera_world.R: {}".format(T_camera_world.rotation))

    # create broadcaster
    transform_broadcaster = tf2_ros.TransformBroadcaster()

    # broadcast
    rate = rospy.Rate(10.0)
    while not rospy.is_shutdown():
        # create Stamped ROS Transform
        camera_world_transform = TransformStamped()
Code Example #20
def register_ensenso(config):
    # set parameters
    average = True
    add_to_buffer = True
    clear_buffer = False
    decode = True
    grid_spacing = -1

    # read parameters
    num_patterns = config["num_patterns"]
    max_tries = config["max_tries"]

    # load tf pattern to world
    T_pattern_world = RigidTransform.load(config["pattern_tf"])

    # initialize sensor
    sensor_frame = config["sensor_frame"]
    sensor_config = {"frame": sensor_frame}
    logging.info("Creating sensor")
    sensor = RgbdSensorFactory.sensor("ensenso", sensor_config)

    # initialize node
    rospy.init_node("register_ensenso", anonymous=True)
    rospy.wait_for_service("/%s/collect_pattern" % (sensor_frame))
    rospy.wait_for_service("/%s/estimate_pattern_pose" % (sensor_frame))

    # start sensor
    print("Starting sensor")
    sensor.start()
    time.sleep(1)
    print("Sensor initialized")

    # perform registration
    try:
        print("Collecting patterns")
        num_detected = 0
        i = 0
        while num_detected < num_patterns and i < max_tries:
            collect_pattern = rospy.ServiceProxy(
                "/%s/collect_pattern" % (sensor_frame), CollectPattern)
            resp = collect_pattern(add_to_buffer, clear_buffer, decode,
                                   grid_spacing)
            if resp.success:
                print("Detected pattern %d" % (num_detected))
                num_detected += 1
            i += 1

        if i == max_tries:
            raise ValueError("Failed to detect calibration pattern!")

        print("Estimating pattern pose")
        estimate_pattern_pose = rospy.ServiceProxy(
            "/%s/estimate_pattern_pose" % (sensor_frame), EstimatePatternPose)
        resp = estimate_pattern_pose(average)

        q_pattern_camera = np.array([
            resp.pose.orientation.w,
            resp.pose.orientation.x,
            resp.pose.orientation.y,
            resp.pose.orientation.z,
        ])
        t_pattern_camera = np.array(
            [resp.pose.position.x, resp.pose.position.y, resp.pose.position.z])
        T_pattern_camera = RigidTransform(
            rotation=q_pattern_camera,
            translation=t_pattern_camera,
            from_frame="pattern",
            to_frame=sensor_frame,
        )
    except rospy.ServiceException as e:
        print("Service call failed: %s" % (str(e)))

    # compute final transformation
    T_camera_world = T_pattern_world * T_pattern_camera.inverse()

    # save final transformation
    output_dir = os.path.join(config["calib_dir"], sensor_frame)
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    pose_filename = os.path.join(output_dir, "%s_to_world.tf" % (sensor_frame))
    T_camera_world.save(pose_filename)
    intr_filename = os.path.join(output_dir, "%s.intr" % (sensor_frame))
    sensor.ir_intrinsics.save(intr_filename)

    # log final transformation
    print("Final Result for sensor %s" % (sensor_frame))
    print("Rotation: ")
    print(T_camera_world.rotation)
    print("Quaternion: ")
    print(T_camera_world.quaternion)
    print("Translation: ")
    print(T_camera_world.translation)

    # stop sensor
    sensor.stop()

    # move robot to calib pattern center
    if config["use_robot"]:
        # create robot pose relative to target point
        target_pt_camera = Point(T_pattern_camera.translation, sensor_frame)
        target_pt_world = T_camera_world * target_pt_camera
        R_gripper_world = np.array([[1.0, 0, 0], [0, -1.0, 0], [0, 0, -1.0]])
        t_gripper_world = np.array([
            target_pt_world.x + config["gripper_offset_x"],
            target_pt_world.y + config["gripper_offset_y"],
            target_pt_world.z + config["gripper_offset_z"],
        ])
        T_gripper_world = RigidTransform(
            rotation=R_gripper_world,
            translation=t_gripper_world,
            from_frame="gripper",
            to_frame="world",
        )

        # move robot to pose
        y = YuMiRobot(tcp=YMC.TCP_SUCTION_STIFF)
        y.reset_home()
        time.sleep(1)

        T_lift = RigidTransform(translation=(0, 0, 0.05),
                                from_frame="world",
                                to_frame="world")
        T_gripper_world_lift = T_lift * T_gripper_world

        y.right.goto_pose(T_gripper_world_lift)
        y.right.goto_pose(T_gripper_world)

        # wait for human measurement
        keyboard_input("Take measurement. Hit [ENTER] when done")
        y.right.goto_pose(T_gripper_world_lift)
        y.reset_home()
        y.stop()
Code Example #21
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--intrinsics_file_path', type=str, default=AZURE_KINECT_CALIB_DATA)
    parser.add_argument('--transform_file_path', type=str, default=AZURE_KINECT_EXTRINSICS)
    parser.add_argument('--object', type=str, default=AZURE_KINECT_EXTRINSICS)
    args = parser.parse_args()

    # rospy.init_node("Test azure kinect calibration")
    print('Starting robot')
    fa = FrankaArm()

    cv_bridge = CvBridge()
    ir_intrinsics = CameraIntrinsics.load(args.intrinsics_file_path)

    kinect2_overhead_to_world_transform = RigidTransform.load(args.transform_file_path)


    print('Opening Grippers')
    fa.open_gripper()

    print('Reset with pose')
    fa.reset_pose()

    print('Reset with joints')
    fa.reset_joints()

    depth_image_msg = rospy.wait_for_message('/depth_to_rgb/image_raw', Image)
    try:
        cv_image = cv_bridge.imgmsg_to_cv2(depth_image_msg)
    except CvBridgeError as e:
Code Example #22

if __name__ == "__main__":
    logging.getLogger().setLevel(logging.INFO)
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--cfg',
        '-c',
        type=str,
        default=
        '/home/stevenl3/Documents/planorparam/scripts/real_robot/april_tag_pick_place_azure_kinect_cfg.yaml'
    )
    parser.add_argument('--no_grasp', '-ng', action='store_true')
    args = parser.parse_args()
    cfg = YamlConfig(args.cfg)
    T_camera_world = RigidTransform.load(cfg['T_k4a_franka_path'])

    logging.info('Starting robot')
    fa = FrankaArm()
    #    fa.reset_joints()
    #    fa.reset_pose()
    #    fa.open_gripper()

    T_ready_world = fa.get_pose()
    T_ready_world.translation[0] += 0.25
    T_ready_world.translation[2] = 0.4

    #ret = fa.goto_pose(T_ready_world)

    logging.info('Init camera')
Code Example #23
                        type=str,
                        default=AZURE_KINECT_CALIB_DATA)
    parser.add_argument('--transform_file_path',
                        type=str,
                        default=AZURE_KINECT_EXTRINSICS)
    parser.add_argument('--filename', type=str, default=None)
    args = parser.parse_args()

    # rospy.init_node("Test azure kinect calibration")
    print('Starting robot')
    fa = FrankaArm()

    cv_bridge = CvBridge()
    ir_intrinsics = CameraIntrinsics.load(args.intrinsics_file_path)

    kinect2_left_to_world_transform = RigidTransform.load(
        args.transform_file_path)

    # print('Opening Grippers')
    # fa.open_gripper()

    print('Reset with pose')
    fa.reset_pose()

    print('Reset with joints')
    fa.reset_joints()

    fa.close_gripper()

    rgb_image_msg = rospy.wait_for_message('/rgb/image_raw', Image)
    try:
        rgb_cv_image = cv_bridge.imgmsg_to_cv2(rgb_image_msg)
Code Example #24
import numpy as np
from copy import deepcopy
from Queue import Queue

from baxter_interface import Gripper
import moveit_commander

from autolab_core import RigidTransform

from geometry_msgs.msg import PoseStamped
from gqcnn.srv import GQCNNGraspExecuter

CFG_PATH = '../cfg/ros_nodes/'

# Poses
L_HOME_STATE = RigidTransform.load(CFG_PATH + 'L_HOME_STATE.tf').pose_msg
R_HOME_STATE = RigidTransform.load(CFG_PATH + 'R_HOME_STATE.tf').pose_msg
L_PREGRASP_POSE = RigidTransform.load(CFG_PATH + 'L_PREGRASP_POSE.tf').pose_msg

CAMERA_MESH_DIM = (0.3, 0.05, 0.05)

# Grasping params
TABLE_DEPTH = -0.190
GRASP_APPROACH_DIST = 0.1
GRIPPER_CLOSE_FORCE = 30.0  # percentage [0.0, 100.0]

# Velocity params; fractions [0.0,1.0]
MAX_VELOCITY = 1.0


class GraspExecuter(object):
Code Example #25
                          translation=grasp_translation,
                          from_frame=T_ee_world.from_frame,
                          to_frame=T_ee_world.to_frame)


if __name__ == "__main__":
    logging.getLogger().setLevel(logging.INFO)
    parser = argparse.ArgumentParser()
    parser.add_argument('--cfg',
                        '-c',
                        type=str,
                        default='cfg/examples/april_tag_pick_place_cfg.yaml')
    parser.add_argument('--no_grasp', '-ng', action='store_true')
    args = parser.parse_args()
    cfg = YamlConfig(args.cfg)
    T_camera_ee = RigidTransform.load(cfg['T_camera_ee_path'])
    T_camera_mount_delta = RigidTransform.load(cfg['T_camera_mount_path'])

    logging.info('Starting robot')
    fa = FrankaArm()
    fa.set_tool_delta_pose(T_camera_mount_delta)
    fa.reset_joints()
    fa.open_gripper()

    T_ready_world = fa.get_pose()
    T_ready_world.translation[0] += 0.25
    T_ready_world.translation[2] = 0.4

    ret = fa.goto_pose(T_ready_world)

    logging.info('Init camera')
Code Example #26
from visualization import Visualizer3D as vis
from yumipy import YuMiRobot
from yumipy import YuMiConstants as YMC

if __name__ == '__main__':
    logging.getLogger().setLevel(logging.INFO)

    # parse args
    parser = argparse.ArgumentParser(description='Register a camera to a robot')
    parser.add_argument('--config_filename', type=str, default='cfg/tools/register_camera.yaml', help='filename of a YAML configuration for registration')
    args = parser.parse_args()
    config_filename = args.config_filename
    config = YamlConfig(config_filename)
    
    # get known tf from chessboard to world
    T_cb_world = RigidTransform.load(config['chessboard_tf'])

    # initialize node
    rospy.init_node('register_camera', anonymous=True)

    # get camera sensor object
    for sensor_frame, sensor_data in config['sensors'].iteritems():
        sensor_config = sensor_data['sensor_config']
        registration_config = sensor_data['registration_config'].copy()
        registration_config.update(config['chessboard_registration'])

        # open sensor
        try:
            sensor_type = sensor_config['type']
            sensor_config['frame'] = sensor_frame
            sensor = RgbdSensorFactory.sensor(sensor_type, sensor_config)
Code Example #27
import IPython
import numpy as np
from scipy.linalg import lstsq
from sensor_msgs.msg import PointCloud2
from sensor_msgs import point_cloud2
from std_msgs.msg import Header, Int64
import rospy

from autolab_core import Box, PointCloud, RigidTransform
from perception import CameraIntrinsics
from visualization import Visualizer3D as vis3d

CAMERA_INTR_FILENAME = '/nfs/diskstation/calib/primesense_overhead/primesense_overhead.intr'
CAMERA_POSE_FILENAME = '/nfs/diskstation/calib/primesense_overhead/primesense_overhead_to_world.tf'
camera_intr = CameraIntrinsics.load(CAMERA_INTR_FILENAME)
T_camera_world = RigidTransform.load(CAMERA_POSE_FILENAME).as_frames(
    'camera', 'world')
workspace = Box(min_pt=np.array([0.22, -0.22, 0.005]),
                max_pt=np.array([0.56, 0.22, 0.2]),
                frame='world')
pub = None


def cloudCallback(msg):
    global T_camera_world

    # read the cloud
    cloud_array = []
    for p in point_cloud2.read_points(msg):
        cloud_array.append([p[0], p[1], p[2]])
    cloud_array = np.array(cloud_array).T
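
A hedged sketch of what usually follows in such a callback: wrap the assembled array in an autolab_core PointCloud, transform it into the world frame with the loaded T_camera_world, and crop it to the workspace box. The names continue the callback above; treating workspace.min_pt / workspace.max_pt as array attributes of Box is an assumption, and the cropping is plain numpy masking rather than any particular Box API.

    cloud_camera = PointCloud(cloud_array.astype(np.float32), frame='camera')
    cloud_world = T_camera_world * cloud_camera   # camera frame -> world frame

    # keep only points inside the workspace bounds
    pts = cloud_world.data                        # 3 x N array of world-frame points
    mask = np.all((pts >= workspace.min_pt[:, None]) &
                  (pts <= workspace.max_pt[:, None]), axis=0)
    pts_in_workspace = pts[:, mask]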