def visualize_pointcloud (camera_frame="camera_depth_optical_frame", device_id="#1"):
    """
    Visualize point clouds from openni grabber through ROS.
    """
    if rospy.get_name() == '/unnamed':
        rospy.init_node("visualize_pointcloud")
    
    
    pc_pub = rospy.Publisher("camera_points", PointCloud2)
    sleeper = rospy.Rate(30)
    
    grabber = cpr.CloudGrabber(device_id)
    grabber.startRGBD()
    
    print "Streaming now from frame %s: Pointclouds only."%camera_frame
    while True:
        try:
            r, d = grabber.getRGBD()            

            pc = ru.xyzrgb2pc(clouds.depth_to_xyz(d, asus_xtion_pro_f), r, camera_frame)
            pc_pub.publish(pc)

            sleeper.sleep()
        except KeyboardInterrupt:
            print "Keyboard interrupt. Exiting."
            break
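# clouds.depth_to_xyz is used throughout this module. Below is a minimal
# numpy sketch of the standard pinhole back-projection such a helper
# presumably performs -- an illustration only, not the library's actual
# source. "f" is the focal length in pixels (asus_xtion_pro_f above), and
# the depth image is assumed to be in millimeters.
import numpy as np

def depth_to_xyz_sketch(depth_mm, f):
    h, w = depth_mm.shape
    z = depth_mm / 1000.0                            # depth in meters
    u, v = np.meshgrid(np.arange(w), np.arange(h))   # pixel coordinate grids
    x = (u - w / 2.0) * z / f                        # back-project columns
    y = (v - h / 2.0) * z / f                        # back-project rows
    return np.dstack([x, y, z])                      # HxWx3 camera-frame cloud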
def get_click_points (rgb, depth):
    """
    Get clicked points in an image.
    Add functions if you want to calibrate based on color or something.
    """
    clicks = []

    def mouse_click(event, x, y, flags, params):
        if event == cv.CV_EVENT_LBUTTONDOWN:
            clicks.append([x, y])

    xyz = clouds.depth_to_xyz(depth, asus_xtion_pro_f)
            
    cv2.imshow("Points", rgb)
    cv.SetMouseCallback("Points", mouse_click, 0)
    print "Click the points keeping the order in mind. Press enter to continue."
    cv2.waitKey()
    cv2.destroyWindow("Points")
    
    clickPoints = []

    for (x, y) in clicks:
        clickPoints.append(np.copy(xyz[y][x]))
    
    return clickPoints
def publish_transform_markers(grabber, marker, T, from_frame, to_frame, rate=100):

    from visualization_msgs.msg import Marker
    from sensor_msgs.msg import PointCloud2
    
    print colorize("Transform : ", "yellow", True)
    print T
    
    marker_frame = "ar_frame"
    tf_pub = tf.TransformBroadcaster()
    pc_pub = rospy.Publisher("camera_points", PointCloud2)
    
    trans = T[0:3,3] 
    rot   = tf.transformations.quaternion_from_matrix(T)
    sleeper = rospy.Rate(10)

    while True:
        try:
            r, d = grabber.getRGBD()
            ar_tfm = get_ar_transform_id (d, r, marker)
            
            pc = ru.xyzrgb2pc(clouds.depth_to_xyz(d, asus_xtion_pro_f), r, from_frame)
            pc_pub.publish(pc)
            
            tf_pub.sendTransform(trans, rot, rospy.Time.now(), to_frame, from_frame)
            
            try:
                trans_m, rot_m = conversions.hmat_to_trans_rot(ar_tfm)
                tf_pub.sendTransform(trans_m, rot_m, rospy.Time.now(), marker_frame, from_frame)
            except:
                pass
            
            sleeper.sleep()
        except KeyboardInterrupt:
            break    
def extract_rope_tracking(rgb, depth, T_w_k):
    red_mask = [lambda x: (x < 25) | (x > 100), lambda x: x > 80, lambda x: x > 100]
    hsv = cv2.cvtColor(rgb, cv2.COLOR_BGR2HSV)
    h = hsv[:,:,0]
    s = hsv[:,:,1]
    v = hsv[:,:,2]
    
    h_mask = red_mask[0](h)
    s_mask = red_mask[1](s)
    v_mask = red_mask[2](v)
    color_mask = h_mask & s_mask & v_mask
    
    valid_mask = (depth > 0)
    
    xyz_k = clouds.depth_to_xyz(depth, asus_xtion_pro_f)
    xyz_w = xyz_k.dot(T_w_k[:3,:3].T) + T_w_k[:3,3][None,None,:]
    
    z = xyz_w[:,:,2]   
    z0 = xyz_k[:,:,2]

    # 'height' or distance from the camera
    height_mask = (xyz_w[:,:,2] > 0) & (xyz_w[:,:,2] < 1.1)
        
    good_mask = color_mask & valid_mask & height_mask
    
    # points are returned in the world frame (xyz_w)
    good_xyz = xyz_w[good_mask]
    good_rgb = rgb[good_mask]
    
    colored_xyz = np.hstack((good_xyz, good_rgb))
    
    return colored_xyz
def get_ar_marker_poses (rgb, depth, pc=None):
    """
    In order to run this, ar_marker_service needs to be running.
    """
    global getMarkers, req, ar_initialized
    
    if not ar_initialized:
        if rospy.get_name() == '/unnamed':
            rospy.init_node('ar_tfm')
        getMarkers = rospy.ServiceProxy("getMarkers", MarkerPositions)
        req = MarkerPositionsRequest()
        ar_initialized = True
    
    if pc is None:
        xyz = clouds.depth_to_xyz(depth, asus_xtion_pro_f)
        pc = ru.xyzrgb2pc(xyz, rgb, '/camera_frame')
    
    req.pc = pc
    
    marker_tfm = {}
    res = getMarkers(req)
    for marker in res.markers.markers:
        marker_tfm[marker.id] = conversions.pose_to_hmat(marker.pose.pose)

    return marker_tfm
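# Hedged usage sketch for the service-based lookup above: it requires a
# running roscore plus the ar_marker_service node, and an rgb/depth pair
# from any of the grabbers in this module.
#
#   rgb, depth = grabber.getRGBD()
#   tfms = get_ar_marker_poses(rgb, depth)
#   for mid in tfms:
#       print "marker %d at"%mid, tfms[mid][:3, 3]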
    def run (self):
        """
        Publishes the transforms stored.
        """
        while True:
            if self.publish_pc and self.cameras is not None:
                data=self.cameras.get_RGBD()
                for i,rgbd in data.items():
                    if rgbd['depth'] is None or rgbd['rgb'] is None:
                        print 'Warning: camera%d returned an empty rgb or depth frame; skipping.'%(i+1)
                        continue
                    camera_frame = 'camera%d_depth_optical_frame'%(i+1)
                    xyz = clouds.depth_to_xyz(rgbd['depth'], asus_xtion_pro_f)
                    pc = ru.xyzrgb2pc(xyz, rgbd['rgb'], camera_frame)
                    ar_cam = gmt.get_ar_marker_poses(None, None, pc=pc)
                    
                    self.pc_pubs[i].publish(pc)
                    marker_frame = 'ar_marker%d_camera%d'
                    for marker in ar_cam:
                        trans, rot = conversions.hmat_to_trans_rot(ar_cam[marker])
                        self.tf_broadcaster.sendTransform(trans, rot,
                                                          rospy.Time.now(),
                                                          marker_frame%(marker,i+1), 
                                                          camera_frame)
                    

            if self.ready:
                for parent, child in self.transforms:
                    trans, rot = self.transforms[parent, child]
                    self.tf_broadcaster.sendTransform(trans, rot,
                                                      rospy.Time.now(),
                                                      child, parent)
            time.sleep(1.0/self.rate)   # float division: an integer rate must not sleep(0)
def extract_cloud(depth, T_w_k):
    xyz_k = clouds.depth_to_xyz(depth, asus_xtion_pro_f)
    xyz_w = xyz_k.dot(T_w_k[:3,:3].T) + T_w_k[:3,3][None,None,:]
    
    valid_mask = (depth > 0)
    
    good_xyz = xyz_w[valid_mask]
    
    return good_xyz
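# The "xyz_k.dot(R.T) + t" broadcasting trick above applies a 4x4 rigid
# transform row-wise to an HxWx3 cloud. A self-contained numpy check with
# made-up values (only the identity being tested comes from the code above):
import numpy as np

T = np.eye(4)
T[:3, :3] = [[0., -1., 0.],
             [1.,  0., 0.],
             [0.,  0., 1.]]                  # 90-degree rotation about z
T[:3, 3] = [0.1, -0.2, 0.75]                 # arbitrary translation

xyz_k = np.random.rand(4, 5, 3)              # stand-in HxWx3 cloud
xyz_w = xyz_k.dot(T[:3, :3].T) + T[:3, 3][None, None, :]

ref = np.empty_like(xyz_k)                   # point-by-point reference
for i in range(4):
    for j in range(5):
        ref[i, j] = T[:3, :3].dot(xyz_k[i, j]) + T[:3, 3]
assert np.allclose(xyz_w, ref)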
    def get_pc(self, index=None):
        if index is None:   # "if not index" would wrongly discard index 0
            index = self.index
        rgb = cv2.imread(self.rgbs_fnames[index])
        depth = cv2.imread(self.depth_fnames[index], 2)
        xyz = clouds.depth_to_xyz(depth, asus_xtion_pro_f)
        pc = ru.xyzrgb2pc(xyz, rgb, frame_id=self.frame_id, use_time_now=False)

        return pc
def get_ar_transform_id (depth, rgb, idm=None):
    """
    In order to run this, ar_marker_service needs to be running.
    """
    req.pc = ru.xyzrgb2pc(clouds.depth_to_xyz(depth, asus_xtion_pro_f), rgb, '/camera_link')
    res = getMarkers(req)
    marker_tfm = {marker.id:conversions.pose_to_hmat(marker.pose.pose) for marker in res.markers.markers}

    if idm is None: return marker_tfm
    if idm not in marker_tfm: return None
    return marker_tfm[idm]
def get_tfm_of_camera(ind1,ind2):

    rgbd1_dir = '/home/sibi/sandbox/human_demos/hd_data/demos/sibi_demo/camera_#1'
    rgbd2_dir = '/home/sibi/sandbox/human_demos/hd_data/demos/sibi_demo/camera_#2'
    rgbs1fnames, depths1fnames, stamps1 = eu.get_rgbd_names_times(rgbd1_dir)
    rgbs2fnames, depths2fnames, stamps2 = eu.get_rgbd_names_times(rgbd2_dir)

    winname = 'abcd'

    ar1, ar2 = None, None
    rgb1 = cv2.imread(rgbs1fnames[ind1])
    assert rgb1 is not None
    depth1 = cv2.imread(depths1fnames[ind1],2)
    assert depth1 is not None
    
    cv2.imshow(winname, rgb1)
    cv2.waitKey(1)   # let the window actually render
    
    xyz1 = clouds.depth_to_xyz(depth1, asus_xtion_pro_f)
    pc1 = ru.xyzrgb2pc(xyz1, rgb1, frame_id='', use_time_now=False)
    ar_tfms1 = get_ar_marker_poses(None, None, pc=pc1)
    if ar_tfms1 and 1 in ar_tfms1:
        blueprint("Got markers " + str(ar_tfms1.keys()) + " at time %f"%stamps1[ind1])
        ar1 = ar_tfms1[1]
        
    rgb2 = cv2.imread(rgbs2fnames[ind2])
    assert rgb2 is not None
    depth2 = cv2.imread(depths2fnames[ind2],2)
    assert depth2 is not None
    xyz2 = clouds.depth_to_xyz(depth2, asus_xtion_pro_f)
    pc2 = ru.xyzrgb2pc(xyz2, rgb2, frame_id='', use_time_now=False)
    ar_tfms2 = get_ar_marker_poses(None, None, pc=pc2)
    if ar_tfms2 and 1 in ar_tfms2:
        blueprint("Got markers " + str(ar_tfms2.keys()) + " at time %f"%stamps1[ind2])
        ar2 = ar_tfms2[1]

    if ar1 is None or ar2 is None:
        return None
    return ar1.dot(nlg.inv(ar2))
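# Why ar1.dot(inv(ar2)) is the camera1-from-camera2 transform: ar1 = T_c1_m
# and ar2 = T_c2_m each map marker coordinates into a camera frame, so
# T_c1_c2 = T_c1_m * inv(T_c2_m). Self-contained numpy check, values made up:
import numpy as np

def _rigid_z(deg, t):
    a = np.deg2rad(deg)
    T = np.eye(4)
    T[:3, :3] = [[np.cos(a), -np.sin(a), 0.],
                 [np.sin(a),  np.cos(a), 0.],
                 [0., 0., 1.]]
    T[:3, 3] = t
    return T

T_c1_m = _rigid_z(30.0,  [0.5, 0.0, 1.0])    # marker seen from camera 1
T_c2_m = _rigid_z(-45.0, [0.2, 0.1, 0.9])    # marker seen from camera 2
T_c1_c2 = T_c1_m.dot(np.linalg.inv(T_c2_m))

p_m = np.array([0.1, 0.2, 0.0, 1.0])         # homogeneous point in marker frame
assert np.allclose(T_c1_m.dot(p_m), T_c1_c2.dot(T_c2_m.dot(p_m)))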
def extract_color(rgb, depth, mask, T_w_k, xyz_mask=None, use_outlier_removal=False, outlier_thresh=2, outlier_k=20):

    """
    extract red points and downsample
    """
        
    hsv = cv2.cvtColor(rgb, cv2.COLOR_BGR2HSV)
    h = hsv[:,:,0]
    s = hsv[:,:,1]
    v = hsv[:,:,2]
    
    h_mask = mask[0](h)
    s_mask = mask[1](s)
    v_mask = mask[2](v)
    color_mask = h_mask & s_mask & v_mask
    
    valid_mask = (depth > 0)
    
    xyz_k = clouds.depth_to_xyz(depth, asus_xtion_pro_f)
    xyz_w = xyz_k.dot(T_w_k[:3,:3].T) + T_w_k[:3,3][None,None,:]
    
    z = xyz_w[:,:,2]   
    z0 = xyz_k[:,:,2]

    # 'height' or distance from the camera
    height_mask = (xyz_w[:,:,2] > 0.6) & (xyz_w[:,:,2] < 1.1)
    if xyz_mask: height_mask = height_mask & xyz_mask(xyz_w)
        
    good_mask = color_mask & valid_mask & height_mask
    #good_mask = skim.remove_small_objects(good_mask,min_size=64)

    if DEBUG_PLOTS:
        cv2.imshow("z0",z0/z0.max())
        cv2.imshow("z",z/z.max())
        cv2.imshow("hue", h_mask.astype('uint8')*255)
        cv2.imshow("sat", s_mask.astype('uint8')*255)
        cv2.imshow("val", v_mask.astype('uint8')*255)
        cv2.imshow("final",good_mask.astype('uint8')*255)
        cv2.imshow("rgb", rgb)
        cv2.waitKey()

    good_xyz = xyz_w[good_mask]
    
    if use_outlier_removal:
        #good_xyz = clouds.remove_outliers(good_xyz, outlier_thresh, outlier_k)
        #good_xyz = remove_outlier_connected_component(good_xyz)
        good_xyz = clouds.cluster_filter(good_xyz, 0.03, int(0.4 * len(good_xyz)))

    return good_xyz
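# A sketch of the "mask" argument extract_color expects: three per-channel
# predicates over OpenCV HSV planes (H in [0,180), S and V in [0,255]). The
# thresholds below are illustrative guesses for red, not calibrated values.
red_hsv_mask = [lambda h: (h < 15) | (h > 165),   # hue near red (wraps at 0)
                lambda s: s > 80,                 # sufficiently saturated
                lambda v: v > 100]                # sufficiently bright
# good_xyz = extract_color(rgb, depth, red_hsv_mask, T_w_k)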
def calibrate_cb (rgb1, depth1, rgb2, depth2):
    """
    Find CB transform for each cam and then return a transform such that
    Tfm * cam1 = cam2.
    TODO: Find better method to get transform out of checkerboard.
    """

    rtn1, corners1 = cbt.get_corners_rgb(rgb1, cb_rows, cb_cols)
    xyz1 = clouds.depth_to_xyz(depth1, asus_xtion_pro_f)
    rtn2, corners2 = cbt.get_corners_rgb(rgb2, cb_rows, cb_cols)
    xyz2 = clouds.depth_to_xyz(depth2, asus_xtion_pro_f)
    
    if rtn1 != rtn2:
        print "Warning: One of the cameras do not see all the checkerboard points."
    elif rtn1 == 0:
        print "Warning: Not all the checkerboard points are seen by the cameras."
        
    if len(corners1) != len(corners2):
        print "Warning: Different number of points found by different cameras -- transform will be off."
    
    tfm1 = cbt.get_svd_tfm_from_points(cbt.get_xyz_from_corners(corners1, xyz1))
    tfm2 = cbt.get_svd_tfm_from_points(cbt.get_xyz_from_corners(corners2, xyz2))
    
    return np.linalg.inv(tfm1).dot(tfm2)
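# cbt.get_svd_tfm_from_points presumably fits a rigid transform to matched
# point sets via SVD (Kabsch). A generic, self-contained numpy sketch of the
# technique -- not necessarily the project's implementation:
import numpy as np

def kabsch(A, B):
    """Rigid 4x4 T such that T applied to rows of A best matches B."""
    ca, cb = A.mean(axis=0), B.mean(axis=0)
    H = (A - ca).T.dot(B - cb)
    U, _, Vt = np.linalg.svd(H)
    d = np.sign(np.linalg.det(Vt.T.dot(U.T)))    # guard against reflections
    R = Vt.T.dot(np.diag([1., 1., d])).dot(U.T)
    T = np.eye(4)
    T[:3, :3] = R
    T[:3, 3] = cb - R.dot(ca)
    return T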
def extract_hitch(rgb, depth, T_w_k, dir=None, radius=0.016, length=0.215, height_range=[0.70,0.80]):
    """
    template match to find the hitch in the picture, 
    get the xyz at those points using the depth image,
    extend the points down to aid in tps.
    """
    template_fname = osp.join(hd_data_dir, 'hitch_template.jpg')
    template = cv2.imread(template_fname)
    h,w,_    = template.shape
    res = cv2.matchTemplate(rgb, template, cv2.TM_CCORR_NORMED)
    _, _, _, max_loc = cv2.minMaxLoc(res)

    top_left     = max_loc
    bottom_right = (top_left[0]+w, top_left[1]+h)

    if DEBUG_PLOTS:
        cv2.rectangle(rgb,top_left, bottom_right, (255,0,0), thickness=2)
        cv2.imshow("hitch detect", rgb)
        cv2.waitKey()

    # now get all points in a window around the hitch loc:
    xyz_k = clouds.depth_to_xyz(depth, asus_xtion_pro_f)
    xyz_w = xyz_k.dot(T_w_k[:3,:3].T) + T_w_k[:3,3][None,None,:]
    
    hitch_pts = xyz_w[top_left[1]:bottom_right[1], top_left[0]:bottom_right[0],:]
    height_mask = (hitch_pts[:,:,2] > height_range[0]) & (hitch_pts[:,:,2] < height_range[1])
    hitch_pts = hitch_pts[height_mask]

    ## add pts along the rod:
    center_xyz = np.median(hitch_pts, axis=0)
    ang = np.linspace(0, 2*np.pi, 30)
    circ_pts = radius*np.c_[np.cos(ang), np.sin(ang)] + center_xyz[None,:2]
    circ_zs  = np.linspace(center_xyz[2], length+center_xyz[2], 30)

    rod_pts  = np.empty((0,3))
    for z in circ_zs:
        rod_pts = np.r_[rod_pts, np.c_[circ_pts, z*np.ones((len(circ_pts),1))] ]
    
    all_pts = np.r_[hitch_pts, rod_pts]
    if dir is None:
        return all_pts, center_xyz
    else:
        axis = np.cross([0,0,1], dir)
        angle = np.arccos(np.dot([0,0,1], dir))
        tfm = matrixFromAxisAngle(axis, angle)
        tfm[:3,3] = - tfm[:3,:3].dot(center_xyz) + center_xyz
        return np.asarray([tfm[:3,:3].dot(point) + tfm[:3,3] for point in all_pts]), center_xyz
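# The rod-point synthesis above stacks 30 rings of 30 points along z. The
# same np.c_/np.r_ idiom as a standalone snippet (center and radius made up):
import numpy as np

radius, length = 0.016, 0.215
center = np.array([0.4, 0.0, 0.75])
ang = np.linspace(0, 2 * np.pi, 30)
ring_xy = radius * np.c_[np.cos(ang), np.sin(ang)] + center[None, :2]
zs = np.linspace(center[2], center[2] + length, 30)
rod = np.r_[tuple(np.c_[ring_xy, z * np.ones((len(ring_xy), 1))] for z in zs)]
print rod.shape   # (900, 3)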
def checkerboard_loop ():
    
    rospy.init_node("checkerboard_tracker")
    
    grabber = cpr.CloudGrabber()
    grabber.startRGBD()
    
    pc_pub = rospy.Publisher("cb_cloud", PointCloud2)
    pose_pub = rospy.Publisher("cb_pose", PoseStamped)
    
    rate = rospy.Rate(10)
    
    while True:
        try:
            rgb, depth = grabber.getRGBD()
            xyz = clouds.depth_to_xyz(depth, asus_xtion_pro_f)
            
            rtn, corners = get_corners_rgb (rgb)
            points = get_xyz_from_corners(corners, xyz)
            
            print "Shape of points: ", points.shape
            
            if points.any():
                try:
                    tfm = get_svd_tfm_from_points(points)
                except:
                    continue
                print tfm
                
                pc = ros_utils.xyzrgb2pc(xyz, rgb, "/map")
                pose = PoseStamped()
                pose.pose = conversions.hmat_to_pose(tfm)
                
                pc.header.frame_id = pose.header.frame_id = "/map"
                pc.header.stamp = pose.header.stamp = rospy.Time.now()
                
                pose_pub.publish(pose)
                pc_pub.publish(pc)
                
            rate.sleep()   # sleep every iteration, whether or not points were found
            
        except KeyboardInterrupt:
            break
def grabcut(rgb, depth, T_w_k):
    xyz_k = clouds.depth_to_xyz(depth, asus_xtion_pro_f)
    xyz_w = xyz_k.dot(T_w_k[:3,:3].T) + T_w_k[:3,3][None,None,:]

    valid_mask = depth > 0

    from hd_rapprentice import interactive_roi as ir
    xys = ir.get_polyline(rgb, "rgb")
    xy_corner1 = np.clip(np.array(xys).min(axis=0), [0,0], [639,479])
    xy_corner2 = np.clip(np.array(xys).max(axis=0), [0,0], [639,479])
    polymask = ir.mask_from_poly(xys)
    #cv2.imshow("mask",mask)
        
    xy_tl = np.array([xy_corner1, xy_corner2]).min(axis=0)
    xy_br = np.array([xy_corner1, xy_corner2]).max(axis=0)

    xl, yl = xy_tl
    w, h = xy_br - xy_tl
    mask = np.zeros((h,w),dtype='uint8')    
    mask[polymask[yl:yl+h, xl:xl+w] > 0] = cv2.GC_PR_FGD
    print mask.shape
    #mask[h//4:3*h//4, w//4:3*w//4] = cv2.GC_PR_FGD

    tmp1 = np.zeros((1, 13 * 5))
    tmp2 = np.zeros((1, 13 * 5))    
    cv2.grabCut(rgb[yl:yl+h, xl:xl+w, :],mask,(0,0,0,0),tmp1, tmp2,10,mode=cv2.GC_INIT_WITH_MASK)

    mask = mask % 2
    #mask = ndi.binary_erosion(mask, utils_images.disk(args.erode)).astype('uint8')
    contours = cv2.findContours(mask,cv2.RETR_LIST,cv2.CHAIN_APPROX_NONE)[0]
    cv2.drawContours(rgb[yl:yl+h, xl:xl+w, :],contours,-1,(0,255,0),thickness=2)
    
    cv2.imshow('rgb', rgb)
    print "press enter to continue"
    cv2.waitKey()

    zsel = xyz_w[yl:yl+h, xl:xl+w, 2]
    mask = (mask%2==1) & np.isfinite(zsel)# & (zsel - table_height > -1)
    mask &= valid_mask[yl:yl+h, xl:xl+w]
    
    xyz_sel = xyz_w[yl:yl+h, xl:xl+w,:][mask.astype('bool')]
    return clouds.downsample(xyz_sel, .01)
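# cv2.grabCut mask semantics relied on above: 0=GC_BGD, 1=GC_FGD, 2=GC_PR_BGD,
# 3=GC_PR_FGD, so "mask % 2 == 1" keeps definite plus probable foreground.
# A tiny self-contained sketch on a synthetic image:
import cv2
import numpy as np

img = (np.random.rand(80, 80, 3) * 40).astype('uint8')    # dark noisy background
img[20:60, 20:60, 2] = 200                                 # reddish square
gc_mask = np.full((80, 80), cv2.GC_PR_BGD, dtype='uint8')
gc_mask[20:60, 20:60] = cv2.GC_PR_FGD                      # seed the square
bgd = np.zeros((1, 65), np.float64)                        # 13 * 5 model entries
fgd = np.zeros((1, 65), np.float64)
cv2.grabCut(img, gc_mask, (0, 0, 0, 0), bgd, fgd, 5, mode=cv2.GC_INIT_WITH_MASK)
print (gc_mask % 2 == 1).sum(), "foreground pixels"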
def visualize_ar ():
    """
    Visualize point clouds from openni grabber and AR marker transforms through ROS.
    """
    global getMarkers
    
    if rospy.get_name() == '/unnamed':
        rospy.init_node("visualize_ar")
    
    camera_frame = "camera_depth_optical_frame"
    
    tf_pub = tf.TransformBroadcaster()
    pc_pub = rospy.Publisher("camera_points", PointCloud2)
    sleeper = rospy.Rate(30)
    
    getMarkers = rospy.ServiceProxy("getMarkers", MarkerPositions)
    
    grabber = cpr.CloudGrabber()
    grabber.startRGBD()
    
    print "Streaming now."
    while True:
        try:
            r, d = grabber.getRGBD()
            ar_tfms = get_ar_transform_id (d, r)
            
            pc = ru.xyzrgb2pc(clouds.depth_to_xyz(d, asus_xtion_pro_f), r, camera_frame)
            pc_pub.publish(pc)
                        
            for i in ar_tfms:
                try:
                    trans, rot = conversions.hmat_to_trans_rot(ar_tfms[i])
                    tf_pub.sendTransform(trans, rot, rospy.Time.now(), "ar_marker_%d"%i, camera_frame)
                except:
                    print "Warning: Problem with AR transform."
                    pass
            
            sleeper.sleep()
        except KeyboardInterrupt:
            print "Keyboard interrupt. Exiting."
            break
def get_ar_marker_poses (rgb, depth):
    """
    In order to run this, ar_marker_service needs to be running.
    """
    if rospy.get_name() == '/unnamed':
        rospy.init_node('keypoints')
    
    getMarkers = rospy.ServiceProxy("getMarkers", MarkerPositions)
    
    #xyz = svi.transform_pointclouds(depth, tfm)
    xyz = clouds.depth_to_xyz(depth, asus_xtion_pro_f)
    pc = ru.xyzrgb2pc(xyz, rgb, '/base_footprint')
    
    req = MarkerPositionsRequest()
    req.pc = pc
    
    marker_tfm = {}
    res = getMarkers(req)
    for marker in res.markers.markers:
        marker_tfm[marker.id] = conversions.pose_to_hmat(marker.pose.pose)
    
    #print "Marker ids found: ", marker_tfm.keys()
    
    return marker_tfm
def get_markers_kinect():
    subprocess.call("killall XnSensorServer", shell=True)
    grabber = cloudprocpy.CloudGrabber()
    grabber.startRGBD()
    rgb, depth = grabber.getRGBD()
    clicks = []

    def mouse_click(event, x, y, flags, params):
        if event == cv.CV_EVENT_LBUTTONDOWN:
            clicks.append([x, y])

    # rewrite this
    def find_nearest(xy, indicators):
        nearest = []
        for (x, y) in xy:
            min_dist = 10000000
            closest_point = [0, 0]
            for j in xrange(len(indicators)):
                for i in xrange(len(indicators[0])):
                    dist = (x - i) ** 2 + (y - j) ** 2
                    if indicators[j][i] and dist < min_dist:
                        #print 'satisfied'
                        min_dist = dist
                        closest_point = [i, j]
            nearest.append(closest_point)
        return nearest
    
    def find_avg(points, clicks, cloud):
        avgPoints = []
        rad = 0.02
        r = 5
        for (x,y), point in zip(clicks,points):
            avg = np.array([0,0,0])
            num = 0.0
            for i in range(-r,r+1):
                for j in range(-r,r+1):
                    cloudPt = cloud[y+i, x+j,:];
                    if not np.isnan(cloudPt).any() and np.linalg.norm(point - cloudPt) < rad:
                        avg = avg + cloudPt
                        num += 1.0
            if num == 0.0:
                print "not found"
                avgPoints.append(point)
            else:
                print "found"
                avgPoints.append(avg / num)
        return avgPoints       
    
    def find_nearest_radius(points, clicks, cloud):
        markerPoints = []
        rad = 0.01
        r = 5
        for (x,y), point in zip(clicks,points):
            
            checkPoints = cloud[max(0,y-r):min(cloud.shape[0]-1,y+r), max(0,x-r):min(cloud.shape[1]-1, x+r),:]
            np.putmask(checkPoints, np.isnan(checkPoints), np.infty)
            dist = checkPoints - point[None,None,:]
            dist = np.sum((dist*dist), axis=2)
            yi,xi = np.unravel_index(np.argmin(dist), dist.shape)
            
            point = checkPoints[yi,xi]
            if np.isnan(point).any() or np.min(dist) > rad**2:   # dist holds squared distances
                print "No point found near clicked point: ", (x, y)
            else:
                markerPoints.append(point)
            
        return markerPoints

    hsv = cv2.cvtColor(rgb, cv2.COLOR_BGR2HSV)
    h = hsv[:,:,0]
    s = hsv[:,:,1]
    v = hsv[:,:,2]

    red_mask = (v>150) & ((h<10) | (h>150))# & (v > 200)# & (s > 100)
    #valid = depth*(depth > 0)
    xyz_k = clouds.depth_to_xyz(depth, asus_xtion_pro_f)
            
    #cv2.imshow("red",red_mask.astype('uint8')*255)
    cv2.imshow("red", red_mask.astype('uint8')*255)
    cv2.imshow("rgb", rgb)
    cv.SetMouseCallback("rgb", mouse_click, 0)
    print "press enter to continue"
    cv2.waitKey()
    
    clickPoints = []
    #get click points
    for (x, y) in clicks:
        clickPoints.append(np.copy(xyz_k[y][x]))
    
    red_mask3d = np.tile(np.atleast_3d(red_mask), [1,1,3])
    np.putmask (xyz_k, red_mask3d != 1, np.NAN)     

    markerPoints = find_nearest_radius(clickPoints, clicks, xyz_k)
    return markerPoints
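# find_nearest_radius above hinges on an argmin over squared distances plus
# np.unravel_index to recover pixel coordinates. The idiom standalone, with
# made-up data:
import numpy as np

window = np.random.rand(11, 11, 3)                 # stand-in for a cloud patch
target = np.array([0.5, 0.5, 0.5])
d2 = ((window - target[None, None, :]) ** 2).sum(axis=2)
yi, xi = np.unravel_index(np.argmin(d2), d2.shape)
print "closest point:", window[yi, xi], "at pixel", (xi, yi)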
def save_observations_rgbd(demo_type, demo_name, save_file=None):
    """
    Extract data from all sensors and store into demo.data file.
    """
    global setCalib
    # Temp file to show that data is already being extracted
    demo_dir        = osp.join(demo_files_dir, demo_type, demo_name)
    with open(osp.join(demo_dir, demo_names.extract_data_temp),'w') as fh: fh.write('Extracting...')
    
    calib_file_path = osp.join(demo_dir, demo_names.calib_name)
    bag_file        = osp.join(demo_dir, demo_names.bag_name)
    
    with open(osp.join(demo_dir, demo_names.camera_types_name)) as fh:
        camera_types = yaml.load(fh)
    with open(osp.join(demo_dir, demo_names.camera_models_name)) as fh:
        camera_models = yaml.load(fh)
    
    num_cameras = len(camera_types)

    video_dirs = {}
    for i in range(1, num_cameras + 1):
        video_dirs[i] = osp.join(demo_dir, demo_names.video_dir%i)

    c_frames = {}
    for i in range(1, num_cameras + 1):
        c_frames[i]= 'camera%i_link'%(i)
    hydra_frame = 'hydra_base'
    
    tfm_c1 = {i:None for i in range (1,num_cameras+1)}
    tfm_c1[1] = np.eye(4)
    tfm_c1_h = None

    with open(calib_file_path,'r') as fh: calib_data = cPickle.load(fh)
    bag = rosbag.Bag(bag_file)
    
    for tfm in calib_data['transforms']:
        if tfm['parent'] == c_frames[1] or tfm['parent'] == '/' + c_frames[1]:
            if tfm['child'] == hydra_frame or tfm['child'] == '/' + hydra_frame:
                tfm_c1_h = nlg.inv(tfm_link_rof).dot(tfm['tfm'])
            else:
                for i in range(2, num_cameras+1):
                    if tfm['child'] == c_frames[i] or tfm['child'] == '/' + c_frames[i]:
                        tfm_c1[i] = nlg.inv(tfm_link_rof).dot(tfm['tfm']).dot(tfm_link_rof)

    if tfm_c1_h is None or not all([tfm_c1[s] is not None for s in tfm_c1]):
        redprint("Calibration does not have required transforms")
        return

    if not calib_data.get('grippers'):
        redprint("Gripper not found.")
        return

    grippers = {}
    data = {}
    data['T_cam2hbase'] = tfm_c1_h
    for lr,gdata in calib_data['grippers'].items():
        gr = gripper_lite.GripperLite(lr, gdata['ar'], trans_marker_tooltip=gripper_trans_marker_tooltip[lr])
        gr.reset_gripper(lr, gdata['tfms'], gdata['ar'], gdata['hydra'])
        grippers[lr] = gr
        data[lr] ={'hydra':[],
                   'pot_angles':[],
                   'T_tt2hy': gr.get_rel_transform('tool_tip', gr.hydra_marker)}
        ## place-holder for AR marker transforms:
        for i in xrange(num_cameras):
            data[lr]['camera%d'%(i+1)] = []


    winname = 'cam_image'    
    cam_counts = []
    for i in range(1,num_cameras+1):
        if verbose:
            yellowprint('Camera%i'%i)
        if camera_types[i] == "rgbd":
            rgb_fnames, depth_fnames, stamps = eu.get_rgbd_names_times(video_dirs[i])
        else:
            rgb_fnames, stamps = eu.get_rgbd_names_times(video_dirs[i], depth = False)
        cam_count = len(stamps)
        cam_counts.append(cam_count)

        if camera_types[i] == "rgbd":
            for ind in rgb_fnames:
                rgb = cv2.imread(rgb_fnames[ind])
                
                if displayImages:
                    cv2.imshow(winname, rgb)
                    cv2.waitKey(1)

                assert rgb is not None
                depth = cv2.imread(depth_fnames[ind],2)
                assert depth is not None
                xyz = clouds.depth_to_xyz(depth, asus_xtion_pro_f)
                pc = ru.xyzrgb2pc(xyz, rgb, frame_id='', use_time_now=False)
                ar_tfms = get_ar_marker_poses(pc,track=True)
                if ar_tfms:
                    if verbose:
                        blueprint("Got markers " + str(ar_tfms.keys()) + " at time %f"%stamps[ind])

                    for lr,gr in grippers.items():
                        ar = gr.get_ar_marker() 
                        if ar in ar_tfms:
                            tt_tfm = gr.get_tooltip_transform(ar, np.asarray(ar_tfms[ar]))
                            data[lr]['camera%i'%i].append((tfm_c1[i].dot(tt_tfm),stamps[ind]))
        else:
#             if setCalib is None: 
#                 setCalib = rospy.ServiceProxy("setCalibInfo", SetCalibInfo)
#             reqCalib.camera_model = camera_models[i]
#             setCalib(reqCalib)
#             
#             if verbose:
#                 yellowprint("Changed camera calibration parameters to model %s"%camera_models[i])

            for ind in rgb_fnames:                
                rgb = cv.LoadImage(rgb_fnames[ind])
                
                if displayImages:
                    cv.ShowImage(winname, rgb)
                    cv.WaitKey(1)

                assert rgb is not None
                ar_tfms = get_ar_marker_poses(rgb,use_pc_service=False, model = camera_models[i], track=True)

                if ar_tfms:
                    if verbose:
                        blueprint("Got markers " + str(ar_tfms.keys()) + " at time %f"%stamps[ind])
    
                    for lr,gr in grippers.items():
                        ar = gr.get_ar_marker() 
                        if ar in ar_tfms:
                            tt_tfm = gr.get_tooltip_transform(ar, np.asarray(ar_tfms[ar]))
                            data[lr]['camera%i'%i].append((tfm_c1[i].dot(tt_tfm),stamps[ind]))


    found_hydra_data = False
    found_pot_data = False
    # If hydra_only.data file already exists, don't do extra work.
    hydra_data_file        = osp.join(demo_dir, demo_names.hydra_data_name)
    if osp.isfile(hydra_data_file):
        yellowprint ("%s already exists for %s. Getting hydra data from this info."%(demo_names.hydra_data_name,demo_name))
        with open(hydra_data_file,'r') as fh: hdata = cPickle.load(fh)
         
        for lr in hdata:
            if lr in 'lr':
                if hdata[lr].get('hydra'):
                    data[lr]['hydra'] = hdata[lr]['hydra']
                    found_hydra_data = True
                if hdata[lr].get('pot_angles'):
                    data[lr]['pot_angles'] = hdata[lr]['pot_angles']
                    found_pot_data = True


    if not found_hydra_data:
        if verbose:
            yellowprint('Hydra')
        lr_long = {'l':'left','r':'right'}
        for (_, msg, _) in bag.read_messages(topics=['/tf']):
            hyd_tfm = {}
            found = ''
            for tfm in msg.transforms:
                if found in ['lr','rl']:
                    break
                for lr in grippers:
                    if lr in found:
                        continue
                    elif tfm.header.frame_id == '/' + hydra_frame and tfm.child_frame_id == '/hydra_'+lr_long[lr]:
                        t,r = tfm.transform.translation, tfm.transform.rotation
                        trans = (t.x,t.y,t.z)
                        rot = (r.x, r.y, r.z, r.w)
                        hyd_tfm = tfm_c1_h.dot(conversions.trans_rot_to_hmat(trans, rot))
                        stamp = tfm.header.stamp.to_sec()
                        tt_tfm = grippers[lr].get_tooltip_transform(lr_long[lr], hyd_tfm)
                        data[lr]['hydra'].append((tt_tfm, stamp))
                        found += lr
            if found and verbose:
                blueprint("Got hydra readings %s at time %f"%(found,stamp))

    if not found_pot_data:
        if verbose:
            yellowprint('Potentiometer readings')
        for lr in grippers:
            for (_, msg, ts) in bag.read_messages(topics=['/%s_pot_angle'%lr]):
                angle = msg.data
                stamp = ts.to_sec()
                data[lr]['pot_angles'].append((angle, stamp))
                if verbose: 
                    blueprint("Got a %s potentiometer angle of %f at time %f"%(lr,angle, stamp))

    if verbose:
        for lr in 'lr':
            yellowprint("Gripper %s:"%lr)
            for i in range(num_cameras):
                yellowprint("Found %i transforms out of %i rgb/rgbd images from camera%i"%(len(data[lr]['camera%i'%(i+1)]), cam_counts[i], i+1))
            
            yellowprint("Found %i transforms from hydra"%len(data[lr]['hydra']))
            yellowprint("Found %i potentiometer readings"%len(data[lr]['pot_angles']))

    if save_file is None:
        save_file = demo_names.data_name
    save_filename = osp.join(demo_dir, save_file)

    with open(save_filename, 'w') as sfh: cPickle.dump(data, sfh)
    yellowprint("Saved %s."%demo_names.data_name)
    
    os.remove(osp.join(demo_dir, demo_names.extract_data_temp))
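# Layout of the saved demo.data dict, reconstructed from the code above
# (an informal sketch of the structure, not additional fields):
#
# data = {
#     'T_cam2hbase': 4x4 array,
#     'l': {'hydra':      [(tooltip 4x4, stamp), ...],
#           'pot_angles': [(angle, stamp), ...],
#           'T_tt2hy':    4x4 array,
#           'camera1':    [(tooltip 4x4, stamp), ...],
#           ...},
#     'r': {...same keys as 'l'...},
# }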
def try_smaller_pc():
    global getMarkers
    
    rospy.init_node('test_ar')
    subprocess.call("killall XnSensorServer", shell=True)
    
    grabber = cpr.CloudGrabber()
    grabber.startRGBD()
    
    getMarkers = rospy.ServiceProxy("getMarkers", MarkerPositions)
    
    class clickClass ():
        x = None
        y = None
        done = False
        
        def callback(self, event, x, y, flags, param):
            if self.done:
                return
            elif event == cv2.EVENT_LBUTTONDOWN:
                self.x = x
                self.y = y
                self.done = True
   
   
    win = 100   # half-width (px) of the search window; "range" would shadow the builtin
    
    try:
        while True:
            rgb, depth = grabber.getRGBD()
            xyz = clouds.depth_to_xyz(depth, asus_xtion_pro_f)
            
            click = clickClass() 
            print "Click on the point at the middle of the AR marker"
            
            cv2.imshow(WIN_NAME, rgb)
            cv2.setMouseCallback(WIN_NAME, click.callback)
            while not click.done:
                cv2.waitKey(100)
                
            x = click.x
            y = click.y
            
            bl = (max(0,x-win), max(0,y-win))
            tr = (min(rgb.shape[1]-1,x+win), min(rgb.shape[0]-1, y+win))
            
            cv2.circle(rgb, (x, y), 5, (0, 0, 255), -1)
            cv2.circle(rgb, tr, 5, (255, 0, 0), -1)
            cv2.circle(rgb, bl, 5, (0, 255, 0), -1)
            cv2.rectangle(rgb, bl, tr, (0, 0, 255), 1)
            cv2.imshow(WIN_NAME, rgb)    
            cv2.waitKey(3000)
            
            checkRGB = rgb[bl[1]:tr[1], bl[0]:tr[0], :]   # bl/tr are (x, y); rows index by y
            checkXYZ = xyz[bl[1]:tr[1], bl[0]:tr[0], :]
            
            tfms = get_ar_transform_id_unorganized(checkXYZ, checkRGB)
            print "The found AR markers are:"
            print tfms
    except KeyboardInterrupt:
        print "Done"
def save_init_ar(demo_type, demo_name, ar_marker, save_in_data_dir=True):
    """
    Extracts the initializing ar marker transform and stores it.
    """    
    # Temp file to show that data is already being extracted
    global setCalib
    demo_dir        = osp.join(demo_files_dir, demo_type, demo_name)
    calib_file_path = osp.join(demo_dir, demo_names.calib_name)
    
    with open(osp.join(demo_dir, demo_names.camera_types_name)) as fh:
        camera_types = yaml.load(fh)
    with open(osp.join(demo_dir, demo_names.camera_models_name)) as fh:
        camera_models = yaml.load(fh)
    
    cameras = range(1,len(camera_types)+1)
    c_frames = {}
    for i in cameras:
        c_frames[i]= 'camera%i_link'%(i)

    video_dirs = {}
    for i in cameras:
        video_dirs[i] = osp.join(demo_dir, demo_names.video_dir%i)
    
    tfm_c1 = {i:None for i in cameras}
    tfm_c1[1] = np.eye(4)

    with open(calib_file_path,'r') as fh: calib_data = cPickle.load(fh)

    for tfm in calib_data['transforms']:
        if tfm['parent'] == c_frames[1] or tfm['parent'] == '/' + c_frames[1]:
            for i in cameras:
                if tfm['child'] == c_frames[i] or tfm['child'] == '/' + c_frames[i]:
                    tfm_c1[i] = nlg.inv(tfm_link_rof).dot(tfm['tfm']).dot(tfm_link_rof)

    if not all([tfm_c1[s] is not None for s in tfm_c1]):
        redprint("Calibration does not have required transforms")
        return

    # We want the ar marker transform from all the relevant cameras
    data = {'tfms':{i:None for i in cameras}, 'marker':ar_marker}
    winname = 'cam_image'   # window used below; otherwise undefined in this function

    for i in cameras:
        if verbose:
            yellowprint('Camera%i'%i)
        if camera_types[i] == "rgbd":
            rgb_fnames, depth_fnames, stamps = eu.get_rgbd_names_times(video_dirs[i])
        else:
            rgb_fnames, stamps = eu.get_rgbd_names_times(video_dirs[i], depth = False)

        all_tfms = []
        cam_count = 0
        if camera_types[i] == "rgbd":
            for ind in rgb_fnames:
                rgb = cv2.imread(rgb_fnames[ind])
                
                if displayImages:
                    cv2.imshow(winname, rgb)
                    cv2.waitKey(1)

                assert rgb is not None
                depth = cv2.imread(depth_fnames[ind],2)
                assert depth is not None
                xyz = clouds.depth_to_xyz(depth, asus_xtion_pro_f)
                pc = ru.xyzrgb2pc(xyz, rgb, frame_id='', use_time_now=False)
                ar_tfms = get_ar_marker_poses(pc,track=True)
                if ar_tfms and ar_marker in ar_tfms:
                    all_tfms.append(tfm_c1[i].dot(ar_tfms[ar_marker]))
                    cam_count += 1
            
        else:
            if setCalib is None: 
                setCalib = rospy.ServiceProxy("setCalibInfo", SetCalibInfo)
            reqCalib.camera_model = camera_models[i]
            setCalib(reqCalib)

            if verbose:
                yellowprint("Changed camera calibration parameters to model %s"%camera_models[i])

            for ind in rgb_fnames:                
                rgb = cv.LoadImage(rgb_fnames[ind])
                
                if displayImages:
                    cv.ShowImage(winname, rgb)
                    cv.WaitKey(1)

                assert rgb is not None
                ar_tfms = get_ar_marker_poses(rgb,use_pc_service=False,track=True)
                if ar_tfms and ar_marker in ar_tfms:
                    all_tfms.append(tfm_c1[i].dot(ar_tfms[ar_marker]))
                    cam_count += 1
        
        if verbose:
            blueprint ("Got %i values for AR Marker %i from camera %i."%(cam_count, ar_marker, i))
        if cam_count > 0:
            data['tfms'][i] = utils.avg_transform(all_tfms)

    save_filename1 = osp.join(demo_dir, save_file)   # NOTE: save_file is assumed to be defined at module scope
    with open(save_filename1, 'w') as sfh: cPickle.dump(data, sfh)
    if save_in_data_dir:
        save_filename2 = osp.join(ar_init_dir, ar_init_demo_name)
        with open(save_filename2, 'w') as sfh: cPickle.dump(data, sfh)
    yellowprint("Saved %s."%save_file)