def get_kinect_transform(robot, sim=False):
    """
    Returns the transform from base_footprint to camera_depth_optical_frame.
    Call update_rave() first so the robot's link transforms are current!
    """
    if sim:
        # In simulation, chain the fixed base_footprint -> head transform with
        # the head -> kinect-optical-frame transform.
        return tfm_bf_head.dot(tfm_head_dof)

    # On the real robot, read the head-plate pose from OpenRAVE and chain it
    # with the head -> kinect-optical-frame transform.
    T_w_h = robot.GetLink("head_plate_frame").GetTransform()
    T_w_k = T_w_h.dot(tfm_head_dof)
    return T_w_k
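# Example usage (a sketch; assumes `pr2` is an OpenRAVE robot whose joint values
# have been synced via update_rave(), and `pt_kinect` is an (x, y, z) point):
#   T_w_k = get_kinect_transform(pr2)               # 4x4: base_footprint -> kinect optical frame
#   pt_base = T_w_k.dot(np.r_[pt_kinect, 1.0])[:3]  # map a kinect point into the base frame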

        ar_demo_file = osp.join(hd_data_dir, ar_init_dir, ar_init_demo_name)
        with open(ar_demo_file, 'rb') as fh:
            ar_demo_tfms = cPickle.load(fh)
        # Use camera 1 as the default; average the AR-marker transforms seen by
        # the selected cameras.
        ar_marker_cameras = [1]
        ar_demo_tfm = avg_transform([ar_demo_tfms['tfms'][c] for c in ar_demo_tfms['tfms']
                                     if c in ar_marker_cameras])
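        # Rough sketch of what averaging rigid transforms involves (hypothetical;
        # the real avg_transform helper lives elsewhere in this codebase): average
        # the translations arithmetically and the rotations via a renormalized mean
        # quaternion, which is a fair approximation when the inputs agree closely:
        #   t_avg = np.mean([T[:3, 3] for T in tfms], axis=0)
        #   q_avg = np.mean([quat_from_rot(T[:3, :3]) for T in tfms], axis=0)  # quat_from_rot: hypothetical helper
        #   q_avg /= np.linalg.norm(q_avg)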

        # Get the AR-marker transform observed by the PR2 at playback time
        # (default playback file):
        ar_run_file = osp.join(hd_data_dir, ar_init_dir, ar_init_playback_name)
        with open(ar_run_file, 'rb') as fh:
            ar_run_tfms = cPickle.load(fh)
        ar_run_tfm = ar_run_tfms['tfm']

        # Transform that moves the demo points approximately into the PR2's frame:
        # a rough demo_camera -> head-kinect transform, valid because both cameras
        # observed the same AR marker on the same table.
        init_tfm = ar_run_tfm.dot(np.linalg.inv(ar_demo_tfm))
        # Premultiply by base_footprint -> kinect so init_tfm maps demo-camera
        # points all the way into the PR2's base_footprint frame.
        init_tfm = tfm_bf_head.dot(tfm_head_dof).dot(init_tfm)
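        # Sanity-check sketch (hypothetical): a homogeneous point p_demo expressed
        # in the demo camera frame should now land near its true location in the
        # PR2's base_footprint frame:
        #   p_base = init_tfm.dot(p_demo)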
        
        
        seg_info = hdf[demo_name][annotations[0]["name"]]
        if ref_image is None:
            rgb_image = np.asarray(seg_info["rgb"])
        else:
            # Match the demo image's colors to the reference image so that
            # color-dependent processing behaves consistently across demos.
            rgb_image = color_match.match(ref_image, np.asarray(seg_info["rgb"]))
        
        # RANSAC-based alternative:
        # table_cloud0, table_plane0 = cloud_proc_mod.extract_table_ransac(rgb_image, np.asarray(seg_info["depth"]), np.eye(4))
        table_cloud, table_plane = cloud_proc_mod.extract_table_pca(rgb_image, np.asarray(seg_info["depth"]), np.eye(4))
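        # The PCA variant presumably fits the dominant plane by taking the
        # eigenvector of the point covariance with the smallest eigenvalue as the
        # table normal (a sketch of the standard technique, not necessarily
        # cloud_proc_mod's exact implementation):
        #   centered = pts - pts.mean(axis=0)
        #   eigvals, eigvecs = np.linalg.eigh(centered.T.dot(centered))
        #   normal = eigvecs[:, 0]   # column for the smallest eigenvalue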
                        
        
        if args.offline:
            track_video_frames_offline(rgbd_dir, np.eye(4), init_tfm, ref_image, None)
        else: