Example #1
def main():
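    """Track a deformable object across a stream of RGBD point clouds,
    running one tracker EM step per frame. Relies on module-level names
    defined elsewhere in the script: args, np, clouds, cloudprocpy,
    tracking, miniviewer, initialize_tracked_obj and plot."""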
    viewer = miniviewer.make_viewer()

    # Define point cloud-reading stream, depending on input method
    if args.input is None:
        live = True
        print 'No input file specified, streaming clouds live'

        def stream_rgbd():
            import rospy
            from rapprentice import berkeley_pr2, PR2
            rospy.init_node("tracker", disable_signals=True)
            pr2 = PR2.PR2()
            grabber = cloudprocpy.CloudGrabber()
            grabber.startRGBD()
            while True:
                print 'Grabbing cloud...'
                rgb, depth = grabber.getRGBD()
                pr2.update_rave()
                T_w_k = berkeley_pr2.get_kinect_transform(pr2.robot)
                print 'done'
                yield rgb, depth, T_w_k

    else:
        live = False
        print 'Reading input file', args.input
        import h5py

        def stream_rgbd():
            h5 = h5py.File(args.input, 'r')
            rgbs, depths, T_w_k = h5['rgb'], h5['depth'], np.array(h5['T_w_k'])
            num_frames = len(rgbs)
            skip = 2     # process every other frame
            start = 160  # start partway into the recording
            for i_frame in range(start, num_frames, skip):
                print 'Processing frame %d/%d.' % (i_frame + 1, num_frames)
                yield rgbs[i_frame], depths[i_frame], T_w_k

    # Tracking loop: run EM iterations for each point cloud received
    tracked_obj = None
    for rgb, depth, T_w_k in stream_rgbd():
        mask_func = clouds.yellow_mask if args.obj_type == 'suture_pad' else clouds.red_mask
        cloud_xyz = clouds.extract_color(rgb, depth, T_w_k, mask_func)
        if len(cloud_xyz) == 0:
            print 'Filtered cloud is empty. Skipping frame.'
            continue

        # Initialization: on the first frame, create cloth and table
        if tracked_obj is None:
            init_handles = []
            tracked_obj = initialize_tracked_obj(viewer, init_handles,
                                                 cloud_xyz)
            print 'Initialization ok?'
            handles = []
            plot(viewer, handles, cloud_xyz, T_w_k, tracked_obj)
            #viewer.Idle()
            if args.gpu:
                from trackingpy import tracking_gpu
                tracker = tracking_gpu.GPUTracker(tracked_obj)
            else:
                tracker = tracking.Tracker(tracked_obj)

        tracker.set_input(cloud_xyz, depth, T_w_k)
        out = tracker.step(return_data=True)
        handles = []
        plot(viewer, handles, cloud_xyz, T_w_k, tracked_obj,
             out['occluded_in_depth_img'], out['occluded_by_model'],
             out['force_kd'])

        if not live:
            raw_input('Press enter for the next frame')
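
The loop above selects points by color through clouds.yellow_mask / clouds.red_mask, which are not shown here. A minimal sketch of what such a mask function might look like, assuming HSV thresholding with OpenCV (the threshold values are illustrative, not the library's actual ones):

import cv2
import numpy as np

def yellow_mask(rgb):
    # Threshold around yellow hues in HSV space; returns a boolean mask
    # with the same height/width as the input image.
    hsv = cv2.cvtColor(rgb, cv2.COLOR_RGB2HSV)
    lower = np.array([20, 100, 100])  # illustrative lower H, S, V bounds
    upper = np.array([35, 255, 255])  # illustrative upper H, S, V bounds
    return cv2.inRange(hsv, lower, upper) > 0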
Example #2
def main():
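  """Record RGBD frames from a Kinect-style grabber into an HDF5 file,
  or replay an existing recording (--view). Relies on module-level names
  defined elsewhere in the script: argparse, h5py, np, openravepy,
  trajoptpy, cv2, subprocess, cloudprocpy, PR2, berkeley_pr2, clouds,
  Globals, cmap and draw_ax."""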
  parser = argparse.ArgumentParser()
  parser.add_argument('file', type=str)
  parser.add_argument('--real_robot', action='store_true')
  parser.add_argument('--view', action='store_true')
  args = parser.parse_args()

  if args.view:
    print 'Opening file %s' % args.file
    f = h5py.File(args.file, 'r')
    rgbs = f['rgb']
    depths = f['depth']
    T_w_k = np.array(f['T_w_k'])
    Globals.env = openravepy.Environment()
    viewer = trajoptpy.GetViewer(Globals.env)

    num_frames = len(rgbs)
    for i in range(0, num_frames, 10):  # show every 10th frame
      print 'Showing frame %d/%d' % (i+1, num_frames)
      rgb, depth = rgbs[i], depths[i]
      # XYZ_k = clouds.depth_to_xyz(depth, berkeley_pr2.f)
      # XYZ_w = XYZ_k.dot(T_w_k[:3,:3].T) + T_w_k[:3,3][None,None,:]
      # handle = Globals.env.plot3(XYZ_w.reshape(-1,3), 2, rgb.reshape(-1,3)[:,::-1]/255.)
      xyz = clouds.extract_color(rgb, depth, T_w_k, clouds.yellow_mask)
      handles = []
      handles.append(Globals.env.plot3(xyz, 2, (1,0,0)))
      draw_ax(T_w_k, .1, Globals.env, handles)
      viewer.Idle()

    return

  if args.real_robot:
    import rospy
    rospy.init_node("recorder", disable_signals=True)
    Globals.pr2 = PR2.PR2()
    Globals.env = Globals.pr2.env
    Globals.robot = Globals.pr2.robot

    Globals.pr2.update_rave()
    T_w_k = berkeley_pr2.get_kinect_transform(Globals.robot)
  else:
    Globals.env = openravepy.Environment()
    T_w_k = np.eye(4)

  viewer = trajoptpy.GetViewer(Globals.env)
  #env.LoadData(make_table_xml(translation=[0, 0, -.05], extents=[1, 1, .05]))

  num_frames = 0
  rgbs, depths = [], []

  # Kill any stale OpenNI sensor server so the grabber can claim the device
  subprocess.call("killall XnSensorServer", shell=True)
  grabber = cloudprocpy.CloudGrabber()
  grabber.startRGBD()
  try:
    while True:
      rgb, depth = grabber.getRGBD()
      rgbs.append(rgb)
      depths.append(depth)

      cv2.imshow("rgb", rgb)
      cv2.imshow("depth", cmap[np.fmin((depth*.064).astype('int'), 255)])
      cv2.waitKey(30)

      num_frames += 1
      print 'Captured frame', num_frames
      viewer.Step()

  except KeyboardInterrupt:
    print 'Keyboard interrupt'

  print 'Writing to %s ...' % args.file
  out = h5py.File(args.file, 'w')
  out.create_dataset('rgb', data=rgbs, compression='gzip', chunks=(1, 256, 256, 3))
  out.create_dataset('depth', data=depths, compression='gzip', chunks=(1, 256, 256))
  out['T_w_k'] = T_w_k
  out.close()
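
The depth display indexes a module-level cmap array that the snippet does not define. A plausible sketch, assuming a 256-entry JET lookup table built with OpenCV (the original may construct it differently):

import numpy as np
import cv2

# 256x3 uint8 lookup table: index i -> BGR color of the JET colormap
cmap = cv2.applyColorMap(np.arange(256, dtype=np.uint8).reshape(-1, 1),
                         cv2.COLORMAP_JET).reshape(256, 3)

Indexing this table with the scaled depth image, as in cmap[np.fmin((depth*.064).astype('int'), 255)], yields an (H, W, 3) false-color image suitable for cv2.imshow.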