Example #1
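    # Picture-map server node: sets up the stereo camera model, feature
    # detector and descriptor scheme, subscribes to raw stereo, AMCL pose and
    # tf messages, and processes the incoming data on a background worker thread.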
    def __init__(self, args):
        rospy.init_node('picmap_server')
        stereo_cam = camera.Camera(
            (389.0, 389.0, 89.23 * 1e-3, 323.42, 323.42, 274.95))

        self.vo = None
        self.transformer = pytf_swig.pyTransformer()
        self.transformer.setExtrapolationLimit(0.0)

        self.fd = FeatureDetectorFast(300)
        self.ds = DescriptorSchemeCalonder()
        self.pm = PictureMap(self.ds)

        #self.pub = rospy.Publisher("/picmap_pose", vslam.msg.Picmap)

        rospy.Subscriber('/stereo/raw_stereo',
                         stereo_msgs.msg.RawStereo,
                         self.handle_raw_stereo_queue,
                         queue_size=2,
                         buff_size=7000000)
        rospy.Subscriber('/amcl_pose',
                         geometry_msgs.msg.PoseWithCovarianceStamped,
                         self.handle_amcl_pose)
        rospy.Subscriber('/tf_message', tf.msg.tfMessage, self.handle_tf_queue)

        self.pmlock = threading.Lock()
        self.q = Queue.Queue(0)  # 0 means maxsize is infinite
        self.tfq = Queue.Queue(0)
        t = threading.Thread(target=self.worker)
        t.start()
Example #2
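  # Visual-odometry node setup: subscribes to raw stereo frames and publishes
  # the estimated pose on /vo.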
  def __init__(self):
    rospy.Subscriber('/stereo/raw_stereo', stereo_msgs.msg.RawStereo, self.handle_raw_stereo, queue_size=2, buff_size=7000000)

    self.pub_vo = rospy.Publisher("/vo", deprecated_msgs.msg.VOPose)

    self.vo = None
    self.modulo = 0
    self.fd = FeatureDetectorFast(300)
    self.ds = DescriptorSchemeCalonder()
    # The two lines below are a fragment of a separate Harris feature-detector
    # helper; `frame` and `quality_level` are defined in that helper, not in
    # the __init__ above.
    min_distance = 10.0
    return VO.harris(frame.rawdata, frame.size[0], frame.size[1], 1000, quality_level, min_distance)

Example #3

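# Replay a bag file: rebuild the camera model from videre/cal_params (with a
# baseline correction) and configure the VisualOdometer variants to compare.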
for topic, msg, t in rosrecord.logplayer(filename):
  if rospy.is_shutdown():
    break

  if topic.endswith("videre/cal_params") and not cam:
    print msg.data
    cam = camera.VidereCamera(msg.data)
    (Fx, Fy, Tx, Clx, Crx, Cy) = cam.params
    Tx /= (7.30 / 7.12)                 # baseline adjustment
    cam = camera.Camera((Fx, Fy, Tx, Clx, Crx, Cy))

    vos = [
      VisualOdometer(cam, feature_detector = FeatureDetectorFast(), descriptor_scheme = DescriptorSchemeSAD(), sba = None),
      VisualOdometer(cam, feature_detector = FeatureDetectorMine(), inlier_error_threshold = 1.0, descriptor_scheme = DescriptorSchemeSAD(), sba = (3,10,10)),
      #VisualOdometer(cam, feature_detector = FeatureDetectorFast(), descriptor_scheme = DescriptorSchemeSAD(), sba = (3,8,10)),

      #VisualOdometer(cam, feature_detector = FeatureDetectorFast(), descriptor_scheme = DescriptorSchemeSAD()),
      #VisualOdometer(cam, feature_detector = FeatureDetectorFast(), descriptor_scheme = DescriptorSchemeSAD(), scavenge = True),
      #VisualOdometer(cam, feature_detector = FeatureDetectorFast(), descriptor_scheme = DescriptorSchemeSAD(), scavenge = True, inlier_thresh = 100),

      #VisualOdometer(cam, feature_detector = FeatureDetectorHarris(), descriptor_scheme = DescriptorSchemeSAD()),
      #VisualOdometer(cam, feature_detector = FeatureDetectorFast(), descriptor_scheme = DescriptorSchemeSAD()),
      #VisualOdometer(cam, feature_detector = FeatureDetector4x4(FeatureDetectorFast), descriptor_scheme = DescriptorSchemeSAD()),
    ]
    vo_x = [ [] for i in vos]
    vo_y = [ [] for i in vos]
    vo_u = [ [] for i in vos]
    vo_v = [ [] for i in vos]
Example #4
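# For each bag file given on the command line: rebuild the camera model, then
# run keypoint detection, disparity search and descriptor collection on every
# stereo frame, accumulating all observed disparities in all_ds.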
for filename in sys.argv[1:]:
    cam = None
    prev_frame = None
    framecounter = 0
    all_ds = []
    sos = numpy.array([.0, .0, .0])

    for topic, msg in rosrecord.logplayer(filename):
        if rospy.is_shutdown():
            break

        if topic.endswith("videre/cal_params") and not cam:
            cam = camera.VidereCamera(msg.data)

            vo = VisualOdometer(cam,
                                feature_detector=FeatureDetectorFast(),
                                descriptor_scheme=DescriptorSchemeSAD())

        if cam and topic.endswith("videre/images"):
            imgR = imgAdapted(msg.images[0])
            imgL = imgAdapted(msg.images[1])
            assert msg.images[0].label == "right_rectified"
            assert msg.images[1].label == "left_rectified"

            frame = SparseStereoFrame(imgL, imgR)
            vo.find_keypoints(frame)
            vo.find_disparities(frame)
            #frame.kp = [ (x,y,d) for (x,y,d) in frame.kp if d > 8]
            all_ds += [d for (x, y, d) in frame.kp]

            vo.collect_descriptors(frame)
Example #5
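# Optionally load a previously saved Skeleton (-l <file>) and resume visual
# odometry over a playlist of (cam, left image, right image, label) tuples.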
for o,a in optlist:
  if o == '-l':
    skel_load_filename = a

def playlist(args):
  for f in args:
    r = reader(f)
    for d in r.next():
      yield d + (f,)
    
inl_history = [0,0]
for cam,l_image,r_image,label in playlist(args):
  print framecounter
  if vos is None:
    vos = [
      VisualOdometer(cam, scavenge = False, feature_detector = FeatureDetectorFast(),
                          descriptor_scheme = DescriptorSchemeCalonder(),
                          inlier_error_threshold = 3.0, sba = None,
                          inlier_thresh = 100,
                          position_keypoint_thresh = 0.2, angle_keypoint_thresh = 0.15)
    ]
    vo_x = [ [] for i in vos]
    vo_y = [ [] for i in vos]
    vo_u = [ [] for i in vos]
    vo_v = [ [] for i in vos]
    trajectory = [ [] for i in vos]
    skel = Skeleton(cam)
    if skel_load_filename:
      skel.load(skel_load_filename)
      vos[0].num_frames = max(skel.nodes) + 1
      framecounter = max(skel.nodes) + 1
Example #6
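  # Drive a simulated stereo camera through a scene of random 8x8 sprites and
  # check that VisualOdometer recovers each expected pose, then verify that
  # independently constructed odometers do not share state.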
  def test_sim(self):
    # Test with a real-world Videre camera model (an 'ideal' camera entry is
    # left commented out below)
    camera_param_list = [
      # (200.0, 200.0, 3.00,  320.0, 320.0, 240.0),
      (389.0, 389.0, 1e-3 * 89.23, 323.42, 323.42, 274.95)
    ]
    def move_forward(i, prev):
      """ Forward 1 meter, turn around, Back 1 meter """
      if i == 0:
        return Pose(rotation(0,0,1,0), (0,0,0))
      elif i < 10:
        return prev * Pose(rotation(0,0,1,0), (0,0,.1))
      elif i < 40:
        return prev * Pose(rotation(math.pi / 30, 0, 1, 0), (0, 0, 0))
      elif i < 50:
        return prev * Pose(rotation(0,0,1,0), (0,0,.1))

    for movement in [ move_forward ]: # move_combo, move_Yrot ]:
      for cam_params in camera_param_list:
        cam = camera.Camera(cam_params)

        random.seed(0)
        def rr():
          return 2 * random.random() - 1.0
        model = [ (3 * rr(), 1 * rr(), 3 * rr()) for i in range(300) ]
        def rndimg():
          b = "".join(random.sample([ chr(c) for c in range(256) ], 64))
          return Image.fromstring("L", (8,8), b)
        def sprite(dst, x, y, src):
          try:
            dst.paste(src, (int(x)-4,int(y)-4))
          except:
            print "paste failed", x, y
        palette = [ rndimg() for i in model ]
        expected = []
        afs = []
        P = None
        for i in range(50):
          P = movement(i, P)
          li = Image.new("L", (640, 480))
          ri = Image.new("L", (640, 480))
          q = 0
          for (mx,my,mz) in model:
            pp = None
            pt_camera = (numpy.dot(P.M.I, numpy.array([mx,my,mz,1]).T))
            (cx,cy,cz,cw) = numpy.array(pt_camera).ravel()
            if cz > .100:
              ((xl,yl),(xr,yr)) = cam.cam2pixLR(cx, cy, cz)
              if 0 <= xl and xl < 640 and 0 <= yl and yl < 480:
                sprite(li, xl, yl, palette[q])
                sprite(ri, xr, yr, palette[q])
            q += 1
          expected.append(P)
          afs.append(SparseStereoFrame(imgStereo(li), imgStereo(ri)))

      vo = VisualOdometer(cam)
      for i,(af,ep) in enumerate(zip(afs, expected)):
        vo.handle_frame(af)
        if 0:
          print vo.pose.xform(0,0,0)
          print "expected", ep.M
          print "vo.pose", vo.pose.M
          print numpy.abs((ep.M - vo.pose.M))
        self.assert_(numpy.alltrue(numpy.abs((ep.M - vo.pose.M)) < 0.2))

      def run(vos):
        for af in afs:
          for vo in vos:
            vo.handle_frame(af)

      # Check that the pose estimators are truly independent

      v1 = VisualOdometer(cam, feature_detector = FeatureDetectorFast(), descriptor_scheme = DescriptorSchemeSAD(), inlier_error_threshold=1.0)
      v2 = VisualOdometer(cam, feature_detector = FeatureDetectorFast(), descriptor_scheme = DescriptorSchemeSAD(), inlier_error_threshold=2.0)
      v8 = VisualOdometer(cam, feature_detector = FeatureDetectorFast(), descriptor_scheme = DescriptorSchemeSAD(), inlier_error_threshold=8.0)
      v1a = VisualOdometer(cam, feature_detector = FeatureDetectorFast(), descriptor_scheme = DescriptorSchemeSAD(), inlier_error_threshold=1.0)
      run([v1])
      run([v2,v8,v1a])
      self.assert_(v1.pose.xform(0,0,0) == v1a.pose.xform(0,0,0))
      for a,b in [ (v1,v2), (v2,v8), (v1, v8) ]:
        self.assert_(a.pose.xform(0,0,0) != b.pose.xform(0,0,0))

      return
Example #7
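# Replay a bag file: rebuild the camera model from videre/cal_params (with a
# baseline correction) and set up the VisualOdometer to evaluate; qangles and
# keys are initialized here for use later in the script.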
qangles = []
keys = set()

for topic, msg, t in rosrecord.logplayer(filename):
  if rospy.is_shutdown():
    break

  if topic.endswith("videre/cal_params") and not cam:
    print msg.data
    cam = camera.VidereCamera(msg.data)
    (Fx, Fy, Tx, Clx, Crx, Cy) = cam.params
    Tx /= (7.30 / 7.12)                 # baseline adjustment 
    cam = camera.Camera((Fx, Fy, Tx, Clx, Crx, Cy))

    vos = [
      VisualOdometer(cam, scavenge = False, feature_detector = FeatureDetectorFast(), inlier_error_threshold = 3.0, sba = None),

      #VisualOdometer(cam, feature_detector = FeatureDetectorFast(), descriptor_scheme = DescriptorSchemeSAD(), sba = (3,8,10)),

      #VisualOdometer(cam, feature_detector = FeatureDetectorFast(), descriptor_scheme = DescriptorSchemeSAD()),
      #VisualOdometer(cam, feature_detector = FeatureDetectorFast(), descriptor_scheme = DescriptorSchemeSAD(), scavenge = True),
      #VisualOdometer(cam, feature_detector = FeatureDetectorFast(), descriptor_scheme = DescriptorSchemeSAD(), scavenge = True, inlier_thresh = 100),

      #VisualOdometer(cam, feature_detector = FeatureDetectorHarris(), descriptor_scheme = DescriptorSchemeSAD()),
      #VisualOdometer(cam, feature_detector = FeatureDetectorFast(), descriptor_scheme = DescriptorSchemeSAD()),
      #VisualOdometer(cam, feature_detector = FeatureDetector4x4(FeatureDetectorFast), descriptor_scheme = DescriptorSchemeSAD()),
    ]
    vo_x = [ [] for i in vos]
    vo_y = [ [] for i in vos]
    vo_u = [ [] for i in vos]
    vo_v = [ [] for i in vos]