Example #1
class Demo:
  def __init__(self, source):
    self.cvim = None

    # These variables are internal:

    self.stereo_cam = source.cam()
    print "Camera is", self.stereo_cam
    self.connected = False
    self.snail_trail = []
    self.source = source
    self.inlier_history = []
    self.label = 0
    self.skel_dist_thresh = 0.1         # in meters

    # These variables can be tweaked:

    self.fd = FeatureDetectorFast(300)
#    self.fd = FeatureDetectorStar(500)
    self.ds = DescriptorSchemeCalonder()
    self.camera_preview = False
    self.vo = VisualOdometer(self.stereo_cam,
                             scavenge = False,
                             position_keypoint_thresh = 0.1,
                             angle_keypoint_thresh = 3*pi/180,
                             sba=None,
                             num_ransac_iters=500,
                             inlier_error_threshold = 3.0)
    self.skel = Skeleton(self.stereo_cam,
                         link_thresh = 100,
                         descriptor_scheme = self.ds,
                         optimize_after_addition = False)
    self.skel.node_vdist = 0
    self.running = -1                   # run forever

    if self.camera_preview:
      cv.NamedWindow("Camera")
      cv.MoveWindow("Camera", 0, 700)

  def handleFrame(self):
    r = False

    (w,h,li,ri) = self.source.getImage()

    self.f = SparseStereoFrame(li, ri,
                               disparity_range = 96,
                               feature_detector = self.fd,
                               descriptor_scheme = self.ds)

    self.vo.handle_frame(self.f)
    print self.vo.inl
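    # Keep a sliding window of recent inlier counts (the newest count plus up to 20 previous ones)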
    self.inlier_history = self.inlier_history[-20:] + [self.vo.inl]
    # If the VO inlier count falls below 20, we're not well-connected for this link
    if self.vo.inl < 20:
      self.connected = False
      self.f.pose = Pose()
      self.snail_trail = []

    # Add a frame to the graph if:
    #   it is frame zero, or
    #   every inlier count in the recent history exceeds 20, and either
    #     we are not connected (this starts a new subgraph), or
    #     we are connected and the motion since the last added frame exceeds skel_dist_thresh.

    add_to_graph = None
    if self.vo.keyframe.id == 0 and len(self.skel.nodes) == 0:
      add_to_graph = "Zero keyframe"
    elif min(self.inlier_history) > 20:
      if not self.connected:
        add_to_graph = "Unconnected - but now recovering good poses"
        # This is a new graph, so generate a new label
        self.label += 1
      else:
        relpose = ~self.last_added_pose * self.f.pose
        if relpose.distance() > self.skel_dist_thresh:
          add_to_graph = "Connected and distance thresh passed"

    if add_to_graph:
      self.skel.setlabel(self.label)
      self.skel.add(self.vo.keyframe, self.connected)
      print "ADD %d %s" % (self.f.id, add_to_graph)
      self.connected = True
      self.last_added_pose = self.f.pose
      self.snail_trail = []
#      self.ioptimize(10)


    self.snail_trail.append(self.f.pose)

    # Render the camera view using OpenCV, then load it into an OpenGL texture
    if True:
      if self.cvim is None:
        self.cvim = cv.CreateImage((w,h/2), cv.IPL_DEPTH_8U, 3)

      im = cv.CreateImage((w,h), cv.IPL_DEPTH_8U, 1)
      im2 = cv.CreateImage((w/2,h/2), cv.IPL_DEPTH_8U, 1)
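      # Downsample each eye to half size and pack them side by side into the BGR preview image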
      for i,src in enumerate([li, ri]):
        cv.SetData(im, str(src.tostring()), w)
        cv.Resize(im, im2)
        cv.SetImageROI(self.cvim, (i * w / 2, 0, w / 2, h / 2))
        cv.CvtColor(im2, self.cvim, cv.CV_GRAY2BGR)

      cv.ResetImageROI(self.cvim)

      def annotate(cvim):
        green = cv.RGB(0,255,0)
        red = cv.RGB(255, 0, 0)    # cv.RGB takes (r, g, b)
        def half(p):
          return (p[0] / 2, p[1] / 2)
        for (x,y,d) in self.f.features():
          cv.Circle(cvim, half((x, y)), 2, green)
          d = int(d)
          cv.Line(cvim, ((w+x-d)/2, y/2 - 2), ((w+x-d)/2, y/2 + 2), green)
        a_features = self.vo.keyframe.features()
        b_features = self.f.features()
        inliers = set([ (b,a) for (a,b) in self.vo.pe.inl])
        outliers = set(self.vo.pairs) - inliers
        for (a,b) in inliers:
          cv.Line(cvim, half(b_features[b]), half(a_features[a]), green)
        for (a,b) in outliers:
          cv.Line(cvim, half(b_features[b]), half(a_features[a]), red)

      annotate(self.cvim)
      glBindTexture(GL_TEXTURE_2D, demo.textures[2])
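      # NOTE: the texture size assumes 640x480 input images, i.e. a 640x240 packed preview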
      glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB8, 640, 240, 0, GL_RGB, GL_UNSIGNED_BYTE, self.cvim.tostring())

    if self.camera_preview:
      cv.ShowImage("Camera", self.cvim)
      if cv.WaitKey(10) == 27:
        r = True

    return r

  def anchor(self):
    """ Return the current pose of the most recently added skeleton node """
    node_id = max(self.skel.nodes)
    return self.skel.newpose(node_id)

  def optimize(self):
    self.skel.optimize(1000)

  def ioptimize(self,iters = 30):
    self.skel.ioptimize(iters)

  def report(self):
    print
    print
    print
    print "-" * 80
    print self.skel.edges
    print "-" * 80

  def pose(self):
    """ Return the current camera pose in the skeleton frame """
    if len(self.skel.nodes) == 0:
      return self.vo.pose
    else:
      return self.anchor() * (~self.snail_trail[0] * self.snail_trail[-1])

  def map_size(self):
    xyz = [self.skel.newpose(i).xform(0,0,0) for i in self.skel.nodes]
    x_extent = max([x for (x,y,z) in xyz]) - min([x for (x,y,z) in xyz])
    z_extent = max([z for (x,y,z) in xyz]) - min([z for (x,y,z) in xyz])
    return max(x_extent, z_extent)
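
The pose() method above composes the newest skeleton node's pose with the relative motion accumulated in snail_trail: anchor * (~trail[0] * trail[-1]). A minimal, self-contained sketch of that algebra, with 4x4 homogeneous numpy matrices standing in for the library's Pose class (~ becomes a matrix inverse, * a matrix product):

import numpy as np

def translation(x, y, z):
    # homogeneous transform translating by (x, y, z)
    T = np.eye(4)
    T[:3, 3] = (x, y, z)
    return T

anchor = translation(5.0, 0.0, 0.0)   # pose of the newest graph node
trail = [translation(1.0, 0.0, 0.0),  # camera pose when the node was added
         translation(3.0, 0.0, 0.0)]  # current camera pose

# relative motion since the node was added, re-rooted at the anchor
current = np.dot(anchor, np.dot(np.linalg.inv(trail[0]), trail[-1]))
print(current[:3, 3])                 # -> [ 7.  0.  0.]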
Example #2
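    # Fragment: replay a recorded stereo sequence frame by frame. Assumes vo (VisualOdometer),
    # vt (optional vocabulary tree), skel (optional Skeleton) and gt (pose list) exist already.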
    f = []
    for i in range(2100):  # range(1,146):
        dir = "/u/jamesb/ros/ros-pkg/vision/vslam/trial"
        L = Image.open("%s/%06dL.png" % (dir, i))
        R = Image.open("%s/%06dR.png" % (dir, i))
        nf = SparseStereoFrame(L, R)
        vo.setup_frame(nf)
        print i, "kp=", len(nf.kp)
        nf.id = len(f)
        if vt:
            vt.add(nf.lf, nf.descriptors)

        if skel:
            vo.handle_frame_0(nf)
            skel.add(vo.keyframe)
            vo.correct(skel.correct_frame_pose, nf)
            print len(gt), vo.inl
            gt.append(vo.pose.xform(0, 0, 0))
        else:
            pf = open("trial/%06d.pose.pickle" % i, "r")
            gt.append(pickle.load(pf))
            pf.close()

        if 1:  # drop references to per-frame image buffers so they can be freed
            del nf.rawdata
            del nf.lgrad
            del nf.rgrad
            del nf.lf
            del nf.rf
            del L
Example #3
                    scavenge=False,
                    sba=None,
                    inlier_error_threshold=3.0)
vo.num_frames = 0

ground_truth = []
started = time.time()

connected = False
for i in range(0, 800):
    print i
    (desired_pose, imL, imR) = getImage(i)
    ground_truth.append(desired_pose.xform(0, 0, 0))
    f = SparseStereoFrame(imL, imR, feature_detector=fd, descriptor_scheme=ds)
    vo.handle_frame(f)
    skel.add(vo.keyframe, connected)
    connected = True

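# Disabled one-shot check: localize a single query frame against the skeleton, then exit.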
if 0:
    (desired_pose, imL, imR) = getImage(408)
    ground_truth.append(desired_pose.xform(0, 0, 0))
    qf = SparseStereoFrame(imL, imR)
    skel.localization(qf)
    sys.exit(0)

print "nodes", skel.nodes

ended = time.time()
took = ended - started
print "Took", took, "so", 1000 * took / i, "ms/frame"
Example #4
class RoadmapServer:
    def __init__(self, args):
        rospy.init_node('roadmap_server')
        self.optimization_distance = rospy.get_param('~optimization_distance', 10)
        
        self.tf = TransformListener()
        stereo_cam = camera.Camera((389.0, 389.0, 89.23 * 1e-3, 323.42, 323.42, 274.95))
        self.skel = Skeleton(stereo_cam)
        # self.skel.node_vdist = 1
        if False:  # flip to True to resume from a previously saved skeleton (args[1])
          self.skel.load(args[1])
          self.skel.optimize()
          self.startframe = 100000
        else:
          self.startframe = 0

        self.frame_timestamps = {}
        self.vo = None

        self.pub = rospy.Publisher("roadmap", vslam.msg.Roadmap)

        time.sleep(1)
        #self.send_map(rospy.time(0))
        self.wheel_odom_edges = set()

        rospy.Subscriber('/wide_stereo/raw_stereo', stereo_msgs.msg.RawStereo, self.handle_raw_stereo, queue_size=2, buff_size=7000000)

    def send_map(self, stamp):

        if len(self.skel.nodes) < 4:
          return

        TG = nx.MultiDiGraph()
        for i in self.skel.nodes:
            TG.add_node(i)
        for (a,b,p,c) in self.skel.every_edge + list(self.wheel_odom_edges):
            TG.add_edge(a, b, (p, c))
        here = max(self.skel.nodes)

        # TG is the total graph, G is the local subgraph

        uTG = TG.to_undirected()
        print "uTG", uTG.nodes(), uTG.edges()
        close = set([here])
        for i in range(self.optimization_distance):
            close |= set(nx.node_boundary(uTG, close))
        print "close", close

        G = TG.subgraph(close)
        uG = uTG.subgraph(close)
        pg = TreeOptimizer3()

        def mk_covar(xyz, rp, yaw):
            return (1.0 / math.sqrt(xyz), 1.0 / math.sqrt(xyz), 1.0 / math.sqrt(xyz),
                    1.0 / math.sqrt(rp), 1.0 / math.sqrt(rp), 1.0 / math.sqrt(yaw))
        weak = mk_covar(9e10,3,3)
        strong = mk_covar(0.0001, 0.000002, 0.00002)

        pg.initializeOnlineOptimization()
        def pgadd(p, n, data):
            relpose, strength = data
            if strength == 0.0:
                cov = weak
            else:
                cov = strong
            pg.addIncrementalEdge(p, n, relpose.xform(0,0,0), relpose.euler(), cov)

        Gedges = G.edges(data = True)
        revmap = {}
        map = {}
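        # Renumber nodes in DFS preorder starting at the current node; each node after
        # the first is attached to an already-numbered neighbor by an incremental edge,
        # and the remaining (non-tree) edges are added afterwards.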
        for n in nx.dfs_preorder(uG, here):
            revmap[len(map)] = n
            map[n] = len(map)
            priors = [p for p in uG.neighbors(n) if p in map]
            if priors == []:
                print "START NODE", n, "as", map[n]
            else:
                print "NEXT NODE", n
                p = priors[0]
                if not G.has_edge(p, n):
                  n,p = p,n
                data = G.get_edge_data(p, n)[0]
                Gedges.remove((p, n, data))
                pgadd(map[p], map[n], data)
        for (p,n,d) in Gedges:
          pgadd(map[p], map[n], d)
        pg.initializeOnlineIterations()
        start_error = pg.error()
        for i in range(10):
            pg.iterate()
        print "Error from", start_error, "to", pg.error()

        pg.recomputeAllTransformations()
        print self.tf.getFrameStrings()
        target_frame = "base_link"
        t = self.tf.getLatestCommonTime("wide_stereo_optical_frame", target_frame)
        trans,rot = self.tf.lookupTransform(target_frame, "wide_stereo_optical_frame", t)
        xp = Pose()
        xp.fromlist(self.tf.fromTranslationRotation(trans,rot))

        roadmap_nodes = dict([ (n, (30,0,0)) for n in TG.nodes() ])
        nl = sorted(roadmap_nodes.keys())
        print "nl", nl
        for i in sorted(map.values()):
            xyz,euler = pg.vertex(i)
            p = from_xyz_euler(xyz, euler)
            pose_in_target = xp * p
            x,y,z = pose_in_target.xform(0,0,0)
            euler = pose_in_target.euler()
            print x,y,z, euler
            roadmap_nodes[revmap[i]] = (x, y, euler[2])

        roadmap_edges = []
        for (p,n) in set(TG.edges()):
            print p,n
            best_length = max([(confidence, pose.distance())
                               for (pose, confidence) in TG.get_edge_data(p, n).values()])[1]
            roadmap_edges.append((p, n, best_length))

        msg = vslam.msg.Roadmap()
        msg.header.stamp = stamp
        msg.header.frame_id = "base_link"
        msg.nodes = [vslam.msg.Node(*roadmap_nodes[n]) for n in nl]
        print "(p,n)", [ (p,n) for (p,n,l) in roadmap_edges ]
        msg.edges = [vslam.msg.Edge(nl.index(p), nl.index(n), l) for (p, n, l) in roadmap_edges]
        msg.localization = nl.index(here)
        self.pub.publish(msg)

        if 1:
            import matplotlib.pyplot as plt
            fig = plt.figure(figsize = (12, 6))
            plt.subplot(121)
            pos = nx.spring_layout(TG, iterations=1000)
            nx.draw(TG, pos,node_color='#A0CBE2')
            nx.draw_networkx_edges(G, pos, with_labels=False, edge_color='r', alpha=0.5, width=25.0, arrows = False)
            plt.subplot(122)
            nx.draw(G, pos = dict([(n, roadmap_nodes[n][:2]) for n in G.nodes()]))
            #plt.show()
            plt.savefig("/tmp/map_%d.png" % here)
 
    def handle_raw_stereo(self, msg):
        size = (msg.left_info.width, msg.left_info.height)
        if self.vo is None:
          cam = camera.StereoCamera(msg.right_info)
          self.fd = FeatureDetectorFast(300)
          self.ds = DescriptorSchemeCalonder()
          self.vo = VisualOdometer(cam, scavenge = False,
                              inlier_error_threshold = 3.0, sba = None,
                              inlier_thresh = 100,
                              position_keypoint_thresh = 0.2, angle_keypoint_thresh = 0.15)
          self.vo.num_frames = self.startframe
        pair = [Image.fromstring("L", size, i.uint8_data.data) for i in [ msg.left_image, msg.right_image ]]
        af = SparseStereoFrame(pair[0], pair[1], feature_detector = self.fd, descriptor_scheme = self.ds)
        self.vo.handle_frame(af)
        self.frame_timestamps[af.id] = msg.header.stamp
        
        if self.skel.add(self.vo.keyframe):
          print "====>", self.vo.keyframe.id, self.frame_timestamps[self.vo.keyframe.id]
          #print self.tf.getFrameStrings()
          #assert self.tf.frameExists("wide_stereo_optical_frame")
          #assert self.tf.frameExists("odom_combined")
          N = sorted(self.skel.nodes)
          for a,b in zip(N, N[1:]):
            if (a,b) in self.skel.edges:
              t0 = self.frame_timestamps[a]
              t1 = self.frame_timestamps[b]
              if self.tf.canTransformFull("wide_stereo_optical_frame", t0, "wide_stereo_optical_frame", t1, "odom_combined"):
                t,r = self.tf.lookupTransformFull("wide_stereo_optical_frame", t0, "wide_stereo_optical_frame", t1, "odom_combined")
                relpose = Pose()
                relpose.fromlist(self.tf.fromTranslationRotation(t,r))
                self.wheel_odom_edges.add((a, b, relpose, 1.0))
          self.send_map(msg.header.stamp)
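
The local-subgraph selection in send_map() above grows a neighborhood around the newest node one ring at a time with nx.node_boundary. A minimal, self-contained sketch of the same pattern (the path graph is only illustrative):

import networkx as nx

G = nx.path_graph(10)      # nodes 0..9 in a chain
close = set([5])           # start from the "current" node
for _ in range(2):         # grow two rings of neighbors
    close |= set(nx.node_boundary(G, close))
print(sorted(close))       # -> [3, 4, 5, 6, 7]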
Example #5
    vo.handle_frame(af)
    inl_history.append(vo.inl)
    inl_history = inl_history[-2:]
    af.connected = vo.inl > 7
    # Log keyframes into "pool_loop"
    if False and vo.keyframe.id not in keys:
      k = vo.keyframe
      Image.fromstring("L", (640,480), k.lf.tostring()).save("dump/%06dL.png" % len(keys))
      Image.fromstring("L", (640,480), k.rf.tostring()).save("dump/%06dR.png" % len(keys))
      print "saving frame", "id", k.id, "as key", len(keys), "inliers:", k.inl, "keypoints:", len(k.kp2d), len(k.kp)
      #vo.report_frame(k)
      keys.add(k.id)

    if max(inl_history) > 7:
      skel.setlabel(label)
      novel = skel.add(vo.keyframe, vo.keyframe.connected)
    x,y,z = vo.pose.xform(0,0,0)
    trajectory[i].append((x,y,z))
    vo_x[i].append(x)
    vo_y[i].append(z)
    x1,y1,z1 = vo.pose.xform(0,0,1)
    vo_u[i].append(x1 - x)
    vo_v[i].append(z1 - z)
  print framecounter, "kp", len(af.kp), "inliers:", vo.inl
  inliers.append(vo.inl)

  framecounter += 1

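  # Build a Pose from a ROS position/quaternion pair (quaternion in x, y, z, w order).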
  def ground_truth(p, q):
    return Pose(transformations.rotation_matrix_from_quaternion([q.x, q.y, q.z, q.w])[:3,:3],
                [p.x, p.y, p.z])