Example #2
    def __init__(self, mode, library):
        self.mode = mode
        self.library = library

        rospy.TopicSub('/videre/images', ImageArray, self.display_array)
        rospy.TopicSub('/videre/cal_params', String, self.display_params)
        rospy.TopicSub('/vo/tmo', Pose44, self.handle_corrections)

        self.viz_pub = rospy.Publisher("/overlay", Lines)
        self.vo_key_pub = rospy.Publisher("/vo/key", Frame)
        self.Tmo = Pose()

        self.mymarker1 = Marker(10)
        self.mymarkertrail = [Marker(11 + i) for i in range(10)]
        self.trail = []

        self.vis = Vis()
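This constructor uses the pre-1.0 rospy API: rospy.TopicSub was replaced by rospy.Subscriber long ago, and recent rospy versions warn when a Publisher is created without queue_size. As a hedged sketch only (ImageArray, Pose44, Lines and Frame are this project's own message types, assumed importable), the same wiring under current rospy would look roughly like:

    import rospy

    def __init__(self, mode, library):
        self.mode = mode
        self.library = library

        # rospy.Subscriber is the modern replacement for rospy.TopicSub
        rospy.Subscriber('/videre/images', ImageArray, self.display_array)
        rospy.Subscriber('/videre/cal_params', String, self.display_params)
        rospy.Subscriber('/vo/tmo', Pose44, self.handle_corrections)

        # queue_size is expected by current rospy Publishers
        self.viz_pub = rospy.Publisher('/overlay', Lines, queue_size=1)
        self.vo_key_pub = rospy.Publisher('/vo/key', Frame, queue_size=1)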
Example #4
        optimizer.step()

        if i % 30 == 0:
            status = 'Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch + 1, i * len(data), len(train_loader.dataset),
                100. * i / len(train_loader), loss.item())
            print(status)
            vis.update_train(x=torch.Tensor([epoch + i / len(train_loader)]),
                             y=torch.Tensor([loss.item()]),
                             status=status)


if __name__ == '__main__':
    # load data and init
    train_loader, test_loader = get_data_loader()
    vis = Vis('bba_race resnet')

    # model
    model = resnet50()
    input_size = model.fc.in_features
    model.fc = nn.Linear(input_size, 20)  # 20 output categories

    # optionally load an existing checkpoint
    # checkpoints = vars.checkpoint_path + 'res_net50_0.14.pt'
    checkpoints = ''
    if checkpoints:
        model.load_state_dict(torch.load(checkpoints))  # restore saved weights
    model.cuda()  # gpu

    # criterion, optimizer
    criterion = nn.CrossEntropyLoss().cuda()  # gpu
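The snippet cuts off right after the loss criterion, before the optimizer it uses at the top is created. As a hedged continuation only — not the original project's code; the SGD hyperparameters and epoch count are assumptions — the missing wiring would look something like:

    import torch.optim as optim

    optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.9)  # assumed
    for epoch in range(100):  # epoch count assumed
        model.train()
        for i, (data, target) in enumerate(train_loader):
            data, target = data.cuda(), target.cuda()
            optimizer.zero_grad()
            loss = criterion(model(data), target)
            loss.backward()
            optimizer.step()  # matches the loop body shown at the top of the fragment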
Example #5
File: get_vis.py Project: POFK/imag
UV_root = None
##############################init########################################
BL_num = int(sys.argv[1])
split = int(sys.argv[2])
#path_map = './gsm_80_H1024.npy'
path_map = './gsm_80_H2048.npy'
#path_map = './gsm_80.npy'
map = np.load(path_map)
map_nside = hp.get_nside(map)
freq = 80. * 10**6  # Hz (80 MHz)
D = 5.
BL_length = 800
#BL_length = 15
lam = 3. * 10**8 / freq
source = [0. / 180. * np.pi, 35. / 180. * np.pi]
Vis.__init__(source=source)
#==========
#split=32
#BL_num=256
#BL_num=128
#BL_num = 64
#==========
BL_ra = np.linspace(-0. / 6, 2 * np.pi, 1 * BL_num, endpoint=False)
BL_dec = np.linspace(-np.pi / 2, np.pi / 2, BL_num)

Vis.BaseLine(BL_ra, BL_dec)
BL_bl = BL_length * Vis.BL_xyz
BL_NUM = len(BL_bl)

#==========
#filename='_BL%d_0.5_allsky'%BL_NUM
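For scale: freq is 80 MHz, so lam = 3e8 / 80e6 ≈ 3.75 m, and BL_bl scales the unit baseline directions in Vis.BL_xyz by BL_length = 800, presumably metres. Note also that BL_ra covers the full 2π with endpoint=False while BL_dec spans −π/2 to π/2 inclusive, so the right-ascension samples never duplicate 0 and 2π but both declination endpoints are included.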
Example #6
class PicmapNode:
    def __init__(self, args):
        rospy.init_node('picmap_server')
        stereo_cam = camera.Camera(
            (389.0, 389.0, 89.23 * 1e-3, 323.42, 323.42, 274.95))

        self.vo = None
        self.transformer = pytf_swig.pyTransformer()
        self.transformer.setExtrapolationLimit(0.0)

        self.fd = FeatureDetectorFast(300)
        self.ds = DescriptorSchemeCalonder()
        self.pm = PictureMap(self.ds)

        #self.pub = rospy.Publisher("/picmap_pose", vslam.msg.Picmap)

        rospy.Subscriber('/stereo/raw_stereo',
                         stereo_msgs.msg.RawStereo,
                         self.handle_raw_stereo_queue,
                         queue_size=2,
                         buff_size=7000000)
        rospy.Subscriber('/amcl_pose',
                         geometry_msgs.msg.PoseWithCovarianceStamped,
                         self.handle_amcl_pose)
        rospy.Subscriber('/tf_message', tf.msg.tfMessage, self.handle_tf_queue)

        self.pmlock = threading.Lock()
        self.q = Queue.Queue(0)  # 0 means maxsize is infinite
        self.tfq = Queue.Queue(0)
        t = threading.Thread(target=self.worker)
        t.start()

    def start(self):
        rospy.spin()
        pickle.dump(self.pm, open("/tmp/final_pm.pickle", "w"))

    def handle_tf_queue(self, msg):
        print "  queued transform", min(
            [t.header.stamp.to_seconds() for t in msg.transforms])
        self.tfq.put(msg)

    def handle_tf(self, msg):
        #print "incoming transform", min([ t.header.stamp.to_seconds() for t in msg.transforms ])
        for transform in msg.transforms:
            ptf = py_transform_from_transform_stamped(transform)
            self.transformer.setTransform(ptf)
        #print self.transformer.allFramesAsString()
        #print "TF", "map->stereo_link getLatestCommonTime", self.transformer.getLatestCommonTime('map', 'stereo_link')

    def handle_raw_stereo_queue(self, msg):
        print "  queued picture  ", msg.header.stamp.to_seconds()
        self.q.put(msg)

    def worker(self):
        while True:
            # There are N messages in the queue.
            # Want to discard all but the most recent.
            while self.q.qsize() > 1:
                self.q.get()
            item = self.q.get()
            self.handle_raw_stereo(item)

    def handle_raw_stereo(self, msg):
        print "incoming picture  ", msg.header.stamp.to_seconds()

        size = (msg.left_info.width, msg.left_info.height)
        cam = camera.StereoCamera(msg.right_info)
        if self.vo is None:
            self.vo = VisualOdometer(cam,
                                     scavenge=False,
                                     inlier_error_threshold=3.0,
                                     sba=None,
                                     inlier_thresh=100,
                                     position_keypoint_thresh=0.2,
                                     angle_keypoint_thresh=0.15)
            self.keys = set()
            self.v = Vis('raw stereo')

        #pair = [Image.fromstring("L", size, i.uint8_data.data) for i in [ msg.left_image, msg.right_image ]]
        pair = [dcamImage(i) for i in [msg.left_image, msg.right_image]]
        af = SparseStereoFrame(pair[0],
                               pair[1],
                               feature_detector=self.fd,
                               descriptor_scheme=self.ds)
        af.t = msg.header.stamp.to_seconds()

        self.vo.handle_frame(af)

        self.v.show(msg.left_image.uint8_data.data, [])
        k = self.vo.keyframe
        if k.id not in self.keys:
            self.keys.add(k.id)
            picture = Picture(k.features(), k.descriptors())
            self.pm.newpic(k.t, cam, picture, True)

        if 0:  # disabled: also record pictures for non-key frames
            picture = Picture(af.features(), af.descriptors())
            self.pm.newpic(af.t, cam, picture, False)

        # Drain queued transforms until one is more than 1 s newer than
        # this image's stamp.
        while not self.tfq.empty():
            tfmsg = self.tfq.get()
            earliest = min(t.header.stamp.to_seconds() for t in tfmsg.transforms)
            if earliest > msg.header.stamp.to_seconds() + 1.0:
                break
            self.handle_tf(tfmsg)

        with self.pmlock:
            self.pm.resolve(self.transformer)

    def handle_amcl_pose(self, msg):
        if self.vo:
            print "handle_amcl_pose(", msg, ")"
            # "Happy" when AMCL's x/y position variances are both tight.
            happy = max(msg.pose.covariance[0], msg.pose.covariance[7]) < 0.003
            print "picmap node got amcl", msg.header.stamp.to_seconds(), msg.pose.covariance[0], msg.pose.covariance[7], "happy", happy
            with self.pmlock:
                self.pm.newLocalization(msg.header.stamp.to_seconds(), happy)
Example #7
    def plot(self):
        self.dialog = Vis(self.data)
        self.dialog.show()
Example #8
#model = Segnet(n_classes) #Fully Convolutional Networks
#model = U_Net(img_ch=3,output_ch=n_classes) #U Network
#model = R2U_Net(img_ch=3,output_ch=n_classes,t=2) #Residual Recurrent U Network, R2Unet (t=2)
#model = R2U_Net(img_ch=3,output_ch=n_classes,t=3) #Residual Recurrent U Network, R2Unet (t=3)
#model = RecU_Net(img_ch=3,output_ch=n_classes,t=2) #Recurrent U Network, RecUnet (t=2)
#model = ResU_Net(img_ch=3,output_ch=n_classes) #Residual U Network, ResUnet 
#model = DeepLabV3(n_classes, 'vgg') #DeepLabV3 VGG backbone
model = DeepLabV3(n_classes, 'resnet') #DeepLabV3 Resnet backbone

print('Evaluation logs for model: {}'.format(model.__class__.__name__))

model = nn.DataParallel(model, device_ids=num_gpu).to(device)
model_params = torch.load(os.path.join(expt_logdir, "{}".format(ckpt_name)))
model.load_state_dict(model_params) 

#Visualization of test data
test_vis = Vis(test_dst, expt_logdir, rows, cols)
#Metrics calculator for test data
test_metrics = Metrics(n_classes, testloader, test_split, device, expt_logdir)

model.eval()
with torch.no_grad():  # inference only; gradients are not needed at eval time
    for i, (inputs, labels) in enumerate(testloader):
        inputs = inputs.to(device)
        labels = labels.to(device)
        predictions = model(inputs)  # note: unused; Metrics/Vis below take the model directly

epoch = ckpt_name  # reuse the checkpoint name as the epoch tag for logging

test_metrics.compute(epoch, model)
test_metrics.plot_roc(epoch) 
test_vis.visualize(epoch, model)
Example #9
class VODemo:
    vo = None
    frame = 0

    def __init__(self, mode, library):
        self.mode = mode
        self.library = library

        rospy.TopicSub('/videre/images', ImageArray, self.display_array)
        rospy.TopicSub('/videre/cal_params', String, self.display_params)
        rospy.TopicSub('/vo/tmo', Pose44, self.handle_corrections)

        self.viz_pub = rospy.Publisher("/overlay", Lines)
        self.vo_key_pub = rospy.Publisher("/vo/key", Frame)
        self.Tmo = Pose()

        self.mymarker1 = Marker(10)
        self.mymarkertrail = [Marker(11 + i) for i in range(10)]
        self.trail = []

        self.vis = Vis()

    def display_params(self, iar):
        if not self.vo:
            cam = camera.VidereCamera(iar.data)
            print "cam.params", cam.params
            self.vo = VisualOdometer(cam)
            self.started = None
            if self.mode == 'learn':
                self.library = set()
            self.previous_keyframe = None
            self.know_state = 'lost'
            self.best_show_pose = None
            self.mymarker1.floor()

    def handle_corrections(self, corrmsg):
        print "GOT CORRECTION AT", time.time()
        Tmo_pose = Pose()
        Tmo_pose.fromlist(corrmsg.v)
        self.Tmo = Tmo_pose
        self.know_state = 'corrected'

    def display_array(self, iar):
        diag = ""
        af = None
        if self.vo:
            if not self.started:
                self.started = time.time()
            imgR = imgAdapted(iar.images[0])
            imgL = imgAdapted(iar.images[1])
            af = SparseStereoFrame(imgL, imgR)

            if 1:  # always-on block; debug toggle left in the original
                if self.mode == 'play':
                    pose = self.vo.handle_frame(af)
                elif self.mode == 'learn':
                    pose = self.vo.handle_frame(af)
                    if (af.id != 0) and (self.vo.inl < 80):
                        print "*** LOST TRACK ***"
                        #sys.exit(1)
                    self.library.add(self.vo.keyframe)
                else:
                    #diag = "best match %d from %d in library" % (max(probes)[0], len(self.library))
                    pass
                diag = "%d/%d inliers, moved %.1f library size %d" % (
                    self.vo.inl, len(af.kp), pose.distance(), len(self.library))

                if self.mode == 'play':
                    kf = self.vo.keyframe
                    if kf != self.previous_keyframe:
                        f = Frame()
                        f.id = kf.id
                        f.pose = Pose44(kf.pose.tolist())
                        f.keypoints = [
                            Keypoint(x, y, d) for (x, y, d) in kf.kp
                        ]
                        f.descriptors = [Descriptor(d) for d in kf.descriptors]
                        print "ASKING FOR MATCH AT", time.time()
                        self.vo_key_pub.publish(f)
                        self.previous_keyframe = kf
                        if kf.inl < 50 or self.vo.inl < 50:
                            self.know_state = 'lost'
                        else:
                            self.know_state = {
                                'lost': 'lost',
                                'uncertain': 'uncertain',
                                'corrected': 'uncertain'
                            }[self.know_state]

                result_pose = af.pose
                if self.mode == 'learn':
                    self.mymarker1.frompose(af.pose, self.vo.cam,
                                            (255, 255, 255))
                else:
                    if self.best_show_pose and self.know_state == 'lost':
                        inmap = self.best_show_pose
                    else:
                        Top = af.pose
                        Tmo = self.Tmo
                        inmap = Tmo * Top
                        if self.know_state != 'lost':
                            self.best_show_pose = inmap
                    if self.know_state != 'lost' or not self.best_show_pose:
                        color = {
                            'lost': (255, 0, 0),
                            'uncertain': (127, 127, 0),
                            'corrected': (0, 255, 0)
                        }[self.know_state]
                        self.mymarker1.frompose(inmap, self.vo.cam, color)
                        if 0:  # disabled: draw a short trail of recent poses
                            self.trail.append(inmap)
                            self.trail = self.trail[-10:]
                            for i, p in enumerate(self.trail):
                                self.mymarkertrail[i].frompose(p, color)
                #print af.diff_pose.xform(0,0,0), af.pose.xform(0,0,0)

                if self.frame > 5 and ((self.frame % 10) == 0):
                    inliers = self.vo.pe.inliers()
                    pts = [(1, int(x0), int(y0))
                           for ((x0, y0, d0), (x1, y1, d1)) in inliers]
                    self.vis.show(iar.images[1].data, pts)

                if False and self.vo.pairs != []:  # disabled: publish inlier overlay lines
                    ls = Lines()
                    inliers = self.vo.pe.inliers()
                    lr = "left_rectified"
                    ls.lines = [
                        Line(lr, 0, 255, 0, x0, y0 - 2, x0, y0 + 2)
                        for ((x0, y0, d0), (x1, y1, d1)) in inliers
                    ]
                    ls.lines += [
                        Line(lr, 0, 255, 0, x0 - 2, y0, x0 + 2, y0)
                        for ((x0, y0, d0), (x1, y1, d1)) in inliers
                    ]
                    rr = "right_rectified"
                    #ls.lines += [ Line(rr, 0,255,0,x0-d0,y0-2,x0-d0,y0+2) for ((x0,y0,d0), (x1,y1,d1)) in inliers]
                    #ls.lines += [ Line(rr, 0,255,0,x0-2-d0,y0,x0+2-d0,y0) for ((x0,y0,d0), (x1,y1,d1)) in inliers]
                    self.viz_pub.publish(ls)

                if (self.frame % 30) == 0:
                    took = time.time() - self.started
                    print "%4d: %5.1f [%f fps]" % (self.frame, took,
                                                   self.frame / took), diag
                self.frame += 1

        #print "got message", len(iar.images)
        #print iar.images[0].width
        if SEE:
            right = ut.ros2cv(iar.images[0])
            left = ut.ros2cv(iar.images[1])
            hg.cvShowImage('channel L', left)
            hg.cvShowImage('channel R', right)
            hg.cvWaitKey(5)

    def dump(self):
        print
        print self.vo.name()
        self.vo.summarize_timers()
        if self.mode == 'learn':
            print "DUMPING LIBRARY", len(self.library)
            f = open("library.pickle", "w")
            pickle.dump(self.vo.cam, f)
            db = [(af.id, af.pose, af.kp, af.descriptors)
                  for af in self.library]
            pickle.dump(db, f)
            f.close()
            print "DONE"
Example #10
        pred = self.net(img)
        _, pred = torch.max(pred.data, 1)
        pred = int(pred)
        class_name = self.classes[pred]
        return class_name


if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser(description='Train or run the image classifier.')
    parser.add_argument('--train', action='store_true')
    parser.add_argument('--epochs', type=int, default=100)
    args = parser.parse_args()
    print(args)

    vis = Vis(unnormalize)
    transform = transforms.Compose([
        transforms.ColorJitter(),
        transforms.RandomHorizontalFlip(),
        transforms.RandomVerticalFlip(),
        transforms.RandomAffine(0, [0.2, 0.2]),
        lambda x: np.asarray(x),
        normalize
    ])
    all_dataset = datasets.DatasetFolder('train', loader, ['png'], transform)
    trainset, testset = torch.utils.data.dataset.random_split(all_dataset, [len(all_dataset) - 500, 500])
    trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True, num_workers=2)
    testloader = torch.utils.data.DataLoader(testset, batch_size=32, shuffle=False, num_workers=2)
    vis.showbatch(next(iter(trainloader))[0])

    net = Net().cuda()
Example #11
def main():
    pass


if __name__ == '__main__':
    special_color = tuple(
        [int(item * 255) for item in colorsys.hsv_to_rgb(0.05, 0.7, 0.85)])

    # background_color = (247, 237, 226)
    # background_color_bgr = (226, 237, 247)
    background_color_bgr = background_color = (20, 20, 20)
    font_color = (240, 240, 240)
    # special_color = (88, 130, 135)

    v = Vis(special_color=special_color)
    w = Word()
    c = Canvas('framework_3')

    c.set_bg_color(background_color_bgr)
    c.set_font_color(font_color)
    c.set_special_color(special_color)
    c.add_layer(Layer(img=v.get(vis.dist_vis), alpha=1, title='Score changes'))
    c.add_layer(
        Layer(img=w.get('Sign-ups for the annual sports meet are about to '
                        'begin. Everyone is encouraged to register with the '
                        'sports committee member! There are many new events '
                        'this year, which will be explained in detail at the '
                        'class meeting.',
                        color=c.font_color,
                        font=c.font,
                        bg_color=background_color),
              alpha=1,
              title='Sports meet sign-up'))
Example #14
cam = None
# The stacked assignments below are how the original author switched between
# bag files; every value is overridden, and the command-line argument wins.
filename = "/u/prdata/videre-bags/loop2-color.bag"
filename = "/u/prdata/videre-bags/greenroom-2008-11-3-color.bag"
filename = "2008-11-04-09-55-12-topic.bag"
filename = "2008-11-04-14-44-56-topic.bag"
filename = "2008-11-05-14-35-11-topic.bag"
filename = "/u/prdata/videre-bags/vo1.bag"
filename = sys.argv[1]
framecounter = 0
oe_x = []
oe_y = []
first_pair = None
inliers = []

vis = Vis(filename)
wheel_pose = None
prev_wheel_pose = None
wheel_o = None
prev_wheel_o = None
wheel_p = None

angles = []
qangles = []
keys = set()
initfig = 0
ploton = 0

# turn this on to skip every other frame
flag_15Hz = 0
Example #15
import bge
import parking
from vis import Vis

if __name__ == "__main__":
    a, l = parking.load_json_array(parking.SIMPLE)
    o = bge.logic.getCurrentController().owner
    o['vis'] = Vis(a, l)
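Taken together, these examples show that Vis is not one shared library but a per-project visualization helper: constructors range from Vis() and Vis('raw stereo') to Vis(unnormalize), Vis(test_dst, expt_logdir, rows, cols), Vis(filename), Vis(special_color=special_color), Vis(self.data) and Vis(a, l), with method surfaces to match (show, update_train, showbatch, visualize, get, BaseLine). As a hedged illustration only — not any of the original projects' implementations — the show(image, points) interface the ROS examples rely on could be stubbed with OpenCV roughly like this (the raw-buffer layout and 640x480 frame size are assumptions):

    import numpy as np
    import cv2

    class Vis(object):
        # Stand-in for the show(image, points) calls in the ROS examples.
        def __init__(self, title='vis'):
            self.title = title

        def show(self, image, points, size=(640, 480)):
            # image: raw 8-bit luminance bytes (e.g. left_image.uint8_data.data)
            img = np.frombuffer(image, np.uint8).reshape(size[1], size[0])
            img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
            for (_, x, y) in points:  # points arrive as (flag, x, y) triples
                cv2.circle(img, (x, y), 3, (0, 255, 0), 1)
            cv2.imshow(self.title, img)
            cv2.waitKey(5)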