Example #1
import time

import numpy as np
import rospy

# Detector, Transformer, ParticleFilter, HParticle, metric, PFViz and
# numItersPerSample come from this project's own modules (not shown here).


def main():
    rospy.init_node("particle_based_tracking")

    detector = Detector(visualize=False, modelfileName=rospy.get_param('~model'))
    transformer = Transformer("transformer")
    pf = ParticleFilter(500, HParticle, metric, explorationFactor=0.1, noiseFactor=0.05, averageType="weighted")
    pf.generateParticles()
    viz = PFViz(pf, "/odom", "myViz")

    history = []

    while not rospy.is_shutdown():
        try:
            detector.getImage()
            detector.processImage()
            pixels = detector.centroids[:]
            if pixels:

                startPoint, endPoints = transformer.transform(pixels)
                rospy.loginfo(len(startPoint))
                rospy.loginfo(np.asarray(endPoints).shape)
                history.append(tuple((startPoint[:-1], np.asarray(endPoints)[:, :-1])))
                T0 = time.time()
                for _ in range(0, numItersPerSample):
                    print("Updating particle filter", end=" ")

                    print("\tmeasuring", end=" ")
                    t0 = time.time()
                    pf.measureParticles(history)
                    t1 = time.time()
                    print("dt=%f" % (t1 - t0), end=" ")

                    print("\tweighting", end=" ")
                    t0 = time.time()
                    pf.calculateWeights()
                    t1 = time.time()
                    print("dt=%f" % (t1 - t0), end=" ")

                    print("\tpredicting", end=" ")
                    t0 = time.time()
                    prediction = pf.predict()
                    t1 = time.time()
                    print("dt=%f" % (t1 - t0), end=" ")

                    viz.update(history[-1])

                    print("\tresampling", end=" ")
                    t0 = time.time()
                    pf.resample()
                    t1 = time.time()
                    print("dt=%f" % (t1 - t0), end=" ")

                    pf.update(None)

                    print()
                T1 = time.time()
                print("Total particle filter update time %f" % (T1 - T0))
        except KeyboardInterrupt:
            break
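Note: pf.predict() above returns an estimate under averageType="weighted". As a point of reference, here is a minimal sketch of what such a weighted-average prediction computes; the states and weights arrays are hypothetical stand-ins for whatever HParticle actually stores:

import numpy as np

def weighted_prediction(states, weights):
    # Weighted mean of the particle states; with uniform weights this
    # reduces to a plain average.
    weights = np.asarray(weights, dtype=float)
    return np.average(np.asarray(states, dtype=float), axis=0, weights=weights)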
Example #2
    count = 0
    for m in measurements:
        count += 1
        if not pf.initialized:
            # Initialize the particle set using GNSS measurement.
            pf.initialize(m['gnss_x'], m['gnss_y'], m['gnss_theta'], *pos_std)
        else:
            # Prediction step
            pf.predict(delta_t, m['previous_velocity'],
                       m['previous_yawrate'], *pos_std)
        # Feed the particle filter with observations to update the weights.
        observations = [{'x': x, 'y': y}
                        for (x, y) in zip(m['measurement_x'], m['measurement_y'])]
        pf.update_weights(sensor_range, *landmark_std, observations, Map)
        # Resample particles to capture posterior belief distribution.
        pf.resample()
        # Select the best (highest weight) particle;
        #   we will assume that this represents the vehicle's position.
        particle = pf.get_best_particle()
        # Print for debugging purposes.
        print("[%d] %f, %f" % (count, particle['x'], particle['y']))
        # Collect data for post-mortem plotting.
        graph.append({
            'position': (particle['x'], particle['y']),
            'particles': [(p['x'], p['y']) for p in pf.particles],
            'landmarks': [(Map[l]['x'], Map[l]['y'])
                          for l in particle['assoc']],
        })
    # Go plot the results.
    plot_2D(graph)
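None of the examples on this page show what pf.resample() does internally. Below is a minimal sketch of systematic (low-variance) resampling, the scheme particle filters typically use for this step; particles and weights are hypothetical stand-ins for the filter's internal state:

import numpy as np

def systematic_resample(particles, weights):
    # Draw N indices in proportion to the weights using a single random
    # offset, which keeps the variance of the resampling step low.
    n = len(weights)
    weights = np.asarray(weights, dtype=float)
    weights = weights / weights.sum()
    positions = (np.random.rand() + np.arange(n)) / n
    cumulative = np.cumsum(weights)
    cumulative[-1] = 1.0  # guard against floating-point round-off
    indices = np.searchsorted(cumulative, positions)
    return np.asarray(particles)[indices], np.full(n, 1.0 / n)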
Example #3
    def draw_pred_gif_old(self, full_getter, p=1.0, use_pf=False, sim_config=None,
                          use_stepper=False, folder_plots=FOLDER_PLOTS, tag=0,
                          normalize=False, nice_start=True):
        ep_images, poses = full_getter()
        ep_images = ep_images[0, ...].reshape((1,) + ep_images.shape[1:])
        ep_images_masked, removed_percepts = self.mask_percepts(ep_images, p, return_indices=True)
        # net_preds = self.pred_ae.predict(ep_images_masked[:, 0:-SERIES_SHIFT, ...])
        net_preds = self.pred_ae.predict(ep_images_masked[:, :, ...])

        # stepper predictions
        stepper_pred = []
        if use_stepper:
            self.stepper.reset_states()
            for t in range(EP_LEN-SERIES_SHIFT):
                im = ep_images_masked[:, t, ...]
                stepper_pred.append(self.stepper.predict(im))

        pf_pred = []
        if use_pf:
            pf = ParticleFilter(sim_config, n_particles=4000, nice_start=nice_start)
            for t in range(EP_LEN-SERIES_SHIFT):
                if not removed_percepts[t]:
                    pose = poses[0, t, 0, :]
                    pf.update(pose)
                    pf.resample()

                # add noise only if next percept is available
                # if t+1 < EP_LEN-SERIES_SHIFT and not removed_percepts[t+1]:
                #     pf.resample()
                    # pf.add_noise()

                pf.predict()
                pf_pred.append(pf.draw())

        # create header with labels
        col = np.zeros((HEADER_HEIGHT, 1))

        labels = []
        labels.append(create_im_label('Ob'))
        labels.append(create_im_label('GT'))
        labels.append(create_im_label('AE'))
        if use_stepper:
            labels.append(create_im_label('ST'))
        if use_pf:
            labels.append(create_im_label('PF'))

        header = [col]
        for label in labels:
            header.append(label)
            header.append(col)

        header = np.concatenate(header, axis=1)

        # combine predictions
        col = np.ones((IM_HEIGHT, 1))
        frames = []
        for t in range(EP_LEN - SERIES_SHIFT):
            images = []
            images.append(ep_images_masked[0, t+SERIES_SHIFT, :, :, 0])
            images.append(ep_images[0, t+SERIES_SHIFT, :, :, 0])
            if normalize:
                net_preds[0, t, :, :, 0] /= np.max(net_preds[0, t, :, :, 0])
            images.append(net_preds[0, t, :, :, 0])
            if use_stepper:
                if normalize:
                    stepper_pred[t][0, :, :, 0] /= np.max(stepper_pred[t][0, :, :, 0])
                images.append(stepper_pred[t][0, :, :, 0])

            if use_pf:
                if normalize:
                    pf_pred[t][:, :, 0] /= np.max(pf_pred[t][:, :, 0])
                images.append(pf_pred[t][:, :, 0])

            table = [col]
            for image in images:
                table.append(image)
                table.append(col)

            frame = np.concatenate(table, axis=1)

            # print(frame.shape)
            width = frame.shape[1]
            row = np.ones((1, width))
            frame = np.concatenate([header, frame, row], axis=0)

            frames.append(frame)

        fpath = '{}/predictions-{}.gif'.format(folder_plots, tag)
        imageio.mimsave(fpath, frames)
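Examples #3 and #4 both rely on self.mask_percepts(ep_images, p, return_indices=True), which is not shown. A plausible sketch, written here as a free function, that zeroes a random fraction p of the time steps and reports which ones were removed (the internals are an assumption, not the original implementation):

import numpy as np

def mask_percepts(ep_images, p, return_indices=False):
    # ep_images has shape (batch, time, height, width, channels).
    # Zero out roughly a fraction p of the time steps to simulate
    # missing percepts; optionally report which steps were removed.
    masked = ep_images.copy()
    removed = np.random.rand(ep_images.shape[1]) < p
    masked[:, removed, ...] = 0
    return (masked, removed) if return_indices else masked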
Example #4
    def draw_pred_gif(self, full_getter, p=1.0, use_pf=False, sim_config=None, use_stepper=False,
                      folder_plots=FOLDER_PLOTS, tag=0, normalize=False):

        ep_images, poses, eps_vels = full_getter()
        ep_images = ep_images[0, ...].reshape((1,) + ep_images.shape[1:])
        ep_images_masked, removed_percepts = self.mask_percepts(ep_images, p, return_indices=True)
        # net_preds = self.pred_ae.predict(ep_images_masked[:, 0:-SERIES_SHIFT, ...])
        net_preds = self.pred_ae.predict(ep_images_masked[:, :, ...])

        # stepper predictions
        # stepper_pred = []
        # if use_stepper:
        #     self.stepper.reset_states()
        #     for t in range(EP_LEN-SERIES_SHIFT):
        #         im = ep_images_masked[:, t, ...]
        #         stepper_pred.append(self.stepper.predict(im))

        pf_pred = []
        if use_pf:
            pf = ParticleFilter(sim_config, n_particles=3000)
            init_poses = poses[0][0]
            init_vels = eps_vels[0][0]
            pf.warm_start(init_poses, init_vels)
            for t in range(EP_LEN-SERIES_SHIFT):
                if not removed_percepts[t]:
                    measurements = poses[0][t]
                    # print(measurements)
                    pf.update(measurements)
                    pf.resample()

                pf_pred.append(pf.draw())
                pf.predict()

        # combine predictions
        percepts = []
        truths = []
        pae_preds = []
        pf_preds = []

        pae_losses = []
        pf_losses = []

        for t in range(EP_LEN - SERIES_SHIFT):
            percepts.append(ep_images_masked[0, t+SERIES_SHIFT, :, :, 0])
            truths.append(ep_images[0, t+SERIES_SHIFT, :, :, 0])
            pae_preds.append(net_preds[0, t, :, :, 0])

            if use_pf:
                pf_preds.append(pf_pred[t][:, :, 0])

                pae_losses.append(np.mean((truths[-1] - pae_preds[-1])**2))
                pf_losses.append(np.mean((truths[-1] - pf_preds[-1])**2))

            if normalize:
                pae_preds[-1] /= np.max(pae_preds[-1])
                if use_pf:
                    pf_preds[-1] /= np.max(pf_preds[-1])

            # if use_stepper:
            #     if normalize:
            #         stepper_pred[t][0, :, :, 0] /= np.max(stepper_pred[t][0, :, :, 0])
            #     images.append(stepper_pred[t][0, :, :, 0])

        imageio.mimsave('{}/percepts-{}.gif'.format(folder_plots, tag), percepts)
        imageio.mimsave('{}/truths-{}.gif'.format(folder_plots, tag), truths)
        imageio.mimsave('{}/pae_preds-{}.gif'.format(folder_plots, tag), pae_preds)

        if use_pf:
            imageio.mimsave('{}/pf_preds-{}.gif'.format(folder_plots, tag), pf_preds)
            return {'pae_losses': pae_losses, 'pf_losses': pf_losses}
Example #5
def pf_multi_run_plot(net,
                      sim_conf,
                      fpath='ims/last_test.csv',
                      cuda=True,
                      runs=10,
                      p_mask=1.0,
                      n_particles=100,
                      gif_no=0):
    CONSISTENT_NOISE = False
    RUN_LENGTH = 160
    DURATION = 0.4
    N_SIZE = 256

    pf_loss_ar = np.zeros(RUN_LENGTH)
    pae_loss_ar = np.zeros(RUN_LENGTH)

    for _ in range(runs):
        w = World(**sim_conf)

        pf = ParticleFilter(sim_conf, n_particles=n_particles)

        pos = [body.pos for body in w.bodies]
        vel = [body.vel for body in w.bodies]
        pf.warm_start(pos, vel=vel)

        ims_percept = []
        ims_pf_belief = []
        ims_pf_sample = []

        loss_pf = []
        loss_sample_mse = []

        masked_percepts = np.zeros(RUN_LENGTH) < 1
        for i in range(RUN_LENGTH):
            if i < 8 or np.random.rand() > p_mask:
                measures = [
                    body.pos +
                    sim_conf['measurement_noise'] * np.random.randn(2)
                    for body in w.bodies
                ]
                pf.update(measures)
                masked_percepts[i] = False
                pf.resample()
            else:
                masked_percepts[i] = True

            w.run()
            pf.predict()

            percept = w.draw()
            belief = pf.draw()[:, :, 0]
            sample = pf.parts[np.random.randint(pf.n)].draw()

            loss_pf.append(np.mean((percept - belief)**2))
            loss_sample_mse.append(np.mean((percept - sample)**2))

            ims_percept.append(percept)
            ims_pf_belief.append(belief)
            ims_pf_sample.append(sample)

        # run predictions with the network
        x = np.array(ims_percept)
        x = x.reshape((1, RUN_LENGTH, 28, 28, 1))
        x[:, masked_percepts, ...] = 0

        x = x.transpose((1, 0, 4, 2, 3))
        x = Variable(torch.FloatTensor(x))

        if cuda:
            net = net.cuda()
            x = x.cuda()

        states = net.bs_prop(x)

        # create expected observations
        obs_expectation = net.decoder(states)
        obs_expectation = obs_expectation.view(x.size())

        obs_expectation = obs_expectation.data.cpu().numpy()
        obs_expectation = obs_expectation.reshape((RUN_LENGTH, 28, 28))

        # create observation samples (constant or varying noise across time)
        if CONSISTENT_NOISE:
            noise = Variable(torch.FloatTensor(1, N_SIZE))
            noise.data.normal_(0, 1)
            noise = noise.expand(RUN_LENGTH, N_SIZE)
        else:
            noise = Variable(torch.FloatTensor(RUN_LENGTH, N_SIZE))
            noise.data.normal_(0, 1)

        if cuda:
            noise = noise.cuda()

        # states_non_ep = states.unfold(0, 1, (EP_LEN*BATCH_SIZE)//GAN_BATCH_SIZE).squeeze(-1)

        pae_samples = net.G(noise, states.squeeze_(1))
        pae_samples = net.decoder(pae_samples)
        pae_samples = pae_samples.view(x.size())

        pae_samples = pae_samples.data.cpu().numpy()
        pae_samples = pae_samples.reshape((RUN_LENGTH, 28, 28))

        pae_ims = []
        pae_samples_ims = []
        loss_pae = []
        for i in range(RUN_LENGTH):
            pae_ims.append(obs_expectation[i, ...])
            pae_samples_ims.append(pae_samples[i, ...])
            loss_pae.append(
                np.mean((ims_percept[i] - obs_expectation[i, ...])**2))

        pf_loss_ar += np.array(loss_pf)
        pae_loss_ar += np.array(loss_pae)

    ims_ar = np.array(ims_percept)
    av_pixel_intensity = np.mean(ims_ar)
    baseline_level = np.mean((ims_ar - av_pixel_intensity)**2)
    baseline = np.ones(len(loss_pf)) * baseline_level
    # print("Uninformative baseline level at {}".format(baseline_level))

    pf_loss_ar /= runs
    pae_loss_ar /= runs
    baseline_ar = baseline

    df = pd.DataFrame({
        'pf loss': pf_loss_ar,
        'pae loss': pae_loss_ar,
        'baseline': baseline_ar
    })

    df.to_csv(fpath)

    plt.plot(pf_loss_ar)
    plt.plot(pae_loss_ar)
    plt.plot(baseline, 'g--')

    plt.title("Image reconstruction loss vs timestep")
    plt.ylabel("loss (MSE)")
    plt.xlabel("timestep")
    plt.legend(["PF", "PAE", "baseline"], loc=4)

    plt.savefig("ims/{}-plot.png".format(gif_no))
    # plt.show()
    plt.clf()

    imageio.mimsave("ims/{}-percept.gif".format(gif_no),
                    ims_percept,
                    duration=DURATION)
    imageio.mimsave("ims/{}-pf_belief.gif".format(gif_no),
                    ims_pf_belief,
                    duration=DURATION)
    imageio.mimsave("ims/{}-pf_sample.gif".format(gif_no),
                    ims_pf_sample,
                    duration=DURATION)
    imageio.mimsave("ims/{}-pae_belief.gif".format(gif_no),
                    pae_ims,
                    duration=DURATION)
    imageio.mimsave("ims/{}-pae_sample.gif".format(gif_no),
                    pae_samples_ims,
                    duration=DURATION)

    page = """
    <html>
    <body>
    Configuration: {1}
    <br>
    <img src="{0}-plot.png" align="center">
    <table>
      <tr>
        <th>Ground truth</th>
        <th>Particle Filter</th>
        <th>PF Sample</th>
        <th>Predictive AE</th>
        <th>PAE Sample</th>
      </tr>
      <tr>
        <td><img src="{0}-percept.gif" width="140"></td>
        <td><img src="{0}-pf_belief.gif" width="140"></td>
        <td><img src="{0}-pf_sample.gif" width="140"></td>
        <td><img src="{0}-pae_belief.gif" width="140"></td>
        <td><img src="{0}-pae_sample.gif" width="140"></td>

      </tr>

    </table>
    </body>
    </html>
    """.format(gif_no, sim_conf)

    with open("ims/page-{}.html".format(gif_no), 'w') as f:
        f.write(page)
Example #6
def data():
    print "starting push logic"
    p.start("entire_push")
    if 'data' not in request.form:
        return 'Nothing received'
        # Test code:
        # request.form = {'data': json.dumps([
        #     {'name': 'sensors',
        #      'data': 'shit'},
        #     {'name': 'wifi',
        #      'data': [{'label': 'blah',
        #                'level': -56,
        #                'freqMhz': 2600,
        #                'estimatedDistance': 10}]}])}
    # p.off()
    p.start('initial')
    data = json.loads(request.form['data'])

    wifi_magic = WifiMagic()
    wifi_deep_magic = WifiDeeperMagic(cache)

    walls = None
    p.start('sensors')
    sensors_magic = SensorsMagic(walls)
    p.pstop('sensors')

    p.start('load_particles')
    saved_particles = get_db("particles")
    p.pstop('load_particles')
    pf = ParticleFilter(particles=saved_particles)
    p.pstop('initial')

    p.start('weights')
    for d in data:
        if d['name'] == 'sensors':
            result = sensors_magic.parse(d['data'])
            print('dheading: %f' % result['dheading'])
            sensors_magic.update_particles(pf.get_particles(), result)
        if d['name'] == 'wifi':
            wifidata = d['data']
            corr = wifi_deep_magic.get_corrections()
            for r in wifidata:
                if r['label'] in corr:
                    oldLvl = r['level']
                    r['level'] += corr[r['label']]
                    #print "corrected",r['label'],'from',oldLvl,'to',r['level']
            result = wifi_magic.parse(wifidata)
            set_db("router_dist", result)
            result = wifi_magic.update_particles(pf.get_particles(), result)
    p.pstop('weights')

    p.start('resample')
    pf.resample()
    p.pstop('resample')
    p.start('save_to_cache')
    set_db("particles", pf.get_particles())
    p.pstop('save_to_cache')
    #x, y = pf.get_position()
    #std = pf.get_std()
    #print "Particles updated to", x, y, " (var:", std,")"
    p.pstop('entire_push')
    #return json.dumps([x, y, std])
    return 'Thank you!'
Example #7
    # Write initialization code here
    dt = 0.05
    steps = 0
    robot = RobotP3DX(client_id, dt)
    z_us, z_v, z_w = robot.sense()
    navigation = Navigation(dt, z_us)
    localized = False

    # Particle filter
    m = Map('map_project.json',
            sensor_range=robot.SENSOR_RANGE,
            compiled_intersect=True,
            use_regions=True)
    pf = ParticleFilter(m, robot.SENSORS[:16], robot.SENSOR_RANGE)
    resample_count = 0
    pf.resample(z_us)

    # Path planning
    action_costs = (10.0, 12.0, 12.0)
    path_followed = []  # nodes still to reach; each node is removed once reached
    distance_tolerance = 0.2  # a node counts as reached when we get closer than this

    start_time = time.time()

    try:
        while not goal_reached(robot_handle, goal, localized):

            if not localized and pf.localized:
                # We only enter here on the transition from non-localized to localized
                localized = True  # set the flag
Example #8
File: node.py, Project: pengsongyou/PR_lab
class LocalizationNode(object):
    '''
    Class to hold all ROS related transactions to use split and merge algorithm.
    '''
    
    #===========================================================================
    def __init__(self, odom_lin_sigma=0.025, odom_ang_sigma=np.deg2rad(2), meas_rng_noise=0.2,  meas_ang_noise=np.deg2rad(10), x_init=0, y_init=0, theta_init=0):
        '''
        Initializes publishers, subscribers and the particle filter.
        '''
        # Threads
        self.lock = threading.RLock()
        
        # Publishers
        self.pub_map = rospy.Publisher("map", Marker, queue_size = 2)
        self.pub_lines = rospy.Publisher("lines", Marker, queue_size = 2)
        self.pub_lines_mean = rospy.Publisher("lines_mean", Marker, queue_size = 2)
        self.pub_particles = rospy.Publisher("particles", PoseArray, queue_size = 2)
        self.pub_big_particle = rospy.Publisher("mean_particle", PoseStamped, queue_size = 2)
        self.pub_odom = rospy.Publisher("mean_particle_odom", Odometry, queue_size = 2)
        self.pub_wei = rospy.Publisher("weights", MarkerArray, queue_size = 2)
        
        # Subscribers
        self.sub_scan = rospy.Subscriber("lines", Marker,  self.laser_callback)
        self.sub_odom = rospy.Subscriber("odom", Odometry, self.odom_callback)
        
        # TF
        self.tfBroad = tf.TransformBroadcaster()
        
        # Incremental odometry
        self.last_odom = None
        self.odom = None
        
        # Flags
        self.new_odom = False
        self.new_laser = False
        self.pub = False
        self.time = None
        self.lines = None
        self.i = 0
        
        # Particle filter
        self.part_filter = ParticleFilter(get_dataset3_map(), 500, odom_lin_sigma, odom_ang_sigma, meas_rng_noise, meas_ang_noise, x_init, y_init, theta_init)
    
    #===========================================================================
    def odom_callback(self, msg):
        '''
        Publishes a tf based on the odometry of the robot and calculates the incremental odometry as seen from the vehicle frame.
        '''
        # Lock thread
        self.lock.acquire()
        
        # Save time
        self.time = msg.header.stamp
        
        # Translation
        trans = (msg.pose.pose.position.x, 
                 msg.pose.pose.position.y, 
                 msg.pose.pose.position.z)
        
        # Rotation
        rot = (msg.pose.pose.orientation.x,
               msg.pose.pose.orientation.y,
               msg.pose.pose.orientation.z,
               msg.pose.pose.orientation.w)
        
        # Publish transform
        self.tfBroad.sendTransform(translation = trans,
                                   rotation = rot, 
                                   time = msg.header.stamp,
                                   child = '/base_footprint',
                                   parent = '/world')
                                   
        # Incremental odometry
        if self.last_odom is not None:
            
            # Increment computation
            delta_x = msg.pose.pose.position.x - self.last_odom.pose.pose.position.x
            delta_y = msg.pose.pose.position.y - self.last_odom.pose.pose.position.y
            yaw = yaw_from_quaternion(msg.pose.pose.orientation)
            lyaw = yaw_from_quaternion(self.last_odom.pose.pose.orientation)
            # Odometry seen from vehicle frame
            self.uk = np.array([delta_x * np.cos(lyaw) + delta_y * np.sin(lyaw),
                               -delta_x * np.sin(lyaw) + delta_y * np.cos(lyaw),
                                angle_wrap(yaw - lyaw)])
                                
            # Flag available
            self.cur_odom = msg
            self.new_odom = True
        
        # Save initial odometry for increment
        else:
            self.last_odom = msg
        
        # Unlock thread
        self.lock.release()
    
    #===========================================================================
    def laser_callback(self, msg):
        '''
        Reads lines coming from split and merge node. 
        '''
        # Lock thread
        self.lock.acquire()
        
        # Save time
        self.time = msg.header.stamp
        self.lines = None
        
        # Assertion
        assert msg.type == Marker.LINE_LIST

        if msg.ns == 'scan_line':
            # Retrieve lines from split and merge
            line = list()
            for point in msg.points:
                line.append(point.x)
                line.append(point.y)
            self.lines = np.array(line).reshape((-1, 4))
            # Have valid points
            if self.lines.shape[0] > 0:
                
                # Flag
                self.new_laser = True
                
            else:
                self.lines = None
                
        # Unlock thread
        self.lock.release()
    
    #===========================================================================
    def iterate(self):
        '''
        Main loop of the filter.
        '''
        lines = None

        # Prediction
        if self.new_odom:
            
            # Copy safely
            self.lock.acquire()
            uk = self.uk.copy()
            self.last_odom = self.cur_odom
            self.new_odom = False
            self.pub = True
            self.lock.release()
            
            # Predict filter
            self.part_filter.predict(uk)
        # Weighting and resampling
        if self.new_laser and self.lines is not None:
            # Copy safely
            self.lock.acquire()
            lines = self.lines.copy()
            self.new_laser = False
            self.pub = True
            self.lock.release()
            
            # Update and resample filter
            self.part_filter.weight(lines)
            if self.part_filter.moving and self.part_filter.n_eff < self.part_filter.num / 2.0:
                self.part_filter.resample()
            
        # Publish results
        if self.pub:
            self.publish_results(lines)
            self.pub = False
    
    #===========================================================================
    def publish_results(self, lines):
        '''
        Publishes all results from the filter.
        '''
        if self.time is not None:
            
            time = rospy.Time.now()
            
            # Map of the room
            map_lines = get_dataset3_map()
            publish_lines(map_lines, self.pub_map, frame='/world', ns='map',
                          color=(0,1,0))
            
            # Particles and biggest weighted particle
            msg, msg_mean, msg_odom, trans, rot, msg_wei = get_particle_msgs(
                self.part_filter, time)
            self.pub_particles.publish(msg)
            self.pub_big_particle.publish(msg_mean)
            self.pub_odom.publish(msg_odom)
            self.pub_wei.publish(msg_wei)
            self.tfBroad.sendTransform(translation = trans,
                                       rotation = rot, 
                                       time = time,
                                       parent = 'world',
                                       child = 'mean_particle')
                                       
            # Publish scanned lines
            if lines is not None:
                publish_lines(lines, self.pub_lines_mean, frame='mean_particle',
                          time=time, ns='scan_lines_mean', color=(0,0,1))
Example #10
class LocalizationNode(object):
    '''
    Class to hold all ROS related transactions to use split and merge algorithm.
    '''

    #===========================================================================
    def __init__(self,
                 odom_lin_sigma=0.025,
                 odom_ang_sigma=np.deg2rad(2),
                 meas_rng_noise=0.2,
                 meas_ang_noise=np.deg2rad(10),
                 x_init=0,
                 y_init=0,
                 theta_init=0):
        '''
        Initializes publishers, subscribers and the particle filter.
        '''
        # Threads
        self.lock = threading.RLock()

        # Publishers
        self.pub_map = rospy.Publisher("map", Marker, queue_size=2)
        self.pub_lines = rospy.Publisher("lines", Marker, queue_size=2)
        self.pub_lines_mean = rospy.Publisher("lines_mean",
                                              Marker,
                                              queue_size=2)
        self.pub_particles = rospy.Publisher("particles",
                                             PoseArray,
                                             queue_size=2)
        self.pub_big_particle = rospy.Publisher("mean_particle",
                                                PoseStamped,
                                                queue_size=2)
        self.pub_odom = rospy.Publisher("mean_particle_odom",
                                        Odometry,
                                        queue_size=2)
        self.pub_wei = rospy.Publisher("weights", MarkerArray, queue_size=2)

        # Subscribers
        self.sub_scan = rospy.Subscriber("lines", Marker, self.laser_callback)
        self.sub_odom = rospy.Subscriber("odom", Odometry, self.odom_callback)

        # TF
        self.tfBroad = tf.TransformBroadcaster()

        # Incremental odometry
        self.last_odom = None
        self.odom = None

        # Flags
        self.new_odom = False
        self.new_laser = False
        self.pub = False
        self.time = None
        self.lines = None

        # Particle filter
        self.part_filter = ParticleFilter(get_dataset3_map(), 500,
                                          odom_lin_sigma, odom_ang_sigma,
                                          meas_rng_noise, meas_ang_noise,
                                          x_init, y_init, theta_init)

    #===========================================================================
    def odom_callback(self, msg):
        '''
        Publishes a tf based on the odometry of the robot and calculates the incremental odometry as seen from the vehicle frame.
        '''
        # Lock thread
        self.lock.acquire()

        # Save time
        self.time = msg.header.stamp

        # Translation
        trans = (msg.pose.pose.position.x, msg.pose.pose.position.y,
                 msg.pose.pose.position.z)

        # Rotation
        rot = (msg.pose.pose.orientation.x, msg.pose.pose.orientation.y,
               msg.pose.pose.orientation.z, msg.pose.pose.orientation.w)

        # Publish transform
        self.tfBroad.sendTransform(translation=trans,
                                   rotation=rot,
                                   time=msg.header.stamp,
                                   child='/base_footprint',
                                   parent='/world')

        # Incremental odometry
        if self.last_odom is not None:

            # Increment computation
            delta_x = msg.pose.pose.position.x - self.last_odom.pose.pose.position.x
            delta_y = msg.pose.pose.position.y - self.last_odom.pose.pose.position.y
            yaw = yaw_from_quaternion(msg.pose.pose.orientation)
            lyaw = yaw_from_quaternion(self.last_odom.pose.pose.orientation)

            # Odometry seen from vehicle frame
            self.uk = np.array([
                delta_x * np.cos(lyaw) + delta_y * np.sin(lyaw),
                -delta_x * np.sin(lyaw) + delta_y * np.cos(lyaw),
                angle_wrap(yaw - lyaw)
            ])

            # Flag available
            self.cur_odom = msg
            self.new_odom = True

        # Save initial odometry for increment
        else:
            self.last_odom = msg

        # Unlock thread
        self.lock.release()

    #===========================================================================
    def laser_callback(self, msg):
        '''
        Reads lines coming from split and merge node. 
        '''
        # Lock thread
        self.lock.acquire()

        # Save time
        self.time = msg.header.stamp
        self.lines = None

        # Assertion
        assert msg.type == Marker.LINE_LIST
        if msg.ns == 'scan_line':

            # Retrieve lines from split and merge
            line = list()
            for point in msg.points:
                line.append(point.x)
                line.append(point.y)
            self.lines = np.array(line).reshape((-1, 4))

            # Have valid points
            if self.lines.shape[0] > 0:

                # Flag
                self.new_laser = True

            else:
                self.lines = None

        # Unlock thread
        self.lock.release()

    #===========================================================================
    def iterate(self):
        '''
        Main loop of the filter.
        '''
        lines = None

        # Prediction
        if self.new_odom:

            # Copy safely
            self.lock.acquire()
            uk = self.uk.copy()
            self.last_odom = self.cur_odom
            self.new_odom = False
            self.pub = True
            self.lock.release()

            # Predict filter
            self.part_filter.predict(uk)

        # Weighting and resampling
        if self.new_laser and self.lines is not None:

            # Copy safely
            self.lock.acquire()
            lines = self.lines.copy()
            self.new_laser = False
            self.pub = True
            self.lock.release()

            # Update and resample filter
            self.part_filter.weight(lines)
            if self.part_filter.moving and self.part_filter.n_eff < self.part_filter.num / 2.0:
                self.part_filter.resample()

        # Publish results
        if self.pub:
            self.publish_results(lines)
            self.pub = False

    #===========================================================================
    def publish_results(self, lines):
        '''
        Publishes all results from the filter.
        '''
        if self.time is not None:

            time = rospy.Time.now()

            # Map of the room
            map_lines = get_dataset3_map()
            publish_lines(map_lines,
                          self.pub_map,
                          frame='/world',
                          ns='map',
                          color=(0, 1, 0))

            # Particles and biggest weighted particle
            msg, msg_mean, msg_odom, trans, rot, msg_wei = get_particle_msgs(
                self.part_filter, time)
            self.pub_particles.publish(msg)
            self.pub_big_particle.publish(msg_mean)
            self.pub_odom.publish(msg_odom)
            self.pub_wei.publish(msg_wei)
            self.tfBroad.sendTransform(translation=trans,
                                       rotation=rot,
                                       time=time,
                                       parent='world',
                                       child='mean_particle')

            # Publish scanned lines
            if lines is not None:
                publish_lines(lines,
                              self.pub_lines_mean,
                              frame='mean_particle',
                              time=time,
                              ns='scan_lines_mean',
                              color=(0, 0, 1))
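Examples #8 and #10 resample only when n_eff falls below half the particle count. A minimal sketch of that effective-sample-size test, assuming the weights are (or are first made) normalized:

import numpy as np

def effective_sample_size(weights):
    # n_eff = 1 / sum(w_i**2): equals N for uniform weights and 1 when a
    # single particle carries all of the weight.
    w = np.asarray(weights, dtype=float)
    w = w / w.sum()
    return 1.0 / np.sum(w ** 2)

Resampling only when effective_sample_size(weights) < len(weights) / 2.0 avoids discarding particle diversity while the weights are still well spread.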
Example #11
# Initial state
x0 = np.array([0, 0.1, 0, 0.1])

state, measurements = simulate_system(simulation_time, x0)
particle_filter = ParticleFilter(A, H, Q, R, x0, n_particles)

estimated_state = np.zeros((simulation_time, 4))

for k in range(simulation_time):
    particle_filter.predict()
    particle_filter.update_particle_weights(measurements[k, :])

    estimated_state[k, :] = particle_filter.calculate_estimate()

    particle_filter.resample()

# Plot X-Y
plt.figure()
plt.plot(state[:, 0], state[:, 2], '-bo')
plt.plot(estimated_state[:, 0], estimated_state[:, 2], '-ko')
plt.plot(measurements[:, 0], measurements[:, 1], ':rx')
plt.xlabel('x [m]')
plt.ylabel('y [m]')
plt.legend(['true state', 'inferred state', 'observed measurement'])

# Plot particles
particles_x = [particle[0] for particle in particle_filter.particles]
particles_y = [particle[2] for particle in particle_filter.particles]

plt.figure()
plt.scatter(particles_x, particles_y, s=2)
plt.xlabel('x [m]')
plt.ylabel('y [m]')
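Example #11 constructs ParticleFilter(A, H, Q, R, x0, n_particles) without showing the class. Below is a minimal sketch of a linear-Gaussian particle filter that matches this calling convention; the method names follow the example, but the internals are an assumption, not the original implementation:

import numpy as np

class ParticleFilter:
    def __init__(self, A, H, Q, R, x0, n_particles):
        self.A, self.H, self.Q, self.R = A, H, Q, R
        self.n = n_particles
        # Spread the initial particles around x0 using the process noise.
        self.particles = x0 + np.random.multivariate_normal(
            np.zeros(len(x0)), Q, size=n_particles)
        self.weights = np.full(n_particles, 1.0 / n_particles)

    def predict(self):
        # Propagate every particle through the motion model plus noise.
        noise = np.random.multivariate_normal(
            np.zeros(self.Q.shape[0]), self.Q, size=self.n)
        self.particles = self.particles @ self.A.T + noise

    def update_particle_weights(self, z):
        # Gaussian measurement likelihood of z for each particle.
        innovations = z - self.particles @ self.H.T
        inv_R = np.linalg.inv(self.R)
        log_w = -0.5 * np.einsum('ni,ij,nj->n', innovations, inv_R, innovations)
        w = np.exp(log_w - log_w.max())  # subtract max for numerical stability
        self.weights = w / w.sum()

    def calculate_estimate(self):
        # Weighted mean of the particle cloud.
        return np.average(self.particles, axis=0, weights=self.weights)

    def resample(self):
        # Multinomial resampling; a systematic scheme would also work here.
        idx = np.random.choice(self.n, size=self.n, p=self.weights)
        self.particles = self.particles[idx]
        self.weights = np.full(self.n, 1.0 / self.n)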
Example #12
class LocalizationNode(object):
    '''
    Class to hold all ROS related transactions to use split and merge algorithm.
    '''

    #===========================================================================
    def __init__(self,
                 odom_lin_sigma=0.025,
                 odom_ang_sigma=np.deg2rad(2),
                 meas_rng_noise=0.2,
                 meas_ang_noise=np.deg2rad(10)):
        '''
        Initializes publishers, subscribers and the particle filter.
        '''
        # Publishers
        self.pub_lines = rospy.Publisher("lines", Marker)
        self.pub_particles = rospy.Publisher("particles", PoseArray)
        self.pub_big_particle = rospy.Publisher("mean_particle", PoseStamped)
        self.pub_odom = rospy.Publisher("mean_particle_odom", Odometry)

        # Subscribers
        self.sub_scan = rospy.Subscriber("scan", LaserScan,
                                         self.laser_callback)
        self.sub_odom = rospy.Subscriber("odom", Odometry, self.odom_callback)

        # TF
        self.tfBroad = tf.TransformBroadcaster()

        # Incremental odometry
        self.last_odom = None
        self.odom = None

        # Flags
        self.new_odom = False
        self.new_laser = False
        self.pub = False

        # Particle filter
        self.part_filter = ParticleFilter(get_map(), 500, odom_lin_sigma,
                                          odom_ang_sigma, meas_rng_noise,
                                          meas_ang_noise)

    #===========================================================================
    def odom_callback(self, msg):
        '''
        Publishes a tf based on the odometry of the robot and calculates the incremental odometry as seen from the vehicle frame.
        '''
        # Save time
        self.time = msg.header.stamp

        # Translation
        trans = (msg.pose.pose.position.x, msg.pose.pose.position.y,
                 msg.pose.pose.position.z)

        # Rotation
        rot = (msg.pose.pose.orientation.x, msg.pose.pose.orientation.y,
               msg.pose.pose.orientation.z, msg.pose.pose.orientation.w)

        # Publish transform
        self.tfBroad.sendTransform(translation=trans,
                                   rotation=rot,
                                   time=msg.header.stamp,
                                   child='/openni_depth_frame',
                                   parent='/world')

        # Incremental odometry
        if self.last_odom is not None:

            # Increment computation
            delta_x = msg.pose.pose.position.x - self.last_odom.pose.pose.position.x
            delta_y = msg.pose.pose.position.y - self.last_odom.pose.pose.position.y
            yaw = yaw_from_quaternion(msg.pose.pose.orientation)
            lyaw = yaw_from_quaternion(self.last_odom.pose.pose.orientation)

            # Odometry seen from vehicle frame
            self.uk = np.array([
                delta_x * np.cos(lyaw) + delta_y * np.sin(lyaw),
                -delta_x * np.sin(lyaw) + delta_y * np.cos(lyaw),
                angle_wrap(yaw - lyaw)
            ])

            # Flag
            self.new_odom = True

        # For next loop
        self.last_odom = msg

    #===========================================================================
    def laser_callback(self, msg):
        '''
        Function called each time a LaserScan message with topic "scan" arrives. 
        '''
        # Save time
        self.time = msg.header.stamp

        # Project LaserScan to points in space
        rng = np.array(msg.ranges)
        ang = np.linspace(msg.angle_min, msg.angle_max, len(msg.ranges))
        points = np.vstack((rng * np.cos(ang), rng * np.sin(ang)))

        # Filter out readings at or beyond the sensor's maximum range
        cond = rng < msg.range_max
        points = points[:, cond]

        # Use split and merge to obtain lines and publish
        self.lines = splitandmerge(points,
                                   split_thres=0.1,
                                   inter_thres=0.3,
                                   min_points=6,
                                   dist_thres=0.12,
                                   ang_thres=np.deg2rad(10))

        # Have valid points
        if self.lines is not None:

            # Publish results
            publish_lines(self.lines,
                          self.pub_lines,
                          frame=msg.header.frame_id,
                          time=msg.header.stamp,
                          ns='scan_lines',
                          color=(1, 0, 0))
            publish_lines(self.lines,
                          self.pub_lines,
                          frame='/mean_particle',
                          time=msg.header.stamp,
                          ns='scan_lines_mean',
                          color=(0, 0, 1))

            # Flag
            self.new_laser = True

    #===========================================================================
    def iterate(self):
        '''
        Main loop of the filter.
        '''
        # Prediction
        if self.new_odom:

            self.part_filter.predict(self.uk.copy())
            self.new_odom = False
            self.pub = True

        # Weighting and resampling
        if self.new_laser:

            self.part_filter.weight(self.lines.copy())
            self.part_filter.resample()
            self.new_laser = False
            self.pub = True

        # Publish results
        if self.pub:
            self.publish_results()
            self.pub = False

    #===========================================================================
    def publish_results(self):
        '''
        Publishes all results from the filter.
        '''
        # Map of the room
        map_lines = get_map()
        publish_lines(map_lines,
                      self.pub_lines,
                      frame='/world',
                      ns='map',
                      color=(0, 1, 0))

        # Particles and biggest weighted particle
        msg, msg_mean, msg_odom, trans, rot = get_particle_msgs(
            self.part_filter)
        self.pub_particles.publish(msg)
        self.pub_big_particle.publish(msg_mean)
        self.pub_odom.publish(msg_odom)
        self.tfBroad.sendTransform(translation=trans,
                                   rotation=rot,
                                   time=self.time,
                                   child='/mean_particle',
                                   parent='/world')
Example #13
class ParticleFilterNode(object):
    """ The class that represents a Particle Filter ROS Node
    """
    def __init__(self):
        rospy.init_node('pf')

        real_robot = False
        # create instances of two helper objects that are provided to you
        # as part of the project
        self.particle_filter = ParticleFilter()
        self.occupancy_field = OccupancyField()
        self.TFHelper = TFHelper()
        self.sensor_model = SensorModel(
            model_noise_rate=0.5,
            odometry_noise_rate=0.15,
            world_model=self.occupancy_field,
            TFHelper=self.TFHelper)

        self.position_delta = None  # Pose, delta from current to previous odometry reading
        self.last_scan = None  # list of ranges
        self.odom = None  # Pose, current odometry reading

        self.x_y_spread = 0.3  # Spread constant for x-y initialization of particles
        self.z_spread = 0.2  # Spread constant for z initialization of particles

        self.n_particles = 150  # number of particles

        # pose_listener responds to selection of a new approximate robot
        # location (for instance using rviz)
        rospy.Subscriber("initialpose", PoseWithCovarianceStamped,
                         self.update_initial_pose)

        rospy.Subscriber("odom", Odometry, self.update_position)
        rospy.Subscriber("stable_scan", LaserScan, self.update_scan)

        # publisher for the particle cloud for visualizing in rviz.
        self.particle_pub = rospy.Publisher("particlecloud",
                                            PoseArray,
                                            queue_size=10)

    def update_scan(self, msg):
        """Updates the scan to the most recent reading"""
        self.last_scan = [(i, msg.ranges[i]) for i in range(len(msg.ranges))]

    def update_position(self, msg):
        """Calculate delta in position since last odometry reading, update current odometry reading"""
        self.position_delta = Vector3()

        this_pos = self.TFHelper.convert_pose_to_xy_and_theta(msg.pose.pose)
        if self.odom is not None:
            prev_pos = self.TFHelper.convert_pose_to_xy_and_theta(self.odom)
            self.position_delta.x = this_pos.x - prev_pos.x
            self.position_delta.y = this_pos.y - prev_pos.y
            self.position_delta.z = self.TFHelper.angle_diff(
                this_pos.z, prev_pos.z)
        else:
            self.position_delta = this_pos

        self.odom = msg.pose.pose

        self.particle_filter.predict(self.position_delta)

    def reinitialize_particles(self, initial_pose):
        """Reinitialize particles when a new initial pose is given."""
        self.particle_filter.particles = []
        for i in range(self.n_particles):
            pos = Vector3()

            initial_pose_trans = self.TFHelper.convert_pose_to_xy_and_theta(
                initial_pose)

            pos.x = initial_pose_trans.x + (2 * randn() - 1) * self.x_y_spread
            pos.y = initial_pose_trans.y + (2 * randn() - 1) * self.x_y_spread
            pos.z = initial_pose_trans.z + (2 * randn() - 1) * self.z_spread

            new_particle = Particle(position=pos,
                                    weight=1 / float(self.n_particles),
                                    sensor_model=self.sensor_model)
            self.particle_filter.add_particle(new_particle)

    def update_initial_pose(self, msg):
        """ Callback function to handle re-initializing the particle filter
            based on a pose estimate.  These pose estimates could be generated
            by another ROS Node or could come from the rviz GUI """
        self.reinitialize_particles(msg.pose.pose)

        self.TFHelper.fix_map_to_odom_transform(msg.pose.pose,
                                                msg.header.stamp)

    def publish_particles(self):
        """ Extract position from each particle, transform into pose, and publish them as PoseArray"""
        pose_array = PoseArray()
        for p in self.particle_filter.particles:
            pose_array.poses.append(
                self.TFHelper.convert_vector3_to_pose(p.position))
        pose_array.header.frame_id = "map"
        self.particle_pub.publish(pose_array)

    def run(self):
        r = rospy.Rate(5)

        while not rospy.is_shutdown():
            # in the main loop all we do is continuously broadcast the latest
            # map to odom transform
            self.TFHelper.send_last_map_to_odom_transform()
            if len(self.particle_filter.particles) > 0:
                if self.last_scan is not None:
                    self.particle_filter.integrate_observation(self.last_scan)
                    self.last_scan = None
                    self.particle_filter.normalize()
                    self.publish_particles()
                    self.particle_filter.resample()
            r.sleep()
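The listing above stops before the script's entry point; running the node would presumably look like the following sketch:

if __name__ == '__main__':
    node = ParticleFilterNode()
    node.run()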
Example #14
class Tracker:
    def __init__(self):
        self.tracker = None
        self.particle_filter = ParticleFilter()
        self.histo = None
        self.points = []
        self.frame_number = 0

    def init_tracker(self, frame, bbox):
        tracker_types = [
            'BOOSTING', 'MIL', 'KCF', 'TLD', 'MEDIANFLOW', 'GOTURN'
        ]
        tracker_type = tracker_types[0]

        if tracker_type == 'BOOSTING':
            self.tracker = cv2.TrackerBoosting_create()
        if tracker_type == 'MIL':
            self.tracker = cv2.TrackerMIL_create()

        # if tracker_type == 'KCF':
        #     self.tracker = cv2.TrackerKCF_create()
        if tracker_type == 'TLD':
            self.tracker = cv2.TrackerTLD_create()
        if tracker_type == 'MEDIANFLOW':
            self.tracker = cv2.TrackerMedianFlow_create()
        if tracker_type == 'GOTURN':
            self.tracker = cv2.TrackerGOTURN_create()

        # Initialize tracker with first frame and bounding box

        ok = self.tracker.init(frame, bbox)
        self.particle_filter = ParticleFilter()
        self.particle_filter = ParticleFilter.init_particles(
            self.particle_filter, region=bbox, particlesPerObject=100)
        img = frame[int(bbox[1]):int(bbox[1] + bbox[3]),
                    int(bbox[0]):int(bbox[0] + bbox[2])]
        hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
        self.histo = cv2.calcHist([hsv], [0, 1], None, [180, 256],
                                  [0, 180, 0, 256])
        self.points.append(
            ParticleFilter.get_particle_center(self.particle_filter))
        self.frame_number += 1

        return self

    def next_frame(self, frame):

        ok, bbox = self.tracker.update(frame)

        frameHSV = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
        w = frame.shape[0]
        h = frame.shape[1]
        self.particle_filter.transition(w, h)
        self.particle_filter.update_weight(frameHSV, self.histo)
        self.particle_filter.normalize_weights()
        self.particle_filter.resample()
        self.points.append(
            ParticleFilter.get_particle_center(self.particle_filter))
        self.frame_number += 1

        return ok, bbox

    def show_results(self, frame, bbox):

        p1 = (int(bbox[0]), int(bbox[1]))
        p2 = (int(bbox[0] + bbox[2]), int(bbox[1] + bbox[3]))
        frame = first_segmentation_mask(
            frame, (int(bbox[0]), int(bbox[1]), int(bbox[2]), int(bbox[3])))
        cv2.rectangle(frame, p1, p2, (255, 0, 0), 2, 1)
        return frame
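update_weight(frameHSV, self.histo) in this tracker scores each particle by comparing color histograms. A minimal sketch of one such scoring step using OpenCV's Bhattacharyya distance; particle_region is a hypothetical (x, y, w, h) window centered on a particle, and sigma is a tuning constant, neither taken from the original code:

import cv2
import numpy as np

def histogram_likelihood(frame_hsv, particle_region, target_hist, sigma=0.2):
    # Crop the particle's window and build the same H-S histogram as the
    # target (both histograms should be normalized the same way).
    x, y, w, h = [int(v) for v in particle_region]
    patch = frame_hsv[y:y + h, x:x + w]
    hist = cv2.calcHist([patch], [0, 1], None, [180, 256], [0, 180, 0, 256])
    cv2.normalize(hist, hist)
    # Bhattacharyya distance is 0 for identical histograms, 1 for disjoint ones.
    d = cv2.compareHist(hist, target_hist, cv2.HISTCMP_BHATTACHARYYA)
    # Convert the distance into an (unnormalized) particle weight.
    return float(np.exp(-d ** 2 / (2 * sigma ** 2)))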