Example #1
 def resample(self):
     indexes = systematic_resample(self.weights)
     self.particles = self.particles.T
     self.particles[:] = self.particles[indexes]
     self.particles = self.particles.T
     self.weights[:] = self.weights[indexes]
     self.weights.fill(1.0 / len(self.weights))
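Every example in this collection delegates the resampling step to filterpy.monte_carlo.systematic_resample. For reference, a minimal sketch of the algorithm it implements (following the filterpy documentation, not a verbatim copy of the library source): N evenly spaced pointers, sharing one random offset, slide across the cumulative weight distribution.

import numpy as np

def systematic_resample_sketch(weights):
    N = len(weights)
    # N evenly spaced positions with a single shared random offset
    positions = (np.arange(N) + np.random.random()) / N
    indexes = np.zeros(N, 'i')
    cumulative_sum = np.cumsum(weights)
    cumulative_sum[-1] = 1.0  # guard against floating-point round-off
    i, j = 0, 0
    while i < N:
        if positions[i] < cumulative_sum[j]:
            indexes[i] = j
            i += 1
        else:
            j += 1
    return indexes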
Example #2
def particle_filter(keyps,
                    j,
                    initial_x,
                    N=300,
                    sensor_std_err=0.3,
                    xlim=(-356, 356),
                    ylim=(-356, 356)):
    landmarks = np.array([[-10, -10], [-10, 266], [266, -10], [266, 266]])
    NL = len(landmarks)
    # create particles and weights
    if initial_x is not None:
        particles = create_gaussian_particles(mean=initial_x,
                                              std=(5, 5, np.pi / 4),
                                              N=N)
    else:
        # assumed fallback (the original had no else branch): spread
        # particles uniformly over the stated x/y limits
        particles = create_uniform_particles(xlim, ylim, (0, 2 * np.pi), N)
    weights = np.ones(N) / N
    kp = copy.deepcopy(keyps)
    for x in range(len(keyps) - 1):
        keypt_pos, uv = read_pickle(keyps, x, j)
        zs = (norm(landmarks - keypt_pos, axis=1) +
              (randn(NL) * sensor_std_err))
        if uv[1] == 0:
            uv[1] = 2
        # move diagonally forward to (x+1, x+1)
        predict(particles, u=uv, std=(0, 0))

        # incorporate measurements
        update(particles, weights, z=zs, R=sensor_std_err, landmarks=landmarks)
        # resample if too few effective particles
        if neff(weights) < N / 2:
            indexes = systematic_resample(weights)
            resample_from_index(particles, weights, indexes)
            #assert np.allclose(weights, 1/N)
        mu, var = estimate(particles, weights)
        kp[x, :2, j] = mu
    return kp
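particle_filter calls neff and resample_from_index without defining them. A minimal sketch consistent with how they are called here (names and in-place semantics assumed from rlabbe's "Kalman and Bayesian Filters in Python", from which many of these examples derive):

import numpy as np

def neff(weights):
    # effective sample size: 1 / sum(w_i^2)
    return 1.0 / np.sum(np.square(weights))

def resample_from_index(particles, weights, indexes):
    # replace the particle set in place and reset the weights to uniform
    particles[:] = particles[indexes]
    weights.resize(len(particles))
    weights.fill(1.0 / len(weights))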
Example #3
def pfilter(N, iterations=300, sensor_err=1, x=(0, 20), y=(0, 20)):
    track = []
    est = []
    marker = np.array([[-1,2],[6, 2], [11,15], [16,27], [25,30]])
    NL = len(marker)
    sensor_noise = np.random.randn(NL) * sensor_err
    particles = uni_part((0,20), (0,20), (0,2*np.pi), N)
    weights = np.ones(N) / N
    
    xs = []
    #best at initial position 0,0
    init_p = np.array([2., 3.])
    for x in range(iterations):
        init_p = init_p + (1, 1)
        zs = (np.linalg.norm((marker - init_p), axis=1) + sensor_noise)
        transition(particles, control_input=(0, 1.414), var=(.2, 0.1))
        update(particles, weights, obs=zs, obs_noise=sensor_err, marker=marker)
        # resample if too few effective particles (neff = 1 / sum(w^2))
        if 1. / np.sum(np.square(weights)) < N / 2:
            indexes = systematic_resample(weights)
            particles[:] = particles[indexes]
            weights.fill(1.0 / len(weights))
        mu = np.average(particles[:,0:2], weights=weights, axis=0)
        xs.append(mu)
        track.append(init_p)
        est.append(mu)
    return track,mu,xs
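pfilter computes the weighted mean inline with np.average; most of the other examples call an estimate helper instead. A sketch of the textbook-style version those call sites suggest (assumed, since none of the examples shows it):

import numpy as np

def estimate(particles, weights):
    # weighted mean and variance of the particle positions
    pos = particles[:, 0:2]
    mean = np.average(pos, weights=weights, axis=0)
    var = np.average((pos - mean) ** 2, weights=weights, axis=0)
    return mean, var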
Example #4
    def run_pf(self, camera, lidar, dt, i):
        #sigma (uncertainty) for the camera
        cam_err_x = 0.35
        cam_err_y = 0.25
        cam_err_rad = 0.1

        if lidar is not None:
            #sigma (uncertainty) for lidar
            lid_err_x = 0.05
            lid_err_y = 0.05
            lid_err_rad = 0.05
            noise_factor = 0

            lid_err = [[lid_err_x**2, 0, 0], [0, lid_err_y**2, 0],
                       [0, 0, lid_err_rad**2]]
            cam_err = [[cam_err_x**2, 0, 0], [0, cam_err_y**2, 0],
                       [0, 0, cam_err_rad**2]]

            zs, sigma = multivariate_multiply(camera, cam_err, lidar, lid_err)
            R = sigma * (1 + noise_factor)

        else:
            zs = camera
            R = [[cam_err_x**2, 0, 0], [0, cam_err_y**2, 0],
                 [0, 0, cam_err_rad**2]]

        # do predict
        std_predict = (0.02, 0.02, 0.04, 0.04, 0.04, 0.04, 0.04)
        pf.predict(self.new_particles[:, :, i], std=std_predict, dt=dt)

        # incorporate measurements
        pf.update_multivariate(self.new_particles[:, :, i],
                               self.new_weights[:, i],
                               z=zs,
                               R=R)

        # resample if too few effective particles
        if pf.neff(self.new_weights[:, i]) < self.N / 2:
            indexes = systematic_resample(self.new_weights[:, i])
            self.new_weights[:, i] = pf.resample_from_index(
                self.new_particles[:, :, i], self.new_weights[:, i], indexes)

        self.mu, self.var = pf.estimate(self.new_particles[:, :, i],
                                        self.new_weights[:, i])

        # reinitialize
        t = 10
        time = round(self.time_elapsed, 1)
        mu_pos = np.array(self.mu[0:2])
        z_pos = np.array(zs[0:2])
        euclid_dist = np.linalg.norm(mu_pos - z_pos)
        if time % t == 0 or euclid_dist > 2:
            print("Reinitializing")
            initial = (zs[0], zs[1], 0.1, 0.1, 0, 0, zs[2])
            init_std = (0.01, 0.01, 0.1, 0.1, 0.1, 0.1, 0.01)
            self.new_particles[:, :,
                               i] = pf.create_gaussian_particles(mean=initial,
                                                                 std=init_std,
                                                                 N=self.N)
            self.new_weights[:, i] = pf.create_initial_weights(self.N)
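run_pf fuses the camera and lidar estimates with multivariate_multiply from filterpy.stats, which returns the mean and covariance of the product of two Gaussians. A sketch of that computation using the standard Gaussian-product formulas (not the library source):

import numpy as np

def multivariate_multiply_sketch(m1, c1, m2, c2):
    # mean and covariance of N(m1, c1) * N(m2, c2), up to normalization
    m1, c1 = np.asarray(m1, dtype=float), np.asarray(c1, dtype=float)
    m2, c2 = np.asarray(m2, dtype=float), np.asarray(c2, dtype=float)
    sum_inv = np.linalg.inv(c1 + c2)
    c3 = c1 @ sum_inv @ c2                      # equals (C1^-1 + C2^-1)^-1
    m3 = c2 @ sum_inv @ m1 + c1 @ sum_inv @ m2  # covariance-weighted mean
    return m3, c3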
Example #5
def run(N, R, init=None):
    if init:
        # initial state (x, y, z, m, ex, ey, ez)
        particles = create_gaussian_particles(mean=init, N=N)
    else:
        particles = create_uniform_particles((-0.2, 0.2), (-0.2, 0.2), (0.01, 0.1), (0.1, 0.5), N)
    weights = np.ones(N) / N

    for i in range(10):
        predict(particles)
        update(particles, weights, magSmoothData, R)

        particlesX = particles[:, 0]
        particlesY = particles[:, 1]
        particlesZ = particles[:, 2]
        # plot the 3D distribution of the particles
        # ax.scatter(particlesX, particlesY, particlesZ)
        # plt.show()
        plt.scatter(particlesX, particlesY, color='k', marker=',', s=weights)
        # plt.show()

        if neff(weights) < N / 2:
            print('==========systematic_resample=============')
            indexes = systematic_resample(weights)
            resample_from_index(particles, weights, indexes)
        mu, var = estimate(particles, weights)
Example #6
	def filterdata(self, data):
	    self.predict(self.particles, u=self.u, std=self.particle_std)
	    self.update(self.particles, self.weights, z=data, R=self.sensor_std_err, init_var=self.initial_pose)
	    if self.neff(self.weights) < self.N/2: #Perform systematic resampling.
	        self.indexes = systematic_resample(self.weights)
	        self.resample_from_index(self.particles, self.weights, self.indexes)
	    mu, _ = self.estimate(self.particles, self.weights)
	    return mu
Example #7
 def _systematic_resample(self, x_pred, weights, params_states):
     idxs = systematic_resample(weights)
     # resample every parameter state with the same index set
     for k in range(5):
         params_states[k], _ = self._resample_from_index(params_states[k], weights, idxs)
     x_pred, weights = self._resample_from_index(x_pred, weights, idxs)
     return x_pred, weights, params_states
Example #8
    def resample(self, weights):
        """Runs resampling step.
        
        Resampling methods supported: systematic, stratified,
        residual and multinomial.
        
        Args:
            weights (array): particle weights after the update step
            
        Returns:
            (array) of indexes resulting from resampling, with (1, N) dimension
        """
        def residual_resample(weights):
            """ This was taken from filterpy package and some changes were made.
            
            Performs the residual resampling algorithm used by particle filters.
        
            For more documentation see https://filterpy.readthedocs.org
            """
            N = len(weights)
            indexes = np.zeros(N, 'i')

            # take int(N*w) copies of each weight, which ensures particles with the
            # same weight are drawn uniformly
            num_copies = (np.floor(N * np.asarray(weights))).astype(int)
            #    num_copies = (N*np.asarray(weights)).astype(int)
            #    scaled_weights = N*np.asarray(weights)

            k = 0
            for i in range(N):
                # change done here in num_copies[i][0]
                for _ in range(np.array(num_copies[i][0])):  # make n copies
                    indexes[k] = i
                    k += 1

            # use multinormal resample on the residual to fill up the rest. This
            # maximizes the variance of the samples
            residual = weights - num_copies  # get fractional part
            #    residual = scaled_weights - num_copies
            residual /= sum(residual)  # normalize
            cumulative_sum = np.cumsum(residual)
            cumulative_sum[
                -1] = 1.  # avoid round-off errors: ensures sum is exactly one
            indexes[k:N] = np.searchsorted(cumulative_sum, random(N - k))

            return indexes

        if self.arg_resample == "systematic":
            indixes = systematic_resample(weights)
        elif self.arg_resample == "stratified":
            indixes = stratified_resample(weights)
        elif self.arg_resample == "residual":
            indixes = residual_resample(weights)
        elif self.arg_resample == "multinomial":
            indixes = multinomial_resample(weights)
        return indixes
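All four strategies dispatched above return an index array of length N that can be fed to a resample_from_index-style helper, so they are interchangeable at the call site. A quick self-contained check (using filterpy's own residual_resample rather than the local variant above):

import numpy as np
from filterpy.monte_carlo import (systematic_resample, stratified_resample,
                                  residual_resample, multinomial_resample)

weights = np.random.random(1000)
weights /= weights.sum()  # the resamplers expect normalized weights
for resample in (systematic_resample, stratified_resample,
                 residual_resample, multinomial_resample):
    indexes = resample(weights)
    assert len(indexes) == len(weights)  # one surviving index per particle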
Example #9
 def _systematic_resample_states(self, params_states, weights):
     idxs = systematic_resample(weights)
     # apply the same resampling indexes to all seven parameter states
     for k in range(7):
         params_states[k] = params_states[k][idxs]
     return params_states, np.ones(len(weights))/len(weights)
Example #10
    def update(self, z, arg, u, visualize=False):
        """
        :param z: state measured by sensor
        :param arg: which keypoints to select
        :param u: control input
        :param landmarks: criteria for updating weight
        :param sensor_std_err: std error of sensor
        :param visualize: True or False
        :return:
        """

        """
        predict particles: move according to control input u with noise Q
        """
        N = len(self.particles)     # number of particles
        n = np.shape(self.particles)[1]     # number of variables to estimate
        # self.particles += np.array(u) + (randn(N, n) * self.std)
        self.particles += np.array(u) + uniform(self.lower_limit, self.upper_limit, size=(N, n))

        """
        update weights
        """
        kps_model = ToolPoseEstimation.get_keypoints_model_q567(self.particles)[:, arg, :]  # w.r.t cam frame
        kps_model_tr = ToolPoseEstimation.get_rigid_transform(kps_model, [z[arg]])
        distance = np.linalg.norm(kps_model_tr - z[arg], axis=2)
        dist_sum = np.sum(distance, axis=1)  # various metrics can be used here (ex) RMS, chamfer dist, etc
        tau = 0.001     # sensitivity factor
        self.weights *= np.exp(-dist_sum/tau)
        # weights *= scipy.stats.norm(distance, R).pdf(z[i])
        # self.weights *= scipy.stats.norm(dist_sum, 0.01).pdf(0.0)
        self.weights += 1.e-300  # avoid round-off to zero
        self.weights /= sum(self.weights)  # normalize


        """
        resample if too few effective particles
        """
        if self.neff(self.weights) < self.N/2:
            indexes = systematic_resample(self.weights)
            self.resample_from_index(self.particles, self.weights, indexes)
            assert np.allclose(self.weights, 1 / self.N)
        mu, var = self.estimate(self.particles, self.weights)

        if visualize:
            plt.scatter(self.particles[:, 0], self.particles[:, 1], color='k', marker=',', s=1)
            p1 = plt.scatter(z[0], z[1], marker='+', color='r', s=180, lw=3)
            p2 = plt.scatter(mu[0], mu[1], marker='s', color='b')
            plt.legend([p1, p2], ['Actual', 'PF'], loc=4, numpoints=1)
            xlim = (-0.1, 0.1)
            ylim = (-0.1, 0.1)
            plt.xlim(*xlim)
            plt.ylim(*ylim)
            plt.show()
        return mu, var
Example #11
    def filter_(self, y, params):
        """
        Performs sequential monte-carlo sampling particle filtering
        """
        mu, kappa, theta, sigma, rho, v0 = self._unwrap_params(params)
        x_pred = np.array([v0] * self.N)
        v = v0
        observations = np.zeros(len(y))
        hidden = np.zeros(len(y))
        observations[0] = y[0]
        hidden[0] = v0

        # initialize weights and particles
        dy = y[1]-y[0]
        particles = np.maximum(1e-3, self.proposal_sample(self.N, v, dy, params))
        weights = np.array([1/self.N] * self.N)
        for i in range(1, len(y)):
            dy = y[i] - y[i-1]
            # prediction
            x_pred = particles + kappa*(theta-particles)*self.dt + \
                    sigma*rho*(dy - (mu-1/2*particles)*self.dt) + \
                    sigma*np.sqrt(particles*(1-rho**2)*self.dt)*norm.rvs(size=len(particles))

            # TODO: resample neg. vols
            x_pred = np.maximum(1e-3, x_pred)

            # SIR (update)
            weights = weights * self.likelihood(y[i], x_pred, particles, y[i-1], params)*\
                        self.transition(x_pred, particles, params)/\
                        self.proposal(x_pred, particles, dy, params)
            weights = weights/sum(weights)

            # Resampling
            if self._neff(weights) < 0.3*self.N:
                print('resampling since: {}'.format(self._neff(weights)))
                # x_pred, weights = self._simple_resample(x_pred, weights)
                idxs = systematic_resample(weights)
                x_pred, weights = self._resample_from_index(x_pred, weights, idxs)

            # observation prediction
            # y_hat = y[i-1] + (mu-1/2*x_pred)*self.dt + np.sqrt(particles*self.dt)*norm.rvs()
            # py_hat = np.array([np.mean(self.prediction_density(y_hat[k], y[i-1], x_pred, params)) for k in range(len(y_hat))])
            # py_hat = py_hat/sum(py_hat)
            # y_hat = np.sum(py_hat * y_hat)

            v = max(1e-3, np.average(x_pred, weights=weights))
            particles = x_pred
            # observations[i] = y_hat
            hidden[i] = v
            print('done with step: {}'.format(i))
        return observations, hidden
Example #12
    def resample(self, img, thresh_im):
        # If particles are unset, initialize particle filter
        if np.all(self.particles[0] == None):
            print("Sampling Initial Particles")
            self.particles = self.pfilter.create_uniform_particles(
                (0, img.shape[0]), (0, img.shape[1]), (0, 2 * np.pi),
                N_PARTICLES)
            self.old_particles = self.particles
            (good_new, good_old) = self.getFeaturePoints(img, thresh_im)
        else:
            (good_new, good_old) = self.getFeaturePoints(img, thresh_im)

        # Get feature points
        if np.all(good_new != None) and len(good_new) > 1:
            good_new = good_new.reshape(-1, 2)

            # Predict
            mean = np.mean(good_new, axis=0)
            std_dev = np.std(good_new, axis=0)
            self.pfilter.predict(self.particles, u=mean, std=std_dev)

            # Update
            std_err = np.std(good_new, axis=0)
            zs = np.zeros((len(good_new), len(self.particles)))
            for f_ind, feature in enumerate(good_new):
                for p_ind, particle in enumerate(self.particles[:, 0:2]):
                    zs[f_ind, p_ind] += np.linalg.norm(particle - feature)

            self.pfilter.update(self.particles,
                                self.weights,
                                z=zs,
                                R=np.array(std_err),
                                landmarks=good_new)

            # Resample if too few effective particles
            if self.pfilter.neff(self.weights) < N_PARTICLES / 2:
                indices = systematic_resample(self.weights)
                self.pfilter.resample(self.particles, self.weights, indices)

            # Estimate position
            mu, var = self.pfilter.estimate(self.particles, self.weights)
            self.xs.append(mu)

        ptcl_im = img.copy()
        if np.all(good_new != None) and len(good_new) > 1:
            for f in good_new:
                cv2.circle(ptcl_im, (int(f[0]), int(f[1])), 2,
                           (0, 0, 255), 4)

        return ptcl_im
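The nested loop that fills zs above is O(F*N) in Python; it can be vectorized with broadcasting. A sketch of an equivalent computation, assuming good_new has shape (F, 2):

# same (F, N) distance matrix as the double loop, in one shot
diffs = good_new[:, None, :] - self.particles[None, :, 0:2]
zs = np.linalg.norm(diffs, axis=2)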
Example #13
def update_and_resample(particles, weights, iters, threshold, R):
    xs = []
    for i in range(iters):
        weights = update(particles=particles, weights=weights, z=ball.pos, R=R)
        # resample if too few effective particles
        #print(weights)
        #print(neff(weights))
        if neff(weights) < threshold:
            indexes = systematic_resample(weights)
            resample_from_index(particles, weights, indexes)
            assert np.allclose(weights, 1 / N)
        mu, var = estimate(particles, weights)
        xs.append([mu, var])
    return xs
Example #14
def run_pf1(N,
            y,
            sensors_error=.01,
            do_plot=True,
            plot_particles=False,
            initial_x=None):
    plt.figure(figsize=(16, 10))
    # create particles
    if initial_x:
        particles = create_gaussian_particles(initial_x[0], initial_x[1], N)
    else:
        particles = create_uniform_particles((0, 5), N)
    weights = np.ones(N) / N
    t = range(len(y))
    if do_plot:
        if plot_particles:
            alpha = .20
            if N > 5000:
                alpha *= np.sqrt(5000) / np.sqrt(N)
            plt.scatter([t[0]] * N, particles, alpha=alpha, color='g')

    xs = []
    y = [0] + y
    maee, rmse = 0, 0
    for i, ym in enumerate(y):
        if i == 0:
            continue
        particles = predict(particles, ym - y[i - 1], std=2)
        weights = update(particles, weights, ym, R=sensors_error)
        if neff(weights) < N * 2 / 3:
            indexes = systematic_resample(weights)
            particles, weights = resample_from_index(particles, weights,
                                                     indexes)
            assert np.allclose(weights, 1 / N)
        mu, var = estimate(particles, weights)
        xs.append(mu)
        if do_plot:
            if plot_particles:
                plt.scatter([t[i]] * N, particles, color='k', marker=',', s=1)
            p1 = plt.scatter(t[i], ym, marker='+', color='b', s=18, lw=3)
            p2 = plt.scatter(t[i], mu, marker='+', color='r', s=18, lw=3)
        maee = max(abs(mu - ym), maee)
        rmse += (mu - ym)**2
    if do_plot:
        plt.legend([p1, p2], ['Actual', 'PF'], loc=4, numpoints=1)
        plt.grid()
        plt.show()
    print('final position error, variance:\n\t', mu - y[-1], var)
    print('RMSE: {}\nMAEE: {}\n'.format((rmse / (len(y) - 1))**(0.5), maee))
    return (rmse / (len(y) - 1))**(0.5), maee
Example #15
def resample_particles(particles: List[Particle]) -> List[Particle]:
    '''
        Resamples particles using systematic resample from filterpy
    '''
    N = len(particles)
    weights = [p.w for p in particles]
    indexes = systematic_resample(weights)

    new_particles = []
    for i in indexes:
        p = particles[i].copy()
        p.w = 1 / N
        new_particles.append(p)

    return new_particles
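resample_particles assumes a Particle type with a weight attribute w and a copy() method, neither of which is shown. A hypothetical minimal class satisfying that interface:

from dataclasses import dataclass

@dataclass
class Particle:
    x: float
    y: float
    w: float

    def copy(self) -> 'Particle':
        return Particle(self.x, self.y, self.w)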
Example #16
    def update(self, particles, weights, z, R, landmarks):
        weights.fill(1.)

        for i, landmark in enumerate(landmarks):
            distance = np.linalg.norm(particles[:, 0:2] - landmark, axis=1)
            weights *= scipy.stats.norm(distance, R).pdf(z[i])

        weights += 1.e-300  # avoid round-off to zero
        weights /= sum(weights)  # normalize

        # resample if too few effective particles
        if self.neff(weights) < self.numParticles / 2:
            indexes = systematic_resample(weights)
            self.resample_from_index(particles, weights, indexes)

        self.estimate(particles, weights)
Example #17
def run_pf1(n_particles, observations, sensor_std_err=1e-3):
    # create particles and weights
    particles = create_initial_particles(n_particles,
                                         x_init=observations[0, :])
    initial_particles = copy.copy(particles)

    l_means = []
    l_vars = []
    m_means = []
    m_vars = []

    for zs in observations:
        # Propagate particles forward
        predict(particles)

        # incorporate measurements
        weights = update(particles, z=zs, R=sensor_std_err)

        # resample if too few effective particles
        if neff(weights) < n_particles / 2:
            indexes = systematic_resample(weights)
            resample_from_index(particles, weights, indexes)
            assert np.allclose(weights, 1 / n_particles)

        mean_l_hat, var_l_hat, mean_m, var_m = estimate(particles, weights)
        l_means.append(mean_l_hat)
        l_vars.append(var_l_hat)
        m_means.append(mean_m)
        m_vars.append(var_m)

        # print("Particles: ", particles)
        # print("\n weights: ", weights)
        # input("Press enter to continue")

    return np.array(l_means), np.array(l_vars), np.array(m_means), np.array(
        m_vars), initial_particles
Example #18
 def resample(self):
     if self.neff() < self.N / 2:
         #print('resample!')
         self.resample_time += 1
         indexes = systematic_resample(self.weights)
         self.resample_from_index(indexes)
Example #19
 def resample_syst(self):
     indexes = systematic_resample(self.weights)
     self.resample_from_index(indexes)
Example #20
 def resample_from_index(self):
     indexes = systematic_resample(self.weights)
     self.particles[:] = self.particles[indexes]
     self.weights[:] = self.weights[indexes]
     self.weights.fill(1.0 / self.N)
Example #21
def anim_thread(fps = 60):
    dt = 1/fps
    move_error = 0.05 #update this value
    turn_error = 0.05 #update this value
    move_vel = 0.8
    turn_vel = 1
    while running:
        vp.rate(fps)
        k = vp.keysdown()
        u = np.array([0.,0.,0.,0.,0.,0.])
        u = u.astype('float64')
        if 'w' in k:
            #print(scene.forward.norm()*move_vel*dt*(1+move_error*randn()))
            delta = scene.forward.norm()*move_vel*dt*(1+move_error*randn())
            u += np.array([delta.x,delta.y,delta.z,0.,0.,0.])
            scene.camera.pos += delta
        if 's' in k:
            delta = scene.forward.norm()*move_vel*dt*(1+move_error*randn())
            u += np.array([delta.x,delta.y,delta.z,0.,0.,0.])
            scene.camera.pos -= delta
        if 'd' in k:
            delta = scene.forward.cross(scene.up).norm()*move_vel*dt*(1+move_error*randn())
            u += np.array([delta.x,delta.y,delta.z,0.,0.,0.])
            scene.camera.pos += delta
        if 'a' in k:
            delta = scene.forward.cross(scene.up).norm()*move_vel*dt*(1+move_error*randn())
            u += np.array([delta.x,delta.y,delta.z,0.,0.,0.])
            scene.camera.pos -= delta
        if 'up' in k:
            epsilon = scene.camera.axis.rotate(angle = turn_vel*dt*(1+turn_error*randn()), axis = scene.camera.axis.cross(scene.up)).norm()
            delta = (epsilon - scene.camera.axis.norm()).norm()
            u += np.array([0.,0.,0.,delta.x,delta.y,delta.z])
            scene.camera.axis = epsilon
        if 'down' in k:
            epsilon = scene.camera.axis.rotate(angle = -1*turn_vel*dt*(1+turn_error*randn()), axis = scene.camera.axis.cross(scene.up)).norm()
            delta = (epsilon - scene.camera.axis.norm()).norm()
            u += np.array([0.,0.,0.,delta.x,delta.y,delta.z])
            scene.camera.axis = epsilon
        if 'right' in k:          
            epsilon = scene.camera.axis.rotate(angle = -1*turn_vel*dt*(1+turn_error*randn()),axis = scene.up).norm()
            delta = (epsilon - scene.camera.axis.norm()).norm()
            u += np.array([0.,0.,0.,delta.x,delta.y,delta.z])
            scene.camera.axis = epsilon
        if 'left' in k:
            epsilon = scene.camera.axis.rotate(angle = turn_vel*dt*(1+turn_error*randn()),axis = scene.up).norm()
            delta = (epsilon - scene.camera.axis.norm()).norm()
            u += np.array([0.,0.,0.,delta.x,delta.y,delta.z])
            scene.camera.axis = epsilon
        scene.camera.axis.mag = 1
        scene.forward.mag = 1
        scene.up.mag = 1
        #Update Pos - do I need error in this?
        pos = np.array([scene.camera.pos.x,scene.camera.pos.y,scene.camera.pos.z, scene.camera.axis.norm().x,scene.camera.axis.norm().y,scene.camera.axis.norm().z]) #x y z x bearing y bearing z bearing
        #Predict
        predict(particles = particles, u = u, std = [move_error,move_error,move_error,turn_error,turn_error,turn_error], dt = 1.)
        #update
        update(particles = particles, weights = weights, z = pos, R = [move_error,move_error,move_error,turn_error,turn_error,turn_error])
        #Neff and resample
        if neff(weights) < threshold:
            indexes = systematic_resample(weights)
            resample_from_index(particles, weights, indexes)
            assert np.allclose(weights, 1/N)
        #print out error maybe
        est = estimate(particles, weights)
        compare_est_pos(est = est, pos = pos)
Example #22
 def resample(self):
     """
     use systematic resample by default, can also use stratified resample as well, filterpy.monte_carlo provides this one too
     """
     idx = systematic_resample(self.weights)
     self._resample_from_index(idx)
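The docstring above mentions stratified resampling as the alternative filterpy.monte_carlo provides. The only difference from the systematic scheme is that each of the N strata draws its own offset instead of sharing one; a sketch:

import numpy as np

def stratified_resample_sketch(weights):
    N = len(weights)
    # one independent random draw per stratum
    positions = (np.random.random(N) + np.arange(N)) / N
    indexes = np.zeros(N, 'i')
    cumulative_sum = np.cumsum(weights)
    cumulative_sum[-1] = 1.0
    i, j = 0, 0
    while i < N:
        if positions[i] < cumulative_sum[j]:
            indexes[i] = j
            i += 1
        else:
            j += 1
    return indexes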
Example #23
def run_pf1(N,
            sensor_std_err=1,
            do_plot=True,
            plot_particles=False,
            xlim=(-10, 40),
            ylim=(0, 140),
            initial_x=None,
            vl=None,
            vr=None,
            t=None,
            angle=None,
            dist=None):
    landmarks = np.array([[0, 10, 10], [0.1, 5, 15], [-0.1, 15, 5],
                          [0.3, 10, 15], [-1, 15, 15]])
    NL = len(landmarks)
    iters = len(t)

    plt.figure()

    # create particles and weights
    if initial_x is not None:
        particles = create_gaussian_particles(mean=initial_x,
                                              std=(5, 5, np.pi / 4),
                                              N=N)
    else:
        particles = create_uniform_particles((0, 20), (0, 20), (0, 6.28), N)
    weights = np.ones(N) / N

    if plot_particles:
        alpha = .20
        if N > 5000:
            alpha *= np.sqrt(5000) / np.sqrt(N)
        plt.scatter(particles[:, 3], particles[:, 4], alpha=alpha, color='g')

    xs = []
    robot_pos = np.array([0., 0., 13.8])

    x = 0
    y = 0
    for i in range(1, iters):
        # TODO
        # robot_pos += (1, 1)
        dt = t[i] - t[i - 1]

        robot_pos = (angle[i - 1], vl[i - 1], vr[i - 1])

        # landmarks = np.array([robot_pos])
        # distance from robot to each landmark
        zs = (norm(landmarks - robot_pos, axis=1) + (randn(NL) * sensor_std_err))

        # move diagonally forward to (x+1, x+1)
        predict(particles, u=(0.00, 1.414), std=(.2, .05), dt=dt)

        # incorporate measurements
        update(particles, weights, z=zs, R=sensor_std_err, landmarks=landmarks)

        # resample if too few effective particles
        if neff(weights) < N / 2:
            indexes = systematic_resample(weights)
            resample_from_index(particles, weights, indexes)

        mu, var = estimate(particles, weights)
        xs.append(mu)

        if plot_particles:
            plt.scatter(particles[:, 0],
                        particles[:, 2],
                        color='k',
                        marker=',',
                        s=1)
        p1 = plt.scatter(robot_pos[1],
                         robot_pos[2],
                         marker='+',
                         color='k',
                         s=180,
                         lw=3)
        p2 = plt.scatter(mu[0], mu[1], marker='s', color='r')

    # xs = np.array(xs)
    # plt.plot(xs[:, 0], xs[:, 1])
    plt.legend([p1, p2], ['Actual', 'PF'], loc=4, numpoints=1)
    # plt.xlim(*xlim)
    # plt.ylim(*ylim)
    # print('final position error, variance:\n\t', mu - np.array([iters, iters]), var)
    plt.show()
Example #24
    def run_pf(self, camera=None, lidar=None, dt=0, i=0):
        #self.camera_obstacle_pos = np.array([camera.center.x, camera.center.y])
        #self.camera_radius = np.array([camera.radius])
        #sigma (uncertainty) sensor with added noise
        cam_err_x = 0.3
        cam_err_y = 0.2
        cam_err_rad = 0.08

        if self.run_fusion[i]:
            lid_err_x = 0.1
            lid_err_y = 0.08
            lid_err_rad = 0.08
            lid_var = [[lid_err_x**2, 0, 0], [0, lid_err_y**2, 0],
                       [0, 0, lid_err_rad**2]]
            cam_var = [[cam_err_x**2, 0, 0], [0, cam_err_y**2, 0],
                       [0, 0, cam_err_rad**2]]
            zs, sigma = multivariate_multiply(camera, cam_var, lidar, lid_var)
            #fused_radius = (camera[2] * cam_err_rad**2 + lidar[2] * lid_err_rad**2)/(cam_err_rad**2+lid_err_rad**2)
            #camera_err_norm = camera_std_err/(camera_std_err+lidar_std_err)
            #lidar_err_norm = lidar_std_err/(camera_std_err+lidar_std_err)
            #R = np.average([camera_std_err, lidar_std_err],
            #                weights = [(1-camera_err_norm), (1-lidar_err_norm)])
            var = sigma * (1 + 0.5)

        else:
            #self.camera = self.camera_obstacle_pos + randn()* camera_std_err
            #self.camera = self.camera_obstacle_pos + np.array([randn()* camera_std_err_x, randn()* camera_std_err_y])
            #print(type(self.camera))
            #zs = np.concatenate((self.camera, self.camera_radius))
            zs = camera
            var = [[cam_err_x**2, 0, 0], [0, cam_err_y**2, 0],
                   [0, 0, cam_err_rad**2]]
        # do predict
        std = (0.02, 0.02, 0.04, 0.04, 0.04, 0.04, 0.02)
        pf.predict(self.new_particles[:, :, i], std=std, dt=dt)

        # incorporate measurements
        #pf.update(self.new_particles[:, :,i], self.new_weights[:, i], z = zs, R = R)

        pf.update_multivariate(self.new_particles[:, :, i],
                               self.new_weights[:, i],
                               z=zs,
                               R=var)

        # resample if too few effective particles
        if pf.neff(self.new_weights[:, i]) < self.N / 2:
            indexes = systematic_resample(self.new_weights[:, i])
            self.new_weights[:, i] = pf.resample_from_index(
                self.new_particles[:, :, i], self.new_weights[:, i], indexes)
            #assert np.allclose(self.weights, 1/self.N)

        self.mu, self.var = pf.estimate(self.new_particles[:, :, i],
                                        self.new_weights[:, i])
        #reinitialize every t seconds
        #t = 0
        #time = round(self.time_elapsed, 1)
        if False:
            print("Reinitializing")
            initial = (zs[0], zs[1], 0.05, 0.05, 0, 0, zs[2])
            init_std = (0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1)
            self.new_particles[:, :,
                               i] = pf.create_gaussian_particles(mean=initial,
                                                                 std=init_std,
                                                                 N=self.N)
            self.new_weights[:, i] = pf.create_initial_weights(self.N)
Example #25
def run_sim_pf(N,
               iters=30,
               sensor_std_err=0.1,
               do_plot=True,
               plot_particles=False,
               xlim=(0, 2),
               ylim=(0, 2),
               initial_x=None):
    landmarks = np.array([[-1, 2], [5, 10], [12, 14], [18, 21]])
    NL = len(landmarks)
    plt.figure()

    # --------------------------------Initialize filter elements
    # Gaussian or Uniform
    if initial_x is not None:
        particles = create_gaussian_particles(mean=initial_x,
                                              std=(0.35, 0.4, np.pi / 4),
                                              N=N)
    else:
        particles = create_uniform_particles((0, 20), (0, 20), (0, 6.28), N)
    # Initial normalized weights
    weights = np.ones(N) / N

    # Plot initial particles distribution
    # if plot_particles:
    #     alpha = .20
    #     if N > 5000:
    #         alpha *= np.sqrt(5000)/np.sqrt(N)
    #     plt.scatter(particles[:, 0], particles[:, 1],
    #                 alpha=alpha, color='g')

    xs = []  # Particle set
    robot_pos = np.array([0., 0.])  # Initial robot position

    # Simulation and estimation loop
    for x in range(iters):
        # Evolve robot position
        robot_pos += (1, 1)

        # Measurements (distance from landmarks)
        zs = (norm(landmarks - robot_pos, axis=1) +
              (randn(NL) * sensor_std_err))

        # Evolve particles following the motion model
        predict_nonlinear_planar(particles, u=(0.00, 1.414), std=(.2, .05))

        # Update weights from measurements
        update_landmarks(particles,
                         weights,
                         z=zs,
                         R=sensor_std_err,
                         landmarks=landmarks)

        # Resample triggering
        if neff(weights) < N / 2:
            indexes = systematic_resample(weights)
            resample_from_index(particles, weights, indexes)
            #multinomial_resample(particles, weights)
            #assert np.allclose(weights, 1/N)

        # State estimation
        mu, var = estimate(particles, weights)
        xs.append(mu)

        if plot_particles:
            plt.scatter(particles[:, 0],
                        particles[:, 1],
                        color='k',
                        marker=',',
                        s=1)

        # Plot ground truth and estimate
        p1 = plt.scatter(robot_pos[0],
                         robot_pos[1],
                         marker='+',
                         color='b',
                         s=180,
                         lw=3)
        p2 = plt.scatter(mu[0], mu[1], marker='s', color='r')

    # Plot data after loop
    xs = np.array(xs)
    plt.legend([p1, p2], ['Actual', 'PF'], loc=4, numpoints=1)
    plt.xlim(*xlim)
    plt.ylim(*ylim)
    print('final position error, variance:\n\t', mu - np.array([iters, iters]),
          var)
    plt.show()
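Example #25, like most of the landmark examples in this collection, relies on predict and update helpers that are not shown. A sketch of the textbook-style versions these call sites imply (heading/velocity motion model and Gaussian range likelihood, following rlabbe's book; the actual helpers in each repository may differ):

import numpy as np
import scipy.stats
from numpy.random import randn

def predict(particles, u, std, dt=1.0):
    # turn by u[0] and advance by u[1]*dt, with per-particle noise
    N = len(particles)
    particles[:, 2] += u[0] + (randn(N) * std[0])  # heading
    particles[:, 2] %= 2 * np.pi
    dist = (u[1] * dt) + (randn(N) * std[1])       # distance travelled
    particles[:, 0] += np.cos(particles[:, 2]) * dist
    particles[:, 1] += np.sin(particles[:, 2]) * dist

def update(particles, weights, z, R, landmarks):
    # reweight by the likelihood of each measured landmark range z[i]
    for i, landmark in enumerate(landmarks):
        distance = np.linalg.norm(particles[:, 0:2] - landmark, axis=1)
        weights *= scipy.stats.norm(distance, R).pdf(z[i])
    weights += 1.e-300  # avoid all weights rounding to zero
    weights /= sum(weights)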
Example #26
def run_DA(PedPredXY,
           PedActXY,
           nextLSTMInput,
           time,
           pedID,
           gateID,
           df,
           lb,
           scaler,
           pedsUsed,
           preparedData,
           GetReal=25,
           N=100,
           iters=50,
           sensor_std_err=0.1,
           plot_particles=False,
           plot_iteration=False,
           savePlot=False,
           xlim=(0, 750),
           ylim=(0, 750),
           initial_x=None,
           savePre=False):

    plt.figure()

    #Create Uniform Particles Across Map:
    particles = create_uniform_particles((0, 750), (0, 750), (0, 6.28), N)

    weights = np.ones(N) / N

    #Plotting of Initial Particles:
    if plot_particles:
        alpha = .20
        if N > 5000:
            alpha *= np.sqrt(5000) / np.sqrt(N)
        InitialParticles = plt.scatter(particles[:, 0],
                                       particles[:, 1],
                                       alpha=alpha,
                                       color='g')
    else:
        InitialParticles = plt.scatter(particles[:, 0],
                                       particles[:, 1],
                                       alpha=0.0,
                                       color='g')

    xs = []
    time = time + 1

    #Create a data frame to collect information on the performance of the model:
    column_names = [
        "Time", "PedID", "GateID", "Iterations", "Final x", "Final y",
        "Actual x", "Actual y", "Variance x", "Variance y",
        "Final Position x Error", "Final Position y Error", "Error_Pixels"
    ]

    preformanceDF = pd.DataFrame(columns=column_names)

    for x in range(iters):

        #Receiving Actual Data:
        if x % GetReal == 0 and x != 0:
            preparedData, pedsUsed = prepare_data(df, time, lb, scaler, gateID)
            LSTMPrediction = randomLSTM_Predict(
                len(pedsUsed))  #Using randomLSTM_Predict
            LSTMPredictionIndexed = np.insert(LSTMPrediction,
                                              0,
                                              pedsUsed,
                                              axis=1)

            PedPredXY = np.zeros((1, 2))
            prediction = predict(LSTMPredictionIndexed, gateID, time,
                                 PedPredXY)
            nextLSTMInput = prepareNextLSTMInput(prediction, preparedData)

            PedPredXY = newXY(prediction, pedID)

            PedActXY = df[(df['ID'] == pedID) & (df['Time'] == time)]
            PedActXY = np.array([PedActXY.iloc[0]['X'], PedActXY.iloc[0]['Y']])

            particles = create_uniform_particles(
                (PedPredXY[0][0] - 200, PedPredXY[0][0] + 200),
                (PedPredXY[0][1] - 200, PedPredXY[0][1] + 200), (0, 6.28), N)

            weights = np.ones(N) / N

            if plot_particles:
                alpha = .20
                if N > 5000:
                    alpha *= np.sqrt(5000) / np.sqrt(N)
                #InitialParticles = plt.scatter(particles[:, 0], particles[:, 1], alpha=alpha, color='g')

        #Move diagonally forward to (x+1, x+1) #(0.2, 1.414)
        particle_predict(particles, u=(10, 1.414), std=(50, 50))

        #Incorporate LSTM measurements:
        weights = update(particles, weights, landmarks=PedPredXY)

        #Resample:
        #if neff(weights) < N/1.5:
        #if x % 1 == 0:
        indexes = systematic_resample(weights)
        resample_from_index(particles, weights, indexes)
        assert np.allclose(weights, 1 / N)

        mu, var = estimate(particles, weights)
        xs.append(mu)

        #Plot the new particles produced:
        if plot_particles:
            ResampledParticles = plt.scatter(particles[:, 0],
                                             particles[:, 1],
                                             color='k',
                                             marker=',',
                                             s=1,
                                             alpha=0.05)
        else:
            ResampledParticles = plt.scatter(particles[:, 0],
                                             particles[:, 1],
                                             color='k',
                                             marker=',',
                                             s=1,
                                             alpha=0.0)

        #Plot the actual position:
        p1 = plt.scatter(PedActXY[0],
                         PedActXY[1],
                         marker='^',
                         color='b',
                         s=50,
                         lw=1)

        #Plot the predicted positions using PF and LSTM:
        p2 = plt.scatter(mu[0], mu[1], marker='x', color='r')

        #Record the performance:
        preformanceDF = preformance(preformanceDF, time, x, pedID, gateID,
                                    iters, mu, PedActXY, var)

        #Step Time:
        time = time + 1

        #Generate Next LSTM Prediction:
        LSTMPrediction = randomLSTM_Predict(
            len(pedsUsed))  #Using randomLSTM_Predict
        LSTMPredictionIndexed = np.insert(LSTMPrediction, 0, pedsUsed, axis=1)

        prediction = predict(LSTMPredictionIndexed, gateID, time, PedPredXY)
        nextLSTMInput = prepareNextLSTMInput(prediction, preparedData)

        #Get next pedestrian's predict XY location from the LSTM Model:
        PedPredXY = newXY(prediction, pedID)

        #Get next pedestrian's actual location from the original dataframe / CSV file this
        #if for plotting purposes only:
        PedActXY = df[(df['ID'] == pedID) & (df['Time'] == time)]
        PedActXY = np.array([PedActXY.iloc[0]['X'], PedActXY.iloc[0]['Y']])

        #If you want plot each iteration:
        if plot_iteration:
            plotPF(InitialParticles, ResampledParticles, p1, p2, xlim, ylim,
                   gateID, pedID, x, plot_particles, mu, var, savePlot)

    xs = np.array(xs)

    #Overall plotting of PF and performance:
    plotPF(InitialParticles, ResampledParticles, p1, p2, xlim, ylim, gateID,
           pedID, iters, plot_particles, mu, var, savePlot)

    #Show performance:
    preformanceDF.plot.line(x='Iterations', y='Error_Pixels')

    #Save performance:
    if savePre:
        preformanceDF.to_csv('PedID_' + str(pedID) + 'GateID' + str(gateID) +
                             'Iter' + str(iters) + '.csv',
                             index=False,
                             header=True)
Example #27
def run_textbook_particle_filter(num_of_particles,
                                 num_of_iterations=20,
                                 sensor_std=.1,
                                 do_plot=True,
                                 plot_particles=False):
    '''
    Run the particle filter
    
    PARAMETERS
     - num_of_particles:     Number of particles
     - num_of_iterations:    Number of iterations for particle filter
     - sensor_std:           Standard deviation for error of sensor
     - do_plot:              Boolean variable to plot particle filter results
     - plot_particles:       Boolean variable to plot each particle
    
    DESCRIPTION
    Set locations for landmarks, particle locations, and particle weights.
    Plot individual particles initially. Set robot location. For each
    iteration, increment the robot location, take observation with noise 
    for distance between robot and landmarks, predict particles forward, 
    update particle weights. If effective N is small enough, resample
    particles. Calculate estimates and save the particle mean. Plot 
    results and print final error statistics.
    '''

    landmarks = np.array([[-1, 0], [2, 3], [-1, 15], [2, 36]])
    num_of_landmarks = len(landmarks)

    plt.figure()

    # create particles
    particles = create_uniform_particles((0, 20), (0, 20), (0, 6.28),
                                         num_of_particles)
    weights = np.ones(num_of_particles) / num_of_particles

    # plot particles
    if plot_particles:
        alpha = .20
        if num_of_particles > 5000:
            alpha *= np.sqrt(5000) / np.sqrt(num_of_particles)
        plt.scatter(particles[:, 0], particles[:, 1], alpha=alpha, color='g')

    means = []
    robot_position = np.array([0., 0.])

    # loop through iterations
    for iteration in range(num_of_iterations):

        # increment robot location
        robot_position += (1, 1)

        # distance from robot to each landmark
        observation = (norm(landmarks - robot_position, axis=1) +
                       (randn(num_of_landmarks) * sensor_std))

        # move diagonally forward
        predict(particles, control_input=(0.00, 1., 1.), std=(.2, 5, 5))

        # incorporate measurements
        update(particles,
               weights,
               observation=observation,
               sensor_std=sensor_std,
               landmarks=landmarks)

        # resample if too few effective particles
        if effective_n(weights) < num_of_particles / 2:
            indexes = systematic_resample(weights)
            resample_from_index(particles, weights, indexes)

        # calculate and save estimates
        mean, variance = estimate(particles, weights)
        means.append(mean)

        if plot_particles:
            plt.scatter(particles[:, 0],
                        particles[:, 1],
                        color='k',
                        marker=',',
                        s=1)
        p1 = plt.scatter(robot_position[0],
                         robot_position[1],
                         marker='+',
                         color='k',
                         s=180,
                         lw=3)
        p2 = plt.scatter(mean[0], mean[1], marker='s', color='r')

    means = np.array(means)
    plt.legend([p1, p2], ['Actual', 'PF'], loc=4, numpoints=1)
    print('final position error, variance:\n\t',
          mean - np.array([num_of_iterations, num_of_iterations]), variance)
    plt.show()
Example #28
def run_pf1(N, iters=13, sensor_std_err=0.0000001,
            plot_particles=False, is_lim=False,
            xlim=(117.21, 117.31), ylim=(39.11, 39.21),
            initial_x=None):
    '''

    :param N: number of particles; the more there are, the likelier one lands on the true position.
    :param iters: data length
    :param sensor_std_err: measurement/sensor standard deviation; the smaller it is, the more accurate the measurement.
    :param plot_particles: whether to plot the particle cloud
    :param is_lim: whether to fix the axis limits
    :param xlim: x-axis limits
    :param ylim: y-axis limits
    :param initial_x: if the initial position is known, use a Gaussian distribution; otherwise uniform
    :return: none
    '''
    landmarks = np.array([[117.3005, 39.1160], [117.2995, 39.1160], [117.2985, 39.1160], [117.2975, 39.1160],
                          [117.3005, 39.1170], [117.2995, 39.1170], [117.2985, 39.1170], [117.2975, 39.1170],
                          [117.2965, 39.1160], [117.2960, 39.1160], [117.2955, 39.1160], [117.2950, 39.1160],
                          [117.2965, 39.1170], [117.2960, 39.1170], [117.2955, 39.1170], [117.2950, 39.1170]])
    NL = len(landmarks)

    plt.figure()

    # create particles and weights
    # Gaussian or uniform?
    if initial_x is not None:   # with an initial x, create a Gaussian cloud; std plays the role of Q
        particles = create_gaussian_particles(
            mean=initial_x, std=(0.00001, 0.00001, np.pi / 8), N=N)
    else:
        particles = create_uniform_particles((117.21, 117.31), (39.11, 39.21), (0, np.pi*2), N)
    weights = np.ones(N) / N

    # plot the initial (transparent) particles and the landmarks
    if plot_particles:
        alpha = .20
        if N > 5000:
            alpha *= np.sqrt(5000) / np.sqrt(N)
        plt.scatter(particles[:, 0], particles[:, 1],
                    alpha=alpha, color='g')
        plt.scatter(landmarks[:, 0], landmarks[:, 1], marker='s', s=60)

    dt_series, turn_angle_series, velocity_series, longitude_series, latitude_series\
        = Data.load_process_data(data_length=iters, correction=1.3, conversion=17)     # true position
    true_pos, us = [], []  # array([[longitude], [latitude]])
    for (turn_angle, velocity, longitude, latitude) in zip(turn_angle_series, velocity_series, longitude_series, latitude_series):
        true_pos.append([longitude, latitude])
        us.append([turn_angle, velocity])
    true_pos = np.array(true_pos)
    us = np.array(us)

    xs = []  # estimate x
    for i in range(iters):
        # simulated measurement: distance from robot to each landmark
        zs = (norm(landmarks - true_pos[i], axis=1) +
              (randn(NL) * sensor_std_err))

        # move
        predict(particles, u=us[i], std=(0.2, 0.05), dt=dt_series[i])   # confidence in the prediction

        # incorporate measurements
        update(particles, weights, z=zs, R=sensor_std_err,  # confidence in the measurement
               landmarks=landmarks)

        # resample if too few effective particles
        if neff(weights) < N / 2:
            indexes = systematic_resample(weights)
            resample_from_index(particles, weights, indexes)    # resample

        mu, var = estimate(particles, weights)
        xs.append(mu)

        # plot the particle cloud after the estimate
        if plot_particles:
            plt.scatter(particles[:, 0], particles[:, 1],
                        color='k', marker=',', s=1)

        # plot the true position (+)
        p1 = plt.scatter(true_pos[i, 0], true_pos[i, 1], marker='+',
                         color='k', s=180, lw=1, label='true')
        # plot the estimated position
        p2 = plt.scatter(mu[0], mu[1], marker='s', color='r', label='estimate')

    # xs = np.array(xs)
    # plt.plot(xs[:, 0], xs[:, 1])
    plt.legend([p1, p2], ['Actual', 'PF'], loc=4, numpoints=1)
    if is_lim:
        plt.xlim(*xlim)
        plt.ylim(*ylim)
    print('final position error, variance:\n\t', mu - np.array([iters, iters]), var)
    plt.show()
Example #29
def run_pf1(N,
            iters=18,
            sensor_std_err=.1,
            do_plot=True,
            plot_particles=False,
            xlim=(0, 20),
            ylim=(0, 20),
            initial_x=None):
    landmarks = np.array([[-1, 2], [5, 10], [12, 14], [18, 21]])
    NL = len(landmarks)

    # plt.figure()

    # create particles and weights
    if initial_x is not None:
        particles = create_gaussian_particles(mean=initial_x,
                                              std=(5, 5, np.pi / 4),
                                              N=N)
    else:
        particles = create_uniform_particles((0, 20), (0, 20), (0, 6.28), N)
    weights = np.ones(N) / N

    if plot_particles:
        alpha = .20
        if N > 5000:
            alpha *= np.sqrt(5000) / np.sqrt(N)
        plt.scatter(particles[:, 0], particles[:, 1], alpha=alpha, color='g')

    xs = []
    robot_pos = np.array([0., 0.])
    for x in range(iters):
        robot_pos += (1, 1)

        # distance from robot to each landmark
        zs = (norm(landmarks - robot_pos, axis=1) +
              (randn(NL) * sensor_std_err))

        # move diagonally forward to (x+1, x+1)
        predict(particles, u=(0.00, 1.414), std=(.2, .05))

        if plot_particles:
            plt.scatter(particles[:, 0],
                        particles[:, 1],
                        color='k',
                        marker=',',
                        s=1)
        print("before_update")
        print(particles)
        plt.show()
        # incorporate measurements
        update(particles, weights, z=zs, R=sensor_std_err, landmarks=landmarks)
        if plot_particles:
            plt.scatter(particles[:, 0],
                        particles[:, 1],
                        color='k',
                        marker=',',
                        s=1)
        print("after_update")
        plt.show()
        # resample if too few effective particles
        if neff(weights) < N / 2:
            indexes = systematic_resample(weights)
            resample_from_index(particles, weights, indexes)
            assert np.allclose(weights, 1 / float(N))
        mu, var = estimate(particles, weights)
        xs.append(mu)

        if plot_particles:
            plt.scatter(particles[:, 0],
                        particles[:, 1],
                        color='k',
                        marker=',',
                        s=1)
        p1 = plt.scatter(robot_pos[0],
                         robot_pos[1],
                         marker='+',
                         color='k',
                         s=180,
                         lw=3)
        p2 = plt.scatter(mu[0], mu[1], marker='s', color='r')

    xs = np.array(xs)
    # plt.plot(xs[:, 0], xs[:, 1])
    plt.legend([p1, p2], ['Actual', 'PF'], loc=4, numpoints=1)
    plt.xlim(*xlim)
    plt.ylim(*ylim)

    print('final position error, variance:\n\t', mu - np.array([iters, iters]),
          var)
Example #30
                zs = (norm(landmarks - robot_pos, axis=1) +
                      (randn(NL) * sensor_std_err))

                # move
                predict(particles, u=(speed, heading), std=(0.5, 0.4))

                # incorporate measurements
                update(particles,
                       weights,
                       z=zs,
                       R=sensor_std_err,
                       landmarks=landmarks)

                # resample if too few effective particles
                if neff(weights) < N / 2:
                    indexes = systematic_resample(weights)
                    resample_from_index(particles, weights, indexes)

                mu, var = estimate(particles, weights)

                if plot_particles:
                    plt.scatter(particles[:, 0],
                                particles[:, 1],
                                color='r',
                                marker=',',
                                s=1,
                                alpha=0.1)  #0.05)
                plt1 = plt.scatter(robot_pos[0],
                                   robot_pos[1],
                                   marker='+',
                                   color='w',
Example #31
def particle_pub():
    rospy.init_node('particle_publisher',anonymous=True)

    # Publishers
    pub_sim_rob = rospy.Publisher('mobile_agent', PoseStamped, queue_size=10)
    pub_particles = rospy.Publisher('particles', PoseArray, queue_size=20)
    pub_estimate = rospy.Publisher('state_estimate', PoseStamped, queue_size=10)

    rate = rospy.Rate(100)     # 100 Hz rate

    # Instance of simulated robot (constant velocity)
    mob_rob = MobAgent(0,0,0.01)
    vx = 0.1
    vy = 0.1

    num_particles = 1000

    # Create particle filter
    initial_x=(0.6,0.6, np.pi/4)
    particles = create_gaussian_particles(mean=initial_x, std=(0.35, 0.4, np.pi/4), N=num_particles)
    weights = np.ones(num_particles) / num_particles
    #print(particles)
    #fig, ax = plt.subplots()
    #ax.scatter(particles[:,0],particles[:,1],color='green',s=3)
    #ax.grid()
    #plt.show()
    #exit(0)

    # Estimate
    xs = []

    # For plots 
    ground_truth_lst = []
    meas_lst = []
    estimate_lst = []

    while not rospy.is_shutdown():
        # ----------------------- Agent Simulation
        # Evolve simulated agent
        mob_rob.step(vx,vy)

        # Get simulated noisy measurement
        [x_meas, y_meas] = mob_rob.state_meas()

        # Ground truth message
        p_real = build_rob_pose_msg(mob_rob)          
        pub_sim_rob.publish(p_real)

        # ------------------------ Prediction step
        predict_linear_planar(particles, u=(vx, vy), std=(.005,.005), dt=mob_rob.dt)

        # Particles message
        p_particles = build_particles_msg(particles)
        pub_particles.publish(p_particles)
        # -----------------------------------------

        # ------------------------ Correction step
        sensor_std_err = 0.05
        z = np.linalg.norm(np.array([x_meas,y_meas]))
        update_fullstate(particles, weights, z=z, R=sensor_std_err)
        # Resample triggering
        if neff(weights) < num_particles/2:
            indexes = systematic_resample(weights)
            resample_from_index(particles, weights, indexes)
        #-----------------------------------------

        # --------------------------- Estimation
        mu, var = estimate(particles, weights)

        # Estimate message
        p_estimate = build_estimate_msg(mu)
        pub_estimate.publish(p_estimate)
        # --------------------------------------

        # Saving for plots
        ground_truth_lst.append([mob_rob.x,mob_rob.y])
        meas_lst.append([x_meas,y_meas])
        xs.append(mu)
        estimate_lst.append(mu)

        rate.sleep()
    # ----------------------------------------------------------------------

    print("Plotting results")
    # Post processing and plots when execution is over
    # Ground truth simulated robot and noisy measurements
    gt_plot = np.array(ground_truth_lst)
    meas_plot = np.array(meas_lst)
    estimate_plot = np.array(estimate_lst)
    fig, ax = plt.subplots()
    ax.plot(gt_plot[:,0],gt_plot[:,1],color='k',linewidth=2)
    ax.scatter(meas_plot[:,0],meas_plot[:,1],color='b',s=3)
    ax.plot(estimate_plot[:,0],estimate_plot[:,1],color='r',linewidth=2)
    ax.set(xlabel='x(m)', ylabel='y(m)',
       title='Ground truth robot pose')
    ax.legend(['Ground truth','Measurements','PF estimate'])
    ax.grid()
    plt.show()
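update_fullstate is not shown in Example #31. Given that it is called with a scalar range z = ||(x_meas, y_meas)|| and a scalar R, a hypothetical implementation consistent with the call site would weight each particle by a Gaussian range likelihood:

import numpy as np
import scipy.stats

def update_fullstate(particles, weights, z, R):
    # hypothetical: weight by how well each particle's range from the
    # origin matches the measured range z (measurement std dev R)
    dist = np.linalg.norm(particles[:, 0:2], axis=1)
    weights *= scipy.stats.norm(dist, R).pdf(z)
    weights += 1.e-300
    weights /= sum(weights)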