Example #1
    def update(self, z):
        # PF correction step: weight each particle by the bearing likelihood,
        # then resample with a low-variance sampler.

        lm_id = int(z[1])

        # Vector from the predicted mean pose to the observed landmark.
        dx = self._field_map.landmarks_poses_x[lm_id] - self.mu_bar[0]
        dy = self._field_map.landmarks_poses_y[lm_id] - self.mu_bar[1]
        dn = dx**2 + dy**2

        # Jacobian of the bearing observation, used to project the predicted
        # covariance into measurement space (an EKF-style innovation variance).
        H = np.array([[dy / dn, -dx / dn, -1]])

        for i in range(self.num_p):
            # Expected bearing for particle i; the innovation is an angle
            # difference, so wrap it before evaluating the likelihood.
            z_bar = get_observation(np.squeeze(self.X[i]), z[1])[0]
            innovation = wrap_angle(z[0] - z_bar)
            self.weight[i] = gaussian.pdf(
                innovation, 0,
                np.sqrt(H.dot(self.Sigma_bar).dot(H.T) + self._Q))

        self.weight = self.weight / np.sum(self.weight)  # normalize to sum to 1

        # Low-variance (systematic) resampling.
        R = uniform(0, 1 / self.num_p)
        c = self.weight[0]
        X = np.zeros_like(self.X)
        i = 0

        for m in range(self.num_p):
            U = R + m / self.num_p
            while U > c:
                i += 1
                c += self.weight[i]
            X[m] = self.X[i]

        self.X = X
        self._state = get_gaussian_statistics(self.X)
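
All of these snippets lean on a get_observation(state, lm_id) helper that is not shown. As a minimal sketch of what such a bearing-only helper could look like, assuming it returns [bearing, lm_id] and taking the field map as an explicit argument (the real helper presumably reads it from shared state), consider:

    import numpy as np

    def get_observation(state, lm_id, field_map):
        # Hypothetical sketch: bearing from pose (x, y, theta) to landmark lm_id.
        # The explicit field_map argument is an assumption made for self-containment.
        dx = field_map.landmarks_poses_x[int(lm_id)] - state[0]
        dy = field_map.landmarks_poses_y[int(lm_id)] - state[1]
        bearing = np.arctan2(dy, dx) - state[2]
        bearing = (bearing + np.pi) % (2 * np.pi) - np.pi  # wrap to [-pi, pi)
        return np.array([bearing, lm_id])

This is consistent with the dx/dy geometry and the bearing Jacobian H used in Example #1.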
Example #2
    def update(self, z):
        # PF correction step: weight particles by the bearing likelihood,
        # then resample with the low-variance sampler.

        Qt = self._Q
        standard_deviation = np.sqrt(Qt)

        observation = z[0]
        lm_id = z[1]

        expected_observation = np.array([
            get_observation(self.particles[i], lm_id)[0] for i in range(self.M)
        ])
        angle_deviations = np.array([
            wrap_angle(expected_observation[i] - observation)
            for i in range(self.M)
        ])

        # Standard-normal pdf of the standardized innovation; the missing
        # 1/sigma factor is constant and cancels in the normalization below.
        weights = gaussian().pdf(angle_deviations / standard_deviation)
        weights = weights / np.sum(weights)  # normalization

        self.particles = self.particles[self.low_variance_sampling(weights)]
        self.X = self.particles

        gaussian_parameters = get_gaussian_statistics(self.particles)
        self._state.mu = gaussian_parameters.mu
        self._state.Sigma = gaussian_parameters.Sigma
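
Example #2 calls self.low_variance_sampling(weights), which returns resampling indices but is not shown. A sketch of such an index-returning low-variance sampler (the name and signature are inferred from the call site), mirroring the inline resampling loop of Example #1:

    import numpy as np

    def low_variance_sampling(weights):
        # Hypothetical sketch: draw M indices with a single random offset and
        # M evenly spaced pointers, as in the standard low-variance resampler.
        M = len(weights)
        r = np.random.uniform(0, 1 / M)
        c = weights[0]
        i = 0
        indexes = np.zeros(M, dtype=int)
        for m in range(M):
            U = r + m / M
            while U > c:
                i += 1
                c += weights[i]
            indexes[m] = i
        return indexes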
Example #3
    def predict(self, u):
        # PF prediction step: propagate every particle through the noisy
        # odometry motion model.

        for i in range(self.num_p):
            self.X[i] = sample_from_odometry(self.X[i], u, self._alphas)

        self._state_bar = get_gaussian_statistics(self.X)
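
All of the predict steps call sample_from_odometry(state, u, alphas), which is also not shown. A plausible sketch, assuming u = (drot1, dtran, drot2) and the standard odometry motion model with four noise coefficients in alphas (both assumptions based on the call sites):

    import numpy as np

    def sample_from_odometry(state, u, alphas):
        # Hypothetical sketch of the standard odometry sampling model:
        # perturb each motion component with state-dependent Gaussian noise,
        # then apply the noisy motion to the pose (x, y, theta).
        a1, a2, a3, a4 = alphas
        drot1, dtran, drot2 = u
        drot1_h = drot1 - np.random.normal(0, np.sqrt(a1 * drot1**2 + a2 * dtran**2))
        dtran_h = dtran - np.random.normal(0, np.sqrt(a3 * dtran**2 + a4 * (drot1**2 + drot2**2)))
        drot2_h = drot2 - np.random.normal(0, np.sqrt(a1 * drot2**2 + a2 * dtran**2))
        x, y, theta = state
        x += dtran_h * np.cos(theta + drot1_h)
        y += dtran_h * np.sin(theta + drot1_h)
        theta += drot1_h + drot2_h
        return np.array([x, y, theta])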
Example #4
    def update(self, z):
        self.w = np.zeros((self.M))
        for i in range(self.M):
            # Expected (wrapped) bearing to landmark z[1] for particle i.
            expected_bearing = wrap_angle(get_observation(self.X[i], z[1])[0])
            self.w[i] = gaussian.pdf(z[0], expected_bearing, np.sqrt(self._Q))
        self.w = self.w / np.sum(self.w)

        # Systematic resampling (low-variance; efficient in most situations).
        positions = (np.arange(self.M) + np.random.uniform(0, 1)) / self.M
        indexes = np.zeros(self.M, 'i')
        cum_dist = np.cumsum(self.w)
        i, j = 0, 0
        while i < self.M:
            if positions[i] < cum_dist[j]:
                indexes[i] = j
                i += 1
            else:
                j += 1

        self.X[:] = self.X[indexes]
        self.X[:, -1] = np.array([wrap_angle(x) for x in self.X[:, -1]])
        self.w.fill(1.0 / self.M)
        stats = get_gaussian_statistics(self.X)
        self._state.mu = stats.mu
        self._state.Sigma = stats.Sigma
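
Several of the update steps normalize bearing differences with wrap_angle. A one-line sketch of the usual normalization to [-pi, pi):

    import numpy as np

    def wrap_angle(angle):
        # Normalize an angle to the interval [-pi, pi).
        return (angle + np.pi) % (2 * np.pi) - np.pi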
Example #5
    def predict(self, u):
        # PF prediction step: propagate every particle through the noisy
        # odometry motion model.
        for i in range(self.M):
            self.X[i] = sample_from_odometry(self.X[i], u, self._alphas)
        stats = get_gaussian_statistics(self.X)
        if not self.global_loc:  # the Gaussian summary is only refit outside global localization
            self._state_bar.mu = stats.mu
            self._state_bar.Sigma = stats.Sigma
Example #6
    def update(self, z):
        # PF correction step: reweight, resample, then refit the belief.
        self.weights_update(z)
        self.X, self.w = resampling(self.X_bar, self.w)
        # Alternative sampler:
        # self.X, self.w = low_variance_sampler(self.X_bar, self.w)
        updated_pose = pose_from_particles(self.X, self.w)
        self._state.mu = updated_pose[np.newaxis].T
        self._state.Sigma = get_gaussian_statistics(self.X.T).Sigma
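
Examples #6 and #7 summarize the particle set with pose_from_particles(X, w), another helper that is not shown. Given that X there appears to be laid out as 3 x M (one particle per column, hence the X.T passed to get_gaussian_statistics), a weighted-mean sketch might be:

    import numpy as np

    def pose_from_particles(X, w):
        # Hypothetical sketch: weighted mean pose of a 3 x M particle array.
        # The heading uses a circular mean so angles near +/-pi average correctly.
        x = np.sum(w * X[0])
        y = np.sum(w * X[1])
        theta = np.arctan2(np.sum(w * np.sin(X[2])), np.sum(w * np.cos(X[2])))
        return np.array([x, y, theta])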
Example #7
    def predict(self, u):
        # PF, prediction part
        for m in range(self.M):
            self.X_bar[:, m] = sample_from_odometry(self.X[:, m], u,
                                                    self._alphas)
        self.w_bar = self.w / sum(self.w)  # carry over the previous weights, normalized
        updated_pose_bar = pose_from_particles(self.X_bar, self.w_bar)

        self._state_bar.mu = updated_pose_bar[np.newaxis].T
        self._state_bar.Sigma = get_gaussian_statistics(self.X_bar.T).Sigma
Example #8
    def predict(self, u):
        # PF prediction step: propagate every particle through the noisy
        # odometry motion model.

        for i in range(self.M):
            self.particles[i] = sample_from_odometry(
                self.particles[i], u, self._alphas)  # one noisy motion sample per particle
        self.X = self.particles

        # Gaussian summary (mean and covariance) of the propagated particles.
        gaussian_parameters = get_gaussian_statistics(self.particles)

        self._state_bar.mu = gaussian_parameters.mu
        self._state_bar.Sigma = gaussian_parameters.Sigma
Example #9
    def predict(self, u):
        # PF prediction step: propagate every particle through the noisy
        # odometry motion model, then refit the Gaussian summary.
        for idx, particle in enumerate(self.particle_set):
            self.particle_set[idx] = sample_from_odometry(
                particle, u, self._alphas)

        self._state_bar = get_gaussian_statistics(self.particle_set)
Example #10
    def update(self, z):
        # PF correction step.
        self._state.mu = self._state_bar.mu
        self._state.Sigma = self._state_bar.Sigma

        # UPDATE WEIGHTS: incorporate the measurement into the weights.
        stand_dev = np.sqrt(self._Q)  # self._Q is the observation noise variance
        normal_r_v = scipy.stats.norm(scale=stand_dev)  # zero-mean noise model
        for idx in range(len(self.weights)):
            # Innovation: how far the observation is from the expected measurement.
            innovation = z[0] - get_observation(self.particle_set[idx], z[1])[0]
            # Value of the noise pdf at the innovation; with multiple
            # simultaneous observations this would be *= instead of =.
            self.weights[idx] = normal_r_v.pdf(innovation)
        self.weights /= np.sum(self.weights)  # normalize for stability

        # RESAMPLING (multinomial): heavier weights occupy wider intervals of
        # the cumulative sum, so uniform draws select them more often.
        cumulative_sum = np.cumsum(self.weights)
        cumulative_sum[-1] = 1.  # guard against round-off in the last entry
        indexes = np.searchsorted(cumulative_sum,
                                  np.random.random(self.num_particles))
        # Particles with higher weights are duplicated more often.
        self.particle_set = self.particle_set[indexes]
        self.weights = self.weights[indexes]
        self.weights /= np.sum(self.weights)  # renormalize after resampling

        self._state = get_gaussian_statistics(self.particle_set)
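
Finally, every example refits a Gaussian to its particle set via get_gaussian_statistics. A sketch of what it plausibly computes for an M x 3 particle array (Examples #6 and #7 transpose their 3 x M arrays before the call), using a circular mean for the heading:

    import numpy as np
    from collections import namedtuple

    Gaussian = namedtuple('Gaussian', ['mu', 'Sigma'])

    def get_gaussian_statistics(X):
        # Hypothetical sketch: mean and covariance of M particles (x, y, theta).
        mu = np.mean(X, axis=0)
        mu[2] = np.arctan2(np.mean(np.sin(X[:, 2])), np.mean(np.cos(X[:, 2])))
        dX = X - mu
        dX[:, 2] = (dX[:, 2] + np.pi) % (2 * np.pi) - np.pi  # wrap heading residuals
        Sigma = dX.T @ dX / X.shape[0]
        return Gaussian(mu, Sigma)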