Example #1
def test_kwargs():
    def check_kwargs(x, **kwargs):
        assert "test_1" in kwargs
        assert "t" in kwargs
        assert kwargs["test_1"] == "ok"
        assert kwargs["t"] == 1.0
        return x

    # silly initialisation, but uses all parameters
    pf = ParticleFilter(
        prior_fn=lambda n: np.random.normal(0, 1, (n, 1)),
        observe_fn=lambda x, **kwargs: check_kwargs(x, **kwargs),
        n_particles=100,
        dynamics_fn=lambda x, **kwargs: check_kwargs(x, **kwargs),
        transform_fn=lambda x, w, **kwargs: check_kwargs(x, **kwargs),
        noise_fn=lambda x, **kwargs: check_kwargs(x, **kwargs),
        weight_fn=lambda x, y, **kwargs: check_kwargs(np.ones(len(x)), **kwargs),
        resample_proportion=0.2,
        column_names=["test"],
        internal_weight_fn=lambda x, y, **kwargs: check_kwargs(
            np.ones(len(x)), **kwargs),
        n_eff_threshold=1.0,
    )
    pf.update(np.array([[1]]), test_1="ok", t=1.0)
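
These assertions pin down a useful contract: extra keyword arguments passed to update() are forwarded to every user-supplied callback. A minimal sketch (not from the original test suite) that threads a time step through the dynamics this way; the exponential-decay model is purely illustrative:

import numpy as np
from pfilter import ParticleFilter, gaussian_noise, squared_error

pf = ParticleFilter(
    prior_fn=lambda n: np.random.normal(0, 1, (n, 1)),
    dynamics_fn=lambda x, dt=1.0, **kwargs: x * np.exp(-dt),  # illustrative decay model
    noise_fn=lambda x, **kwargs: gaussian_noise(x, sigmas=[0.1]),
    weight_fn=lambda x, y, **kwargs: squared_error(x, y, sigma=1),
    n_particles=100,
)
pf.update(np.array([[0.5]]), dt=0.1)  # dt reaches every callback via **kwargs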
Example #2
def test_no_observe():
    # check that updating with no observation still works
    pf = ParticleFilter(prior_fn=lambda n: np.random.normal(0, 1, (n, 1)),
                        n_particles=10)
    for i in range(10):
        pf.update(None)
        assert len(pf.weights) == len(pf.particles) == 10
        assert pf.particles.shape == (10, 1)
        assert np.allclose(np.sum(pf.weights), 1.0)
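
As the test above suggests, update(None) is a predict-only step: dynamics and noise are applied and the weights remain normalised without any reweighting. A minimal sketch interleaving predict-only and full updates, assuming that behaviour:

pf = ParticleFilter(prior_fn=lambda n: np.random.normal(0, 1, (n, 1)),
                    n_particles=10)
pf.update(None)             # predict only: no observation, no reweighting
pf.update(np.array([0.5]))  # predict + reweight against an observation
print(pf.mean_state)        # weight-averaged state estimate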
Example #3
def test_weights():
    # verify weights sum to 1.0
    pf = ParticleFilter(prior_fn=lambda n: np.random.normal(0, 1, (n, 1)),
                        n_particles=100)
    for i in range(10):
        pf.update(np.array([1]))
        assert len(pf.weights) == len(pf.particles) == 100
        assert pf.particles.shape == (100, 1)
        assert np.allclose(np.sum(pf.weights), 1.0)
Example #4
def test_init():
    # silly initialisation, but uses all parameters
    pf = ParticleFilter(
        prior_fn=lambda n: np.random.normal(0, 1, (n, 1)),
        observe_fn=lambda x: x,
        n_particles=100,
        dynamics_fn=lambda x: x,
        noise_fn=lambda x: x,
        weight_fn=lambda x, y: np.ones(len(x)),
        resample_proportion=0.2,
        column_names=["test"],
        internal_weight_fn=lambda x, y: np.ones(len(x)),
        n_eff_threshold=1.0,
    )
    pf.update(np.array([1]))
Example #5
def test_transform_fn():
    # silly initialisation, but uses all parameters
    pf = ParticleFilter(
        prior_fn=lambda n: np.random.normal(0, 1, (n, 1)),
        observe_fn=lambda x: x,
        n_particles=100,
        dynamics_fn=lambda x: x,
        transform_fn=lambda x, w: 2 * x,
        noise_fn=lambda x: x,
        weight_fn=lambda x, y: np.ones(len(x)),
        resample_proportion=0.2,
        column_names=["test"],
        internal_weight_fn=lambda x, y: np.ones(len(x)),
        n_eff_threshold=1.0,
    )
    for i in range(10):
        pf.update(np.array([1]))
        assert np.allclose(pf.original_particles * 2.0,
                           pf.transformed_particles)
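
transform_fn receives the particles and their weights after each update and maps the internal state to the output representation exposed as transformed_particles. A sketch for a hypothetical polar internal state [r, theta] transformed to cartesian output:

def polar_to_cartesian(x, weights):
    r, theta = x[:, 0], x[:, 1]
    return np.column_stack([r * np.cos(theta), r * np.sin(theta)])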
Example #6
def test_init():
    # silly initialisation, but uses all parameters
    pf = ParticleFilter(
        prior_fn=lambda n: np.random.normal(0, 1, (n, 1)),
        observe_fn=lambda x: x,
        n_particles=100,
        dynamics_fn=lambda x: x,
        noise_fn=lambda x: x,
        weight_fn=lambda x, y: np.ones(len(x)),
        resample_proportion=0.2,
        column_names=["test"],
        internal_weight_fn=lambda x, y: np.ones(len(x)),
        n_eff_threshold=1.0,
    )
Example #7
    def initialize(self, initial_position):
        # Prior sampling function for each of the state variables
        # Centered around initial position estimate
        self.prior_fn = independent_sample([
            norm(loc=initial_position[0], scale=2).rvs,
            norm(loc=initial_position[1], scale=2).rvs,
            uniform(loc=initial_position[2], scale=np.pi / 2).rvs
        ])

        self.pf_lock.acquire()

        self.pf = ParticleFilter(
            n_particles=self.n,
            prior_fn=self.prior_fn,
            observe_fn=self.observation_function,
            weight_fn=self.weight_function,
            dynamics_fn=self.dynamics_function,
            noise_fn=lambda x: gaussian_noise(x, sigmas=[0.08, 0.08, 0.08]))

        self.pf.update()
        rospy.loginfo("Finished initial pf update")

        self.pf_lock.release()
        rospy.loginfo("Released initial lock")
Example #8
def test_partial_missing():
    # check that partially masked observations still give normalised weights
    pf = ParticleFilter(prior_fn=lambda n: np.random.normal(0, 1, (n, 4)),
                        n_particles=100)
    for i in range(10):
        masked_input = ma.masked_equal(np.array([1, 999, 0, 999]), 999)
        pf.update(masked_input)
        pf.update(np.array([1, 1, 1, 1]))
        assert np.allclose(np.sum(pf.weights), 1.0)
        assert len(pf.weights) == len(pf.particles) == 100
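
Masked entries are treated as unobserved and do not contribute to the weights. Dropped sensor channels flagged with NaN can be converted the same way; a hedged variant of the masked input above:

import numpy.ma as ma
obs = ma.masked_invalid(np.array([1.0, np.nan, 0.0, np.nan]))
pf.update(obs)  # the two masked channels are ignored in the weighting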
Example #9
def process_fn(x, dt=1.0):
    # Constant-velocity transition for a [x, y, z, dx, dy, dz] state.
    # The first two matrix rows were cut off in this snippet; they are
    # reconstructed here by symmetry with the visible rows (an assumption),
    # as is the dt=1.0 default.
    xp = np.dot(x, np.array([[1., 0, 0, dt, 0, 0], [0, 1., 0, 0, dt, 0],
                             [0, 0, 1., 0, 0, dt], [0, 0, 0, 1., 0, 0],
                             [0, 0, 0, 0, 1., 0], [0, 0, 0, 0, 0, 1.]]).T)
    return xp


def rbf_error(x, y, sigma=1):
    # RBF kernel
    d = np.sum((x - y)**2, axis=1)
    return np.exp(-d / (2.0 * sigma**2))
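
# Quick sanity check of the kernel above: identical rows score 1.0 and the
# weight decays with squared distance, e.g. squared distances [0, 1, 9]
# give roughly [1.0, 0.607, 0.011] for sigma=1:
print(rbf_error(np.zeros((3, 2)), np.array([[0., 0.], [1., 0.], [3., 0.]])))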


pf = ParticleFilter(
    prior_fn=prior_fn,
    observe_fn=ob_fn,
    n_particles=500,
    dynamics_fn=process_fn,
    noise_fn=lambda x: gaussian_noise(x, sigmas=[5, 5, 5, 0.05, 0.05, 0.1]),
    weight_fn=lambda x, y: rbf_error(x, y, sigma=2),
    resample_proportion=0.7,
    column_names=columns,
)

pf.init_filter()

# ------------- Running Particle filter with animation -------------
#

Example #10
def test_resampler():
    pf = ParticleFilter(
        prior_fn=lambda n: np.random.normal(0, 1, (n, 1)),
        n_particles=100,
        resample_fn=None  # should use default
    )
    for i in range(10):
        pf.update(np.array([1]))

    pf = ParticleFilter(prior_fn=lambda n: np.random.normal(0, 1, (n, 1)),
                        n_particles=100,
                        resample_fn=basic_resample)
    for i in range(10):
        pf.update(np.array([1]))

    for sampler in [
            stratified_resample, systematic_resample, residual_resample,
            multinomial_resample
    ]:
        pf = ParticleFilter(prior_fn=lambda n: np.random.normal(0, 1, (n, 1)),
                            n_particles=100,
                            resample_fn=sampler)
        for i in range(10):
            pf.update(np.array([1]))
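
The built-in samplers above share one contract: resample_fn takes the normalised weight vector and returns the indices of the particles to keep. A minimal custom multinomial resampler as a sketch, assuming that weights-to-indices signature:

def my_resample(weights):
    # draw n indices with probability proportional to the weights
    return np.random.choice(len(weights), size=len(weights), p=weights)

pf = ParticleFilter(prior_fn=lambda n: np.random.normal(0, 1, (n, 1)),
                    n_particles=100,
                    resample_fn=my_resample)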
Example #11
def example_filter():
    # create the filter
    pf = ParticleFilter(
        prior_fn=prior_fn,
        observe_fn=blob,
        n_particles=100,
        dynamics_fn=velocity,
        noise_fn=lambda x: cauchy_noise(x, sigmas=[0.05, 0.05, 0.01, 0.005, 0.005]),
        weight_fn=lambda x, y: squared_error(x, y, sigma=2),
        resample_proportion=0.2,
        column_names=columns,
    )

    # np.random.seed(2018)
    # start in centre, random radius
    s = np.random.uniform(2, 8)

    # random movement direction
    dx = np.random.uniform(-0.25, 0.25)
    dy = np.random.uniform(-0.25, 0.25)

    # appear at centre
    x = img_size // 2
    y = img_size // 2
    scale_factor = 20

    # create window
    cv2.namedWindow("samples", cv2.WINDOW_NORMAL)
    cv2.resizeWindow("samples", scale_factor * img_size, scale_factor * img_size)

    for i in range(1000):
        # generate the actual image
        low_res_img = blob(np.array([[x, y, s]]))
        pf.update(low_res_img)

        # resize for drawing onto
        img = cv2.resize(
            np.squeeze(low_res_img), (0, 0), fx=scale_factor, fy=scale_factor
        )

        cv2.putText(
            img,
            "ESC to exit",
            (50, 50),
            cv2.FONT_HERSHEY_SIMPLEX,
            1,
            (255, 255, 255),
            2,
            cv2.LINE_AA,
        )

        color = cv2.cvtColor(img.astype(np.float32), cv2.COLOR_GRAY2RGB)

        x_hat, y_hat, s_hat, dx_hat, dy_hat = pf.mean_state

        # draw individual particles
        for particle in pf.original_particles:

            xa, ya, sa, _, _ = particle
            sa = np.clip(sa, 1, 100)
            cv2.circle(
                color,
                (int(ya * scale_factor), int(xa * scale_factor)),
                int(sa * scale_factor),
                (1, 0, 0),
                1,
            )

        # x,y exchange because of ordering between skimage and opencv
        cv2.circle(
            color,
            (int(y_hat * scale_factor), int(x_hat * scale_factor)),
            int(s_hat * scale_factor),
            (0, 1, 0),
            1,
            lineType=cv2.LINE_AA,
        )

        cv2.line(
            color,
            (int(y_hat * scale_factor), int(x_hat * scale_factor)),
            (
                int(y_hat * scale_factor + 5 * dy_hat * scale_factor),
                int(x_hat * scale_factor + 5 * dx_hat * scale_factor),
            ),
            (0, 0, 1),
            lineType=cv2.LINE_AA,
        )

        cv2.imshow("samples", color)
        result = cv2.waitKey(20)
        # break on escape
        if result == 27:
            break
        x += dx
        y += dy

    cv2.destroyAllWindows()
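
blob (which renders a particle state to a low-resolution image) and velocity (the dynamics) are defined elsewhere in this example; a plausible constant-velocity sketch for the [x, y, s, dx, dy] state would be:

def velocity(x):
    xp = x.copy()
    xp[:, 0] += xp[:, 3]  # advance x by dx
    xp[:, 1] += xp[:, 4]  # advance y by dy
    return xp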
Example #12
class UWBParticleFilter:
    # Implementation of a particle filter to predict the pose of the robot given distance measurements from the UWB modules
    # anchor_names is a list of the names of all the anchors
    # tag_transforms is a 2D numpy array storing the [x, y] coordinates of each tag in the robot frame
    def __init__(self, num_sensors, anchor_names, tag_transforms, lidar_params,
                 initial_position, occ_grid, update_rate):
        # Number of particles
        self.n = 100
        # Sensor covariance for range measurement of UWB modules
        self.UWB_COVARIANCE = 0.5
        self.IMU_COVARIANCE = 0.01

        self.update_rate = float(update_rate)  #Hz

        # Members of the state
        self.state_desc = ["x", "y", "theta"]

        # Number of sensors that we are reading data from
        self.num_sensors = num_sensors

        self.map = occ_grid

        self.num_anchors = len(anchor_names)
        self.anchor_names = anchor_names
        self.anchor_positions = np.zeros(shape=(len(anchor_names), 2))

        self.num_tags = tag_transforms.shape[0]
        self.tag_transforms = tag_transforms

        # Save LIDAR parameters
        self.laser_transform = lidar_params["transform"]
        self.lidar_range_max = lidar_params["range_max"]
        self.lidar_range_min = lidar_params["range_min"]
        self.num_scans = lidar_params["num_scans"]

        # Create a lock object for blocking parallel access to pf objects
        self.pf_lock = Lock()

        self.initialize(initial_position)

    def initialize(self, initial_position):
        # Prior sampling function for each of the state variables
        # Centered around initial position estimate
        self.prior_fn = independent_sample([
            norm(loc=initial_position[0], scale=2).rvs,
            norm(loc=initial_position[1], scale=2).rvs,
            uniform(loc=initial_position[2], scale=np.pi / 2).rvs
        ])

        self.pf_lock.acquire()

        self.pf = ParticleFilter(
            n_particles=self.n,
            prior_fn=self.prior_fn,
            observe_fn=self.observation_function,
            weight_fn=self.weight_function,
            dynamics_fn=self.dynamics_function,
            noise_fn=lambda x: gaussian_noise(x, sigmas=[0.08, 0.08, 0.08]))

        self.pf.update()
        rospy.loginfo("Finished initial pf update")

        self.pf_lock.release()
        rospy.loginfo("Released initial lock")

    # weight_function
    # Accepts the hypothetical observations for each particle and real observations for all of the sensors and
    # computes a weight for the particle
    # hyp_observed - (nxh) matrix containing the hypothetical observation for each particle
    # real_observed - (h,) vector containing the real observation
    # returns a (n,) vector containing the weights for each of the particles
    def weight_function(self, hyp_observed, real_observed):
        # Create output vector of dimension (n,)
        particle_weights = np.zeros(shape=(hyp_observed.shape[0], ),
                                    dtype=np.float32)

        #print(real_observed)

        # First split UWB and IMU data into separate arrays
        hyp_observed_uwb = hyp_observed[:, :-1]
        hyp_observed_imu = hyp_observed[:, -1:]

        real_observed_uwb = real_observed[0, :-1]
        real_observed_imu = real_observed[0, -1:]

        #print(real_observed_imu)

        # Assume the range measurements from the UWB modules have Gaussian noise
        # Iterate over each particle

        #TODO Real observation does not have particle dimension - need to map all parameters with one fixed input
        # def calc_particle_weight(hyp_observation_uwb, hyp_observation_imu):
        #     # Calculate the gaussian pdf for the difference between the real and expected sensor measurements for all unmasked elements in the array
        #     # Since all of the sensor measurements are independent, we can simply multiply the 1-D probabilities
        #     # We construct a gaussian with a mean of 0 and a variance empirically determined for the UWB modules
        #     uwb_measurement_probabilities = norm(0, self.UWB_COVARIANCE).pdf(hyp_observation_uwb[~real_observed_uwb.mask[0]] - real_observed_uwb.data[0][~real_observed_uwb.mask[0]])
        #     uwb_particle_weight = np.prod(uwb_measurement_probabilities, axis = 0)
        #     #print("UWB weighting  time: {}".format(time.clock() - start))

        #     imu_measurement_probability = norm(0, self.IMU_COVARIANCE).pdf(hyp_observation_imu[~real_observed_imu.mask[0]] - real_observed_imu.data[0][~real_observed_imu.mask[0]])
        #     imu_particle_weight = np.prod(imu_measurement_probability, axis = 0)

        #     return uwb_particle_weight + imu_particle_weight

        # particle_weights = map(calc_particle_weight, hyp_observed_uwb, hyp_observed_imu)
        for i in range(hyp_observed.shape[0]):

            #print("UWB weighting  time: {}".format(time.clock() - start))

            #imu_measurement_probability = norm(0, self.IMU_COVARIANCE).pdf(hyp_observed_imu[i][~real_observed_imu.mask[0]] - real_observed_imu.data[0][~real_observed_imu.mask[0]])
            #imu_particle_weight = np.prod(imu_measurement_probability, axis = 0)

            lidar_particle_weight = self.calc_lidar_particle_weight_likelihood_field(
                hyp_observed[i], real_observed[0])

            imu_particle_weight = self.calc_imu_particle_weight(
                hyp_observed[i][-1:], real_observed[0][-1:])

            # print("{} {} {}".format(hyp_observed_imu[i], real_observed_imu, imu_measurement_probability))

            #particle_weights[i] = calc_uwb_particle_weight(hyp_observed[i], real_observed[0]) + imu_particle_weight + lidar_particle_weight
            particle_weights[i] = self.calc_uwb_particle_weight(
                hyp_observed[i], real_observed[0]) + 2 * imu_particle_weight

        # for one, two in zip(particle_weights, particleWeights):
        #     if(one != two):
        #         delta = one - two
        #         print("BROKEN {}".format(delta))
        rospy.loginfo("Sucessfully completed weight function")
        return particle_weights

    def calc_uwb_particle_weight(self, hyp_observed, real_observed):
        #print(real_observed)
        # Calculate the gaussian pdf for the difference between the real and expected sensor measurements for all unmasked elements in the array
        # Since all of the sensor measurements are independent, we can simply multiply the 1-D probabilities
        # We construct a gaussian with a mean of 0 and a variance empirically determined for the UWB modules
        uwb_measurement_probabilities = norm(
            0,
            self.UWB_COVARIANCE).pdf(hyp_observed[~real_observed.mask] -
                                     real_observed.data[~real_observed.mask])
        return np.prod(uwb_measurement_probabilities, axis=0)

    def calc_imu_particle_weight(self, hyp_observed, real_observed):
        #print(real_observed)
        imu_measurement_probability = norm(
            0,
            self.IMU_COVARIANCE).pdf(hyp_observed[~real_observed.mask] -
                                     real_observed.data[~real_observed.mask])
        return np.prod(imu_measurement_probability, axis=0)

    def calc_lidar_particle_weight_likelihood_field(self, hyp_observed,
                                                    real_observed):
        # The hyp_observed vector stores the estimated 2d pose of the lidar in the world frame
        #TODO this is a hacky way to get the pose that needs to be fixed later
        # Pose of the lidar retrieved from hype observed
        lidar_pose = hyp_observed[self.num_tags *
                                  self.num_anchors:self.num_tags *
                                  self.num_anchors + 3]

        #TODO Parametrize these constants
        z_hit = 1
        z_rand = 0
        self.sigma_hit = 0.005

        # Initialize particle weight
        p = 1.0

        # Calculate constant terms for the particle weight
        z_hit_denom = 2 * self.sigma_hit * self.sigma_hit
        z_rand_mult = 1.0 / self.lidar_range_max

        for i in range(self.num_scans):
            if (real_observed.mask[self.num_tags * self.num_anchors + i] == 0):
                # Retrieve range & bearing in easy to access variables
                obs_range = real_observed[self.num_tags * self.num_anchors + i]
                obs_bearing = real_observed[self.num_tags * self.num_anchors +
                                            self.num_scans + i]

                pz = 0.0

                # Compute the endpoint of the beam
                beam_x = lidar_pose[0] + obs_range * np.cos(lidar_pose[2] +
                                                            obs_bearing)
                beam_y = lidar_pose[1] + obs_range * np.sin(lidar_pose[2] +
                                                            obs_bearing)

                # Get occupancy grid coordinates
                #TODO Move this to map utils
                mi = int(np.floor((beam_x - 0) / self.map.resolution + 0.5))
                mj = int(np.floor((beam_y - 0) / self.map.resolution + 0.5))

                # Part 1: Get distance from the hit to closest obstacle.
                # Off-map penalized as max distance
                if (not self.map.on_map(mi, mj)):
                    z = self.map.MAX_DISTANCE
                else:
                    z = self.map.distances[
                        mi, mj]  # Retrieve distance to nearest neighbor

                # Gaussian model
                # NOTE: this should have a normalization of 1/(sqrt(2pi)*sigma)
                pz += z_hit * np.exp(-(z * z) / z_hit_denom)

                # Part 2: random measurements
                pz += z_rand * z_rand_mult

                # TODO: outlier rejection for short readings

                assert pz <= 1.0
                assert pz >= 0.0

                #      p *= pz;
                # here we have an ad-hoc weighting scheme for combining beam probs
                # works well, though...
                p += pz * pz * pz

        return p

    # observation_function
    # Accepts the state of all the particles and returns the predicted observation for each one
    # particles states - n x d array: n is the number of particles and d is state dimension
    def observation_function(self, particle_states):
        # Create output array of dimension n x h
        # n is the number of particles
        # h is the dimension of the observation
        expected_observations = np.zeros(shape=(self.n, self.num_sensors))

        # Calculated expected observations
        num_uwb_measurements = self.num_tags * self.num_anchors
        for i in range(self.n):
            # Update UWB predicted observations
            expected_observations[i][
                0:num_uwb_measurements] = self.uwb_observation_function(
                    particle_states[i])

            # Don't update the lidar observations because we are using the likelihood model to generate the weights for the particles
            # If we switch to the beam model, we will have to update expected observations using raycasts
            expected_observations[
                i][num_uwb_measurements:num_uwb_measurements +
                   3] = self.lidar_observation_function(particle_states[i])

            # Add the IMU observation, which is just the orientation stored in the pf state
            expected_observations[i][-1] = self.imu_observation_function(
                particle_states[i])

        #print("Loop time: {}".format(time.clock()-start))
        rospy.loginfo("Successfully completed observation function")
        return expected_observations

    # uwb_observation_function
    # Accepts the state of a given particle and returns the UWB observations for that particle
    def uwb_observation_function(self, particle_state):
        expected_observations = np.zeros(shape=(self.num_tags *
                                                self.num_anchors))
        for j in range(self.num_tags):
            for k in range(self.num_anchors):
                #print("{} {}".format(particle_states[i], self.anchor_positions[j]), end = "\n")

                # Calculate expected position of the tag in the world frame
                x_tag = self.tag_transforms[j][0] * np.cos(
                    particle_state[2]) - self.tag_transforms[j][1] * np.sin(
                        particle_state[2]) + particle_state[0]
                y_tag = self.tag_transforms[j][0] * np.sin(
                    particle_state[2]) + self.tag_transforms[j][1] * np.cos(
                        particle_state[2]) + particle_state[1]
                expected_tag_position = np.array([x_tag, y_tag])

                # Expected observation is the distance from the tag to the anchor
                expected_observations[self.num_anchors * j +
                                      k] = np.linalg.norm(
                                          expected_tag_position -
                                          self.anchor_positions[k])
        return expected_observations

    def imu_observation_function(self, particle_state):
        return particle_state[2]

    # Currently, the lidar observation function simply returns the pose of the lidar given the estimated particle pose
    def lidar_observation_function(self, particle_state):
        # Get 2D pose of LIDAR relative to map for the current particle
        x_lidar = self.laser_transform[0] * np.cos(
            particle_state[2]) - self.laser_transform[1] * np.sin(
                particle_state[2]) + particle_state[0]
        y_lidar = self.laser_transform[0] * np.sin(
            particle_state[2]) + self.laser_transform[1] * np.cos(
                particle_state[2]) + particle_state[1]
        theta_lidar = particle_state[2]
        return np.array([x_lidar, y_lidar, theta_lidar])

    # dynamics_function
    # Accepts the state of all of the particles and returns the predicted state based on the control input to the robot
    # Applies a dynamics model to evolve the particles
    # Control input read from ROS topic cmd_vel
    # Also applies noise proportional to the control input
    # Arguments:
    #   particles_states - (n,d) vector containing the state of all of the particles
    #  returns predicted particle_states (n,d) vector with evolved particles
    def dynamics_function(self, particle_states):
        # Get current cmd_vel
        try:
            cmd_vel = rospy.wait_for_message("/cmd_vel", Twist, timeout=0.1)
        except rospy.ROSException:  # If nobody is publishing, set the commands to zero
            cmd_vel = Twist()
            cmd_vel.angular.z = 0
            cmd_vel.linear.x = 0

        # Time step between updates
        delta_t = 1 / self.update_rate

        # Calculate change in angle and arc length traversed
        #TODO: FIGURE OUT WHY THESE CONSTANTS ARE REQUIRED
        delta_theta = cmd_vel.angular.z * delta_t
        delta_s = cmd_vel.linear.x * delta_t

        #print("delta_x: {} \t delta_y: {} \t delta_theta: {}".format(delta_x, delta_y, delta_theta))

        # Init variables to store translation in x and y directions in the robot frame
        # delta_y is the forward direction of the robot
        # delta_x is the right direction of the robot
        # NOTE: This coordinate frame does not coincide with the ROS coordinate frame of the robot
        # ROS coordinate frame is rotated 90 degrees counter-clockwise
        delta_x_robot = 0.0
        delta_y_robot = 0.0

        # This is a singularity where the robot only moves forward
        # Radius of circle is infinite
        if delta_theta == 0:
            delta_y_robot = delta_s
        else:
            # Radius of the circle along which the robot is travelling
            r = delta_s / delta_theta

            # Next, calculate the translation in the robot frame
            delta_x_robot = r * (np.cos(delta_theta) - 1)
            delta_y_robot = r * np.sin(delta_theta)

        for i in range(particle_states.shape[0]):

            # Transform x and y translation in robot frame to world coordinate frame for each particle
            # TODO REMOVE HACKY pi/2 fix
            delta_x = delta_x_robot * np.cos(
                particle_states[i][2] - np.pi /
                2) - delta_y_robot * np.sin(particle_states[i][2] - np.pi / 2)
            delta_y = delta_x_robot * np.sin(
                particle_states[i][2] - np.pi /
                2) + delta_y_robot * np.cos(particle_states[i][2] - np.pi / 2)

            particle_states[i][0] = particle_states[i][0] + delta_x
            particle_states[i][1] = particle_states[i][1] + delta_y
            particle_states[i][2] = particle_states[i][2] + delta_theta

            # if(i == 0):
            #     print("delta_x: {} \t delta_y: {} \t delta_theta: {}".format(delta_x_robot, delta_y_robot, delta_theta))

        rospy.loginfo("Succesfully completed dynamics function")

        # Return updated particles
        return particle_states

    # update
    # Takes in the observation from the sensors and calls an update step on the particle filter
    # Accepts the observation as (4,) array containing range measurements from the anchors
    # Supports masked numpy array for partial observations
    def update(self, observation):
        self.pf_lock.acquire()
        print(observation.mask)
        self.pf.update(observation)
        self.pf_lock.release()

    def get_particle_states(self):

        self.pf_lock.acquire()
        particle_states = self.pf.original_particles
        self.pf_lock.release()
        return particle_states

    def get_state(self):
        self.pf_lock.acquire()
        mean_state = self.pf.mean_state
        self.pf_lock.release()
        return mean_state
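
The arc-based odometry in dynamics_function is standard differential-drive geometry: for a turn delta_theta and arc length delta_s, the chord in the robot frame is (r * (cos(delta_theta) - 1), r * sin(delta_theta)) with r = delta_s / delta_theta. A quick numeric check that this collapses to straight-line motion as delta_theta approaches zero:

import numpy as np
delta_s, delta_theta = 0.5, 1e-6
r = delta_s / delta_theta
print(r * (np.cos(delta_theta) - 1))  # ~ 0.0: sideways drift vanishes
print(r * np.sin(delta_theta))        # ~ 0.5: pure forward motion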
Example #13
    def _pfilter_init(self):
        """
        Setup the particle filter.

        """

        # initialisation of particle filter implementation
        # refer https://github.com/johnhw/pfilter/blob/master/README.md

        columns = ["x", "y", "heading", "life_span"]
        map_x_size = self.carpet_map.grid.shape[1] * self.carpet_map.cell_size
        map_y_size = self.carpet_map.grid.shape[0] * self.carpet_map.cell_size

        def prior_fn(n_particles: int):
            """
            Sample n random particles from p(x|z)
            i.e. if last color is BEIGE, return a sample
            where proportion self._weight_fn_p of particles lie on
            random beige cells, and proportion (1-self._weight_fn_p)
            lie on other cells
            """
            # create a grid of sample probabilities, equal in shape
            # to the map grid, such that the sum of all cells
            # which match the most recent color equals self._weight_fn_p
            # and the sum of all cells = 1.
            p_mat = np.zeros_like(self.carpet_map.grid, dtype=float)

            if self._most_recent_color is None:
                matching = np.zeros_like(self.carpet_map.grid)
            else:
                matching = self.carpet_map.grid == self._most_recent_color.index

            num_matches = np.sum(matching)
            if num_matches == 0 or num_matches == self.carpet_map.grid.size:
                p_mat[:] = 1 / self.carpet_map.grid.size
            else:
                p_mat[matching] = self._weight_fn_p / np.sum(matching)
                p_mat[~matching] = (1 - self._weight_fn_p) / np.sum(~matching)

            # sample from the grid using the probabilities from above
            p_mat_flat = p_mat.flatten()
            selected_grid_linear_indices = np.random.choice(
                len(p_mat_flat), size=n_particles, p=p_mat_flat)
            #convert linear indices back to grid indices
            y_indices, x_indices = np.unravel_index(
                selected_grid_linear_indices, self.carpet_map.grid.shape)

            # convert sampled grid indices into x/y coordinates
            # add noise to sample uniformly across selected grid cells
            x_coords = (x_indices +
                        uniform().rvs(n_particles)) * self.carpet_map.cell_size
            y_coords = (self.carpet_map.grid.shape[0] -
                        (y_indices + uniform().rvs(n_particles))
                        ) * self.carpet_map.cell_size

            heading = uniform(loc=0, scale=2 * np.pi).rvs(n_particles)
            age = np.zeros_like(heading, dtype=float)

            return np.column_stack([x_coords, y_coords, heading, age])

        def observe_fn(state: np.array, **kwargs) -> np.array:
            return self.carpet_map.get_color_at_coords(state[:, 0:3])

        def weight_fn(hyp_observed: np.array, real_observed: np.array,
                      **kwargs):
            """
            weight p for correct observations, 1-p for incorrect
            """
            p = self._weight_fn_p

            correct_observation = np.squeeze(hyp_observed) == np.squeeze(
                real_observed)

            weights = correct_observation * p + ~correct_observation * (1 - p)
            return weights

        def odom_update(x: np.array, odom: np.array) -> np.array:
            poses = add_poses(x[:, 0:3], np.array([odom]))
            life_span = x[:, 3]
            life_span += 1
            return np.column_stack((poses, life_span))

        self._pfilter = ParticleFilter(
            prior_fn=prior_fn,
            observe_fn=observe_fn,
            n_particles=self._n_particles,
            dynamics_fn=odom_update,
            noise_fn=lambda x, odom: gaussian_noise(
                x,
                sigmas=[self._odom_pos_noise, self._odom_pos_noise,
                        self._odom_heading_noise, 0]),
            weight_fn=weight_fn,
            resample_proportion=self._resample_proportion,
            column_names=columns)
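
A quick check of the colour-conditioned prior above: with p = 0.95 and a toy 2x2 grid in which exactly one cell matches the last observed colour, that cell carries 0.95 of the sampling mass and the whole grid still sums to 1:

import numpy as np
grid = np.array([[0, 1], [1, 1]])
matching = grid == 0  # one matching cell
p = 0.95
p_mat = np.zeros_like(grid, dtype=float)
p_mat[matching] = p / np.sum(matching)
p_mat[~matching] = (1 - p) / np.sum(~matching)
assert np.isclose(p_mat.sum(), 1.0)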
Example #14
class CarpetBasedParticleFilter():
    def __init__(self,
                 carpet_map: CarpetMap,
                 log_inputs: bool = False,
                 resample_proportion: float = 0,
                 weight_fn_p: float = 0.95,
                 odom_pos_noise: float = 0.01,
                 odom_heading_noise: float = 0.01,
                 n_particles: int = 500):
        """
        Initialise with a given map.
        if `log_inputs` is set True, all inputs to `update` will be logged,
        and can be saved with CarpetBasedParticleFilter.write_input_log(logpath)
        """
        self.carpet_map = carpet_map
        self.log_inputs = log_inputs
        self.input_log = []
        self._pfilter = None
        self._resample_proportion = resample_proportion
        self._weight_fn_p = weight_fn_p
        self._odom_pos_noise = odom_pos_noise
        self._odom_heading_noise = odom_heading_noise
        self._n_particles = n_particles
        self._most_recent_color = None  # used in pfilter initialisation fn

    def _pfilter_init(self):
        """
        Setup the particle filter.

        """

        # initialisation of particle filter implementation
        # refer https://github.com/johnhw/pfilter/blob/master/README.md

        columns = ["x", "y", "heading", "life_span"]
        map_x_size = self.carpet_map.grid.shape[1] * self.carpet_map.cell_size
        map_y_size = self.carpet_map.grid.shape[0] * self.carpet_map.cell_size

        def prior_fn(n_particles: int):
            """
            Sample n random particles from p(x|z)
            i.e. if last color is BEIGE, return a sample
            where proportion self._weight_fn_p of particles lie on
            random beige cells, and proportion (1-self._weight_fn_p)
            lie on other cells
            """
            # create a grid of sample probabilities, equal in shape
            # to the map grid, such that the sum of all cells
            # which match the most recent color equals self._weight_fn_p
            # and the sum of all cells = 1.
            p_mat = np.zeros_like(self.carpet_map.grid, dtype=float)

            if self._most_recent_color is None:
                matching = np.zeros_like(self.carpet_map.grid)
            else:
                matching = self.carpet_map.grid == self._most_recent_color.index

            num_matches = np.sum(matching)
            if num_matches == 0 or num_matches == self.carpet_map.grid.size:
                p_mat[:] = 1 / self.carpet_map.grid.size
            else:
                p_mat[matching] = self._weight_fn_p / np.sum(matching)
                p_mat[~matching] = (1 - self._weight_fn_p) / np.sum(~matching)

            # sample from the grid using the probabilities from above
            p_mat_flat = p_mat.flatten()
            selected_grid_linear_indices = np.random.choice(
                len(p_mat_flat), size=n_particles, p=p_mat_flat)
            #convert linear indices back to grid indices
            y_indices, x_indices = np.unravel_index(
                selected_grid_linear_indices, self.carpet_map.grid.shape)

            # convert sampled grid indices into x/y coordinates
            # add noise to sample uniformly across selected grid cells
            x_coords = (x_indices +
                        uniform().rvs(n_particles)) * self.carpet_map.cell_size
            y_coords = (self.carpet_map.grid.shape[0] -
                        (y_indices + uniform().rvs(n_particles))
                        ) * self.carpet_map.cell_size

            heading = uniform(loc=0, scale=2 * np.pi).rvs(n_particles)
            age = np.zeros_like(heading, dtype=float)

            return np.column_stack([x_coords, y_coords, heading, age])

        def observe_fn(state: np.array, **kwargs) -> np.array:
            return self.carpet_map.get_color_at_coords(state[:, 0:3])

        def weight_fn(hyp_observed: np.array, real_observed: np.array,
                      **kwargs):
            """
            weight p for correct observations, 1-p for incorrect
            """
            p = self._weight_fn_p

            correct_observation = np.squeeze(hyp_observed) == np.squeeze(
                real_observed)

            weights = correct_observation * p + ~correct_observation * (1 - p)
            return weights

        def odom_update(x: np.array, odom: np.array) -> np.array:
            poses = add_poses(x[:, 0:3], np.array([odom]))
            life_span = x[:, 3]
            life_span += 1
            return np.column_stack((poses, life_span))

        self._pfilter = ParticleFilter(
            prior_fn=prior_fn,
            observe_fn=observe_fn,
            n_particles=self._n_particles,
            dynamics_fn=odom_update,
            noise_fn=lambda x, odom: gaussian_noise(
                x,
                sigmas=[self._odom_pos_noise, self._odom_pos_noise,
                        self._odom_heading_noise, 0]),
            weight_fn=weight_fn,
            resample_proportion=self._resample_proportion,
            column_names=columns)

    def update(self,
               odom: OdomMeasurement,
               color: Color,
               ground_truth: Optional[Pose] = None) -> None:
        """
        Update the particle filter based on measured odom and color
        If optional ground truth pose is provided, and if input logging is enabled, the 
        ground truth pose will be logged.
        """
        if self.log_inputs:
            self.input_log.append((odom, color, ground_truth))

        self._most_recent_color = color

        # if the particle filter is not initialised, init it
        if self._pfilter is None:
            self._pfilter_init()

        odom_array = [odom.dx, odom.dy, odom.dheading]
        if color == UNCLASSIFIED:
            self._pfilter.update(observed=None, odom=odom_array)
        else:
            self._pfilter.update(np.array([color.index]), odom=odom_array)

    def seed(self,
             seed_pose: Pose,
             pos_std_dev: float = 1.,
             heading_std_dev: float = 0.3) -> None:
        """
        Reinitialise particles around given seed pose
        Particles are initialised with standard deviations around
        position and heading as specified
        """

        # log seed in ground truth portion of input log
        if self.log_inputs:
            self.input_log.append((None, None, seed_pose))

        # if the particle filter is not initialised, init it
        if self._pfilter is None:
            self._pfilter_init()
        self._pfilter.particles = independent_sample([
            norm(loc=seed_pose.x, scale=pos_std_dev).rvs,
            norm(loc=seed_pose.y, scale=pos_std_dev).rvs,
            norm(loc=seed_pose.heading, scale=heading_std_dev).rvs,
            norm(loc=0, scale=0).rvs,
        ])(self._n_particles)

    def get_current_pose(self) -> Pose:
        if self._pfilter is None:
            return None

        state = self._pfilter.mean_state
        return Pose(x=state[0], y=state[1], heading=state[2])

    def get_particles(self) -> np.ndarray:
        if self._pfilter is None:
            return None
        return self._pfilter.particles

    def write_input_log(self, log_path: str) -> None:
        assert self.log_inputs, "Error - requested 'write_input_log', but input logging is disabled"
        with open(log_path, 'wb') as f:
            pickle.dump(self.input_log, f, protocol=pickle.HIGHEST_PROTOCOL)
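
A hypothetical end-to-end usage sketch (my_map, sensor_stream and the OdomMeasurement/Color values are assumed from the surrounding package, not shown here):

pf = CarpetBasedParticleFilter(carpet_map=my_map, log_inputs=True)
for odom, color in sensor_stream:    # stream of (OdomMeasurement, Color) pairs
    pf.update(odom, color)
    pose = pf.get_current_pose()     # Pose(x=..., y=..., heading=...)
pf.write_input_log("inputs.pickle")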