Example no. 1
def update_particle(particle, v_c, w_c, v_c_cov, w_c_cov, z, thk, motion_noise,
                    dt, true_pos, true_neg, alpha, beta, z_max, first_time,
                    num_particles):
    # sample the motion model with noisy commanded velocities
    x = np.array([[particle.x], [particle.y], [particle.theta]])
    x_next = dynamics.propogate_next_state(x, v_c + stat_filter.noise(v_c_cov),
                                           w_c + stat_filter.noise(w_c_cov),
                                           dt)

    particle.x = x_next[0][0]
    particle.y = x_next[1][0]
    particle.theta = dynamics.wrap_angle(x_next[2][0])

    # calculate the particle weight from the measurement model
    if first_time:
        particle.weight = 1.0 / num_particles
    else:
        particle.weight = ogc.likelihood_field_range_finder_model(
            z, x_next, thk, z_max, particle.occ_grid)

    # update the map for the particle
    X = [particle.x, particle.y, particle.theta]
    particle.occ_grid = ogc.occupancy_grid_mapping(particle.occ_grid, X, z,
                                                   thk, true_pos, true_neg,
                                                   alpha, beta, z_max)

    return particle
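The examples in this section rely on numpy as np plus project-local modules (dynamics, stat_filter, ogc) that are not part of the listing; note that the misspelling "propogate" follows the project's own identifier. As a minimal sketch of what the two dynamics helpers called above might look like, assuming a simple Euler-integrated unicycle model (the actual project code may differ):

import numpy as np

def propogate_next_state(x, v, w, dt):
    # x is a 3x1 column [x, y, theta]; one Euler step of a unicycle model
    theta = x[2][0]
    return np.array([[x[0][0] + v * np.cos(theta) * dt],
                     [x[1][0] + v * np.sin(theta) * dt],
                     [theta + w * dt]])

def wrap_angle(angle):
    # wrap an angle to the interval [-pi, pi)
    return (angle + np.pi) % (2.0 * np.pi) - np.pi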
Example no. 2
def mcl_turtlebot(chi, v_c, w_c, ranges, bearings, landmark_pts, dt, alpha,
                  sigma_r, sigma_phi):
    num_points = len(chi)
    num_states = len(chi[0])
    chi_bar = np.zeros((num_points, num_states, 1))
    weights = []
    for i in range(0, num_points):
        v_c_cov = alpha[0] * v_c * v_c + alpha[1] * w_c * w_c
        w_c_cov = alpha[2] * v_c * v_c + alpha[3] * w_c * w_c
        x_next = dynamics.propogate_next_state(
            chi[i], v_c + stat_filter.noise(v_c_cov),
            w_c + stat_filter.noise(w_c_cov), dt)
        for j in range(0, num_states):
            chi_bar[i][j] = x_next[j]

        weight = 1
        for j in range(0, len(landmark_pts)):
            f = [ranges[j], bearings[j]]
            weight = weight * dynamics.landmark_measurement_model(
                f, chi_bar[i], landmark_pts[j], sigma_r, sigma_phi)

        weights.append(weight)

    weights = np.array(weights) / np.sum(weights)

    chi = stat_filter.low_variance_sampler(chi_bar, weights)

    return chi
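stat_filter.noise and stat_filter.low_variance_sampler are also external to this listing. From how they are called (noise takes a variance; the sampler here takes states and normalized weights, matching the two-argument call above, while Example no. 6 evidently uses a variant that reads weights off particle objects), plausible minimal sketches would be (hypothetical, not the project's code):

import numpy as np

def noise(cov):
    # zero-mean Gaussian sample with variance cov
    return np.sqrt(cov) * np.random.randn()

def low_variance_sampler(chi, weights):
    # systematic (low-variance) resampling, Probabilistic Robotics, Table 4.4
    M = len(weights)
    new_chi = np.zeros_like(chi)
    r = np.random.uniform(0.0, 1.0 / M)
    c = weights[0]
    i = 0
    for m in range(M):
        u = r + m * (1.0 / M)
        while u > c:
            i += 1
            c += weights[i]
        new_chi[m] = chi[i]
    return new_chi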
Example no. 3
def simulate_sensor_data(x, sigma_r, sigma_phi):
    ranges = []
    bearings = []
    for i in range(0, len(landmark_pts)):
        ranges.append(dynamics.norm([landmark_pts[i][0] - x[0],
                                     landmark_pts[i][1] - x[1]]) +
                      stat_filter.noise(sigma_r * sigma_r))
        bearing_map = np.arctan2(landmark_pts[i][1] - x[1],
                                 landmark_pts[i][0] - x[0])
        bearings.append(bearing_map - x[2] +
                        stat_filter.noise(sigma_phi * sigma_phi))

    return ranges, bearings
Example no. 4
def simulate_sensor_data(x, sigma_r, sigma_phi, fov=360.0 * np.pi / 180.0):
    ranges = []
    bearings = []
    correspondence = []
    for i in range(0, len(landmark_pts)):
        bearing_map = np.arctan2(landmark_pts[i][1] - x[1][0],
                                 landmark_pts[i][0] - x[0][0])
        # wrap the bearing so the field-of-view test below is meaningful
        bearing = dynamics.wrap_angle(bearing_map - x[2][0] +
                                      stat_filter.noise(sigma_phi * sigma_phi))
        if -fov <= bearing <= fov:
            bearings.append(bearing)
            ranges.append(
                dynamics.norm([landmark_pts[i][0] - x[0][0],
                               landmark_pts[i][1] - x[1][0]]) +
                stat_filter.noise(sigma_r * sigma_r))
            correspondence.append(i)

    return ranges, bearings, correspondence
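A hypothetical call site, assuming a module-level landmark_pts list of [x, y] landmark positions and the 3x1 column-vector pose this version expects (all values illustrative only):

landmark_pts = [[6.0, 4.0], [-7.0, 8.0], [6.0, -4.0]]

x_true = np.array([[1.0], [2.0], [0.5]])  # [x, y, theta] as a column vector
ranges, bearings, correspondence = simulate_sensor_data(
    x_true, sigma_r=0.1, sigma_phi=0.05)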
Example no. 5
    # fragment of a localization script: v_c, w_c, alpha1..alpha4, sigma_r,
    # sigma_phi, x0, and dt are assumed to come from the enclosing scope
    sigma_s = 0.0

    Q = np.array([[sigma_r*sigma_r , 0.0],
                  [0.0, sigma_phi*sigma_phi]])

    v = []
    w = []

    sigma = np.eye(3)

    sigma_x = [2.0]
    sigma_y = [2.0]
    sigma_theta = [2.0]

    for i in range(0, len(v_c)):
        v.append(v_c[i] + stat_filter.noise(alpha1 * v_c[i] * v_c[i] +
                                            alpha2 * w_c[i] * w_c[i]))
        w.append(w_c[i] + stat_filter.noise(alpha3 * v_c[i] * v_c[i] +
                                            alpha4 * w_c[i] * w_c[i]))

    x = []
    mu = []

    x_plot = []
    y_plot = []
    theta_plot = []

    x_plot_est = []
    y_plot_est = []
    theta_plot_est = []

    x.append(x0)
    mu.append(x0)
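The fragment stops after seeding the state and estimate histories. A plausible continuation, assuming x0 is a 3x1 column vector, would propagate the sampled noisy commands through the same motion model used in the other examples (an assumption about the missing code, not the original):

    for i in range(0, len(v)):
        x.append(dynamics.propogate_next_state(x[i], v[i], w[i], dt))
        x_plot.append(x[i + 1][0][0])
        y_plot.append(x[i + 1][1][0])
        theta_plot.append(dynamics.wrap_angle(x[i + 1][2][0]))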
Example no. 6
def fast_slam_known_correspondence_turtlebot(v_c, w_c, ranges, bearings,
                                             correspondences, particles, dt, Q,
                                             alpha):

    # empirical fit: weight scaling as a function of the number of measurements
    exponent = -0.0556 * len(ranges) + 8.5556

    scale = 10 ** exponent

    for i in range(len(particles)):
        v_c_cov = alpha[0] * v_c * v_c + alpha[1] * w_c * w_c
        w_c_cov = alpha[2] * v_c * v_c + alpha[3] * w_c * w_c
        x = [[particles[i].x], [particles[i].y], [particles[i].theta]]
        # print "x before: ", particles[i].x
        # print "y before: ", particles[i].y
        # print "theta before: ", particles[i].theta
        x_next = dynamics.propogate_next_state(
            x, v_c + stat_filter.noise(v_c_cov),
            w_c + stat_filter.noise(w_c_cov), dt)
        particles[i].x = copy.deepcopy(x_next[0][0])
        particles[i].y = copy.deepcopy(x_next[1][0])
        particles[i].theta = copy.deepcopy(dynamics.wrap_angle(x_next[2][0]))

        # print i
        # print "x_p0: ", particles[0].x
        # print "y_p0: ", particles[0].y
        # print "theta_p0: ", particles[0].theta
        # print "x: ", particles[i].x
        # print "y: ", particles[i].y
        # print "theta: ", particles[i].theta

        # pdb.set_trace()
        particles[i].weight = 1.0

        for k in range(len(ranges)):
            j = correspondences[k]

            if particles[i].seen.count(j) < 1:
                # first sighting: initialize the landmark from this measurement
                particles[i].seen.append(j)
                x = particles[i].x + ranges[k] * np.cos(bearings[k] +
                                                        particles[i].theta)
                y = particles[i].y + ranges[k] * np.sin(bearings[k] +
                                                        particles[i].theta)

                ldm = landmark.Landmark(j, x, y)
                particles[i].landmarks.append(ldm)

                # initialize the landmark covariance from the inverted Jacobian
                H = calc_H(ldm, particles[i])[0]
                H_inv = np.linalg.inv(H)
                ldm.sigma = np.dot(np.dot(H_inv, Q), H_inv.T)
                ldm.weight = scale * 1.0 / len(particles)
                particles[i].weight *= ldm.weight

            else:
                # re-observation: EKF update of the stored landmark estimate
                idx = particles[i].seen.index(j)

                ldm = particles[i].landmarks[idx]

                z_hat, H = calc_z_hat_and_H(ldm, particles[i])

                S = np.dot(np.dot(H, ldm.sigma), H.T) + Q
                K = np.dot(np.dot(ldm.sigma, H.T), np.linalg.inv(S))

                z = np.array([[ranges[k]], [bearings[k]]])

                res = np.array(z - z_hat)

                res[1][0] = dynamics.wrap_angle(res[1][0])

                mu = np.array([[ldm.x], [ldm.y]]) + np.dot(K, res)

                ldm.x = mu[0][0]
                ldm.y = mu[1][0]

                ldm.sigma = np.dot(np.eye(2) - np.dot(K, H), ldm.sigma)

                particles[i].landmarks[idx] = ldm

                # note: a normalized Gaussian likelihood would use
                # det(2*pi*S) ** -0.5; this code scales det(2*pi*S) directly
                ldm.weight = scale * (np.linalg.det(2 * np.pi * S) * np.exp(
                    -.5 * np.dot(np.dot(res.T, np.linalg.inv(S)), res))[0][0])
                particles[i].weight *= ldm.weight

                # print "Weights: ", particles[i].weight, " For: ", i

        print "Weights: ", particles[i].weight, " For: ", i

    particles = fast_slam_particle.normalize_weights(particles)
    new_particles = stat_filter.low_variance_sampler(particles)

    return new_particles
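calc_H and calc_z_hat_and_H are not part of this listing. For a range-bearing landmark model, the predicted measurement and its Jacobian with respect to the landmark position are standard; a sketch consistent with how the example calls them (again an assumption, not the project's code):

def calc_z_hat_and_H(ldm, particle):
    # predicted range/bearing to the landmark and Jacobian w.r.t. (ldm.x, ldm.y)
    dx = ldm.x - particle.x
    dy = ldm.y - particle.y
    q = dx * dx + dy * dy
    z_hat = np.array([[np.sqrt(q)],
                      [dynamics.wrap_angle(np.arctan2(dy, dx) -
                                           particle.theta)]])
    H = np.array([[dx / np.sqrt(q), dy / np.sqrt(q)],
                  [-dy / q, dx / q]])
    return z_hat, H

def calc_H(ldm, particle):
    # the example indexes calc_H(...)[0], so it presumably returns (H, z_hat)
    z_hat, H = calc_z_hat_and_H(ldm, particle)
    return H, z_hat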