Example #1
def particle_filter(ps, control, scan):
    global weights
    v, w = control
    v_dt = v * dt                            # forward distance per time step
    w_dt = w * dt                            # rotation per time step
    sigma = sqrt(v*v + w*w) / 6.0 * dt       # motion noise scales with speed

    def motion_update(p):
        # sample a new pose from the velocity motion model; a particle that
        # lands inside a wall is replaced with a fresh random particle
        x, y, t = p
        new_p = (x + random.gauss(v_dt*cos(t), sigma),
                 y + random.gauss(v_dt*sin(t), sigma), 
                 t + random.gauss(w_dt, sigma))
        if not mcl_tools.map_hit(new_p[0], new_p[1]):
            return new_p
        else:
            return mcl_tools.random_particle()


    '''
    p_prime = [motion_update(p) if not mcl_tools.map_hit(p[0], p[1]) else
               mcl_tools.random_particle()
               for p in ps]
    '''
    
    # measurement update: weight each particle by how well it explains the scan
    new_weights = np.array([particle_weight(p, scan) for p in ps])
    weights *= new_weights
    weights /= weights.sum()

    # effective sample size (use a loop variable that does not shadow the
    # angular velocity w unpacked from control above)
    wvar = 1. / sum(wt*wt for wt in weights)
    if wvar < random.gauss(PAR_COUNT*.81, 60):
        # too few effective particles: resample by weight and inject NOISE random ones
        ps = (mcl_tools.random_sample(ps, PAR_COUNT - NOISE, weights) +
              [mcl_tools.random_particle() for ii in xrange(NOISE)])
        weights = np.ones(PAR_COUNT)  # reset to uniform weights

    return [motion_update(p) for p in ps]
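
This example triggers resampling with the effective sample size, ESS = 1 / sum(w_i^2): when the weights concentrate on a few particles, ESS drops and the set is resampled. Below is a minimal standalone sketch of that criterion, assuming a plain multinomial resample and an illustrative 0.5*N threshold (neither is taken from the example above).

# Sketch only: ESS-triggered resampling with assumed helper names.
import numpy as np

def effective_sample_size(weights):
    w = np.asarray(weights, dtype=float)
    w = w / w.sum()
    return 1.0 / np.sum(w * w)

def maybe_resample(particles, weights, rng=np.random):
    n = len(particles)
    if effective_sample_size(weights) < 0.5 * n:    # illustrative threshold
        p = np.asarray(weights, dtype=float)
        idx = rng.choice(n, size=n, p=p / p.sum())  # multinomial resample
        return [particles[i] for i in idx], np.ones(n) / n
    return particles, weights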
Example #2
def particle_filter(ps, control, scan):
    global last_time, PAR_COUNT, MID_LIFE, MID_UNCERT, MID_COUNT, \
        LONG_LIFE, LONG_UNCERT, LONG_COUNT

    if last_time is None:
        last_time = rp.get_rostime()

    # probabilistically move all the particles
    new_pos = partial(integrate_control_to_distance, control, (rp.get_rostime() - last_time).to_sec())
    last_time = rp.get_rostime()
    new_ps = []
    for part in ps:
        # part[0] = x, part[1] = y
        if not mcl_tools.map_hit(part[0], part[1]):
            new_ps.append(new_pos(part))
        else:
            # the particle is inside an obstacle: replace it with a random one
            new_ps.append(mcl_tools.random_particle())
    ps = new_ps  # update our particle set

    # update weights of each particle
    weights = []
    for part in ps:
        weight = particle_weight(scan, part)
        weights.append(weight)

    # normalize the weights
    weights = np.multiply(weights, 1.0 / np.sum(weights))

    # ------------------- RESAMPLING -------------------
    # mid-level correction: every MID_LIFE updates, replace MID_UNCERT
    # particles (about 20% of the set) with random ones
    if MID_COUNT > MID_LIFE:
        ps = mcl_tools.random_sample(ps, PAR_COUNT - MID_UNCERT, weights)
        ps.extend(mcl_tools.random_particle() for x in range(MID_UNCERT))
        MID_COUNT = 0
        print "mid"
        return ps
    else:
        MID_COUNT += 1

    # long-term correction: every LONG_LIFE updates, replace LONG_UNCERT
    # particles (about 50% of the set) with random ones
    if LONG_COUNT > LONG_LIFE:
        ps = mcl_tools.random_sample(ps, PAR_COUNT - LONG_UNCERT, weights)
        ps.extend(mcl_tools.random_particle() for x in range(LONG_UNCERT))
        LONG_COUNT = 0
        print "long"
        return ps
    else:
        LONG_COUNT += 1

    # no correction this cycle: normal weighted resample of the full set
    return mcl_tools.random_sample(ps, PAR_COUNT, weights)
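
mcl_tools.random_sample is not shown in these snippets. For this kind of weighted draw, a common choice is low-variance (systematic) resampling; the helper below is a sketch of that technique under assumed names, not the actual mcl_tools code.

# Sketch of low-variance (systematic) resampling; an assumption about what a
# weighted-sample helper could look like, not the real mcl_tools.random_sample.
import random

def low_variance_sample(particles, count, weights):
    total = float(sum(weights))
    step = total / count
    r = random.uniform(0.0, step)   # one random offset shared by all pointers
    picks = []
    i, c = 0, weights[0]
    for m in range(count):
        u = r + m * step            # evenly spaced pointers over the weight mass
        while u > c:
            i += 1
            c += weights[i]
        picks.append(particles[i])
    return picks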
Example #3
def particle_filter(ps, control, scan):
    """
    :param ps:      particle list
    :param control: tuple of linear and angular velocity
    :param scan:    laser scan msg
    :return:        updated list of particles
    """
    global W_SLOW, W_FAST, COUNT, PRIOR_TIME

    # need this to set initial time
    if PRIOR_TIME is None:
        PRIOR_TIME = rp.get_rostime()

    # <editor-fold desc="motion update">
    # scale the commanded velocities by the time elapsed since the last update
    time_dif = (rp.get_rostime() - PRIOR_TIME).to_sec()
    con = [time_dif * i for i in control]
    # con = np.multiply(time_dif, control)  # numpy would help for large particle sets
    rp.loginfo("control: " + str(con))

    # move each particle according to the time-scaled control
    for p in xrange(PARTICLE_COUNT):
        ps[p].move(con)
    # </editor-fold>

    # <editor-fold desc="Weight update">
    weight_p = []
    w_avg = 0.0
    w_sum = 0.0
    max_w = (0, 0.0)  # id, weight

    for p in xrange(PARTICLE_COUNT):
        w_sum += ps[p].measurement_prob(scan)
        weight = ps[p].get_weight()  # individual weight
        weight_p.append(weight)
        w_avg += 1.0/PARTICLE_COUNT * weight

        if weight > max_w[1]:
            max_w = p, weight
    #print weight_p
    mcl_tools.show_best_pose(ps[max_w[0]].get_pose())
    # </editor-fold>

    # <editor-fold desc="random sampling with probability">

    sample_p = []

    # do actual resampling

    # 0 <= alpha_slow << alpha_fast
    alpha_slow = 0.001   # decay rate for long-term avg
    alpha_fast = 0.08    # decay rate for short-term avg

    W_SLOW += alpha_slow * (w_avg - W_SLOW)
    W_FAST += alpha_fast * (w_avg - W_FAST)
    #print 'long term avg: ', W_SLOW
    #print 'short term avg: ', W_FAST

    # if the short-term likelihood is worse than the long-term one, random samples are added in proportion to the quotient
    # sudden decrease in likelihood increases number of random samples
    random_prob = max(0.0, 1.0 - W_FAST/W_SLOW)

    # how many particles to replace with random samples (only the length of this array is used below)
    rand_array = np.random.randint(low=1, high=PARTICLE_COUNT-1, size=int(PARTICLE_COUNT * random_prob))
    index = int(np.random.random() * PARTICLE_COUNT)  # random starting index for the resampling wheel
    beta = 0.0
    for k in xrange(PARTICLE_COUNT-len(rand_array)):
        #print "gets here: ", k
        # from thrun's udacity class
        beta += np.random.random() * 2.0 * (max_w[1]/w_sum)
        #print beta, ps[index].get_weight()
        count = 0
        while beta > ps[index].get_weight():
            count += 1
            #print "stuck here?", count
            beta -= ps[index].get_weight()
            index = (index + 1) % PARTICLE_COUNT
        sample_p.append(ps[index])

    #print "sample_p len ", len(sample_p) #weight
    # create random particles

    while len(sample_p) < PARTICLE_COUNT:
        r = ROBOT()
        r.set_weight(w_avg)  # give it a weight so it shows up
        COUNT += 1
        sample_p.append(r)

    #print '\ncreated ', COUNT, ' new particles'
    #print 'total num particles:', len(sample_p)

    ps = sample_p

    # </editor-fold>
    weight_p = np.multiply(weight_p, 1.0/np.sum(weight_p))  # normalize the weights

    # reset data
    COUNT = 0
    PRIOR_TIME = rp.get_rostime()

    return mcl_tools.random_sample(ps, PARTICLE_COUNT, weight_p)
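
The W_SLOW / W_FAST bookkeeping above is the augmented-MCL recovery heuristic: random particles are injected with probability max(0, 1 - w_fast / w_slow), so a sudden drop in the average weight floods the filter with fresh hypotheses. Below is a self-contained sketch of just that decision; the class name, helper names, and decay rates are illustrative, not taken from the example.

# Sketch of augmented-MCL random-particle injection; names and rates are
# illustrative assumptions, not the code of the example above.
class AugmentedInjector(object):
    def __init__(self, alpha_slow=0.001, alpha_fast=0.08):
        self.alpha_slow = alpha_slow
        self.alpha_fast = alpha_fast
        self.w_slow = None
        self.w_fast = None

    def update(self, w_avg):
        """Feed the average particle weight; return P(replace a particle with a random one)."""
        if self.w_slow is None:
            self.w_slow = self.w_fast = w_avg
        self.w_slow += self.alpha_slow * (w_avg - self.w_slow)
        self.w_fast += self.alpha_fast * (w_avg - self.w_fast)
        if self.w_slow <= 0.0:
            return 0.0
        return max(0.0, 1.0 - self.w_fast / self.w_slow)

# usage sketch (random_particle is a stand-in for a map-aware sampler):
# p_rand = injector.update(w_avg)
# ps = [random_particle() if random.random() < p_rand else p for p in ps]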