Пример #1
0
def turn_to_search():
    # Rotate the robot 45 degrees in place while searching for a landmark,
    # and mirror the rotation on the particle filter.  Always returns 1.
    # NOTE(review): the increment below is commented out, so the printed
    # turn count never changes — confirm whether that is intentional.
    #    turn_counter += 1
    print 'Turning around. Number of turns:', turn_counter

    # rotate
    arlo.go_diff(30, 29, 0, 1)
    # Sleep for the time a pi/4-radian turn takes at the calibrated rate.
    sleep((math.pi * 0.25) / rotationInOneSecond)
    arlo.stop()
    # Apply the turn (opposite sign) to every particle.
    # NOTE(review): the delta passed is the turn *duration* (angle / rate),
    # not the angle itself; the same formula is used throughout this file,
    # but verify it is what move_particle expects.
    for particle in particles:
        par.move_particle(particle, 0, 0,
                          -(math.pi * 0.25) / rotationInOneSecond)
    return 1
Пример #2
0
def move_particles(particles, dist, angle):
    """Translate every particle by `dist` along its own heading and rotate
    all of them by `angle`.

    Parameters:
        particles: iterable of particle objects exposing getTheta().
        dist: translation distance applied along each particle's heading.
        angle: rotation in degrees, applied uniformly to all particles.
    """
    delta_theta = np.radians(angle)
    for elem in particles:
        theta = elem.getTheta()
        # sin(90deg - theta) == cos(theta) and sin(90deg) == 1.0; the
        # law-of-sines form of the original is kept so the float results
        # are bit-identical.
        p_delta_x = (np.sin(np.radians(90) - theta) * dist) / np.sin(np.radians(90))
        p_delta_y = (np.sin(theta) * dist) / np.sin(np.radians(90))
        # Plain for-loop instead of the original throwaway list
        # comprehension: this call is executed purely for its side effect.
        particle.move_particle(elem, p_delta_x, p_delta_y, delta_theta)
Пример #3
0
        # Landmark is no longer considered in view after this maneuver.
        LMInSight = False

        #if driving_dist > 0:
        #    sleep(driving_dist/translationInOneSecond)
        #arlo.stop()

        # Move particles
        # for particle in particles:
        #     dx=delt(paXarticle.getTheta(),actual_driven_dist)
        #     dy=deltaY(particle.getTheta(),actual_driven_dist)
        #     par.move_particle(particle, dx, -dy, lastMeasuredAngle)
        # Dead-reckon each particle forward by the distance actually driven,
        # projected onto its own heading; no rotation is applied here.
        for particle in particles:
            dx = np.cos(particle.getTheta()) * actual_driven_dist
            dy = np.sin(particle.getTheta()) * actual_driven_dist
            # NOTE(review): dy is negated — presumably the world frame's
            # y-axis is flipped relative to the particle heading; confirm.
            par.move_particle(particle, dx, -dy, 0)

    elif not LMInSight or (LMInSight and visitedLM[lastSeenLM]
                           ) and not (visitedLM[0] and visitedLM[1]):
        # NOTE(review): `and` binds tighter than `or`, so this condition
        # evaluates as A or (B and C) — verify that grouping is intended.

        if turn_counter < 8:
            turn_counter += 1
            # rotate
            # Spin in place and sleep out the time a pi/4-radian turn takes.
            arlo.go_diff(30, 29, 0, 1)
            sleep((math.pi * 0.25) / rotationInOneSecond)
            arlo.stop()
            # Mirror the turn (opposite sign) on every particle.
            for particle in particles:
                par.move_particle(particle, 0, 0,
                                  -(math.pi * 0.25) / rotationInOneSecond)
        elif visitedLM[0] or visitedLM[1]:  # Explore
Пример #4
0
 def move_explosion(self):
     """Advance every particle of this explosion to the current time.

     Reads pygame's millisecond clock, converts it to seconds, and hands
     that timestamp to each particle's own move_particle().
     """
     now_seconds = pygame.time.get_ticks() / 1000
     for spark in self.particle_group:
         spark.move_particle(now_seconds)
Пример #5
0
def updateParticles(particles):
    """Propagate every particle through one second of robot motion.

    Each particle is translated along its own heading by the calibrated
    one-second travel distance and rotated by the fixed turn term used
    throughout this module.
    """
    d_theta = -(math.pi * 0.50) / rotationInOneSecond
    for p in particles:
        heading = p.getTheta()
        par.move_particle(p,
                          np.cos(heading) * translationInOneSecond,
                          np.sin(heading) * translationInOneSecond,
                          d_theta)
Пример #6
0
    # The purpose of the next branch is to get closer to the middle of the
    # two boxes: after the robot has driven half of the distance to the
    # second box, it will drive from its position into the middle of the
    # virtual chart.

    elif not LMInSight or (LMInSight and visitedLM[lastSeenLM]
                           ) and not (visitedLM[0] and visitedLM[1]):
        # NOTE(review): `and` binds tighter than `or`, so this condition
        # evaluates as A or (B and C) — verify that grouping is intended.

        if turn_counter < 8:
            turn_counter += 1
            print 'Turning around. Number of turns:', turn_counter

            # rotate
            # Spin in place and sleep out the time a pi/4-radian turn takes.
            arlo.go_diff(30, 29, 0, 1)
            sleep((math.pi * 0.25) / rotationInOneSecond)
            arlo.stop()
            # Mirror the turn (opposite sign) on every particle.
            for particle in particles:
                par.move_particle(particle, 0, 0,
                                  -(math.pi * 0.25) / rotationInOneSecond)

        elif visitedLM[0] or visitedLM[1]:  # Going around a box. Explore

            print 'Trying to go around a box in front of me'
            # Drive both wheels (power 80/79, direction flags 0,0) for a
            # quarter second.
            arlo.go_diff(80, 79, 0, 0)
            sleep(0.25)

            # Turn with direction flags reversed relative to the search
            # rotation above, for the time a 0.30*pi-radian turn takes.
            arlo.go_diff(30, 29, 1, 0)
            sleep((math.pi * 0.30) / rotationInOneSecond)

            # Drive a fixed 30-unit leg around the obstacle.
            driving_dist = 30
            t = driving_dist / translationInOneSecond
            stop_dist = 200
            # NOTE(review): stop_dist is assigned but never used in this
            # fragment — confirm whether it is consumed further down.
            actdrive = driveArlo(dt, t, particles)
Пример #7
0
def updateParticles(d_dist, d_theta):
    """Apply one odometry step to the global particle set.

    Every particle is moved d_dist along its own heading (the y component
    is negated, as elsewhere in this file) and rotated by d_theta.
    """
    for p in particles:
        heading = p.getTheta()
        par.move_particle(p,
                          np.cos(heading) * d_dist,
                          -(np.sin(heading) * d_dist),
                          d_theta)
Пример #8
0
    # Ask the controller for the next motion command given the target
    # landmark, the accumulated turn, and the current pose estimate.
    # Per the assignments below: [0]=velocity, [1]=angular_velocity,
    # [2]=turn increment.
    updated_values = con.control_frindo(LANDMARK, former_turn, est_pose,
                                        frindo)

    former_turn += updated_values[2]
    angular_velocity = updated_values[1]
    velocity = updated_values[0]

    num_particles = len(particles)
    for p in particles:
        # calculates new orientation
        # NOTE(review): curr_angle is only assigned when angular_velocity
        # != 0; if velocity != 0 while angular_velocity == 0, the call
        # below reads an unbound (or stale) curr_angle — likely a bug.
        if angular_velocity != 0:
            curr_angle = add_to_angular(p.getTheta(), angular_velocity)
        if velocity != 0:
            [x, y] = move_vector(p, velocity)
            particle.move_particle(p, x, y, curr_angle)

    # add_uncertainty(particles, a, b) — presumably positional/angular
    # noise magnitudes; confirm against the particle module.
    if velocity != 0:
        particle.add_uncertainty(particles, 12, 15)
    if velocity == 0 and angular_velocity != 0:
        particle.add_uncertainty(particles, 0, 15)

    # Fetch next frame
    colour, distorted = cam.get_colour()

    # Detect objects
    objectType, measured_distance, measured_angle, colourProb = cam.get_object(
        colour)
    if objectType != 'none':
        # Debug output of the landmark resolved from the detection.
        obs_landmark = ret_landmark(colourProb, objectType)
        print obs_landmark
Пример #9
0
def update_particles(particles, cam, velocity, angular_velocity, world,
                     WIN_RF1, WIN_World):
    """One particle-filter iteration: propagate the particles by the
    commanded motion, weight and resample them against a camera landmark
    observation (if any), then redraw both display windows.

    Returns a dict with the pose estimate ('est_pos'), the observation
    ('obs_obj': [type, distance, angle, landmark] or four Nones), and the
    possibly-resampled particle list ('particles').
    """
    #print 'update: ' + str(angular_velocity)
    cv2.waitKey(4)
    num_particles = len(particles)
    for p in particles:
        # calculates new orientation
        curr_angle = add_to_angular_v2(np.degrees(p.getTheta()),
                                       angular_velocity)
        if velocity > 0.0:
            # Translate along the particle's heading, then rotate.
            [x, y] = move_vector(p, velocity)
            particle.move_particle(p, x, y, curr_angle)
        else:
            # Pure rotation (or standstill): position unchanged.
            particle.move_particle(p, 0.0, 0.0, curr_angle)
    # add_uncertainty(particles, a, b) — presumably positional/angular
    # noise magnitudes; confirm against the particle module.
    if velocity != 0.0:
        particle.add_uncertainty(particles, 12, 5)
    if velocity == 0.0 and angular_velocity != 0.0:
        particle.add_uncertainty(particles, 0, 5)

    # Fetch next frame
    colour, distorted = cam.get_colour()

    # Detect objects
    objectType, measured_distance, measured_angle, colourProb = cam.get_object(
        colour)

    if objectType != 'none':
        # Landmark seen: importance-weight the particles against the
        # measurement and draw a fresh resampled set.
        obs_landmark = ret_landmark(colourProb, objectType)
        observed_obj = [
            objectType, measured_distance, measured_angle, obs_landmark
        ]

        list_of_particles = weight_particles(particles,
                                             np.degrees(measured_angle),
                                             measured_distance, obs_landmark)

        # Columns 0 and 2 of the weighted array feed the resampler.
        particles = resample_particles(list_of_particles[:, [0, 2]])

        particle.add_uncertainty(particles, 15, 5)

        cam.draw_object(colour)
    else:
        # No observation: reset every weight to uniform and add noise.
        observed_obj = [None, None, None, None]
        for p in particles:
            p.setWeight(1.0 / num_particles)

        particle.add_uncertainty(particles, 10, 5)

    est_pose = particle.estimate_pose(particles)

    draw_world(est_pose, particles, world)
    #Show frame
    cv2.imshow(WIN_RF1, colour)
    #Show world
    cv2.imshow(WIN_World, world)

    return {
        'est_pos': est_pose,
        'obs_obj': observed_obj,
        'particles': particles
    }
Пример #10
0
def update_particles(particles, cam, velocity, angular_velocity, world,
                     WIN_RF1, WIN_World):
    """Debug variant of the particle-filter update: propagate particles by
    the commanded motion, weight and hand-resample them against a camera
    landmark observation (if any), and redraw both display windows.

    Returns a dict with the pose estimate ('est_pos'), the observation
    ('obs_obj': [type, distance, angle, landmark] or four Nones), and the
    possibly-resampled particle list ('particles').
    """
    # NOTE(review): raw_input() blocks until Enter is pressed on every
    # call — debugging aid, remove for autonomous runs.
    raw_input()
    print 'update: ' + str(angular_velocity)
    cv2.waitKey(4)
    num_particles = len(particles)
    for p in particles:
        # calculates new orientation

        curr_angle = add_to_angular_v2(np.degrees(p.getTheta()), angular_velocity)
        print 'theta_rad: ' + str(p.getTheta())
        print 'theta_deg: ' + str(np.degrees(p.getTheta()))
        print 'cur_ang_deg: ' + str(np.degrees(curr_angle))
        if velocity > 0.0:
            # Translate along the particle's heading, then rotate.
            [x, y] = move_vector(p, velocity)
            particle.move_particle(p, x, y, curr_angle)
        else:
            # Pure rotation (or standstill): position unchanged.
            particle.move_particle(p, 0.0, 0.0, curr_angle)
            print 'cur_ang_rad: ' + str(curr_angle)
    # add_uncertainty(particles, a, b) — presumably positional/angular
    # noise magnitudes; confirm against the particle module.
    if velocity != 0.0:
        particle.add_uncertainty(particles, 12, 15)
    if velocity == 0.0 and angular_velocity != 0.0:
        particle.add_uncertainty(particles, 0, 15)

    # Fetch next frame
    colour, distorted = cam.get_colour()

    # Detect objects
    objectType, measured_distance, measured_angle, colourProb = cam.get_object(
        colour)

    if objectType != 'none':
        obs_landmark = ret_landmark(colourProb, objectType)
        observed_obj = [objectType, measured_distance, measured_angle,
                        obs_landmark]


        # Importance-weight the particles against the measurement.
        list_of_particles = weight_particles(particles,
                                             np.degrees(measured_angle),
                                             measured_distance, obs_landmark)


        # Hand-rolled resampling: for each slot draw a uniform random
        # number and let when_in_range pick the matching particle, then
        # clone it with a uniform weight.
        particles = []
        for count in range(0, num_particles):
            rando = np.random.uniform(0.0,1.0)  # np.random.normal(0.0, 1.0, 1)
            # dicto = {'i': 500,
            #          'n': 2}
            p = when_in_range(list_of_particles,
                              0,
                              num_particles,
                              rando)
            particles.append(
                particle.Particle(p.getX(), p.getY(), p.getTheta(),
                                  1.0 / num_particles))
        print 'list_of_particles: ' + str(list_of_particles)
        print 'particles: ' + str(particles)

        particle.add_uncertainty(particles, 5, 2)

        # new random particles added
        #for c in range(0, int(math.ceil(num_particles * 0.05))):
        #    p = particle.Particle(500.0 * np.random.ranf() - 100,
        #                          500.0 * np.random.ranf() - 100,
        #                          2.0 * np.pi * np.random.ranf() - np.pi, 0.0)

        #    particles.append(p)

        # Draw detected pattern
        cam.draw_object(colour)
    else:
        observed_obj = [None, None, None, None]
        # No observation - reset weights to uniform distribution
        for p in particles:
            p.setWeight(1.0 / num_particles)

        particle.add_uncertainty(particles, 5, 2)

    # est_pose = particle.estimate_pose(particles)  # The estimate of the robots current pose
    # return [est_pose, observed_obj]

    est_pose = particle.estimate_pose(
        particles)  # The estimate of the robots current pose

    print 'Updated pose: ' + str([est_pose.getX(), est_pose.getY()])
    draw_world(est_pose, particles, world)
    # Show frame
    cv2.imshow(WIN_RF1, colour)
    # Show world
    cv2.imshow(WIN_World, world)
    return {'est_pos': est_pose,
            'obs_obj': observed_obj,
            'particles': particles}