Code example #1
File: Q8.py  Project: michaelyeah7/hw0_pf
def Q8():
    # Load data: odometry.dat for commands, measurement.dat for comparison, landmark_gt.dat for the measurement model
    # odom_data = load_data('ds1_Odometry.dat', 3, 0, [0, 4, 5])
    odom_data = [[0.5, 0, 1], [0, -1 / (2 * np.pi), 1], [0.5, 0, 1],
                 [0, 1 / (2 * np.pi), 1], [0.5, 0, 1]]
    landmark_gt_data = load_data('ds1_Landmark_Groundtruth.dat', 3, 0,
                                 [0, 2, 4, 6, 8])
    barcode_data = load_data('ds1_Barcodes.dat', 3, 0, [0, 3])
    barcode_dict = generate_barcode_dict(barcode_data)

    gt_data = load_data('ds1_Groundtruth.dat', 3, 0, [0, 3, 5, 7])

    #init particles
    particle_num = 50
    filter = ParticleFilter(particle_num)
    # init_pose = [gt_data[0][1], gt_data[0][2], gt_data[0][3]]
    init_pose = [0, 0, 0]
    filter.generate_particles(init_pose)

    #init environment with robot and particles
    particle_env = ParticleEnv(init_pose, filter.particles)

    maxsteps = len(odom_data)
    k = 0
    robot_trajectory = [init_pose]
    robot_pos_predict_trajectory = []
    for i in range(maxsteps):
        vel = [odom_data[i][0], odom_data[i][1]]
        duration = odom_data[i][2]
        particle_env.forward(vel, duration)

        # Record the robot's trajectory and the filter's prediction of the robot's pose
        robot_trajectory.append(particle_env.robot_pos)

        # Get the robot's measurements for this duration; if there are none, skip to the next one
        # k, robot_measurements = particle_env.find_available_measurement(k, odom_data[i-1][0], odom_data[i][0])
        # print("robot_measurements length",len(robot_measurements))
        robot_measurements = []
        if len(robot_measurements) == 0:
            robot_pos_predict_trajectory.append(particle_env.robot_pos_predict)
        else:
            # robot_measurements is a list of [barcode, range, bearing] entries
            # Given this duration's measurements, update every particle's weight
            filter.update(robot_measurements)

            #record the filter's prediction
            robot_pos_predict_trajectory.append(particle_env.robot_pos_predict)

            # If degeneracy is too high, resample
            filter.resample()
    print("prediction", robot_pos_predict_trajectory)
    plot_predict_trajectory_Q8(robot_trajectory, robot_pos_predict_trajectory)
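The example above relies on filter.update(robot_measurements) and filter.resample() without showing their bodies. The sketch below illustrates what such a weight update and degeneracy-triggered resampling step could look like; it is not the project's implementation. It assumes particles stored as an (N, 3) array of [x, y, theta], a parallel weight array, a landmarks dict of known landmark positions keyed by barcode, and illustrative noise parameters sigma_r and sigma_b.

import numpy as np

def update_weights(particles, weights, measurements, landmarks,
                   sigma_r=0.1, sigma_b=0.05):
    # measurements: list of [barcode, range, bearing]
    # landmarks:    dict mapping barcode -> (x, y) of the landmark
    for barcode, z_r, z_b in measurements:
        lx, ly = landmarks[barcode]
        dx = lx - particles[:, 0]
        dy = ly - particles[:, 1]
        expected_r = np.hypot(dx, dy)
        expected_b = np.arctan2(dy, dx) - particles[:, 2]
        err_b = (z_b - expected_b + np.pi) % (2 * np.pi) - np.pi  # wrap to [-pi, pi]
        weights *= np.exp(-0.5 * ((z_r - expected_r) / sigma_r) ** 2)
        weights *= np.exp(-0.5 * (err_b / sigma_b) ** 2)
    weights += 1e-300                       # avoid an all-zero weight vector
    weights /= np.sum(weights)
    return weights

def resample_if_degenerate(particles, weights):
    # Systematic resampling, triggered when the effective sample size collapses
    n = len(weights)
    n_eff = 1.0 / np.sum(weights ** 2)
    if n_eff < n / 2:
        positions = (np.arange(n) + np.random.uniform()) / n
        cumulative = np.cumsum(weights)
        cumulative[-1] = 1.0                # guard against round-off
        indexes = np.searchsorted(cumulative, positions)
        particles = particles[indexes]
        weights = np.full(n, 1.0 / n)
    return particles, weights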
Code example #2
File: localize.py  Project: ehuang3/pf
def main():
    parser = argparse.ArgumentParser(description='Laser unit test')
    parser.add_argument('--map', type=str, default='../data/map/wean.dat', help='Ground truth occupancy map')
    parser.add_argument('--log', type=str, default='../data/log/robotdata1.log', help='Robot data log file')
    parser.add_argument('--lfield', type=str, default='./config/lfield_40.csv', help='Likelihood field')
    parser.add_argument('--save', type=str, default='./test/', help='Which folder to save to.')
    args = parser.parse_args()

    # Read in map.
    map = Map()
    map.readMap(args.map)

    # Number of data points.
    num_odometry = 0
    num_laser = 0

    # Laser scan data.
    z = []

    # Read in log file.
    with open(args.log) as log:
        for line in log:
            token = line.split()
            if token[0] == 'O':
                num_odometry += 1
            elif token[0] == 'L':
                num_laser += 1
                z.append(np.array(token[1:]))

    # n = len(z)
    # z = [z[i] for i in range(0, n-1, 3)]

    # Convert z into full numpy array.
    z = np.array(z, dtype='float64')

    # Load likelihood field.
    L = LikelihoodField(0.01, 40, 8)
    L.loadField(args.lfield)

    # Run filter
    filter = ParticleFilter()
    # filter.setSensorModel(SensorModel(map, 0.7, 0.29, 0.01))
    filter.setLikelihoodField(L)
    filter.run(map, z, args.save)
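The likelihood field loaded through LikelihoodField is only referenced here, not defined. As a rough sketch of the underlying sensor model, a particle pose can be scored by projecting each beam endpoint into the map and evaluating a Gaussian on the precomputed distance to the nearest obstacle. Everything below (the function name, the dist_field layout, resolution, sigma_hit, z_max) is illustrative, not the project's API.

import numpy as np

def likelihood_field_weight(pose, ranges, dist_field, resolution=0.01,
                            sigma_hit=0.2, z_max=80.0):
    # pose:       (x, y, theta) of the particle in world coordinates
    # ranges:     one range reading per evenly spaced bearing over 180 degrees
    # dist_field: 2D grid of distances from each cell to the nearest obstacle
    x, y, theta = pose
    bearings = theta + np.linspace(-np.pi / 2, np.pi / 2, len(ranges))
    weight = 1.0
    for r, b in zip(ranges, bearings):
        if r >= z_max:                      # skip max-range (no-return) beams
            continue
        ex = x + r * np.cos(b)              # beam endpoint in world frame
        ey = y + r * np.sin(b)
        i = int(np.floor(ey / resolution))  # endpoint -> grid cell
        j = int(np.floor(ex / resolution))
        if 0 <= i < dist_field.shape[0] and 0 <= j < dist_field.shape[1]:
            d = dist_field[i, j]
            weight *= np.exp(-0.5 * (d / sigma_hit) ** 2)
        else:
            weight *= 1e-6                  # endpoint falls outside the map
    return weight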
Code example #3
    def __init__(self):
        self._has_move = False  # Whether the left or right arrow has been pressed
        # Class instances
        self._static_landmarks = StaticLandmarks(
            [randint(1, DIM_OF_WORLD[0] - 1) for i in range(NB_OF_LANDMARKS)],
            [randint(1, DIM_OF_WORLD[1] - 1) for i in range(NB_OF_LANDMARKS)])
        self._robot = Robot(randint(1, DIM_OF_WORLD[0] - 1),
                            randint(1, DIM_OF_WORLD[1] - 1), 0, STD_ACTUATORS)
        self._distance_sensor = DistanceSensor(self._robot,
                                               self._static_landmarks,
                                               STD_SENSOR)
        self._particle_filter = ParticleFilter(self._robot._noisy_position,
                                               NB_OF_PARTICLES)
        self._GI_handler = GraphicalInterfaceHandler(
            self._robot._position, self._static_landmarks,
            self._particle_filter._particles)
        # Make the GI respond to key-press events
        self.cid = self._GI_handler._trajectory.figure.canvas.mpl_connect(
            'key_press_event', self)
Code example #4
    def getInference(self):
        if not self.hasInference:
            rows = self.model.getBeliefRows()
            cols = self.model.getBeliefCols()
            if Const.INFERENCE == 'particleFilter':
                self.inference = ParticleFilter(rows, cols)
            elif Const.INFERENCE == 'exactInference':
                self.inference = ExactInference(rows, cols)
            else:
                raise Exception(Const.INFERENCE + ' not understood')
            self.hasInference = True
        return self.inference
Code example #5
File: robotSim.py  Project: kunyanglu/auv-sim
def main():
    #pos = create_cartesian(catalina.START, catalina.ORIGIN_BOUND)
    test_robot = RobotSim(5.0, 5.0, 0, 0.1, 0.5, 1, 3, 0)
    BOUNDARY_RANGE = 500
    """
    for i in range(test_robot.num_of_auv - 1):
        x = random.uniform(- BOUNDARY_RANGE, BOUNDARY_RANGE)
        y = random.uniform(- BOUNDARY_RANGE, BOUNDARY_RANGE)
        z = random.uniform(- BOUNDARY_RANGE, BOUNDARY_RANGE)
        theta = 0
        velocity = random.uniform(0,4)
        curr_traj_pt_index = 0
        w_1 = random.uniform(-math.pi/2, math.pi/2)
    
        test_robot.auv_dict[i + 1] = Auv(x,y,z, theta, velocity, w_1, curr_traj_pt_index, i)
    """
    theta = 0
    velocity = 1 / 2
    w_1 = random.uniform(-math.pi / 2, math.pi / 2)
    curr_traj_pt_index = 0
    test_robot.auv_dict[1] = Auv(100, 0, 10, theta, velocity, w_1,
                                 curr_traj_pt_index, 1)
    test_robot.auv_dict[2] = Auv(0, 150, 10, theta, velocity, w_1,
                                 curr_traj_pt_index, 2)
    # Load the shark trajectories from the CSV files
    # The last parameter specifies the IDs of the sharks we want to track
    test_robot.setup("./data/shark_tracking_data_x.csv",
                     "./data/shark_tracking_data_y.csv", [1, 2])
    shark_state_dict = test_robot.get_all_sharks_state()
    # create a dictionary of all the particleFilters
    shark_state = shark_state_dict[1]
    auv_state = [test_robot.auv_dict[1], test_robot.auv_dict[2]]
    #list of auv objects
    test_robot.filter_dict[0] = ParticleFilter(shark_state.x, shark_state.y,
                                               auv_state)

    test_robot.main_navigation_loop()

    # test_robot.display_auv_trajectory()

    # To hide the live graph, pass in False
    test_robot.main_navigation_loop(True)
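The comment above speaks of a dictionary of all the particle filters, yet the snippet instantiates only one (for the first tracked shark). If the intent is one filter per tracked shark, a loop along the following lines would build the rest; this is only a sketch that reuses the constructor signature shown above.

    # Hypothetical: one ParticleFilter per tracked shark, keyed by index
    for idx, shark_id in enumerate([1, 2]):
        shark_state = shark_state_dict[shark_id]
        test_robot.filter_dict[idx] = ParticleFilter(shark_state.x,
                                                     shark_state.y, auv_state)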
Code example #6
'''
Created on May 3, 2017

@author: vinicius
'''
import numpy as np
import pygame
import time
from particleFilter import ParticleFilter
from math import cos, sin, pi
from funcoesProbabilidade import sample

largura, altura = 400, 400  # window width and height
pf = ParticleFilter()
pygame.init()
screen = pygame.display.set_mode((largura, altura))
done = False
bg = pygame.image.load("mapa.jpg")  # map image used as the background
i = 0
X = []  # particle set
PosicaoRobo = (20, 20, 0)  # robot pose (x, y, theta)
for j in range(99):
    X.append(pf.newRandomParticle())
#X = []
X.append((20, 20, 0))  # seed one particle at the true robot pose


#X.append(pf.newRandomParticle())
def move():
    global i
    global u
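The snippet is cut off inside move(), which presumably applies the current command u to every particle with sampled noise. Below is a minimal sketch of such a sample-based velocity update; it assumes sample(b) draws a zero-mean random value with spread b (the usual probabilistic-robotics helper), and the function name and noise parameters are illustrative.

def move_particles(X, u, dt=1.0, alpha_v=0.1, alpha_w=0.05):
    # u = (v, w): commanded translational and rotational velocity
    moved = []
    for (x, y, theta) in X:
        v = u[0] + sample(alpha_v)          # noisy translational velocity
        w = u[1] + sample(alpha_w)          # noisy rotational velocity
        x_new = x + v * dt * cos(theta)
        y_new = y + v * dt * sin(theta)
        theta_new = (theta + w * dt) % (2 * pi)
        moved.append((x_new, y_new, theta_new))
    return moved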
Code example #7
minimumDetectionArea = 2000
imageSize = (480, 360)
history_time = 50
image_no = 0
speed_mean = initialSpeed

# Creating an instance for VideoCapture object
# Your video file goes here
capture = cv2.VideoCapture("Video1.mp4")

#creating an instance of BackgroundSubtractor object
bg_sub = cv2.createBackgroundSubtractorMOG2(history=history_time,
                                            detectShadows=False)

# Creating instance of ParticleFilter object
pf = ParticleFilter(N, initialConditions, imageSize)

# Loop as long as frames are available
while True:

    #reading the current frame
    ret, captured_frame = capture.read()

    #break the loop if no input is received
    if captured_frame is None:
        break

    #resizing the frame received
    captured_frame = cv2.resize(captured_frame, (imageSize))

    #applying background subtraction method to get foreground mask
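The loop is truncated at this point. A plausible continuation is sketched below: apply the MOG2 subtractor to obtain the foreground mask, keep the contours larger than minimumDetectionArea, and feed the centroid of the largest blob to the particle filter. The cv2 calls are standard OpenCV (4.x return signature for findContours), but pf.predict, pf.update, and pf.resample are hypothetical method names, not taken from the project.

    # --- hypothetical continuation, not part of the original source ---
    fg_mask = bg_sub.apply(captured_frame)

    # Keep only sufficiently large moving blobs
    contours, _ = cv2.findContours(fg_mask, cv2.RETR_EXTERNAL,
                                   cv2.CHAIN_APPROX_SIMPLE)
    blobs = [c for c in contours if cv2.contourArea(c) > minimumDetectionArea]

    if blobs:
        # Centroid of the largest blob is the measurement for this frame
        m = cv2.moments(max(blobs, key=cv2.contourArea))
        cx, cy = m['m10'] / m['m00'], m['m01'] / m['m00']
        pf.predict()         # hypothetical: propagate particles one frame
        pf.update((cx, cy))  # hypothetical: re-weight around the detection
        pf.resample()        # hypothetical: resample the particle set

    image_no += 1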
Code example #8
File: main.py  Project: rhythm92/smallParticleFilter
        x0 = np.array([0.3, 0.5])
        oscillator = XYContainer()

    dataGen = GenerateTestData(oscillator, x0.size)
    pltWorker = extPlot()
    numSamples = 10
    samplingTime = 10.0 / float(numSamples)
    dt = 0.001
    dtf = 0.001
    procs = 0.01
    obs = 0.01
    (x, y) = dataGen.generateSamplePointsGG(samplingTime, numSamples, dt, x0, procs, obs)
    print "samplePoints generated"
    finalTime = samplingTime * (numSamples - 1)

    pf = ParticleFilter(oscillator, x0.size)
    pf.runFilter(y, samplingTime, dtf, procs, obs)
    pltWorker.plotPath(x, finalTime, 0.5)
    pltWorker.plotMarkers(y, finalTime, mk="*", ms=10, a=0.4)
    pltWorker.plotPFMarkers(pf.x, pf.w, finalTime)
    pltWorker.save("dists.pdf")
    pltWorker.clear()
    z = pf.getAveragePath()
    pltWorker.plotPath(x, finalTime, 0.5)
    pltWorker.plotMarkers(y, finalTime)
    pltWorker.plotMarkers(z, finalTime, mk="x", ms=8)
    pltWorker.save("sir.pdf")
    pltWorker.clear()
    ac = dataGen.calculateAccuracy(x, z)
    print "Particle filter error:", ac
Code example #9
class Base:
    def __init__(self):
        self._has_move = False  # Whether the left or right arrow has been pressed
        # Class instances
        self._static_landmarks = StaticLandmarks(
            [randint(1, DIM_OF_WORLD[0] - 1) for i in range(NB_OF_LANDMARKS)],
            [randint(1, DIM_OF_WORLD[1] - 1) for i in range(NB_OF_LANDMARKS)])
        self._robot = Robot(randint(1, DIM_OF_WORLD[0] - 1),
                            randint(1, DIM_OF_WORLD[1] - 1), 0, STD_ACTUATORS)
        self._distance_sensor = DistanceSensor(self._robot,
                                               self._static_landmarks,
                                               STD_SENSOR)
        self._particle_filter = ParticleFilter(self._robot._noisy_position,
                                               NB_OF_PARTICLES)
        self._GI_handler = GraphicalInterfaceHandler(
            self._robot._position, self._static_landmarks,
            self._particle_filter._particles)
        # Make the GI respond to key-press events
        self.cid = self._GI_handler._trajectory.figure.canvas.mpl_connect(
            'key_press_event', self)

    # Event-sensitive function, called when a key is pressed
    def __call__(self, event):
        """
            Event-sensitive function, called whenever a key is pressed.
            Runs the filter's steps, updates the robot's position
            and plots the new values in the GI
        """
        self.check_key()  # Update the robot position with respect to the pressed key
        if self._has_move:
            # Update sensor info (with noise)
            noisy_dist_function = self._distance_sensor.compute_noisy_distance()
            # Update the particles' positions based on the actuator state
            self._particle_filter.prediction(self._robot._v, self._robot._w,
                                             STD_ACTUATORS)
            # Compute the particles' weights
            self._particle_filter.update_weights(noisy_dist_function,
                                                 self._static_landmarks,
                                                 STD_SENSOR)
            # Resample the particles
            self._particle_filter.resample_particles()
            # Estimate position (barycenter)
            estimated_positon, covariance = self._particle_filter.estimate_position()
            # Print output
            # self.print_out(self._robot._noisy_position,estimated_positon,covariance)
            # Update the graphical interface
            self._GI_handler.update_GI(self._robot._position,
                                       self._robot._noisy_position,
                                       self._particle_filter._particles)
            self._has_move = False

    def check_key(self):
        """
            Check whether one of the arrow keys has been pressed and update the
            robot's position accordingly
        """
        if keyboard.is_pressed('right'):
            self._robot.move(1)
            self._has_move = True
        if keyboard.is_pressed('left'):
            self._has_move = True
            self._robot.move(0)
        if keyboard.is_pressed('up'):
            self._robot.change_orientation(1)
        if keyboard.is_pressed('down'):
            self._robot.change_orientation(0)

    def print_out(self, real_pos, estimated_pos, covariance):
        print("\nrealistic robot position :")
        real_pos.print_pos()
        print("estimated position :")
        estimated_pos.print_pos()
        print("with covariance = " + str(covariance))

    def run(self):
        """
            Loop until 'q' is pressed.
            The __call__ function handles the key events
        """
        while not keyboard.is_pressed('q'):
            plt.show()
            plt.close()
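ParticleFilter.estimate_position() is not shown; according to the comment it returns the barycenter of the particles together with a covariance. A minimal standalone sketch of such an estimate, assuming an (N, 2) particle array and normalized weights, with illustrative names:

import numpy as np

def estimate_position(particles, weights):
    # particles: shape (N, 2) holding (x, y) per particle
    # weights:   shape (N,), assumed to sum to 1
    mean = np.average(particles, axis=0, weights=weights)
    centered = particles - mean
    covariance = (weights[:, None] * centered).T @ centered
    return mean, covariance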