def go_particle():
    img = Image()
    detect = Detection()
    found = []
    initialState = None
    pf = None
    histograms = []
    detections = []
    hist_ref = None

    # Collection loop
    while True:
        img.get()

        # Data-collection / learning phase
        if len(histograms) < 10:
            print(len(histograms))
            # Detect faces
            found = detect.faces(img.image)
            if len(found) > 0:
                hist_ref = img.getColorHistogram(found[0]).ravel()
                histograms.append(hist_ref)
                img.show_hist(hist_ref)
                hist_ref = hist_ref / float(hist_ref.sum())
        else:
            # Learning finished:
            # initialize the filter, using the last ROI/found as initialState
            # print(len(hist_ref))
            if initialState is None:
                x, y, w, h = found[0]
                hist = img.getColorHistogram(found[0]).ravel()
                dx = random.uniform(0, 5)
                dy = random.uniform(0, 5)
                dw = random.uniform(0, .5)
                dh = random.uniform(0, .5)
                initialState = np.array([x, y, w, h, dx, dy, dw, dh])
                ceroState = np.random.uniform(0, img.size[1], 8)
                cov = np.cov(np.asarray([initialState, ceroState]).T)
                pf = ParticleFilter(initialState, cov, hist_ref, 10., 100)
                u = np.zeros((100, 4))
                for rect in pf.states:
                    rect = rect[:4]
                    img.draw_roi([rect])
                # pf.update(img)
            else:
                # Filter already initialized: update and predict
                old = pf.states
                pf.predict(img.size[0], img.size[1], u)
                pf.update(img)
                for rect in pf.states:
                    rect = rect[:4]
                    img.draw_roi([rect])
                u = (old - pf.states)[:, -4:]

        img.show()
        if 0xFF & cv2.waitKey(5) == 27:  # exit on Esc
            break
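The tracker above weights particles by comparing each candidate ROI's color histogram against hist_ref. A minimal sketch of that weighting, assuming a Bhattacharyya-based likelihood (the function name and sigma parameter are illustrative, not the internals of the ParticleFilter used above):

import numpy as np

def histogram_weight(hist_roi, hist_ref, sigma=10.0):
    """Weight a particle by color-histogram similarity to the reference."""
    p = hist_roi / max(float(hist_roi.sum()), 1e-12)  # normalize to a distribution
    q = hist_ref / max(float(hist_ref.sum()), 1e-12)
    bc = np.sum(np.sqrt(p * q))                       # Bhattacharyya coefficient in [0, 1]
    d = np.sqrt(max(1.0 - bc, 0.0))                   # Bhattacharyya distance
    return np.exp(-d**2 / (2.0 * sigma**2))           # Gaussian likelihood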
def main():
    rospy.init_node("particle_based_tracking")

    detector = Detector(visualize=False, modelfileName=rospy.get_param('~model'))
    transformer = Transformer("transformer")
    pf = ParticleFilter(500, HParticle, metric, explorationFactor=0.1,
                        noiseFactor=0.05, averageType="weighted")
    pf.generateParticles()
    viz = PFViz(pf, "/odom", "myViz")

    history = []
    while not rospy.is_shutdown():
        try:
            detector.getImage()
            detector.processImage()
            pixels = detector.centroids[:]
            if pixels:
                startPoint, endPoints = transformer.transform(pixels)
                rospy.loginfo(len(startPoint))
                rospy.loginfo(np.asarray(endPoints).shape)
                history.append((startPoint[:-1], np.asarray(endPoints)[:, :-1]))

                T0 = time.time()
                for _ in range(numItersPerSample):
                    print("Updating particle filter", end=' ')

                    print("\tmeasuring", end=' ')
                    t0 = time.time()
                    pf.measureParticles(history)
                    t1 = time.time()
                    print("dt=%f" % (t1 - t0), end=' ')

                    print("\tweighting", end=' ')
                    t0 = time.time()
                    pf.calculateWeights()
                    t1 = time.time()
                    print("dt=%f" % (t1 - t0), end=' ')

                    print("\tpredicting", end=' ')
                    t0 = time.time()
                    prediction = pf.predict()
                    t1 = time.time()
                    print("dt=%f" % (t1 - t0), end=' ')

                    viz.update(history[-1])

                    print("\tresampling", end=' ')
                    t0 = time.time()
                    pf.resample()
                    t1 = time.time()
                    print("dt=%f" % (t1 - t0), end=' ')

                    pf.update(None)
                    print()
                T1 = time.time()
                print("Total particle filter update time %f" % (T1 - T0))
        except KeyboardInterrupt:
            break
Map = read_map_from_file('map_data.txt')
measurements = read_measurements_from_file('measurements.txt')

# The number of particles in the set is an important parameter.
pf = ParticleFilter(30)

# Fill in the graph data as defined below so that a 2-D plot can be drawn.
graph = []
count = 0
for m in measurements:
    count += 1
    if not pf.initialized:
        # Initialize the particle set using the GNSS measurement.
        pf.initialize(m['gnss_x'], m['gnss_y'], m['gnss_theta'], *pos_std)
    else:
        # Prediction step
        pf.predict(delta_t, m['previous_velocity'], m['previous_yawrate'], *pos_std)

    # Feed the particles with observations to update the weights.
    observations = [{'x': x, 'y': y}
                    for (x, y) in zip(m['measurement_x'], m['measurement_y'])]
    pf.update_weights(sensor_range, *landmark_std, observations, Map)

    # Resample particles to capture the posterior belief distribution.
    pf.resample()

    # Select the best (highest-weight) particle;
    # we will assume that this represents the vehicle's position.
    particle = pf.get_best_particle()

    # Print for debugging purposes.
    print("[%d] %f, %f" % (count, particle['x'], particle['y']))

    # Collect data for post-mortem plotting.
    graph.append({
        'position': (particle['x'], particle['y']),
        'particles': [(p['x'], p['y']) for p in pf.particles],
    })
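pf.resample() in loops like the one above is commonly implemented as systematic (low-variance) resampling, which draws all particles using a single random offset. A self-contained sketch under that assumption (not necessarily this ParticleFilter's implementation):

import numpy as np

def systematic_resample(weights):
    """Return indices of resampled particles; weights must sum to 1."""
    n = len(weights)
    positions = (np.random.uniform() + np.arange(n)) / n  # one random offset, evenly spaced
    cumulative = np.cumsum(weights)
    cumulative[-1] = 1.0                                  # guard against round-off
    return np.searchsorted(cumulative, positions)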
def draw_pred_gif_old(self, full_getter, p=1.0, use_pf=False, sim_config=None,
                      use_stepper=False, folder_plots=FOLDER_PLOTS, tag=0,
                      normalize=False, nice_start=True):
    ep_images, poses = full_getter()
    ep_images = ep_images[0, ...].reshape((1,) + ep_images.shape[1:])
    ep_images_masked, removed_percepts = self.mask_percepts(ep_images, p,
                                                            return_indices=True)
    # net_preds = self.pred_ae.predict(ep_images_masked[:, 0:-SERIES_SHIFT, ...])
    net_preds = self.pred_ae.predict(ep_images_masked[:, :, ...])

    # Stepper predictions
    stepper_pred = []
    if use_stepper:
        self.stepper.reset_states()
        for t in range(EP_LEN - SERIES_SHIFT):
            im = ep_images_masked[:, t, ...]
            stepper_pred.append(self.stepper.predict(im))

    pf_pred = []
    if use_pf:
        pf = ParticleFilter(sim_config, n_particles=4000, nice_start=nice_start)
        for t in range(EP_LEN - SERIES_SHIFT):
            if not removed_percepts[t]:
                pose = poses[0, t, 0, :]
                pf.update(pose)
                pf.resample()
            # Add noise only if the next percept is available
            # if t+1 < EP_LEN-SERIES_SHIFT and not removed_percepts[t+1]:
            #     pf.resample()
            #     pf.add_noise()
            pf.predict()
            pf_pred.append(pf.draw())

    # Create header with labels
    col = np.zeros((HEADER_HEIGHT, 1))
    labels = []
    labels.append(create_im_label('Ob'))
    labels.append(create_im_label('GT'))
    labels.append(create_im_label('AE'))
    if use_stepper:
        labels.append(create_im_label('ST'))
    if use_pf:
        labels.append(create_im_label('PF'))
    header = [col]
    for label in labels:
        header.append(label)
        header.append(col)
    header = np.concatenate(header, axis=1)

    # Combine predictions
    col = np.ones((IM_HEIGHT, 1))
    frames = []
    for t in range(EP_LEN - SERIES_SHIFT):
        images = []
        images.append(ep_images_masked[0, t + SERIES_SHIFT, :, :, 0])
        images.append(ep_images[0, t + SERIES_SHIFT, :, :, 0])
        if normalize:
            net_preds[0, t, :, :, 0] /= np.max(net_preds[0, t, :, :, 0])
        images.append(net_preds[0, t, :, :, 0])
        if use_stepper:
            if normalize:
                stepper_pred[t][0, :, :, 0] /= np.max(stepper_pred[t][0, :, :, 0])
            images.append(stepper_pred[t][0, :, :, 0])
        if use_pf:
            if normalize:
                pf_pred[t][:, :, 0] /= np.max(pf_pred[t][:, :, 0])
            images.append(pf_pred[t][:, :, 0])

        table = [col]
        for image in images:
            table.append(image)
            table.append(col)
        frame = np.concatenate(table, axis=1)
        # print(frame.shape)
        width = frame.shape[1]
        row = np.ones((1, width))
        frame = np.concatenate([header, frame, row], axis=0)
        frames.append(frame)

    fpath = '{}/predictions-{}.gif'.format(folder_plots, tag)
    imageio.mimsave(fpath, frames)
def draw_pred_gif(self, full_getter, p=1.0, use_pf=False, sim_config=None,
                  use_stepper=False, folder_plots=FOLDER_PLOTS, tag=0,
                  normalize=False):
    ep_images, poses, eps_vels = full_getter()
    ep_images = ep_images[0, ...].reshape((1,) + ep_images.shape[1:])
    ep_images_masked, removed_percepts = self.mask_percepts(ep_images, p,
                                                            return_indices=True)
    # net_preds = self.pred_ae.predict(ep_images_masked[:, 0:-SERIES_SHIFT, ...])
    net_preds = self.pred_ae.predict(ep_images_masked[:, :, ...])

    # Stepper predictions
    # stepper_pred = []
    # if use_stepper:
    #     self.stepper.reset_states()
    #     for t in range(EP_LEN-SERIES_SHIFT):
    #         im = ep_images_masked[:, t, ...]
    #         stepper_pred.append(self.stepper.predict(im))

    pf_pred = []
    if use_pf:
        pf = ParticleFilter(sim_config, n_particles=3000)
        init_poses = poses[0][0]
        init_vels = eps_vels[0][0]
        pf.warm_start(init_poses, init_vels)
        for t in range(EP_LEN - SERIES_SHIFT):
            if not removed_percepts[t]:
                measurements = poses[0][t]
                # print(measurements)
                pf.update(measurements)
                pf.resample()
            pf_pred.append(pf.draw())
            pf.predict()

    # Combine predictions
    percepts = []
    truths = []
    pae_preds = []
    pf_preds = []
    pae_losses = []
    pf_losses = []
    for t in range(EP_LEN - SERIES_SHIFT):
        percepts.append(ep_images_masked[0, t + SERIES_SHIFT, :, :, 0])
        truths.append(ep_images[0, t + SERIES_SHIFT, :, :, 0])
        pae_preds.append(net_preds[0, t, :, :, 0])
        if use_pf:
            pf_preds.append(pf_pred[t][:, :, 0])
        pae_losses.append(np.mean((truths[-1] - pae_preds[-1])**2))
        if use_pf:
            pf_losses.append(np.mean((truths[-1] - pf_preds[-1])**2))
        if normalize:
            pae_preds[-1] /= np.max(pae_preds[-1])
            if use_pf:
                pf_preds[-1] /= np.max(pf_preds[-1])
        # if use_stepper:
        #     if normalize:
        #         stepper_pred[t][0, :, :, 0] /= np.max(stepper_pred[t][0, :, :, 0])
        #     images.append(stepper_pred[t][0, :, :, 0])

    imageio.mimsave('{}/percepts-{}.gif'.format(folder_plots, tag), percepts)
    imageio.mimsave('{}/truths-{}.gif'.format(folder_plots, tag), truths)
    imageio.mimsave('{}/pae_preds-{}.gif'.format(folder_plots, tag), pae_preds)
    if use_pf:
        imageio.mimsave('{}/pf_preds-{}.gif'.format(folder_plots, tag), pf_preds)

    return {'pae_losses': pae_losses, 'pf_losses': pf_losses}
def pf_multi_run_plot(net, sim_conf, fpath='ims/last_test.csv', cuda=True,
                      runs=10, p_mask=1.0, n_particles=100, gif_no=0):
    CONSISTENT_NOISE = False
    RUN_LENGTH = 160
    DURATION = 0.4
    N_SIZE = 256

    pf_loss_ar = np.zeros(RUN_LENGTH)
    pae_loss_ar = np.zeros(RUN_LENGTH)

    for _ in range(runs):
        w = World(**sim_conf)
        pf = ParticleFilter(sim_conf, n_particles=n_particles)
        pos = [body.pos for body in w.bodies]
        vel = [body.vel for body in w.bodies]
        pf.warm_start(pos, vel=vel)

        ims_percept = []
        ims_pf_belief = []
        ims_pf_sample = []
        loss_pf = []
        loss_sample_mse = []
        masked_percepts = np.zeros(RUN_LENGTH) < 1

        for i in range(RUN_LENGTH):
            if i < 8 or np.random.rand() > p_mask:
                measures = [
                    body.pos + sim_conf['measurement_noise'] * np.random.randn(2)
                    for body in w.bodies
                ]
                pf.update(measures)
                masked_percepts[i] = False
                pf.resample()
            else:
                masked_percepts[i] = True

            w.run()
            pf.predict()

            percept = w.draw()
            belief = pf.draw()[:, :, 0]
            sample = pf.parts[np.random.randint(pf.n)].draw()

            loss_pf.append(np.mean((percept - belief)**2))
            loss_sample_mse.append(np.mean((percept - sample)**2))

            ims_percept.append(percept)
            ims_pf_belief.append(belief)
            ims_pf_sample.append(sample)

        # Run predictions with the network
        x = np.array(ims_percept)
        x = x.reshape((1, RUN_LENGTH, 28, 28, 1))
        x[:, masked_percepts, ...] = 0
        x = x.transpose((1, 0, 4, 2, 3))
        x = Variable(torch.FloatTensor(x))
        if cuda:
            net = net.cuda()
            x = x.cuda()
        states = net.bs_prop(x)

        # Create expected observations
        obs_expectation = net.decoder(states)
        obs_expectation = obs_expectation.view(x.size())
        obs_expectation = obs_expectation.data.cpu().numpy()
        obs_expectation = obs_expectation.reshape((RUN_LENGTH, 28, 28))

        # Create observation samples (constant or varying noise across time)
        if CONSISTENT_NOISE is True:
            noise = Variable(torch.FloatTensor(1, N_SIZE))
            noise.data.normal_(0, 1)
            noise = noise.expand(RUN_LENGTH, N_SIZE)
        else:
            noise = Variable(torch.FloatTensor(RUN_LENGTH, N_SIZE))
            noise.data.normal_(0, 1)
        if cuda:
            noise = noise.cuda()

        # states_non_ep = states.unfold(0, 1, (EP_LEN*BATCH_SIZE)//GAN_BATCH_SIZE).squeeze(-1)
        pae_samples = net.G(noise, states.squeeze_(1))
        pae_samples = net.decoder(pae_samples)
        pae_samples = pae_samples.view(x.size())
        pae_samples = pae_samples.data.cpu().numpy()
        pae_samples = pae_samples.reshape((RUN_LENGTH, 28, 28))

        pae_ims = []
        pae_samples_ims = []
        loss_pae = []
        for i in range(RUN_LENGTH):
            pae_ims.append(obs_expectation[i, ...])
            pae_samples_ims.append(pae_samples[i, ...])
            loss_pae.append(
                np.mean((ims_percept[i] - obs_expectation[i, ...])**2))

        pf_loss_ar += np.array(loss_pf)
        pae_loss_ar += np.array(loss_pae)

    ims_ar = np.array(ims_percept)
    av_pixel_intensity = np.mean(ims_ar)
    baseline_level = np.mean((ims_ar - av_pixel_intensity)**2)
    baseline = np.ones(len(loss_pf)) * baseline_level
    # print("Uninformative baseline level at {}".format(baseline_level))

    pf_loss_ar /= runs
    pae_loss_ar /= runs
    baseline_ar = baseline

    df = pd.DataFrame({
        'pf loss': pf_loss_ar,
        'pae loss': pae_loss_ar,
        'baseline': baseline_ar
    })
    df.to_csv(fpath)

    plt.plot(pf_loss_ar)
    plt.plot(pae_loss_ar)
    plt.plot(baseline, 'g--')
    plt.title("Image reconstruction loss vs timestep")
    plt.ylabel("loss (MSE)")
    plt.xlabel("timestep")
    plt.legend(["PF", "PAE", "baseline"], loc=4)
    plt.savefig("ims/{}-plot.png".format(gif_no))
    # plt.show()
    plt.clf()

    imageio.mimsave("ims/{}-percept.gif".format(gif_no), ims_percept,
                    duration=DURATION)
    imageio.mimsave("ims/{}-pf_belief.gif".format(gif_no), ims_pf_belief,
                    duration=DURATION)
    imageio.mimsave("ims/{}-pf_sample.gif".format(gif_no), ims_pf_sample,
                    duration=DURATION)
    imageio.mimsave("ims/{}-pae_belief.gif".format(gif_no), pae_ims,
                    duration=DURATION)
    imageio.mimsave("ims/{}-pae_sample.gif".format(gif_no), pae_samples_ims,
                    duration=DURATION)

    page = """
    <html>
    <body>
    Configuration: {1}
    <br>
    <img src="{0}-plot.png" align="center">
    <table>
    <tr>
    <th>Ground truth</th>
    <th>Particle Filter</th>
    <th>PF Sample</th>
    <th>Predictive AE</th>
    <th>PAE Sample</th>
    </tr>
    <tr>
    <td><img src="{0}-percept.gif" width="140"></td>
    <td><img src="{0}-pf_belief.gif" width="140"></td>
    <td><img src="{0}-pf_sample.gif" width="140"></td>
    <td><img src="{0}-pae_belief.gif" width="140"></td>
    <td><img src="{0}-pae_sample.gif" width="140"></td>
    </tr>
    </table>
    </body>
    </html>
    """.format(gif_no, sim_conf)

    with open("ims/page-{}.html".format(gif_no), 'w') as f:
        f.write(page)
class LocalizationNode(object):
    '''
    Class to hold all ROS related transactions to use split and merge algorithm.
    '''

    #===========================================================================
    def __init__(self, odom_lin_sigma=0.025, odom_ang_sigma=np.deg2rad(2),
                 meas_rng_noise=0.2, meas_ang_noise=np.deg2rad(10),
                 x_init=0, y_init=0, theta_init=0):
        '''
        Initializes publishers, subscribers and the particle filter.
        '''
        # Threads
        self.lock = threading.RLock()

        # Publishers
        self.pub_map = rospy.Publisher("map", Marker, queue_size=2)
        self.pub_lines = rospy.Publisher("lines", Marker, queue_size=2)
        self.pub_lines_mean = rospy.Publisher("lines_mean", Marker, queue_size=2)
        self.pub_particles = rospy.Publisher("particles", PoseArray, queue_size=2)
        self.pub_big_particle = rospy.Publisher("mean_particle", PoseStamped, queue_size=2)
        self.pub_odom = rospy.Publisher("mean_particle_odom", Odometry, queue_size=2)
        self.pub_wei = rospy.Publisher("weights", MarkerArray, queue_size=2)

        # Subscribers
        self.sub_scan = rospy.Subscriber("lines", Marker, self.laser_callback)
        self.sub_odom = rospy.Subscriber("odom", Odometry, self.odom_callback)

        # TF
        self.tfBroad = tf.TransformBroadcaster()

        # Incremental odometry
        self.last_odom = None
        self.odom = None

        # Flags
        self.new_odom = False
        self.new_laser = False
        self.pub = False
        self.time = None
        self.lines = None
        self.i = 0

        # Particle filter
        self.part_filter = ParticleFilter(get_dataset3_map(), 500,
                                          odom_lin_sigma, odom_ang_sigma,
                                          meas_rng_noise, meas_ang_noise,
                                          x_init, y_init, theta_init)

    #===========================================================================
    def odom_callback(self, msg):
        '''
        Publishes a tf based on the odometry of the robot and calculates the
        incremental odometry as seen from the vehicle frame.
        '''
        # Lock thread
        self.lock.acquire()

        # Save time
        self.time = msg.header.stamp

        # Translation
        trans = (msg.pose.pose.position.x,
                 msg.pose.pose.position.y,
                 msg.pose.pose.position.z)

        # Rotation
        rot = (msg.pose.pose.orientation.x,
               msg.pose.pose.orientation.y,
               msg.pose.pose.orientation.z,
               msg.pose.pose.orientation.w)

        # Publish transform
        self.tfBroad.sendTransform(translation=trans,
                                   rotation=rot,
                                   time=msg.header.stamp,
                                   child='/base_footprint',
                                   parent='/world')

        # Incremental odometry
        if self.last_odom is not None:
            # Increment computation
            delta_x = msg.pose.pose.position.x - self.last_odom.pose.pose.position.x
            delta_y = msg.pose.pose.position.y - self.last_odom.pose.pose.position.y
            yaw = yaw_from_quaternion(msg.pose.pose.orientation)
            lyaw = yaw_from_quaternion(self.last_odom.pose.pose.orientation)

            # Odometry seen from vehicle frame
            self.uk = np.array([delta_x * np.cos(lyaw) + delta_y * np.sin(lyaw),
                                -delta_x * np.sin(lyaw) + delta_y * np.cos(lyaw),
                                angle_wrap(yaw - lyaw)])

            # Flag available
            self.cur_odom = msg
            self.new_odom = True

        # Save initial odometry for increment
        else:
            self.last_odom = msg

        # Unlock thread
        self.lock.release()

    #===========================================================================
    def laser_callback(self, msg):
        '''
        Reads lines coming from split and merge node.
        '''
        # Lock thread
        self.lock.acquire()

        # Save time
        self.time = msg.header.stamp
        self.lines = None

        # Assertion
        assert msg.type == Marker.LINE_LIST

        if msg.ns == 'scan_line':
            # Retrieve lines from split and merge
            line = list()
            for point in msg.points:
                line.append(point.x)
                line.append(point.y)
            self.lines = np.array(line).reshape((-1, 4))

            # Have valid points
            if self.lines.shape[0] > 0:
                # Flag
                self.new_laser = True
            else:
                self.lines = None

        # Unlock thread
        self.lock.release()

    #===========================================================================
    def iterate(self):
        '''
        Main loop of the filter.
        '''
        lines = None

        # Prediction
        if self.new_odom:
            # Copy safely
            self.lock.acquire()
            uk = self.uk.copy()
            self.last_odom = self.cur_odom
            self.new_odom = False
            self.pub = True
            self.lock.release()

            # Predict filter
            self.part_filter.predict(uk)

        # Weighting and resampling
        if self.new_laser and self.lines is not None:
            # Copy safely
            self.lock.acquire()
            lines = self.lines.copy()
            self.new_laser = False
            self.pub = True
            self.lock.release()

            # Update and resample filter
            self.part_filter.weight(lines)
            if self.part_filter.moving and self.part_filter.n_eff < self.part_filter.num / 2.0:
                self.part_filter.resample()

        # Publish results
        if self.pub:
            self.publish_results(lines)
            self.pub = False

    #===========================================================================
    def publish_results(self, lines):
        '''
        Publishes all results from the filter.
        '''
        if self.time is not None:
            time = rospy.Time.now()

            # Map of the room
            map_lines = get_dataset3_map()
            publish_lines(map_lines, self.pub_map, frame='/world', ns='map',
                          color=(0, 1, 0))

            # Particles and biggest weighted particle
            msg, msg_mean, msg_odom, trans, rot, msg_wei = get_particle_msgs(
                self.part_filter, time)
            self.pub_particles.publish(msg)
            self.pub_big_particle.publish(msg_mean)
            self.pub_odom.publish(msg_odom)
            self.pub_wei.publish(msg_wei)
            self.tfBroad.sendTransform(translation=trans,
                                       rotation=rot,
                                       time=time,
                                       parent='world',
                                       child='mean_particle')

            # Publish scanned lines
            if lines is not None:
                publish_lines(lines, self.pub_lines_mean, frame='mean_particle',
                              time=time, ns='scan_lines_mean', color=(0, 0, 1))
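The n_eff < num/2 test in iterate() is the standard effective-sample-size criterion for deciding when to resample. Assuming normalized weights, it reduces to the following (a sketch, not this ParticleFilter's code):

import numpy as np

def effective_sample_size(weights):
    """N_eff = 1 / sum(w_i^2); equals the particle count for uniform weights."""
    w = np.asarray(weights, dtype=float)
    w = w / w.sum()
    return 1.0 / np.sum(w**2)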
def run_SLAM(ds, iter_step, N, ocpy_odds, empty_odds):
    '''
    Implement SLAM.

    Args:
        ds (int): dataset
        iter_step (int): step for the time stamps
        N (int): number of particles
        ocpy_odds (float): occupancy log odds
        empty_odds (float): empty log odds
    '''
    # Load data
    data = DataLoader(ds)
    noise = np.array([0.25, 0.25, 0.1 * np.pi / 180])
    res = 0.1
    grid_size = int(80 / res)
    lidar_rng = data.lidar_ranges

    # Encoder trajectory in meters
    encoder_x, encoder_y, theta = data.encoder_x, data.encoder_y, data.encoder_theta

    # Number of data points
    num = max(encoder_x.shape)

    # Current pose in pixels (/res)
    encoder_pose = np.hstack((encoder_x / res, encoder_y / res, theta))

    # Initialize particle filter
    pf = ParticleFilter(ds, grid_size, encoder_pose, lidar_rng, noise,
                        iter_step, N, ocpy_odds, empty_odds)

    # Iterate over trajectory points
    for i in range(0, num, iter_step):
        print(f'\ndataset:{ds}|N:{N}|ocpy_odds:{ocpy_odds:.2f}|Progress:{i/num*100:.2f}%')
        pf.predict(encoder_pose[i], lidar_rng[:, i])
        pf.update(encoder_pose[i], lidar_rng[:, i], i)

        if i % 460 == 0:
            # Save the SLAM progress
            fig = pf.grid.copy()
            fig[pf.grid > 0] = 2    # occupied
            fig[pf.grid == 0] = 0   # unknown
            fig[pf.grid < 0] = 0.5  # free
            plt.figure()
            plt.axis('off')
            plt.imshow(fig, cmap='gray')
            plt.savefig(
                f'img/result/ds{ds}_lidar_step{iter_step}_n_{noise[0]}_ang_{noise[2]:.5f}_log_{ocpy_odds:.2f}_N_{N}_i_{i}.png'
            )

    # Plot
    fig = pf.grid.copy()
    fig[pf.grid > 0] = 2
    fig[pf.grid == 0] = 0
    fig[pf.grid < 0] = 0.5

    # Lidar only
    plt.figure()
    plt.axis('off')
    plt.imshow(fig, cmap='gray')
    plt.savefig(
        f'img/result/ds{ds}_lidar_step{iter_step}_n_{noise[0]}_ang_{noise[2]:.5f}_log_{ocpy_odds:.2f}_N_{N}.png'
    )

    # Lidar with trajectory
    plt.figure()
    plt.axis('off')
    plt.plot(pf.slam_loc[:, 1], pf.slam_loc[:, 0], 'r-')
    plt.plot(pf.walk_loc[:, 1], pf.walk_loc[:, 0], 'b-')
    plt.imshow(fig, cmap='gray')

    # Texture map
    plt.figure()
    plt.axis('off')
    plt.imshow(pf.texture_map.astype(np.uint8))
    plt.savefig(f'img/result/ds{ds}_texture_step{iter_step}_n_{noise[0]}.png')
    plt.show()
    plt.close()
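The sign thresholds on pf.grid above imply the map stores occupancy log odds (positive = occupied, negative = free, zero = unknown). A minimal sketch of the per-cell update such a grid assumes; the clamp value and the index-array interface are assumptions, not pf's actual API:

import numpy as np

def update_log_odds(grid, hit_cells, free_cells, ocpy_odds, empty_odds, clamp=30.0):
    """Add log odds for observed cells and clamp to [-clamp, clamp]."""
    grid[hit_cells] += ocpy_odds    # cells where a lidar beam ended
    grid[free_cells] += empty_odds  # cells the beam passed through (negative increment)
    np.clip(grid, -clamp, clamp, out=grid)  # keep cells recoverable
    return grid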
np.random.seed(0)
(A, H, Q, R) = create_model_parameters()

simulation_time = 20
n_particles = 5000

# Initial state
x0 = np.array([0, 0.1, 0, 0.1])
state, measurements = simulate_system(simulation_time, x0)

particle_filter = ParticleFilter(A, H, Q, R, x0, n_particles)
estimated_state = np.zeros((simulation_time, 4))

for k in range(simulation_time):
    particle_filter.predict()
    particle_filter.update_particle_weights(measurements[k, :])
    estimated_state[k, :] = particle_filter.calculate_estimate()
    particle_filter.resample()

# Plot X-Y
plt.figure()
plt.plot(state[:, 0], state[:, 2], '-bo')
plt.plot(estimated_state[:, 0], estimated_state[:, 2], '-ko')
plt.plot(measurements[:, 0], measurements[:, 1], ':rx')
plt.xlabel('x [m]')
plt.ylabel('y [m]')
plt.legend(['true state', 'inferred state', 'observed measurement'])
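create_model_parameters() is not shown; a plausible constant-velocity definition consistent with the 4-D state [x, vx, y, vy] and 2-D position measurements used above (an assumption, not the original code; the noise magnitudes are illustrative):

import numpy as np

def create_model_parameters(dt=1.0, sigma_a=0.1, sigma_z=0.5):
    block = np.array([[1.0, dt], [0.0, 1.0]])
    A = np.block([[block, np.zeros((2, 2))],
                  [np.zeros((2, 2)), block]])       # constant-velocity transition
    H = np.array([[1.0, 0, 0, 0],
                  [0, 0, 1.0, 0]])                  # observe x and y positions only
    Q = sigma_a**2 * np.eye(4)                      # simplified isotropic process noise
    R = sigma_z**2 * np.eye(2)                      # measurement noise covariance
    return A, H, Q, R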
class TJ2RomiPfNode:
    def __init__(self):
        self.node_name = "tj2_romi_pf"
        rospy.init_node(self.node_name,
                        # disable_signals=True
                        # log_level=rospy.DEBUG
                        )
        # rospy.on_shutdown(self.shutdown_hook)

        self.map_frame = rospy.get_param("~map_frame", "map")
        self.base_frame = rospy.get_param("~base_frame", "base_link")
        self.map_service_name = rospy.get_param("~static_map", "static_map")

        self.show_particles = rospy.get_param("~publish_particles", True)

        self.initial_range = rospy.get_param("~initial_range", None)
        self.input_std = rospy.get_param("~input_std", None)
        self.meas_std_val = rospy.get_param("~meas_std_val", 0.007)
        self.num_particles = rospy.get_param("~num_particles", 100)

        if self.initial_range is None:
            self.initial_range = [1.0, 1.0, 1.0]
        if self.input_std is None:
            self.input_std = [0.007, 0.007]
        self.input_vector = np.zeros(len(self.input_std))

        self.pf = ParticleFilter(self.num_particles, self.meas_std_val,
                                 self.input_std)
        self.prev_pf_time = rospy.Time.now().to_sec()

        self.map_raytracer = MapRaytracer(self.base_frame, "ultrasonic1", "ultrasonic2")

        self.ultrasonic1_sub = rospy.Subscriber("ultrasonic1", Float64,
                                                self.ultrasonic1_callback, queue_size=25)
        self.ultrasonic2_sub = rospy.Subscriber("ultrasonic2", Float64,
                                                self.ultrasonic2_callback, queue_size=25)
        self.odom_sub = rospy.Subscriber("odom", Odometry, self.odom_callback,
                                         queue_size=25)

        self.particles_pub = rospy.Publisher("pf_particles", PoseArray, queue_size=5)

        self.broadcaster = tf2_ros.TransformBroadcaster()

        self.get_map = rospy.ServiceProxy(self.map_service_name, GetMap)
        rospy.loginfo("Waiting for service %s" % self.map_service_name)
        rospy.wait_for_service(self.map_service_name)
        self.map_msg = self.get_map().map
        self.map_raytracer.set_map(self.map_msg)

        rospy.loginfo("%s init done" % self.node_name)

    def ultrasonic1_callback(self, msg):
        self.pf.update(msg.data, "ultrasonic1")

    def ultrasonic2_callback(self, msg):
        self.pf.update(msg.data, "ultrasonic2")

    def odom_callback(self, msg):
        current_time = msg.header.stamp.to_sec()
        dt = current_time - self.prev_pf_time
        self.prev_pf_time = current_time

        self.input_vector[0] = -msg.twist.twist.linear.x
        self.input_vector[1] = -msg.twist.twist.angular.z

        self.pf.predict(self.input_vector, dt)

    def publish_all_pose(self):
        mean = self.pf.mean()
        # TODO: first cancel out odom transform, then apply map transform
        msg = TransformStamped()
        msg.header.stamp = rospy.Time.now()
        msg.header.frame_id = self.map_frame
        msg.child_frame_id = self.base_frame
        msg.transform.translation.x = mean[0]
        msg.transform.translation.y = mean[1]
        msg.transform.translation.z = mean[2]
        msg.transform.rotation.w = 1.0
        self.broadcaster.sendTransform(msg)

    def publish_particles(self):
        particles_msg = PoseArray()
        particles_msg.header.frame_id = self.map_frame
        particles_msg.header.stamp = rospy.Time.now()

        for particle in self.pf.particles:
            pose_msg = Pose()
            pose_msg.position.x = particle[0]
            pose_msg.position.y = particle[1]
            pose_msg.position.z = particle[2]
            particles_msg.poses.append(pose_msg)

        self.particles_pub.publish(particles_msg)

    def run(self):
        rate = rospy.Rate(30.0)
        while not rospy.is_shutdown():
            rate.sleep()
            self.pf.check_resample()
            self.publish_all_pose()
            if self.show_particles:
                self.publish_particles()
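pf.predict(input_vector, dt) above consumes a [linear, angular] velocity command. A minimal sketch of the noisy unicycle motion model such a predict step typically applies to particles stored as [x, y, theta] rows (an assumed implementation, not this ParticleFilter's actual code):

import numpy as np

def predict_particles(particles, u, dt, input_std):
    """Propagate particles with velocity command u = [v, w] plus Gaussian noise."""
    n = len(particles)
    v = u[0] + np.random.randn(n) * input_std[0]   # per-particle noisy linear velocity
    w = u[1] + np.random.randn(n) * input_std[1]   # per-particle noisy angular velocity
    particles[:, 0] += v * dt * np.cos(particles[:, 2])
    particles[:, 1] += v * dt * np.sin(particles[:, 2])
    particles[:, 2] += w * dt
    return particles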
class LocalizationNode(object):
    '''
    Class to hold all ROS related transactions to use split and merge algorithm.
    '''

    #===========================================================================
    def __init__(self, odom_lin_sigma=0.025, odom_ang_sigma=np.deg2rad(2),
                 meas_rng_noise=0.2, meas_ang_noise=np.deg2rad(10)):
        '''
        Initializes publishers, subscribers and the particle filter.
        '''
        # Publishers
        self.pub_lines = rospy.Publisher("lines", Marker)
        self.pub_particles = rospy.Publisher("particles", PoseArray)
        self.pub_big_particle = rospy.Publisher("mean_particle", PoseStamped)
        self.pub_odom = rospy.Publisher("mean_particle_odom", Odometry)

        # Subscribers
        self.sub_scan = rospy.Subscriber("scan", LaserScan, self.laser_callback)
        self.sub_odom = rospy.Subscriber("odom", Odometry, self.odom_callback)

        # TF
        self.tfBroad = tf.TransformBroadcaster()

        # Incremental odometry
        self.last_odom = None
        self.odom = None

        # Flags
        self.new_odom = False
        self.new_laser = False
        self.pub = False

        # Particle filter
        self.part_filter = ParticleFilter(get_map(), 500, odom_lin_sigma,
                                          odom_ang_sigma, meas_rng_noise,
                                          meas_ang_noise)

    #===========================================================================
    def odom_callback(self, msg):
        '''
        Publishes a tf based on the odometry of the robot and calculates the
        incremental odometry as seen from the vehicle frame.
        '''
        # Save time
        self.time = msg.header.stamp

        # Translation
        trans = (msg.pose.pose.position.x,
                 msg.pose.pose.position.y,
                 msg.pose.pose.position.z)

        # Rotation
        rot = (msg.pose.pose.orientation.x,
               msg.pose.pose.orientation.y,
               msg.pose.pose.orientation.z,
               msg.pose.pose.orientation.w)

        # Publish transform
        self.tfBroad.sendTransform(translation=trans,
                                   rotation=rot,
                                   time=msg.header.stamp,
                                   child='/openni_depth_frame',
                                   parent='/world')

        # Incremental odometry
        if self.last_odom is not None:
            # Increment computation
            delta_x = msg.pose.pose.position.x - self.last_odom.pose.pose.position.x
            delta_y = msg.pose.pose.position.y - self.last_odom.pose.pose.position.y
            yaw = yaw_from_quaternion(msg.pose.pose.orientation)
            lyaw = yaw_from_quaternion(self.last_odom.pose.pose.orientation)

            # Odometry seen from vehicle frame
            self.uk = np.array([delta_x * np.cos(lyaw) + delta_y * np.sin(lyaw),
                                -delta_x * np.sin(lyaw) + delta_y * np.cos(lyaw),
                                angle_wrap(yaw - lyaw)])

            # Flag
            self.new_odom = True

        # For next loop
        self.last_odom = msg

    #===========================================================================
    def laser_callback(self, msg):
        '''
        Function called each time a LaserScan message with topic "scan" arrives.
        '''
        # Save time
        self.time = msg.header.stamp

        # Project LaserScan to points in space
        rng = np.array(msg.ranges)
        ang = np.linspace(msg.angle_min, msg.angle_max, len(msg.ranges))
        points = np.vstack((rng * np.cos(ang), rng * np.sin(ang)))

        # Filter long ranges
        cond = rng < msg.range_max
        points = points[:, cond]

        # Use split and merge to obtain lines and publish
        self.lines = splitandmerge(points, split_thres=0.1, inter_thres=0.3,
                                   min_points=6, dist_thres=0.12,
                                   ang_thres=np.deg2rad(10))

        # Have valid points
        if self.lines is not None:
            # Publish results
            publish_lines(self.lines, self.pub_lines, frame=msg.header.frame_id,
                          time=msg.header.stamp, ns='scan_lines', color=(1, 0, 0))
            publish_lines(self.lines, self.pub_lines, frame='/mean_particle',
                          time=msg.header.stamp, ns='scan_lines_mean',
                          color=(0, 0, 1))

            # Flag
            self.new_laser = True

    #===========================================================================
    def iterate(self):
        '''
        Main loop of the filter.
        '''
        # Prediction
        if self.new_odom:
            self.part_filter.predict(self.uk.copy())
            self.new_odom = False
            self.pub = True

        # Weighting and resampling
        if self.new_laser:
            self.part_filter.weight(self.lines.copy())
            self.part_filter.resample()
            self.new_laser = False
            self.pub = True

        # Publish results
        if self.pub:
            self.publish_results()
            self.pub = False

    #===========================================================================
    def publish_results(self):
        '''
        Publishes all results from the filter.
        '''
        # Map of the room
        map_lines = get_map()
        publish_lines(map_lines, self.pub_lines, frame='/world', ns='map',
                      color=(0, 1, 0))

        # Particles and biggest weighted particle
        msg, msg_mean, msg_odom, trans, rot = get_particle_msgs(self.part_filter)
        self.pub_particles.publish(msg)
        self.pub_big_particle.publish(msg_mean)
        self.pub_odom.publish(msg_odom)
        self.tfBroad.sendTransform(translation=trans,
                                   rotation=rot,
                                   time=self.time,
                                   child='/mean_particle',
                                   parent='/world')
class ParticleFilterNode(object):
    """ The class that represents a Particle Filter ROS Node """

    def __init__(self):
        rospy.init_node('pf')
        real_robot = False

        # Create instances of two helper objects that are provided to you
        # as part of the project
        self.particle_filter = ParticleFilter()
        self.occupancy_field = OccupancyField()
        self.TFHelper = TFHelper()
        self.sensor_model = SensorModel(model_noise_rate=0.5,
                                        odometry_noise_rate=0.15,
                                        world_model=self.occupancy_field,
                                        TFHelper=self.TFHelper)

        self.position_delta = None  # Pose, delta from current to previous odometry reading
        self.last_scan = None       # list of ranges
        self.odom = None            # Pose, current odometry reading

        self.x_y_spread = 0.3  # Spread constant for x-y initialization of particles
        self.z_spread = 0.2    # Spread constant for z initialization of particles

        self.n_particles = 150  # number of particles

        # pose_listener responds to selection of a new approximate robot
        # location (for instance using rviz)
        rospy.Subscriber("initialpose", PoseWithCovarianceStamped,
                         self.update_initial_pose)
        rospy.Subscriber("odom", Odometry, self.update_position)
        rospy.Subscriber("stable_scan", LaserScan, self.update_scan)

        # Publisher for the particle cloud for visualizing in rviz.
        self.particle_pub = rospy.Publisher("particlecloud", PoseArray,
                                            queue_size=10)

    def update_scan(self, msg):
        """Updates the scan to the most recent reading"""
        self.last_scan = [(i, msg.ranges[i]) for i in range(len(msg.ranges))]

    def update_position(self, msg):
        """Calculate delta in position since last odometry reading,
        update current odometry reading"""
        self.position_delta = Vector3()

        this_pos = self.TFHelper.convert_pose_to_xy_and_theta(msg.pose.pose)
        if self.odom is not None:
            prev_pos = self.TFHelper.convert_pose_to_xy_and_theta(self.odom)
            self.position_delta.x = this_pos.x - prev_pos.x
            self.position_delta.y = this_pos.y - prev_pos.y
            self.position_delta.z = self.TFHelper.angle_diff(this_pos.z, prev_pos.z)
        else:
            self.position_delta = this_pos

        self.odom = msg.pose.pose
        self.particle_filter.predict(self.position_delta)

    def reinitialize_particles(self, initial_pose):
        """Reinitialize particles when a new initial pose is given."""
        self.particle_filter.particles = []
        for i in range(self.n_particles):
            pos = Vector3()
            initial_pose_trans = self.TFHelper.convert_pose_to_xy_and_theta(initial_pose)
            pos.x = initial_pose_trans.x + (2 * randn() - 1) * self.x_y_spread
            pos.y = initial_pose_trans.y + (2 * randn() - 1) * self.x_y_spread
            pos.z = initial_pose_trans.z + (2 * randn() - 1) * self.z_spread

            new_particle = Particle(position=pos,
                                    weight=1 / float(self.n_particles),
                                    sensor_model=self.sensor_model)
            self.particle_filter.add_particle(new_particle)

    def update_initial_pose(self, msg):
        """ Callback function to handle re-initializing the particle filter
        based on a pose estimate. These pose estimates could be generated
        by another ROS Node or could come from the rviz GUI """
        self.reinitialize_particles(msg.pose.pose)
        self.TFHelper.fix_map_to_odom_transform(msg.pose.pose, msg.header.stamp)

    def publish_particles(self):
        """ Extract position from each particle, transform into pose,
        and publish them as PoseArray"""
        pose_array = PoseArray()
        for p in self.particle_filter.particles:
            pose_array.poses.append(self.TFHelper.convert_vector3_to_pose(p.position))
        pose_array.header.frame_id = "map"
        self.particle_pub.publish(pose_array)

    def run(self):
        r = rospy.Rate(5)
        while not rospy.is_shutdown():
            # In the main loop all we do is continuously broadcast the latest
            # map-to-odom transform
            self.TFHelper.send_last_map_to_odom_transform()
            if len(self.particle_filter.particles) > 0:
                if self.last_scan is not None:
                    self.particle_filter.integrate_observation(self.last_scan)
                    self.last_scan = None
                    self.particle_filter.normalize()
                    self.publish_particles()
                    self.particle_filter.resample()
            r.sleep()