def particle_SLAM(src_dir, dataset_id=0, split_name='train', running_mode='test_SLAM', log_dir='logs'):
    '''Your main code is here.
    '''
    ###############################################################################################
    #* Student's input
    #TODO: change the resolution of the map - the distance between two cells (meters)
    map_resolution = 0.05

    # Number of particles
    #TODO: change the number of particles
    num_p = 100

    #TODO: change the process' covariance matrix
    mov_cov = np.array([[1e-8, 0, 0],
                        [0, 1e-8, 0],
                        [0, 0, 1e-8]])

    #TODO: set a threshold value of probability to consider a map's cell occupied
    p_thresh = 0.6

    #TODO: change the threshold of the percentage of effective particles to decide resampling
    percent_eff_p_thresh = 0.5
    #*End student's input
    ###############################################################################################

    # Test prediction
    if running_mode == 'test_prediction':
        test_prediction(src_dir, dataset_id, split_name, log_dir)
        exit(1)
    if running_mode == 'test_update':
        test_update(src_dir, dataset_id, split_name, log_dir, map_resolution)
        exit(1)

    # Test SLAM
    # Create a SLAM instance
    slam_inc = SLAM()
    # Optionally resume from a previous run:
    # with open('SLAM_train_1.pkl', 'rb') as pickle_file:
    #     slam_inc = pickle.load(pickle_file)

    # Cache for Bresenham ray-tracing results, reused across time steps
    slam_inc.psx = 0
    slam_inc.psy = 0
    slam_inc.bresenDict = {}
    slam_inc.dict_use_count = 0

    # Read data
    slam_inc._read_data(src_dir, dataset_id, split_name)
    num_steps = slam_inc.num_data_

    # Characterize the sensors' specifications
    slam_inc._characterize_sensor_specs(p_thresh)

    # Initialize particles
    # Note: this overrides the covariance set in the student's input block above;
    # np.diag([0.1, 0.1, 0.1])/1000 was the best value found during tuning.
    mov_cov = np.diag([0.1, 0.1, 0.1]) / 1000
    slam_inc._init_particles(num_p, mov_cov, percent_eff_p_thresh=percent_eff_p_thresh)

    # Initialize the map
    slam_inc._init_map(map_resolution)

    # Starting time index
    t0 = 0

    # Initialize the particles' poses using the lidar measurement at the starting time
    slam_inc.particles_[:, 0] = slam_inc.lidar_.data_[t0]['pose'][0]

    # Indicator that the map has not been built yet
    build_first_map = False

    # Iterate over the next time steps
    all_particles = deepcopy(slam_inc.particles_)
    num_resamples = 0
    for t in tqdm.tqdm(range(t0, num_steps)):
        # Ignore lidar scans that are obtained before the first IMU reading
        if slam_inc.lidar_.data_[t]['t'][0][0] - slam_inc.joints_.data_['ts'][0][0] < 0:
            continue

        if not build_first_map:
            slam_inc._build_first_map(t)
            build_first_map = True
            continue

        # Prediction
        slam_inc._predict(t)

        # Update
        slam_inc._update(t, t0=t0, fig='off')

        # Resample particles if necessary: num_eff = 1 / sum(w_i^2)
        num_eff = 1.0 / np.dot(slam_inc.weights_, slam_inc.weights_)
        logging.debug('>> Number of effective particles: %.2f' % num_eff)
        if num_eff < slam_inc.percent_eff_p_thresh_ * slam_inc.num_p_:
            num_resamples += 1
            logging.debug('>> Resampling since num_eff < threshold={0} | Resampling times/t = {1}/{2}'.format(
                slam_inc.percent_eff_p_thresh_ * slam_inc.num_p_, num_resamples, t - t0 + 1))
            [slam_inc.particles_, slam_inc.weights_] = prob.stratified_resampling(
                slam_inc.particles_, slam_inc.weights_, slam_inc.num_p_)

        # Periodically save the result and plot the estimated trajectory
        if (t - t0 + 1) % 1000 == 0 or t == num_steps - 1:
            print(t - t0)
            print("use dict for ", slam_inc.dict_use_count)

            # Save the result
            log_file = log_dir + '/SLAM_' + split_name + '_' + str(dataset_id) + '.pkl'
            try:
                with open(log_file, 'wb') as f:
                    pickle.dump(slam_inc, f, pickle.HIGHEST_PROTOCOL)
                print(">> Save the result to: %s" % log_file)
            except Exception as e:
                print('Unable to write data to', log_file, ':', e)
                raise

            # Generate map + trajectory
            # Tuning log (process covariance): values from np.diag([0.1, 0.1, 1])/1e5 up to /1e2
            # were tried; np.diag([0.1, 0.1, 0.1])/1000 with noise only, lower resampling and a
            # vectorized update worked best. Later runs resumed from saved pickles (t = 19200-19800).
            MAP_2_display = genMap(slam_inc, t)
            MAP_fig_path = log_dir + '/processing_SLAM_map_' + split_name + '_' + str(dataset_id) + '.jpg'
            cv2.imwrite(MAP_fig_path, MAP_2_display)
            plt.title('Estimated Map at time stamp %d/%d' % (t, num_steps - t0 + 1))
            plt.imshow(MAP_2_display)
            plt.pause(0.01)
            logging.debug(">> Save %s" % MAP_fig_path)

    # Return best_p_, a 3 x num_data array holding the best particle's pose at every time stamp
    return slam_inc.best_p_
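# The resampling step above delegates to prob.stratified_resampling, whose
# implementation is not shown in this file. Below is a minimal sketch of
# stratified resampling, assuming `particles` is a 3 x N array and `weights`
# is a length-N array summing to one; the actual prob.stratified_resampling
# may differ in details.
def stratified_resampling_sketch(particles, weights, num_p):
    # Draw one uniform sample per stratum: u_k in [k/N, (k+1)/N)
    u = (np.arange(num_p) + np.random.uniform(size=num_p)) / num_p
    # Map each u_k to the particle whose cumulative-weight interval covers it
    idx = np.searchsorted(np.cumsum(weights), u)
    idx = np.minimum(idx, num_p - 1)  # guard against floating-point overshoot
    # Return the resampled particle set with uniform weights
    return particles[:, idx], np.ones(num_p) / num_p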
        # Tail of the map-update routine: mark the free-space polygon spanned by
        # the lidar end-point cells and the robot pose cell with the log-odds of a miss.
        R_pose_cells = self.best_p_indices_[:, t]
        x = np.append(map_cells[0], R_pose_cells[0])
        y = np.append(map_cells[1], R_pose_cells[1])
        # np.int is removed in recent NumPy; use an explicit integer dtype
        contour_pts = np.array([y, x]).T.astype(np.int32)
        mask = np.zeros_like(self.log_odds_)
        cv2.drawContours(image=mask, contours=[contour_pts], contourIdx=0,
                         color=np.log(self.p_false_), thickness=cv2.FILLED)
        self.log_odds_ += mask
        MAP['map'] = self.log_odds_
        self.MAP_ = MAP
        return MAP


if __name__ == "__main__":
    slam_inc = SLAM()
    slam_inc._read_data('data/train', 0, 'train')
    slam_inc._init_particles(num_p=100)
    slam_inc._init_map()
    slam_inc._build_first_map()
    MAP = gf.genMap(slam_inc, end_t=1)
    plt.imshow(MAP)
    plt.show()
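# For reference, occupancy can be recovered from the accumulated log-odds grid
# by applying the logistic function and thresholding. This is an illustrative
# helper, not part of the SLAM class; `p_thresh` is assumed to match the
# occupancy threshold chosen in particle_SLAM.
def logodds_to_occupancy(log_odds, p_thresh=0.6):
    # A cell with log-odds l has occupancy probability p = 1 / (1 + exp(-l))
    p = 1.0 / (1.0 + np.exp(-log_odds))
    # 1 = occupied, 0 = free or unknown
    return (p > p_thresh).astype(np.uint8)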
def particle_SLAM(src_dir, dataset_id=0, split_name='train', running_mode='test_SLAM', log_dir='logs'):
    '''Your main code is here.
    '''
    ###############################################################################################
    #* Student's input
    #TODO: change the resolution of the map - the distance between two cells (meters)
    map_resolution = 0.05

    # Number of particles
    #TODO: change the number of particles
    num_p = 100

    #TODO: change the process' covariance matrix
    mov_cov = np.array([[1e-7, 0, 0],
                        [0, 1e-7, 0],
                        [0, 0, 1e-7]])

    #TODO: set a threshold value of probability to consider a map's cell occupied
    p_thresh = 0.7

    #TODO: change the threshold of the percentage of effective particles to decide resampling
    percent_eff_p_thresh = 0.05
    #*End student's input
    ###############################################################################################

    # Test prediction
    if running_mode == 'test_prediction':
        test_prediction(src_dir, dataset_id, split_name, log_dir)
        exit(1)
    if running_mode == 'test_update':
        test_update(src_dir, dataset_id, split_name, log_dir, map_resolution)
        exit(1)

    # Test SLAM
    # Create a SLAM instance
    slam_inc = SLAM()

    # Read data
    slam_inc._read_data(src_dir, dataset_id, split_name)
    num_steps = slam_inc.num_data_

    # Characterize the sensors' specifications
    slam_inc._characterize_sensor_specs(p_thresh)

    # Initialize particles
    slam_inc._init_particles(num_p, mov_cov, percent_eff_p_thresh=percent_eff_p_thresh)

    # Initialize the map
    slam_inc._init_map(map_resolution)

    # Starting time index
    t0 = 0

    # Initialize the particles' poses using the lidar measurement at the starting time
    slam_inc.particles_[:, 0] = slam_inc.lidar_.data_[t0]['pose'][0]

    # Indicator that the map has not been built yet
    build_first_map = False

    # Iterate over the next time steps
    all_particles = deepcopy(slam_inc.particles_)
    num_resamples = 0
    for t in tqdm.tqdm(range(t0, num_steps)):
        # Ignore lidar scans that are obtained before the first IMU reading
        if slam_inc.lidar_.data_[t]['t'][0][0] - slam_inc.joints_.data_['ts'][0][0] < 0:
            continue

        if not build_first_map:
            slam_inc._build_first_map(t)
            t0 = t
            build_first_map = True
            continue

        # Prediction
        slam_inc._predict(t)

        # Update
        slam_inc._update(t, t0=t0, fig='off')

        # Resample particles if necessary: num_eff = 1 / sum(w_i^2)
        num_eff = 1.0 / np.dot(slam_inc.weights_, slam_inc.weights_)
        logging.debug('>> Number of effective particles: %.2f' % num_eff)
        if num_eff < slam_inc.percent_eff_p_thresh_ * slam_inc.num_p_:
            num_resamples += 1
            logging.debug('>> Resampling since num_eff < threshold={0} | Resampling times/t = {1}/{2}'.format(
                slam_inc.percent_eff_p_thresh_ * slam_inc.num_p_, num_resamples, t - t0 + 1))
            [slam_inc.particles_, slam_inc.weights_] = prob.stratified_resampling(
                slam_inc.particles_, slam_inc.weights_, slam_inc.num_p_)

        # Periodically save the result and plot the estimated trajectory
        if (t - t0 + 1) % 1000 == 0 or t == num_steps - 1:
            # Save the result
            log_file = log_dir + '/SLAM_' + split_name + '_' + str(dataset_id) + '.pkl'
            try:
                with open(log_file, 'wb') as f:
                    pickle.dump(slam_inc, f, pickle.HIGHEST_PROTOCOL)
                print(">> Save the result to: %s" % log_file)
            except Exception as e:
                print('Unable to write data to', log_file, ':', e)
                raise

            # Generate map + trajectory
            MAP_2_display = genMap(slam_inc, t)
            MAP_fig_path = log_dir + '/processing_SLAM_map_' + split_name + '_' + str(dataset_id) + '.jpg'
            cv2.imwrite(MAP_fig_path, MAP_2_display)
            plt.title('Estimated Map at time stamp %d/%d' % (t, num_steps - t0 + 1))
            plt.imshow(MAP_2_display)
            plt.pause(0.01)
            logging.debug(">> Save %s" % MAP_fig_path)

    # Return best_p_, a 3 x num_data array holding the best particle's pose at every time stamp
    return slam_inc.best_p_
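# _predict is internal to the SLAM class; below is a minimal sketch of how
# process noise drawn from mov_cov could perturb each particle. `particles` is
# assumed to be a 3 x N array of (x, y, theta) poses and `delta_pose` the
# odometry increment; both names and the world-frame update are illustrative,
# not the class's actual _predict.
def predict_particles_sketch(particles, delta_pose, mov_cov):
    num_p = particles.shape[1]
    # One 3-D Gaussian noise sample per particle, with covariance mov_cov
    noise = np.random.multivariate_normal(np.zeros(3), mov_cov, size=num_p).T
    # Apply the shared odometry increment plus per-particle noise
    return particles + delta_pose.reshape(3, 1) + noise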
def particle_SLAM(src_dir, dataset_id=0, split_name='train', running_mode='test_SLAM', log_dir='logs'):
    '''Your main code is here.

    Debug variant: builds only the first map at t0 = 0 and renders it.
    '''
    ###############################################################################################
    #* Student's input
    #TODO: change the resolution of the map - the distance between two cells (meters)
    map_resolution = 0.05

    # Number of particles
    #TODO: change the number of particles
    num_p = 100

    #TODO: change the process' covariance matrix
    mov_cov = np.array([[1e-8, 0, 0],
                        [0, 1e-8, 0],
                        [0, 0, 1e-8]])

    #TODO: set a threshold value of probability to consider a map's cell occupied
    p_thresh = 0.6

    #TODO: change the threshold of the percentage of effective particles to decide resampling
    percent_eff_p_thresh = 0.5
    #*End student's input
    ###############################################################################################

    # Test modes are disabled in this debug variant
    """
    # Test prediction
    if running_mode == 'test_prediction':
        test_prediction(src_dir, dataset_id, split_name, log_dir)
        exit(1)
    if running_mode == 'test_update':
        test_update(src_dir, dataset_id, split_name, log_dir, map_resolution)
        exit(1)
    """

    # Test SLAM
    # Create a SLAM instance
    slam_inc = SLAM()

    # Read data
    slam_inc._read_data(src_dir, dataset_id, split_name)
    num_steps = slam_inc.num_data_

    # Characterize the sensors' specifications (skipped in this variant)
    # slam_inc._characterize_sensor_specs(p_thresh)

    # Initialize particles
    slam_inc._init_particles(num_p, mov_cov, percent_eff_p_thresh=percent_eff_p_thresh)

    # Initialize the map
    slam_inc._init_map(map_resolution)

    # Starting time index
    t0 = 0

    # Initialize the particles' poses using the lidar measurement at the starting time
    slam_inc.particles_[:, 0] = slam_inc.lidar_.data_[t0]['pose'][0]

    # Indicator that the map has not been built yet
    build_first_map = False

    # Build and render only the first map (the full loop would be range(t0, num_steps))
    all_particles = deepcopy(slam_inc.particles_)
    num_resamples = 0
    for t in tqdm.tqdm(range(0, 1)):
        slam_inc._build_first_map(t0=0)
        MAP_2_display = genMap(slam_inc, t)
        MAP_fig_path = log_dir + '/processing_SLAM_map_' + split_name + '_' + str(dataset_id) + '.jpg'
        cv2.imwrite(MAP_fig_path, MAP_2_display)
        plt.title('Estimated Map at time stamp %d/%d' % (t, num_steps - t0 + 1))
        plt.imshow(MAP_2_display)
        plt.pause(0.1)
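# genMap is used as a black box above; below is a minimal sketch of rendering a
# log-odds grid as an 8-bit image suitable for cv2.imwrite. All names here are
# assumptions rather than the actual genMap, which also overlays the estimated
# trajectory on the map.
def render_map_sketch(log_odds):
    # Unknown cells (log-odds == 0) gray, occupied black, free white
    img = np.full((log_odds.shape[0], log_odds.shape[1], 3), 127, dtype=np.uint8)
    img[log_odds > 0] = 0      # occupied
    img[log_odds < 0] = 255    # free
    return img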