Пример #1
0
def load_raw_data(root_dir: str, save_path: str = "./save/features"):
    '''
    Save raw csv sequences as pickled lists of np.ndarray tracks.

    @input root_dir (string): root directory contains .csv data

    @input save_path (string): save features to this path, default "./save/features"
    '''
    afl = ArgoverseForecastingLoader(root_dir)
    # Make sure the output directory exists before writing any file.
    os.makedirs(save_path, exist_ok=True)
    for fname in os.listdir(root_dir):
        if not fname.endswith(".csv"):
            continue
        seq_path = os.path.join(root_dir, fname)
        print("Processing " + seq_path)
        # Load the sequence once instead of one afl.get() per attribute
        # (the original also computed agent_traj but never used it).
        seq = afl.get(seq_path)
        df = seq.seq_df
        # Make timestamps relative to the start of the sequence.
        df['TIMESTAMP'] -= df['TIMESTAMP'].min()
        traj_list = []
        for track_id in seq.track_id_list:  # renamed: `id` shadows a builtin
            subdf = df.loc[df['TRACK_ID'] == track_id]
            traj_list.append(
                subdf.drop(columns='CITY_NAME').sort_values(
                    by=['TIMESTAMP']).to_numpy())
        # Replace the ".csv" suffix with ".save" for the pickle file name;
        # use a dedicated handle name instead of shadowing the loop variable.
        out_path = os.path.join(save_path, fname[:-4] + ".save")
        with open(out_path, "wb") as fh:
            pickle.dump(traj_list, fh)
Пример #2
0
    def save_results_single_pred(self):
        """Run the best checkpoint over the validation set and pickle one
        result dict per sequence (input, output, target, city) to
        ``<model_dir>/results/<seq_index>.pkl``."""
        print("running save results")
        afl = ArgoverseForecastingLoader("data/val/data/")
        # map_location keeps CPU-only machines able to load GPU checkpoints.
        checkpoint = torch.load(self.model_dir + 'best-model.pt',
                                map_location=lambda storage, loc: storage)
        self.model.load_state_dict(checkpoint['model_state_dict'])
        self.model.eval()

        save_results_path = self.model_dir + "/results/"
        # exist_ok avoids the check-then-create race of exists() + mkdir().
        os.makedirs(save_results_path, exist_ok=True)
        num_batches = len(self.val_loader.batch_sampler)

        # Inference only: skip autograd bookkeeping.
        with torch.no_grad():
            for i_batch, traj_dict in enumerate(self.val_loader):
                print(f"Running {i_batch}/{num_batches}", end="\r")
                gt_traj = traj_dict['gt_unnorm_agent'].numpy()
                output = self.model(traj_dict)
                # Undo the dataset's normalisation before saving.
                output = self.val_loader.dataset.inverse_transform(output, traj_dict)
                output = output.detach().cpu().numpy()

                for index, seq_path in enumerate(traj_dict['seq_path']):
                    loader = afl.get(seq_path)
                    # First 20 frames of the agent track are the model input.
                    input_array = loader.agent_traj[0:20, :]
                    city = loader.city
                    del loader
                    # Sequence id is the csv file name without its extension.
                    seq_index = int(os.path.basename(seq_path).split('.')[0])

                    output_dict = {'seq_path': seq_path,
                                   'seq_index': seq_index,
                                   'input': input_array,
                                   'output': output[index],
                                   'target': gt_traj[index],
                                   'city': city}
                    with open(f"{save_results_path}/{seq_index}.pkl", 'wb') as f:
                        pickle.dump(output_dict, f)
Пример #3
0
 def __init__(self, root_dir='argoverse-data/forecasting_sample/data',
              train_seq_size=20, cuda=False, test=False):
     """Initialise the dataset wrapper.

     Stores the configuration flags, builds an ArgoverseForecastingLoader
     for ``root_dir`` and records the paths of all csv sequences in it.
     """
     super(Argoverse_Data, self).__init__()
     self.use_cuda = cuda
     self.mode_test = test
     self.train_seq_size = train_seq_size
     self.root_dir = root_dir
     self.afl = ArgoverseForecastingLoader(self.root_dir)
     self.seq_paths = glob.glob(f"{self.root_dir}/*.csv")
Пример #4
0
    def __init__(self, split, config, train=False):
        """Select the data source for this split.

        Args:
            split: 'val' or anything else (treated as test).
            config: dict holding split paths and preprocess settings.
            train: kept for interface compatibility; it does not change
                which preprocessed file is loaded (both branches of the
                original code issued the identical np.load call).
        """
        self.config = config
        self.train = train
        # Raw csv directory for this split.
        split2 = config['val_split'] if split == 'val' else config['test_split']
        # Preprocessed feature file for this split.
        split = self.config['preprocess_val'] if split == 'val' else self.config['preprocess_test']

        self.avl = ArgoverseForecastingLoader(split2)
        if 'preprocess' in config and config['preprocess']:
            # The former if train/else duplicated this exact call; collapsed.
            self.split = np.load(split, allow_pickle=True)
        else:
            # No preprocessed features: read raw csvs instead (this
            # deliberately replaces the loader built from split2 above).
            self.avl = ArgoverseForecastingLoader(split)
            self.am = ArgoverseMap()
Пример #5
0
 def __init__(self, url=None, raw_dir=None, save_dir=None,
              force_reload=False, verbose=False, fraction=1.0,
              mode="train", mem_cache=25000):
     """Build the dataset.

     Forwards the standard dataset arguments to the base class, opens a
     disk cache and an Argoverse forecasting loader, and configures how
     much of the data is used and how many items are memory-cached.
     """
     super(MyDataset, self).__init__(name='dataset_name',
                                     url=url,
                                     raw_dir=raw_dir,
                                     save_dir=save_dir,
                                     force_reload=force_reload,
                                     verbose=verbose)
     self.ac = AnyCache(cachedir=save_dir)
     self.argo_loader = ArgoverseForecastingLoader(raw_dir)
     # Clamp the used fraction of the data to at most 100%.
     self.fraction = fraction if fraction <= 1.0 else 1.0
     self.mode = mode
     # The test split is never memory-cached.
     self.mem_cache = 0 if self.mode == 'test' else mem_cache
    def __init__(self,
                 dataset_dir: str,
                 convert_tf_record=False,
                 save_img=False,
                 overwrite_rendered_file=True) -> None:
        """Load the Argoverse forecasting sequences under ``dataset_dir``
        and optionally prepare the rendered-image output folders.

        Args:
            dataset_dir: directory with the .csv sequences.
                NOTE(review): the rendered-image paths below concatenate
                "../rendered_image" directly, so this assumes a trailing
                '/' — confirm with callers.
            convert_tf_record: flag stored for later TFRecord conversion.
            save_img: when True, create the ../rendered_image shard dirs.
            overwrite_rendered_file: overwrite existing rendered files.
        """
        self.dataset_dir = dataset_dir
        print("Load data...")
        self.afl = ArgoverseForecastingLoader(dataset_dir)
        self.num = len(self.afl)
        self.log_agent_pose = None
        self.timestamps = None
        # Numeric id of each sequence file, e.g. ".../1234.csv" -> 1234.
        # Path(...).stem is portable, unlike splitting on '/'.
        self.filenames = [int(Path(seq).stem) for seq in self.afl.seq_list]

        self.convert_tf_record = convert_tf_record
        self.overwrite_rendered_file = overwrite_rendered_file
        self.save_img = save_img

        if save_img:
            # exist_ok avoids the check-then-create race.
            os.makedirs(f"{self.dataset_dir}../rendered_image", exist_ok=True)
            # One sub-folder per shard of FRAME_IN_SHARD frames.
            num_shards = ceil(max(self.filenames) / FRAME_IN_SHARD)
            for i in range(1, num_shards + 1):
                os.makedirs(f"{self.dataset_dir}../rendered_image/{i}",
                            exist_ok=True)
Пример #7
0
    def __init__(self,
                 every_n_steps,
                 output_dir,
                 width=100,
                 height=100,
                 scale=10,
                 mode='train',
                 source_dir='/workspace/datasets/argo/forecasting/val/data/'):
        """Prepare the visualiser: map and lane lookups, a forecasting
        loader over ``source_dir``, the output directory and a reusable
        matplotlib figure/axes pair."""
        self._every_n_steps = every_n_steps
        self.last_epoch = -1

        self.source_dir = source_dir
        self.avm = ArgoverseMap()
        # Cache the lane centerlines of both covered cities.
        self.seq_lane_props = {
            city: self.avm.city_lane_centerlines_dict[city]
            for city in ('PIT', 'MIA')
        }
        self.afl = ArgoverseForecastingLoader(source_dir)
        self.seq_file = None

        self._width = width
        self._height = height
        self._min_alpha = 0.1

        # Create the output directory (with parents) if it is missing.
        self._output_dir = Path(output_dir)
        self._output_dir.mkdir(parents=True, exist_ok=True)

        self.mode = mode

        self.fig = plt.figure()
        self.ax = self.fig.add_subplot(111)
Пример #8
0
 def __init__(self,
              root_dir='argoverse-data/forecasting_sample/data',
              train_seq_size=20):
     """Remember the dataset root, open an Argoverse forecasting loader
     over it and collect the paths of all csv sequences."""
     super(Argoverse_Data, self).__init__()
     self.train_seq_size = train_seq_size
     self.root_dir = root_dir
     self.afl = ArgoverseForecastingLoader(self.root_dir)
     self.seq_paths = glob.glob(f"{self.root_dir}/*.csv")
Пример #9
0
    def __init__(self, cfg):
        """Build the map-derived lookup tables and the forecasting loader.

        Precomputes the map extent, the hallucinated lane bounding-box
        index, the lane-id mapping and the vector map, then opens the
        dataset directory pointed to by cfg['data_locate'].
        """
        super().__init__()
        self.am = ArgoverseMap()

        self.axis_range = self.get_map_range(self.am)
        (self.city_halluc_bbox_table,
         self.city_halluc_tableidx_to_laneid_map) = \
            self.am.build_hallucinated_lane_bbox_index()
        self.laneid_map = self.process_laneid_map()
        self.vector_map, self.extra_map = self.generate_vector_map()

        self.last_observe = cfg['last_observe']
        ##set root_dir to the correct path to your dataset folder
        self.root_dir = cfg['data_locate']
        self.afl = ArgoverseForecastingLoader(self.root_dir)
        self.map_feature = dict(PIT=[], MIA=[])
        # Per-sequence metadata, filled in later.
        self.city_name = dict()
        self.center_xy = dict()
        self.rotate_matrix = dict()
Пример #10
0
	def __init__(self, root : str):
		"""
			Create the dataset manager object.

			- params:
				- root : str : root path for the data
		"""
		self.root_path = root

		# manager.seq_list holds the path of every sequence file.
		self.manager = ArgoverseForecastingLoader(self.root_path)

		# Trajectory filters / feature extractors.
		self.ekf = EKF()
		self.savgol = SavitzkyGolov(window_length=5, poly=3)
		self.compute_features = PlainFeatures()
Пример #11
0
def converter_csv_to_argo(input_path: str, output_path: str):
    """Read every Argoverse forecasting sequence under ``input_path`` and
    write the agent futures (frame 20 onwards) to a competition h5 file."""
    afl = ArgoverseForecastingLoader(input_path)
    output_all = {}
    for counter, data in enumerate(afl, start=1):
        print('\r' + str(counter) + '/' + str(len(afl)), end="")
        # Sequence id is the csv file name without its ".csv" suffix.
        seq_id = int(data.current_seq.name[:-4])
        # Keep only the future part, with a leading batch dimension as
        # expected by the h5 writer.
        output_all[seq_id] = np.expand_dims(data.agent_traj[20:, :], 0)
    generate_forecasting_h5(output_all, output_path)  # this might take awhile
 def __init__(self, train_test_val, root_dir=None, afl=None, avm=None):
     """Configure paths and loaders for one split of the Argoverse data.

     Args:
         train_test_val: split name ('train', 'test' or 'val'), used to
             build the default data and pickle paths.
         root_dir: optional override for the raw data directory.
         afl: optional pre-built ArgoverseForecastingLoader.
         avm: optional pre-built ArgoverseMap.
     """
     self.root_dir = f"/media/bartosz/hdd1TB/workspace_hdd/datasets/argodataset/argoverse-forecasting/forecasting_{train_test_val}_v1.1/{train_test_val}/data" if root_dir is None else root_dir
     # BUG FIX: these were plain strings, so "{train_test_val}" was never
     # substituted.  The substitution is done here; "{range}" in the cache
     # path is intentionally left as a placeholder for a later .format().
     self.pickle_path = f"/media/bartosz/hdd1TB/workspace_hdd/SS-LSTM/data/argoverse/ss_lstm_format_argo_forecasting_v11_{train_test_val}.pickle"
     self.pickle_cache_path = "/media/bartosz/hdd1TB/workspace_hdd/SS-LSTM/data/argoverse/cacheio_v11/ss_lstm_format_argo_forecasting_v11_" + train_test_val + "_{range}.pickle"
     self.img_dataset_dir = "/media/bartosz/hdd1TB/workspace_hdd/SS-LSTM/data/argoverse/imgs_ds_forecasting_v11"
     # Observation / prediction horizon in frames.
     self.obs, self.pred = 20, 30
     self.afl = ArgoverseForecastingLoader(self.root_dir) if afl is None else afl
     self.avm = ArgoverseMap() if avm is None else avm
     self.scene_input, self.social_input, self.person_input, self.expected_output = None, None, None, None
     self.total_nb_of_segments = len(self.afl)
     self.train_test_val = train_test_val
Пример #13
0
 def __init__(self, root_dir, args):
     """Open the forecasting data and derive sequence-length settings.

     History/future lengths come from the time horizons in ``args``
     divided by the sampling period ``args.dt``.
     """
     self.AFL = ArgoverseForecastingLoader(root_dir)
     self.use_yaw = args.use_yaw
     self.hist_len = int(args.time_hist / args.dt)
     self.fut_len = int(args.time_pred / args.dt)
     self.time_len = self.hist_len + self.fut_len
     # (x, y), plus heading when yaw is enabled.
     self.feature_size = 3 if self.use_yaw else 2
     self.random_rotation = args.random_rotation
     self.normalize_angle = args.normalize_angle
     self.down_sampling = args.down_sampling
Пример #14
0
    def __init__(self, data_dir, obs_len=20, position_downscaling_factor=100):
        """
        Args:
            data_dir: Directory with all trajectories
            obs_len: length of observed trajectory
            position_downscaling_factor: divisor applied to raw positions
        """
        self.data_dir = data_dir
        self.obs_len = obs_len
        self.position_downscaling_factor = position_downscaling_factor

        assert os.path.isdir(data_dir), 'Invalid Data Directory'
        self.afl = ArgoverseForecastingLoader(data_dir)
        self.avm = ArgoverseMap()
Пример #15
0
    def __init__(self,
                 data_dict: Dict[str, Any],
                 args: Any,
                 mode: str,
                 base_dir="/work/vita/sadegh/argo/argoverse-api/",
                 use_history=True,
                 use_agents=True,
                 use_scene=True):
        """Initialize the Dataset.

        Args:
            data_dict: Dict containing all the data
            args: Arguments passed to the baseline code
            mode: train/val/test mode
            base_dir: root of the argoverse-api checkout
            use_history/use_agents/use_scene: feature toggles
        """
        self.data_dict = data_dict
        self.args = args
        self.mode = mode
        self.use_history = use_history
        self.use_agents = use_agents
        self.use_scene = use_scene

        # Input features for this split; outputs exist for train/val only.
        self.input_data = data_dict[f"{mode}_input"]
        if mode != "test":
            self.output_data = data_dict[f"{mode}_output"]
        self.data_size = self.input_data.shape[0]

        # Transpose the helper lists so helpers[i] groups everything
        # belonging to sample i.
        self.helpers = list(zip(*self.get_helpers()))

        # Test sequences live under "test_obs" on disk.
        middle_dir = mode if mode != "test" else "test_obs"
        self.root_dir = base_dir + middle_dir + "/data"

        ##set root_dir to the correct path to your dataset folder
        self.afl = ArgoverseForecastingLoader(self.root_dir)

        self.avm = ArgoverseMap()
        self.mf = MapFeaturesUtils()
Пример #16
0
    def __init__(self, split, config, map_param, train=True):
        """Use preprocessed features when configured, otherwise fall back
        to reading raw sequences with an ArgoverseForecastingLoader."""
        self.config = config
        self.train = train
        self.map_param = map_param  # for Scenic network

        if 'preprocess' in config and config['preprocess']:
            key = 'preprocess_train' if train else 'preprocess_val'
            self.split = np.load(self.config[key], allow_pickle=True)
        else:
            self.avl = ArgoverseForecastingLoader(split)
Пример #17
0
 def __init__(self, root, train=True, test=False):
     '''
     Load the data found under ``root`` and split it into train /
     validation / test parts (train and test data use separate
     directories).
     '''
     self.test = test
     afl = ArgoverseForecastingLoader(root)
     self.avm = ArgoverseMap()

     # 70/30 train/validation split; the test mode uses everything.
     # NOTE(review): this slices the loader object itself — verify that
     # ArgoverseForecastingLoader supports slicing in the pinned version.
     split_at = int(0.7 * len(afl))
     if self.test:
         self.afl = afl
     elif train:
         self.afl = afl[:split_at]
     else:
         self.afl = afl[split_at:]
Пример #18
0
    def __init__(self, split, config, train=True):
        """Pick the preprocessed feature file for this split, or fall back
        to the raw loader plus map API; optionally set up raster queries."""
        self.config = config
        self.train = train

        if 'preprocess' in config and config['preprocess']:
            key = 'preprocess_train' if train else 'preprocess_val'
            self.split = np.load(self.config[key], allow_pickle=True)
        else:
            self.avl = ArgoverseForecastingLoader(split)
            self.am = ArgoverseMap()

        if 'raster' in config and config['raster']:
            #TODO: DELETE
            self.map_query = MapQuery(config['map_scale'])
Пример #19
0
    def __init__(self,
                 file_path: str,
                 shuffle: bool = True,
                 random_rotation: bool = False,
                 max_car_num: int = 50,
                 freq: int = 10,
                 use_interpolate: bool = False,
                 use_lane: bool = False,
                 use_mask: bool = True):
        """Open the Argoverse forecasting data at ``file_path``.

        Args:
            file_path: directory containing the sequence files.
            shuffle: shuffle the sequence order.
            random_rotation: apply random rotation augmentation.
            max_car_num: maximum number of cars kept per scene.
            freq: sampling frequency in Hz.
            use_interpolate: interpolate missing frames.
            use_lane: accepted for interface compatibility; not stored.
            use_mask: emit validity masks.

        Raises:
            FileNotFoundError: if ``file_path`` does not exist.  (It is a
                subclass of Exception, so callers catching the old bare
                Exception still work.)
        """
        if not os.path.exists(file_path):
            raise FileNotFoundError(f"Path does not exist: {file_path}")

        self.afl = ArgoverseForecastingLoader(file_path)
        self.shuffle = shuffle
        self.random_rotation = random_rotation
        self.max_car_num = max_car_num
        self.freq = freq
        self.use_interpolate = use_interpolate
        self.am = ArgoverseMap()
        self.use_mask = use_mask
        self.file_path = file_path
Пример #20
0
 def __init__(self, root, train=True, test=False):
     '''
     Load the data found under ``root`` and compute the index range of
     the train / validation / test part (train and test data come from
     separate directories).
     '''
     self.test = test
     self.train = train
     self.afl = ArgoverseForecastingLoader(root)
     self.avm = ArgoverseMap()
     root_dir = Path(root)
     r = [(root_dir / x).absolute() for x in os.listdir(root_dir)]
     n = len(r)
     if self.test:
         self.start = 0
         self.end = n
     elif self.train:
         # Train covers indices [0, int(0.7 * n)).
         self.start = 0
         self.end = int(0.7 * n)
     else:
         # BUG FIX: was int(0.7 * n) + 1, which left sample int(0.7 * n)
         # out of both the train and the validation range.
         self.start = int(0.7 * n)
         self.end = n
Пример #21
0
	def __init__(self, root : str):
		"""
			Create the dataset manager object and run the filter benchmark.

			For the AGENT trajectory of every sequence, features are
			computed with four variants (raw, EKF, Savitzky-Golay,
			EKF+Savitzky-Golay); per-sequence path / velocity /
			acceleration / jerk plots and aggregate box plots are written
			to disk and summary statistics are printed.

			- params:
				- root : str : root path for the data
		"""
		self.root_path = root

		self.manager = ArgoverseForecastingLoader(self.root_path)

		self.ekf = EKF()
		self.feature = PlainFeatures()
		self.savgol = SavitzkyGolov(window_length=11,
									poly=3)

		labels = ['raw', 'ekf', 'savgol', 'ekf-savgol']
		colors = ['blue', 'green', 'orange', 'red']

		def magnitude(features, col):
			# Euclidean norm of the component pair stored at (col, col+1).
			return np.sqrt(np.power(features[:, col], 2) +
						   np.power(features[:, col + 1], 2))

		def save_lines(fig_no, series, fname):
			# Overlay one curve per variant on the given figure and save it.
			plt.figure(fig_no)
			for values, color, label in zip(series, colors, labels):
				plt.plot(values, color=color, label=label, marker='.')
			plt.legend()
			plt.savefig(fname)
			plt.clf()

		# One list of per-sequence arrays per (quantity, variant);
		# replaces 12 separately named accumulator lists.
		all_vel = [[] for _ in labels]
		all_acc = [[] for _ in labels]
		all_jerk = [[] for _ in labels]

		for index in range(0, len(self.manager.seq_list)):
			print(index)

			seq_path = self.manager.seq_list[index]
			df = self.manager.get(seq_path).seq_df

			traj = df[df['OBJECT_TYPE'] == 'AGENT']
			traj = traj[['TIMESTAMP', 'X', 'Y']].values
			self.savgol.set_window_size(traj.shape[0] // 2)

			# Features per variant; ekf-savgol smooths the raw EKF output.
			plain_features = self.feature.process(traj=traj)
			ekf_features = self.ekf.process(traj=traj)
			savgol_features = self.savgol.process(traj=traj)
			ekf_savgol_features = self.savgol.filter(vector=ekf_features)

			feats = [np.squeeze(f) for f in (plain_features,
											 ekf_features,
											 savgol_features,
											 ekf_savgol_features)]

			# Path (x, y) overlay — figure numbers match the original code.
			plt.figure(1)
			for f, color, label in zip(feats, colors, labels):
				plt.plot(f[:, 0], f[:, 1], color=color, label=label, marker='.')
			plt.legend()
			plt.savefig(f'path_{index}.png')
			plt.clf()

			# Velocity / acceleration / jerk components live in columns
			# (2,3), (4,5) and (6,7) of the feature arrays.
			vels = [magnitude(f, 2) for f in feats]
			save_lines(2, vels, f'vel_{index}.png')

			accs = [magnitude(f, 4) for f in feats]
			save_lines(3, accs, f'acc_{index}.png')

			jerks = [magnitude(f, 6) for f in feats]
			save_lines(3, jerks, f'j_{index}.png')

			for store, values in zip((all_vel, all_acc, all_jerk),
									 (vels, accs, jerks)):
				for variant, v in enumerate(values):
					store[variant].append(v)

		# Flatten the per-sequence arrays into one array per variant.
		all_vel = [np.concatenate(v) for v in all_vel]
		all_acc = [np.concatenate(v) for v in all_acc]
		all_jerk = [np.concatenate(v) for v in all_jerk]

		for variant, name in enumerate(('PLAIN', 'EKF', 'SAVGOL', 'EKF-SAVGOL')):
			print(f'\033[92m {name} \033[0m')
			self.stats(traj=[all_vel[variant], all_acc[variant], all_jerk[variant]])

		def save_box(fig_no, data, box_labels, fname):
			# Aggregate box plot over all sequences.
			plt.figure(fig_no)
			plt.boxplot(data, labels=box_labels)
			plt.savefig(fname)
			plt.clf()

		save_box(1, all_vel, labels, 'box_plot_vel.png')
		save_box(2, all_acc, labels, 'box_plot_acc.png')
		save_box(3, all_jerk, labels, 'box_plot_j.png')

		# Same plots without the raw variant, which can dwarf the rest.
		save_box(1, all_vel[1:], labels[1:], '2box_plot_vel.png')
		save_box(2, all_acc[1:], labels[1:], '2box_plot_acc.png')
		save_box(3, all_jerk[1:], labels[1:], '2box_plot_j.png')
Пример #22
0
class Argoverse(object):
	"""Benchmark trajectory-smoothing filters on Argoverse forecasting data.

	Constructing an instance runs the whole experiment: for the AGENT
	trajectory of every sequence, features are computed with four variants
	(raw, EKF, Savitzky-Golay, EKF+Savitzky-Golay); per-sequence and
	aggregate plots are written to disk and summary statistics printed.
	"""

	def __init__(self, root: str):
		"""
			Create the dataset manager object and run the filter benchmark.

			- params:
				- root : str : root path for the data
		"""
		self.root_path = root

		self.manager = ArgoverseForecastingLoader(self.root_path)

		self.ekf = EKF()
		self.feature = PlainFeatures()
		self.savgol = SavitzkyGolov(window_length=11,
									poly=3)

		labels = ['raw', 'ekf', 'savgol', 'ekf-savgol']
		colors = ['blue', 'green', 'orange', 'red']

		def magnitude(features, col):
			# Euclidean norm of the component pair stored at (col, col+1).
			return np.sqrt(np.power(features[:, col], 2) +
						   np.power(features[:, col + 1], 2))

		def save_lines(fig_no, series, fname):
			# Overlay one curve per variant on the given figure and save it.
			plt.figure(fig_no)
			for values, color, label in zip(series, colors, labels):
				plt.plot(values, color=color, label=label, marker='.')
			plt.legend()
			plt.savefig(fname)
			plt.clf()

		# One list of per-sequence arrays per (quantity, variant);
		# replaces 12 separately named accumulator lists.
		all_vel = [[] for _ in labels]
		all_acc = [[] for _ in labels]
		all_jerk = [[] for _ in labels]

		for index in range(0, len(self.manager.seq_list)):
			print(index)

			seq_path = self.manager.seq_list[index]
			df = self.manager.get(seq_path).seq_df

			traj = df[df['OBJECT_TYPE'] == 'AGENT']
			traj = traj[['TIMESTAMP', 'X', 'Y']].values
			self.savgol.set_window_size(traj.shape[0] // 2)

			# Features per variant; ekf-savgol smooths the raw EKF output.
			plain_features = self.feature.process(traj=traj)
			ekf_features = self.ekf.process(traj=traj)
			savgol_features = self.savgol.process(traj=traj)
			ekf_savgol_features = self.savgol.filter(vector=ekf_features)

			feats = [np.squeeze(f) for f in (plain_features,
											 ekf_features,
											 savgol_features,
											 ekf_savgol_features)]

			# Path (x, y) overlay — figure numbers match the original code.
			plt.figure(1)
			for f, color, label in zip(feats, colors, labels):
				plt.plot(f[:, 0], f[:, 1], color=color, label=label, marker='.')
			plt.legend()
			plt.savefig(f'path_{index}.png')
			plt.clf()

			# Velocity / acceleration / jerk components live in columns
			# (2,3), (4,5) and (6,7) of the feature arrays.
			vels = [magnitude(f, 2) for f in feats]
			save_lines(2, vels, f'vel_{index}.png')

			accs = [magnitude(f, 4) for f in feats]
			save_lines(3, accs, f'acc_{index}.png')

			jerks = [magnitude(f, 6) for f in feats]
			save_lines(3, jerks, f'j_{index}.png')

			for store, values in zip((all_vel, all_acc, all_jerk),
									 (vels, accs, jerks)):
				for variant, v in enumerate(values):
					store[variant].append(v)

		# Flatten the per-sequence arrays into one array per variant.
		all_vel = [np.concatenate(v) for v in all_vel]
		all_acc = [np.concatenate(v) for v in all_acc]
		all_jerk = [np.concatenate(v) for v in all_jerk]

		for variant, name in enumerate(('PLAIN', 'EKF', 'SAVGOL', 'EKF-SAVGOL')):
			print(f'\033[92m {name} \033[0m')
			self.stats(traj=[all_vel[variant], all_acc[variant], all_jerk[variant]])

		def save_box(fig_no, data, box_labels, fname):
			# Aggregate box plot over all sequences.
			plt.figure(fig_no)
			plt.boxplot(data, labels=box_labels)
			plt.savefig(fname)
			plt.clf()

		save_box(1, all_vel, labels, 'box_plot_vel.png')
		save_box(2, all_acc, labels, 'box_plot_acc.png')
		save_box(3, all_jerk, labels, 'box_plot_j.png')

		# Same plots without the raw variant, which can dwarf the rest.
		save_box(1, all_vel[1:], labels[1:], '2box_plot_vel.png')
		save_box(2, all_acc[1:], labels[1:], '2box_plot_acc.png')
		save_box(3, all_jerk[1:], labels[1:], '2box_plot_j.png')

	def stats(self, traj) -> None:
		"""Print mean, std, min, max and quartiles for the velocity /
		acceleration / jerk aggregates in ``traj``.

		- params:
			- traj : list of three 1-D arrays [vel, acc, jerk].
			  (The original annotation said np.ndarray, but callers pass
			  a list; the wrong ``NoReturn`` return annotation is fixed
			  to ``None`` as the method returns normally.)
		"""
		labels = ['vel', 'acc', 'jerk']
		for t, l in zip(traj, labels):
			_mean = round(np.mean(t), 2)
			_std = round(np.std(t), 2)
			_min = round(np.min(t), 2)
			_max = round(np.max(t), 2)
			_q25 = round(np.quantile(t, 0.25), 2)
			_q50 = round(np.quantile(t, 0.5), 2)
			_q75 = round(np.quantile(t, 0.75), 2)

			print(f'Feature: {l}')
			print('\tmean:{} | std:{} | min:{} | max:{} | q25:{} | q50:{} | q75:{}'.format(
				_mean, _std, _min, _max, _q25, _q50, _q75))
0
 def __init__(self, scenario_path = _sample_path):
     """Create the map API and a forecasting loader for the scenarios,
     caching the per-city lane centerline dictionary."""
     self.scenarios = ArgoverseForecastingLoader(scenario_path)
     self.map_pres = ArgoverseMap()
     self.seq_lane_props = self.map_pres.city_lane_centerlines_dict
def test_get(data_loader: ArgoverseForecastingLoader) -> None:
    """Fetching by key ("0") and by integer index (0) must yield the
    same underlying sequence."""
    by_key = data_loader.get("0")
    by_index = data_loader[0]
    assert by_key.current_seq == by_index.current_seq
def data_loader() -> ArgoverseForecastingLoader:
    """Build a forecasting loader over the bundled test data."""
    loader = ArgoverseForecastingLoader(TEST_DATA_LOC)
    return loader
Пример #26
0
 def __init__(self, root_dir, config, mode):
     """Open the sequences under ``root_dir`` in a deterministic order
     and remember whether this instance serves the training split."""
     self.avl = ArgoverseForecastingLoader(root_dir)
     # Sort for a reproducible sequence order across runs.
     self.avl.seq_list.sort()
     self.config = config
     self.train = (mode == 'train')
     logging.info(f'root_dir: {root_dir}, num of squence: {len(self.avl)}')
Пример #27
0
class BaseDataset(torch.utils.data.Dataset):
    """Argoverse forecasting dataset producing, per sample, the normalized
    agent trajectory, rasterized scene image, vectorized neighbour histories
    and candidate lane-centerline polylines."""

    def __init__(self,
                 data_dict: Dict[str, Any],
                 args: Any,
                 mode: str,
                 base_dir="/work/vita/sadegh/argo/argoverse-api/",
                 use_history=True,
                 use_agents=True,
                 use_scene=True):
        """Initialize the Dataset.
        Args:
            data_dict: Dict containing all the data, keyed per split as
                "<mode>_input" / "<mode>_output" / "<mode>_helpers"
            args: Arguments passed to the baseline code
            mode: train/val/test mode
            base_dir: root of the Argoverse dataset checkout
            use_history: include the ego history polyline in each item
            use_agents: include neighbouring-agent polylines in each item
            use_scene: include candidate lane centerlines in each item
        """
        self.data_dict = data_dict
        self.args = args
        self.mode = mode
        self.use_history = use_history
        self.use_agents = use_agents
        self.use_scene = use_scene
        # Get input
        self.input_data = data_dict["{}_input".format(mode)]
        if mode != "test":
            # Ground-truth futures exist only outside the test split.
            self.output_data = data_dict["{}_output".format(mode)]
        self.data_size = self.input_data.shape[0]

        # Get helpers
        # Transposed so that self.helpers[idx] holds all helper fields
        # of sample idx (see LSTM_HELPER_DICT_IDX for field positions).
        self.helpers = self.get_helpers()
        self.helpers = list(zip(*self.helpers))

        # The test split stores its csvs under "test_obs" rather than "test".
        middle_dir = mode if mode != "test" else "test_obs"
        self.root_dir = base_dir + middle_dir + "/data"

        ##set root_dir to the correct path to your dataset folder
        self.afl = ArgoverseForecastingLoader(self.root_dir)

        self.avm = ArgoverseMap()
        self.mf = MapFeaturesUtils()

    def __len__(self):
        """Get length of dataset.
        Returns:
            Length of dataset
        """
        return self.data_size

    def __getitem__(
            self,
            idx: int) -> Tuple[torch.FloatTensor, Any, Dict[str, np.ndarray]]:
        """Get the element at the given index.
        Args:
            idx: Query index
        Returns:
            A list containing input Tensor, Output Tensor (Empty if test) and viz helpers. 
        """

        # Helper fields by position: helper[0] is the (x, y) trajectory,
        # helper[1] the per-step city names, helper[5] the rotation in
        # degrees, helper[8] the sequence id — grounded by the keyword
        # arguments they are passed to below.
        helper = self.helpers[idx]
        #         hp=helper[0][:20]
        #         seq_to_find_lanes=np.concatenate([hp,[hp[-1]+i*(hp[-1]-hp[-2]) for i in range(1,10)]])
        ############################# find lanes
        # Query candidate lane centerlines around the 20 observed steps,
        # also producing the rasterized scene image and the world->image
        # transform used for all polylines below.
        cnt_lines, img, cnt_lines_norm, world_to_image_space = self.mf.get_candidate_centerlines_for_trajectory(
            helper[0][:20],
            yaw_deg=helper[5],
            city_name=helper[1][0],
            avm=self.avm,
            viz=True,
            seq_len=60,
            max_candidates=MAX_NUM_LANES,
        )
        #############################

        # normalize history
        # Full trajectory for train/val; only the 20 observed steps at test.
        traj = helper[0] if self.mode != "test" else helper[0][:20]
        # Translate so the last observed point (index 19) is the origin,
        # then rotate by the sequence yaw (degrees -> radians).
        traj = transform_points(traj - helper[0][19],
                                yaw_as_rotation33(math.pi * helper[5] / 180))

        path_type = []
        path = []
        history_agent_type = []
        history_agent = []
        agents_num = 0
        normal_agents_hist = []

        if self.use_history or self.use_agents:
            if self.use_history:
                # Ego history projected into image space and cropped to the
                # 224x224 raster.
                ego_world_history = helper[0][:20]
                history_xy = transform_points(ego_world_history,
                                              world_to_image_space)
                history_xy = crop_tensor(history_xy, (224, 224))
                # Type tag 1 marks the ego polyline (2 marks neighbours).
                history_agent_type += [1]
                history_agent += [history_xy]
            if self.use_agents:
                agents_history, normal_agents_hist, agents_num = self.get_agents(
                    idx, world_to_image_space, helper[0][19], helper[5])
                history_agent_type += [2] * len(agents_history)
                history_agent += agents_history

            history_agent = torch.cat(
                [linear_path_to_tensor(lane, -1) for lane in history_agent], 0)

        if self.use_scene:
            # Type tag 0 marks lane-centerline polylines.
            path_type = [0] * len(cnt_lines_norm)
            path = torch.cat(
                [linear_path_to_tensor(lane, -1) for lane in cnt_lines_norm],
                0)

        return {
            "history_positions":
            torch.FloatTensor(traj[:self.args.obs_len]),
            "normal_agents_history":
            normal_agents_hist,
            "agents_num":
            agents_num,
            "target_positions":
            torch.empty(1) if self.mode == "test" else torch.FloatTensor(
                traj[self.args.obs_len:]),
            "path":
            path,
            "path_type":
            path_type,
            "history_agent":
            history_agent,
            "history_agent_type":
            history_agent_type,
            "base_image":
            img.transpose(2, 0, 1),
            "centroid":
            helper[0][19],
            "yaw_deg":
            helper[5],
            "seq_id":
            helper[8],
            "world_to_image_space":
            world_to_image_space,
        }

    def get_helpers(self) -> Tuple[Any, ...]:
        """Get helpers for running baselines.
        Returns:
            helpers: Tuple in the format specified by LSTM_HELPER_DICT_IDX
        Note: We need a tuple because DataLoader needs to index across all these helpers simultaneously.
        """
        helper_df = self.data_dict[f"{self.mode}_helpers"]
        candidate_centerlines = helper_df["CANDIDATE_CENTERLINES"].values
        #         print("ss",candidate_centerlines)
        candidate_nt_distances = helper_df["CANDIDATE_NT_DISTANCES"].values
        xcoord = np.stack(
            helper_df["FEATURES"].values)[:, :,
                                          FEATURE_FORMAT["X"]].astype("float")
        ycoord = np.stack(
            helper_df["FEATURES"].values)[:, :,
                                          FEATURE_FORMAT["Y"]].astype("float")
        centroids = np.stack((xcoord, ycoord), axis=2)
        # Placeholder (array of None) for helpers disabled by the args flags.
        _DEFAULT_HELPER_VALUE = np.full((centroids.shape[0]), None)
        city_names = np.stack(
            helper_df["FEATURES"].values)[:, :, FEATURE_FORMAT["CITY_NAME"]]
        seq_paths = helper_df["SEQUENCE"].values
        translation = (helper_df["TRANSLATION"].values
                       if self.args.normalize else _DEFAULT_HELPER_VALUE)
        rotation = (helper_df["ROTATION"].values
                    if self.args.normalize else _DEFAULT_HELPER_VALUE)

        use_candidates = self.args.use_map and self.mode == "test"

        # NOTE(review): `self.args.use_map and` is redundant below —
        # `use_candidates` already implies it.
        candidate_delta_references = (
            helper_df["CANDIDATE_DELTA_REFERENCES"].values
            if self.args.use_map and use_candidates else _DEFAULT_HELPER_VALUE)
        delta_reference = (helper_df["DELTA_REFERENCE"].values
                           if self.args.use_delta and not use_candidates else
                           _DEFAULT_HELPER_VALUE)

        helpers = [None for i in range(len(LSTM_HELPER_DICT_IDX))]

        # Name of the variables should be the same as keys in LSTM_HELPER_DICT_IDX
        # (looked up reflectively through locals(): renaming any local above
        # would silently break this mapping).
        for k, v in LSTM_HELPER_DICT_IDX.items():
            helpers[v] = locals()[k.lower()]

        return tuple(helpers)

    def get_agents(self, index, world_to_image_space, centroid, yaw_deg):
        """Get agents
        Collect neighbouring agents for sequence *index*: their image-space
        polylines (cropped to the 224x224 raster) and their ego-centred
        histories, capped at MAX_AGENTS_NUM.
        """
        helper_df = self.data_dict[f"{self.mode}_helpers"]
        seq_id = helper_df.iloc[index, 0]
        seq_path = f"{self.root_dir}/{seq_id}.csv"
        #         print(seq_path)
        df = self.afl.get(seq_path).seq_df
        frames = df.groupby("TRACK_ID")

        res = []
        # Sentinel fill value 300 marks unused agent slots — presumably a
        # coordinate well outside the normalized range; confirm downstream.
        normal_agents_hist = np.full((MAX_AGENTS_NUM, self.args.obs_len, 2),
                                     300)

        #         print(len(frames))
        # Plot all the tracks up till current frame
        num_selected = 0

        rotation_mat = yaw_as_rotation33(math.pi * yaw_deg / 180)
        for group_name, group_data in frames:
            object_type = group_data["OBJECT_TYPE"].values[0]

            #             print(group_data[["X","Y"]].values.shape).
            cor_xy = group_data[["X", "Y"]].to_numpy()
            # Skip tracks shorter than the observation window.
            if cor_xy.shape[0] < 20:
                continue

            cor_xy = cor_xy[:self.args.obs_len]
            # Skip agents too far from the ego at the last observed step.
            if np.linalg.norm(centroid - cor_xy[-1]) > MIN_AGENTS_DIST:
                continue

            # Ego-centred, yaw-aligned history (kept in world units)...
            traj = transform_points(cor_xy - centroid, rotation_mat)
            # ...and the same track projected into image space for drawing.
            cor_xy = transform_points(cor_xy, world_to_image_space)
            #             print(cor_xy.shape)
            cropped_vector = crop_tensor(cor_xy, (224, 224))
            #             print(cropped_vector.shape)

            if len(cropped_vector) > 1:
                normal_agents_hist[num_selected] = traj
                res.append(cropped_vector)
                num_selected += 1
            if num_selected >= MAX_AGENTS_NUM:
                break
#         print(num_selected)
        return res, normal_agents_hist, num_selected
Пример #28
0
from argoverse.data_loading.argoverse_forecasting_loader import ArgoverseForecastingLoader
import torch
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
import torch
import numpy as np
import sys

## Specify data filepaths
testset_dir = '/Users/mithunjothiravi/Repos/argoverse-api/demo_usage/data/test_obs/data'
preds_dir = '/Users/mithunjothiravi/Repos/argoverse-api/demo_usage/data/test_preds.tar'

## Load the Argoverse dataset and the prediction information
testset_load = ArgoverseForecastingLoader(testset_dir)
pred_load = torch.load(preds_dir)[
    25024]  # Change the index to be the file number

# Test set trajectories
# NOTE(review): `agent_traj` reads the loader's *current* sequence, but no
# sequence has been selected via `get(...)` here — confirm this is intended.
test_trajectories = testset_load.agent_traj

# Plot the prediction: start marker in red, first 21 steps in blue.
ax = plt.gca()
plt.plot(pred_load[0, 0, 0], pred_load[0, 0, 1], '-o',
         c='r')  #starting point here
plt.plot(pred_load[0, :21, 0], pred_load[0, :21, 1], '-', c='b')

for i in range(len(pred_load)):
    plt.plot(pred_load[i, 20:, 0],
             pred_load[i, 20:, 1],
             '-',
Пример #29
0
from argoverse.data_loading.argoverse_forecasting_loader import ArgoverseForecastingLoader
from argoverse.visualization.visualize_sequences import viz_sequence
from argoverse.map_representation.map_api import ArgoverseMap
import matplotlib.pyplot as plt
from matplotlib.patches import Polygon
import numpy as np
# %%

# set root_dir to the correct path to your dataset folder
# root_dir = '/media/bartosz/hdd1TB/workspace_hdd/datasets/argodataset/argoverse-forecasting/forecasting_train/sample/'
root_dir = '/media/bartosz/hdd1TB/workspace_hdd/datasets/argodataset/argoverse-forecasting/train/data'

# Index every forecasting sequence under root_dir.
afl = ArgoverseForecastingLoader(root_dir)

print('Total number of sequences:', len(afl))

# Argoverse HD-map API (lane geometry, drivable area, ...).
avm = ArgoverseMap()
# %%


def get_plot(map_range):
    my_dpi = 96.0
    # fig = plt.figure(figsize=(72 / my_dpi, 72 / my_dpi), dpi=my_dpi)
    fig = plt.figure(figsize=(5, 5), dpi=my_dpi)

    # fig.tight_layout(pad=0)
    ax = fig.add_axes([0., 0., 1., 1.])
    ax.set_xlim([map_range[0], map_range[1]])
    ax.set_ylim([map_range[2], map_range[3]])
    # ax.axis('off')
Пример #30
0
class Argoverse_Data(Dataset):
    def __init__(self,
                 root_dir='argoverse-data/forecasting_sample/data',
                 train_seq_size=20,
                 cuda=False,
                 test=False):
        """Index every sequence csv under *root_dir* for lazy loading.

        Args:
            root_dir: folder containing per-sequence .csv files.
            train_seq_size: number of observed steps; the remainder of a
                trajectory is treated as ground truth.
            cuda: move rotation/translation to GPU in inverse_transform.
            test: when True, transform() runs in test mode.
        """
        super().__init__()
        self.root_dir = root_dir
        self.afl = ArgoverseForecastingLoader(root_dir)
        self.seq_paths = glob.glob(f"{root_dir}/*.csv")
        self.train_seq_size = train_seq_size
        self.use_cuda = cuda
        self.mode_test = test

    def __len__(self):
        """Dataset length = number of sequence csv files found in root_dir."""
        return len(self.seq_paths)

    def old_transform(self, trajectory):
        """Legacy normalization: translate the trajectory to start at the
        origin and rotate about the heading at step 19.

        Superseded by `transform`; kept for reference.
        """
        def rotation_angle(x, y):
            # Signed angle that undoes the heading of the point (x, y).
            angle = np.arctan(abs(y / x))
            direction = -1 * np.sign(x * y)
            return direction * angle

        translation = trajectory[0]
        trajectory = trajectory - trajectory[0]
        theta = rotation_angle(trajectory[19, 0], trajectory[19, 1])
        c, s = np.cos(theta), np.sin(theta)
        R = np.array([[c, -s], [s, c]])
        trajectory = torch.tensor(trajectory)
        trajectory = trajectory.permute(1, 0)
        # NOTE(review): np.matmul applied to a torch tensor relies on
        # NumPy/torch interop dispatch, and the result is re-wrapped with
        # torch.tensor() below — confirm the dtype matches `transform`'s.
        trajectory = np.matmul(R, trajectory)
        trajectory = torch.tensor(trajectory)
        trajectory = trajectory.permute(1, 0)
        if self.mode_test:
            return trajectory[0:self.train_seq_size].float(), R, translation
        else:
            return trajectory[0:self.train_seq_size].float(
            ), trajectory[self.train_seq_size:].float()

    def transform(self, trajectory):
        def rotation_angle(x, y):
            angle = np.arctan(abs(y / x))
            direction = -1 * np.sign(x * y)
            return direction * angle

        if self.mode_test:
            translation = -trajectory[0]
            train_trajectory = trajectory + translation
            theta = rotation_angle(train_trajectory[19, 0],
                                   train_trajectory[19, 1])
            c, s = np.cos(theta), np.sin(theta)
            R = torch.Tensor([[c, -s], [s, c]]).float()
            train_trajectory = torch.tensor(train_trajectory).float()
            train_trajectory = torch.matmul(R, train_trajectory.permute(
                1, 0)).permute(1, 0)

            return train_trajectory, R, torch.Tensor(translation).float()
        else:
            old_trajectory = trajectory
            translation = -trajectory[0]
            transformed_trajectory = trajectory + translation
            theta = rotation_angle(transformed_trajectory[19, 0],
                                   transformed_trajectory[19, 1])
            c, s = np.cos(theta), np.sin(theta)
            R = torch.Tensor([[c, -s], [s, c]]).float()
            transformed_trajectory = torch.tensor(
                transformed_trajectory).float()
            transformed_trajectory = torch.matmul(
                R, transformed_trajectory.permute(1, 0)).permute(1, 0)
            train_trajectory = transformed_trajectory[:self.train_seq_size]
            gt_transformed_trajectory = transformed_trajectory[self.
                                                               train_seq_size:]
            actual_gt_trajectory = torch.Tensor(
                trajectory[self.train_seq_size:]).float()
            return train_trajectory, gt_transformed_trajectory, actual_gt_trajectory, R, torch.Tensor(
                translation).float()

    def inverse_transform_one(self, trajectory, R, t):
        """Apply rotation *R* followed by translation *t* to one (N, 2)
        trajectory (single-sample counterpart of `inverse_transform`)."""
        rotated = torch.matmul(R, trajectory.permute(1, 0)).permute(1, 0)
        return rotated + t.reshape(1, 2)

    def inverse_transform(self, trajectory, traj_dict):
        R = traj_dict['rotation']
        t = traj_dict['translation']
        if self.use_cuda:
            R = R.cuda()
            t = t.cuda()
        out = torch.matmul(R.permute(0, 2, 1),
                           trajectory.permute(0, 2, 1)).permute(0, 2, 1)
        out = out - t.reshape(t.shape[0], 1, 2)
        return out

    def __getitem__(self, index):
        """Load sequence *index*, normalize its agent trajectory and return
        the training tensors plus the inverse-transform parameters."""
        seq_path = self.seq_paths[index]
        loader = self.afl.get(seq_path)
        observed, gt_norm, gt_world, rot, shift = self.transform(
            loader.agent_traj)
        return {
            'seq_path': seq_path,
            'train_agent': observed,
            'gt_agent': gt_norm,
            'gt_unnorm_agent': gt_world,
            'rotation': rot,
            'translation': shift,
            'city': loader.city
        }