def load_and_clean_csv(csv_path):
    """Load a multi-index CSV, drop rows whose id is a known false index,
    renumber the surviving ids, persist the result back to *csv_path*,
    and return the cleaned DataFrame."""
    meta = pdu.read_multiindex_pd(csv_path)
    bad_ids = MetaProcessingParams.false_indexes
    meta = meta[~meta['id'].isin(bad_ids)]
    id_mapping = pdu.get_update_indexes_rule(meta)
    meta = meta.replace({'id': id_mapping})
    meta.to_csv(csv_path)
    return meta
def final_merge_single(self, overlapped_windows):
    """Run the optional final merge pass over *overlapped_windows*.

    Updates the config for the final stage, wires a MetaPartition up with
    its file and dataframe observers, runs the merge via TrackerMerger,
    then loads and returns the best resulting CSV found under $RES_DIR.
    """
    self.update_config_for_final()
    stage = 'final'
    partition = MetaPartition(None, overlapped_windows, self.homography, stage)
    file_proc = FileProcessing(partition, stage)
    file_proc.create_dir()
    partition.add_observer(file_proc)
    frame_proc = DataframeProcessing(partition)
    partition.add_observer(frame_proc)
    merger = TrackerMerger(frame_proc, partition, file_proc)
    window_processing(merger, final_merge=True)
    best_csv = util.choose_best_csv_final_last(os.environ.get('RES_DIR'))
    return pdu.read_multiindex_pd(best_csv)
def load_info(self):
    """Populate ``self.files_info`` from every file in ``self.files_dir``.

    JSON files are loaded and sorted by key; CSV files are read as
    multi-index frames, converted to dicts, and mapped back to the
    original coordinate system using ``self.homography_dict`` and the
    ``fixed_coordinate_resize_h``/``fixed_coordinate_resize_w`` env vars.
    An empty id-counter list is initialised per file in
    ``self.ids_counters``.

    Raises:
        ValueError: if a file has neither a ``.json`` nor ``.csv`` suffix.
    """
    files = os.listdir(self.files_dir)
    for file in files:
        # BUG FIX: was os.path.join(args.files_dir, file) — `args` is not
        # defined in this scope (NameError at runtime); use the instance's
        # directory, which is the one just listed above.
        file_path = os.path.join(self.files_dir, file)
        # FIX: was endswith('json') (no dot), inconsistent with the '.csv'
        # check below and would also match names like 'foojson'.
        if file.endswith('.json'):
            curr_file_info = load_and_process(file_path)
            curr_file_info = sort_meta_by_key(curr_file_info)
        elif file.endswith('.csv'):
            meta_pandas = read_multiindex_pd(file_path)
            meta_dict = from_dataframe_to_dict(meta_pandas)
            curr_file_info = fixed_to_original_coordinate_system(
                meta_dict, self.homography_dict,
                int(os.environ.get('fixed_coordinate_resize_h')),
                int(os.environ.get('fixed_coordinate_resize_w')),
                self.height, self.width)
        else:
            # Typo fixed: "Check you" -> "Check your".
            raise ValueError('Check your input file formats!')
        self.files_info[file] = curr_file_info
        self.ids_counters[file] = []