def export_evaluation_data(self, t: int, curr_eval_data: dict) -> bool:
    """Persist the evaluation state of trajectory ``t`` and render its videos.

    Writes ``curr_eval_data`` as JSON to ``self.state_path(t)``, builds an
    overlay caption per frame from the control/stop/sub-goal values, copies
    the frames present in both the image stream and the drive log into
    ``self.image_dir``, and renders the captioned video. When
    ``self.image_type`` is 's' or 'd', also dumps ``self.final_images`` as a
    separate segmentation video.

    :param t: trajectory index used to derive all output paths.
    :param curr_eval_data: dict with at least 'data_frames', 'stop_frames'
        and 'frame_range' keys (schema produced by the evaluation loop —
        TODO confirm against the caller).
    :return: True iff the state file exists after writing.
    """
    with open(str(self.state_path(t)), 'w') as file:
        json.dump(curr_eval_data, file, indent=2)

    data_frames = [DriveDataFrame.load_from_str(s) for s in curr_eval_data['data_frames']]
    controls = list(map(attrgetter('control'), data_frames))
    stops, sub_goals = zip(*curr_eval_data['stop_frames'])
    texts = ['th{:+4.2f} st{:+4.2f} {:4s}:{:+4.2f}'.format(c.throttle, c.steer, g[:4], s)
             for c, s, g in zip(controls, stops, sub_goals)]
    # frame index -> caption; renamed loop variable so it no longer shadows t
    text_dict = {i: txt for i, txt in zip(range(*curr_eval_data['frame_range']), texts)}

    src_image_files = [self.agent.image_path(f) for f in range(*curr_eval_data['frame_range'])]
    src_image_files = [p for p in src_image_files if p.exists()]

    if self.image_type in ['s', 'd']:
        final_image_files = [self.segment_dir / '{:08d}.png'.format(i)
                             for i in range(len(self.final_images))]
        for path, image in zip(final_image_files, self.final_images):
            cv2.imwrite(str(path), image)
        video_from_files(final_image_files, self.video_dir / 'segment{:02d}.mp4'.format(t),
                         texts=[], framerate=EVAL_FRAMERATE_SCALE * DATASET_FRAMERATE,
                         revert=False)

    # keep only frames present in both the image stream and the drive log;
    # stem[:-1] drops the last stem character (presumably a camera tag —
    # TODO confirm against self.agent.image_path naming)
    image_frames = {int(p.stem[:-1]) for p in src_image_files}
    common_frames = sorted(image_frames.intersection(text_dict.keys()))

    src_image_files = [self.agent.image_path(f) for f in common_frames]
    dst_image_files = [self.image_dir / p.name for p in src_image_files]
    # plain loop: copying is a side effect, not a value construction
    for src, dst in zip(src_image_files, dst_image_files):
        shutil.copy(str(src), str(dst))

    text_list = [text_dict[f] for f in common_frames]
    video_from_files(src_image_files, self.video_path(t), texts=text_list,
                     framerate=EVAL_FRAMERATE_SCALE * DATASET_FRAMERATE, revert=True)
    return self.state_path(t).exists()
def read_data(data_path: Path) -> Dict[str, DriveDataFrame]:
    """Parse a ``key:serialized-frame`` text file into a dict of frames.

    Each line must have the form ``<key>:<drive_data_frame_str>``. The split
    uses ``maxsplit=1`` so the serialized payload may itself contain colons.

    :param data_path: path of the text file to read.
    :return: mapping from key to the parsed DriveDataFrame.
    :raises ValueError: on a line with no ``:`` separator (logged first);
        previously an ``assert``, which vanishes under ``python -O``.
    """
    with open(str(data_path), 'r') as file:
        lines = file.read().splitlines()
    data_dict: Dict[str, DriveDataFrame] = dict()
    for i, line in enumerate(lines):
        words = line.split(':', 1)  # key, drive_data_frame_str
        if len(words) != 2:
            logger.error(words)
            raise ValueError('malformed line {} in {}: {!r}'.format(i, data_path, line))
        data_dict[words[0]] = DriveDataFrame.load_from_str(words[1])
    return data_dict
def export_evaluation_data(self, t: int, curr_eval_data: dict) -> bool:
    """Save the evaluation state of trajectory ``t`` as JSON, then export
    the 'center' and 'extra' camera videos and the segmentation video.

    :param t: trajectory index used to derive output paths.
    :param curr_eval_data: evaluation state dict ('data_frames',
        'stop_frames', ...) to be serialized and rendered.
    :return: True iff the state file exists after writing.
    """
    state_file = self.state_path(t)
    with open(str(state_file), 'w') as fp:
        json.dump(curr_eval_data, fp, indent=2)

    frames = [DriveDataFrame.load_from_str(s) for s in curr_eval_data['data_frames']]
    controls = [frame.control for frame in frames]
    stops, sub_goals = zip(*curr_eval_data['stop_frames'])
    logger.info('controls, stops, goals {}, {}, {}'.format(
        len(controls), len(stops), len(sub_goals)))

    for keyword in ('center', 'extra'):
        self.export_video(t, keyword, curr_eval_data)
    self.export_segment_video(t)
    return state_file.exists()
def load_model_single_trajectory(eval_dir: EvaluationDirectory, traj_index: int) -> EvaluationTrajectory:
    """Load the saved evaluation state of one trajectory from disk.

    Reads the JSON state file for ``traj_index``, decodes its serialized
    DriveDataFrames and collision flag, and wraps them in an
    EvaluationTrajectory.

    :param eval_dir: directory descriptor providing paths and experiment metadata.
    :param traj_index: index of the trajectory to load.
    :raises FileNotFoundError: when the state file cannot be read or parsed
        (original cause chained via ``__cause__``).
    """
    state_path = eval_dir.state_path(traj_index)
    try:
        with open(str(state_path), 'r') as file:
            data = json.load(file)
    # was a bare `except:` that swallowed everything (even KeyboardInterrupt)
    # and discarded the cause; narrow the catch and chain the original error,
    # while keeping the FileNotFoundError contract callers rely on
    except (OSError, json.JSONDecodeError) as err:
        raise FileNotFoundError('failed to load {}'.format(state_path)) from err
    collided = parse_bool(data['collided']) if 'collided' in data else False
    data_frames = [DriveDataFrame.load_from_str(f) for f in data['data_frames']]
    info = EvaluationUnitInfo(eval_dir.data_keyword, eval_dir.exp_index,
                              eval_dir.exp_name, eval_dir.exp_step, traj_index)
    return EvaluationTrajectory(info, data_frames, collided)
def load_evaluation_dataset(param: Parameter) -> Tuple[List[List[DriveDataFrame]], List[str]]:
    """Load the evaluation drives and sentences for ``param.eval_keyword``.

    Picks the town directory from the last character of the first dataset
    name, reads ``<eval_keyword>.json`` from it, and decodes every
    serialized DriveDataFrame.

    :raises FileNotFoundError: when the town directory does not exist.
    :return: (list of drives, each a list of DriveDataFrame; list of sentences).
    """
    param = _prepare_evaluation_param(param)
    town = 'town1' if int(param.dataset_data_names[0][-1]) == 1 else 'town2'
    data_root = Path.cwd() / '.carla/dataset/evaluation' / town
    if not data_root.exists():
        raise FileNotFoundError('could not find {}'.format(data_root))
    data_path = data_root / '{}.json'.format(param.eval_keyword)
    with open(str(data_path), 'r') as fp:
        eval_dict = json.load(fp)
    drives = [[DriveDataFrame.load_from_str(item) for item in drive]
              for drive in eval_dict['drives']]
    return list(drives), list(eval_dict['sentences'])