def build_pred(self):
    # Convert matched detection boxes into BDD-style prediction labels and
    # save one json per frame under DATASET.PRED_PATH.
    seq_data = []
    if self.data_path == []:
        print("Empty data")
        return seq_data
    for idx, file_path in enumerate(self.data_path):
        frame_data = bh.load_json(file_path)
        if idx % args.verbose_interval == 0 and idx != 0:
            print('{} images.'.format(idx))
        match_box = self.det_result[idx]
        prediction_data = []
        for i, box in enumerate(match_box):
            prediction = bh.init_labels_format()
            prediction['box2d']['x1'] = int(box[0])
            prediction['box2d']['y1'] = int(box[1])
            prediction['box2d']['x2'] = int(box[2])
            prediction['box2d']['y2'] = int(box[3])
            prediction['box2d']['confidence'] = float(box[6])
            # 3D box projected center
            prediction['box3d']['xc'] = int(box[4])
            prediction['box3d']['yc'] = int(box[5])
            # Drop fields that are irrelevant for detection predictions
            del prediction['poly2d']
            del prediction['attributes']['trafficLightColor']
            del prediction['attributes']['areaType']
            del prediction['attributes']['laneDirection']
            del prediction['attributes']['laneStyle']
            del prediction['attributes']['laneTypes']
            prediction_data.append(prediction)
        frame_data['prediction'] = prediction_data
        print("Frame {}, GT: {} Boxes, PD: {} Boxes".format(
            idx, len(frame_data['labels']), len(frame_data['prediction'])))
        del frame_data['labels']
        filename = os.path.join(DATASET.PRED_PATH, frame_data['videoName'],
                                os.path.basename(file_path))
        if not os.path.exists(os.path.dirname(filename)):
            os.mkdir(os.path.dirname(filename))
        bh.dump_json(filename, frame_data)
        seq_data.append(filename)
    print("Saving {} frames...".format(len(seq_data)))
    return seq_data
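# The label template comes from bh.init_labels_format(), which is defined
# elsewhere in the repo. Below is a minimal sketch of the fields build_pred()
# relies on; it is an assumption for illustration only (the real helper also
# carries the usual BDD label fields such as 'category' and 'id').
def init_labels_format_sketch():
    return {
        'box2d': {'x1': -1, 'y1': -1, 'x2': -1, 'y2': -1, 'confidence': -1.0},
        'box3d': {'xc': -1, 'yc': -1},
        'poly2d': [],
        'attributes': {
            'trafficLightColor': 'none',
            'areaType': 'unknown',
            'laneDirection': 'unknown',
            'laneStyle': 'unknown',
            'laneTypes': 'unknown',
        },
    }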
# Update input json path and save path
method = method + setting_str
trk_path = '{ROOT}{SESS}_{EP}_{DT}_{PH}_set/{MT}_pd.json'.format(
    **{'ROOT': args.root,
       'SESS': args.session,
       'EP': args.epoch,
       'DT': args.set,
       'PH': args.split,
       'MT': method})
save_path = '{ROOT}{SESS}_{EP}_{DT}_{PH}_set/{MT}/data/'.format(
    **{'ROOT': args.root,
       'SESS': args.session,
       'EP': args.epoch,
       'DT': args.set,
       'PH': args.split,
       'MT': method})

# Load tracked results
trk_result = bh.load_json(trk_path)
print(len(trk_result), len(data_path))

for seq_idx, dpath in enumerate(data_path):
    trk_seq = [n['hypotheses'] for n in trk_result[seq_idx]['frames']]
    det_seq = bh.load_json(dpath)
    convert_app(det_seq, trk_seq,
                os.path.join(save_path, os.path.basename(dpath)))
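# For illustration, with hypothetical arguments args.root='output/',
# args.session='616', args.epoch='030', args.set='gta', args.split='val'
# and method='deep_sort' (after setting_str is appended), the format strings
# above resolve to:
#   trk_path  -> 'output/616_030_gta_val_set/deep_sort_pd.json'
#   save_path -> 'output/616_030_gta_val_set/deep_sort/data/'
# Note that args.root is used without a separator, so it must end with '/'.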
# Load data
print("Load label file from path: {}".format(DATASET.PRED_PATH))
folders, data_path = load_label_path(DATASET.PRED_PATH)

for seq_idx, folder in enumerate(folders):
    pkl_path = '{ROOT}{SESS}_{EP}_{DT}_{PH}_set/' \
               '{SESS}_{EP}_{SQ}_bdd_roipool_output.pkl'.format(
                   **{'ROOT': args.root,
                      'SESS': args.session,
                      'EP': args.epoch,
                      'DT': args.set,
                      'PH': args.split,
                      'SQ': folder})
    save_path = '{ROOT}{SESS}_{EP}_{DT}_{PH}_set/{SQ}_bdd_3d.json'.format(
        **{'ROOT': args.root,
           'SESS': args.session,
           'EP': args.epoch,
           'DT': args.set,
           'PH': args.split,
           'SQ': folder})
    det_pred = load_single_frame_result(pkl_path)
    hypos = []
    for fr_idx, frame_path in enumerate(data_path[folder]):
        det_placeholder = bh.load_json(frame_path)
        hypo = convert_app(det_placeholder, det_pred[fr_idx])
        hypos.append(hypo)
    print("Saving updated tracking results with {} frames at {}...".format(
        len(hypos), save_path))
    bh.dump_json(save_path, hypos)
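# load_single_frame_result() is defined elsewhere in the repo. A minimal
# sketch follows, under the assumption that the roipool output pickle only
# needs to be deserialized into a per-frame list; the real helper may reshape
# or post-process the detections further.
import pickle

def load_single_frame_result_sketch(pkl_path):
    with open(pkl_path, 'rb') as f:
        # One entry per frame, consumed as det_pred[fr_idx] above.
        return pickle.load(f)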
def gta_label(self):
    # Get information at the box level: per-box fields are collected as
    # lists for each image, keyed by image name.
    file_list = load_label_path(os.path.join(DATASET.LABEL_PATH))
    print("{} with {} sequences".format(args.split, len(file_list)))
    data_label_list = []
    re_pattern = re.compile('rec_(.{8})_(.+)_(.+)h(.+)m_(.+)')
    # rec_10090911_clouds_21h53m_x-968y-1487tox2523y214
    # ('10090911', 'clouds', '21', '53', 'x-968y-1487tox2523y214')
    for fidx, filename in enumerate(file_list):
        print(fidx, filename)
        dataset = bh.load_json(filename)
        data_label = {}
        for fr_idx, frame in enumerate(dataset):
            img_name = os.path.join(frame['dset_name'],
                                    str(frame['timestamp']) + '_final.jpg')
            num_boxes = len(frame['object'])
            obj = frame['object']
            data_label[img_name] = {}
            data_label[img_name]['vid_name'] = frame['dset_name']
            # Parse weather and hour of day from the sequence name
            img_log = re_pattern.match(frame['dset_name'])
            img_weather = img_log.group(2)
            img_hour = img_log.group(3)
            data_label[img_name]['weather'] = img_weather
            data_label[img_name]['timeofday'] = bh.get_time_of_day(
                int(img_hour))
            data_label[img_name]['timestamp'] = frame['timestamp']
            data_label[img_name]['fov'] = frame['camera']['fov']
            data_label[img_name]['nearClip'] = frame['camera']['nearClip']
            # Rotation in radians; position relative to the first frame
            data_label[img_name]['pose'] = {
                'rotation': [np.pi * angle / 180.0
                             for angle in frame['pose']['rotation']],
                'position': [p_t - p_0 for (p_t, p_0) in
                             zip(frame['pose']['position'],
                                 dataset[0]['pose']['position'])]}
            data_label[img_name]['img_name'] = img_name
            data_label[img_name]['num_boxes'] = num_boxes
            # Per-box fields, gathered as lists over all boxes in the frame
            data_label[img_name]['pixel'] = [
                obj[i]['n_pixel'] for i in range(num_boxes)]
            data_label[img_name]['class'] = [
                DATASET.CLASS_PARSER[obj[i]['kitti']['type']]
                for i in range(num_boxes)]
            data_label[img_name]['ignore'] = [
                obj[i]['ignore'] for i in range(num_boxes)]
            data_label[img_name]['tracking_id'] = [
                obj[i]['tracking_id'] for i in range(num_boxes)]
            data_label[img_name]['truncated'] = [
                obj[i]['kitti']['truncated'] for i in range(num_boxes)]
            data_label[img_name]['occluded'] = [
                obj[i]['kitti']['occluded'] for i in range(num_boxes)]
            data_label[img_name]['boxes'] = [
                obj[i]['kitti']['bbox'] for i in range(num_boxes)]
            data_label[img_name]['alpha'] = [
                obj[i]['kitti']['alpha'] for i in range(num_boxes)]
            data_label[img_name]['dims'] = [
                obj[i]['kitti']['dimensions'] for i in range(num_boxes)]
            data_label[img_name]['trans'] = [
                obj[i]['kitti']['location'] for i in range(num_boxes)]
            data_label[img_name]['rot_y'] = [
                obj[i]['kitti']['rotation_y'] for i in range(num_boxes)]
        data_label_list.append(data_label)
    with open(DATASET.PKL_FILE, 'wb') as f:
        pickle.dump(data_label_list, f, -1)
    return data_label_list
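# Usage sketch for the pickle written by gta_label() above: DATASET.PKL_FILE
# holds a list with one dict per sequence, each keyed by image name.
with open(DATASET.PKL_FILE, 'rb') as f:
    gta_labels = pickle.load(f)
print('{} sequences loaded'.format(len(gta_labels)))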