def create_attributes():
    """Build the category-definition file for the dataset.

    Writes ``<dataset_dir>/Categories/Road_Objects.json`` containing the
    seven road-object categories as COCO-style records
    (id / name / type / supercategory).
    """
    cfg = ulti.load_json()
    output_dir = os.path.join(cfg['dataset_dir'], 'Categories')
    ulti.make_dir(output_dir)

    category = [
        {"id": 1, "name": "pedestrian", "type": "thing", "supercategory": "person"},
        {"id": 2, "name": "rider", "type": "thing", "supercategory": "person"},
        {"id": 3, "name": "car", "type": "thing", "supercategory": "vehicle"},
        {"id": 4, "name": "truck", "type": "thing", "supercategory": "vehicle"},
        {"id": 5, "name": "bus", "type": "thing", "supercategory": "vehicle"},
        {"id": 6, "name": "motorcycle", "type": "thing", "supercategory": "vehicle"},
        {"id": 7, "name": "bicycle", "type": "thing", "supercategory": "vehicle"},
    ]

    info = {'id': 1, 'name': 'Road_Objects', 'category': category}
    ulti.write_json(info, file=os.path.join(output_dir, info['name'] + '.json'))
def create_test():
    """Assemble the COCO-style test split for the annotated video.

    Reads ``<dataset_dir>/Info/<video>.json``, optionally drops frames that
    carry true ground truth, strips the ``Images`` path prefix from every
    file name, and writes ``<dataset_dir>/RCNN_data/test.json``.
    """
    info = ulti.load_json()
    dir_input = info['dataset_dir']
    dir_output = os.path.join(info['dataset_dir'], 'RCNN_data')
    video = info['annotated_video']
    # When True, frames with manually annotated ground truth are excluded.
    exclude_true_gt_frames = False

    path = os.path.join(dir_input, 'Info', video + '.json')
    dataset = ulti.load_json(path)
    videos = dataset['videos']

    # Fix: the original re-loaded the very same Info JSON a second time,
    # guarded by os.path.isfile — dead code, since the first (unguarded)
    # load of the identical path would already have raised for a missing
    # file.  Reuse the parsed data instead.
    images = []
    if exclude_true_gt_frames:
        for image in dataset['images']:
            if not image['has_gt']:
                images.append(image)
    else:
        images.extend(dataset['images'])

    tq = tqdm.tqdm(total=len(images))
    for image in images:
        tq.update(1)
        # Strip any leading 'Images' directory prefix (both separator styles).
        image['file_name'] = image['file_name'].replace('Images\\\\', '')
        image['file_name'] = image['file_name'].replace('Images/', '')
        image['file_name'] = image['file_name'].replace('Images\\', '')

    categories = ulti.load_json(dir_input + '/Categories/Road_Objects.json')
    categories = categories['category']

    dataset = {
        'categories': categories,
        'annotations': [],  # test split carries no annotations
        'videos': videos,
        'images': images,
    }
    print('test: ', len(images))

    ulti.make_dir(dir_output)
    outfile = ulti.write_json(dataset, file=os.path.join(dir_output, 'test.json'))
def init_data(experiment_type='Entire_dataset', iter=0):
    """Create the working directory layout and the shared info JSON.

    Args:
        experiment_type: name of the top-level experiment folder.
        iter: self-training iteration index.  The parameter name shadows the
            ``iter`` builtin but is kept for caller compatibility.
    """
    root_dir = os.path.join(os.getcwd(), 'CityScapes/val')
    dataset_dir = os.path.join(root_dir, 'Dataset')
    training_dir = os.path.join(root_dir, 'Train')
    dataset_name = 'CityScapes_val'
    annotated_video = dataset_name
    experiment = os.path.join(experiment_type, 'Iter_{}'.format(iter))

    ########################################################
    info = {
        'dataset_dir': dataset_dir,
        'dataset_name': dataset_name,
        'training_dir': training_dir,
        'annotated_video': annotated_video,
        'experiment': experiment,
        'iter': iter,
    }

    ulti.make_dir(os.path.join(dataset_dir, 'Images'))
    ulti.make_dir(training_dir)
    outfile = ulti.write_json(info)
def filter_data(videonames=None):
    """Filter smoothed tubelets: drop short tracks and invalid boxes.

    Reads ``Smooth_label/Tubelet/<video>.json``, removes every track that
    appears in fewer than ``frequency_threshold`` frames, drops boxes that
    fail ``check_condition`` for the frame's image size, and writes the
    survivors under ``Filter_data/``.

    Args:
        videonames: videos to process; defaults to every folder under
            ``<dataset_dir>/Images``.  (Default changed from the mutable
            ``[]`` to ``None`` — truthiness test below makes behavior
            identical.)
    """
    info = ulti.load_json()
    if not videonames:
        videonames = os.listdir(os.path.join(info['dataset_dir'], 'Images'))
    videonames = sorted(videonames)
    tq = tqdm.tqdm(total=len(videonames))
    frequency_threshold = 4  # minimum number of frames a track must span

    for videoname in videonames:
        tq.set_description('Video {}'.format(videoname))
        tq.update(1)
        dir_tubelet = os.path.join(info['dataset_dir'], info['experiment'],
                                   'Smooth_label', 'Tubelet', videoname + '.json')
        ulti.make_dir(os.path.join(info['dataset_dir'], info['experiment'],
                                   'Filter_data', 'Videos', videoname))
        ulti.make_dir(os.path.join(info['dataset_dir'], info['experiment'],
                                   'Filter_data', 'Tubelet'))
        ulti.make_dir(os.path.join(info['dataset_dir'], info['experiment'],
                                   'Filter_data', 'Visualization', videoname))

        with open(dir_tubelet) as f:
            tubelet = json.load(f)
        tubelet_by_frame = tubelet['tubelet_by_frame']
        tubelet_by_id = tubelet['tubelet_by_id']

        # list(...) snapshots the keys: both dicts shrink while we iterate.
        for id in list(tubelet_by_id.keys()):
            tubelet = tubelet_by_id[id]
            if len(tubelet.keys()) < frequency_threshold:
                # Track too short: remove it from both indexes.
                for frame in tubelet.keys():
                    del tubelet_by_frame[frame][id]
                del tubelet_by_id[id]
                continue
            for frame in list(tubelet.keys()):
                img = cv2.imread(os.path.join(info['dataset_dir'], 'Images',
                                              videoname, frame + '.jpg'))
                height, width, channels = img.shape
                bbox = tubelet[frame]
                bbox = check_condition(bbox, height, width)
                if bbox is None:  # box rejected for this image size
                    del tubelet_by_id[id][frame]
                    del tubelet_by_frame[frame][id]
                    if len(tubelet_by_frame[frame].keys()) == 0:
                        del tubelet_by_frame[frame]
                    if len(tubelet_by_id[id].keys()) == 0:
                        del tubelet_by_id[id]

        ulti.write_json({'tubelet_by_frame': tubelet_by_frame,
                         'tubelet_by_id': tubelet_by_id},
                        os.path.join(info['dataset_dir'], info['experiment'],
                                     'Filter_data', 'Tubelet', videoname + '.json'))
        for frame in tubelet_by_frame.keys():
            tubelet = tubelet_by_frame[frame]
            ulti.write_json(tubelet,
                            os.path.join(info['dataset_dir'], info['experiment'],
                                         'Filter_data', 'Videos', videoname,
                                         frame + '.json'))
def smoothen_label(videonames=None):
    """Smooth the category label along each tubelet by majority vote.

    For every track, collect per-frame category ids and scores, pick the
    dominant category via ``most_frequent``, write it back to every frame of
    the track, and save the result under ``Smooth_label/``.

    Args:
        videonames: videos to process; defaults to every folder under
            ``<dataset_dir>/Images``.  (Default changed from the mutable
            ``[]`` to ``None`` — truthiness test below makes behavior
            identical.)
    """
    info = ulti.load_json()
    if not videonames:
        videonames = os.listdir(os.path.join(info['dataset_dir'], 'Images'))
    videonames = sorted(videonames)
    tq = tqdm.tqdm(total=len(videonames))

    for videoname in videonames:
        tq.set_description('Video {}'.format(videoname))
        tq.update(1)
        dir_tubelet = os.path.join(info['dataset_dir'], info['experiment'],
                                   'Tubelet', 'Tubelet', videoname + '.json')
        ulti.make_dir(os.path.join(info['dataset_dir'], info['experiment'],
                                   'Smooth_label', 'Videos', videoname))
        ulti.make_dir(os.path.join(info['dataset_dir'], info['experiment'],
                                   'Smooth_label', 'Tubelet'))
        ulti.make_dir(os.path.join(info['dataset_dir'], info['experiment'],
                                   'Smooth_label', 'Visualization', videoname))

        with open(dir_tubelet) as f:
            tubelet = json.load(f)
        tubelet_by_frame = tubelet['tubelet_by_frame']
        tubelet_by_id = tubelet['tubelet_by_id']

        for id in tubelet_by_id.keys():
            tubelet = tubelet_by_id[id]
            category_ids = []
            category_scores = []
            for frame in tubelet.keys():
                bbox = tubelet[frame]
                category_ids.append(bbox['category_id'])
                category_scores.append(bbox['score'])
            # if len(np.unique(np.array(category_ids))) > 1:
            element, score = most_frequent(category_ids, category_scores)
            # print(frame, id, element, ':', category_ids)
            for frame in tubelet.keys():
                # Only overwrite the score when most_frequent produced a
                # truthy one; the majority category is applied
                # unconditionally.
                # NOTE(review): nesting reconstructed from collapsed source —
                # confirm the category assignments are not also guarded by
                # ``score``.
                if score:
                    tubelet_by_id[id][frame]['score'] = score
                    tubelet_by_frame[frame][id]['score'] = score
                tubelet_by_id[id][frame]['category_id'] = element
                tubelet_by_frame[frame][id]['category_id'] = element

        ulti.write_json({'tubelet_by_frame': tubelet_by_frame,
                         'tubelet_by_id': tubelet_by_id},
                        os.path.join(info['dataset_dir'], info['experiment'],
                                     'Smooth_label', 'Tubelet', videoname + '.json'))
        for frame in tubelet_by_frame.keys():
            tubelet = tubelet_by_frame[frame]
            ulti.write_json(tubelet,
                            os.path.join(info['dataset_dir'], info['experiment'],
                                         'Smooth_label', 'Videos', videoname,
                                         frame + '.json'))
def create_train():
    """Assemble the COCO-style training split from per-frame detection JSONs.

    Walks ``<experiment>/Detection/Json/<video>`` for every video, keeps the
    images whose non-empty annotation file exists, flattens each annotation's
    bbox dict into ``[x, y, w, h]``, assigns sequential annotation ids, and
    writes ``<dataset_dir>/RCNN_data/train.json``.
    """
    info = ulti.load_json()
    dir_input = info['dataset_dir']
    dir_output = os.path.join(info['dataset_dir'], 'RCNN_data')
    video = info['annotated_video']
    # When True, only frames with true ground truth are kept.
    only_use_true_gt = False

    path = os.path.join(dir_input, 'Info', video + '.json')
    dataset = ulti.load_json(path)
    images = dataset['images']
    videos = dataset['videos']

    categories = ulti.load_json(dir_input + '/Categories/Road_Objects.json')
    categories = categories['category']

    dataset = {
        'categories': categories,
        'annotations': [],
        'videos': [],
        'images': []
    }

    # Select the videos to include: all of them when the annotated video is
    # the whole dataset, otherwise just the matching one.
    video_names = []
    video_ids = []
    if video == info['dataset_name']:
        for vid in videos:
            dataset['videos'].append(vid)
            video_names.append(vid['name'])
            video_ids.append(vid['id'])
    else:
        for vid in videos:
            if vid['name'] == video:
                dataset['videos'].append(vid)
                video_names.append(vid['name'])
                video_ids.append(vid['id'])

    # Strip any leading 'Images' directory prefix (both separator styles).
    for image in images:
        image['file_name'] = image['file_name'].replace('Images\\\\', '')
        image['file_name'] = image['file_name'].replace('Images/', '')
        image['file_name'] = image['file_name'].replace('Images\\', '')

    ann_files = []
    list_images = []
    annotations = []
    ins_id = 0  # running annotation id
    tq = tqdm.tqdm(total=len(video_names))
    for id, video in zip(video_ids, video_names):
        tq.update(1)
        for (dirpath, dirnames, filenames) in os.walk(
                os.path.join(info['dataset_dir'], info['experiment'],
                             'Detection', 'Json', video)):
            # First pass: record which annotation files are non-empty.
            ann_temp = []
            img_temp = []
            for file in filenames:
                if file.endswith('.json'):
                    data = ulti.load_json(os.path.join(dirpath, file))
                    if len(data) > 0:
                        ann_files.append(file)
                        ann_temp.append(file)
            img_temp = sorted(img_temp)
            ann_temp = sorted(ann_temp)
            # Second pass: attach annotations to the images of this video.
            for image in images:
                if image['video_id'] == id:
                    file_name = os.path.splitext(
                        os.path.basename(image['file_name']))[0] + '.json'
                    if file_name in ann_temp:
                        if (image['has_gt'] and only_use_true_gt) or not only_use_true_gt:
                            # print(file_name)
                            list_images.append(image)
                            img_temp.append(image)
                            json_file = os.path.splitext(
                                os.path.basename(image['file_name']))[0] + '.json'
                            if json_file in ann_temp:
                                data = ulti.load_json(
                                    os.path.join(dirpath, json_file))
                                for ann in data:
                                    # Flatten bbox dict to COCO [x, y, w, h].
                                    ann['bbox'] = [
                                        ann['bbox']['x'], ann['bbox']['y'],
                                        ann['bbox']['w'], ann['bbox']['h']
                                    ]
                                    ann['id'] = ins_id
                                    ann['image_id'] = image['id']
                                    ins_id += 1
                                annotations.extend(data)
                                tq.set_description('Video {}'.format(
                                    os.path.join(video, file_name)))

    dataset['annotations'] = annotations
    dataset['videos'] = videos
    dataset['images'] = list_images

    ulti.make_dir(dir_output)
    outfile = ulti.write_json(dataset,
                              file=os.path.join(dir_output, 'train.json'))
def create_dataset_info(img_id_start=0, video_id_start=0, ann_id_start=0):
    """Index every video folder and frame image into Info JSON files.

    Scans ``<dataset_dir>/Images/<video>/*.jpg``, builds one record per
    video and per frame (id, file name, width, height), writes a per-video
    ``Info/<video>.json`` and a combined ``Info/<dataset_name>.json``.

    Args:
        img_id_start: first image id to assign.
        video_id_start: first video id to assign.
        ann_id_start: first annotation id (reserved; not used below).
    """
    info = ulti.load_json()
    dataset_dir = os.path.join(info['dataset_dir'])
    dataset_name = info['dataset_name']
    img_id = img_id_start
    video_id = video_id_start
    ann_id = ann_id_start

    dataset = {}
    dataset['id'] = 1
    info = {}
    info['name'] = dataset_name
    info['root_dir'] = dataset_dir + '/'
    info['type'] = 'video'  # 'video' or 'image'
    info['ann_dir'] = ''
    info['extension'] = 'jpg'
    dataset['info'] = info

    # One record per video folder, with its frame count.
    videos = []
    data_dir = os.path.join(info['root_dir'], 'Images')
    folders = [
        f for f in os.listdir(data_dir)
        if os.path.isdir(os.path.join(data_dir, f))
    ]
    for folder in sorted(folders):
        video = {}
        video['id'] = video_id
        video['name'] = folder
        data_dir = os.path.join(info['root_dir'], 'Images', video['name'])
        files = [
            f for f in os.listdir(data_dir)
            if os.path.isfile(os.path.join(data_dir, f))
            and f.endswith(info['extension'])
        ]
        video['n_frames'] = len(files)
        videos.append(video)
        video_id += 1
    dataset['videos'] = videos

    images = []
    tq = tqdm.tqdm(total=len(videos))
    for video in videos:
        tq.update(1)
        data_dir = os.path.join(info['root_dir'], 'Images', video['name'])
        files = [
            f for f in os.listdir(data_dir)
            if os.path.isfile(os.path.join(data_dir, f))
            and f.endswith(info['extension'])
        ]
        sub_images = []
        sub_dataset = copy.deepcopy(dataset)
        for f in sorted(files):
            image = {}
            image['id'] = img_id
            image['has_gt'] = False
            image['video_id'] = video['id']
            image['file_name'] = os.path.join('Images', video['name'], f)
            image['seg_file_name'] = ''
            # Fix: the original decoded every frame to an RGB numpy array
            # (then flipped it to BGR) just to read its shape.  PIL exposes
            # (width, height) from the header without decoding the pixels,
            # and the context manager closes the file handle.
            with Image.open(os.path.join(info['root_dir'],
                                         image['file_name'])) as im:
                image['width'], image['height'] = im.size
            img_id += 1
            images.append(image)
            sub_images.append(image)
            tq.set_description('Video {}'.format(image['file_name']))
        # Per-video Info file.
        sub_dataset['images'] = sub_images
        dir_output = dataset_dir + '/Info'
        ulti.make_dir(dir_output)
        outfile = ulti.write_json(sub_dataset,
                                  file=(dir_output + '/' +
                                        video['name'] + '.json'))

    # Combined Info file for the whole dataset.
    dataset['images'] = images
    dir_output = dataset_dir + '/Info/'
    ulti.make_dir(dir_output)
    outfile = ulti.write_json(dataset,
                              file=(dir_output + '/' + info['name'] + '.json'))
def generate_ann(threshold=None, trackable_threshold=0):
    """Turn raw detector output into thresholded per-frame annotation JSONs.

    Keeps detections whose score passes the per-category threshold, converts
    each to an annotation dict with an integer bbox, and writes one JSON per
    frame under ``<experiment>/Detection/Json/<video>/``.

    Args:
        threshold: per-category score thresholds, indexed by
            ``category_id - 1``; defaults to the tuned 7-category list.
        trackable_threshold: minimum box area (w*h) for a detection to be
            flagged trackable.
    """
    if threshold is None:
        threshold = [0.85, 0.550, 0.950, 0.950, 0.800, 0.800, 0.700]
    info = ulti.load_json()
    video = info['annotated_video']
    dataset_dir = info['dataset_dir']
    output_dir = os.path.join(dataset_dir, info['experiment'], 'Detection')
    ulti.make_dir(output_dir)
    print(output_dir)

    input_path = os.path.join(dataset_dir, info['experiment'],
                              'Raw_Detection', 'bbox.json')
    prediction = ulti.load_json(input_path)
    category = ulti.load_json(
        os.path.join(dataset_dir, 'Categories', 'Road_Objects.json'))
    dataset = ulti.load_json(os.path.join(dataset_dir, 'Info', video + '.json'))

    # Perf fix: group prediction indices by image id in one pass.  The
    # original rescanned the whole prediction list for every image —
    # O(images * predictions).
    pred_by_image = {}
    for i, ann in enumerate(prediction):
        pred_by_image.setdefault(ann['image_id'], []).append(i)

    ann_id = 0
    tq = tqdm.tqdm(total=len(dataset['images']))
    for img in dataset['images']:
        tq.set_description('Video Frame {}'.format(img['id']))
        tq.update(1)
        index = [
            i for i in pred_by_image.get(img['id'], [])
            if prediction[i]['score'] >= threshold[
                prediction[i]['category_id'] - 1]
        ]
        if len(index) > 0:
            # print(img['id'], index)
            annotations = []
            for ind in index:
                ann = {}
                ann['id'] = ann_id
                ann['image_id'] = prediction[ind]['image_id']
                ann['track_id'] = -1  # assigned later by the tracking stage
                ann['category_id'] = prediction[ind]['category_id']
                ann['score'] = prediction[ind]['score']
                ann['second_category_id'] = None
                ann['second_score'] = 0
                ann['third_category_id'] = None
                ann['third_score'] = 0
                ann['iscrowd'] = 0  # always 0
                ann['bbox'] = {
                    'x': int(prediction[ind]['bbox'][0]),
                    'y': int(prediction[ind]['bbox'][1]),
                    'w': int(prediction[ind]['bbox'][2]),
                    'h': int(prediction[ind]['bbox'][3])
                }
                # Boxes below the area threshold are not worth tracking.
                if ann['bbox']['w'] * ann['bbox']['h'] >= trackable_threshold:
                    ann['trackable'] = True
                else:
                    ann['trackable'] = False
                ann['segmentation'] = []
                ann['area'] = 0
                annotations.append(ann)
                ann_id += 1
            if len(annotations) > 0:
                fullpath = os.path.join(
                    dataset_dir, 'Annotations', category['name'],
                    os.path.basename(os.path.dirname(img['file_name'])))
                ulti.make_dir(fullpath)
                fullpath = os.path.join(
                    output_dir, 'Json',
                    os.path.basename(os.path.dirname(img['file_name'])))
                ulti.make_dir(fullpath)
                path = os.path.join(
                    fullpath,
                    os.path.splitext(os.path.basename(img['file_name']))[0]
                    + '.json')
                ulti.write_json(annotations, path)
import tqdm
from shutil import copyfile

if __name__ == "__main__":
    # Stage the raw detector output for the annotated video: copy the
    # trained model into place, then mirror bbox.json into Raw_Detection
    # and load the pieces the downstream steps consume.
    ulti.copy_model()
    info = ulti.load_json()
    video = info['annotated_video']
    input_path = os.path.join(info['training_dir'], video, 'inference',
                              'ro_bdd_test_cocostyle', 'bbox.json')
    dataset_dir = info['dataset_dir']
    output_dir = os.path.join(dataset_dir, info['experiment'], 'Raw_Detection')
    ulti.make_dir(output_dir)
    copyfile(input_path, os.path.join(output_dir, 'bbox.json'))

    prediction = ulti.load_json(input_path)
    category = ulti.load_json(
        os.path.join(dataset_dir, 'Categories', 'Road_Objects.json'))
    dataset = ulti.load_json(os.path.join(dataset_dir, 'Info', video + '.json'))

    list_img_id_dataset = []
    list_img_id_prediction = []
    ann_id = 0
    for ann in prediction:
        list_img_id_prediction.append(ann['image_id'])
    tq = tqdm.tqdm(total=len(dataset['images']))
    # NOTE(review): the script ends here in this chunk — the per-image loop
    # that would consume ``tq`` (cf. main()/generate_ann elsewhere in this
    # file) appears to be missing; confirm against the full source.
def create_train():
    """Assemble the COCO-style training split from tracked tubelets.

    Reads ``<experiment>/Add_instances/Tubelet/<video>.json`` for every
    video, keeps each frame that has at least one tubelet box, flattens the
    bbox dicts to ``[x, y, w, h]``, assigns sequential annotation ids, and
    writes ``<dataset_dir>/RCNN_data/train.json``.
    """
    info = ulti.load_json()
    dir_input = info['dataset_dir']
    dir_output = os.path.join(info['dataset_dir'], 'RCNN_data')
    video = info['annotated_video']
    # When True, only frames with true ground truth are kept.
    only_use_true_gt = False

    path = os.path.join(dir_input, 'Info', video + '.json')
    dataset = ulti.load_json(path)
    images = dataset['images']
    videos = dataset['videos']

    categories = ulti.load_json(dir_input + '/Categories/Road_Objects.json')
    categories = categories['category']

    dataset = {
        'categories': categories,
        'annotations': [],
        'videos': [],
        'images': []
    }

    video_names = []
    video_ids = []
    for vid in videos:
        dataset['videos'].append(vid)
        video_names.append(vid['name'])
        video_ids.append(vid['id'])

    # Index images by '<video>/<frame-stem>' for O(1) lookup from tubelets,
    # stripping any leading 'Images' prefix (both separator styles) first.
    dict_images = {}
    for image in images:
        image['file_name'] = image['file_name'].replace('Images\\\\', '')
        image['file_name'] = image['file_name'].replace('Images/', '')
        image['file_name'] = image['file_name'].replace('Images\\', '')
        key = os.path.join(
            os.path.basename(os.path.dirname(image['file_name'])),
            os.path.splitext(os.path.basename(image['file_name']))[0])
        dict_images[key] = image

    list_images = []
    annotations = []
    ins_id = 0  # running annotation id
    tq = tqdm.tqdm(total=len(video_names))
    for id, video in zip(video_ids, video_names):
        # print(video)
        tq.update(1)
        dir_tubelet = os.path.join(info['dataset_dir'], info['experiment'],
                                   'Add_instances', 'Tubelet',
                                   video + '.json')
        with open(dir_tubelet) as f:
            tubelet = json.load(f)
        tubelet_by_frame = tubelet['tubelet_by_frame']
        tubelet_by_id = tubelet['tubelet_by_id']

        for filename in tubelet_by_frame.keys():
            tubelet = tubelet_by_frame[filename]
            if len(tubelet.keys()) > 0:
                image = dict_images[os.path.join(video, filename)]
                if (image['has_gt'] and only_use_true_gt) or not only_use_true_gt:
                    list_images.append(image)
                    for key in tubelet.keys():
                        ann = tubelet[key]
                        # Flatten bbox dict to COCO [x, y, w, h].
                        ann['bbox'] = [
                            ann['bbox']['x'], ann['bbox']['y'],
                            ann['bbox']['w'], ann['bbox']['h']
                        ]
                        ann['id'] = ins_id
                        ann['image_id'] = image['id']
                        ins_id += 1
                        annotations.append(ann)
                    tq.set_description('Video {}'.format(
                        os.path.join(video, filename)))

    dataset['annotations'] = annotations
    dataset['videos'] = videos
    dataset['images'] = list_images
    print('train: ', len(list_images))

    ulti.make_dir(dir_output)
    outfile = ulti.write_json(dataset,
                              file=os.path.join(dir_output, 'train.json'))
def main(model_path=''):
    """Stage raw detections and emit unthresholded per-frame annotations.

    Copies the trained model and its ``bbox.json`` into ``Raw_Detection``,
    then converts every raw detection into an annotation dict (no score
    threshold, always trackable) and writes one JSON per frame under
    ``Raw_Detection/Raw/Json/<video>/``.

    Args:
        model_path: forwarded to ``ulti.copy_model``.
    """
    ulti.copy_model(model_path=model_path)
    info = ulti.load_json()
    video = info['annotated_video']
    input_path = os.path.join(info['training_dir'], video, 'inference',
                              'ro_bdd_test_cocostyle', 'bbox.json')
    dataset_dir = info['dataset_dir']
    output_dir = os.path.join(dataset_dir, info['experiment'], 'Raw_Detection')
    ulti.make_dir(output_dir)
    copyfile(input_path, os.path.join(output_dir, 'bbox.json'))

    prediction = ulti.load_json(input_path)
    category = ulti.load_json(
        os.path.join(dataset_dir, 'Categories', 'Road_Objects.json'))
    dataset = ulti.load_json(os.path.join(dataset_dir, 'Info', video + '.json'))

    # Perf fix (consistent with generate_ann): group prediction indices by
    # image id in one pass instead of rescanning the whole prediction list
    # for every image.
    pred_by_image = {}
    for i, ann in enumerate(prediction):
        pred_by_image.setdefault(ann['image_id'], []).append(i)

    ann_id = 0
    tq = tqdm.tqdm(total=len(dataset['images']))
    for img in dataset['images']:
        tq.set_description('Video Frame {}'.format(img['id']))
        tq.update(1)
        index = pred_by_image.get(img['id'], [])
        if len(index) > 0:
            # print(img['id'], index)
            annotations = []
            for ind in index:
                ann = {}
                ann['id'] = ann_id
                ann['image_id'] = prediction[ind]['image_id']
                ann['track_id'] = -1  # assigned later by the tracking stage
                ann['category_id'] = prediction[ind]['category_id']
                ann['score'] = prediction[ind]['score']
                ann['iscrowd'] = 0  # always 0
                ann['bbox'] = {
                    'x': int(prediction[ind]['bbox'][0]),
                    'y': int(prediction[ind]['bbox'][1]),
                    'w': int(prediction[ind]['bbox'][2]),
                    'h': int(prediction[ind]['bbox'][3])
                }
                ann['trackable'] = True
                ann['segmentation'] = []
                ann['area'] = 0
                annotations.append(ann)
                ann_id += 1
            if len(annotations) > 0:
                fullpath = os.path.join(
                    dataset_dir, 'Annotations', category['name'],
                    os.path.basename(os.path.dirname(img['file_name'])))
                ulti.make_dir(fullpath)
                fullpath = os.path.join(
                    output_dir, 'Raw', 'Json',
                    os.path.basename(os.path.dirname(img['file_name'])))
                ulti.make_dir(fullpath)
                path = os.path.join(
                    fullpath,
                    os.path.splitext(os.path.basename(img['file_name']))[0]
                    + '.json')
                ulti.write_json(annotations, path)
def run_trackers():
    """Extend filtered tubelets by running a single-object tracker forward.

    For each sufficiently long track, starts a tracker from the last known
    box and adds tracked boxes to frames the track is missing, as long as
    the tracked box stays consistent (IoU with its predecessor above
    ``iou_threshold``) and does not collide with an existing box
    (IoU above ``iou_threshold2``).  Results go under ``Add_instances/``.

    NOTE(review): control-flow nesting was reconstructed from collapsed
    source — verify against the original before relying on edge cases.
    """
    info = ulti.load_json()
    videonames = os.listdir(os.path.join(info['dataset_dir'], 'Images'))
    videonames = sorted(videonames)
    tq = tqdm.tqdm(total=len(videonames))

    alpha = 0.5
    frequency_threshold = 5   # only extend tracks spanning >= this many frames
    iou_threshold = 0.3       # min IoU with previous tracked box to continue
    iou_threshold2 = 0.8      # IoU with an existing box that stops tracking
    border = 8                # start only if box is away from image borders

    for videoname in videonames:
        tq.set_description('Video {}'.format(videoname))
        tq.update(1)
        dir_tubelet = os.path.join(info['dataset_dir'], info['experiment'],
                                   'Filter_data', 'Tubelet',
                                   videoname + '.json')
        ulti.make_dir(os.path.join(info['dataset_dir'], info['experiment'],
                                   'Add_instances', 'Videos', videoname))
        ulti.make_dir(os.path.join(info['dataset_dir'], info['experiment'],
                                   'Add_instances', 'Tubelet'))

        with open(dir_tubelet) as f:
            tubelet = json.load(f)
        tubelet_by_frame = tubelet['tubelet_by_frame']
        tubelet_by_id = tubelet['tubelet_by_id']

        filenames = os.listdir(os.path.join(info['dataset_dir'], 'Images',
                                            videoname))
        filenames = sorted(filenames)
        frames = []
        for i, frame in enumerate(filenames):
            frame = os.path.splitext(frame)[0]
            frames.append(frame)

        # Image size read once from the first frame of the video.
        img = cv2.imread(os.path.join(info['dataset_dir'], 'Images',
                                      videoname, filenames[0]))
        height, width, channels = img.shape

        for id in list(tubelet_by_id.keys()):
            tubelet = tubelet_by_id[id]
            if len(tubelet.keys()) >= frequency_threshold:
                keys = sorted(list(tubelet.keys()))
                # forward process
                prev_idx = frames.index(keys[0])
                tracker = None
                for idx in range(frames.index(keys[1]), len(frames)):
                    if frames[idx] not in keys:
                        # begin tracking
                        if tracker is None and frames[prev_idx] in tubelet.keys():
                            prev_bbox = tubelet[frames[prev_idx]]['bbox']
                            # Only start from boxes clear of the left/right
                            # image borders.
                            if prev_bbox['x'] >= width / border and \
                                    (prev_bbox['x'] + prev_bbox['w']) <= \
                                    width * (border - 1) / border:
                                tracker = init_tracker(
                                    init_image_file=os.path.join(
                                        info['dataset_dir'], 'Images',
                                        videoname, filenames[prev_idx]),
                                    init_rect=[prev_bbox['x'], prev_bbox['y'],
                                               prev_bbox['w'], prev_bbox['h']],
                                    device='cuda')
                                prev_rect = [prev_bbox['x'], prev_bbox['y'],
                                             prev_bbox['w'], prev_bbox['h']]
                                iou = -1  # sentinel: no IoU computed yet
                        if tracker:
                            rect, tracker = run_tracker(
                                [os.path.join(info['dataset_dir'], 'Images',
                                              videoname, filenames[idx])],
                                tracker, visualization=False)
                            rect = rect[0]
                            if iou < 0 or iou >= iou_threshold:
                                iou = bb_intersection_over_union(rect, prev_rect)
                                prev_rect = rect
                            # Stop if the tracked box overlaps an existing one.
                            if os.path.splitext(filenames[idx])[0] in tubelet_by_frame.keys():
                                other_boxes = tubelet_by_frame[
                                    os.path.splitext(filenames[idx])[0]]
                                other_iou = 0
                                for other_key in other_boxes.keys():
                                    other_box = other_boxes[other_key]['bbox']
                                    other_rect = [other_box['x'], other_box['y'],
                                                  other_box['w'], other_box['h']]
                                    other_iou = bb_intersection_over_union(
                                        rect, other_rect)
                                    if other_iou >= iou_threshold2:
                                        break
                                if other_iou >= iou_threshold2:
                                    break
                            if iou >= iou_threshold:
                                # Clone an existing entry of this track and
                                # overwrite its geometry with the tracked box.
                                tubelet[frames[idx]] = copy.deepcopy(
                                    tubelet[list(tubelet.keys())[0]])
                                tubelet[frames[idx]]['bbox'] = {
                                    'x': rect[0], 'y': rect[1],
                                    'w': rect[2], 'h': rect[3]}
                                tubelet[frames[idx]]['track_bbox_xyxy'] = \
                                    xywh_to_xyxy(rect)
                                tubelet[frames[idx]]['add_by_tracker'] = True
                                if frames[idx] not in tubelet_by_frame.keys():
                                    tubelet_by_frame[frames[idx]] = {}
                                tubelet_by_frame[frames[idx]][id] = \
                                    tubelet[frames[idx]]
                            else:
                                break
                    else:
                        # stop tracking
                        if tracker:
                            tracker = None
                        prev_idx = idx

        ulti.write_json({'tubelet_by_frame': tubelet_by_frame,
                         'tubelet_by_id': tubelet_by_id},
                        os.path.join(info['dataset_dir'], info['experiment'],
                                     'Add_instances', 'Tubelet',
                                     videoname + '.json'))
        for frame in tubelet_by_frame.keys():
            tubelet = tubelet_by_frame[frame]
            ulti.write_json(tubelet,
                            os.path.join(info['dataset_dir'],
                                         info['experiment'], 'Add_instances',
                                         'Videos', videoname,
                                         frame + '.json'))
def init_tracklet():
    """Merge forward and reverse DeepSort tracks into per-video tracklets.

    Loads per-frame track JSONs from ``Track/DeepSort`` (forward) and
    ``Track/DeepSort_Reverse`` (reverse), greedily matches reverse ids to
    forward ids via IoU between the first forward frame and the last
    reverse frame, folds matched reverse boxes into the forward tracklets,
    and writes ``Tracklet/<video>.json``.

    NOTE(review): nesting reconstructed from collapsed source — verify the
    placement of the reverse-scan early ``break`` and of the final write.
    """
    info = ulti.load_json()
    ulti.make_dir(os.path.join(info['dataset_dir'], info['experiment'],
                               'Tracklet'))
    videonames = os.listdir(os.path.join(info['dataset_dir'], 'Images'))
    videonames = sorted(videonames)
    tq = tqdm.tqdm(total=len(videonames))

    for videoname in videonames:
        tq.set_description('Video {}'.format(videoname))
        tq.update(1)
        dir_track = os.path.join(info['dataset_dir'], info['experiment'],
                                 'Track', 'DeepSort', 'Json', videoname)
        dir_track_reverse = os.path.join(info['dataset_dir'],
                                         info['experiment'], 'Track',
                                         'DeepSort_Reverse', 'Json',
                                         videoname)

        # --- forward pass: index boxes by frame and by track id ---
        tracklet_by_id = {}
        tracklet_by_frame = {}
        list_images = os.listdir(dir_track)
        list_images = sorted(list_images)
        for i, image in enumerate(list_images):
            filename = os.path.splitext(image)[0]
            if filename not in tracklet_by_frame.keys():
                tracklet_by_frame[filename] = {}
            with open(os.path.join(dir_track, image)) as f:
                data = json.load(f)
            for x in data:
                tracklet_by_frame[filename][x['id']] = {}
                tracklet_by_frame[filename][x['id']]['track_bbox_xyxy'] = x['bbox_xyxy']
                if x['id'] not in tracklet_by_id.keys():
                    tracklet_by_id[x['id']] = {}
                tracklet_by_id[x['id']][filename] = {}
                tracklet_by_id[x['id']][filename]['track_bbox_xyxy'] = x['bbox_xyxy']

        tracklet_by_id = dict(sorted(tracklet_by_id.items()))
        for key in tracklet_by_id.keys():
            tracklet_by_id[key] = dict(sorted(tracklet_by_id[key].items()))
        tracklet_by_frame = dict(sorted(tracklet_by_frame.items()))
        for key in tracklet_by_frame.keys():
            tracklet_by_frame[key] = dict(sorted(tracklet_by_frame[key].items()))

        # --- reverse pass: same indexing, stopping at the first frame that
        # is already covered by the forward pass ---
        tracklet_by_id2 = {}
        tracklet_by_frame2 = {}
        list_reverse_images = os.listdir(dir_track_reverse)
        list_reverse_images = sorted(list_reverse_images)
        for i, image in enumerate(list_reverse_images):
            filename = os.path.splitext(image)[0]
            if filename not in tracklet_by_frame2.keys():
                tracklet_by_frame2[filename] = {}
            with open(os.path.join(dir_track_reverse, image)) as f:
                data = json.load(f)
            for x in data:
                tracklet_by_frame2[filename][x['id']] = {}
                tracklet_by_frame2[filename][x['id']]['track_bbox_xyxy'] = x['bbox_xyxy']
                if x['id'] not in tracklet_by_id2.keys():
                    tracklet_by_id2[x['id']] = {}
                tracklet_by_id2[x['id']][filename] = {}
                tracklet_by_id2[x['id']][filename]['track_bbox_xyxy'] = x['bbox_xyxy']
            if image in list_images:
                break

        tracklet_by_id2 = dict(sorted(tracklet_by_id2.items()))
        for key in tracklet_by_id2.keys():
            tracklet_by_id2[key] = dict(sorted(tracklet_by_id2[key].items()))
        tracklet_by_frame2 = dict(sorted(tracklet_by_frame2.items()))
        for key in tracklet_by_frame2.keys():
            tracklet_by_frame2[key] = dict(sorted(tracklet_by_frame2[key].items()))

        # --- greedy IoU matching between the first forward frame and the
        # last reverse frame, merging matched reverse tracks into forward ---
        if len(list(tracklet_by_frame.keys())) > 0 and len(list(tracklet_by_frame2.keys())) > 0:
            boxes1 = np.zeros((len(tracklet_by_frame[list(tracklet_by_frame.keys())[0]]), 4))
            for i, box in enumerate(list(tracklet_by_frame[list(tracklet_by_frame.keys())[0]].items())):
                boxes1[i, :] = box[1]['track_bbox_xyxy']
            boxes2 = np.zeros((len(tracklet_by_frame2[list(tracklet_by_frame2.keys())[-1]]), 4))
            for i, box in enumerate(list(tracklet_by_frame2[list(tracklet_by_frame2.keys())[-1]].items())):
                boxes2[i, :] = box[1]['track_bbox_xyxy']

            iou = torchvision.ops.box_iou(torch.from_numpy(boxes1),
                                          torch.from_numpy(boxes2))
            iou = iou.detach().numpy()
            iou_threshold = 0.5
            iou[iou < iou_threshold] = 0

            max_iou = 1
            while max_iou != 0:
                max_iou = np.amax(iou)
                if max_iou == 0:
                    break
                (i, j) = np.where(iou == max_iou)
                i = i[0]
                j = j[0]
                # i boxes1 tracklet_by_frame[list(tracklet_by_frame.keys())[0]]
                # j boxes2 tracklet_by_frame2[list(tracklet_by_frame2.keys())[-1]]
                id1 = list(tracklet_by_frame[list(tracklet_by_frame.keys())[0]].keys())[i]
                id2 = list(tracklet_by_frame2[list(tracklet_by_frame2.keys())[-1]].keys())[j]
                # if videoname == '000017' and id1 == 4:
                #     print(videoname, max_iou, id1, id2)
                for k in tracklet_by_id2[id2].keys():
                    tracklet_by_id[id1][k] = tracklet_by_id2[id2][k]
                    tracklet_by_id2[id2][k] = None
                for k in tracklet_by_frame2.keys():
                    if k not in tracklet_by_frame.keys():
                        tracklet_by_frame[k] = {}
                    if id2 in tracklet_by_frame2[k].keys():
                        tracklet_by_frame[k][id1] = tracklet_by_frame2[k][id2]
                        tracklet_by_frame2[k][id2] = None
                # Matched pair consumed: zero out its row and column.
                iou[i, :] = 0
                iou[:, j] = 0

        tracklet_by_id = dict(sorted(tracklet_by_id.items()))
        for key in tracklet_by_id.keys():
            tracklet_by_id[key] = dict(sorted(tracklet_by_id[key].items()))
        tracklet_by_frame = dict(sorted(tracklet_by_frame.items()))
        for key in tracklet_by_frame.keys():
            tracklet_by_frame[key] = dict(sorted(tracklet_by_frame[key].items()))

        ulti.write_json({'tracklet_by_id': tracklet_by_id,
                         'tracklet_by_frame': tracklet_by_frame},
                        file=os.path.join(info['dataset_dir'],
                                          info['experiment'], 'Tracklet',
                                          videoname + '.json'))
def create_tubelet():
    """Associate per-frame detections with tracklets to form tubelets.

    For each frame that has tracklet boxes, loads the detection JSON,
    greedily matches detections to tracked boxes by IoU (threshold 0.7),
    stamps each matched detection with its ``track_id`` and tracked box,
    and writes per-frame and per-video tubelet JSONs under ``Tubelet/``.
    """
    info = ulti.load_json()
    ulti.make_dir(os.path.join(info['dataset_dir'], info['experiment'],
                               'Tubelet', 'Tubelet'))
    videonames = os.listdir(os.path.join(info['dataset_dir'], 'Images'))
    videonames = sorted(videonames)
    tq = tqdm.tqdm(total=len(videonames))

    for videoname in videonames:
        tq.set_description('Video {}'.format(videoname))
        tq.update(1)
        ulti.make_dir(os.path.join(info['dataset_dir'], info['experiment'],
                                   'Tubelet', 'Videos', videoname))
        ulti.make_dir(os.path.join(info['dataset_dir'], info['experiment'],
                                   'Tubelet', 'Visualization', videoname))
        dir_tracklet = os.path.join(info['dataset_dir'], info['experiment'],
                                    'Tracklet', videoname + '.json')
        dir_detection = os.path.join(info['dataset_dir'], info['experiment'],
                                     'Detection', 'Json', videoname)

        with open(dir_tracklet) as f:
            tracklet_data = json.load(f)
        tracklet_by_id = tracklet_data['tracklet_by_id']
        tracklet_by_frame = tracklet_data['tracklet_by_frame']

        tubelet_by_frame = {}
        tubelet_by_id = {}
        for frame in tracklet_by_frame.keys():
            tubelet_data = []
            with open(os.path.join(dir_detection, frame + '.json')) as f:
                detection_data = json.load(f)

            # Detections as xyxy rows.
            detection_bbox = np.zeros((len(detection_data), 4))
            for i, box in enumerate(detection_data):
                detection_bbox[i, 0] = box['bbox']['x']
                detection_bbox[i, 1] = box['bbox']['y']
                detection_bbox[i, 2] = box['bbox']['x'] + box['bbox']['w']
                detection_bbox[i, 3] = box['bbox']['y'] + box['bbox']['h']

            # Tracked boxes (already xyxy) as rows.
            track_bbox = np.zeros((len(tracklet_by_frame[frame]), 4))
            for i, key in enumerate(tracklet_by_frame[frame].keys()):
                box = tracklet_by_frame[frame][key]
                track_bbox[i, :] = box['track_bbox_xyxy']

            if detection_bbox.shape[0] == 0 or track_bbox.shape[0] == 0:
                continue

            iou = torchvision.ops.box_iou(torch.from_numpy(detection_bbox),
                                          torch.from_numpy(track_bbox))
            iou = iou.detach().numpy()
            iou_threshold = 0.7
            iou[iou < iou_threshold] = 0

            # Greedy matching: repeatedly take the best remaining pair.
            max_iou = 1
            while max_iou != 0:
                max_iou = np.amax(iou)
                if max_iou == 0:
                    break
                (i, j) = np.where(iou == max_iou)
                i = i[0]
                j = j[0]
                detection_data[i]['track_id'] = \
                    list(tracklet_by_frame[frame].keys())[j]
                detection_data[i]['track_bbox_xyxy'] = \
                    tracklet_by_frame[frame][detection_data[i]['track_id']]['track_bbox_xyxy']
                tubelet_data.append(detection_data[i])
                iou[i, :] = 0
                iou[:, j] = 0

            if len(tubelet_data) > 0:
                ulti.write_json(tubelet_data,
                                os.path.join(info['dataset_dir'],
                                             info['experiment'], 'Tubelet',
                                             'Videos', videoname,
                                             frame + '.json'))
                tubelet_by_frame[frame] = {}
                for x in tubelet_data:
                    tubelet_by_frame[frame][x['track_id']] = x
                    if x['track_id'] not in tubelet_by_id:
                        tubelet_by_id[x['track_id']] = {}
                    tubelet_by_id[x['track_id']][frame] = x

        ulti.write_json({'tubelet_by_frame': tubelet_by_frame,
                         'tubelet_by_id': tubelet_by_id},
                        os.path.join(info['dataset_dir'], info['experiment'],
                                     'Tubelet', 'Tubelet',
                                     videoname + '.json'))