Example #1
def copy_data():
    info = ulti.load_json()
    dir_images = os.path.join(info['dataset_dir'], '..', 'Raw')
    dir_output = os.path.join(info['dataset_dir'], 'Images')
    videos = os.listdir(dir_images)
    for video in sorted(videos):
        if video != 'frankfurt':
            for id in range(1000):
                prex = (video + '_%06d') % id
                files = glob(os.path.join(dir_images, video, prex) + '_*')
                if len(files) > 0:
                    files = sorted(files)
                    if not os.path.exists(os.path.join(dir_output, prex)):
                        os.makedirs(os.path.join(dir_output, prex))
                    for file in files:
                        img = cv2.imread(file)  # glob() already returns the full path
                        print(
                            os.path.join(
                                dir_output, prex,
                                os.path.splitext(os.path.basename(file))[0] +
                                '.jpg'))
                        cv2.imwrite(
                            os.path.join(
                                dir_output, prex,
                                os.path.splitext(os.path.basename(file))[0] +
                                '.jpg'), img)
        else:
            for id in range(1000):
                prex = video + '_%06d' % id
                files = glob(os.path.join(dir_images, video, prex) + '_*')
                files = [os.path.splitext(file)[0] for file in files]
                files = list(dict.fromkeys(files))
                if len(files) > 0:
                    files = sorted(files)
                    count = 0
                    for file in files:
                        # print('{} {}'.format(count//30, count%30))
                        new_prex = prex + '_%06d' % ((count) // 30)
                        if (count % 30) == 0:
                            if not os.path.exists(
                                    os.path.join(dir_output, new_prex)):
                                os.makedirs(os.path.join(dir_output, new_prex))
                        # glob() already returned the full path stem
                        if os.path.isfile(file + '.jpg'):
                            img = cv2.imread(file + '.jpg')
                        else:
                            img = cv2.imread(file + '.png')

                        cv2.imwrite(
                            os.path.join(
                                dir_output, new_prex,
                                os.path.splitext(os.path.basename(file))[0] +
                                '.jpg'), img)
                        files_tmp = os.listdir(
                            os.path.join(dir_output, new_prex))
                        count += 1
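
These examples rely on a project-local ulti module that is not included in this excerpt. Below is a minimal sketch of the helpers they call; the no-argument behaviour of load_json() and the 'info.json' filename are assumptions, not the original implementation. ulti.copy_model() is also used later but its behaviour cannot be inferred from the excerpt, so it is not sketched.

import json
import os

_DEFAULT_INFO = 'info.json'  # assumed location of the project-level info file


def load_json(file=_DEFAULT_INFO):
    # Read and return the parsed JSON content of `file`.
    with open(file) as f:
        return json.load(f)


def write_json(data, file):
    # Serialize `data` to `file` as JSON and return the path that was written.
    with open(file, 'w') as f:
        json.dump(data, f, indent=2)
    return file


def make_dir(path):
    # Create `path` (and any missing parents) if it does not already exist.
    os.makedirs(path, exist_ok=True)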
Example #2
def create_test():
    info = ulti.load_json()
    dir_input = info['dataset_dir']
    dir_output = os.path.join(info['dataset_dir'], 'RCNN_data')
    video = info['annotated_video']
    exclude_true_gt_frames = False
    path = os.path.join(dir_input, 'Info', video + '.json')
    dataset = ulti.load_json(path)
    # pprint(dataset)
    videos = dataset['videos']
    images = []
    if os.path.isfile(path):
        data = ulti.load_json(path)
        if exclude_true_gt_frames:
            for image in data['images']:
                if not image['has_gt']:
                    images.append(image)
        else:
            images.extend(data['images'])

    tq = tqdm.tqdm(total=len(images))
    for image in images:
        tq.update(1)
        image['file_name'] = image['file_name'].replace('Images\\\\', '')
        image['file_name'] = image['file_name'].replace('Images/', '')
        image['file_name'] = image['file_name'].replace('Images\\', '')

    categories = ulti.load_json(dir_input + '/Categories/Road_Objects.json')
    categories = categories['category']
    dataset = {
        'categories': categories,
        'annotations': [],
        'videos': [],
        'images': []
    }
    dataset['videos'] = videos
    dataset['images'] = images
    print('test: ', len(images))
    # pprint(dataset)
    ulti.make_dir(dir_output)
    outfile = ulti.write_json(dataset,
                              file=os.path.join(dir_output, 'test.json'))
Example #3
def filter_data(videonames=[]):
    info = ulti.load_json()
    if not videonames:
        videonames = os.listdir(os.path.join(info['dataset_dir'], 'Images'))
    videonames = sorted(videonames)
    tq = tqdm.tqdm(total=len(videonames))
    frequency_threshold = 4
    for videoname in videonames:
        tq.set_description('Video {}'.format(videoname))
        tq.update(1)
        dir_tubelet = os.path.join(info['dataset_dir'], info['experiment'], 'Smooth_label', 'Tubelet',
                                   videoname + '.json')
        ulti.make_dir(os.path.join(info['dataset_dir'], info['experiment'], 'Filter_data', 'Videos', videoname))
        ulti.make_dir(os.path.join(info['dataset_dir'], info['experiment'], 'Filter_data', 'Tubelet'))
        ulti.make_dir(os.path.join(info['dataset_dir'], info['experiment'], 'Filter_data', 'Visualization', videoname))

        with open(dir_tubelet) as f:
            tubelet = json.load(f)
        tubelet_by_frame = tubelet['tubelet_by_frame']
        tubelet_by_id = tubelet['tubelet_by_id']

        for id in list(tubelet_by_id.keys()):
            tubelet = tubelet_by_id[id]
            if len(tubelet.keys()) < frequency_threshold:
                for frame in tubelet.keys():
                    del tubelet_by_frame[frame][id]
                del tubelet_by_id[id]
                continue

            for frame in list(tubelet.keys()):
                img = cv2.imread(os.path.join(info['dataset_dir'], 'Images', videoname, frame + '.jpg'))
                height, width, channels = img.shape

                bbox = tubelet[frame]
                bbox = check_condition(bbox, height, width)

                if bbox is None:
                    del tubelet_by_id[id][frame]
                    del tubelet_by_frame[frame][id]

                if len(tubelet_by_frame[frame].keys()) == 0:
                    del tubelet_by_frame[frame]

            if len(tubelet_by_id[id].keys()) == 0:
                del tubelet_by_id[id]

        ulti.write_json({'tubelet_by_frame': tubelet_by_frame, 'tubelet_by_id': tubelet_by_id},
                        os.path.join(info['dataset_dir'], info['experiment'], 'Filter_data', 'Tubelet',
                                     videoname + '.json'))

        for frame in tubelet_by_frame.keys():
            tubelet = tubelet_by_frame[frame]
            ulti.write_json(tubelet,
                            os.path.join(info['dataset_dir'], info['experiment'], 'Filter_data', 'Videos', videoname,
                                         frame + '.json'))
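
filter_data() depends on a check_condition() helper that is not part of this excerpt. A hedged sketch is given below: it keeps only annotations whose box has positive size and lies inside the image, which is one plausible reading of the filtering step; the project's actual criteria may differ.

def check_condition(ann, height, width):
    # Return `ann` if its box is a valid region inside a height x width image,
    # otherwise None (the caller then drops the detection from the tubelet).
    box = ann['bbox']
    x, y, w, h = box['x'], box['y'], box['w'], box['h']
    if w <= 0 or h <= 0:
        return None  # degenerate box
    if x < 0 or y < 0 or x + w > width or y + h > height:
        return None  # box falls outside the frame
    return ann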
Example #4
def create_attributes():
    info = ulti.load_json()
    output_dir = os.path.join(info['dataset_dir'], 'Categories')
    ulti.make_dir(output_dir)
    info = {}
    info['id'] = 1
    info['name'] = 'Road_Objects'
    category = [
        {
            "id": 1,
            "name": "pedestrian",
            "type": "thing",
            "supercategory": "person"
        },
        {
            "id": 2,
            "name": "rider",
            "type": "thing",
            "supercategory": "person"
        },
        {
            "id": 3,
            "name": "car",
            "type": "thing",
            "supercategory": "vehicle"
        },
        {
            "id": 4,
            "name": "truck",
            "type": "thing",
            "supercategory": "vehicle"
        },
        {
            "id": 5,
            "name": "bus",
            "type": "thing",
            "supercategory": "vehicle"
        },
        {
            "id": 6,
            "name": "motorcycle",
            "type": "thing",
            "supercategory": "vehicle"
        },
        {
            "id": 7,
            "name": "bicycle",
            "type": "thing",
            "supercategory": "vehicle"
        },
    ]
    info['category'] = category
    outfile = ulti.write_json(info,
                              file=os.path.join(output_dir,
                                                info['name'] + '.json'))
Example #5
def main(model_path=''):
    ulti.copy_model(model_path=model_path)

    info = ulti.load_json()
    training_dir = info['training_dir']
    dataset_name = info['annotated_video']

    dir_input = os.path.join(training_dir, dataset_name)
    if os.path.exists(dir_input):
        shutil.rmtree(dir_input, ignore_errors=True)

    src = os.path.join(
        info['dataset_dir'],
        '../Initial_model/e2e_faster_rcnn_R_50_FPN_Xconv1fc_1x_gn.yaml')
    dst = os.path.join(info['training_dir'],
                       'e2e_faster_rcnn_R_50_FPN_Xconv1fc_1x_gn.yaml')
    shutil.copyfile(src, dst)
Example #6
def smoothen_label(videonames=[]):
    info = ulti.load_json()
    if not videonames:
        videonames = os.listdir(os.path.join(info['dataset_dir'], 'Images'))
    videonames = sorted(videonames)
    tq = tqdm.tqdm(total=len(videonames))
    for videoname in videonames:
        tq.set_description('Video {}'.format(videoname))
        tq.update(1)

        dir_tubelet = os.path.join(info['dataset_dir'], info['experiment'], 'Tubelet', 'Tubelet', videoname + '.json')

        ulti.make_dir(os.path.join(info['dataset_dir'], info['experiment'], 'Smooth_label', 'Videos', videoname))
        ulti.make_dir(os.path.join(info['dataset_dir'], info['experiment'], 'Smooth_label', 'Tubelet'))
        ulti.make_dir(os.path.join(info['dataset_dir'], info['experiment'], 'Smooth_label', 'Visualization', videoname))

        with open(dir_tubelet) as f:
            tubelet = json.load(f)
        tubelet_by_frame = tubelet['tubelet_by_frame']
        tubelet_by_id = tubelet['tubelet_by_id']

        for id in tubelet_by_id.keys():
            tubelet = tubelet_by_id[id]
            category_ids = []
            category_scores = []
            for frame in tubelet.keys():
                bbox = tubelet[frame]
                category_ids.append(bbox['category_id'])
                category_scores.append(bbox['score'])
            # if len(np.unique(np.array(category_ids))) > 1:
            element, score = most_frequent(category_ids, category_scores)
            # print(frame, id, element, ':', category_ids)
            for frame in tubelet.keys():
                if score:
                    tubelet_by_id[id][frame]['score'] = score
                    tubelet_by_frame[frame][id]['score'] = score
                tubelet_by_id[id][frame]['category_id'] = element
                tubelet_by_frame[frame][id]['category_id'] = element

        ulti.write_json({'tubelet_by_frame': tubelet_by_frame, 'tubelet_by_id': tubelet_by_id},
                        os.path.join(info['dataset_dir'], info['experiment'], 'Smooth_label', 'Tubelet', videoname + '.json'))

        for frame in tubelet_by_frame.keys():
            tubelet = tubelet_by_frame[frame]
            ulti.write_json(tubelet,
                            os.path.join(info['dataset_dir'], info['experiment'], 'Smooth_label', 'Videos', videoname, frame + '.json'))
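
smoothen_label() votes on a single category per tubelet through a most_frequent() helper that is not shown here. A minimal sketch follows, assuming it returns the majority category_id and the mean score of the detections carrying that id; the original may weight the vote differently.

from collections import Counter


def most_frequent(category_ids, category_scores):
    # Majority vote over a tubelet's per-frame category ids; the returned
    # score is the mean detection score of the winning category.
    if not category_ids:
        return None, None
    winner, _ = Counter(category_ids).most_common(1)[0]
    scores = [s for c, s in zip(category_ids, category_scores) if c == winner]
    return winner, sum(scores) / len(scores)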
Example #7
def convert_args():
    info = ulti.load_json()
    parser = argparse.ArgumentParser(
        description="PyTorch Object Detection Training")
    parser.add_argument("--local_rank", type=int, default=0)
    parser.add_argument(
        "--config-file",
        default=os.path.join(info['training_dir'],
                             'e2e_faster_rcnn_R_50_FPN_Xconv1fc_1x_gn.yaml'),
        metavar="FILE",
        help="path to config file",
        type=str,
    )
    parser.add_argument(
        "--sub-dataset",
        dest="sub_dataset",
        help="Video Name",
        default=info['annotated_video'],
        # default=None,
        type=str,
    )
    parser.add_argument(
        "--epochs",
        dest="epochs",
        help="Number of Epochs",
        default=25,
        type=int,
    )
    parser.add_argument(
        "--scale",
        dest="scale",
        help="Scale of Epochs",
        default=0.5,
        type=float,
    )
    parser.add_argument(
        "--train",
        dest="train",
        help="Train model",
        default=False,
        type=bool,  # caveat: bool() turns any non-empty CLI string, even "False", into True
    )
    parser.add_argument(
        "--test",
        dest="test",
        help="Test the final model",
        default=False,
        type=bool,
    )
    parser.add_argument(
        "--visualize-loss",
        dest="visualize_loss",
        help="Draw loss function",
        default="",  # "visdom", "tensorboardx", "tensorboard_logger"
        type=str,
    )
    parser.add_argument("--visualize-results",
                        dest="visualize_results",
                        help="Visualize results",
                        default=False,
                        type=bool)
    parser.add_argument("--only-visualization",
                        dest="only_visualization",
                        help="Only visualize results, not evaluate results",
                        default=True,
                        type=bool)
    parser.add_argument("--category",
                        dest="category",
                        help="dataset having categories",
                        default="ro",
                        type=str)
    parser.add_argument(
        "--visualization-title",
        dest="vis_title",
        help="Visualization Title",
        default=None,
        type=str,
    )

    parser.add_argument(
        "--ckpt",
        dest="ckpt",
        help=
        "The path to the checkpoint for test, default is the latest checkpoint.",
        default=None,
    )

    parser.add_argument(
        "--copy-weight-from-head-box",
        dest="copy_weight_from_head_box",
        help=
        "Copy weight from ROI head box to other branches if they are initialized",
        default=False,
        type=bool)

    args, _ = parser.parse_known_args()

    num_gpus = int(
        os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1
    args.distributed = num_gpus > 1

    if args.distributed:
        torch.cuda.set_device(args.local_rank)
        torch.distributed.init_process_group(backend="nccl",
                                             init_method="env://")
        synchronize()
    return args
Example #8
def main(args):
    seed_torch()
    info = ulti.load_json()

    num_gpus = get_num_gpus()
    args.config_file = os.path.join(
        info['training_dir'], 'e2e_faster_rcnn_R_50_FPN_Xconv1fc_1x_gn.yaml')

    cfg.merge_from_file(args.config_file)
    cfg.defrost()
    cfg.OUTPUT_DIR = os.path.join(info['training_dir'], args.sub_dataset)
    cfg.MODEL.WEIGHT = os.path.join(info['dataset_dir'], info['experiment'],
                                    'Detector',
                                    'Iter{}.pth'.format(info['iter']))
    cfg.SOLVER.IMS_PER_BATCH = num_gpus * 4
    cfg.TEST.IMS_PER_BATCH = num_gpus * 16
    cfg.SOLVER.BASE_LR = 0.002
    cfg.freeze()

    mkdir(cfg.OUTPUT_DIR)

    if args.sub_dataset is None:
        args.sub_dataset = ""

    if args.vis_title is None:
        args.vis_title = os.path.basename(cfg.OUTPUT_DIR)

    logger = setup_logger("maskrcnn_benchmark", cfg.OUTPUT_DIR, get_rank())
    logger.info("Using {} GPUs".format(num_gpus))
    logger.info(args)

    logger.info("Collecting env info (might take some time)")
    # logger.info("\n" + collect_env_info())

    DatasetCatalog = None
    train_dataset = cfg.DATASETS.TRAIN[0]
    test_dataset = cfg.DATASETS.TEST[0]
    paths_catalog = import_file("maskrcnn_benchmark.config.paths_catalog",
                                cfg.PATHS_CATALOG, True)

    if args.sub_dataset != "":
        DatasetCatalog = paths_catalog.DatasetCatalog

        DatasetCatalog.DATASETS[train_dataset]['img_dir'] = os.path.join(
            info['dataset_dir'], 'Images')
        DatasetCatalog.DATASETS[train_dataset]['ann_file'] = os.path.join(
            info['dataset_dir'], 'RCNN_data', 'train.json')

        DatasetCatalog.DATASETS[test_dataset]['img_dir'] = os.path.join(
            info['dataset_dir'], 'Images')
        DatasetCatalog.DATASETS[test_dataset]['ann_file'] = os.path.join(
            info['dataset_dir'], 'RCNN_data', 'test.json')

        data = json.load(
            open(DatasetCatalog.DATASETS[train_dataset]['ann_file']))
    else:
        data = json.load(
            open(paths_catalog.DatasetCatalog.DATASETS[train_dataset]
                 ['ann_file']))

    iters_per_epoch = len(data['images'])
    iters_per_epoch = math.ceil(iters_per_epoch / cfg.SOLVER.IMS_PER_BATCH)
    args.iters_per_epoch = iters_per_epoch

    cfg.defrost()
    cfg.SOLVER.MAX_ITER = round(args.epochs * args.scale * iters_per_epoch)
    cfg.SOLVER.STEPS = (round(8 * args.scale * iters_per_epoch),
                        round(11 * args.scale * iters_per_epoch),
                        round(16 * args.scale * iters_per_epoch))
    cfg.freeze()

    # logger.info("Loaded configuration file {}".format(args.config_file))
    with open(args.config_file, "r") as cf:
        config_str = "\n" + cf.read()
        # logger.info(config_str)
    # logger.info("Running with config:\n{}".format(cfg))

    # logger.info(DatasetCatalog)

    output_config_path = os.path.join(cfg.OUTPUT_DIR, 'config.yml')
    logger.info("Saving config into: {}".format(output_config_path))
    # save overloaded model config in the output directory
    save_config(cfg, output_config_path)

    if args.train:
        args.skip_train = False
        logger.info(args)
        model = network.train(cfg, args, DatasetCatalog)

    if args.test:
        network.test(cfg, args, model=None, DatasetCatalog=DatasetCatalog)
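
The original file's __main__ block is not included in the excerpt; a typical wiring of the two functions above, assuming they live in the same training script, would be:

if __name__ == "__main__":
    args = convert_args()
    main(args)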
Example #9
def create_train():
    info = ulti.load_json()
    dir_input = info['dataset_dir']
    dir_output = os.path.join(info['dataset_dir'], 'RCNN_data')
    video = info['annotated_video']
    only_use_true_gt = False
    path = os.path.join(dir_input, 'Info', video + '.json')

    dataset = ulti.load_json(path)
    images = dataset['images']
    videos = dataset['videos']

    categories = ulti.load_json(dir_input + '/Categories/Road_Objects.json')
    categories = categories['category']

    dataset = {
        'categories': categories,
        'annotations': [],
        'videos': [],
        'images': []
    }

    video_names = []
    video_ids = []
    if video == info['dataset_name']:
        for vid in videos:
            dataset['videos'].append(vid)
            video_names.append(vid['name'])
            video_ids.append(vid['id'])
    else:
        for vid in videos:
            if vid['name'] == video:
                dataset['videos'].append(vid)
                video_names.append(vid['name'])
                video_ids.append(vid['id'])

    for image in images:
        image['file_name'] = image['file_name'].replace('Images\\\\', '')
        image['file_name'] = image['file_name'].replace('Images/', '')
        image['file_name'] = image['file_name'].replace('Images\\', '')

    ann_files = []
    list_images = []
    annotations = []
    ins_id = 0
    tq = tqdm.tqdm(total=len(video_names))
    for id, video in zip(video_ids, video_names):
        tq.update(1)
        for (dirpath, dirnames, filenames) in os.walk(
                os.path.join(info['dataset_dir'], info['experiment'],
                             'Detection', 'Json', video)):
            ann_temp = []
            img_temp = []

            for file in filenames:
                if file.endswith('.json'):
                    data = ulti.load_json(os.path.join(dirpath, file))
                    if len(data) > 0:
                        ann_files.append(file)
                        ann_temp.append(file)

            img_temp = sorted(img_temp)
            ann_temp = sorted(ann_temp)
            for image in images:
                if image['video_id'] == id:
                    file_name = os.path.splitext(
                        os.path.basename(image['file_name']))[0] + '.json'
                    if file_name in ann_temp:
                        if (image['has_gt']
                                and only_use_true_gt) or not only_use_true_gt:
                            # print(file_name)
                            list_images.append(image)
                            img_temp.append(image)

                            json_file = os.path.splitext(
                                os.path.basename(
                                    image['file_name']))[0] + '.json'

                            if json_file in ann_temp:
                                data = ulti.load_json(
                                    os.path.join(dirpath, json_file))
                                for ann in data:
                                    ann['bbox'] = [
                                        ann['bbox']['x'], ann['bbox']['y'],
                                        ann['bbox']['w'], ann['bbox']['h']
                                    ]
                                    ann['id'] = ins_id
                                    ann['image_id'] = image['id']
                                    ins_id += 1
                                annotations.extend(data)
                                tq.set_description('Video {}'.format(
                                    os.path.join(video, file_name)))

    dataset['annotations'] = annotations
    dataset['videos'] = videos
    dataset['images'] = list_images
    ulti.make_dir(dir_output)
    outfile = ulti.write_json(dataset,
                              file=os.path.join(dir_output, 'train.json'))
Example #10
def create_dataset_info(img_id_start=0, video_id_start=0, ann_id_start=0):
    info = ulti.load_json()
    dataset_dir = os.path.join(info['dataset_dir'])
    dataset_name = info['dataset_name']

    img_id = img_id_start
    video_id = video_id_start
    ann_id = ann_id_start

    dataset = {}
    dataset['id'] = 1

    info = {}
    info['name'] = dataset_name
    info['root_dir'] = dataset_dir + '/'
    info['type'] = 'video'  # 'video' or 'image'
    info['ann_dir'] = ''
    info['extension'] = 'jpg'
    dataset['info'] = info

    videos = []
    data_dir = os.path.join(info['root_dir'], 'Images')
    folders = [
        f for f in os.listdir(data_dir)
        if os.path.isdir(os.path.join(data_dir, f))
    ]

    for folder in sorted(folders):
        video = {}
        video['id'] = video_id
        video['name'] = folder
        data_dir = os.path.join(info['root_dir'], 'Images', video['name'])
        files = [
            f for f in os.listdir(data_dir)
            if os.path.isfile(os.path.join(data_dir, f))
            and f.endswith(info['extension'])
        ]
        video['n_frames'] = len(files)
        videos.append(video)
        video_id += 1
    dataset['videos'] = videos

    images = []
    tq = tqdm.tqdm(total=len(videos))
    for video in videos:
        tq.update(1)
        data_dir = os.path.join(info['root_dir'], 'Images', video['name'])
        files = [
            f for f in os.listdir(data_dir)
            if os.path.isfile(os.path.join(data_dir, f))
            and f.endswith(info['extension'])
        ]
        sub_images = []
        sub_dataset = copy.deepcopy(dataset)
        for f in sorted(files):
            image = {}
            image['id'] = img_id
            image['has_gt'] = False
            image['video_id'] = video['id']
            image['file_name'] = os.path.join('Images', video['name'], f)
            image['seg_file_name'] = ''
            img = Image.open(os.path.join(info['root_dir'],
                                          image['file_name'])).convert("RGB")
            img = np.array(img)[:, :, [2, 1, 0]]
            img = img.copy()
            image['width'] = img.shape[1]
            image['height'] = img.shape[0]
            img_id += 1
            images.append(image)
            sub_images.append(image)

            tq.set_description('Video {}'.format(image['file_name']))

        sub_dataset['images'] = sub_images

        dir_output = dataset_dir + '/Info'
        ulti.make_dir(dir_output)
        outfile = ulti.write_json(sub_dataset,
                                  file=(dir_output + '/' + video['name'] +
                                        '.json'))

    dataset['images'] = images

    dir_output = dataset_dir + '/Info/'
    ulti.make_dir(dir_output)
    outfile = ulti.write_json(dataset,
                              file=(dir_output + '/' + info['name'] + '.json'))
Example #11
def generate_ann(threshold=None, trackable_threshold=0):

    if threshold is None:
        # per-class score thresholds indexed by category_id - 1
        # (pedestrian, rider, car, truck, bus, motorcycle, bicycle)
        threshold = [0.85, 0.55, 0.95, 0.95, 0.80, 0.80, 0.70]

    info = ulti.load_json()
    video = info['annotated_video']
    dataset_dir = info['dataset_dir']

    output_dir = os.path.join(dataset_dir, info['experiment'], 'Detection')
    ulti.make_dir(output_dir)

    print(output_dir)

    input_path = os.path.join(dataset_dir, info['experiment'], 'Raw_Detection',
                              'bbox.json')

    prediction = ulti.load_json(input_path)
    category = ulti.load_json(
        os.path.join(dataset_dir, 'Categories', 'Road_Objects.json'))
    dataset = ulti.load_json(os.path.join(dataset_dir, 'Info',
                                          video + '.json'))

    list_img_id_dataset = []
    list_img_id_prediction = []
    ann_id = 0
    for ann in prediction:
        list_img_id_prediction.append(ann['image_id'])

    tq = tqdm.tqdm(total=len(dataset['images']))
    for img in dataset['images']:
        list_img_id_dataset.append(img['id'])
        tq.set_description('Video Frame {}'.format(img['id']))
        tq.update(1)
        index = [
            i for i, x in enumerate(list_img_id_prediction)
            if x == img['id'] and prediction[i]['score'] >= threshold[
                prediction[i]['category_id'] - 1]
        ]
        if len(index) > 0:
            # print(img['id'], index)
            annotations = []
            for ind in index:
                ann = {}
                ann['id'] = ann_id
                ann['image_id'] = prediction[ind]['image_id']
                ann['track_id'] = -1
                ann['category_id'] = prediction[ind]['category_id']
                ann['score'] = prediction[ind]['score']
                ann['second_category_id'] = None
                ann['second_score'] = 0
                ann['third_category_id'] = None
                ann['third_score'] = 0
                ann['iscrowd'] = 0  # always 0
                ann['bbox'] = {
                    'x': int(prediction[ind]['bbox'][0]),
                    'y': int(prediction[ind]['bbox'][1]),
                    'w': int(prediction[ind]['bbox'][2]),
                    'h': int(prediction[ind]['bbox'][3])
                }
                if ann['bbox']['w'] * ann['bbox']['h'] >= trackable_threshold:
                    ann['trackable'] = True
                else:
                    ann['trackable'] = False
                ann['segmentation'] = []
                ann['area'] = 0
                ann['score'] = prediction[ind]['score']
                annotations.append(ann)
                ann_id += 1
            if len(annotations) > 0:
                fullpath = os.path.join(
                    dataset_dir, 'Annotations', category['name'],
                    os.path.basename(os.path.dirname(img['file_name'])))
                ulti.make_dir(fullpath)
                fullpath = os.path.join(
                    output_dir, 'Json',
                    os.path.basename(os.path.dirname(img['file_name'])))
                ulti.make_dir(fullpath)
                path = os.path.join(
                    fullpath,
                    os.path.splitext(os.path.basename(img['file_name']))[0] +
                    '.json')
                ulti.write_json(annotations, path)
Example #12
import os
import ulti
import tqdm
from shutil import copyfile

if __name__ == "__main__":
    ulti.copy_model()

    info = ulti.load_json()

    video = info['annotated_video']
    input_path = os.path.join(info['training_dir'], video, 'inference',
                              'ro_bdd_test_cocostyle', 'bbox.json')

    dataset_dir = info['dataset_dir']

    output_dir = os.path.join(dataset_dir, info['experiment'], 'Raw_Detection')
    ulti.make_dir(output_dir)
    copyfile(input_path, os.path.join(output_dir, 'bbox.json'))

    prediction = ulti.load_json(input_path)
    category = ulti.load_json(
        os.path.join(dataset_dir, 'Categories', 'Road_Objects.json'))
    dataset = ulti.load_json(os.path.join(dataset_dir, 'Info',
                                          video + '.json'))

    list_img_id_dataset = []
    list_img_id_prediction = []
    ann_id = 0
    for ann in prediction:
        list_img_id_prediction.append(ann['image_id'])
Example #13
import os
import ulti
import shutil

if __name__ == "__main__":
    ulti.copy_model()

    info = ulti.load_json()
    training_dir = info['training_dir']
    dataset_name = info['annotated_video']

    dir_input = os.path.join(training_dir, dataset_name)
    if os.path.exists(dir_input):
        shutil.rmtree(dir_input)

    src = os.path.join(
        info['dataset_dir'],
        '../Initial_model/e2e_faster_rcnn_R_50_FPN_Xconv1fc_1x_gn.yaml')
    dst = os.path.join(info['training_dir'],
                       'e2e_faster_rcnn_R_50_FPN_Xconv1fc_1x_gn.yaml')
    shutil.copyfile(src, dst)
Example #14
def main(videonames=[]):
    info = ulti.load_json()
    if not videonames:
        videonames = os.listdir(os.path.join(info['dataset_dir'], 'Images'))
    videonames = sorted(videonames)

    output_kf_bbox = False

    if info['annotated_video'] == info['dataset_name']:
        tq = tqdm.tqdm(total=len(videonames))
        for videoname in videonames:
            tq.set_description('Video {}'.format(videoname))
            tq.update(1)
            tracker = DeepSortTracker(output_kf_bbox=output_kf_bbox)
            tracker.track(os.path.join(info['dataset_dir'], 'Images',
                                       videoname),
                          os.path.join(info['dataset_dir'], info['experiment'],
                                       'Detection', 'Json', videoname),
                          None,
                          os.path.join(info['dataset_dir'], info['experiment'],
                                       'Track', 'DeepSort', 'Json', videoname),
                          visualization=False,
                          tq_display=False,
                          reverse=False)
    else:
        videoname = info['annotated_video']
        tracker = DeepSortTracker(output_kf_bbox=output_kf_bbox)
        tracker.track(os.path.join(info['dataset_dir'], 'Images', videoname),
                      os.path.join(info['dataset_dir'], info['experiment'],
                                   'Detection', 'Json', videoname),
                      None,
                      os.path.join(info['dataset_dir'], info['experiment'],
                                   'Track', 'DeepSort', 'Json', videoname),
                      visualization=False,
                      tq_display=True,
                      reverse=False)

    if info['annotated_video'] == info['dataset_name']:
        tq = tqdm.tqdm(total=len(videonames))
        for videoname in videonames:
            tq.set_description('Video {}'.format(videoname))
            tq.update(1)
            tracker = DeepSortTracker(output_kf_bbox=output_kf_bbox)
            tracker.track(os.path.join(info['dataset_dir'], 'Images',
                                       videoname),
                          os.path.join(info['dataset_dir'], info['experiment'],
                                       'Detection', 'Json', videoname),
                          None,
                          os.path.join(info['dataset_dir'], info['experiment'],
                                       'Track', 'DeepSort_Reverse', 'Json',
                                       videoname),
                          visualization=False,
                          tq_display=False,
                          reverse=True)
    else:
        videoname = info['annotated_video']
        tracker = DeepSortTracker(output_kf_bbox=output_kf_bbox)
        tracker.track(os.path.join(info['dataset_dir'], 'Images', videoname),
                      os.path.join(info['dataset_dir'], info['experiment'],
                                   'Detection', 'Json', videoname),
                      None,
                      os.path.join(info['dataset_dir'], info['experiment'],
                                   'Track', 'DeepSort_Reverse', 'Json',
                                   videoname),
                      visualization=False,
                      tq_display=True,
                      reverse=True)
Example #15
def create_train():
    info = ulti.load_json()
    dir_input = info['dataset_dir']
    dir_output = os.path.join(info['dataset_dir'], 'RCNN_data')
    video = info['annotated_video']
    only_use_true_gt = False
    path = os.path.join(dir_input, 'Info', video + '.json')

    dataset = ulti.load_json(path)
    images = dataset['images']
    videos = dataset['videos']

    categories = ulti.load_json(dir_input + '/Categories/Road_Objects.json')
    categories = categories['category']

    dataset = {
        'categories': categories,
        'annotations': [],
        'videos': [],
        'images': []
    }

    video_names = []
    video_ids = []
    for vid in videos:
        dataset['videos'].append(vid)
        video_names.append(vid['name'])
        video_ids.append(vid['id'])

    dict_images = {}
    for image in images:
        image['file_name'] = image['file_name'].replace('Images\\\\', '')
        image['file_name'] = image['file_name'].replace('Images/', '')
        image['file_name'] = image['file_name'].replace('Images\\', '')
        key = os.path.join(
            os.path.basename(os.path.dirname(image['file_name'])),
            os.path.splitext(os.path.basename(image['file_name']))[0])
        dict_images[key] = image

    list_images = []
    annotations = []
    ins_id = 0
    tq = tqdm.tqdm(total=len(video_names))
    for id, video in zip(video_ids, video_names):
        # print(video)
        tq.update(1)

        dir_tubelet = os.path.join(info['dataset_dir'], info['experiment'],
                                   'Add_instances', 'Tubelet', video + '.json')
        with open(dir_tubelet) as f:
            tubelet = json.load(f)
        tubelet_by_frame = tubelet['tubelet_by_frame']
        tubelet_by_id = tubelet['tubelet_by_id']

        for filename in tubelet_by_frame.keys():
            tubelet = tubelet_by_frame[filename]
            if len(tubelet.keys()) > 0:
                image = dict_images[os.path.join(video, filename)]
                if (image['has_gt']
                        and only_use_true_gt) or not only_use_true_gt:
                    list_images.append(image)

                    for key in tubelet.keys():
                        ann = tubelet[key]
                        ann['bbox'] = [
                            ann['bbox']['x'], ann['bbox']['y'],
                            ann['bbox']['w'], ann['bbox']['h']
                        ]
                        ann['id'] = ins_id
                        ann['image_id'] = image['id']
                        ins_id += 1
                        annotations.append(ann)
                    tq.set_description('Video {}'.format(
                        os.path.join(video, filename)))
    dataset['annotations'] = annotations
    dataset['videos'] = videos
    dataset['images'] = list_images
    print('train: ', len(list_images))
    ulti.make_dir(dir_output)
    outfile = ulti.write_json(dataset,
                              file=os.path.join(dir_output, 'train.json'))
Example #16
def main(model_path=''):
    ulti.copy_model(model_path=model_path)

    info = ulti.load_json()

    video = info['annotated_video']
    input_path = os.path.join(info['training_dir'], video, 'inference',
                              'ro_bdd_test_cocostyle', 'bbox.json')

    dataset_dir = info['dataset_dir']

    output_dir = os.path.join(dataset_dir, info['experiment'], 'Raw_Detection')
    ulti.make_dir(output_dir)
    copyfile(input_path, os.path.join(output_dir, 'bbox.json'))

    prediction = ulti.load_json(input_path)
    category = ulti.load_json(
        os.path.join(dataset_dir, 'Categories', 'Road_Objects.json'))
    dataset = ulti.load_json(os.path.join(dataset_dir, 'Info',
                                          video + '.json'))

    list_img_id_dataset = []
    list_img_id_prediction = []
    ann_id = 0
    for ann in prediction:
        list_img_id_prediction.append(ann['image_id'])

    tq = tqdm.tqdm(total=len(dataset['images']))
    for img in dataset['images']:
        list_img_id_dataset.append(img['id'])
        tq.set_description('Video Frame {}'.format(img['id']))
        tq.update(1)
        index = [
            i for i, x in enumerate(list_img_id_prediction) if x == img['id']
        ]
        if len(index) > 0:
            # print(img['id'], index)
            annotations = []
            for ind in index:
                ann = {}
                ann['id'] = ann_id
                ann['image_id'] = prediction[ind]['image_id']
                ann['track_id'] = -1
                ann['category_id'] = prediction[ind]['category_id']
                ann['score'] = prediction[ind]['score']
                ann['iscrowd'] = 0  # always 0
                ann['bbox'] = {
                    'x': int(prediction[ind]['bbox'][0]),
                    'y': int(prediction[ind]['bbox'][1]),
                    'w': int(prediction[ind]['bbox'][2]),
                    'h': int(prediction[ind]['bbox'][3])
                }
                ann['trackable'] = True
                ann['segmentation'] = []
                ann['area'] = 0
                annotations.append(ann)
                ann_id += 1
            if len(annotations) > 0:
                fullpath = os.path.join(
                    dataset_dir, 'Annotations', category['name'],
                    os.path.basename(os.path.dirname(img['file_name'])))
                ulti.make_dir(fullpath)
                fullpath = os.path.join(
                    output_dir, 'Raw', 'Json',
                    os.path.basename(os.path.dirname(img['file_name'])))
                ulti.make_dir(fullpath)
                path = os.path.join(
                    fullpath,
                    os.path.splitext(os.path.basename(img['file_name']))[0] +
                    '.json')
                ulti.write_json(annotations, path)
Example #17
def run_trackers():
    info = ulti.load_json()
    videonames = os.listdir(os.path.join(info['dataset_dir'], 'Images'))
    videonames = sorted(videonames)
    tq = tqdm.tqdm(total=len(videonames))
    alpha = 0.5
    frequency_threshold = 5
    iou_threshold = 0.3
    iou_threshold2 = 0.8
    border = 8
    for videoname in videonames:
        tq.set_description('Video {}'.format(videoname))
        tq.update(1)

        dir_tubelet = os.path.join(info['dataset_dir'], info['experiment'], 'Filter_data', 'Tubelet',
                                   videoname + '.json')

        ulti.make_dir(os.path.join(info['dataset_dir'], info['experiment'], 'Add_instances', 'Videos', videoname))
        ulti.make_dir(os.path.join(info['dataset_dir'], info['experiment'], 'Add_instances', 'Tubelet'))

        with open(dir_tubelet) as f:
            tubelet = json.load(f)
        tubelet_by_frame = tubelet['tubelet_by_frame']
        tubelet_by_id = tubelet['tubelet_by_id']

        filenames = os.listdir(os.path.join(info['dataset_dir'], 'Images', videoname))
        filenames = sorted(filenames)
        frames = []
        for i, frame in enumerate(filenames):
            frame = os.path.splitext(frame)[0]
            frames.append(frame)

        img = cv2.imread(os.path.join(info['dataset_dir'], 'Images', videoname, filenames[0]))
        height, width, channels = img.shape

        for id in list(tubelet_by_id.keys()):
            tubelet = tubelet_by_id[id]
            if len(tubelet.keys()) >= frequency_threshold:
                keys = sorted(list(tubelet.keys()))

                # forward process
                prev_idx = frames.index(keys[0])
                tracker = None
                for idx in range(frames.index(keys[1]), len(frames)):
                    if frames[idx] not in keys:
                        # begin tracking
                        if tracker is None and frames[prev_idx] in tubelet.keys():
                            prev_bbox = tubelet[frames[prev_idx]]['bbox']

                            if prev_bbox['x'] >= width / border and (prev_bbox['x'] + prev_bbox['w']) <= width * (
                                    border - 1) / border:
                                tracker = init_tracker(
                                    init_image_file=os.path.join(info['dataset_dir'], 'Images', videoname,
                                                                 filenames[prev_idx]),
                                    init_rect=[prev_bbox['x'], prev_bbox['y'], prev_bbox['w'], prev_bbox['h']],
                                    device='cuda')
                                prev_rect = [prev_bbox['x'], prev_bbox['y'], prev_bbox['w'], prev_bbox['h']]
                                iou = -1

                        if tracker:
                            rect, tracker = run_tracker(
                                [os.path.join(info['dataset_dir'], 'Images', videoname, filenames[idx])],
                                tracker, visualization=False)
                            rect = rect[0]
                            if iou < 0 or iou >= iou_threshold:
                                iou = bb_intersection_over_union(rect, prev_rect)
                            prev_rect = rect

                            if os.path.splitext(filenames[idx])[0] in tubelet_by_frame.keys():
                                other_boxes = tubelet_by_frame[os.path.splitext(filenames[idx])[0]]
                                other_iou = 0
                                for other_key in other_boxes.keys():
                                    other_box = other_boxes[other_key]['bbox']
                                    other_rect = [other_box['x'], other_box['y'], other_box['w'], other_box['h']]
                                    other_iou = bb_intersection_over_union(rect, other_rect)
                                    if other_iou >= iou_threshold2:
                                        break
                                if other_iou >= iou_threshold2:
                                    break

                            if iou >= iou_threshold:
                                tubelet[frames[idx]] = copy.deepcopy(tubelet[list(tubelet.keys())[0]])
                                tubelet[frames[idx]]['bbox'] = {'x': rect[0],
                                                                'y': rect[1],
                                                                'w': rect[2],
                                                                'h': rect[3]}
                                tubelet[frames[idx]]['track_bbox_xyxy'] = xywh_to_xyxy(rect)
                                tubelet[frames[idx]]['add_by_tracker'] = True

                                if frames[idx] not in tubelet_by_frame.keys():
                                    tubelet_by_frame[frames[idx]] = {}
                                tubelet_by_frame[frames[idx]][id] = tubelet[frames[idx]]
                            else:
                                break
                    else:
                        # stop tracking
                        if tracker:
                            tracker = None
                    prev_idx = idx

        ulti.write_json({'tubelet_by_frame': tubelet_by_frame, 'tubelet_by_id': tubelet_by_id},
                        os.path.join(info['dataset_dir'], info['experiment'], 'Add_instances', 'Tubelet',
                                     videoname + '.json'))

        for frame in tubelet_by_frame.keys():
            tubelet = tubelet_by_frame[frame]
            ulti.write_json(tubelet,
                            os.path.join(info['dataset_dir'], info['experiment'], 'Add_instances', 'Videos', videoname,
                                         frame + '.json'))
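
run_trackers() uses two small geometry helpers that are not part of the excerpt. The sketches below assume boxes in [x, y, w, h] order, which is how run_trackers() builds them; the project's own implementations may differ in detail.

def xywh_to_xyxy(box):
    # Convert [x, y, w, h] to [x1, y1, x2, y2].
    x, y, w, h = box
    return [x, y, x + w, y + h]


def bb_intersection_over_union(box_a, box_b):
    # IoU of two [x, y, w, h] boxes; returns 0.0 when they do not overlap.
    ax1, ay1, ax2, ay2 = xywh_to_xyxy(box_a)
    bx1, by1, bx2, by2 = xywh_to_xyxy(box_b)
    inter_w = max(0, min(ax2, bx2) - max(ax1, bx1))
    inter_h = max(0, min(ay2, by2) - max(ay1, by1))
    inter = inter_w * inter_h
    union = (ax2 - ax1) * (ay2 - ay1) + (bx2 - bx1) * (by2 - by1) - inter
    return inter / union if union > 0 else 0.0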
Example #18
def init_tracklet():
    info = ulti.load_json()
    ulti.make_dir(os.path.join(info['dataset_dir'], info['experiment'], 'Tracklet'))
    videonames = os.listdir(os.path.join(info['dataset_dir'], 'Images'))
    videonames = sorted(videonames)
    tq = tqdm.tqdm(total=len(videonames))
    for videoname in videonames:
        tq.set_description('Video {}'.format(videoname))
        tq.update(1)

        dir_track = os.path.join(info['dataset_dir'], info['experiment'], 'Track', 'DeepSort', 'Json', videoname)
        dir_track_reverse = os.path.join(info['dataset_dir'], info['experiment'], 'Track', 'DeepSort_Reverse', 'Json', videoname)

        tracklet_by_id = {}
        tracklet_by_frame = {}
        list_images = os.listdir(dir_track)
        list_images = sorted(list_images)
        for i, image in enumerate(list_images):
            filename = os.path.splitext(image)[0]
            if filename not in tracklet_by_frame.keys():
                tracklet_by_frame[filename] = {}
            with open(os.path.join(dir_track, image)) as f:
                data = json.load(f)
                for x in data:
                    tracklet_by_frame[filename][x['id']] = {}
                    tracklet_by_frame[filename][x['id']]['track_bbox_xyxy'] = x['bbox_xyxy']
                    if x['id'] not in tracklet_by_id.keys():
                        tracklet_by_id[x['id']] = {}
                    tracklet_by_id[x['id']][filename] = {}
                    tracklet_by_id[x['id']][filename]['track_bbox_xyxy'] = x['bbox_xyxy']
        tracklet_by_id = dict(sorted(tracklet_by_id.items()))
        for key in tracklet_by_id.keys():
            tracklet_by_id[key] = dict(sorted(tracklet_by_id[key].items()))
        tracklet_by_frame = dict(sorted(tracklet_by_frame.items()))
        for key in tracklet_by_frame.keys():
            tracklet_by_frame[key] = dict(sorted(tracklet_by_frame[key].items()))

        tracklet_by_id2 = {}
        tracklet_by_frame2 = {}
        list_reverse_images = os.listdir(dir_track_reverse)
        list_reverse_images = sorted(list_reverse_images)
        for i, image in enumerate(list_reverse_images):
            filename = os.path.splitext(image)[0]
            if filename not in tracklet_by_frame2.keys():
                tracklet_by_frame2[filename] = {}
            with open(os.path.join(dir_track_reverse, image)) as f:
                data = json.load(f)
                for x in data:
                    tracklet_by_frame2[filename][x['id']] = {}
                    tracklet_by_frame2[filename][x['id']]['track_bbox_xyxy'] = x['bbox_xyxy']
                    if x['id'] not in tracklet_by_id2.keys():
                        tracklet_by_id2[x['id']] = {}
                    tracklet_by_id2[x['id']][filename] = {}
                    tracklet_by_id2[x['id']][filename]['track_bbox_xyxy'] = x['bbox_xyxy']
            if image in list_images:
                break
        tracklet_by_id2 = dict(sorted(tracklet_by_id2.items()))
        for key in tracklet_by_id2.keys():
            tracklet_by_id2[key] = dict(sorted(tracklet_by_id2[key].items()))
        tracklet_by_frame2 = dict(sorted(tracklet_by_frame2.items()))
        for key in tracklet_by_frame2.keys():
            tracklet_by_frame2[key] = dict(sorted(tracklet_by_frame2[key].items()))

        if len(list(tracklet_by_frame.keys())) > 0 and len(list(tracklet_by_frame2.keys())) > 0:

            boxes1 = np.zeros((len(tracklet_by_frame[list(tracklet_by_frame.keys())[0]]), 4))
            for i, box in enumerate(list(tracklet_by_frame[list(tracklet_by_frame.keys())[0]].items())):
                boxes1[i, :] = box[1]['track_bbox_xyxy']

            boxes2 = np.zeros((len(tracklet_by_frame2[list(tracklet_by_frame2.keys())[-1]]), 4))
            for i, box in enumerate(list(tracklet_by_frame2[list(tracklet_by_frame2.keys())[-1]].items())):
                boxes2[i, :] = box[1]['track_bbox_xyxy']

            iou = torchvision.ops.box_iou(torch.from_numpy(boxes1), torch.from_numpy(boxes2))
            iou = iou.detach().numpy()
            iou_threshold = 0.5
            iou[iou < iou_threshold] = 0

            max_iou = 1
            while max_iou != 0:
                max_iou = np.amax(iou)

                if max_iou == 0:
                    break
                (i, j) = np.where(iou == max_iou)
                i = i[0]
                j = j[0]
                # i indexes boxes1, the first frame of the forward tracklets
                # j indexes boxes2, the last frame of the reverse tracklets
                id1 = list(tracklet_by_frame[list(tracklet_by_frame.keys())[0]].keys())[i]
                id2 = list(tracklet_by_frame2[list(tracklet_by_frame2.keys())[-1]].keys())[j]

                # if videoname == '000017' and id1 == 4:
                #     print(videoname, max_iou, id1, id2)

                for k in tracklet_by_id2[id2].keys():
                    tracklet_by_id[id1][k] = tracklet_by_id2[id2][k]
                    tracklet_by_id2[id2][k] = None

                for k in tracklet_by_frame2.keys():
                    if k not in tracklet_by_frame.keys():
                        tracklet_by_frame[k] = {}
                    if id2 in tracklet_by_frame2[k].keys():
                        tracklet_by_frame[k][id1] = tracklet_by_frame2[k][id2]
                        tracklet_by_frame2[k][id2] = None

                iou[i, :] = 0
                iou[:, j] = 0

            tracklet_by_id = dict(sorted(tracklet_by_id.items()))
            for key in tracklet_by_id.keys():
                tracklet_by_id[key] = dict(sorted(tracklet_by_id[key].items()))
            tracklet_by_frame = dict(sorted(tracklet_by_frame.items()))
            for key in tracklet_by_frame.keys():
                tracklet_by_frame[key] = dict(sorted(tracklet_by_frame[key].items()))

        ulti.write_json({'tracklet_by_id': tracklet_by_id, 'tracklet_by_frame': tracklet_by_frame},
                        file=os.path.join(info['dataset_dir'], info['experiment'], 'Tracklet', videoname + '.json'))
Example #19
def create_tubelet():
    info = ulti.load_json()
    ulti.make_dir(os.path.join(info['dataset_dir'], info['experiment'], 'Tubelet', 'Tubelet'))
    videonames = os.listdir(os.path.join(info['dataset_dir'], 'Images'))
    videonames = sorted(videonames)
    tq = tqdm.tqdm(total=len(videonames))
    for videoname in videonames:
        tq.set_description('Video {}'.format(videoname))
        tq.update(1)

        ulti.make_dir(os.path.join(info['dataset_dir'], info['experiment'], 'Tubelet', 'Videos', videoname))
        ulti.make_dir(os.path.join(info['dataset_dir'], info['experiment'], 'Tubelet', 'Visualization', videoname))

        dir_tracklet = os.path.join(info['dataset_dir'], info['experiment'], 'Tracklet', videoname + '.json')
        dir_detection = os.path.join(info['dataset_dir'], info['experiment'], 'Detection', 'Json', videoname)

        with open(dir_tracklet) as f:
            tracklet_data = json.load(f)
        tracklet_by_id = tracklet_data['tracklet_by_id']
        tracklet_by_frame = tracklet_data['tracklet_by_frame']

        tubelet_by_frame = {}
        tubelet_by_id = {}

        for frame in tracklet_by_frame.keys():
            tubelet_data = []
            with open(os.path.join(dir_detection, frame + '.json')) as f:
                detection_data = json.load(f)
            detection_bbox = np.zeros((len(detection_data), 4))
            for i, box in enumerate(detection_data):
                detection_bbox[i, 0] = box['bbox']['x']
                detection_bbox[i, 1] = box['bbox']['y']
                detection_bbox[i, 2] = box['bbox']['x'] + box['bbox']['w']
                detection_bbox[i, 3] = box['bbox']['y'] + box['bbox']['h']
            track_bbox = np.zeros((len(tracklet_by_frame[frame]), 4))
            for i, key in enumerate(tracklet_by_frame[frame].keys()):
                box = tracklet_by_frame[frame][key]
                track_bbox[i, :] = box['track_bbox_xyxy']

            if detection_bbox.shape[0] == 0 or track_bbox.shape[0] == 0:
                continue
            iou = torchvision.ops.box_iou(torch.from_numpy(detection_bbox), torch.from_numpy(track_bbox))
            iou = iou.detach().numpy()
            iou_threshold = 0.7
            iou[iou < iou_threshold] = 0

            max_iou = 1
            while max_iou != 0:
                max_iou = np.amax(iou)

                if max_iou == 0:
                    break
                (i, j) = np.where(iou == max_iou)
                i = i[0]
                j = j[0]

                detection_data[i]['track_id'] = list(tracklet_by_frame[frame].keys())[j]
                detection_data[i]['track_bbox_xyxy'] = tracklet_by_frame[frame][detection_data[i]['track_id']]['track_bbox_xyxy']
                tubelet_data.append(detection_data[i])

                iou[i, :] = 0
                iou[:, j] = 0

            if len(tubelet_data) > 0:
                ulti.write_json(tubelet_data, os.path.join(info['dataset_dir'], info['experiment'], 'Tubelet', 'Videos', videoname, frame + '.json'))
                tubelet_by_frame[frame] = {}
                for x in tubelet_data:
                    tubelet_by_frame[frame][x['track_id']] = x
                    if x['track_id'] not in tubelet_by_id:
                        tubelet_by_id[x['track_id']] = {}
                    tubelet_by_id[x['track_id']][frame] = x
        ulti.write_json({'tubelet_by_frame': tubelet_by_frame, 'tubelet_by_id': tubelet_by_id},
                        os.path.join(info['dataset_dir'], info['experiment'], 'Tubelet', 'Tubelet', videoname + '.json'))
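
Both init_tracklet() and create_tubelet() link boxes with the same greedy scheme: build the pairwise IoU matrix with torchvision.ops.box_iou, then repeatedly take the largest remaining entry and zero out its row and column. A stand-alone illustration with made-up xyxy boxes:

import numpy as np
import torch
import torchvision

detections = np.array([[10., 10., 50., 50.], [60., 60., 90., 90.]])  # xyxy
tracks = np.array([[12., 11., 52., 49.], [200., 200., 240., 240.]])  # xyxy

iou = torchvision.ops.box_iou(torch.from_numpy(detections),
                              torch.from_numpy(tracks)).numpy()
iou[iou < 0.7] = 0  # same threshold as create_tubelet()

matches = []
while iou.size and np.amax(iou) > 0:
    i, j = np.unravel_index(np.argmax(iou), iou.shape)  # best remaining pair
    matches.append((int(i), int(j)))
    iou[i, :] = 0  # each detection and each track can be matched only once
    iou[:, j] = 0
print(matches)  # [(0, 0)]: only the overlapping pair is linked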