Example #1
0
 def get_dataset(cf):
     """Load the DALY-OCV dataset and tag every video with its split name."""
     ds = Dataset_daly_ocv(cf['dataset.mirror'])
     ds.populate_from_folder(cf['dataset.cache_folder'])
     # Record split membership directly on each video record
     for video_id, split in ds.split.items():
         ds.videos_ocv[video_id]['split'] = split
     return ds
def eval_daly_object(workfolder, cfg_dict, add_args):
    """
    Evaluation code with hacks.

    Builds the DALY test datalist for the configured subset, applies the
    hacky datalist converter, resolves which model to evaluate, and runs
    the shared evaluation routine.
    """
    out, = snippets.get_subfolders(workfolder, ['out'])
    cfg = snippets.YConfig(cfg_dict)
    # Shared default-config installers used by the sibling train/eval entry points
    _set_defcfg_detectron(cfg)
    _set_defcfg_detectron_test(cfg)
    _set_defcfg_object_hacks(cfg)
    cfg.set_deftype("""
    what_to_eval: [~, str]
    eval_hacks:
        model_to_eval: ['what', ['what', 'what+foldname']]
    """)
    cf = cfg.parse()
    cf_add_d2 = cfg.without_prefix('d2.')

    # DALY Dataset, restricted to the configured subset.
    # NOTE(review): sibling blocks construct Dataset_daly_ocv with
    # cf['dataset.mirror']; here it is constructed without arguments —
    # presumably the constructor has a usable default; confirm.
    dataset = Dataset_daly_ocv()
    dataset.populate_from_folder(cf['dataset.cache_folder'])
    split_label = cf['dataset.subset']
    split_vids = get_daly_split_vids(dataset, split_label)
    datalist: Datalist = simplest_daly_to_datalist_v2(dataset, split_vids)

    # Hacky converter decides class names and reshapes the datalist
    cls_names, datalist_converter = \
            _datalist_hacky_converter(cf, dataset)
    datalist = datalist_converter(datalist)

    TEST_DATASET_NAME = 'daly_objaction_test'

    model_to_eval = _eval_foldname_hack(cf)
    _eval_routine(cf, cf_add_d2, out, cls_names,
        TEST_DATASET_NAME, datalist, model_to_eval)
def train_daly_object(workfolder, cfg_dict, add_args):
    """Train a detectron object model on the DALY object/action datalist."""
    out, = snippets.get_subfolders(workfolder, ['out'])
    cfg = snippets.YConfig(cfg_dict)
    _set_defcfg_detectron(cfg)
    _set_defcfg_detectron_train(cfg)
    _set_defcfg_object_hacks(cfg)
    cf = cfg.parse()
    # Scale d2 options to the requested GPU count
    cf_add_d2 = d2dict_gpu_scaling(
            cf, cfg.without_prefix('d2.'), cf['num_gpus'])

    dataset = Dataset_daly_ocv()
    dataset.populate_from_folder(cf['dataset.cache_folder'])
    # Sanity check: the o100 category map must contain exactly 16 objects
    o100_objects, category_map = get_category_map_o100(dataset)
    assert len(o100_objects) == 16
    split_vids = get_daly_split_vids(dataset, cf['dataset.subset'])
    datalist: Datalist = simplest_daly_to_datalist_v2(dataset, split_vids)

    # Hacky converter decides class names and reshapes the datalist
    cls_names, datalist_converter = _datalist_hacky_converter(cf, dataset)
    datalist = datalist_converter(datalist)

    TRAIN_DATASET_NAME = 'daly_objaction_train'
    _train_routine(cf, cf_add_d2, out,
        cls_names, TRAIN_DATASET_NAME, datalist, add_args)
Example #4
0
def compare_data(workfolder, cfg_dict, add_args):
    """Build a fresh DALY-OCV dataset cache under the `out` subfolder."""
    out, = snippets.get_subfolders(workfolder, ['out'])
    cfg = snippets.YConfig(cfg_dict)
    # Unknown keys are tolerated rather than raising
    cfg.set_defaults_handling(raise_without_defaults=False)
    cfg.set_defaults("""
    cache_folders: ~
    """)
    cf = cfg.parse()

    # Construct from scratch and precompute straight into `out`
    fresh_dataset = Dataset_daly_ocv()
    fresh_dataset.precompute_to_folder(out)
Example #5
0
 def resolve_dataset_tubes(cf):
     """Return (dataset, split vids, GT tubes grouped into an AV dict)."""
     dataset = Dataset_daly_ocv(cf['dataset.mirror'])
     dataset.populate_from_folder(cf['dataset.cache_folder'])
     split_vids: List[Vid_daly] = get_daly_split_vids(
             dataset, cf['dataset.subset'])
     # Ground-truth tubes, restricted to the chosen split
     dgt_tubes: Dict[I_dgt, T_dgt] = dtindex_filter_split(
             get_daly_gt_tubes(dataset), split_vids)
     av_gt_tubes: AV_dict[T_dgt] = push_into_avdict(dgt_tubes)
     return dataset, split_vids, av_gt_tubes
Example #6
0
def load_wein_tubes(workfolder, cfg_dict, add_args):
    """
    Philippe tubes:
        tube:
             (one row per frame):
                index of the frame (starting at 1)
                x1 y1 x2 y2
                score of the generic human detector
                score of the instance-specific detector

    Loads the pickled Weinzaepfel tubes, converts them into per-tube dicts
    keyed by (vid, bunch_id, tube_id), and pickles the result to `out`.
    """
    out, = snippets.get_subfolders(workfolder, ['out'])
    cfg = snippets.YConfig(cfg_dict)
    cfg.set_deftype("""
    wein_tubes: [~, str]
    dataset:
        cache_folder: [~, str]
        mirror: ['uname', ~]
    wein_index: [0, int]  # 0 = 510 cut tubes, 1 = 200 uncut tubes
    """)
    cf = cfg.parse()
    # Dataset
    dataset = Dataset_daly_ocv(cf['dataset.mirror'])
    dataset.populate_from_folder(cf['dataset.cache_folder'])
    # Tubes
    # 0 has 510 elements, 1 has 200 elements
    wein_package = small.load_py2_pkl(cf['wein_tubes'])[cf['wein_index']]
    # We got a dictionary of filenames (w .mp4 suffix)
    extracted_tubes: Dict[I_dwein, Tube_daly_wein_as_provided] = {}
    for vid_mp4, wein_bunches in wein_package.items():
        # Explicit failure beats AttributeError on an unexpected key
        match = re.search(r'(.*)\.mp4', vid_mp4)
        if match is None:
            raise RuntimeError('Unexpected tube key (no .mp4 suffix): '
                               '{}'.format(vid_mp4))
        vid = match.group(1)
        rs = dataset.rstats[vid]
        for bunch_id, wein_tubes in enumerate(wein_bunches):
            for tube_id, wein_tube in enumerate(wein_tubes):
                # FIX: np.int was deprecated in NumPy 1.20 and removed in
                # 1.24; plain builtin int is the documented replacement
                frame_inds = wein_tube[:, 0].astype(int) - 1
                # Frame indices must stay within the video's frame count
                assert max(frame_inds) < rs['max_pos_frames']
                boxes_ltrd = wein_tube[:, 1:5]  # ltrd
                human_scores = wein_tube[:, 5]
                instance_scores = wein_tube[:, 6]
                tube = {
                    'frame_inds': frame_inds,
                    'boxes': boxes_ltrd,
                    'hscores': human_scores,
                    'iscores': instance_scores
                }
                extracted_tubes[(vid, bunch_id, tube_id)] = tube
    small.save_pkl(out / 'extracted_tubes.pkl', extracted_tubes)
Example #7
0
def daly_oldstats(workfolder, cfg_dict, add_args):
    """Compute legacy DALY statistics and write plots/tables under `out`."""
    out, = snippets.get_subfolders(workfolder, ['out'])
    cfg = snippets.yconfig.YConfig_v2(cfg_dict, allowed_wo_defaults=[''])
    cfg.set_defaults_yaml("""
    dataset:
        cache_folder: ~
        mirror: 'uname'
    """)
    cf = cfg.parse()

    dataset = Dataset_daly_ocv(cf['dataset.mirror'])
    dataset.populate_from_folder(cf['dataset.cache_folder'])

    # Keyframe-level and object-level stats, then the individual reports
    pd_kf, pd_obj = Daly_stats.pd_stats(dataset)
    Daly_stats.inst_bbox_stats_hists(pd_kf, out)
    Daly_stats.inst_bbox_stats_hmap(pd_kf, out, dataset)
    Daly_stats.obj_inst_bbox_stats(pd_obj, out, dataset)
def train_daly_action(workfolder, cfg_dict, add_args):
    """Train a detectron action model on the DALY pfadet datalist."""
    out, = snippets.get_subfolders(workfolder, ['out'])
    cfg = snippets.YConfig(cfg_dict)
    _set_defcfg_detectron(cfg)
    _set_defcfg_detectron_train(cfg)
    cf = cfg.parse()
    # Scale d2 options to the requested GPU count
    cf_add_d2 = d2dict_gpu_scaling(
            cf, cfg.without_prefix('d2.'), cf['num_gpus'])

    dataset = Dataset_daly_ocv()
    dataset.populate_from_folder(cf['dataset.cache_folder'])
    split_vids = get_daly_split_vids(dataset, cf['dataset.subset'])
    datalist: Datalist = daly_to_datalist_pfadet(dataset, split_vids)

    # Action names double as the class names here
    cls_names = dataset.action_names

    TRAIN_DATASET_NAME = 'daly_pfadet_train'
    _train_routine(cf, cf_add_d2, out,
        cls_names, TRAIN_DATASET_NAME, datalist, add_args)
Example #9
0
def precompute_cache(workfolder, cfg_dict, add_args):
    """Build an OCV dataset cache for the configured dataset under `out`."""
    out, = snippets.get_subfolders(workfolder, ['out'])
    cfg = snippets.YConfig(cfg_dict)
    cfg.set_deftype("""
    dataset: [~, str]
    charades:
        mirror: ['gpuhost7', str]
        resolution: ['480', str]
    daly:
        mirror: [~, str]
    """)
    cf = cfg.parse()

    # Pick the dataset implementation by name; unsupported names fail loudly
    chosen = cf['dataset']
    if chosen == 'daly':
        dataset = Dataset_daly_ocv(cf['daly.mirror'])
    elif chosen == 'charades':
        dataset = Dataset_charades_ocv(
                cf['charades.mirror'], cf['charades.resolution'])
    elif chosen == 'voc2007':
        raise NotImplementedError()
    else:
        raise RuntimeError('Wrong dataset')
    dataset.precompute_to_folder(out)