Example #1
    def __init__(self, tubes_featfold, scaler):
        self.scaler = scaler
        tubes_featfold = Path(tubes_featfold)
        connections_f: Dict[
            Tuple[Vid_daly, int], Box_connections_dwti] = \
                small.load_pkl(tubes_featfold/'connections_f.pkl')
        fullframe = small.load_pkl(tubes_featfold / 'fullframe.pkl')
        self.connections_f = connections_f
        self.fullframe_feats = fullframe
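
Note: the definition of Box_connections_dwti is not shown in these examples. Judging from how connections_f entries are used in Examples #3, #7 and #11 (con['vid'], con['frame_ind'], con['dwti_sources']), a minimal sketch of the structure could look like the following; everything beyond those three fields, and the concrete alias types, are assumptions.

from typing import Dict, List, Tuple, TypedDict

import numpy as np

Vid_daly = str                       # assumption: video ids are plain strings
I_dwein = Tuple[Vid_daly, int, int]  # assumption: (vid, bunch_id, tube_id), as unpacked in Example #11


class Box_connections_dwti_sketch(TypedDict):
    # One entry per (video, frame) pair that carries boxes coming from dwein tubes.
    vid: Vid_daly
    frame_ind: int
    dwti_sources: List[I_dwein]  # which tube each box in this frame belongs to
    boxes: np.ndarray            # assumption: per-frame boxes aligned with dwti_sources


# connections_f maps (vid, frame_ind) -> Box_connections_dwti, per the annotation above.
connections_f_sketch: Dict[Tuple[Vid_daly, int], Box_connections_dwti_sketch] = {}
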
Example #2
def eval_avstubes(workfolder, cfg_dict, add_args):
    out, = snippets.get_subfolders(workfolder, ['out'])
    cfg = snippets.YConfig(cfg_dict)
    cfg.set_defaults_handling(raise_without_defaults=False)
    Ncfg_dataset.set_dataset_seed(cfg)
    Ncfg_tubes.set_defcfg(cfg)
    cfg.set_defaults("""
    tubes_path: ~
    """)
    Ncfg_tube_eval.set_defcfg(cfg)
    cf = cfg.parse()

    dataset, split_vids, av_gt_tubes = \
            Ncfg_dataset.resolve_dataset_tubes(cf)
    tubes_to_eval: AV_dict[T_dwein_scored] = \
            small.load_pkl(cf['tubes_path'])

    # Extended version of "Ncfg_tube_eval.evalprint_if"
    nms_thresh = cf['tube_eval.nms.thresh']
    iou_thresholds: List[float] = cf['tube_eval.iou_thresholds']
    minscore_cutoff = cf['tube_eval.minscore_cutoff']

    tubes_to_eval = av_stubes_above_score(tubes_to_eval, minscore_cutoff)
    tubes_to_eval = \
            compute_nms_for_av_stubes(tubes_to_eval, nms_thresh)
    dfdict = _compute_exhaustive_evaluation_stats(av_gt_tubes, tubes_to_eval,
                                                  iou_thresholds)
    _print_exhaustive_evaluation_stats(dfdict)
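
Note: av_stubes_above_score and compute_nms_for_av_stubes are project helpers whose code is not shown here. As a rough illustration of what the score-cutoff step does over the nested AV_dict (action -> vid -> list of scored tubes), a minimal re-implementation might look like this; the 'score' field name follows Example #15, and whether the real helper drops emptied lists is not visible here.

from typing import Any, Dict, List

AV_dict_sketch = Dict[str, Dict[str, List[Dict[str, Any]]]]


def av_stubes_above_score_sketch(
        av_stubes: AV_dict_sketch, minscore: float) -> AV_dict_sketch:
    """Keep only scored tubes whose 'score' field clears the cutoff."""
    filtered: AV_dict_sketch = {}
    for action, v_stubes in av_stubes.items():
        for vid, stubes in v_stubes.items():
            filtered.setdefault(action, {})[vid] = \
                    [s for s in stubes if s['score'] > minscore]
    return filtered
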
Example #3
    def __init__(self, tubes_featfold):
        tubes_featfold = Path(tubes_featfold)
        connections_f = small.load_pkl(tubes_featfold / 'connections_f.pkl')
        box_inds2 = small.load_pkl(tubes_featfold / 'box_inds2.pkl')
        # DWTI -> Frame -> bi (big index)
        with small.QTimer('Creating dwti -> big index structure'):
            dwti_f_bi = {}
            for con, bi2 in zip(connections_f.values(), box_inds2):
                bi_range = np.arange(bi2[0], bi2[1])
                for dwti, bi in zip(con['dwti_sources'], bi_range):
                    dwti_f_bi.setdefault(dwti, {})[con['frame_ind']] = bi
        # Features
        with small.QTimer('big numpy load'):
            BIG = np.load(str(tubes_featfold / "feats.npy"))
        self.dwti_f_bi = dwti_f_bi
        self.BIG = BIG
Example #4
def gather_reapply_agg_rcnn_avstubes(workfolder, cfg_dict, add_args):
    """
    Gather RCNN outputs (vf_cls_probs, vf_connections_dwti) from several
    folders and re-apply score aggregation.
    """
    out, = snippets.get_subfolders(workfolder, ['out'])
    cfg = snippets.YConfig(cfg_dict)
    cfg.set_defaults_handling(['gather.paths'])
    Ncfg_dataset.set_dataset_seed(cfg)
    Ncfg_tubes.set_defcfg(cfg)
    cfg.set_deftype("""
    gather:
        paths: [~, ~]
    score_agg_kind: ['mean', ['mean', 'max', 'sum']]
    """)
    Ncfg_tube_eval.set_defcfg(cfg)
    cf = cfg.parse()
    # Preparation
    dataset, split_vids, av_gt_tubes = \
            Ncfg_dataset.resolve_dataset_tubes(cf)
    tubes_dwein: Dict[I_dwein, T_dwein] = \
            Ncfg_tubes.resolve_tubes_dwein(cf, split_vids)
    # Experiment logic
    gather_paths = cf['gather.paths']
    if not _gather_check_all_present(
            gather_paths, ['vf_cls_probs.pkl', 'vf_connections_dwti.pkl']):
        return
    vf_connections_dwti = {}
    vf_cls_probs = {}
    for path in gather_paths:
        path = Path(path)
        vf_cls_probs_ = small.load_pkl(path / 'vf_cls_probs.pkl')
        vf_connections_dwti_ = small.load_pkl(path / 'vf_connections_dwti.pkl')
        assert vf_cls_probs_.keys() == vf_connections_dwti_.keys()
        vf_cls_probs.update(vf_cls_probs_)
        vf_connections_dwti.update(vf_connections_dwti_)
    small.save_pkl(out / 'vf_connections_dwti.pkl', vf_connections_dwti)
    small.save_pkl(out / 'vf_cls_probs.pkl', vf_cls_probs)
    agg_kind = cf['score_agg_kind']
    av_stubes: AV_dict[T_dwein_scored] = \
        Ncfg_generic_rcnn_eval.aggregate_rcnn_scores(
            dataset, tubes_dwein, vf_connections_dwti,
            vf_cls_probs, agg_kind)
    small.save_pkl(out / 'av_stubes.pkl', av_stubes)
    # Post experiment
    Ncfg_tube_eval.evalprint_if(cf, av_stubes, av_gt_tubes)
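
Note: _gather_check_all_present is not shown in these examples. A plausible sketch, assuming it only verifies that every gather path contains the required pickle files and logs whatever is missing:

import logging
from pathlib import Path
from typing import List

log = logging.getLogger(__name__)


def gather_check_all_present_sketch(
        gather_paths: List[str], filenames: List[str]) -> bool:
    """Return True only if every path contains all of the required files."""
    all_present = True
    for path in gather_paths:
        for filename in filenames:
            if not (Path(path) / filename).exists():
                log.info('Missing %s in %s', filename, path)
                all_present = False
    return all_present
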
Example #5
    def _restore(self):
        intermediate_files: Dict[int, Dict[str, Path]] = \
                self._get_intermediate_files()
        start_i, ifiles = max(intermediate_files.items(), default=(-1, None))
        if ifiles is not None:
            restore_from = ifiles['pkl']
            self.result = small.load_pkl(restore_from)
            log.info('Restore from {}'.format(restore_from))
        return start_i
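
Note: _get_intermediate_files is not shown. A sketch of the companion side of this restore idiom, assuming intermediate results are pickled under names like 'intermediate_<i>.pkl' (the naming scheme is an assumption):

import re
from pathlib import Path
from typing import Dict


def get_intermediate_files_sketch(folder: Path) -> Dict[int, Dict[str, Path]]:
    """Map iteration index -> {'pkl': path}, so max() in _restore picks the latest one."""
    intermediate_files: Dict[int, Dict[str, Path]] = {}
    for p in folder.glob('intermediate_*.pkl'):
        m = re.match(r'intermediate_(\d+)\.pkl$', p.name)
        if m:
            intermediate_files[int(m.group(1))] = {'pkl': p}
    return intermediate_files
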
Example #6
def loadconvert_tubes_dwein(path_dwein) -> Dict[I_dwein, T_dwein]:
    # Tubes faithfully loaded in experiments.data.load_wein_tubes
    tubes_dwein_prov: Dict[I_dwein, Tube_daly_wein_as_provided] = \
            small.load_pkl(path_dwein)
    # Convenience reconversion
    tubes_dwein = {
        k: _reconvert_dwein_tube(k, t)
        for k, t in tubes_dwein_prov.items()
    }
    return tubes_dwein
Example #7
    def __init__(self, tubes_dwein_feats_fold, scaler):
        self.scaler = scaler
        fold = Path(tubes_dwein_feats_fold)
        connections_f: Dict[Tuple[Vid_daly, int], Box_connections_dwti] = \
                small.load_pkl(fold/'connections_f.pkl')
        box_inds2 = small.load_pkl(fold / 'box_inds2.pkl')
        # DWTI -> Frame -> bi (big index)
        dwti_f_bi: Dict[I_dwein, Dict[int, int]]
        with small.QTimer('Creating dwti -> big index structure'):
            dwti_f_bi = {}
            for con, bi2 in zip(connections_f.values(), box_inds2):
                bi_range = np.arange(bi2[0], bi2[1])
                for dwti, bi in zip(con['dwti_sources'], bi_range):
                    dwti_f_bi.setdefault(dwti, {})[con['frame_ind']] = bi
        # Features
        with small.QTimer('big numpy load'):
            BIG = np.load(str(fold / "feats.npy"))
        self.connections_f = connections_f
        self.dwti_f_bi = dwti_f_bi
        self.BIG = BIG
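
Note: Example #11 calls man_feats_dwt_roi.get_all_tube_feats(dwti) on this manager, but the method itself is not shown. Given the dwti -> frame -> big-index structure built above, a minimal sketch of such a lookup might be the following; returning a torch tensor matches Example #11, which feeds the result straight into the model, and the sklearn-style scaler.transform call is an assumption.

import numpy as np
import torch


def get_all_tube_feats_sketch(manager, dwti):
    """Gather all per-frame roipooled features of one tube from the BIG array."""
    # dwti_f_bi[dwti] maps frame_ind -> row index into BIG (see __init__ above);
    # iterate frames in order so the features follow the tube temporally.
    big_inds = [bi for _frame, bi in sorted(manager.dwti_f_bi[dwti].items())]
    feats = manager.BIG[big_inds]
    if manager.scaler is not None:
        feats = manager.scaler.transform(feats)
    return torch.from_numpy(np.asarray(feats, dtype=np.float32))
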
Example #8
    def load_scale(cf, vgroup):
        # Produce keyframe datasets realquick
        featname = cf['inputs.keyframes.featname']
        keyframes_featfold = Path(cf['inputs.keyframes.fold'])
        keyframes = small.load_pkl(keyframes_featfold / 'keyframes.pkl')
        outputs = small.load_pkl(keyframes_featfold /
                                 'dict_outputs.pkl')[featname]

        kfeats_d: Dict[str, E_kfeats] = {}
        for sset, vids in vgroup.items():
            kfeats_d[sset] = Ncfg_kfeats.split_off_D(outputs, keyframes, vids)
        # Scale
        if cf['data_scaler'] == 'keyframes':
            scaler = Ncfg_kfeats.fitscale_kfeats(kfeats_d)
        elif cf['data_scaler'] == 'no':
            scaler = None
        else:
            raise RuntimeError(f"Unknown data_scaler: {cf['data_scaler']}")

        # To torch
        tkfeats_d: Dict[str, E_tkfeats] = Ncfg_kfeats.to_torch(kfeats_d)
        return tkfeats_d, scaler
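
Note: Ncfg_kfeats.fitscale_kfeats is not shown. Assuming it performs a standard mean/std normalisation of the 'X' arrays in kfeats_d and returns the fitted scaler, a rough numpy sketch could be the following; which split the real helper fits on is not visible here, so this version fits on all splits at once.

from typing import Dict

import numpy as np


class MeanStdScaler_sketch:
    """Minimal stand-in for whatever scaler object fitscale_kfeats returns."""
    def fit(self, X: np.ndarray) -> 'MeanStdScaler_sketch':
        self.mean_ = X.mean(axis=0)
        self.std_ = X.std(axis=0) + 1e-8
        return self

    def transform(self, X: np.ndarray) -> np.ndarray:
        return (X - self.mean_) / self.std_


def fitscale_kfeats_sketch(kfeats_d: Dict[str, dict]) -> MeanStdScaler_sketch:
    all_X = np.concatenate([kf['X'] for kf in kfeats_d.values()], axis=0)
    scaler = MeanStdScaler_sketch().fit(all_X)
    for kf in kfeats_d.values():
        kf['X'] = scaler.transform(kf['X'])
    return scaler
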
Example #9
def _resolve_actobjects(cf, dataset, split_vids):
    # / Assign objects to tubes
    # // Create objaction_dets in video frames
    objactions_vf: Dict[Vid_daly, Dict[int, Objaction_dets]] = {}
    datalist = _recreate_actobject_datalist(dataset, split_vids)
    if cf['actobjects.source'] == 'detected':
        # /// Load detections themselves
        actobjects_evaluated = small.load_pkl(cf['actobjects.detected.path'])
        # /// Assign objactions
        for dl_item, pred_item in zip(datalist, actobjects_evaluated):
            pred_boxes = pred_item.pred_boxes.tensor.numpy()
            scores = pred_item.scores.numpy()
            pred_classes = pred_item.pred_classes.numpy()
            pred_classes = np.array(
                [dataset.action_names[i] for i in pred_classes])
            detections: Objaction_dets = {
                'pred_boxes': pred_boxes,
                'scores': scores,
                'pred_classes': pred_classes
            }
            (objactions_vf.setdefault(
                dl_item['vid'],
                {})[dl_item['video_frame_number']]) = detections
    elif cf['actobjects.source'] == 'gt':
        # /// Create fake "perfect" detections
        for dl_item in datalist:
            pred_boxes = []
            pred_classes = []
            for anno in dl_item['annotations']:
                pred_boxes.append(anno['bbox'])
                pred_classes.append(dataset.action_names[anno['category_id']])
            pred_boxes = np.array(pred_boxes)
            pred_classes = np.array(pred_classes)
            scores = np.ones(len(pred_boxes))
            detections: Objaction_dets = {
                'pred_boxes': pred_boxes,
                'scores': scores,
                'pred_classes': pred_classes
            }
            (objactions_vf.setdefault(
                dl_item['vid'],
                {})[dl_item['video_frame_number']]) = detections
    else:
        raise NotImplementedError()
    return objactions_vf
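
Note: Objaction_dets is used above as a dict with 'pred_boxes', 'scores' and 'pred_classes' in both branches. A TypedDict capturing just that (the array dtypes/shapes are assumptions consistent with the code above) would be:

from typing import TypedDict

import numpy as np


class Objaction_dets_sketch(TypedDict):
    pred_boxes: np.ndarray    # (N, 4) boxes, as taken from pred_item.pred_boxes above
    scores: np.ndarray        # (N,) confidences; all ones in the 'gt' branch
    pred_classes: np.ndarray  # (N,) action names as strings
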
Example #10
def prepare_charades_vids(data, inputs_vids_qeval, cull_specs):
    # Prepare vids
    train_vids = [v for v, s in data.split.items() if s == 'train']
    val_vids = [v for v, s in data.split.items() if s == 'val']
    if inputs_vids_qeval:
        qeval_vids = small.load_pkl(inputs_vids_qeval)
    else:
        qeval_vids = None
    # Cull if necessary
    if cull_specs is not None:
        train_vids = cull_vids_fraction(train_vids, cull_specs['train'])
        val_vids = cull_vids_fraction(val_vids, cull_specs['val'])
        if qeval_vids:
            qeval_vids = cull_vids_fraction(qeval_vids, cull_specs['qeval'])
    # Eval dict
    eval_vids_dict = {'eval': val_vids}
    if qeval_vids:
        eval_vids_dict['qeval'] = qeval_vids
    return train_vids, eval_vids_dict
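
Note: cull_vids_fraction and the exact format of cull_specs are not shown, so culling is disabled in this usage sketch; the fake 'data' object carries only the attribute this function touches.

from types import SimpleNamespace

data = SimpleNamespace(split={'vidA': 'train', 'vidB': 'val', 'vidC': 'train'})
train_vids, eval_vids_dict = prepare_charades_vids(
    data, inputs_vids_qeval=None, cull_specs=None)
assert train_vids == ['vidA', 'vidC']
assert eval_vids_dict == {'eval': ['vidB']}
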
Example #11
def merge_evaluate(workfolder, cfg_dict, add_args):
    out, = snippets.get_subfolders(workfolder, ['out'])
    cfg = snippets.YConfig_v2(cfg_dict)
    Ncfg_daly.set_defcfg_v2(cfg)
    cfg.set_defaults_yaml("""
    inputs:
        tubes_dwein: ~
        keyframes:
            roi: ~
            full: ~
        tubes_dwein_feats:
            roi: ~
            full: ~
        ckpt:
            roi: ~
            full: ~
    net:
        kind: 'layer1'
        layer1:
            H: 32
        ll_dropout: 0.5
    seed: 42
    split_assignment: !def ['train/val',
        ['train/val', 'trainval/test']]
    nms_scorefield: 'hscore'
    """)
    cf = cfg.parse()
    # Seeds
    initial_seed = cf['seed']
    torch.manual_seed(initial_seed)
    # Data
    # General DALY level preparation
    dataset = Ncfg_daly.get_dataset(cf)
    vgroup = Ncfg_daly.get_vids(cf, dataset)
    sset_train, sset_eval = cf['split_assignment'].split('/')

    # wein tubes
    tubes_dwein_d, tubes_dgt_d = load_gt_and_wein_tubes(
            cf['inputs.tubes_dwein'], dataset, vgroup)
    tubes_dwein_prov: Dict[I_dwein, Tube_daly_wein_as_provided] = \
            small.load_pkl(cf['inputs.tubes_dwein'])

    # / keyframe feats
    def to_torch(kfeats):
        tkfeats = kfeats.copy()
        tkfeats['X'] = torch.from_numpy(tkfeats['X'])
        tkfeats['Y'] = torch.from_numpy(tkfeats['Y'])
        return tkfeats

    # synthetic tube labels
    _, dwti_to_label_eval = qload_synthetic_tube_labels(
            tubes_dgt_d[sset_eval], tubes_dwein_d[sset_eval], dataset)
    # Sset
    tubes_dwein_eval = tubes_dwein_d[sset_eval]
    tubes_dgt_eval = tubes_dgt_d[sset_eval]

    # Interacting with big
    man_feats_dwt_roi = Manager_feats_tubes_dwein_roipooled(
            cf['inputs.tubes_dwein_feats.roi'], None)
    man_feats_dwt_full = Manager_feats_tubes_dwein_full(
            cf['inputs.tubes_dwein_feats.full'], None)
    D_in = man_feats_dwt_full.fullframe_feats.shape[-1]

    model_roi = define_mlp_model(cf, D_in, 11)
    states = torch.load(cf['inputs.ckpt.roi'])
    model_roi.load_state_dict(states['model_sdict'])
    model_roi.eval()

    model_full = define_mlp_model(cf, D_in, 10)
    states = torch.load(cf['inputs.ckpt.full'])
    model_full.load_state_dict(states['model_sdict'])
    model_full.eval()

    # Actual evaluation
    iou_thresholds = [.3, .5, .7]
    av_gt_tubes: AV_dict[T_dgt] = push_into_avdict(tubes_dgt_eval)

    # ROI evaluations
    tube_softmaxes_eval: Dict[I_dwein, np.ndarray] = {}
    with torch.no_grad():
        for dwti in tubes_dwein_eval.keys():
            dwti_feats = man_feats_dwt_roi.get_all_tube_feats(dwti)
            preds_softmax = model_roi(dwti_feats)['x_final']
            tube_softmaxes_eval[dwti] = preds_softmax.numpy()
    tube_softmaxes_eval_nobg = {k: v[:, :-1]
            for k, v in tube_softmaxes_eval.items()}

    # FULL evaluations
    connections_f = man_feats_dwt_full.connections_f
    fullframe_feats = man_feats_dwt_full.fullframe_feats
    with torch.no_grad():
        t_fullframe_feats = torch.from_numpy(fullframe_feats)
        x_final = model_full(t_fullframe_feats)['x_final'].numpy()
    # Aggregate frame scores
    frame_scores: Dict[Tuple[Vid_daly, int], np.ndarray] = {}
    for cons, outputs_ in zip(connections_f.values(), x_final):
        vid = cons['vid']
        frame_ind = cons['frame_ind']
        frame_scores[(vid, frame_ind)] = outputs_

    # Universal detector experiments
    tubes_dwein = tubes_dwein_eval
    av_stubes_with_scores: AV_dict[Dict] = {}
    for dwt_index, tube in tubes_dwein.items():
        (vid, bunch_id, tube_id) = dwt_index
        # Human score from dwein tubes
        hscores = tubes_dwein_prov[dwt_index]['hscores']
        iscores = tubes_dwein_prov[dwt_index]['iscores']
        # Scores over roi
        softmaxes = tube_softmaxes_eval_nobg[dwt_index]
        scores = softmaxes.mean(axis=0)
        # Aggregated frame score
        fscores_for_tube_ = []
        for frame_ind in tube['frame_inds']:
            fscore = frame_scores.get((vid, frame_ind))
            if fscore is not None:
                fscores_for_tube_.append(fscore)
        fscores_for_tube = np.vstack(fscores_for_tube_)
        for ia, (action_name, score) in enumerate(
                zip(dataset.action_names, scores)):
            stube = cast(Dict, tube.copy())
            stube['hscore'] = hscores.mean()
            stube['iscore'] = np.nanmean(iscores)
            stube['box_det_score'] = score
            stube['box_nonbg_score'] = scores.sum()
            stube['frame_cls_score'] = fscores_for_tube.mean(0)[ia]
            stube['hscore*frame_cls_score'] = \
                    stube['hscore'] * stube['frame_cls_score']
            stube['mean(box_det_score+frame_cls_score)'] = \
                    (stube['box_det_score'] + stube['frame_cls_score'])/2
            stube['mean(box_det_score, hscore*frame_cls_score)'] = \
                    (stube['box_det_score'] + stube['hscore*frame_cls_score'])/2
            (av_stubes_with_scores
                    .setdefault(action_name, {})
                    .setdefault(vid, []).append(stube))

    av_stubes: Any = copy.deepcopy(av_stubes_with_scores)
    nms_scorefield = cf['nms_scorefield']
    av_stubes = assign_scorefield(av_stubes, nms_scorefield)
    av_stubes = av_stubes_above_score(av_stubes, 0.0)
    av_stubes = compute_nms_for_av_stubes(av_stubes, 0.3)
    av_stubes = assign_scorefield(
            av_stubes, 'mean(box_det_score, hscore*frame_cls_score)')

    df_ap_full = compute_ap_for_avtubes_as_df(
        av_gt_tubes, av_stubes, iou_thresholds, False, False)
    log.info(df_ap_full*100)
    apline = '/'.join((df_ap_full*100).round(2).loc['all'].values.astype(str))
    log.info('AP357: {}'.format(apline))
Example #12
def fancy_evaluate(workfolder, cfg_dict, add_args):
    out, = snippets.get_subfolders(workfolder, ['out'])
    cfg = snippets.YConfig_v2(cfg_dict)
    Ncfg_daly.set_defcfg_v2(cfg)
    Ncfg_kfeats.set_defcfg_v2(cfg)
    cfg.set_defaults_yaml("""
    inputs:
        tubes_dwein: ~
        tubes_dwein_feats:
            fold: ~
            kind: !def ['roipooled', ['fullframe', 'roipooled']]
        ckpt: ~
    seed: 42
    split_assignment: !def ['train/val',
        ['train/val', 'trainval/test']]
    data_scaler: !def ['no', ['keyframes', 'no']]
    net:
        kind: !def ['layer1', ['layer0', 'layer1']]
        layer1:
            H: 32
        ll_dropout: 0.5
        n_outputs: !def [~, [10, 11]]
    eval:
        full_tubes:
            enabled: True
            detect_mode: !def ['roipooled', ['fullframe', 'roipooled']]
            nms: 0.3
            field_nms: 'box_det_score'  # hscore
            field_det: 'box_det_score'  # hscore*frame_cls_score
    """)
    cf = cfg.parse()
    # Seeds
    initial_seed = cf['seed']
    torch.manual_seed(initial_seed)
    # Data
    # General DALY level preparation
    dataset = Ncfg_daly.get_dataset(cf)
    vgroup = Ncfg_daly.get_vids(cf, dataset)
    sset_train, sset_eval = cf['split_assignment'].split('/')
    vids_train, vids_eval = vgroup[sset_train], vgroup[sset_eval]
    man_feats_kf = Manager_feats_keyframes(
            cf['inputs.keyframes.fold'], cf['inputs.keyframes.featname'])
    # wein tubes
    tubes_dwein_d, tubes_dgt_d = load_gt_and_wein_tubes(
            cf['inputs.tubes_dwein'], dataset, vgroup)
    tubes_dwein_prov: Dict[I_dwein, Tube_daly_wein_as_provided] = \
            small.load_pkl(cf['inputs.tubes_dwein'])

    # synthetic tube labels
    _, dwti_to_label_eval = qload_synthetic_tube_labels(
            tubes_dgt_d[sset_eval], tubes_dwein_d[sset_eval], dataset)
    # Sset
    tkfeats_train = man_feats_kf.split_off(vids_train, True)
    tkfeats_eval = man_feats_kf.split_off(vids_eval, True)
    tubes_dwein_eval = tubes_dwein_d[sset_eval]
    tubes_dgt_eval = tubes_dgt_d[sset_eval]

    # Interacting with big
    assert cf['eval.full_tubes.enabled'], 'We train on them anyway'
    detect_mode = cf['inputs.tubes_dwein_feats.kind']
    man_feats_dwt = create_preextracted_feats_manager(
            cf, None, detect_mode)
    output_dims = cf['net.n_outputs']

    if detect_mode == 'fullframe':
        assert output_dims == 10
        D_in = man_feats_dwt.fullframe_feats.shape[-1]
    elif detect_mode == 'roipooled':
        assert output_dims == 11
        D_in = man_feats_dwt.BIG.shape[-1]
    else:
        raise RuntimeError(f'Unknown detect_mode: {detect_mode}')

    # Model
    model = Net_mlp_onelayer(
        D_in, output_dims, 32, 0.5)
    states = torch.load(cf['inputs.ckpt'])
    i_epoch = states['i_epoch']
    model.load_state_dict(states['model_sdict'])

    model.eval()
    result = mlp_perf_kf_evaluate(
            model, tkfeats_train, tkfeats_eval,
            tubes_dwein_eval, tubes_dgt_eval,
            dataset, output_dims)
    result_fulltube = mlp_perf_fulltube_evaluate(
            model, man_feats_dwt,
            tubes_dwein_eval, tubes_dwein_prov,
            tubes_dgt_eval, dwti_to_label_eval,
            dataset, output_dims,
            cf['eval.full_tubes.detect_mode'],
            cf['eval.full_tubes.nms'],
            cf['eval.full_tubes.field_nms'],
            cf['eval.full_tubes.field_det'])
    result.update(result_fulltube)
    model.train()
    log.info(f'Evalset perf at {i_epoch=}')
    mlp_perf_display(result, sset_eval)
Example #13
def tubefeats_dist_train_mlp(workfolder, cfg_dict, add_args):
    """
    Train an MLP on tube features in the same way as the finetuning experiments
    """
    out, = snippets.get_subfolders(workfolder, ['out'])
    cfg = snippets.YConfig_v2(cfg_dict)
    Ncfg_daly.set_defcfg_v2(cfg)
    cfg.set_defaults_yaml("""
    inputs:
        keyframes:
            fold: ~
            featname: ~
    """)
    cfg.set_defaults_yaml("""
    inputs:
        tubes_dwein: ~
        tubes_dwein_feats:
            fold: ~
            kind: !def ['roipooled', ['fullframe', 'roipooled']]
        ckpt: ~
    seed: 42
    split_assignment: !def ['train/val',
        ['train/val', 'trainval/test']]
    data_scaler: !def ['no', ['keyframes', 'no']]
    net:
        n_outputs: !def [~, [10, 11]]
    train:
        lr: 1.0e-05
        weight_decay: 5.0e-2
        start_epoch: 0
        n_epochs: 120
        batch_size: 32
        tubes:
            top_n_matches: 1
            stride: 4
            frame_dist: 16
            add_keyframes: True
    period:
        i_batch:
            loss_log: '::'
        i_epoch:
            loss_log: '0::1'
            q_eval: '::'
            full_eval: '0,1,2,3,4::5'
    eval:
        full_tubes:
            enabled: True
            detect_mode: !def ['roipooled', ['fullframe', 'roipooled']]
            nms: 0.3
            field_nms: 'box_det_score'  # hscore
            field_det: 'box_det_score'  # hscore*frame_cls_score
    """)
    cf = cfg.parse()
    # Seeds
    initial_seed = cf['seed']
    torch.manual_seed(initial_seed)
    # Data
    # General DALY level preparation
    dataset = Ncfg_daly.get_dataset(cf)
    vgroup = Ncfg_daly.get_vids(cf, dataset)
    sset_train, sset_eval = cf['split_assignment'].split('/')
    vids_train, vids_eval = vgroup[sset_train], vgroup[sset_eval]
    man_feats_kf = Manager_feats_keyframes(
            cf['inputs.keyframes.fold'], cf['inputs.keyframes.featname'])
    # wein tubes
    tubes_dwein_d, tubes_dgt_d = load_gt_and_wein_tubes(
            cf['inputs.tubes_dwein'], dataset, vgroup)
    tubes_dwein_prov: Dict[I_dwein, Tube_daly_wein_as_provided] = \
            small.load_pkl(cf['inputs.tubes_dwein'])

    # synthetic tube labels
    _, dwti_to_label_eval = qload_synthetic_tube_labels(
            tubes_dgt_d[sset_eval], tubes_dwein_d[sset_eval], dataset)

    # Sset
    tkfeats_train = man_feats_kf.split_off(vids_train, True)
    tkfeats_eval = man_feats_kf.split_off(vids_eval, True)
    tubes_dwein_train = tubes_dwein_d[sset_train]
    tubes_dwein_eval = tubes_dwein_d[sset_eval]
    tubes_dgt_train = tubes_dgt_d[sset_train]
    tubes_dgt_eval = tubes_dgt_d[sset_eval]

    # Interacting with big
    assert cf['eval.full_tubes.enabled'], 'We train on them anyway'
    top_n_matches = cf['train.tubes.top_n_matches']
    stride = cf['train.tubes.stride']
    detect_mode = cf['inputs.tubes_dwein_feats.kind']
    man_feats_dwt = create_preextracted_feats_manager(
            cf, None, detect_mode)

    max_distance = cf['train.tubes.frame_dist']
    output_dims = cf['net.n_outputs']

    if detect_mode == 'fullframe':
        # fullframes
        labeled_frames: List[Frame_labeled] = \
            prepare_label_fullframes_for_training(
                tubes_dgt_train, dataset, stride, max_distance)
        # associate to extracted feats
        labeled_linked_frames: List[Frame_labeled] = \
            _link_lframes_to_exfeats(labeled_frames, man_feats_dwt)
        tdataset = TDataset_over_labeled_linked_frames(
                labeled_linked_frames, man_feats_dwt)
        assert output_dims == 10
        D_in = man_feats_dwt.fullframe_feats.shape[-1]
    elif detect_mode == 'roipooled':
        # roitubes
        tkfeats_train_numpy = man_feats_kf.split_off(vids_train, False)
        keyframes_train = tkfeats_train_numpy['kf']
        keyframe_feats_train = tkfeats_train_numpy['X']
        labeled_boxes: List[Box_labeled] = \
          prepare_label_roiboxes_for_training(
            tubes_dgt_train, dataset, stride, max_distance,
            tubes_dwein_train, keyframes_train, top_n_matches,
            cf['train.tubes.add_keyframes'])
        # associate roiboxes to extracted feats
        labeled_linked_boxes: List[Box_labeled_linked] = \
                _link_lboxes_to_exfeats(labeled_boxes, man_feats_dwt)
        tdataset = TDataset_over_labeled_linked_boxes(
            labeled_linked_boxes, man_feats_dwt, keyframe_feats_train)
        assert output_dims == 11
        D_in = man_feats_dwt.BIG.shape[-1]
    else:
        raise RuntimeError(f'Unknown detect_mode: {detect_mode}')

    # Model
    model = Net_mlp_onelayer(
        D_in, output_dims, 32, 0.5)
    loss_fn = torch.nn.CrossEntropyLoss(reduction='mean')
    optimizer = torch.optim.AdamW(model.parameters(),
            lr=cf['train.lr'], weight_decay=cf['train.weight_decay'])

    ckpt = Checkpointer(model, optimizer)

    # Restore previous run
    rundir = small.mkdir(out/'rundir')
    checkpoint_path = (Manager_checkpoint_name.find_last_checkpoint(rundir))
    if '--new' in add_args:
        Manager_checkpoint_name.rename_old_rundir(rundir)
        checkpoint_path = None
    start_epoch = (ckpt.restore_model_magic(checkpoint_path,
        cf['inputs.ckpt'], cf['train.start_epoch']))

    # Training
    n_epochs = cf['train.n_epochs']
    for i_epoch in range(start_epoch, n_epochs):
        log.info(f'Started epoch {i_epoch=}')
        model.train()
        l_avg = snippets.misc.Averager()
        avg_bs = snippets.misc.Averager()
        # Loader reproducible even if we restore
        rgen = np.random.default_rng(initial_seed+i_epoch)
        loader = _get_trainloader_rnd_sampler(tdataset,
                cf['train.batch_size'], rgen)
        for i_batch, (data_input) in enumerate(loader):
            # inputs converter
            (meta,) = data_input
            flat_labels = np.hstack([m['labels'] for m in meta])
            flat_feats = np.vstack([m['feats'] for m in meta])

            flat_labels_t = torch.from_numpy(flat_labels)
            flat_feats_t = torch.from_numpy(flat_feats)

            result = model(flat_feats_t)
            pred_train = result['x_final']
            loss = loss_fn(pred_train, flat_labels_t)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            avg_bs.update(len(flat_labels))
            l_avg.update(loss.item())

            if check_step(i_batch, cf['period.i_batch.loss_log']):
                Nb = len(loader)
                loss_str = (f'loss(all/last):{l_avg.avg:.4f}/{l_avg.last:.4f}')
                log.info(f'{i_epoch=}, {i_batch=}/{Nb}; {loss_str}')
        log.info('Epoch stats: avg_batchsize {}, loader_size {} '.format(
            avg_bs.avg, len(loader)))
        ckpt.save_epoch(rundir, i_epoch)
        if check_step(i_epoch, cf['period.i_epoch.loss_log']):
            log.info(f'Loss at {i_epoch=}: {l_avg.avg}')

        if check_step(i_epoch, cf['period.i_epoch.q_eval']):
            model.eval()
            kacc_train = quick_accuracy_over_kfeat(
                    tkfeats_train, model, True)*100
            kacc_eval = quick_accuracy_over_kfeat(
                    tkfeats_eval, model, True)*100
            model.train()
            log.info(f'Qperf at {i_epoch=}: '
                    f'{kacc_train=:.2f} {kacc_eval=:.2f}')
        if check_step(i_epoch, cf['period.i_epoch.full_eval']):
            model.eval()
            result = mlp_perf_kf_evaluate(
                    model, tkfeats_train, tkfeats_eval,
                    tubes_dwein_eval, tubes_dgt_eval,
                    dataset, output_dims)
            result_fulltube = mlp_perf_fulltube_evaluate(
                    model, man_feats_dwt,
                    tubes_dwein_eval, tubes_dwein_prov,
                    tubes_dgt_eval, dwti_to_label_eval,
                    dataset, output_dims,
                    cf['eval.full_tubes.detect_mode'],
                    cf['eval.full_tubes.nms'],
                    cf['eval.full_tubes.field_nms'],
                    cf['eval.full_tubes.field_det'])
            result.update(result_fulltube)
            model.train()
            log.info(f'Evalset perf at {i_epoch=}')
            mlp_perf_display(result, sset_eval)
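
Note: _get_trainloader_rnd_sampler is not shown. Judging from the call above (it receives the dataset, the batch size and a numpy Generator seeded with initial_seed+i_epoch, and the loop unpacks each batch as a list of per-item dicts), a sketch with those assumptions spelled out could be:

import numpy as np
from torch.utils.data import DataLoader, Sampler


class NumpyRandomSampler_sketch(Sampler):
    """Sampler whose permutation is driven by a numpy Generator, so that
    restarting training at epoch i reproduces the same batch order."""
    def __init__(self, data_source, rgen: np.random.Generator):
        self.data_source = data_source
        self.rgen = rgen

    def __iter__(self):
        return iter(self.rgen.permutation(len(self.data_source)).tolist())

    def __len__(self):
        return len(self.data_source)


def get_trainloader_rnd_sampler_sketch(tdataset, batch_size, rgen):
    sampler = NumpyRandomSampler_sketch(tdataset, rgen)
    # Keeping the per-item dicts untouched matches the "(meta,) = data_input"
    # unpacking in the loop above; the real collate function may differ.
    return DataLoader(tdataset, batch_size=batch_size, sampler=sampler,
                      collate_fn=lambda batch: (batch,))
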
Example #14
    def __init__(self, fold, featname):
        keyframes_featfold = Path(fold)
        self.outputs = small.load_pkl(
                keyframes_featfold/'dict_outputs.pkl')[featname]
        self.keyframes = small.load_pkl(keyframes_featfold/'keyframes.pkl')
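
Note: Examples #12 and #13 call man_feats_kf.split_off(vids, ...) on this manager; the method itself is not shown. Modeled on Ncfg_kfeats.split_off_D from Example #8 and on the 'X'/'Y'/'kf' fields used later, a sketch might look like this; the integer label under 'action_id' and the meaning of the boolean flag (convert to torch) are assumptions.

from typing import Dict, List

import numpy as np
import torch


def split_off_sketch(outputs: np.ndarray, keyframes: List[dict],
                     vids: List[str], to_torch: bool) -> Dict:
    """Select the keyframes of `vids` and pull out their features and labels."""
    inds = [i for i, kf in enumerate(keyframes) if kf['vid'] in vids]
    kf_subset = [keyframes[i] for i in inds]
    X = outputs[inds]
    Y = np.array([kf['action_id'] for kf in kf_subset])
    if to_torch:
        return {'X': torch.from_numpy(X), 'Y': torch.from_numpy(Y), 'kf': kf_subset}
    return {'X': X, 'Y': Y, 'kf': kf_subset}
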
Example #15
def daly_map_explore(workfolder, cfg_dict, add_args):
    out, = snippets.get_subfolders(workfolder, ['out'])
    cfg = snippets.yconfig.YConfig_v2(cfg_dict, allowed_wo_defaults=[''])
    cfg.set_defaults_yaml("""
    dataset:
        cache_folder: ~
        mirror: 'uname'
    split_assignment: 'train/val'
    inputs:
        tubes_dwein: ~
        keyframes:
            fold: ~
            featname: 'roipooled'
        big:
          fold: ~
        trained_mlp_roi: ~
    net:
        kind: 'layer1'
        layer1:
            H: 32
        ll_dropout: 0.5
    seed: 0
    data_scaler: 'no'
    """)
    cf = cfg.parse()

    # Seeds
    initial_seed = cf['seed']
    torch.manual_seed(initial_seed)
    rgen = np.random.default_rng(initial_seed)

    # Data
    # General DALY level preparation
    dataset = Ncfg_daly.get_dataset(cf)
    vgroup = Ncfg_daly.get_vids(cf, dataset)
    sset_train, sset_eval = cf['split_assignment'].split('/')
    # keyframe feats
    tkfeats_d, scaler = Ncfg_kfeats.load_scale(cf, vgroup)
    # wein tubes
    tubes_dwein_d, tubes_dgt_d = load_gt_and_wein_tubes(
        cf['inputs.tubes_dwein'], dataset, vgroup)
    tubes_dwein_prov: Dict[I_dwein, Tube_daly_wein_as_provided] = \
            small.load_pkl(cf['inputs.tubes_dwein'])

    # Sset
    tkfeats_train = tkfeats_d[sset_train]
    tkfeats_eval = tkfeats_d[sset_eval]
    tubes_dwein_train = tubes_dwein_d[sset_train]
    tubes_dwein_eval = tubes_dwein_d[sset_eval]
    tubes_dgt_train = tubes_dgt_d[sset_train]
    tubes_dgt_eval = tubes_dgt_d[sset_eval]

    # Big manager
    man_big = Manager_big_simple(cf['inputs.big.fold'])
    # Load create model
    D_in = man_big.BIG.shape[-1]
    model = define_mlp_model(cf, D_in, 11)
    states = torch.load(cf['inputs.trained_mlp_roi'])
    model.load_state_dict(states['model_sdict'])
    model.eval()

    # quick acc
    kf_cut_last = True
    result = {}
    result['kacc_train'] = _quick_accuracy_over_kfeat(tkfeats_train, model,
                                                      kf_cut_last)
    result['kacc_eval'] = _quick_accuracy_over_kfeat(tkfeats_eval, model,
                                                     kf_cut_last)

    # // Full AUC (Evaluation of full wein-tubes with a trained model)
    tube_softmaxes_eval: Dict[I_dwein, np.ndarray] = \
        _predict_softmaxes_for_dwein_tubes_in_da_big(
            model, man_big, tubes_dwein_eval.keys())
    tube_softmaxes_eval_nobg = {
        k: v[:, :-1]
        for k, v in tube_softmaxes_eval.items()
    }

    iou_thresholds = [.3, .5, .7]
    av_gt_tubes: AV_dict[T_dgt] = push_into_avdict(tubes_dgt_eval)

    # Universal detector experiments
    av_stubes_eval_augm: AV_dict[Dict] = {}
    for dwt_index, tube in tubes_dwein_eval.items():
        softmaxes = tube_softmaxes_eval_nobg[dwt_index]
        scores = softmaxes.mean(axis=0)
        hscores = tubes_dwein_prov[dwt_index]['hscores']
        iscores = tubes_dwein_prov[dwt_index]['iscores']
        (vid, bunch_id, tube_id) = dwt_index
        for action_name, score in zip(dataset.action_names, scores):
            stube = tube.copy()
            stube['cls_score'] = score
            stube['univ_score'] = scores.sum()
            stube['hscore'] = hscores.mean()
            stube['iscore'] = np.nanmean(iscores)
            (av_stubes_eval_augm.setdefault(action_name,
                                            {}).setdefault(vid,
                                                           []).append(stube))

    def assign_scorefield(av_stubes: Any, score_field):
        for a, v_stubes in av_stubes.items():
            for v, stubes in v_stubes.items():
                for stube in stubes:
                    stube['score'] = stube[score_field]

    def specific_nms_exp(av_stubes, av_gt_tubes, score_field):
        assign_scorefield(av_stubes, score_field)
        av_stubes = av_stubes_above_score(av_stubes, 0.0)
        av_stubes = compute_nms_for_av_stubes(av_stubes, 0.3)
        assign_scorefield(av_stubes, 'cls_score')
        df_ap = compute_ap_for_avtubes_as_df(av_gt_tubes, av_stubes,
                                             iou_thresholds, False, False)
        return df_ap, av_stubes

    dfap_per_nmsfield = {}
    av_stubes_after_per_nmsfield = {}
    for score_field in ['cls_score', 'univ_score', 'hscore', 'iscore']:
        av_stubes = copy.deepcopy(av_stubes_eval_augm)
        df_ap, av_stubes_after = specific_nms_exp(av_stubes, av_gt_tubes,
                                                  score_field)
        dfap_per_nmsfield[score_field] = df_ap
        av_stubes_after_per_nmsfield[score_field] = av_stubes_after
    x = pd.concat(dfap_per_nmsfield)[0.5].unstack(level=0) * 100
    log.info('AP5 per NMSfield:\n{}'.format(snippets.df_to_table_v2(x)))

    def box_overlap_stats(score_field, av_stubes_after):
        vidfc_per_action = {}
        for a, v_stubes in av_stubes_after.items():
            vidf = {}  # frame groups
            for v, stubes in v_stubes.items():
                for stube in stubes:
                    for frame_ind in stube['frame_inds']:
                        vidf.setdefault((v, frame_ind),
                                        []).append(stube['index'])
            vidfc = pd.Series({k: len(v) for k, v in vidf.items()})
            vidfc_per_action[a] = vidfc
        s_eq1 = (pd.concat(vidfc_per_action) == 1).unstack(level=0).mean()
        s_mean = pd.concat(vidfc_per_action).unstack(level=0).mean()
        df_boxoverlap = pd.concat((s_eq1, s_mean),
                                  axis=1,
                                  keys=['equal1', 'avg']).T
        log.info('Boxoverl for "{}"\n{}'.format(
            score_field, snippets.df_to_table_v2(df_boxoverlap)))

    # Record instances when same frame has multiple boxes (per class)
    for score_field, av_stubes_after in av_stubes_after_per_nmsfield.items():
        box_overlap_stats(score_field, av_stubes_after)
    box_overlap_stats(score_field, av_stubes_eval_augm)
Example #16
def merge_scores_avstubes(workfolder, cfg_dict, add_args):
    out, = snippets.get_subfolders(workfolder, ['out'])
    cfg = snippets.YConfig(cfg_dict)
    cfg.set_defaults_handling(raise_without_defaults=False)
    Ncfg_dataset.set_dataset_seed(cfg)
    Ncfg_tube_eval.set_defcfg(cfg)
    cfg.set_defaults("""
    tube_dict: ~
    combinations:
        enabled: False
        sizes: ~
    """)
    cf = cfg.parse()

    dataset, split_vids, av_gt_tubes = \
            Ncfg_dataset.resolve_dataset_tubes(cf)
    ts = {k: small.load_pkl(v) for k, v in cfg_dict['tube_dict'].items()}
    if not cf['combinations.enabled']:
        av_stubes = _meanpool_avstubes(list(ts.values()))
        small.save_pkl(out / 'merged_av_stubes.pkl', av_stubes)
        log.info('All combined score:')
        Ncfg_tube_eval.evalprint_if(cf, av_stubes, av_gt_tubes)
        return

    sizes = cf['combinations.sizes']
    combinations = [list(itertools.combinations(ts.keys(), r)) for r in sizes]
    combinations = list(itertools.chain(*combinations))
    log.info('Combinations: {}'.format(combinations))

    comb_dfdicts = {}
    for comb in combinations:
        comb_name = '+'.join(comb)
        comb_fold = small.mkdir(out / comb_name)

        def compute():
            to_merge = [ts[k] for k in comb]
            av_stubes = _meanpool_avstubes(to_merge)
            small.save_pkl(comb_fold / 'av_stubes.pkl', av_stubes)
            dfdict = Ncfg_tube_eval.eval_as_df(cf, av_stubes, av_gt_tubes)
            return dfdict

        dfdict = small.stash2(comb_fold / 'stashed_dfdict.pkl')(compute)
        comb_dfdicts[comb_name] = dfdict

    log.info('Individual results:')
    for comb_name, dfdict in comb_dfdicts.items():
        log.info(f'Results for {comb_name}:')
        _print_quick_evaluation_stats(dfdict)

    log.info('Combined tables:')
    big_ = {comb: pd.concat(dfdict) for comb, dfdict in comb_dfdicts.items()}
    big = pd.concat(big_, axis=1)
    for stat in big.index.levels[0]:
        log.info(f'=== {stat} ===')
        for thresh in big.columns.levels[1]:
            X = (big.loc[stat]
                 .loc[:, pd.IndexSlice[:, thresh]]
                 .droplevel(1, axis=1))
            table = snippets.df_to_table_v2((X * 100).round(2))
            log.info(f'{stat} for IOU {thresh}:\n{table}')
        log.info('\n')
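
Note: _meanpool_avstubes is a project helper not shown in these examples. Assuming the inputs are AV_dicts over the same tubes in the same order and that the field being averaged is 'score', a minimal mean-pooling sketch could be the following; how the real helper aligns tubes and which fields it averages is not visible here.

import copy
from typing import Any, Dict, List

import numpy as np


def meanpool_avstubes_sketch(
        abunch: List[Dict[str, Dict[str, List[Dict[str, Any]]]]]
        ) -> Dict[str, Dict[str, List[Dict[str, Any]]]]:
    """Average the per-tube 'score' over several scored-tube dicts."""
    merged = copy.deepcopy(abunch[0])
    for action, v_stubes in merged.items():
        for vid, stubes in v_stubes.items():
            for i, stube in enumerate(stubes):
                stube['score'] = float(np.mean(
                    [b[action][vid][i]['score'] for b in abunch]))
    return merged
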