def load_wein_tubes(workfolder, cfg_dict, add_args):
    """
    Load Philippe (Wein) tubes from a python2 pickle, re-key them and save.

    Philippe tubes:
        tube:
        (one row per frame):
            index of the frame (starting at 1)
            x1 y1 x2 y2
            score of the generic human detector
            score of the instance-specific detector

    Saves 'extracted_tubes.pkl': a dict keyed by (vid, bunch_id, tube_id)
    with per-tube arrays of frame indices, boxes and the two score columns.
    """
    out, = snippets.get_subfolders(workfolder, ['out'])
    cfg = snippets.YConfig(cfg_dict)
    cfg.set_deftype("""
    wein_tubes: [~, str]
    dataset:
        cache_folder: [~, str]
        mirror: ['uname', ~]
    wein_index: [0, int]  # 0 = 510 cut tubes, 1 = 200 uncut tubes
    """)
    cf = cfg.parse()
    # Dataset
    dataset = Dataset_daly_ocv(cf['dataset.mirror'])
    dataset.populate_from_folder(cf['dataset.cache_folder'])
    # Tubes
    # 0 has 510 elements, 1 has 200 elements
    wein_package = small.load_py2_pkl(cf['wein_tubes'])[cf['wein_index']]
    # We got a dictionary of filenames (w .mp4 suffix)
    extracted_tubes: Dict[I_dwein, Tube_daly_wein_as_provided] = {}
    for vid_mp4, wein_bunches in wein_package.items():
        # Strip the '.mp4' suffix to recover the video id
        vid = re.search(r'(.*)\.mp4', vid_mp4).group(1)
        rs = dataset.rstats[vid]
        for bunch_id, wein_tubes in enumerate(wein_bunches):
            for tube_id, wein_tube in enumerate(wein_tubes):
                # Frames are provided 1-based; convert to 0-based.
                # BUGFIX: `np.int` is a deprecated alias removed in
                # NumPy 1.24 — use the builtin `int` instead.
                frame_inds = wein_tube[:, 0].astype(int) - 1
                assert max(frame_inds) < rs['max_pos_frames']
                boxes_ltrd = wein_tube[:, 1:5]  # ltrd
                human_scores = wein_tube[:, 5]
                instance_scores = wein_tube[:, 6]
                tube = {
                    'frame_inds': frame_inds,
                    'boxes': boxes_ltrd,
                    'hscores': human_scores,
                    'iscores': instance_scores}
                extracted_tubes[(vid, bunch_id, tube_id)] = tube
    small.save_pkl(out / 'extracted_tubes.pkl', extracted_tubes)
def assign_objactions_to_tubes(workfolder, cfg_dict, add_args):
    """
    Score tubes by assigning objactions to them and pooling the scores,
    then evaluate resulting scored tubes
    - Objactions: detecton evaluated datalist or gt objects (per frame)
    - Tubes: philippe tubes
    - Assignment: inner overlap or iou scores
    """
    out, = snippets.get_subfolders(workfolder, ['out'])
    cfg = snippets.YConfig(cfg_dict)
    Ncfg_dataset.set_dataset_seed(cfg)
    Ncfg_tubes.set_defcfg(cfg)
    cfg.set_deftype("""
    actobjects:
        source: ['detected', ['detected', 'gt']]
        detected:
            path: [~, ~]
    obj_to_tube:
        overlap_type: ['inner_overlap', ['inner_overlap', 'iou']]
        overlap_cutoff: [0.2, float]
        score_cutoff: [0.2, float]
    """)
    Ncfg_tube_eval.set_defcfg(cfg)
    cf = cfg.parse()
    dataset, split_vids, av_gt_tubes = \
        Ncfg_dataset.resolve_dataset_tubes(cf)
    # Inputs to the assignment routine: tubes and per-frame objactions
    tubes_dwein: Dict[I_dwein, T_dwein] = \
        Ncfg_tubes.resolve_tubes_dwein(cf, split_vids)
    objactions_per_frame: Dict[Vid_daly, Dict[int, Objaction_dets]] = \
        _resolve_actobjects(cf, dataset, split_vids)
    # Perform the assignment, producing scored tubes
    av_stubes: AV_dict[T_dwein_scored] = \
        score_ftubes_via_objaction_overlap_aggregation(
            dataset, objactions_per_frame, tubes_dwein,
            cf['obj_to_tube.overlap_type'],
            cf['obj_to_tube.overlap_cutoff'],
            cf['obj_to_tube.score_cutoff'])
    small.save_pkl(out / 'av_stubes.pkl', av_stubes)
    Ncfg_tube_eval.evalprint_if(cf, av_stubes, av_gt_tubes)
def apply_pncaffe_rcnn_in_frames(workfolder, cfg_dict, add_args):
    """
    Apply Phil-Nic rcnn model on tube boxes to extract per-action scores
    """
    out, = snippets.get_subfolders(workfolder, ['out'])
    cfg = snippets.YConfig(cfg_dict)
    # Register every config section this experiment relies on
    for register in (Ncfg_dataset.set_dataset_seed, Ncfg_tubes.set_defcfg,
                     Ncfg_nicphil_rcnn.set_defcfg,
                     Ncfg_generic_rcnn_eval.set_defcfg,
                     Ncfg_tube_eval.set_defcfg):
        register(cfg)
    cf = cfg.parse()
    # Preparation
    dataset, split_vids, av_gt_tubes = \
        Ncfg_dataset.resolve_dataset_tubes(cf)
    tubes_dwein: Dict[I_dwein, T_dwein] = \
        Ncfg_tubes.resolve_tubes_dwein(cf, split_vids)
    net_helper: Nicolas_net_helper = Ncfg_nicphil_rcnn.resolve_helper(cf)
    # Experiment logic
    if cf['demo_run.enabled']:
        Ncfg_generic_rcnn_eval.demo_run(
            cf, out, dataset, split_vids, tubes_dwein, net_helper)
        return
    vf_connections_dwti, vf_cls_probs = \
        Ncfg_generic_rcnn_eval.evaluate_rcnn_boxes(
            cf, out, dataset, split_vids, tubes_dwein, net_helper)
    small.save_pkl(out / 'vf_connections_dwti.pkl', vf_connections_dwti)
    small.save_pkl(out / 'vf_cls_probs.pkl', vf_cls_probs)
    av_stubes = Ncfg_generic_rcnn_eval.aggregate_rcnn_scores(
        dataset, tubes_dwein, vf_connections_dwti, vf_cls_probs,
        cf['score_agg_kind'])
    small.save_pkl(out / 'av_stubes.pkl', av_stubes)
    # Post experiment
    Ncfg_tube_eval.evalprint_if(cf, av_stubes, av_gt_tubes)
def gather_reapply_agg_rcnn_avstubes(workfolder, cfg_dict, add_args):
    """
    Will apply aggregation again
    """
    out, = snippets.get_subfolders(workfolder, ['out'])
    cfg = snippets.YConfig(cfg_dict)
    cfg.set_defaults_handling(['gather.paths'])
    Ncfg_dataset.set_dataset_seed(cfg)
    Ncfg_tubes.set_defcfg(cfg)
    cfg.set_deftype("""
    gather:
        paths: [~, ~]
    score_agg_kind: ['mean', ['mean', 'max', 'sum']]
    """)
    Ncfg_tube_eval.set_defcfg(cfg)
    cf = cfg.parse()
    # Preparation
    dataset, split_vids, av_gt_tubes = \
        Ncfg_dataset.resolve_dataset_tubes(cf)
    tubes_dwein: Dict[I_dwein, T_dwein] = \
        Ncfg_tubes.resolve_tubes_dwein(cf, split_vids)
    # Experiment logic: refuse to run unless every folder has both pickles
    gather_paths = cf['gather.paths']
    if not _gather_check_all_present(
            gather_paths, ['vf_cls_probs.pkl', 'vf_connections_dwti.pkl']):
        return
    # Merge the per-folder pickles into single mappings
    vf_connections_dwti = {}
    vf_cls_probs = {}
    for folder in map(Path, gather_paths):
        probs_part = small.load_pkl(folder / 'vf_cls_probs.pkl')
        conns_part = small.load_pkl(folder / 'vf_connections_dwti.pkl')
        assert probs_part.keys() == conns_part.keys()
        vf_cls_probs.update(probs_part)
        vf_connections_dwti.update(conns_part)
    small.save_pkl(out / 'vf_connections_dwti.pkl', vf_connections_dwti)
    small.save_pkl(out / 'vf_cls_probs.pkl', vf_cls_probs)
    av_stubes: AV_dict[T_dwein_scored] = \
        Ncfg_generic_rcnn_eval.aggregate_rcnn_scores(
            dataset, tubes_dwein, vf_connections_dwti, vf_cls_probs,
            cf['score_agg_kind'])
    small.save_pkl(out / 'av_stubes.pkl', av_stubes)
    # Post experiment
    Ncfg_tube_eval.evalprint_if(cf, av_stubes, av_gt_tubes)
def apply_pfadet_rcnn_in_frames(workfolder, cfg_dict, add_args):
    """
    Apply trained d2 frcnn model on tube boxes to extract per-action scores
    - We dispense with the frcnn box predictions and only use per-roi scores
    """
    out, = snippets.get_subfolders(workfolder, ['out'])
    cfg = snippets.YConfig(cfg_dict)
    cfg.set_defaults_handling(['d2.'])
    Ncfg_dataset.set_dataset_seed(cfg)
    Ncfg_tubes.set_defcfg(cfg)
    Ncfg_generic_rcnn_eval.set_defcfg(cfg)
    cfg.set_deftype("""
    d2_rcnn:
        model: [~, ~]
        conf_thresh: [0.0, float]
    """)
    Ncfg_tube_eval.set_defcfg(cfg)
    cf = cfg.parse()
    # Additional, free-form d2 options (passed through to detectron2)
    d2_extra_cf = cfg.without_prefix('d2.')
    # Preparation
    dataset, split_vids, av_gt_tubes = \
        Ncfg_dataset.resolve_dataset_tubes(cf)
    tubes_dwein: Dict[I_dwein, T_dwein] = \
        Ncfg_tubes.resolve_tubes_dwein(cf, split_vids)
    net_helper = D2_rcnn_helper(cf, d2_extra_cf, dataset, out)
    # Experiment logic
    if cf['demo_run.enabled']:
        Ncfg_generic_rcnn_eval.demo_run(
            cf, out, dataset, split_vids, tubes_dwein, net_helper)
        return
    vf_connections_dwti, vf_cls_probs = \
        Ncfg_generic_rcnn_eval.evaluate_rcnn_boxes(
            cf, out, dataset, split_vids, tubes_dwein, net_helper)
    small.save_pkl(out / 'vf_connections_dwti.pkl', vf_connections_dwti)
    small.save_pkl(out / 'vf_cls_probs.pkl', vf_cls_probs)
    av_stubes = Ncfg_generic_rcnn_eval.aggregate_rcnn_scores(
        dataset, tubes_dwein, vf_connections_dwti, vf_cls_probs,
        cf['score_agg_kind'])
    small.save_pkl(out / 'av_stubes.pkl', av_stubes)
    # Post experiment
    Ncfg_tube_eval.evalprint_if(cf, av_stubes, av_gt_tubes)
def _save(self, i):
    # Persist the accumulated result for chunk `i`, then drop a marker
    # file so a later run can tell this chunk is complete.
    files = self._get_filenames(i)
    small.save_pkl(files['pkl'], self.result)
    files['finished'].touch()
def compute():
    # NOTE(review): closure fragment — `ts`, `comb`, `comb_fold`, `cf` and
    # `av_gt_tubes` are free variables resolved from an enclosing scope not
    # visible at this position (a matching def appears nested inside
    # merge_scores_avstubes).
    # Pools the selected scored-tube dicts via _meanpool_avstubes, saves the
    # pooled result, and returns the evaluation dataframe dict.
    to_merge = [ts[k] for k in comb]
    av_stubes = _meanpool_avstubes(to_merge)
    small.save_pkl(comb_fold / 'av_stubes.pkl', av_stubes)
    dfdict = Ncfg_tube_eval.eval_as_df(cf, av_stubes, av_gt_tubes)
    return dfdict
def merge_scores_avstubes(workfolder, cfg_dict, add_args):
    """
    Mean-pool several scored-tube pickles into one and evaluate.

    With `combinations.enabled` off: pool all inputs, save and evaluate.
    With it on: evaluate every r-sized combination of the inputs (r drawn
    from `combinations.sizes`), caching each result, then print per-stat
    comparison tables across combinations and IOU thresholds.
    """
    out, = snippets.get_subfolders(workfolder, ['out'])
    cfg = snippets.YConfig(cfg_dict)
    cfg.set_defaults_handling(raise_without_defaults=False)
    Ncfg_dataset.set_dataset_seed(cfg)
    Ncfg_tube_eval.set_defcfg(cfg)
    cfg.set_defaults("""
    tube_dict: ~
    combinations:
        enabled: False
        sizes: ~
    """)
    cf = cfg.parse()
    dataset, split_vids, av_gt_tubes = \
        Ncfg_dataset.resolve_dataset_tubes(cf)
    # Load every named scored-tube pickle
    ts = {k: small.load_pkl(v) for k, v in cfg_dict['tube_dict'].items()}
    if not cf['combinations.enabled']:
        av_stubes = _meanpool_avstubes(list(ts.values()))
        small.save_pkl(out / 'merged_av_stubes.pkl', av_stubes)
        log.info('All combined score:')
        Ncfg_tube_eval.evalprint_if(cf, av_stubes, av_gt_tubes)
        return

    sizes = cf['combinations.sizes']
    combinations = [list(itertools.combinations(ts.keys(), r))
                    for r in sizes]
    combinations = list(itertools.chain(*combinations))
    log.info('Combinations: {}'.format(combinations))

    comb_dfdicts = {}
    for comb in combinations:
        comb_name = '+'.join(comb)
        comb_fold = small.mkdir(out / comb_name)

        def compute():
            # Pool the tubes selected by this combination and evaluate
            to_merge = [ts[k] for k in comb]
            av_stubes = _meanpool_avstubes(to_merge)
            small.save_pkl(comb_fold / 'av_stubes.pkl', av_stubes)
            dfdict = Ncfg_tube_eval.eval_as_df(cf, av_stubes, av_gt_tubes)
            return dfdict

        # stash2 caches the computation result on disk
        dfdict = small.stash2(comb_fold / 'stashed_dfdict.pkl')(compute)
        comb_dfdicts[comb_name] = dfdict

    log.info('Individual results:')
    for comb_name, dfdict in comb_dfdicts.items():
        log.info(f'Results for {comb_name}:')
        _print_quick_evaluation_stats(dfdict)

    log.info('Combined tables:')
    big_ = {comb: pd.concat(dfdict)
            for comb, dfdict in comb_dfdicts.items()}
    big = pd.concat(big_, axis=1)
    for stat in big.index.levels[0]:
        log.info(f'=== {stat} ===')
        for thresh in big.columns.levels[1]:
            # BUGFIX: was hard-coded `big.loc['ap']`, so every "stat"
            # section printed the 'ap' table while the log message claimed
            # it was `stat`; index with the loop variable instead.
            X = (big.loc[stat]
                 .loc[:, pd.IndexSlice[:, thresh]]
                 .droplevel(1, axis=1))
            table = snippets.df_to_table_v2((X * 100).round(2))
            log.info(f'{stat} for IOU {thresh}:\n{table}')
        log.info('\n')