def learn_evaluation_verifiers(infr):
    """
    Creates a cross-validated ensemble of classifiers to evaluate
    verifier error cases and groundtruth errors.

    Args:
        infr: the AnnotInference object providing the labeled edges.

    Returns:
        dict: task-key -> evaluation verifier (includes 'match_state').

    Doctest:
        >>> import ibeis
        >>> infr = ibeis.AnnotInference(
        >>>     'PZ_MTEST', aids='all', autoinit='annotmatch',
        >>>     verbose=4)
        >>> verifiers = infr.learn_evaluation_verifiers()
        >>> edges = list(infr.edges())
        >>> verif = verifiers['match_state']
        >>> probs = verif.predict_proba_df(edges)
        >>> print(probs)
    """
    # FIX: message was previously misspelled 'learn_evaluataion_verifiers'
    infr.print('learn_evaluation_verifiers')
    from ibeis.algo.verif import vsone
    pblm = vsone.OneVsOneProblem(infr, verbose=5)
    pblm.primary_task_key = 'match_state'
    pblm.eval_clf_keys = ['RF']
    pblm.eval_data_keys = ['learn(sum,glob)']
    pblm.setup_evaluation()
    # Removed vestigial `if True:` guard; the report was always emitted.
    pblm.report_evaluation()
    verifiers = pblm._make_evaluation_verifiers(pblm.eval_task_keys)
    return verifiers
def learn_deploy_verifiers(infr, publish=False, dpath='.'):
    """
    Uses current knowledge to train verifiers for new unseen pairs.

    Args:
        infr: the AnnotInference object providing the labeled edges.
        publish (bool): if True, publish the deployed 'match_state'
            verifier. Defaults to False.
        dpath (str): directory the trained verifiers are deployed into.
            Defaults to the current directory (previously hard-coded).

    Example:
        >>> import ibeis
        >>> ibs = ibeis.opendb('PZ_MTEST')
        >>> infr = ibeis.AnnotInference(ibs, aids='all')
        >>> infr.ensure_mst()
        >>> publish = False
        >>> infr.learn_deploy_verifiers()

    Ignore:
        publish = True
    """
    infr.print('learn_deploy_verifiers')
    from ibeis.algo.verif import vsone
    pblm = vsone.OneVsOneProblem(infr, verbose=True)
    pblm.primary_task_key = 'match_state'
    pblm.default_clf_key = 'RF'
    pblm.default_data_key = 'learn(sum,glob)'
    pblm.setup()
    # The primary match-state verifier is always deployed (and optionally
    # published); the photobomb classifier is only deployed when that task
    # was actually available in the training data.
    pblm.deploy(dpath, task_key='match_state', publish=publish)
    task_key = 'photobomb_state'
    if task_key in pblm.eval_task_keys:
        pblm.deploy(dpath, task_key=task_key)