def main(ground_truth_filename, proposal_filename, max_avg_nr_proposals=100,
         tiou_thresholds=np.linspace(0.5, 0.95, 10),
         subset='validation', verbose=True, check_status=True):
    """Run the ANETproposal evaluation end to end.

    Args:
        ground_truth_filename: path to the ground-truth annotation JSON.
        proposal_filename: path to the proposal results JSON.
        max_avg_nr_proposals: maximum average number of proposals per video
            considered by the evaluator.
        tiou_thresholds: temporal-IoU thresholds used for recall computation.
        subset: dataset subset to evaluate on (e.g. 'validation').
        verbose: whether the evaluator prints progress/results.
        check_status: whether the evaluator verifies video availability.
    """
    # Bug fix: the original hard-coded verbose=True and check_status=True,
    # silently ignoring the caller-supplied parameters.
    anet_proposal = ANETproposal(ground_truth_filename, proposal_filename,
                                 tiou_thresholds=tiou_thresholds,
                                 max_avg_nr_proposals=max_avg_nr_proposals,
                                 subset=subset, verbose=verbose,
                                 check_status=check_status)
    anet_proposal.evaluate()
# Example 2
def run_evaluation(gt_file, res_file, max_avg_nr_proposals=100,
                   tiou_thresholds=np.linspace(0.5, 0.95, 10),
                   subset='validation'):
    """Evaluate proposals in ``res_file`` against ``gt_file``.

    Args:
        gt_file: path to the ground-truth annotation JSON.
        res_file: path to the proposal results JSON.
        max_avg_nr_proposals: cap on the average number of proposals per video.
        tiou_thresholds: temporal-IoU thresholds for recall computation.
        subset: dataset subset to evaluate on.

    Returns:
        tuple: (average_nr_proposals, average_recall, recall) as produced
        by the evaluator.
    """
    evaluator = ANETproposal(gt_file, res_file,
                             tiou_thresholds=tiou_thresholds,
                             max_avg_nr_proposals=max_avg_nr_proposals,
                             subset=subset, verbose=True, check_status=False)
    evaluator.evaluate()

    # Hand back the evaluator's result attributes directly.
    return (evaluator.proposals_per_video,
            evaluator.avg_recall,
            evaluator.recall)
# Example 3
def evaluation_proposal(args, eval_file):
    """Evaluate THUMOS14 proposals and report AR@k metrics.

    Args:
        args: unused; kept for interface compatibility with existing callers.
        eval_file: path to the proposal results JSON to evaluate.

    Returns:
        tuple: (names, values) where ``names`` are AR@k labels and
        ``values`` are the corresponding mean recalls over the tIoU
        thresholds.
    """
    ground_truth_filename = './Evaluation/data/thumos14.json'
    anet_proposal = ANETproposal(ground_truth_filename,
                                 eval_file,
                                 tiou_thresholds=np.linspace(0.5, 1.0, 11),
                                 max_avg_nr_proposals=1000,
                                 subset='test',
                                 verbose=True,
                                 check_status=False)
    anet_proposal.evaluate()
    recall = anet_proposal.recall
    # Derive labels and recall-column indices from a single list so they
    # cannot drift apart (column k-1 holds recall at k proposals per video).
    proposal_counts = [50, 100, 200, 500, 1000]
    names = [f'AR@{k}' for k in proposal_counts]
    values = [np.mean(recall[:, k - 1]) for k in proposal_counts]
    return names, values
def evaluate_return_area(ground_truth_filename, proposal_filename,
                         max_avg_nr_proposals=100,
                         tiou_thresholds=np.linspace(0.5, 0.95, 10),
                         subset='validation', verbose=True, check_status=True):
    """Evaluate proposals and summarize the average-recall curve.

    Args:
        ground_truth_filename: path to the ground-truth annotation JSON.
        proposal_filename: path to the proposal results JSON.
        max_avg_nr_proposals: cap on the average number of proposals per video.
        tiou_thresholds: temporal-IoU thresholds for recall computation.
        subset: dataset subset to evaluate on.
        verbose: whether the evaluator prints progress/results.
        check_status: whether the evaluator verifies video availability.

    Returns:
        tuple: (AR_AN, Recall_all) — the area under the average-recall vs.
        average-number-of-proposals curve (as a percentage of the maximum
        proposal count) and the average recall at the largest proposal
        count (as a percentage).
    """
    # Bug fix: the original hard-coded verbose=True and check_status=True,
    # silently ignoring the caller-supplied parameters.
    anet_proposal = ANETproposal(ground_truth_filename, proposal_filename,
                                 tiou_thresholds=tiou_thresholds,
                                 max_avg_nr_proposals=max_avg_nr_proposals,
                                 subset=subset, verbose=verbose,
                                 check_status=check_status)
    anet_proposal.evaluate()

    average_recall = anet_proposal.avg_recall
    average_nr_proposals = anet_proposal.proposals_per_video

    # Trapezoidal area under the AR-AN curve, normalized by the largest
    # average proposal count so the score is a percentage.
    area_under_curve = np.trapz(average_recall, average_nr_proposals)
    ar_an = 100. * float(area_under_curve) / average_nr_proposals[-1]
    recall_all = 100. * float(average_recall[-1])

    return (ar_an, recall_all)
# Example 5
def run_evaluation(ground_truth_filename,
                   proposal_filename,
                   max_avg_nr_proposals=100,
                   tiou_thresholds=np.linspace(0.5, 0.95, 10),
                   subset='validation'):
    """Run the proposal evaluation and return its result arrays.

    Args:
        ground_truth_filename: path to the ground-truth annotation JSON.
        proposal_filename: path to the proposal results JSON.
        max_avg_nr_proposals: cap on the average number of proposals per video.
        tiou_thresholds: temporal-IoU thresholds for recall computation.
        subset: dataset subset to evaluate on.

    Returns:
        tuple: (average_nr_proposals, average_recall, recall, auc_score).
    """
    proposal_eval = ANETproposal(ground_truth_filename,
                                 proposal_filename,
                                 tiou_thresholds=tiou_thresholds,
                                 max_avg_nr_proposals=max_avg_nr_proposals,
                                 subset=subset,
                                 verbose=True,
                                 check_status=True)
    # evaluate() yields the AUC score; the remaining results are exposed
    # as attributes on the evaluator afterwards.
    auc_score = proposal_eval.evaluate()

    return (proposal_eval.proposals_per_video,
            proposal_eval.avg_recall,
            proposal_eval.recall,
            auc_score)
def evaluation_proposal(opt):
    """Evaluate ActivityNet 1.3 proposals, print and log a summary line.

    Args:
        opt: options mapping providing 'output' (run directory) and
            'result_file' (proposal results filename inside it).
    """
    evaluator = ANETproposal(
        ground_truth_filename="./Evaluation/data/activity_net_1_3_new.json",
        proposal_filename=os.path.join(opt['output'], opt["result_file"]),
        tiou_thresholds=np.linspace(0.5, 0.95, 10),
        max_avg_nr_proposals=100,
        subset='validation',
        verbose=True,
        check_status=False)
    evaluator.evaluate()

    parent_path, run_id = os.path.split(os.path.normpath(opt['output']))

    # Build the AR@k fragments from a label/column table; recall column
    # k-1 (or -1 for the last column) holds recall at k proposals/video.
    recall = evaluator.recall
    ar_columns = [('AR@1', 0), ('AR@5', 4), ('AR@10', 9), ('AR@100', -1)]
    metrics = ''.join(
        f' {label} {np.mean(recall[:,col])*100:.3f}'
        for label, col in ar_columns)
    results = f'[{run_id}|Proposals] AUC {evaluator.auc*100:.3f}' + metrics

    print(results)
    # Append the summary to a shared results file one level above the run dir.
    with open(os.path.join(parent_path, 'results.txt'), 'a') as fobj:
        fobj.write(f'{results}\n')