Example #1
def process_run(args, run_file_name, annotation, description, thresh):
    '''
    compute scores and generate output files for a single run
    
    :returns: dict of max_scores for this one run
    '''
    ## Generate confusion matrices from a run for each target_id
    ## and for each step of the confidence cutoff
    stats = build_confusion_matrix(
        os.path.join(args.run_dir, run_file_name) + '.gz',
        annotation, args.cutoff_step, args.unan_is_true, args.include_training,
        thresh=thresh,
        require_positives=args.require_positives,
        debug=args.debug)

    compile_and_average_performance_metrics(stats)

    max_scores = find_max_scores(stats)

    log(json.dumps(stats, indent=4, sort_keys=True))

    base_output_filepath = os.path.join(
        args.run_dir, 
        run_file_name + '-' + description)

    output_filepath = base_output_filepath + '.csv'
    write_performance_metrics(output_filepath, stats)

    ## Output a graph of the key performance statistics
    graph_filepath = base_output_filepath + '.png'
    write_graph(graph_filepath, stats)

    return max_scores
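
This function assumes module-level imports of os and json and helpers defined elsewhere in the scorer (build_confusion_matrix, compile_and_average_performance_metrics, find_max_scores, write_performance_metrics, write_graph, log). As a minimal sketch of how it might be driven, assuming those helpers are importable; the load_annotation helper, the run file name, and all values below are illustrative, not taken from the original code:

import argparse

# Hypothetical driver: build a namespace carrying exactly the attributes
# process_run reads off `args`, then score one gzipped run file.
args = argparse.Namespace(
    run_dir='runs',              # directory containing '<run_file_name>.gz'
    cutoff_step=50,              # confidence-cutoff step size (illustrative)
    unan_is_true=False,          # treat unannotated documents as correct?
    include_training=False,      # also score the training time range?
    require_positives=False,     # only score targets that have positives?
    debug=False,
)

annotation = load_annotation(args)   # hypothetical helper returning truth data

max_scores = process_run(args, 'teamA-system1', annotation,
                         description='cutoff-step-50', thresh=0)
print(max_scores)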
Example #2
def process_run(args, run_file_name, annotation, description, thresh):
    '''
    compute scores and generate output files for a single run
    
    :returns: dict of max_scores for this one run
    '''
    ## Generate confusion matrices from a run for each target_id
    ## and for each step of the confidence cutoff
    stats = build_confusion_matrix(os.path.join(args.run_dir, run_file_name) +
                                   '.gz',
                                   annotation,
                                   args.cutoff_step,
                                   args.unan_is_true,
                                   args.include_training,
                                   thresh=thresh,
                                   require_positives=args.require_positives,
                                   debug=args.debug)

    compile_and_average_performance_metrics(stats)

    max_scores = find_max_scores(stats)

    log(json.dumps(stats, indent=4, sort_keys=True))

    base_output_filepath = os.path.join(args.run_dir,
                                        run_file_name + '-' + description)

    output_filepath = base_output_filepath + '.csv'
    write_performance_metrics(output_filepath, stats)

    ## Output a graph of the key performance statistics
    graph_filepath = base_output_filepath + '.png'
    write_graph(graph_filepath, stats)

    return max_scores
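
The args namespace consumed above is typically produced by argparse. Below is a sketch of a parser that would supply the attributes this function reads (run_dir, cutoff_step, unan_is_true, include_training, require_positives, debug); the option strings, defaults, and help texts are assumptions, only the destination attribute names come from the code:

import argparse

# Illustrative parser; only the dest attribute names are taken from the
# code above, everything else is assumed.
parser = argparse.ArgumentParser(description='score a directory of runs')
parser.add_argument('run_dir',
                    help='directory holding the gzipped run files')
parser.add_argument('--cutoff-step', dest='cutoff_step', type=int, default=50,
                    help='step size for sweeping the confidence cutoff')
parser.add_argument('--unan-is-true', dest='unan_is_true', action='store_true',
                    help='treat unannotated documents as correct')
parser.add_argument('--include-training', dest='include_training',
                    action='store_true',
                    help='also score documents from the training period')
parser.add_argument('--require-positives', dest='require_positives',
                    action='store_true',
                    help='only score targets that have at least one positive')
parser.add_argument('--debug', action='store_true',
                    help='enable verbose debugging output')
args = parser.parse_args()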
Example #3
            CM, FILL_TPs, annotation, positives,
            cutoff_step_size=50, debug=args.debug)

        ## split the run file name (minus its '.gz' suffix) into team id and system id
        team_id, system_id = run_file_name[:-3].split('-')

        ## now we switch from calling it a confusion matrix to calling
        ## it the general statistics matrix:
        stats = CM

        for mode in MODES:
            
            description = make_description(args, mode)

            ## Generate performance metrics for a run
            compile_and_average_performance_metrics(stats[mode])

            max_scores = find_max_scores(stats[mode])

            team_scores[mode][team_id][system_id] = max_scores

            ## Output the key performance statistics
            base_output_filepath = os.path.join(
                args.run_dir, 
                run_file_name + '-' + description)

            output_filepath = base_output_filepath + '.csv'

            write_performance_metrics(output_filepath, stats[mode])

            ## Output a graph of the key performance statistics