Example #1
import argparse
import os
import sys

import mir_eval
import eval_utilities  # helper module shipped alongside the mir_eval evaluator scripts


def main():
    """Main function to evaluate the pattern discovery task."""
    parser = argparse.ArgumentParser(
        description="mir_eval pattern discovery evaluation",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('-o',
                        dest='output_file',
                        default=None,
                        type=str,
                        action='store',
                        help='Store results in json format')
    parser.add_argument("reference_file",
                        action="store",
                        help="Path to the reference file.")
    parser.add_argument("estimated_file",
                        action="store",
                        help="Path to the estimation file.")
    parameters = vars(parser.parse_args(sys.argv[1:]))

    # Load in data
    ref_patterns = mir_eval.io.load_patterns(parameters['reference_file'])
    est_patterns = mir_eval.io.load_patterns(parameters['estimated_file'])

    # Compute all the scores
    scores = mir_eval.pattern.evaluate(ref_patterns, est_patterns)
    print("{} vs. {}".format(os.path.basename(parameters['reference_file']),
                             os.path.basename(parameters['estimated_file'])))
    eval_utilities.print_evaluation(scores)

    if parameters['output_file']:
        print('Saving results to: ', parameters['output_file'])
        eval_utilities.save_results(scores, parameters['output_file'])
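
The same pattern-discovery metrics can also be computed without the command-line wrapper. A minimal sketch, assuming two files in the MIREX pattern-discovery text format that mir_eval.io.load_patterns expects (the file names below are placeholders):

import mir_eval

# Placeholder paths; substitute real pattern annotation files.
ref_patterns = mir_eval.io.load_patterns('reference_patterns.txt')
est_patterns = mir_eval.io.load_patterns('estimated_patterns.txt')

# evaluate() returns an ordered dict mapping metric names to scores.
scores = mir_eval.pattern.evaluate(ref_patterns, est_patterns)
for metric, value in scores.items():
    print('{}: {:.3f}'.format(metric, value))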
Example #2
def main():
    """Main function to evaluate the pattern discovery task."""
    parser = argparse.ArgumentParser(
        description="mir_eval pattern discovery evaluation",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('-o',
                        dest='output_file',
                        default=None,
                        type=str,
                        action='store',
                        help='Store results in json format')
    parser.add_argument("reference_file",
                        action="store",
                        help="Path to the reference file.")
    parser.add_argument("estimated_file",
                        action="store",
                        help="Path to the estimation file.")
    parameters = vars(parser.parse_args(sys.argv[1:]))

    # Load in data
    ref_patterns = mir_eval.io.load_patterns(parameters['reference_file'])
    est_patterns = mir_eval.io.load_patterns(parameters['estimated_file'])

    # Compute all the scores
    scores = mir_eval.pattern.evaluate(ref_patterns, est_patterns)
    print "{} vs. {}".format(os.path.basename(parameters['reference_file']),
                             os.path.basename(parameters['estimated_file']))
    eval_utilities.print_evaluation(scores)

    if parameters['output_file']:
        print('Saving results to: ', parameters['output_file'])
        eval_utilities.save_results(scores, parameters['output_file'])
Example #3
import argparse
import os
import sys

import mir_eval
import eval_utilities  # helper module shipped alongside the mir_eval evaluator scripts


def process_arguments():
    """Parse command-line arguments."""
    parser = argparse.ArgumentParser(
        description="mir_eval onset detection evaluation",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('-o',
                        dest='output_file',
                        default=None,
                        type=str,
                        action='store',
                        help='Store results in json format')

    parser.add_argument('reference_file',
                        action='store',
                        help='path to the reference annotation file')

    parser.add_argument('estimated_file',
                        action='store',
                        help='path to the estimated annotation file')

    return vars(parser.parse_args(sys.argv[1:]))

if __name__ == '__main__':
    # Get the parameters
    parameters = process_arguments()

    # Load in data
    reference_onsets = mir_eval.io.load_events(parameters['reference_file'])
    estimated_onsets = mir_eval.io.load_events(parameters['estimated_file'])

    # Compute all the scores
    scores = mir_eval.onset.evaluate(reference_onsets, estimated_onsets)
    print "{} vs. {}".format(os.path.basename(parameters['reference_file']),
                             os.path.basename(parameters['estimated_file']))
    eval_utilities.print_evaluation(scores)

    if parameters['output_file']:
        print('Saving results to: ', parameters['output_file'])
        eval_utilities.save_results(scores, parameters['output_file'])
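
As with the pattern examples above, the onset metrics can be computed directly as well. A minimal sketch, assuming each input file lists one onset time in seconds per line, which is the layout mir_eval.io.load_events reads (paths are placeholders):

import mir_eval

# Placeholder paths; substitute real onset annotation files.
reference_onsets = mir_eval.io.load_events('reference_onsets.txt')
estimated_onsets = mir_eval.io.load_events('estimated_onsets.txt')

# Returns an ordered dict of scores (F-measure, precision, recall).
scores = mir_eval.onset.evaluate(reference_onsets, estimated_onsets)
for metric, value in scores.items():
    print('{}: {:.3f}'.format(metric, value))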