Example no. 1
0
def compute_gt_results(est_file,
                       ref_file,
                       boundaries_id,
                       labels_id,
                       config,
                       bins=251):
    """Computes the results by using the ground truth dataset identified by
    the annotator parameter.

    Parameters
    ----------
    est_file : str
        Path to the file containing the estimations.
    ref_file : str
        Path to the JAMS file containing the reference annotations.
    boundaries_id : str
        Identifier of the boundaries algorithm.
    labels_id : str
        Identifier of the labels algorithm.
    config : dict
        Algorithm configuration, forwarded to ``io.read_estimations``.
    bins : int
        Number of bins used by the information-gain evaluation.

    Return
    ------
    results : dict
        Dictionary of the results (see function compute_results).
        Empty dict when references or estimations are unavailable.
    """
    # The dataset prefix selects the annotation context to load.
    # NOTE(review): assumes file names look like "<prefix>_..."; verify for
    # basenames that contain extra underscores.
    ds_prefix = os.path.basename(est_file).split("_")[0]

    try:
        ref_inter, ref_labels = jams2.converters.load_jams_range(
            ref_file,
            "sections",
            annotator=0,
            context=msaf.prefix_dict[ds_prefix])
    except Exception:
        # Narrowed from a bare ``except:`` so KeyboardInterrupt/SystemExit
        # are not swallowed; any failure to load references skips the file.
        logging.warning("No references for file: %s", ref_file)
        return {}

    # Read estimations with the correct algorithm configuration.
    est_inter, est_labels = io.read_estimations(est_file, boundaries_id,
                                                labels_id, **config)

    if len(est_inter) == 0:
        logging.warning("No estimations for file: %s", est_file)
        return {}

    # Compute the results and return
    return compute_results(ref_inter, est_inter, ref_labels, est_labels, bins,
                           est_file)
Example no. 2
0
def compute_gt_results(est_file,
                       ref_file,
                       boundaries_id,
                       labels_id,
                       config,
                       bins=251,
                       annotator_id=0):
    """Computes the results by using the ground truth dataset identified by
    the annotator parameter.

    Parameters
    ----------
    est_file : str
        Path to the file containing the estimations.
    ref_file : str
        Path to the JAMS file containing the reference annotations.
    boundaries_id : str
        Identifier of the boundaries algorithm.
    labels_id : str
        Identifier of the labels algorithm.
    config : dict
        Algorithm configuration; ``config["hier"]`` selects the hierarchical
        evaluation. Forwarded to ``io.read_estimations``.
    bins : int
        Number of bins used by the information-gain evaluation.
    annotator_id : int
        Index of the annotator to use in the flat case.

    Return
    ------
    results : dict
        Dictionary of the results (see function compute_results).
        Empty dict when references or estimations are unavailable.
    """
    try:
        if config["hier"]:
            # NOTE(review): ``annotator_id`` is not forwarded here — the
            # hierarchical reader is hard-wired to annotation_id=0. Confirm
            # whether this is intentional.
            ref_times, ref_labels, ref_levels = \
                msaf.io.read_hier_references(
                    ref_file, annotation_id=0,
                    exclude_levels=["segment_salami_function"])
        else:
            jam = jams.load(ref_file, validate=False)
            ann = jam.search(namespace='segment_.*')[annotator_id]
            ref_inter, ref_labels = ann.data.to_interval_values()
    except Exception:
        # Narrowed from a bare ``except:`` so KeyboardInterrupt/SystemExit
        # are not swallowed; any failure to load references skips the file.
        logging.warning("No references for file: %s", ref_file)
        return {}

    # Read estimations with the correct algorithm configuration.
    est_inter, est_labels = io.read_estimations(est_file, boundaries_id,
                                                labels_id, **config)
    if len(est_inter) == 0:
        logging.warning("No estimations for file: %s", est_file)
        return {}

    # Compute the results and return
    logging.info("Evaluating %s", os.path.basename(est_file))
    if config["hier"]:
        # Hierarchical evaluation.
        assert len(est_inter) == len(est_labels), "Same number of levels " \
            "are required in the boundaries and labels for the hierarchical " \
            "evaluation."
        est_times = []
        est_labels = []

        # Sort levels by number of segments so coarser levels come first.
        est_inter = sorted(est_inter, key=lambda level: len(level))

        for inter in est_inter:
            est_times.append(msaf.utils.intervals_to_times(inter))
            # Add fake labels (hierarchical eval does not use labels --yet--)
            est_labels.append(np.ones(len(est_times[-1]) - 1) * -1)

        # Make estimated and reference hierarchies end at the same time.
        utils.align_end_hierarchies(est_times, ref_times, thres=1)

        # Convert boundary times back to intervals for mir_eval.
        est_hier = [utils.times_to_intervals(times) for times in est_times]
        ref_hier = [utils.times_to_intervals(times) for times in ref_times]

        # Compute the T-measures at 10- and 15-second windows.
        res = {}
        res["t_recall10"], res["t_precision10"], res["t_measure10"] = \
            mir_eval.hierarchy.tmeasure(ref_hier, est_hier, window=10)
        res["t_recall15"], res["t_precision15"], res["t_measure15"] = \
            mir_eval.hierarchy.tmeasure(ref_hier, est_hier, window=15)

        # Strip the ".jams"/".json" extension to obtain the track id.
        res["track_id"] = os.path.basename(est_file)[:-5]
        return res
    else:
        # Flat evaluation.
        return compute_results(ref_inter, est_inter, ref_labels, est_labels,
                               bins, est_file)
Example no. 3
0
File: eval.py Project: beckgom/msaf
def compute_gt_results(est_file, ref_file, boundaries_id, labels_id, config, bins=251, annotator_id=0):
    """Computes the results by using the ground truth dataset identified by
    the annotator parameter.

    Parameters
    ----------
    est_file : str
        Path to the file containing the estimations.
    ref_file : str
        Path to the JAMS file containing the reference annotations.
    boundaries_id : str
        Identifier of the boundaries algorithm.
    labels_id : str
        Identifier of the labels algorithm.
    config : dict
        Algorithm configuration; ``config["hier"]`` selects the hierarchical
        evaluation. Forwarded to ``io.read_estimations``.
    bins : int
        Number of bins used by the information-gain evaluation.
    annotator_id : int
        Index of the annotator to use in the flat case.

    Return
    ------
    results : dict
        Dictionary of the results (see function compute_results).
        Empty dict when references or estimations are unavailable.
    """
    try:
        if config["hier"]:
            # NOTE(review): ``annotator_id`` is not forwarded here — the
            # hierarchical reader is hard-wired to annotation_id=0.
            ref_times, ref_labels, ref_levels = msaf.io.read_hier_references(
                ref_file, annotation_id=0, exclude_levels=["segment_salami_function"]
            )
        else:
            jam = jams.load(ref_file, validate=False)
            ann = jam.search(namespace="segment_.*")[annotator_id]
            ref_inter, ref_labels = ann.data.to_interval_values()
    except Exception:
        # Narrowed from a bare ``except:`` so KeyboardInterrupt/SystemExit
        # are not swallowed; any failure to load references skips the file.
        logging.warning("No references for file: %s", ref_file)
        return {}

    # Read estimations with the correct algorithm configuration.
    est_inter, est_labels = io.read_estimations(est_file, boundaries_id, labels_id, **config)
    if len(est_inter) == 0:
        logging.warning("No estimations for file: %s", est_file)
        return {}

    # Compute the results and return
    logging.info("Evaluating %s", os.path.basename(est_file))
    if config["hier"]:
        # Hierarchical evaluation.
        assert len(est_inter) == len(est_labels), (
            "Same number of levels " "are required in the boundaries and labels for the hierarchical " "evaluation."
        )
        est_times = []
        est_labels = []

        # Sort levels by number of segments so coarser levels come first.
        est_inter = sorted(est_inter, key=lambda level: len(level))

        for inter in est_inter:
            est_times.append(msaf.utils.intervals_to_times(inter))
            # Add fake labels (hierarchical eval does not use labels --yet--)
            est_labels.append(np.ones(len(est_times[-1]) - 1) * -1)

        # Make estimated and reference hierarchies end at the same time.
        utils.align_end_hierarchies(est_times, ref_times, thres=1)

        # Convert boundary times back to intervals for mir_eval.
        est_hier = [utils.times_to_intervals(times) for times in est_times]
        ref_hier = [utils.times_to_intervals(times) for times in ref_times]

        # Compute the T-measures at 10- and 15-second windows.
        res = {}
        res["t_recall10"], res["t_precision10"], res["t_measure10"] = mir_eval.hierarchy.tmeasure(
            ref_hier, est_hier, window=10
        )
        res["t_recall15"], res["t_precision15"], res["t_measure15"] = mir_eval.hierarchy.tmeasure(
            ref_hier, est_hier, window=15
        )

        # Strip the ".jams"/".json" extension to obtain the track id.
        res["track_id"] = os.path.basename(est_file)[:-5]
        return res
    else:
        # Flat evaluation.
        return compute_results(ref_inter, est_inter, ref_labels, est_labels, bins, est_file)
Example no. 4
0
def compute_gt_results(est_file, ref_file, boundaries_id, labels_id, config,
                       bins=251, annotator_id=0):
    """Computes the results by using the ground truth dataset identified by
    the annotator parameter.

    Parameters
    ----------
    est_file : str
        Path to the file containing the estimations.
    ref_file : str
        Path to the JAMS file containing the reference annotations.
    boundaries_id : str
        Identifier of the boundaries algorithm.
    labels_id : str
        Identifier of the labels algorithm.
    config : dict
        Algorithm configuration; ``config["hier"]`` selects the hierarchical
        evaluation. Forwarded to ``io.read_estimations``.
    bins : int
        Number of bins used by the information-gain evaluation.
    annotator_id : int
        Index of the annotator to use in the flat case.

    Return
    ------
    results : dict
        Dictionary of the results (see function compute_results).
        Empty dict when references or estimations are unavailable.
    """
    # The dataset prefix selects the annotation context to load.
    # NOTE(review): assumes file names look like "<prefix>_..."; verify for
    # basenames that contain extra underscores.
    ds_prefix = os.path.basename(est_file).split("_")[0]

    # Fall back to the generic "function" context for unknown datasets.
    if ds_prefix in msaf.prefix_dict.keys():
        context = msaf.prefix_dict[ds_prefix]
    else:
        context = "function"

    try:
        # TODO: Read hierarchical annotations
        if config["hier"]:
            ref_times, ref_labels, ref_levels = \
                msaf.io.read_hier_references(ref_file, annotation_id=0,
                                             exclude_levels=["function"])
        else:
            ref_inter, ref_labels = jams2.converters.load_jams_range(
                ref_file, "sections", annotator=annotator_id, context=context)
    except Exception:
        # Narrowed from a bare ``except:`` so KeyboardInterrupt/SystemExit
        # are not swallowed; any failure to load references skips the file.
        logging.warning("No references for file: %s", ref_file)
        return {}

    # Read estimations with the correct algorithm configuration.
    est_inter, est_labels = io.read_estimations(est_file, boundaries_id,
                                                labels_id, **config)

    if len(est_inter) == 0:
        logging.warning("No estimations for file: %s", est_file)
        return {}

    # Compute the results and return
    if config["hier"]:
        # Hierarchical evaluation.
        assert len(est_inter) == len(est_labels), "Same number of levels " \
            "are required in the boundaries and labels for the hierarchical " \
            "evaluation."
        est_times = []
        est_labels = []

        # Sort levels by number of segments so coarser levels come first.
        est_inter = sorted(est_inter, key=lambda level: len(level))

        for inter in est_inter:
            est_times.append(msaf.utils.intervals_to_times(inter))
            # Add fake labels (hierarchical eval does not use labels --yet--)
            est_labels.append(np.ones(len(est_times[-1]) - 1) * -1)

        # Make estimated and reference hierarchies end at the same time.
        utils.align_end_hierarchies(est_times, ref_times)

        # Build the segment trees for the hierarchical measures.
        ref_tree = mir_eval.segment.tree.SegmentTree(ref_times, ref_labels,
                                                     ref_levels)
        est_tree = mir_eval.segment.tree.SegmentTree(est_times, est_labels)

        # Compute the hierarchical measures at three window sizes.
        res = {}
        res["t_recall10"], res["t_precision10"], res["t_measure10"] = \
            mir_eval.segment.hmeasure(ref_tree, est_tree, window=100)
        res["t_recall15"], res["t_precision15"], res["t_measure15"] = \
            mir_eval.segment.hmeasure(ref_tree, est_tree, window=150)
        res["t_recall30"], res["t_precision30"], res["t_measure30"] = \
            mir_eval.segment.hmeasure(ref_tree, est_tree, window=300)

        # Strip the ".jams"/".json" extension to obtain the track id.
        res["track_id"] = os.path.basename(est_file)[:-5]
        return res
    else:
        # Flat evaluation.
        return compute_results(ref_inter, est_inter, ref_labels, est_labels,
                               bins, est_file)
Example no. 5
0
def compute_gt_results(est_file,
                       ref_file,
                       boundaries_id,
                       labels_id,
                       config,
                       bins=251,
                       annotator_id=0):
    """Computes the results by using the ground truth dataset identified by
    the annotator parameter.

    Parameters
    ----------
    est_file : str
        Path to the file containing the estimations.
    ref_file : str
        Path to the JAMS file containing the reference annotations.
    boundaries_id : str
        Identifier of the boundaries algorithm.
    labels_id : str
        Identifier of the labels algorithm.
    config : dict
        Algorithm configuration; ``config["hier"]`` selects the hierarchical
        evaluation. Forwarded to ``io.read_estimations``.
    bins : int
        Number of bins used by the information-gain evaluation.
    annotator_id : int
        Index of the annotator to use in the flat case.

    Return
    ------
    results : dict
        Dictionary of the results (see function compute_results).
        Empty dict when references or estimations are unavailable.
    """
    # Get the ds_prefix
    # This doesn't work when est_file names contain "_"
    # ds_prefix = os.path.basename(est_file).split("_")[0]
    # NOTE(review): the full basename (including extension) is unlikely to be
    # a key in msaf.prefix_dict, so this almost always falls through to the
    # "function" context below — confirm this is the intended workaround.
    ds_prefix = os.path.basename(est_file)

    # Fall back to the generic "function" context for unknown datasets.
    if ds_prefix in msaf.prefix_dict.keys():
        context = msaf.prefix_dict[ds_prefix]
    else:
        context = "function"

    try:
        # TODO: Read hierarchical annotations
        if config["hier"]:
            ref_times, ref_labels, ref_levels = \
                msaf.io.read_hier_references(ref_file, annotation_id=0,
                                             exclude_levels=["function"])
        else:
            ref_inter, ref_labels = jams2.converters.load_jams_range(
                ref_file, "sections", annotator=annotator_id, context=context)
    except Exception:
        # Narrowed from a bare ``except:`` so KeyboardInterrupt/SystemExit
        # are not swallowed. Bug fix: this branch handles the *references*,
        # but the original message said "No estimations for file".
        logging.warning("No references for file: %s", ref_file)
        return {}

    # Read estimations with the correct algorithm configuration.
    est_inter, est_labels = io.read_estimations(est_file, boundaries_id,
                                                labels_id, **config)

    if len(est_inter) == 0:
        logging.warning("No estimations for file: %s", est_file)
        return {}

    # Compute the results and return
    if config["hier"]:
        # Hierarchical evaluation.
        assert len(est_inter) == len(est_labels), "Same number of levels " \
            "are required in the boundaries and labels for the hierarchical " \
            "evaluation."
        est_times = []
        est_labels = []

        # Sort levels by number of segments so coarser levels come first.
        est_inter = sorted(est_inter, key=lambda level: len(level))

        for inter in est_inter:
            est_times.append(msaf.utils.intervals_to_times(inter))
            # Add fake labels (hierarchical eval does not use labels --yet--)
            est_labels.append(np.ones(len(est_times[-1]) - 1) * -1)

        # Make estimated and reference hierarchies end at the same time.
        utils.align_end_hierarchies(est_times, ref_times)

        # Build the segment trees for the hierarchical measures.
        ref_tree = mir_eval.segment.tree.SegmentTree(ref_times, ref_labels,
                                                     ref_levels)
        est_tree = mir_eval.segment.tree.SegmentTree(est_times, est_labels)

        # Compute the hierarchical measures at three window sizes.
        res = {}
        res["t_recall10"], res["t_precision10"], res["t_measure10"] = \
            mir_eval.segment.hmeasure(ref_tree, est_tree, window=100)
        res["t_recall15"], res["t_precision15"], res["t_measure15"] = \
            mir_eval.segment.hmeasure(ref_tree, est_tree, window=150)
        res["t_recall30"], res["t_precision30"], res["t_measure30"] = \
            mir_eval.segment.hmeasure(ref_tree, est_tree, window=300)

        # Strip the ".jams"/".json" extension to obtain the track id.
        res["track_id"] = os.path.basename(est_file)[:-5]
        return res
    else:
        # Flat evaluation.
        return compute_results(ref_inter, est_inter, ref_labels, est_labels,
                               bins, est_file)