Example #1
def add_parser_group(self, d, target_obj):
    gp_nm = d.pop('name')
    if 'title' not in d:
        d['title'] = gp_nm
    args = util.coerce_to_iter(d.pop('arguments', None))
    gp_kwargs = util.filter_kwargs(d, argparse._ArgumentGroup.__init__)
    gp_obj = target_obj.add_argument_group(**gp_kwargs)
    self.parser_groups[gp_nm] = gp_obj
    for arg in args:
        self.add_parser_argument(arg, gp_obj, gp_nm)
Example #2
def add_parser_group(self, d, target_obj):
    gp_nm = d.pop('name')
    _ = d.setdefault('title', gp_nm)
    args = util.coerce_to_iter(d.pop('arguments', None))
    if args:
        # only add group if it has > 0 arguments
        gp_kwargs = util.filter_kwargs(d, argparse._ArgumentGroup.__init__)
        gp_obj = target_obj.add_argument_group(**gp_kwargs)
        self.parser_groups[gp_nm] = gp_obj
        for arg in args:
            self.add_parser_argument(arg, gp_obj, gp_nm)
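Examples #1, #2, and #4 rely on a util.filter_kwargs(d, callable) helper whose implementation is not shown on this page. Judging from the call sites, it returns only the entries of d whose keys the callable accepts, so stray config keys never reach add_argument_group. A minimal sketch of that behaviour, assuming introspection via inspect.signature (the real helper may well differ):

import inspect

def filter_kwargs(d, func):
    # Keep only the keys of d that appear as named parameters of func.
    # Sketch only; the actual util.filter_kwargs may behave differently.
    params = inspect.signature(func).parameters
    return {k: v for k, v in d.items() if k in params}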
Example #3
 def __init__(self, impl='KMEANS', **kwargs):
     # Finally, we create the model and let 'r rip!
     class_name = '_{}'.format(impl)
     model_class = getattr(sys.modules[__name__], class_name)
     kwargs, unused, _ = util.filter_kwargs(model_class, kwargs)
     if len(unused) > 0:
         logger.warning(
             'Cluster model "{}" has unused arguments: {}'.format(
                 impl,
                 unused
             )
         )
     self._model = model_class(**kwargs)
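Examples #3 and #5 call util.filter_kwargs with the opposite argument order and unpack a three-element result, apparently (accepted kwargs, unused kwargs, and a third value that both call sites ignore). A rough sketch under that assumption, with the third element guessed to be the required parameters that were not supplied:

import inspect

def filter_kwargs(func, kwargs):
    # Split kwargs into the entries func accepts and the ones it does not.
    params = inspect.signature(func).parameters
    used = {k: v for k, v in kwargs.items() if k in params}
    unused = {k: v for k, v in kwargs.items() if k not in params}
    # Guessed third element: required parameters that were not provided.
    missing = [name for name, p in params.items()
               if p.default is inspect.Parameter.empty
               and p.kind in (p.POSITIONAL_OR_KEYWORD, p.KEYWORD_ONLY)
               and name not in kwargs]
    return used, unused, missing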
Example #4
def make_parser(self, d):
    args = util.coerce_to_iter(d.pop('arguments', None))
    arg_groups = util.coerce_to_iter(d.pop('argument_groups', None))
    d['formatter_class'] = CustomHelpFormatter
    p_kwargs = util.filter_kwargs(d, argparse.ArgumentParser.__init__)
    p = argparse.ArgumentParser(**p_kwargs)
    for arg in args:
        # add arguments not in any group
        self.add_parser_argument(arg, p, 'parser')
    for group in arg_groups:
        # add groups and arguments therein
        self.add_parser_group(group, p)
    return p
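A configuration dict fed to make_parser would then mix ArgumentParser keyword arguments, a flat list of arguments, and named argument groups. The dict below is purely hypothetical (the top-level keys match the pops above, but the per-argument spec format is a placeholder):

parser_spec = {
    'description': 'example command-line tool',   # forwarded to ArgumentParser
    'arguments': [
        {'name': 'verbose'},                      # added directly to the parser
    ],
    'argument_groups': [
        {
            'name': 'io',                         # also used as the group title
            'arguments': [{'name': 'input'}, {'name': 'output'}],
        },
    ],
}
p = self.make_parser(parser_spec)                 # called from the owning object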
Example #5
    def __init__(self, impl='MalletLda', **kwargs):

        # Finally, we create the topic model and let 'r rip!
        class_name = '_{}'.format(impl)
        model_class = getattr(sys.modules[__name__], class_name)
        kwargs, unused, _ = util.filter_kwargs(model_class, kwargs)
        if len(unused) > 0:
            logger.warning('Topic model "{}" has unused arguments: {}'.format(
                impl,
                unused
            ))

        regen = kwargs.get('regen', None)
        if regen:
            logger.info('Forcing regeneration of topic model')

        self.model = model_class(**kwargs)
Example #6
def evaluate(reference_sources, estimated_sources, **kwargs):
    """Compute all metrics for the given reference and estimated annotations.

    Examples
    --------
    >>> # reference_sources[n] should be an ndarray of samples of the
    >>> # n'th reference source
    >>> # estimated_sources[n] should be the same for the n'th estimated source
    >>> scores = mir_eval.separation.evaluate(reference_sources,
    ...                                       estimated_sources)

    Parameters
    ----------
    reference_sources : np.ndarray, shape=(nsrc, nsampl)
        matrix containing true sources
    estimated_sources : np.ndarray, shape=(nsrc, nsampl)
        matrix containing estimated sources
    kwargs
        Additional keyword arguments which will be passed to the
        appropriate metric or preprocessing functions.

    Returns
    -------
    scores : dict
        Dictionary of scores, where the key is the metric name (str) and
        the value is the (float) score achieved.

    """
    # Compute all the metrics
    scores = collections.OrderedDict()

    sdr, sir, sar, perm = util.filter_kwargs(bss_eval_sources,
                                             reference_sources,
                                             estimated_sources, **kwargs)

    scores['Source to Distortion'] = sdr.tolist()
    scores['Source to Interference'] = sir.tolist()
    scores['Source to Artifact'] = sar.tolist()
    scores['Source permutation'] = perm

    return scores
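In Examples #6 and #7 (from mir_eval), util.filter_kwargs plays yet another role: the call sites unpack metric values directly from its return value, so it must call the given function itself, presumably with the positional arguments plus only the keyword arguments that function accepts. A minimal sketch of that call-through behaviour, again assuming inspect-based introspection:

import inspect

def filter_kwargs(_function, *args, **kwargs):
    # Call _function with args and only those keyword arguments it accepts.
    # Sketch only; mir_eval.util.filter_kwargs is the authoritative version.
    params = inspect.signature(_function).parameters
    filtered = {k: v for k, v in kwargs.items() if k in params}
    return _function(*args, **filtered)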
Example #7
def evaluate(reference_sources, estimated_sources, **kwargs):
    """Compute all metrics for the given reference and estimated signals.

    NOTE: This will always compute :func:`mir_eval.separation.bss_eval_images`
    for any valid input and will additionally compute
    :func:`mir_eval.separation.bss_eval_sources` for valid input with fewer
    than 3 dimensions.

    Examples
    --------
    >>> # reference_sources[n] should be an ndarray of samples of the
    >>> # n'th reference source
    >>> # estimated_sources[n] should be the same for the n'th estimated source
    >>> scores = mir_eval.separation.evaluate(reference_sources,
    ...                                       estimated_sources)

    Parameters
    ----------
    reference_sources : np.ndarray, shape=(nsrc, nsampl[, nchan])
        matrix containing true sources
    estimated_sources : np.ndarray, shape=(nsrc, nsampl[, nchan])
        matrix containing estimated sources
    kwargs
        Additional keyword arguments which will be passed to the
        appropriate metric or preprocessing functions.

    Returns
    -------
    scores : dict
        Dictionary of scores, where the key is the metric name (str) and
        the value is the (float) score achieved.

    """
    # Compute all the metrics
    scores = collections.OrderedDict()

    sdr, isr, sir, sar, perm = util.filter_kwargs(bss_eval_images,
                                                  reference_sources,
                                                  estimated_sources, **kwargs)
    scores['Images - Source to Distortion'] = sdr.tolist()
    scores['Images - Image to Spatial'] = isr.tolist()
    scores['Images - Source to Interference'] = sir.tolist()
    scores['Images - Source to Artifact'] = sar.tolist()
    scores['Images - Source permutation'] = perm.tolist()

    sdr, isr, sir, sar, perm = util.filter_kwargs(bss_eval_images_framewise,
                                                  reference_sources,
                                                  estimated_sources, **kwargs)
    scores['Images Frames - Source to Distortion'] = sdr.tolist()
    scores['Images Frames - Image to Spatial'] = isr.tolist()
    scores['Images Frames - Source to Interference'] = sir.tolist()
    scores['Images Frames - Source to Artifact'] = sar.tolist()
    scores['Images Frames - Source permutation'] = perm.tolist()

    # Verify we can compute sources on this input
    if reference_sources.ndim < 3 and estimated_sources.ndim < 3:
        sdr, sir, sar, perm = util.filter_kwargs(bss_eval_sources_framewise,
                                                 reference_sources,
                                                 estimated_sources, **kwargs)
        scores['Sources Frames - Source to Distortion'] = sdr.tolist()
        scores['Sources Frames - Source to Interference'] = sir.tolist()
        scores['Sources Frames - Source to Artifact'] = sar.tolist()
        scores['Sources Frames - Source permutation'] = perm.tolist()

        sdr, sir, sar, perm = util.filter_kwargs(bss_eval_sources,
                                                 reference_sources,
                                                 estimated_sources, **kwargs)
        scores['Sources - Source to Distortion'] = sdr.tolist()
        scores['Sources - Source to Interference'] = sir.tolist()
        scores['Sources - Source to Artifact'] = sar.tolist()
        scores['Sources - Source permutation'] = perm.tolist()

    return scores
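For completeness, mir_eval.separation.evaluate can be driven directly from two NumPy arrays. The run below uses synthetic data and is purely illustrative (it is not part of the original examples):

import numpy as np
import mir_eval

rng = np.random.RandomState(0)
reference_sources = rng.randn(2, 44100)                        # 2 sources, 1 s each
estimated_sources = reference_sources + 0.1 * rng.randn(2, 44100)

scores = mir_eval.separation.evaluate(reference_sources, estimated_sources)
for name, value in scores.items():
    print(name, value)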