def apply(self, protocol_name, output_dir, step=None):
    """Extract and store embeddings for every file of a protocol.

    Runs the trained embedding model over each file with a sliding
    window and dumps the resulting embeddings under `output_dir`.

    Parameters
    ----------
    protocol_name : str
        Name of the pyannote.database protocol to process.
    output_dir : str
        Root directory where precomputed embeddings are stored.
    step : float, optional
        Sliding window step, in seconds.
        Defaults to a quarter of the window duration.
    """

    # switch model to inference mode on the configured device
    model = self.model_.to(self.device)
    model.eval()

    window = self.duration
    step = 0.25 * window if step is None else step

    # memory-mapping one file per open handle would exhaust the
    # open-file limit, so disable memmap for precomputed features
    if isinstance(self.feature_extraction_, Precomputed):
        self.feature_extraction_.use_memmap = False

    # sliding-window embedding extractor
    extractor = SequenceEmbedding(model,
                                  self.feature_extraction_,
                                  duration=window,
                                  step=step,
                                  batch_size=self.batch_size,
                                  device=self.device)

    # creating this object also writes a metadata file at the root of
    # `output_dir` recording sliding window and embedding dimension
    storage = Precomputed(root_dir=output_dir,
                          sliding_window=extractor.sliding_window,
                          dimension=extractor.dimension)

    # file generator
    protocol = get_protocol(protocol_name,
                            progress=True,
                            preprocessors=self.preprocessors_)

    files = FileFinder.protocol_file_iter(protocol, extra_keys=['audio'])
    for current_file in files:
        storage.dump(current_file, extractor.apply(current_file))
    def apply(self, protocol_name, output_dir, step=None):
        """Apply sequence labeling model to a whole protocol.

        Runs the trained model over each file with a sliding window and
        dumps the raw per-class scores under `output_dir`.

        Parameters
        ----------
        protocol_name : str
            Name of the pyannote.database protocol to process.
        output_dir : str
            Root directory where precomputed scores are stored.
        step : float, optional
            Sliding window step, in seconds.
            Defaults to a quarter of the window duration.
        """

        model = self.model_.to(self.device)
        model.eval()

        duration = self.task_.duration
        if step is None:
            step = 0.25 * duration

        # do not use memmap as this would lead to too many open files
        if isinstance(self.feature_extraction_, Precomputed):
            self.feature_extraction_.use_memmap = False

        # initialize labeling extraction
        # FIX: pass the (possibly caller-provided) `step` instead of the
        # hard-coded `.25 * duration`, which silently ignored the argument
        sequence_labeling = SequenceLabeling(
            model, self.feature_extraction_, duration=duration,
            step=step, batch_size=self.batch_size,
            source='audio', device=self.device)

        sliding_window = sequence_labeling.sliding_window
        n_classes = self.task_.n_classes

        # create metadata file at root that contains
        # sliding window and dimension information
        precomputed = Precomputed(
            root_dir=output_dir,
            sliding_window=sliding_window,
            dimension=n_classes)

        # file generator
        protocol = get_protocol(protocol_name, progress=True,
                                preprocessors=self.preprocessors_)

        for current_file in FileFinder.protocol_file_iter(
                protocol, extra_keys=['audio']):

            fX = sequence_labeling.apply(current_file)
            precomputed.dump(current_file, fX)
    def apply(self, protocol_name, output_dir):
        """Apply pipeline to a protocol and write results to a text file.

        Output goes to `<output_dir>/<protocol_name>.txt`, one segment
        per line. Timeline hypotheses yield `uri start end` lines;
        annotation hypotheses also carry track and label.

        Parameters
        ----------
        protocol_name : str
            Name of the pyannote.database protocol to process.
        output_dir : str
            Directory where the output file is created.
        """

        # file generator
        protocol = get_protocol(protocol_name, progress=True,
                                preprocessors=self.preprocessors_)

        mkdir_p(output_dir)
        path = Path(output_dir) / f'{protocol_name}.txt'

        with open(path, mode='w') as fp:

            files = FileFinder.protocol_file_iter(protocol,
                                                  extra_keys=['audio'])
            for current_file in files:

                uri = get_unique_identifier(current_file)
                hypothesis = self.pipeline_.apply(current_file)

                if isinstance(hypothesis, Timeline):
                    # plain timeline: segments only, no track/label
                    for s in hypothesis:
                        fp.write(f'{uri} {s.start:.3f} {s.end:.3f}\n')
                else:
                    # annotation: one line per (segment, track, label)
                    for s, t, l in hypothesis.itertracks(yield_label=True):
                        fp.write(f'{uri} {s.start:.3f} {s.end:.3f} {t} {l}\n')
# Example #4
    def apply(self,
              protocol_name: str,
              output_dir: Path,
              subset: Optional[str] = None):
        """Apply current best pipeline

        Parameters
        ----------
        protocol_name : `str`
            Name of pyannote.database protocol to process.
        output_dir : `Path`
            Directory where the pipeline output file is written
            (created if needed).
        subset : `str`, optional
            Subset to process. Defaults processing all subsets.
        """

        # file generator
        protocol = get_protocol(protocol_name,
                                progress=True,
                                preprocessors=self.preprocessors_)

        output_dir.mkdir(parents=True, exist_ok=True)
        # file extension is dictated by the pipeline's own write format
        extension = self.pipeline_.write_format
        if subset is None:
            path = output_dir / f'{protocol_name}.all.{extension}'
        else:
            path = output_dir / f'{protocol_name}.{subset}.{extension}'

        # initialize evaluation metric
        try:
            metric = self.pipeline_.get_metric()
        except NotImplementedError as e:
            # pipeline does not define a metric: fall back to
            # averaging its per-file loss instead
            metric = None
            losses = []

        # flipped to True the first time a file cannot be evaluated;
        # from then on only the pipeline output is dumped
        skip_metric = False

        with open(path, mode='w') as fp:

            if subset is None:
                files = FileFinder.protocol_file_iter(protocol)
            else:
                files = getattr(protocol, subset)()

            for current_file in files:

                # apply pipeline and dump output to file
                output = self.pipeline_(current_file)
                self.pipeline_.write(fp, output)

                if skip_metric:
                    continue

                try:

                    if metric is None:
                        loss = self.pipeline_.loss(current_file, output)
                        losses.append(loss)

                    else:
                        # imported lazily, only when a metric exists
                        from pyannote.database import get_annotated
                        _ = metric(current_file['annotation'],
                                   output,
                                   uem=get_annotated(current_file))

                except Exception as e:
                    # this may happen for files with no available groundtruth.
                    # in this case, we simply do not perform evaluation
                    # NOTE(review): the broad catch is deliberate best-effort,
                    # but it also silently disables evaluation on any other
                    # error — confirm this is intended
                    skip_metric = True

        if skip_metric:
            msg = (f'For some (possibly good) reason, the output of this '
                   f'pipeline could not be evaluated on {protocol_name}.')
            print(msg)
            return

        # report evaluation metric
        if metric is None:
            loss = np.mean(losses)
            print(f'Loss = {loss:g}')
        else:
            _ = metric.report(display=True)
    def apply(self,
              protocol_name: str,
              output_dir: Path,
              subset: Optional[str] = None):
        """Apply current best pipeline

        Parameters
        ----------
        protocol_name : `str`
            Name of pyannote.database protocol to process.
        subset : `str`, optional
            Subset to process. Defaults processing all subsets.
        """

        # file generator
        protocol = get_protocol(protocol_name,
                                progress=True,
                                preprocessors=self.preprocessors_)

        # exist_ok=False: refuse to write into an existing directory
        output_dir.mkdir(parents=True, exist_ok=False)
        name = 'all' if subset is None else subset
        path = output_dir / f'{protocol_name}.{name}.txt'

        # pipelines without a metric are evaluated through their loss
        try:
            metric = self.pipeline_.get_metric()
        except NotImplementedError:
            metric = None
            losses = []

        with open(path, mode='w') as fp:

            if subset is None:
                files = FileFinder.protocol_file_iter(protocol)
            else:
                files = getattr(protocol, subset)()

            for current_file in files:
                output = self.pipeline_(current_file)

                # evaluate output
                if metric is None:
                    losses.append(self.pipeline_.loss(current_file, output))
                else:
                    from pyannote.database import get_annotated
                    _ = metric(current_file['annotation'],
                               output,
                               uem=get_annotated(current_file))

                self.pipeline_.write(fp, output)

        # report evaluation metric
        if metric is None:
            print(f'Loss = {np.mean(losses):g}')
        else:
            _ = metric.report(display=True)
def extract(protocol_name,
            file_finder,
            experiment_dir,
            robust=False,
            parallel=False):
    """Extract (and store) features for every file of a protocol.

    Reads `<experiment_dir>/config.yml`, instantiates the configured
    feature extraction (and optional normalization), writes a metadata
    file at the root of `experiment_dir`, then extracts features for
    each protocol file — optionally in parallel over all CPU cores.

    Parameters
    ----------
    protocol_name : str
        Name of the pyannote.database protocol to process.
    file_finder : FileFinder
        Resolves 'audio' keys to actual file paths.
    experiment_dir : str
        Directory containing `config.yml`; features are stored there too.
    robust : bool, optional
        Forwarded to `helper_extract`. Defaults to False.
    parallel : bool, optional
        When True, distribute extraction over all CPU cores.
        Defaults to False.
    """

    protocol = get_protocol(protocol_name, progress=False)

    # load configuration file
    config_yml = experiment_dir + '/config.yml'
    with open(config_yml, 'r') as fp:
        # FIX: yaml.load() without an explicit Loader is deprecated and
        # unsafe on untrusted input; the config is plain YAML, so
        # SafeLoader suffices
        config = yaml.load(fp, Loader=yaml.SafeLoader)

    feature_extraction_name = config['feature_extraction']['name']
    features = __import__('pyannote.audio.features',
                          fromlist=[feature_extraction_name])
    FeatureExtraction = getattr(features, feature_extraction_name)
    feature_extraction = FeatureExtraction(
        **config['feature_extraction'].get('params', {}))

    sliding_window = feature_extraction.sliding_window()
    dimension = feature_extraction.dimension()

    # optional feature normalization
    if 'normalization' in config:
        normalization_name = config['normalization']['name']
        normalization_module = __import__(
            'pyannote.audio.features.normalization',
            fromlist=[normalization_name])
        Normalization = getattr(normalization_module, normalization_name)
        normalization = Normalization(
            **config['normalization'].get('params', {}))
    else:
        normalization = None

    # create metadata file at root that contains
    # sliding window and dimension information
    # (the constructor's side effect is the point; the object itself
    # is not used afterwards)
    precomputed = Precomputed(root_dir=experiment_dir,
                              sliding_window=sliding_window,
                              dimension=dimension)

    if parallel:

        extract_one = functools.partial(helper_extract,
                                        file_finder=file_finder,
                                        experiment_dir=experiment_dir,
                                        config_yml=config_yml,
                                        normalization=normalization,
                                        robust=robust)

        n_jobs = cpu_count()
        pool = Pool(n_jobs)
        imap = pool.imap

    else:

        pool = None
        feature_extraction = init_feature_extraction(experiment_dir)
        extract_one = functools.partial(helper_extract,
                                        file_finder=file_finder,
                                        experiment_dir=experiment_dir,
                                        feature_extraction=feature_extraction,
                                        normalization=normalization,
                                        robust=robust)
        imap = map

    try:
        for result in imap(
                extract_one,
                FileFinder.protocol_file_iter(protocol, extra_keys=['audio'])):
            if result is None:
                continue
            print(result)
    finally:
        # FIX: release worker processes (the original leaked the pool)
        if pool is not None:
            pool.close()
            pool.join()
def extract(protocol_name, file_finder, experiment_dir,
            robust=False, parallel=False):
    """Extract (and store) features for every file of a protocol.

    Reads `<experiment_dir>/config.yml`, instantiates the configured
    feature extraction (and optional normalization), writes a metadata
    file at the root of `experiment_dir`, then extracts features for
    each protocol file — optionally in parallel over all CPU cores.

    Parameters
    ----------
    protocol_name : str
        Name of the pyannote.database protocol to process.
    file_finder : FileFinder
        Resolves 'audio' keys to actual file paths.
    experiment_dir : str
        Directory containing `config.yml`; features are stored there too.
    robust : bool, optional
        Forwarded to `helper_extract`. Defaults to False.
    parallel : bool, optional
        When True, distribute extraction over all CPU cores.
        Defaults to False.
    """

    protocol = get_protocol(protocol_name, progress=False)

    # load configuration file
    config_yml = experiment_dir + '/config.yml'
    with open(config_yml, 'r') as fp:
        # FIX: yaml.load() without an explicit Loader is deprecated and
        # unsafe on untrusted input; the config is plain YAML, so
        # SafeLoader suffices
        config = yaml.load(fp, Loader=yaml.SafeLoader)

    feature_extraction_name = config['feature_extraction']['name']
    features = __import__('pyannote.audio.features',
                          fromlist=[feature_extraction_name])
    FeatureExtraction = getattr(features, feature_extraction_name)
    feature_extraction = FeatureExtraction(
        **config['feature_extraction'].get('params', {}))

    sliding_window = feature_extraction.sliding_window()
    dimension = feature_extraction.dimension()

    # optional feature normalization
    if 'normalization' in config:
        normalization_name = config['normalization']['name']
        normalization_module = __import__(
            'pyannote.audio.features.normalization',
            fromlist=[normalization_name])
        Normalization = getattr(normalization_module, normalization_name)
        normalization = Normalization(
            **config['normalization'].get('params', {}))
    else:
        normalization = None

    # create metadata file at root that contains
    # sliding window and dimension information
    # (the constructor's side effect is the point; the object itself
    # is not used afterwards)
    precomputed = Precomputed(root_dir=experiment_dir,
                              sliding_window=sliding_window,
                              dimension=dimension)

    if parallel:

        extract_one = functools.partial(helper_extract,
                                        file_finder=file_finder,
                                        experiment_dir=experiment_dir,
                                        config_yml=config_yml,
                                        normalization=normalization,
                                        robust=robust)

        n_jobs = cpu_count()
        pool = Pool(n_jobs)
        imap = pool.imap

    else:

        pool = None
        feature_extraction = init_feature_extraction(experiment_dir)
        extract_one = functools.partial(helper_extract,
                                        file_finder=file_finder,
                                        experiment_dir=experiment_dir,
                                        feature_extraction=feature_extraction,
                                        normalization=normalization,
                                        robust=robust)
        imap = map

    try:
        for result in imap(extract_one, FileFinder.protocol_file_iter(
                protocol, extra_keys=['audio'])):
            if result is None:
                continue
            print(result)
    finally:
        # FIX: release worker processes (the original leaked the pool)
        if pool is not None:
            pool.close()
            pool.join()