Example #1
    def test_process_features_online_with_frame_hop_size_change_stores_correct(self, processor, tmpdir):
        ds = resources.create_dataset()

        in_feat_path = os.path.join(tmpdir.strpath, 'in_feats')
        out_feat_path = os.path.join(tmpdir.strpath, 'out_feats')

        in_feats = containers.FeatureContainer(in_feat_path)
        utt_feats = np.arange(30).reshape(5, 6)

        with in_feats:
            in_feats.sampling_rate = 16000
            in_feats.frame_size = 400
            in_feats.hop_size = 160

            for utt_idx in ds.utterances.keys():
                in_feats.set(utt_idx, utt_feats)

        processor.mock_frame_size_scale = 2.0
        processor.mock_hop_size_scale = 2.0
        processor.process_features_online(ds, in_feats, out_feat_path)

        out_feats = containers.FeatureContainer(out_feat_path)

        with out_feats:
            assert out_feats.frame_size == 800
            assert out_feats.hop_size == 320
Example #2
    def test_process_features(self, processor, tmpdir):
        ds = resources.create_dataset()

        in_feat_path = os.path.join(tmpdir.strpath, 'in_feats')
        out_feat_path = os.path.join(tmpdir.strpath, 'out_feats')

        in_feats = containers.FeatureContainer(in_feat_path)
        utt_feats = np.arange(30).reshape(5, 6)

        with in_feats:
            in_feats.sampling_rate = 16000
            in_feats.frame_size = 400
            in_feats.hop_size = 160

            for utt_idx in ds.utterances.keys():
                in_feats.set(utt_idx, utt_feats)

        processor.process_features(ds, in_feats, out_feat_path)

        out_feats = containers.FeatureContainer(out_feat_path)

        with out_feats:
            assert len(out_feats.keys()) == 5

            assert np.array_equal(out_feats.get('utt-1', mem_map=False), utt_feats)
            assert np.array_equal(out_feats.get('utt-2', mem_map=False), utt_feats)
            assert np.array_equal(out_feats.get('utt-3', mem_map=False), utt_feats)
            assert np.array_equal(out_feats.get('utt-4', mem_map=False), utt_feats)
            assert np.array_equal(out_feats.get('utt-5', mem_map=False), utt_feats)
Example #3
    def test_process_features_online_with_given_chunk_size(self, processor, tmpdir):
        ds = resources.create_dataset()

        in_feat_path = os.path.join(tmpdir.strpath, 'in_feats')
        out_feat_path = os.path.join(tmpdir.strpath, 'out_feats')

        in_feats = containers.FeatureContainer(in_feat_path)
        utt_feats = np.arange(90).reshape(15, 6)

        with in_feats:
            in_feats.sampling_rate = 16000
            in_feats.frame_size = 400
            in_feats.hop_size = 160

            for utt_idx in ds.utterances.keys():
                in_feats.set(utt_idx, utt_feats)

        processor.process_features_online(ds, in_feats, out_feat_path, chunk_size=4)

        out_feats = containers.FeatureContainer(out_feat_path)

        # 15 frames per utterance with chunk_size=4 -> 4 chunks each, for 5 utterances.
        assert len(processor.called_with_data) == 4 * 5
        assert processor.called_with_data[0].shape == (4, 6)
        assert processor.called_with_data[3].shape == (3, 6)

        with out_feats:
            assert len(out_feats.keys()) == 5

            assert np.array_equal(out_feats.get('utt-1', mem_map=False), utt_feats)
            assert np.array_equal(out_feats.get('utt-2', mem_map=False), utt_feats)
            assert np.array_equal(out_feats.get('utt-3', mem_map=False), utt_feats)
            assert np.array_equal(out_feats.get('utt-4', mem_map=False), utt_feats)
            assert np.array_equal(out_feats.get('utt-5', mem_map=False), utt_feats)
Example #4
    def new_feature_container(self, idx, path=None):
        """
        Add a new feature container with the given identifier.

        Parameters:
            idx (str): A unique identifier within the dataset.
            path (str): The path to store the feature file at. If None, a default path is used.

        Returns:
            FeatureContainer: The newly added feature-container.
        """

        new_feature_idx = idx
        new_feature_path = path

        # Append an index to idx if it already exists
        if new_feature_idx in self._feature_containers.keys():
            new_feature_idx = naming.index_name_if_in_list(
                new_feature_idx, self._feature_containers.keys())

        # Set default path if none given
        if new_feature_path is None:
            if not os.path.isdir(self.path):
                raise ValueError(
                    'To use a default path, the dataset needs to have a path.')

            new_feature_path = os.path.join(self.path, DEFAULT_FEAT_SUBDIR,
                                            new_feature_idx)
        else:
            new_feature_path = os.path.abspath(new_feature_path)

        feat_container = containers.FeatureContainer(new_feature_path)
        self._feature_containers[new_feature_idx] = feat_container

        return feat_container
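
A minimal usage sketch for this method, assuming `corpus` is a dataset/corpus object with a path on disk that exposes new_feature_container(); the identifier 'mfcc' and the stored matrix are purely illustrative:

import numpy as np

# Assumption: `corpus` already exists and has a path on disk.
fc = corpus.new_feature_container('mfcc')

with fc:
    fc.sampling_rate = 16000
    fc.frame_size = 400
    fc.hop_size = 160
    fc.set('utt-1', np.arange(30).reshape(5, 6))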
Example #5
    def test_process_features_online_ignores_none(self, processor, tmpdir):
        ds = resources.create_dataset()

        in_feat_path = os.path.join(tmpdir.strpath, 'in_feats')
        out_feat_path = os.path.join(tmpdir.strpath, 'out_feats')

        in_feats = containers.FeatureContainer(in_feat_path)
        utt_feats = np.arange(90).reshape(15, 6)

        with in_feats:
            in_feats.sampling_rate = 16000
            in_feats.frame_size = 400
            in_feats.hop_size = 160

            for utt_idx in ds.utterances:
                in_feats.set(utt_idx, utt_feats)

        def return_none(*args, **kwargs):
            return None

        processor.process_frames = return_none
        processor.process_features_online(ds,
                                          in_feats,
                                          out_feat_path,
                                          chunk_size=4)

        # process_frames returned None for every chunk, so nothing is appended;
        # the call simply must not raise.
        assert True
Example #6
    def _process_corpus(self,
                        corpus,
                        output_path,
                        processing_func,
                        frame_size=400,
                        hop_size=160,
                        sr=None):
        """ Utility function for processing a corpus with a separate processing function. """
        feat_container = containers.FeatureContainer(output_path)
        feat_container.open()

        # Sampling-rate seen so far; -1 means no utterance has been read yet.
        sampling_rate = -1

        for utterance in corpus.utterances.values():
            utt_sampling_rate = utterance.sampling_rate

            if sr is None:
                if sampling_rate > 0 and sampling_rate != utt_sampling_rate:
                    raise ValueError(
                        'File {} has a different sampling-rate than the previous ones!'
                        .format(utterance.track.idx))

                sampling_rate = utt_sampling_rate

            processing_func(utterance, feat_container, frame_size, hop_size,
                            sr, corpus)

        tf_frame_size, tf_hop_size = self.frame_transform(frame_size, hop_size)
        feat_container.frame_size = tf_frame_size
        feat_container.hop_size = tf_hop_size
        feat_container.sampling_rate = sr or sampling_rate

        feat_container.close()

        return feat_container
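
The processing_func argument is expected to match the call above, i.e. processing_func(utterance, feat_container, frame_size, hop_size, sr, corpus). A hypothetical sketch of such a function (the name and the placeholder feature matrix are assumptions, not library code):

import numpy as np

def store_dummy_frames(utterance, feat_container, frame_size, hop_size, sr, corpus):
    # Placeholder "feature extraction": store a fixed 10x13 matrix per utterance.
    feat_container.set(utterance.idx, np.zeros((10, 13)))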
Example #7
def sample_feature_container():
    container_path = resources.get_resource_path(
        ['sample_files', 'feat_container']
    )
    sample_container = containers.FeatureContainer(container_path)
    sample_container.open()
    yield sample_container
    sample_container.close()
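
A short sketch of how such a pytest fixture is typically consumed; the test name and assertion are illustrative, and the fixture yields an already opened container:

def test_sample_container_lists_keys(sample_feature_container):
    # The container is already open, so keys() can be read directly.
    keys = list(sample_feature_container.keys())
    assert isinstance(keys, list)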
Example #8
    def process_features_online(self,
                                corpus,
                                input_features,
                                output_path,
                                chunk_size=1):
        """
        Process all features of the given corpus and save the processed features in a feature-container.
        The features are processed in **online** mode, chunk by chunk.

        Args:
            corpus (Corpus): The corpus to process the utterances from.
            input_features (FeatureContainer): The feature-container to process the frames from.
            output_path (str): A path to save the feature-container to.
            chunk_size (int): Number of frames to process per chunk.

        Returns:
            FeatureContainer: The feature-container containing the processed features.
        """
        feat_container = containers.FeatureContainer(output_path)
        feat_container.open()

        input_features.open()

        for utterance in corpus.utterances.values():
            sampling_rate = input_features.sampling_rate
            frames = input_features.get(utterance.idx, mem_map=True)

            current_frame = 0

            while current_frame < frames.shape[0]:
                # This chunk is the last one if it reaches past the end of the utterance.
                last = current_frame + chunk_size > frames.shape[0]
                to_frame = current_frame + chunk_size

                chunk = frames[current_frame:to_frame]

                processed = self.process_frames(chunk,
                                                sampling_rate,
                                                current_frame,
                                                last=last,
                                                utterance=utterance,
                                                corpus=corpus)

                if processed is not None:
                    feat_container.append(utterance.idx, processed)

                current_frame += chunk_size

        tf_frame_size, tf_hop_size = self.frame_transform(
            input_features.frame_size, input_features.hop_size)
        feat_container.frame_size = tf_frame_size
        feat_container.hop_size = tf_hop_size
        feat_container.sampling_rate = input_features.sampling_rate

        feat_container.close()

        return feat_container
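
process_features_online delegates the actual work to self.process_frames and self.frame_transform. A hypothetical minimal processor sketch; BaseProcessor is a stand-in name for whatever class defines process_features_online, and the doubling itself is illustrative:

class DoublingProcessor(BaseProcessor):
    def process_frames(self, frames, sampling_rate, offset=0, last=False,
                       utterance=None, corpus=None):
        # Receives one chunk (online) or all frames of an utterance (offline);
        # returning None would make process_features_online skip the chunk.
        return frames * 2.0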
Example #9
    def test_process_corpus_online_with_frame_hop_size_change_stores_correct(self, processor, tmpdir):
        ds = resources.create_dataset()
        feat_path = os.path.join(tmpdir.strpath, 'feats')

        processor.mock_frame_size_scale = 0.5
        processor.mock_hop_size_scale = 0.25
        processor.process_corpus_online(ds, feat_path, frame_size=4096, hop_size=2048)

        fc = containers.FeatureContainer(feat_path)
        fc.open()

        assert fc.frame_size == 2048
        assert fc.hop_size == 512

        fc.close()
Example #10
def container_dim_x_4(tmpdir):
    inputs_path = os.path.join(tmpdir.strpath, 'inputs.hdf5')

    cnt = containers.FeatureContainer(inputs_path)
    cnt.open()

    cnt.frame_size = 4
    cnt.hop_size = 2
    cnt.sampling_rate = 16000

    cnt.set('utt-1', np.arange(60).reshape(15, 4))
    cnt.set('utt-2', np.arange(80).reshape(20, 4))
    cnt.set('utt-3', np.arange(44).reshape(11, 4))
    cnt.set('utt-4', np.arange(12).reshape(3, 4))
    cnt.set('utt-5', np.arange(16).reshape(4, 4))

    return cnt
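
Data written with set() can be read back with get(); a short sketch against the container built above (the fixture leaves the container open):

# Reading back one of the matrices stored above; mem_map=False loads it fully into memory.
data = cnt.get('utt-2', mem_map=False)
assert data.shape == (20, 4)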
Example #11
    def process_features(self, corpus, input_features, output_path):
        """
        Process all features of the given corpus and save the processed features in a feature-container.
        The features are processed in **offline** mode, all features of an utterance at once.

        Args:
            corpus (Corpus): The corpus to process the utterances from.
            input_features (FeatureContainer): The feature-container to process the frames from.
            output_path (str): A path to save the feature-container to.

        Returns:
            FeatureContainer: The feature-container containing the processed features.
        """
        feat_container = containers.FeatureContainer(output_path)
        feat_container.open()

        input_features.open()

        for utterance in corpus.utterances.values():
            sampling_rate = input_features.sampling_rate
            frames = input_features.get(utterance.idx, mem_map=False)
            processed = self.process_frames(frames,
                                            sampling_rate,
                                            offset=0,
                                            last=True,
                                            utterance=utterance,
                                            corpus=corpus)
            feat_container.set(utterance.idx, processed)

        tf_frame_size, tf_hop_size = self.frame_transform(
            input_features.frame_size, input_features.hop_size)
        feat_container.frame_size = tf_frame_size
        feat_container.hop_size = tf_hop_size
        feat_container.sampling_rate = input_features.sampling_rate

        feat_container.close()

        return feat_container
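
A minimal end-to-end sketch of calling process_features, mirroring the test setup further above; the paths, the `processor` instance, and the `corpus` are assumptions, and `containers` is assumed to be the same module used in the snippets above:

import numpy as np

# Build a small input feature-container (path and data are illustrative).
in_feats = containers.FeatureContainer('/tmp/in_feats')
with in_feats:
    in_feats.sampling_rate = 16000
    in_feats.frame_size = 400
    in_feats.hop_size = 160
    in_feats.set('utt-1', np.arange(30).reshape(5, 6))

# `processor` and `corpus` are assumed to exist already (see the tests above).
out_feats = processor.process_features(corpus, in_feats, '/tmp/out_feats')

with out_feats:
    print(out_feats.get('utt-1', mem_map=False))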