Example 1
    def __call__(self, original, sample_rate):
        """Augment original waveform

        Parameters
        ----------
        original : `np.ndarray`
            (n_samples, n_channels) waveform.
        sample_rate : `int`
            Sample rate.

        Returns
        -------
        augmented : `np.ndarray`
            (n_samples, n_channels) noise-augmented waveform.
        """

        raw_audio = RawAudio(sample_rate=sample_rate, mono=True)

        # accumulate enough noise to cover duration of original waveform
        noises = []
        len_left = len(original)
        while len_left > 0:

            # select noise file at random
            file = random.choice(self.files_)

            # select noise segment at random
            segment = next(random_segment(file['gaps'], weighted=False))
            duration = segment.duration
            segment_len = duration * sample_rate

            # if noise segment is longer than what is needed, crop it at random
            if segment_len > len_left:
                duration = len_left / sample_rate
                segment = next(random_subsegment(segment, duration))

            noise = raw_audio.crop(file,
                                   segment,
                                   mode='center',
                                   fixed=duration)

            # decrease the `len_left` value by the size of the returned noise
            len_left -= len(noise)

            noise = normalize(noise)
            noises.append(noise)

        # concatenate
        # FIXME: use fade-in between concatenated noises
        noise = np.vstack(noises)

        # select SNR at random
        snr = (self.snr_max -
               self.snr_min) * np.random.random_sample() + self.snr_min
        alpha = np.exp(-np.log(10) * snr / 20)
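        # equivalent to alpha = 10 ** (-snr / 20): the noise will sit
        # `snr` dB below the normalized original in the mix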

        return normalize(original) + alpha * noise
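
Since both the original and each noise chunk go through normalize() before mixing, scaling the noise by alpha = 10 ** (-snr / 20) places it exactly `snr` dB below the signal. A self-contained sketch of that relationship (NumPy only; the unit-RMS normalize below is an assumption about the helper used above, which is not shown in these excerpts):

import numpy as np

def normalize(waveform):
    # assumed behaviour of the normalize() helper used above: scale to unit RMS
    return waveform / (np.sqrt(np.mean(waveform ** 2)) + 1e-8)

def rms(x):
    return np.sqrt(np.mean(x ** 2))

rng = np.random.default_rng(0)
original = normalize(rng.standard_normal((16000, 1)))
noise = normalize(rng.standard_normal((16000, 1)))

snr = 12.0                              # target SNR in dB
alpha = np.exp(-np.log(10) * snr / 20)  # same formula as in __call__
augmented = original + alpha * noise

print(20 * np.log10(rms(original) / rms(alpha * noise)))  # ~12.0 dB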
Example 2
    def _random_samples(self):
        """Random samples

        Returns
        -------
        samples : generator
            Generator that yields {'X': ..., 'y': ...} samples indefinitely.
        """

        uris = list(self.data_)
        durations = np.array([self.data_[uri]['duration'] for uri in uris])
        probabilities = durations / np.sum(durations)

        while True:

            # choose file at random with probability
            # proportional to its (annotated) duration
            uri = uris[np.random.choice(len(uris), p=probabilities)]

            datum = self.data_[uri]
            current_file = datum['current_file']

            # choose one segment at random with probability
            # proportional to its duration
            segment = next(random_segment(datum['segments'], weighted=True))

            # choose fixed-duration subsegment at random
            subsegment = next(random_subsegment(segment, self.duration))

            X = self.feature_extraction.crop(current_file,
                                             subsegment,
                                             mode='center',
                                             fixed=self.duration)

            y = self.crop_y(datum['y'],
                            subsegment)
            sample = {'X': X, 'y': y}

            if self.mask is not None:
                mask = self.crop_y(current_file[self.mask],
                                   subsegment)
                sample['mask'] = mask

            for key, classes in self.file_labels_.items():
                sample[key] = classes.index(current_file[key])

            yield sample
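
Every snippet so far relies on the helpers random_segment and random_subsegment, which are not defined in these excerpts. A rough sketch of what they plausibly do (an illustration under assumptions, not the actual library code): yield segments with probability proportional to their duration, then place a fixed-duration window at a uniformly random offset inside the chosen segment.

import numpy as np
from pyannote.core import Segment

def random_segment_sketch(segments, weighted=False):
    # yield segments indefinitely, optionally weighted by their duration
    segments = list(segments)
    durations = np.array([s.duration for s in segments])
    p = durations / durations.sum() if weighted else None
    while True:
        yield segments[np.random.choice(len(segments), p=p)]

def random_subsegment_sketch(segment, duration):
    # yield fixed-duration subsegments starting at uniformly random offsets
    while True:
        start = segment.start + np.random.rand() * (segment.duration - duration)
        yield Segment(start, start + duration)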
Example 3
    def overlap_samples(self):
        """Random overlap samples

        Returns
        -------
        samples : generator
            Generator that yields {'waveform': ..., 'y': ...} samples
            indefinitely.
        """

        uris = list(self.data_)
        durations = np.array([self.data_[uri]['duration'] for uri in uris])
        probabilities = durations / np.sum(durations)

        while True:

            # choose file at random with probability
            # proportional to its (annotated) duration
            uri = uris[np.random.choice(len(uris), p=probabilities)]

            datum = self.data_[uri]
            current_file = datum['current_file']

            # choose one segment at random with probability
            # proportional to its duration
            segment = next(random_segment(datum['segments'], weighted=True))

            # choose fixed-duration subsegment at random
            # duration = np.random.rand() * self.duration
            sequence = next(random_subsegment(segment, self.duration))

            # get corresponding waveform
            X = self.raw_audio_.crop(current_file,
                                     sequence,
                                     mode='center',
                                     fixed=self.duration)

            # get corresponding labels
            y = datum['y'].crop(sequence,
                                mode=self.alignment,
                                fixed=self.duration)

            yield {'waveform': normalize(X),
                   'y': y}
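
Like _random_samples, this generator never terminates; the caller draws as many samples as it needs. A hypothetical consumer (function name and batch size are illustrative assumptions):

import itertools
import numpy as np

def draw_waveform_batch(sample_generator, batch_size=32):
    # take `batch_size` items from the infinite generator and stack them;
    # fixed=self.duration above keeps waveform and label shapes consistent
    samples = list(itertools.islice(sample_generator, batch_size))
    return {'waveform': np.stack([s['waveform'] for s in samples]),
            'y': np.stack([s['y'] for s in samples])}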
Example 4
    def samples(self):
        """Random label-stratified samples

        Returns
        -------
        samples : generator
            Generator that yields {'X': ..., 'y': ...} samples indefinitely,
            looping over shuffled labels and drawing `per_label` files and
            `per_turn` chunks per file.
        """

        labels = list(self.data_)

        # batch_counter counts samples in current batch.
        # as soon as it reaches batch_size, a new random duration is selected
        # so that the next batch will use a different chunk duration
        batch_counter = 0
        batch_size = self.batch_size
        batch_duration = self.min_duration + np.random.rand() * (
            self.duration - self.min_duration)

        while True:

            # shuffle labels
            np.random.shuffle(labels)

            # loop on each label
            for label in labels:

                # load data for this label
                # segment_generators, durations, files, features = \
                #     zip(*self.data_[label])
                segments, durations, files = zip(*self.data_[label])

                # choose 'per_label' files at random with probability
                # proportional to the total duration of 'label' in those files
                probabilities = durations / np.sum(durations)
                chosen = np.random.choice(len(files),
                                          size=self.per_label,
                                          p=probabilities)

                # loop on (randomly) chosen files
                for i in chosen:

                    # choose one segment at random with
                    # probability proportional to duration
                    # segment = next(segment_generators[i])
                    segment = next(
                        random_segment(segments[i], weighted=self.weighted_))

                    # choose per_turn chunk(s) at random
                    for chunk in itertools.islice(
                            random_subsegment(segment, batch_duration),
                            self.per_turn):

                        yield {
                            'X': self.feature_extraction.crop(
                                files[i], chunk,
                                mode='center', fixed=batch_duration),
                            'y': self.segment_labels_.index(label),
                        }

                        # increment number of samples in current batch
                        batch_counter += 1

                        # as soon as the batch is complete, a new random
                        # duration is selected so that the next batch will use
                        # a different chunk duration
                        if batch_counter == batch_size:
                            batch_counter = 0
                            batch_duration = self.min_duration + np.random.rand() * (
                                self.duration - self.min_duration)
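
Note that a new chunk duration is drawn exactly every batch_size samples, so each consecutive block of batch_size samples shares one feature shape. The same itertools.islice pattern shown after Example 3 therefore yields stackable batches here too (again a sketch with assumed names, not part of the original code):

import itertools
import numpy as np

def stacked_batches(sample_generator, batch_size):
    # group the infinite stream into batches of `batch_size`; within a batch
    # all 'X' matrices share the same (duration-dependent) shape
    while True:
        batch = list(itertools.islice(sample_generator, batch_size))
        yield {'X': np.stack([s['X'] for s in batch]),
               'y': np.array([s['y'] for s in batch])}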