def get_transforms(bckgrd_aug_dir=None, secondary_bckgrd_aug_dir=None):
    """Build train/valid waveform augmentation pipelines.

    Args:
        bckgrd_aug_dir: optional directory of background-noise files; when
            given, AddBackgroundNoise is included in the pipeline.
        secondary_bckgrd_aug_dir: optional directory of short-noise files;
            when given, AddShortNoises is included in the pipeline.

    Returns:
        dict with "train" and "valid" augmentation callables produced by
        get_training_augmentation / get_validation_augmentation.
    """
    list_of_aug = [
        AddGaussianNoise(p=0.2),
        AddGaussianSNR(p=0.2),
        Gain(min_gain_in_db=-15, max_gain_in_db=15, p=0.3),
    ]
    if bckgrd_aug_dir is not None:
        list_of_aug.append(AddBackgroundNoise(bckgrd_aug_dir, p=0.2))
    if secondary_bckgrd_aug_dir is not None:
        list_of_aug.append(
            AddShortNoises(secondary_bckgrd_aug_dir,
                           min_time_between_sounds=0.0,
                           max_time_between_sounds=15.0,
                           burst_probability=0.5,
                           p=0.6))
    # FIX: the original appended AddGaussianNoise/AddGaussianSNR/Gain a second
    # time with identical parameters, silently doubling their effective
    # application probability; the duplicate batch has been removed.
    augmenter = Compose(list_of_aug)
    transforms = {
        "train": get_training_augmentation(augmenter),
        "valid": get_validation_augmentation()
    }
    return transforms
def augmented_feature_engineering(wavFile, settings):
    """Read a wav file, apply random waveform augmentations, extract features.

    Depending on settings['FEATURE_ENGINEERING_TYPE'] the return value is a
    flat list of MFCC coefficients, the raw augmented waveform, or an empty
    list (after printing a warning) for unsupported types.
    """
    sample_rate, raw_samples = scipy.io.wavfile.read(wavFile)
    # Keep only the first channel when the recording is stereo.
    mono = raw_samples[:, 0] if settings['CHANNELS'] == 2 else raw_samples
    pipeline = Compose([
        AddGaussianNoise(min_amplitude=0.001, max_amplitude=0.015, p=0.5),
        TimeStretch(min_rate=0.8, max_rate=1.25, p=0.5),
        Shift(min_fraction=-0.5, max_fraction=0.5, p=0.5),
    ])
    augmented = pipeline(samples=np.array(mono, dtype="float32"),
                         sample_rate=sample_rate)
    feature_type = settings['FEATURE_ENGINEERING_TYPE']
    if feature_type == TYPE_FEATURE_ENGINEERING_NORM_MFCC:
        coeffs = mfcc(augmented,
                      samplerate=sample_rate,
                      nfft=1103,
                      numcep=30,
                      nfilt=40,
                      preemph=0.5,
                      winstep=0.005,
                      winlen=0.015,
                      appendEnergy=False)
        features = []
        features.extend(coeffs.ravel())
        return features
    if feature_type == TYPE_FEATURE_ENGINEERING_RAW_WAVE:
        return augmented
    print("OLD MFCC TYPE IS NOT SUPPORTED FOR TRAINING PYTORCH")
    return []
def build_transforms(train=True):
    """Return the waveform augmentation pipeline.

    NOTE(review): `train` is currently unused — the same pipeline is returned
    for both modes; confirm whether a no-op validation pipeline was intended.
    """
    steps = [
        AddGaussianNoise(min_amplitude=0.001, max_amplitude=0.015, p=0.5),
        TimeStretch(min_rate=0.8, max_rate=1.25, p=0.5),
        PitchShift(min_semitones=-4, max_semitones=4, p=0.5),
        Shift(min_fraction=-0.5, max_fraction=0.5, p=0.5),
    ]
    return Compose(steps)
def add_gaussian_noise(data_path, file_info, n_repeats=3, min_amp=0.001, max_amp=0.015):
    """Write n_repeats gaussian-noise-augmented copies of each positive clip.

    Only rows of `file_info` with label == 1 are processed. Clean audio is
    read from `<data_path>Clean/` and augmented copies are written to
    `<data_path>Augmented/AddGaussianNoise_<i>_<fname>`.
    """
    noiser = Compose([
        AddGaussianNoise(min_amplitude=min_amp, max_amplitude=max_amp, p=1.0)
    ])
    for row in file_info[file_info.label == 1].index:
        fname = file_info.at[row, 'fname']
        # Read once per clip; each repeat draws fresh random noise.
        rate, samples = wavfile.read(data_path + 'Clean/' + fname)
        for rep in range(n_repeats):
            out_path = (data_path + 'Augmented/'
                        + 'AddGaussianNoise_{:03d}_'.format(rep) + fname)
            noisy = noiser(samples=samples, sample_rate=rate)
            wavfile.write(filename=out_path, rate=rate, data=noisy)
def get_stretched_audio(self, x, sr):
    """Return `x` with gaussian noise applied at p=0.5.

    NOTE(review): despite the name, only noise is added here — no time
    stretching; confirm the name against callers.
    """
    augmenter = Compose([
        AddGaussianNoise(min_amplitude=0.001, max_amplitude=0.015, p=0.5)
    ])
    return augmenter(samples=x, sample_rate=sr)
def __init__(self, dataset):
    """Wrap `dataset`, caching the configured sample rate and an augmenter."""
    self.dataset = dataset
    self.sample_rate = TRAINING_CONFIG['audio_sample_rate']
    steps = [
        AddGaussianNoise(min_amplitude=0.001, max_amplitude=0.015, p=0.5),
        TimeStretch(min_rate=0.8, max_rate=1.25, p=0.5),
        PitchShift(min_semitones=-4, max_semitones=4, p=0.5),
        Shift(min_fraction=-0.5, max_fraction=0.5, p=0.5),
    ]
    self.augmenter = Compose(steps)
def __init__(self):
    """Time-domain augmentation module (noise, stretch, pitch, masking)."""
    super(Augment_Time, self).__init__()
    # NOTE(review): p looks like an apply-probability read by callers — confirm.
    self.p = 0.5
    steps = [
        AddGaussianNoise(min_amplitude=0.001, max_amplitude=0.01, p=0.3),
        TimeStretch(min_rate=0.8, max_rate=1.25, p=0.5),
        PitchShift(min_semitones=-4, max_semitones=4, p=0.5),
        FrequencyMask(),
        TimeMask(),
    ]
    self.augmenter = Compose(steps)
def __init__(self, desired_sr, mode, time_aug=False):
    """Audio-reading module; optionally applies time-domain augmentation."""
    super(ReadAudio, self).__init__()
    self.desired_sr = desired_sr
    self.mode = mode
    self.time_aug = time_aug
    self.augmenter = Compose([
        AddGaussianNoise(min_amplitude=0.001, max_amplitude=0.01, p=0.3),
        TimeStretch(min_rate=0.8, max_rate=1.25, p=0.3),
        PitchShift(min_semitones=-4, max_semitones=4, p=0.5),
    ])
def get_transforms(bckgrd_aug_dir=None):
    """Build train/valid augmentation pipelines.

    Background noise is only included when `bckgrd_aug_dir` is supplied.
    """
    augs = [
        AddGaussianNoise(min_amplitude=0.001, max_amplitude=0.015, p=0.3),
        AddGaussianSNR(p=0.3),
    ]
    if bckgrd_aug_dir is not None:
        augs.append(AddBackgroundNoise(bckgrd_aug_dir, p=0.5))
    return {
        "train": get_training_augmentation(Compose(augs)),
        "valid": get_validation_augmentation(),
    }
def augment_stretched_noise(data, sr, label, noise=True, stretch=True):
    """Augment a waveform and return its time-averaged 40-dim MFCC vector.

    NOTE(review): `label` is unused in this function — confirm whether any
    caller relies on it before removing.
    """
    steps = []
    if noise:
        steps.append(
            AddGaussianNoise(min_amplitude=0.001, max_amplitude=0.015, p=0.5))
    if stretch:
        steps.append(TimeStretch(min_rate=0.8, max_rate=1.25, p=0.5))
    augmented = Compose(steps)(samples=data, sample_rate=sr)
    mfccs = librosa.feature.mfcc(y=augmented, sr=sr, n_mfcc=40)
    return np.mean(mfccs.T, axis=0)
def compose_without_noise(ir_path='data/impulse'):
    """Augmentation chain without background noise; at most 3 augs applied."""
    base_p = 0.25
    chain = [
        AddGaussianNoise(p=base_p),
        Shift(p=base_p, min_fraction=-0.2, max_fraction=0.2),
        FrequencyMask(p=base_p),
        TimeMask(p=base_p, max_band_part=0.25),
        AddGaussianSNR(p=base_p),
        ClippingDistortion(p=base_p, max_percentile_threshold=20),
        MyAddImpulseResponse(p=base_p, ir_path=ir_path),
        # Deliberately rare relative to the rest of the chain.
        TimeStretch(p=base_p / 10),
        PitchShift(p=base_p / 25),
    ]
    return MyCompose(chain, p=1.0, max_augs=3)
def __getitem__(self, idx: int):
    """Load one clip, crop/pad it to PERIOD seconds, optionally augment it,
    and return (mel-spectrogram image CHW float32 in [0,1], one-hot labels)."""
    wav_path, ebird_code = self.file_list[idx]
    y, sr = sf.read(wav_path, dtype='float32')
    # Waveform-level augmentation; only applied below when
    # self.waveform_transforms is truthy.
    signal_aug = Compose([
        AddGaussianNoise(min_amplitude=0.001, max_amplitude=0.015, p=0.5),
        TimeStretch(min_rate=0.9, max_rate=1.15, p=0.5)
    ])
    len_y = len(y)
    effective_length = sr * PERIOD
    if len_y < effective_length:
        # Short clip: place it at a random offset inside a zero buffer.
        new_y = np.zeros(effective_length, dtype=np.float32)
        start = np.random.randint(effective_length - len_y)
        new_y[start:start + len_y] = y
        y = new_y.astype(np.float32)
    elif len_y > effective_length:
        # Long clip: crop a random PERIOD-second window.
        start = np.random.randint(len_y - effective_length)
        y = y[start:start + effective_length].astype(np.float32)
    else:
        y = y.astype(np.float32)
    if self.waveform_transforms:
        y = signal_aug(samples=y, sample_rate=sr)
    melspec = librosa.feature.melspectrogram(
        y, sr=sr, **self.melspectrogram_parameters)
    melspec = librosa.power_to_db(melspec).astype(np.float32)
    if self.spectrogram_transforms:
        melspec = self.spectrogram_transforms(melspec)
    else:
        pass
    #img_aug = transforms.Compose([transforms.RandomHorizontalFlip(p=0.5)])
    image = mono_to_color(melspec)
    height, width, _ = image.shape
    # Resize so height == img_size while preserving the aspect ratio.
    image = cv2.resize(
        image, (int(width * self.img_size / height), self.img_size))
    #image = img_aug(image)
    image = np.moveaxis(image, 2, 0)  # HWC -> CHW
    image = (image / 255.0).astype(np.float32)
    labels = np.zeros(len(BIRD_CODE), dtype="f")
    labels[BIRD_CODE[ebird_code]] = 1
    return image, labels
def compose(sounds_path):
    """Randomized augmentation chain: 40% overall chance, shuffled order."""
    prob = 0.2
    chain = [
        MyGain(p=prob),
        AddGaussianNoise(p=prob),
        Shift(p=prob, min_fraction=-0.25, max_fraction=0.25),
        FrequencyMask(p=prob),
        TimeMask(p=prob, max_band_part=0.25),
        AddGaussianSNR(p=prob),
        ClippingDistortion(p=prob, max_percentile_threshold=20),
        AddBackgroundNoise(sounds_path=sounds_path, p=prob),
        # Deliberately rare relative to the rest of the chain.
        TimeStretch(p=prob / 10),
        PitchShift(p=prob / 30),
    ]
    return Compose(chain, p=0.4, shuffle=True)
def raw_audio_process(transform_fn):
    """Decorator: augment raw audio, then pass it to `transform_fn`.

    The wrapped function receives the augmented audio plus `win_length` and
    `hop_length` derived from the kwargs `sr` (default 22050 Hz) and `n_win`
    (window size in ms, default 20).
    """
    augment_fn = Compose([
        AddGaussianNoise(min_amplitude=0.001, max_amplitude=0.015, p=0.5),
        TimeStretch(min_rate=0.8, max_rate=1.25, p=0.5),
        PitchShift(min_semitones=-4, max_semitones=4, p=0.5),
        Shift(min_fraction=-0.5, max_fraction=0.5, p=0.5)
    ])

    @wraps(transform_fn)
    def augment_audio(audio, **kwargs):
        sr = kwargs.setdefault('sr', 22050)
        n_win = kwargs.setdefault('n_win', 20)
        win_length = int(n_win * sr / 1000)
        # FIX: audiomentations' Compose must be called with samples= and
        # sample_rate=; the original `augment_fn(audio)` omitted the sample
        # rate and raised TypeError at call time.
        audio = augment_fn(samples=audio, sample_rate=sr)
        return transform_fn(audio,
                            win_length=win_length,
                            hop_length=win_length // 4)
    return augment_audio
def process_fn(output='stft', spec_aug=False, p=0.5, sr=22050):
    """Return a feature-extraction closure.

    'stft' -> log-magnitude STFT features; 'lms' -> log-mel features.
    1-D inputs are treated as raw waveforms and augmented first.
    Any other `output` value returns None (original behavior).
    """
    augment_fn = Compose([
        AddGaussianNoise(min_amplitude=0.001, max_amplitude=0.015, p=p),
        TimeStretch(min_rate=0.8, max_rate=1.25, p=p),
        PitchShift(min_semitones=-4, max_semitones=4, p=p),
        Shift(min_fraction=-0.5, max_fraction=0.5, p=p)
    ])
    win_length = int(20 * sr / 1000)  # 20 ms window

    def _augment_if_raw(feats):
        # Only 1-D arrays are raw waveforms eligible for augmentation.
        if feats.ndim == 1:
            feats = augment_fn(samples=feats, sample_rate=sr)
        return feats

    if output == 'stft':
        def stft_transform(feats):
            feats = _augment_if_raw(feats)
            feats = np.log(
                np.abs(librosa.stft(feats, 1023, win_length=win_length)).T
                + 1e-12)
            return spec_augment(feats) if spec_aug else feats
        return stft_transform
    if output == 'lms':
        def lms_transform(feats):
            feats = _augment_if_raw(feats)
            hop_length = win_length // 4
            feats = np.log(
                np.abs(
                    librosa.feature.melspectrogram(
                        feats,
                        n_fft=win_length,
                        hop_length=hop_length,
                        win_length=win_length)).T + 1e-12)
            return spec_augment(feats) if spec_aug else feats
        return lms_transform
def __init__(self, path_audio, y, resample_freq=32000, max_length=3,
             augmentation=None, validation=False, num_class=264,
             pseudo_labels=None):
    """Keyword-spotting audio dataset (Luganda/English agricultural terms).

    Args:
        path_audio: list of audio sample paths.
        y: list of label strings; converted to one-hot float32 targets.
        resample_freq: target sampling frequency.
        max_length: maximum clip length in seconds.
        augmentation: iterable of augmentation names to enable; stored as a
            set. FIX: was a mutable default argument (`[]`); replaced with a
            None sentinel — behavior is unchanged since the value was only
            ever read into a set.
        validation: validation-mode flag.
        num_class: number of output classes.
        pseudo_labels: optional (paths, labels) pair of pseudo-labelled data
            appended via self.add_pl.
    """
    # Keyword -> class-index mapping.
    self.labels2idx = {
        'Pump': 0, 'Spinach': 1, 'abalimi': 2, 'afukirira': 3,
        'agriculture': 4, 'akammwanyi': 5, 'akamonde': 6, 'akasaanyi': 7,
        'akatunda': 8, 'akatungulu': 9, 'akawuka': 10, 'amakoola': 11,
        'amakungula': 12, 'amalagala': 13, 'amappapaali': 14, 'amatooke': 15,
        'banana': 16, 'beans': 17, 'bibala': 18, 'bulimi': 19,
        'butterfly': 20, 'cabbages': 21, 'cassava': 22, 'caterpillar': 23,
        'caterpillars': 24, 'coffee': 25, 'crop': 26, 'ddagala': 27,
        'dig': 28, 'disease': 29, 'doodo': 30, 'drought': 31,
        'ebbugga': 32, 'ebibala': 33, 'ebigimusa': 34, 'ebijanjaalo': 35,
        'ebijjanjalo': 36, 'ebikajjo': 37, 'ebikolo': 38, 'ebikongoliro': 39,
        'ebikoola': 40, 'ebimera': 41, 'ebinyebwa': 42, 'ebirime': 43,
        'ebisaanyi': 44, 'ebisooli': 45, 'ebisoolisooli': 46, 'ebitooke': 47,
        'ebiwojjolo': 48, 'ebiwuka': 49, 'ebyobulimi': 50, 'eddagala': 51,
        'eggobe': 52, 'ejjobyo': 53, 'ekibala': 54, 'ekigimusa': 55,
        'ekijanjaalo': 56, 'ekikajjo': 57, 'ekikolo': 58, 'ekikoola': 59,
        'ekimera': 60, 'ekirime': 61, 'ekirwadde': 62, 'ekisaanyi': 63,
        'ekitooke': 64, 'ekiwojjolo': 65, 'ekyeya': 66, 'emboga': 67,
        'emicungwa': 68, 'emisiri': 69, 'emiyembe': 70, 'emmwanyi': 71,
        'endagala': 72, 'endokwa': 73, 'endwadde': 74, 'enkota': 75,
        'ennima': 76, 'ennimiro': 77, 'ennyaanya': 78, 'ensigo': 79,
        'ensiringanyi': 80, 'ensujju': 81, 'ensuku': 82, 'ensukusa': 83,
        'enva endiirwa': 84, 'eppapaali': 85, 'faamu': 86, 'farm': 87,
        'farmer': 88, 'farming instructor': 89, 'fertilizer': 90, 'fruit': 91,
        'fruit picking': 92, 'garden': 93, 'greens': 94, 'ground nuts': 95,
        'harvest': 96, 'harvesting': 97, 'insect': 98, 'insects': 99,
        'irish potatoes': 100, 'irrigate': 101, 'kaamulali': 102,
        'kasaanyi': 103, 'kassooli': 104, 'kikajjo': 105, 'kikolo': 106,
        'kisaanyi': 107, 'kukungula': 108, 'leaf': 109, 'leaves': 110,
        'lumonde': 111, 'lusuku': 112, 'maize': 113, 'maize stalk borer': 114,
        'maize streak virus': 115, 'mango': 116, 'mangoes': 117,
        'matooke': 118, 'matooke seedlings': 119, 'medicine': 120,
        'miceere': 121, 'micungwa': 122, 'mpeke': 123, 'muceere': 124,
        'mucungwa': 125, 'mulimi': 126, 'munyeera': 127, 'muwogo': 128,
        'nakavundira': 129, 'nambaale': 130, 'namuginga': 131, 'ndwadde': 132,
        'nfukirira': 133, 'nnakati': 134, 'nnasale beedi': 135,
        'nnimiro': 136, 'nnyaanya': 137, 'npk': 138, 'nursery bed': 139,
        'obulimi': 140, 'obulwadde': 141, 'obumonde': 142, 'obusaanyi': 143,
        'obutunda': 144, 'obutungulu': 145, 'obuwuka': 146,
        'okufukirira': 147, 'okufuuyira': 148, 'okugimusa': 149,
        'okukkoola': 150, 'okukungula': 151, 'okulima': 152,
        'okulimibwa': 153, 'okunnoga': 154, 'okusaasaana': 155,
        'okusaasaanya': 156, 'okusiga': 157, 'okusimba': 158,
        'okuzifuuyira': 159, 'olusuku': 160, 'omuceere': 161,
        'omucungwa': 162, 'omulimi': 163, 'omulimisa': 164, 'omusiri': 165,
        'omuyembe': 166, 'onion': 167, 'orange': 168, 'pampu': 169,
        'passion fruit': 170, 'pawpaw': 171, 'pepper': 172, 'plant': 173,
        'plantation': 174, 'ppaapaali': 175, 'pumpkin': 176, 'rice': 177,
        'seed': 178, 'sikungula': 179, 'sow': 180, 'spray': 181,
        'spread': 182, 'suckers': 183, 'sugarcane': 184, 'sukumawiki': 185,
        'super grow': 186, 'sweet potatoes': 187, 'tomatoes': 188,
        'vegetables': 189, 'watermelon': 190, 'weeding': 191, 'worm': 192}
    # Inverse mapping: class index -> keyword.
    self.idx2labels = {k: v for v, k in self.labels2idx.items()}
    identity = np.eye(num_class)
    # FIX: None sentinel instead of a shared mutable default list.
    self.augmentation = set(augmentation) if augmentation is not None else set()
    self.samples = path_audio  # + path_augment
    self.max_length = max_length  # 99% are shorter than 3 sec
    self.resample_freq = resample_freq
    self.validation = validation
    # One-hot targets; rows follow the order of `y`.
    self.y = np.array([identity[self.labels2idx[t]] for t in y]).astype(
        np.float32)  # + [self.labels2idx[t] for t in y_aug]
    self.num_class = num_class
    # Training-time waveform augmentation pipeline.
    self.noise = Compose([
        AddGaussianNoise(min_amplitude=0.001, max_amplitude=0.015, p=0.6),
        TimeStretch(min_rate=0.8, max_rate=1.25, p=0.6),
        PitchShift(min_semitones=-4, max_semitones=4, p=0.5),
        Shift(min_fraction=-0.5, max_fraction=0.5, p=0.5),
        Gain(min_gain_in_db=-12, max_gain_in_db=12, p=0.6),
    ])
    if pseudo_labels is not None:
        self.add_pl(pseudo_labels[0], pseudo_labels[1])
def __init__(
    self,
    sound_file_paths,
    batch_size=8,
    augment=True,
    save_augmented_sounds_to_path=None,
    fixed_sound_length=FIXED_SOUND_LENGTH,
    num_mels=NUM_MELS,
    preprocessing_fn=None,
):
    """Batch generator over laughter and non-laughter sound files."""
    self.sound_file_paths = sound_file_paths
    self.batch_size = batch_size
    self.augment = augment
    self.save_augmented_sounds_to_path = save_augmented_sounds_to_path
    self.fixed_sound_length = fixed_sound_length
    # +3 hops of headroom over the fixed length — presumably for framing;
    # TODO confirm against the consumer of min_num_samples.
    self.min_num_samples = (fixed_sound_length + 3) * HOP_LENGTH
    self.num_mels = num_mels
    self.preprocessing_fn = preprocessing_fn

    self.laughter_paths = self.sound_file_paths["laughter"]
    # Everything outside the laughter categories counts as negative samples.
    self.non_laughter_paths = [
        path
        for category in self.sound_file_paths
        if not is_laughter_category(category)
        for path in self.sound_file_paths[category]
    ]

    if save_augmented_sounds_to_path:
        os.makedirs(save_augmented_sounds_to_path, exist_ok=True)

    # Mostly shifting; stretch/pitch changes are kept rare.
    self.augmenter = Compose(
        [
            AddGaussianNoise(min_amplitude=0.001, max_amplitude=0.002, p=0.1),
            TimeStretch(min_rate=0.8, max_rate=1.25, p=0.02),
            PitchShift(min_semitones=-3, max_semitones=3, p=0.02),
            Shift(min_fraction=-0.5, max_fraction=0.5, p=0.5),
        ]
    )
Path(os.path.join(DEMO_DIR, "acoustic_guitar_0.wav")), Path(os.path.join(DEMO_DIR, "perfect-alley1.ogg")), ] transforms = [ { "instance": AddBackgroundNoise(sounds_path=os.path.join( DEMO_DIR, "background_noises"), p=1.0), "num_runs": 5, }, { "instance": AddGaussianNoise(min_amplitude=0.001, max_amplitude=0.015, p=1.0), "num_runs": 5, }, { "instance": AddGaussianSNR(p=1.0), "num_runs": 5 }, { "instance": AddImpulseResponse(p=1.0, ir_path=os.path.join(DEMO_DIR, "ir")), "num_runs": 1, }, { "instance":
def applyTransformations(fileName, output_dir, auxiliarSoundsDir):
    """Apply each supported audiomentations transform to `fileName`, writing
    every augmented variant into `output_dir` as
    "<name>_<Transform>_<iii>.wav".

    FIX: the PitchShift outputs were previously named "*_itchShift_*" due to
    a typo in the filename template.
    """
    name = fileName.split(".")[0].split("/")[-1]
    samples = load_wav_file(fileName)

    def produce(augmenter, label, runs=5):
        # Apply `augmenter` `runs` times, one output wav per run.
        for i in range(runs):
            output_file_path = os.path.join(
                output_dir, "{}_{}_{:03d}.wav".format(name, label, i))
            augmented_samples = augmenter(samples=samples,
                                          sample_rate=SAMPLE_RATE)
            wavfile.write(output_file_path, rate=SAMPLE_RATE,
                          data=augmented_samples)

    # Single run (the original wrote only index 000 for these).
    produce(Compose([
        AddImpulseResponse(p=1.0,
                           ir_path=os.path.join(auxiliarSoundsDir,
                                                "helperSounds/ir"))
    ]), "AddImpulseResponse", runs=1)
    produce(Compose([Normalize(p=1.0)]), "Normalize", runs=1)

    produce(Compose([FrequencyMask(p=1.0)]), "FrequencyMask")
    produce(Compose([TimeMask(p=1.0)]), "TimeMask")
    produce(Compose([AddGaussianSNR(p=1.0)]), "AddGaussianSNR")
    produce(Compose([
        AddGaussianNoise(min_amplitude=0.001, max_amplitude=0.015, p=1.0)
    ]), "AddGaussianNoise")
    produce(Compose([TimeStretch(min_rate=0.8, max_rate=1.25, p=1.0)]),
            "TimeStretch")
    # FIX: label was "itchShift" in the original filename template.
    produce(Compose([PitchShift(min_semitones=-4, max_semitones=4, p=1.0)]),
            "PitchShift")
    produce(Compose([Shift(min_fraction=-0.5, max_fraction=0.5, p=1.0)]),
            "Shift")
    produce(Compose([
        Shift(min_fraction=-0.5, max_fraction=0.5, rollover=False, p=1.0)
    ]), "ShiftWithoutRollover")
    produce(Compose([ClippingDistortion(p=1.0)]), "ClippingDistortion")
    produce(Compose([
        AddBackgroundNoise(sounds_path=os.path.join(
            auxiliarSoundsDir, "helperSounds/background_noises"), p=1.0)
    ]), "AddBackgroundNoise")
    produce(Compose([
        AddShortNoises(
            sounds_path=os.path.join(auxiliarSoundsDir,
                                     "helperSounds/short_noises"),
            min_snr_in_db=0,
            max_snr_in_db=8,
            min_time_between_sounds=2.0,
            max_time_between_sounds=4.0,
            burst_probability=0.4,
            min_pause_factor_during_burst=0.01,
            max_pause_factor_during_burst=0.95,
            min_fade_in_time=0.005,
            max_fade_in_time=0.08,
            min_fade_out_time=0.01,
            max_fade_out_time=0.1,
            p=1.0,
        )
    ]), "AddShortNoises")
def generate(self, wave_file, output_dir):
    """
    For each enabled transformation, apply it to an example sound and write
    the transformed sounds to an output folder as
    "<file>_<Transform><iii>.wav".

    Each self.<Transform> attribute is a sequence whose element [0] is the
    enable flag; for AddWhiteNoise/AddPinkNoise, element [1] is the number
    of variants to produce. (Refactored: the duplicated apply-and-write
    loops were folded into one helper; behavior and filenames unchanged.)
    """
    samples = load_wav_file(wave_file)
    _filename = os.path.basename(wave_file).split('.')[0]

    def produce(augmenter, label, runs=5):
        # Apply `augmenter` `runs` times, one output wav per run.
        for i in range(runs):
            output_file_path = os.path.join(
                output_dir, _filename + "_{}{:03d}.wav".format(label, i))
            augmented_samples = augmenter(samples=samples,
                                          sample_rate=SAMPLE_RATE)
            wavfile.write(output_file_path, rate=SAMPLE_RATE,
                          data=augmented_samples)

    if self.AddImpulseResponse[0]:
        produce(Compose([
            AddImpulseResponse(p=1.0, ir_path=os.path.join(DEMO_DIR, "ir"))
        ]), "AddImpulseResponse", runs=1)
    if self.FrequencyMask[0]:
        produce(Compose([FrequencyMask(p=1.0)]), "FrequencyMask")
    if self.TimeMask[0]:
        produce(Compose([TimeMask(p=1.0)]), "TimeMask")
    if self.AddGaussianSNR[0]:
        produce(Compose([AddGaussianSNR(p=1.0)]), "AddGaussianSNR")
    if self.AddGaussianNoise[0]:
        produce(Compose([
            AddGaussianNoise(min_amplitude=0.001, max_amplitude=0.015, p=1.0)
        ]), "AddGaussianNoise")
    if self.TimeStretch[0]:
        produce(Compose([TimeStretch(min_rate=0.5, max_rate=1.5, p=1.0)]),
                "TimeStretch")
    if self.PitchShift[0]:
        produce(Compose([
            PitchShift(min_semitones=-6, max_semitones=12, p=1.0)
        ]), "PitchShift")
    if self.Shift[0]:
        produce(Compose([Shift(min_fraction=-0.5, max_fraction=0.5, p=1.0)]),
                "Shift")
    if self.ShiftWithoutRoll[0]:
        produce(Compose([
            Shift(min_fraction=-0.2, max_fraction=0.2, rollover=False, p=1.0)
        ]), "ShiftWithoutRollover")
    if self.Normalize[0]:
        produce(Compose([Normalize(p=1.0)]), "Normalize", runs=1)
    if self.Resample[0]:
        produce(Compose([
            Resample(min_sample_rate=12000, max_sample_rate=44100, p=1.0)
        ]), "Resample")
    if self.ClippingDistortion[0]:
        produce(Compose([
            ClippingDistortion(max_percentile_threshold=10, p=1.0)
        ]), "ClippingDistortion")
    if self.AddBackgroundNoise[0]:
        produce(Compose([
            AddBackgroundNoise(sounds_path=os.path.join(
                DEMO_DIR, "background_noises"), p=1.0)
        ]), "AddBackgroundNoise")
    # White/pink "noise" variants reuse AddBackgroundNoise over dedicated
    # noise folders; their run counts come from the config tuples.
    if self.AddWhiteNoise[0]:
        produce(Compose([
            AddBackgroundNoise(sounds_path=os.path.join(
                DEMO_DIR, "white_noises"), p=1.0)
        ]), "AddWhiteNoise", runs=self.AddWhiteNoise[1])
    if self.AddPinkNoise[0]:
        produce(Compose([
            AddBackgroundNoise(sounds_path=os.path.join(
                DEMO_DIR, "pink_noises"), p=1.0)
        ]), "AddPinkNoise", runs=self.AddPinkNoise[1])
    if self.AddShortNoises[0]:
        produce(Compose([
            AddShortNoises(
                sounds_path=os.path.join(DEMO_DIR, "short_noises"),
                min_snr_in_db=0,
                max_snr_in_db=8,
                min_time_between_sounds=2.0,
                max_time_between_sounds=4.0,
                burst_probability=0.4,
                min_pause_factor_during_burst=0.01,
                max_pause_factor_during_burst=0.95,
                min_fade_in_time=0.005,
                max_fade_in_time=0.08,
                min_fade_out_time=0.01,
                max_fade_out_time=0.1,
                p=1.0,
            )
        ]), "AddShortNoises")
# Third-party imports for data loading, feature extraction, and augmentation.
from sklearn.utils import class_weight
import warnings
from tqdm import tqdm
#from tensorflow.keras import backend
from kapre.time_frequency import Melspectrogram
from kapre.utils import Normalization2D
from kapre.augmentation import AdditiveNoise
from kapre.time_frequency import Spectrogram
from python_speech_features import mfcc
from mutagen.mp3 import MP3
from audiomentations import Compose, AddGaussianNoise, TimeStretch, PitchShift, Shift

# Module-level augmenter: gaussian noise only, applied half the time.
augmenter = Compose([
    AddGaussianNoise(min_amplitude=0.001, max_amplitude=0.10, p=0.5)
])

#%matplotlib inline
#!rm -r train_data
#!rm -r val_data
#!rm -r models
#!mkdir models

# suppress warnings
warnings.filterwarnings("ignore")

# Root directory containing the per-species training audio folders.
SOUND_DIR = "data/birdsong-recognition/train_audio/"

# function to plot signal
def __init__(self, root_dir, csv_dir, conf, bird_code, inv_ebird_label,
             num_test_samples=10, bckgrd_aug_dir=None,
             background_audio_dir=None, file_type="mp3", isTraining=True,
             transform=None, apply_mixer=False):
    """Bird-call dataset backed by a metadata CSV.

    Args:
        root_dir: root directory of the audio files.
        csv_dir: path to the metadata CSV (uses filename, ebird_code,
            secondary_labels columns).
        conf: configuration object (contents not visible here).
        bird_code / inv_ebird_label: label <-> code mappings.
        num_test_samples: number of samples used for testing.
        bckgrd_aug_dir: optional dir of background noises; when given,
            AddBackgroundNoise is included in the augmenter.
        background_audio_dir: optional dir whose .wav files are appended to
            the data as extra (label-less) background recordings.
        file_type: audio file extension.
        isTraining: training-mode flag.
        transform: optional transform applied by the loader.
        apply_mixer: when True, build per-class index groups for mixing.
    """
    self.root_dir = root_dir
    self.conf = conf
    self.isTraining = isTraining
    self.bird_code = bird_code
    self.inv_ebird_label = inv_ebird_label
    self.transform = transform
    self.file_type = file_type
    self.apply_mixer = apply_mixer
    # Extra kwargs handed to the DataLoader by the training harness.
    self.additional_loader_params = {
        "worker_init_fn": self.init_workers_fn,
        "collate_fn": self.collate_fn
    }
    self.sampler = ImbalancedDatasetSampler
    df = pd.read_csv(csv_dir)
    # NOTE(review): eval() on CSV contents — acceptable only for trusted
    # metadata; secondary_labels is expected to hold list literals.
    df.secondary_labels = df.secondary_labels.apply(eval)
    self.data = list(df[["filename", "ebird_code",
                         "secondary_labels"]].to_dict('index').values())
    self.background_audio_dir = background_audio_dir
    if self.background_audio_dir is not None:
        # Background recordings carry only a filename (no label keys).
        for bk in background_audio_dir.glob('**/*.wav'):
            self.data.append({"filename": bk})
    self.num_test_samples = num_test_samples
    self.length = len(self.data)
    if self.apply_mixer:
        # Per-class row indices, used to draw mixing partners.
        self.dict_grp = {}
        for grp, d in df.groupby("ebird_code"):
            self.dict_grp[grp] = d.index.values
        self.possible_mixer_keys = list(self.dict_grp.keys())
    # Background noise augmentation is only available when a noise dir is
    # supplied; otherwise fall back to noise/SNR/pitch only.
    if bckgrd_aug_dir is not None:
        self.augmenter = Compose([
            AddGaussianNoise(min_amplitude=0.001, max_amplitude=0.015,
                             p=0.3),
            AddGaussianSNR(p=0.3),
            PitchShift(min_semitones=-4, max_semitones=4, p=0.3),
            AddBackgroundNoise(bckgrd_aug_dir, p=0.5),
        ])
    else:
        self.augmenter = Compose([
            AddGaussianNoise(min_amplitude=0.001, max_amplitude=0.015,
                             p=0.3),
            AddGaussianSNR(p=0.3),
            PitchShift(min_semitones=-4, max_semitones=4, p=0.3)
        ])
    del df
def transform(file_path, output_folder, iterations):
    """
    For each transformation, apply it to an example sound and write the
    transformed sounds to an output folder as
    "<Transform>_<file>_<i>.wav" (`iterations` variants each).

    FIX: TimeStretch was built with p=0.5 while every other transform here
    uses p=1.0, so roughly half of the "TimeStretch" outputs were unmodified
    copies of the input; it now always applies.
    """
    samples = load_wav_file(file_path)
    file_name = os.path.basename(file_path).replace('.wav', '')

    def produce(augmenter, name):
        # Apply `augmenter` `iterations` times, one output file per run.
        for i in range(iterations):
            output_file_path = '{}/{}'.format(
                output_folder, "{}_{}_{}.wav".format(name, file_name, i))
            augmented_samples = augmenter(samples=samples,
                                          sample_rate=SAMPLE_RATE)
            wavfile.write(output_file_path, rate=SAMPLE_RATE,
                          data=augmented_samples)

    produce(Compose([TimeMask(p=1.0)]), 'TimeMask')
    produce(Compose([FrequencyMask(p=1.0)]), 'FrequencyMask')
    produce(Compose([AddGaussianSNR(p=1.0)]), 'AddGaussianSNR')
    produce(Compose([PitchShift(min_semitones=-4, max_semitones=4, p=1.0)]),
            'PitchShift')
    # FIX: p raised from 0.5 to 1.0 (see docstring).
    produce(Compose([TimeStretch(min_rate=0.8, max_rate=1.25, p=1.0)]),
            'TimeStretch')
    produce(Compose([
        AddGaussianNoise(min_amplitude=0.001, max_amplitude=0.015, p=1.0)
    ]), 'AddGaussianNoise')
    produce(Compose([Shift(min_fraction=-0.5, max_fraction=0.5, p=1.0)]),
            'Shift')
    produce(Compose([
        Shift(min_fraction=-0.5, max_fraction=0.5, rollover=False, p=1.0)
    ]), 'Shift without rollover')
    produce(Compose([Normalize(p=1.0)]), 'Normalize')
    produce(Compose([
        AddImpulseResponse(p=1.0, ir_path=os.path.join(DEMO_DIR, "ir"))
    ]), 'AddImpulseResponse')
    produce(Compose([Resample(p=1.0)]), 'Resample')
    produce(Compose([ClippingDistortion(p=1.0)]), 'ClippingDistortion')
    produce(Compose([
        AddBackgroundNoise(sounds_path=os.path.join(DEMO_DIR,
                                                    "background_noises"),
                           p=1.0)
    ]), 'AddBackgroundNoise')
    produce(Compose([
        AddShortNoises(
            sounds_path=os.path.join(DEMO_DIR, "short_noises"),
            min_snr_in_db=0,
            max_snr_in_db=8,
            min_time_between_sounds=2.0,
            max_time_between_sounds=4.0,
            burst_probability=0.4,
            min_pause_factor_during_burst=0.01,
            max_pause_factor_during_burst=0.95,
            min_fade_in_time=0.005,
            max_fade_in_time=0.08,
            min_fade_out_time=0.01,
            max_fade_out_time=0.1,
            p=1.0,
        )
    ]), 'AddShortNoises')