Esempio n. 1
0
def overlayAudio(offset, interval):
    """Overlay every `interval`-th beat (starting at `offset`) onto the audio
    as an audible onset marker and write a fragment of the result to test.wav.

    Relies on module-level `beats` and `audio` arrays — TODO confirm they are
    defined at module scope by the surrounding file.
    """
    onsetMarker = AudioOnsetsMarker(onsets=1.0 * beats[offset::interval])
    audioMarked = onsetMarker(audio)
    writer = MonoWriter(filename='test.wav')
    # BUG FIX: 0.25 * np.size(...) is a float, and float slice indices raise
    # TypeError; truncate to int before slicing.
    beginIndex = int(0.25 * np.size(audioMarked))
    endIndex = int(0.35 * np.size(audioMarked))
    writer(audioMarked[beginIndex:endIndex])  # Only write fragment
Esempio n. 2
0
	def _flush_save_audio_buffer(self, queue):
		"""Consume (filename, audio, tracklist) tuples from *queue* and write
		each one to disk as a 320 kbps MP3, appending the tracklist entries to
		the shared CSV file. A tuple with a None filename is the stop sentinel.
		"""
		while True:
			fname, samples, tracks = queue.get()
			if fname is None:
				# Sentinel received: shut the worker down.
				logger.debug('Stopping audio saving thread!')
				return
			logger.debug('Saving {} to disk'.format(fname))
			mp3_writer = MonoWriter(filename=fname, bitrate=320, format='mp3')
			mp3_writer(np.array(samples, dtype='single'))
			# Append this batch's tracklist lines to the CSV log.
			with open(self.save_dir_tracklist, 'a+') as csvfile:
				track_writer = csv.writer(csvfile)
				for entry in tracks:
					track_writer.writerow([entry])
Esempio n. 3
0
def thresholdAudio(audioPath='testDownload/', t=-30, fs=44100):
    '''
    Run thresholdAudio to trim all the mp3 audio files in audioPath/../..
    where the first .. is related to the queryText and the second .. is
    related to the location of the freesound track. It was thought to be
    used after soundDownload.py from the sms-tools package.

    :param audioPath: path where sounds were downloaded (possible path used for soundDownload.py). Default: testDownload/
    :param t: threshold (in dB) to trim audio files relative to the max value of the file. Default: -30
    :param fs: sample rate for the output sound. Default: 44100
    :return: None (prints 'Done!!' when finished)
    '''

    thTimes = 10 ** (t / 20)                                        #threshold: dB to linear amplitude ratio
    instrument = ls(audioPath)                                      #read the queryText path inside the given path
    audioTrack = [ls(str(key)) for key in instrument]               #read the different folders inside each queryText path
    a, b = np.shape(audioTrack)                                     #size of the matrix
    finalArray = [os.path.join(str(audioTrack[i][j]), arch.name)
                  for i in np.arange(a) for j in np.arange(b) for arch in
                  Path(str(audioTrack[i][j])).iterdir()
                  if arch.name.endswith('.mp3')]                    #array for each track

    for key in finalArray:
        track = MonoLoader(filename=key, sampleRate=fs)()           #read audio and transform into mono
        maximo = np.max(abs(track))                                 #set the abs maximum
        i = 0
        j = -1
        while abs(track[i]) < maximo * thTimes:                     #find the first significant value
            i += 1
        while abs(track[j]) < maximo * thTimes:                     #find the last significant value
            j -= 1
        # BUG FIX: track[i:j] with negative j excluded the last significant
        # sample itself (and track[i:j+1] would be empty when j == -1).
        # Convert j to an inclusive positive end index instead.
        shortTrack = track[i:len(track) + j + 1]                    #build the trimmed track
        MonoWriter(filename=key + 'computed.wav')(shortTrack)       #write the file at same location as given
    print('Done!!')
Esempio n. 4
0
    def convert_to_wav(audiofile, samplerate=44100):
        """Decode *audiofile* to mono PCM via Essentia and write it as a WAV.

        Returns the path of the converted file under /tmp. A fresh UUID is
        used so concurrent conversions never collide on the filename.
        """
        logger.debug('{0}: converting to WAV'.format(audiofile))

        # Convert to WAV using Essentia so that timbral models always read WAV file
        output_filename = '/tmp/{0}-converted.wav'.format(str(uuid.uuid4()))
        samples = MonoLoader(filename=audiofile, sampleRate=samplerate)()
        wav_writer = MonoWriter(filename=output_filename, format='wav', sampleRate=samplerate)
        wav_writer(samples)
        return output_filename
Esempio n. 5
0
def get(inputDir, descExt):
    """Walk `inputDir`, convolve every file whose name contains `descExt`
    with an impulse response, and write the result next to the source file
    as 'R1_<name>'. Each file is processed in a forked child process.

    Python 2 code (uses `print` statements).

    NOTE(review): `exception` is only mutated inside the forked child, and
    those changes are lost when the child calls os._exit(0) — the parent's
    copy stays empty, so this function always returns {}. Confirm whether
    error reporting was ever expected to work here.
    """
    exception = {}
    output = {}  # NOTE(review): never used in this function

    for path, dname, fnames in os.walk(
            inputDir
    ):  # dname directories names in current directory, fnames file names in current directory.
        for fname in fnames:
            if descExt in fname.lower():
                new_pid = os.fork()
                if new_pid == 0:  # new_pid == 0 means we are in the forked child process.
                    try:  # try..except..finally guarantees the child process _always_ exits
                        file_name = path + "/" + fname
                        [Sound, Fs, nChannels, md5, bit_rate,
                         codec] = AudioLoader(filename=file_name)()  # load the file
                        Sound = Sound[:,
                                      0]  # the algorithm always returns two channels; keep the first
                        # Python 2 print statement
                        print file_name

                        impulse, FsImpulse = LoadImpulse.get_impulse()
                        impulse = impulse.astype('float32',
                                                 casting='same_kind')

                        # Resample the impulse response to the file's rate if they differ.
                        if Fs != FsImpulse:
                            Rs = Resample(inputSampleRate=FsImpulse,
                                          outputSampleRate=Fs)
                            impulse = Rs(impulse)

                        final = np.convolve(Sound, impulse)

                        # MonoWriter's format name is 'aiff', not 'aif'.
                        if descExt == '.aif': descExt = '.aiff'

                        mw = MonoWriter(filename=path + '/R1_' + fname,
                                        sampleRate=Fs,
                                        format=descExt.split('.')[1])
                        mw(final)
                        print 'Done!'

                    except Exception:
                        exception[fname] = [
                            'oops'
                        ]  # record that an exception occurred for this file (lost at os._exit — see docstring)
                        # pass
                    finally:
                        os._exit(0)  # terminate the fork and return control to the parent
                else:
                    child = new_pid
                os.waitpid(
                    child, 0
                )  # block until the child finishes: avoids parallel forks, which limits throughput
    # Always {} in practice — see the NOTE in the docstring.
    return exception
Esempio n. 6
0
def VCTK(model_config):
    """Preprocess the VCTK dataset for 8 kHz DBE training.

    Copies the clean train/test sets verbatim, then for every noisy WAV
    produces a 'noisy8k' counterpart: the file is re-read at 8 kHz,
    upsampled back to 44.1 kHz, and length-matched (trim or zero-pad) to
    the 44.1 kHz version before being written out.
    """
    print("Preprocessing VCTK dataset")
    src_root = model_config["raw_data_path"] + "/VCTK"
    dst_root = model_config["preprocessed_data_path"] + "/VCTK_8k_DBE"

    # Copy the clean sets unchanged.
    for clean_dir in ["/clean_trainset_wav", "/clean_testset_wav"]:
        shutil.copytree(src_root + clean_dir, dst_root + clean_dir)

    for noisy_dir in ["/noisy_trainset_wav", "/noisy_testset_wav"]:
        out_dir = dst_root + noisy_dir.replace("noisy", "noisy8k")
        os.makedirs(out_dir)

        for root, _dirs, files in os.walk(src_root + noisy_dir):
            for fname in files:
                if not fname.endswith('.wav'):
                    continue
                path = os.path.join(root, fname)
                # Read the same file at both rates; MonoLoader resamples on load.
                full_rate = MonoLoader(filename=path, sampleRate=44100)()
                low_rate = MonoLoader(filename=path, sampleRate=8000)()

                upsampled = Resample(inputSampleRate=8000,
                                     outputSampleRate=44100)(low_rate)

                # Force the upsampled signal to the 44.1 kHz reference length.
                surplus = len(upsampled) - len(full_rate)
                if surplus > 0:
                    upsampled = upsampled[:len(full_rate)]
                elif surplus < 0:
                    upsampled = np.pad(upsampled, (0, abs(surplus)),
                                       'constant', constant_values=(0, 0))

                out_name = out_dir + "/" + fname.split(".")[0] + "_8k.wav"
                MonoWriter(filename=out_name, sampleRate=44100)(upsampled)
def process_data_and_extract_profiles(
        segment_id: int, song_file
) -> Tuple[int, Tuple, Tuple, Tuple, Tuple, Tuple, Tuple, Tuple]:
    """Extract BPM, timbre and moods from a segmented song file.

    Parameters
    ----------
    segment_id
        id of the segment of the audiofile corresponds to
    song_file
        the audio from a segmented song

    Returns
    -------
    Tuple[int, Tuple, Tuple, Tuple, Tuple, Tuple, Tuple, Tuple]
        The segment_id followed by tuples describing BPM, timbre and
        all mood classifications
    """

    # One temp file for the WAV audio, one for the classifier features.
    wav_tmp = tempfile.NamedTemporaryFile(delete=True, suffix='.wav')
    features_tmp = tempfile.NamedTemporaryFile(delete=True)

    # Write the segment to disk so the low-level extractor can read it.
    MonoWriter(filename=wav_tmp.name)(song_file)
    make_low_level_data_file(wav_tmp.name, features_tmp.name)

    # The WAV is no longer needed once the feature file exists.
    wav_tmp.close()

    tempo, tempo_confidence = get_song_bpm(song_file)
    bpm_tuple = (tempo, tempo_confidence)

    (timbre, mood_relaxed, mood_party, mood_aggressive, mood_happy,
     mood_sad) = get_classifier_data(features_tmp.name)

    # Closing the tempfile also deletes it.
    features_tmp.close()

    return (segment_id, bpm_tuple, timbre, mood_relaxed, mood_party,
            mood_aggressive, mood_happy, mood_sad)
Esempio n. 8
0
    def _flush_save_audio_buffer(self, queue):
        """Consume (filename, audio, tracklist) tuples from *queue*, writing
        each one as a WAV (stereo or mono depending on self.stereo) and
        appending the tracklist entries to the shared CSV file. A tuple with
        a None filename is the stop sentinel.
        """
        while True:
            fname, samples, tracks = queue.get()
            if fname is None:
                # Sentinel received: shut the worker down.
                logger.debug('Stopping audio saving thread!')
                return
            logger.debug('Saving {} to disk, length {}'.format(
                fname, len(samples)))
            writer_cls = AudioWriter if self.stereo else MonoWriter
            wav_writer = writer_cls(filename=fname, format='wav')
            wav_writer(np.array(samples.T, dtype='single'))
            # Append this batch's tracklist lines to the CSV log.
            logger.debug('Saving tracklist')
            with open(self.save_dir_tracklist, 'a+') as csvfile:
                track_writer = csv.writer(csvfile)
                for entry in tracks:
                    track_writer.writerow([entry])
Esempio n. 9
0
# BPM_floor = np.floor(BPM)
# valid_BPMs = [BPM_floor - 1, BPM_floor, BPM_floor+1, BPM_floor+2]
# form bpm in valid_BPMs:
# 	sPerBeat = (60./BPM)
# 	beatsInSong = (len(audio)/44100.) / sPerBeat
# 	perfectBeats = sPerBeat * range(int(beatsInSong) - 1) # One beat shorter than the original file for phase aligning
# 	np.correlate(beats, perfectBeats, mode = 'valid')

# Overlay the audio file with onsets
# (AudioOnsetsMarker presumably adds an audible marker at each beat
#  position — TODO confirm against the Essentia docs; the /2. halves the
#  signal, apparently to leave headroom for the markers)
onsetMarker = AudioOnsetsMarker(onsets=beats)
audioMarked = onsetMarker(audio / 2.)

# Stretch the result
#from librosa.effects import time_stretch
#audioMarked = time_stretch(audioMarked, 175./172.)

# Output the marked file
writer = MonoWriter(filename='test.wav')
writer(audioMarked[:])  # writes the whole signal — the [:] slice spans everything

# Play the result with an external player
from subprocess import call
call(["mplayer", 'test.wav'])

# Display the waveform
#print 'Displaying waveform'
#plt.plot(audio[::4410]) # 44.1 kHz, plotted every 4410th sample => 10 samples per second
#for b in beats:
#	plt.axvline(x=b*10)
#plt.show() # unnecessary if you started "ipython --pylab"
Esempio n. 10
0
 def get_fool(self, songid, stretch_rate):
     """Time-stretch the cached waveform for *songid* by *stretch_rate*
     and persist the result both as a WAV file (fool_audio_path) and as
     a .npy array (fool_npy_path)."""
     source = np.load(os.path.join(self.npy_path, songid)+'.npy')
     stretched = librosa.effects.time_stretch(source, stretch_rate)
     wav_path = os.path.join(self.fool_audio_path, songid)+'.wav'
     MonoWriter(filename=wav_path, format='wav', sampleRate=self.fs)(stretched)
     np.save(os.path.join(self.fool_npy_path, songid)+'.npy', stretched)
Esempio n. 11
0
def _pad_or_trim(audio, target_len):
    """Return *audio* forced to exactly *target_len* samples.

    Longer audio is truncated at the tail; shorter audio is zero-padded.
    """
    len_diff = len(audio) - target_len
    if len_diff > 0:
        return audio[:target_len]
    if len_diff < 0:
        return np.pad(audio, (0, abs(len_diff)), 'constant',
                      constant_values=(0, 0))
    return audio


def _degrade_to_8k(audio):
    """Simulate an 8 kHz transmission channel on 44.1 kHz audio.

    Downsamples to 8 kHz, upsamples back to 44.1 kHz, then length-matches
    the result to the input (resampling can change the sample count).
    """
    down = Resample(inputSampleRate=44100, outputSampleRate=8000)(audio)
    up = Resample(inputSampleRate=8000, outputSampleRate=44100)(down)
    return _pad_or_trim(up, len(audio))


def _export_excerpts(audio, output_dir, name_prefix, duration, degrade):
    """Write every complete *duration*-second excerpt of *audio* as WAV.

    Files are named ``<output_dir>/<name_prefix>_<j>.wav``. When *degrade*
    is True each excerpt first passes through the simulated 8 kHz channel.
    A trailing excerpt shorter than *duration* seconds is dropped.
    """
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    starts = np.arange(0, len(audio), 44100 * duration)
    for j, start in enumerate(starts):
        # Only keep complete excerpts; guarantees starts[j + 1] exists.
        if len(audio[start:]) > (44100 * duration):
            excerpt = audio[starts[j]:starts[j + 1]]
            if degrade:
                excerpt = _degrade_to_8k(excerpt)
            output_name = output_dir + "/" + name_prefix + "_" + str(j) + ".wav"
            MonoWriter(filename=output_name, sampleRate=44100)(excerpt)


def DAPS(model_config):
    """Preprocess the DAPS dataset for 8 kHz DBE training.

    Train split (subjects 1-9, two scripts each, female and male): the
    clean recordings are cut into 5-second excerpts; device/room
    recordings are additionally degraded through a 44.1k -> 8k -> 44.1k
    resampling chain. Test split (subject 10, script 5) is written whole;
    its device recordings are loaded at 8 kHz, upsampled to 44.1 kHz and
    length-matched to the clean reference.
    """
    print("Preprocessing DAPS dataset")
    DAPS_path = model_config["raw_data_path"] + "/DAPS/"
    DAPS_preprocessed_path = model_config[
        "preprocessed_data_path"] + "/DAPS_8k_DBE/"

    # create processed dir
    if not os.path.exists(DAPS_preprocessed_path):
        os.makedirs(DAPS_preprocessed_path)

    devices_and_rooms = [
        'ipad_bedroom1', 'ipad_confroom1', 'ipad_confroom2',
        'ipad_livingroom1', 'ipad_office1', 'ipad_office2',
        'ipadflat_confroom1', 'ipadflat_office1', 'iphone_bedroom1',
        'iphone_livingroom1'
    ]  # 'ipad_balcony1' and 'iphone_balcony1' excluded

    duration = 5  # excerpt length in seconds
    target = "clean"

    # Train samples
    for i in np.arange(9):
        subject = str(i + 1)
        scripts = [str(i % 5 + 1),
                   str((i + 1) % 5 + 1)]  # 2 scripts per subject

        for script in scripts:
            for sex in ("f", "m"):  # female first, then male
                stem = sex + subject + "_script" + script

                # Clean reference: excerpted as-is.
                target_file = (DAPS_path + target + "/" + stem + "_" +
                               target + ".wav")
                target_audio = MonoLoader(filename=target_file,
                                          sampleRate=44100)()
                _export_excerpts(target_audio,
                                 DAPS_preprocessed_path + target,
                                 stem + "_" + target, duration, False)

                # Device recordings: excerpted and degraded via 8 kHz chain.
                for device_and_room in devices_and_rooms:
                    device_file = (DAPS_path + device_and_room + "/" + stem +
                                   "_" + device_and_room + ".wav")
                    device_audio = MonoLoader(filename=device_file,
                                              sampleRate=44100)()
                    _export_excerpts(device_audio,
                                     DAPS_preprocessed_path + device_and_room,
                                     stem + "_" + device_and_room,
                                     duration, True)

    # Test samples: subject 10, script 5, written whole (no excerpting)
    for sex in ("f", "m"):
        stem = sex + "10_script5"

        target_file = DAPS_path + target + "/" + stem + "_" + target + ".wav"
        target_audio = MonoLoader(filename=target_file, sampleRate=44100)()

        output_name = (DAPS_preprocessed_path + target + "/" + stem + "_" +
                       target + ".wav")
        MonoWriter(filename=output_name, sampleRate=44100)(target_audio)

        for device_and_room in devices_and_rooms:
            device_file = (DAPS_path + device_and_room + "/" + stem + "_" +
                           device_and_room + ".wav")
            # Load directly at 8 kHz, then upsample back to 44.1 kHz.
            device_audio = MonoLoader(filename=device_file, sampleRate=8000)()
            audio_8k_to_44k = Resample(inputSampleRate=8000,
                                       outputSampleRate=44100)(device_audio)
            # Length-match to the clean reference of the same speaker/script.
            audio_8k_to_44k = _pad_or_trim(audio_8k_to_44k, len(target_audio))

            output_name = (DAPS_preprocessed_path + device_and_room + "/" +
                           stem + "_" + device_and_room + ".wav")
            MonoWriter(filename=output_name, sampleRate=44100)(audio_8k_to_44k)
Esempio n. 12
0
def save_audio(audio, filename, file_format='wav', bit_rate=320):
    """Write *audio* samples to *filename* using Essentia's MonoWriter.

    *file_format* and *bit_rate* are passed straight through to the writer.
    """
    from essentia.standard import MonoWriter
    writer = MonoWriter(filename=filename, bitrate=bit_rate, format=file_format)
    writer(audio)