def move_phrases(audio_files):
    """Move new recordings into the audio assets directory, numbering them
    consecutively after the current highest phrase id."""
    phrase_num = get_max_phrase_id() + 1
    for file_path in audio_files:
        log.info(f'Moving {file_path}...')
        shutil.move(file_path,
                    os.path.join(AUDIO_ASSETS_DIR, f'phrase_{phrase_num}.mp3'))
        phrase_num += 1

def remove_phrase(phrase_id):
    """
    Used to delete a phrase from the library, whilst maintaining a consistent,
    consecutive id order.
    """
    log.info(f'Removing phrase {phrase_id}...')
    max_id = get_max_phrase_id()
    max_phrase_file = path.join(AUDIO_ASSETS_DIR, f'phrase_{max_id}.mp3')
    max_phrase = Phrase.from_mp3(max_id, max_phrase_file)

    # Update data file
    data = load_json(DATA_FILE)

    to_remove = None
    for category_id, phrase_set in data['phrases'].items():
        for i, phrase in enumerate(phrase_set):
            if phrase['id'] == phrase_id:
                to_remove = (category_id, i)
                break
        if to_remove:
            break

    if to_remove is None:
        raise ValueError(f'Phrase {phrase_id} not found in the data file')

    del data['phrases'][to_remove[0]][to_remove[1]]

    for i, phrase in enumerate(data['phrases'][str(max_phrase.category_id)]):
        if phrase['id'] == max_id:
            phrase['id'] = phrase_id
            break

    save_data(data, DATA_FILE)

    # Re-save the old max phrase's audio under the freed id and remove its old file
    max_phrase.id = phrase_id
    max_phrase.save_mp3(max_phrase_file)
    os.remove(max_phrase_file)

    check_missing_data()
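
# Usage sketch (the phrase ids here are hypothetical): removing phrase 3 from
# a library whose highest id is 10 deletes phrase 3's entry and renumbers
# phrase 10 to 3, so ids stay consecutive.
#
#     remove_phrase(3)
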
def assign_metadata(clips, phrases):
    """Overwrite each phrase's library file with its processed clip's audio
    (writing the phrase's tags), remove the temp clips and refresh the data
    file."""
    assert len(clips) == len(phrases), 'Each clip needs a matching phrase'

    for clip, phrase in zip(clips, phrases):
        log.info(f'Updating {phrase.file_path}...')
        phrase.save_mp3(clip)
        os.remove(clip)
    update_data(phrases)

def audio_to_json(data_file=DATA_FILE):
    """Read every phrase MP3 in the library and append its metadata to the
    data file, sorted by category then id."""
    phrases = []
    for file_path, phrase_id in loop_phrases():
        phrase = Phrase.from_mp3(phrase_id, file_path)
        log.info(f'Processing Phrase {phrase_id}...')
        phrases.append(phrase)

    phrases = sorted(phrases, key=lambda x: (x.category_id, x.id))
    append_data(phrases, data_file=data_file)
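
# Usage sketch: rebuild the metadata in the data file from whatever MP3s are
# currently in the audio library (paths come from this module's constants).
#
#     audio_to_json()
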
def reduce_noise(
    phrase_file,
    noise_file=None,
    sampling_rate=44100,
    lufs=-14.0,
    bitrate=128,
):
    """
    Uses the noisereduce library to produce WAV files reducing the
    noise and normalising the volume to -14 LUFS
    """

    noise_file = noise_file or path.join(CURRENT_DIR, 'noise.wav')

    if phrase_file[-3:] != 'wav':
        phrase_file = convert_audio(phrase_file,
                                    'wav',
                                    sampling_rate=sampling_rate)

    with SuppressWarnings(['librosa', 'audioread']):
        noise, _ = librosa.load(noise_file, sr=sampling_rate)
        phrase, _ = librosa.load(phrase_file, sr=sampling_rate)

    create_tmp_dir()

    log.info('Reducing noise...')
    reduced_noise = noisereduce.reduce_noise(
        audio_clip=phrase,
        noise_clip=noise,
        verbose=False,
    )

    log.info('Normalising loudness...')
    meter = pyloudnorm.Meter(sampling_rate)
    loudness = meter.integrated_loudness(reduced_noise)
    with SuppressWarnings(['pyloudnorm']):
        normalised_audio = pyloudnorm.normalize.loudness(
            reduced_noise, loudness, lufs)

    def _assign_ext(fpath, extension):
        return path.splitext(fpath)[0] + '.' + extension

    tmp_file = path.join(TMP_DIR, path.basename(phrase_file))
    tmp_mp3 = _assign_ext(tmp_file, 'mp3')
    tmp_wav = _assign_ext(tmp_file, 'wav')
    wavfile.write(tmp_wav, sampling_rate, normalised_audio)

    if os.path.exists(tmp_mp3):
        os.remove(tmp_mp3)

    convert_audio(tmp_wav, 'mp3', sampling_rate, bitrate)
    os.remove(tmp_wav)
    return tmp_mp3
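
# Usage sketch (the input path is hypothetical): clean up a raw recording
# against the default noise profile (noise.wav in CURRENT_DIR) and get back
# the path of a temporary, loudness-normalised MP3.
#
#     cleaned = reduce_noise('recordings/phrase_raw.wav', lufs=-14.0)
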
def split_audio(
    mp3_path,
    threshold=500,
    start_clip=50,
    end_clip=750,
    output_prefix='clip-',
):
    """
    Uses a simple algorithm to split clips in an audio file based on 
    silence inbetween noise.
    """
    audio = AudioSegment.from_mp3(mp3_path)
    create_tmp_dir()

    clips = []
    clip_start = None
    clip_end = None
    consecutive = 0
    for i, x in enumerate(audio):
        if x.max > threshold:
            if clip_start is None:
                consecutive += 1
            else:
                consecutive = 0
        else:
            if clip_start is None:
                consecutive = 0
            else:
                consecutive += 1

        # Compare against None rather than truthiness: a clip can start at
        # index 0, which is falsy.
        if clip_start is None and consecutive > start_clip:
            clip_start = max(0, i - 100)
            consecutive = 0
        elif clip_start is not None and consecutive > end_clip:
            clip_end = max(clip_start, (i - end_clip) + 100)
            consecutive = 0
            clips.append((clip_start, clip_end))
            clip_start = None
            clip_end = None

    clip_paths = []
    for i, start_end in enumerate(clips):
        start, end = start_end
        clip = audio[start:end + 1]
        log.info(f'Exporting clip {i + 1}...')
        output_path = path.join(TMP_DIR, f'{output_prefix}{i + 1}.mp3')
        handler = clip.export(output_path, format='mp3')
        handler.close()
        clip_paths.append(output_path)
    return clip_paths
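
# Usage sketch (the session path is hypothetical): split a long recording on
# silence, then denoise each clip; both steps write to the temp directory.
#
#     clips = split_audio('recordings/session.mp3', threshold=500)
#     cleaned = [reduce_noise(clip) for clip in clips]
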
def convert_audio(input_path, output_ext, sampling_rate=44100, bitrate=128):
    """Convert an audio file with ffmpeg and return the output path."""
    output_path = input_path[:-3] + output_ext
    log.info(f'Converting {input_path} to {output_path}...')

    opts = ['-ar', str(sampling_rate), '-ac', '2']
    if input_path[-3:] == 'mp3':
        # Strip any embedded artwork stream from the source MP3
        opts.insert(0, '-vn')
    if output_ext == 'mp3':
        # The bitrate flag only applies when encoding to MP3
        opts += ['-b:a', f'{bitrate}k']

    cmd = ['ffmpeg', '-i', input_path] + opts + [output_path, '-y']
    with open(os.devnull, 'w') as devnull:
        subprocess.call(cmd, stdout=devnull, stderr=subprocess.STDOUT)
    return output_path
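
# Usage sketch (the file name is hypothetical): convert an MP3 to a 22.05 kHz
# WAV next to the original; the returned path is 'clip.wav'.
#
#     wav_path = convert_audio('clip.mp3', 'wav', sampling_rate=22050)
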
    def save_mp3(self, audio_file=None):
        """Export `audio_file` (this phrase's own file by default) to
        `self.file_path` as MP3, writing the phrase's tags."""
        audio_file = audio_file or self.file_path
        clip = AudioSegment.from_mp3(audio_file)
        log.info(f'Exporting {self.file_path}...')
        handler = clip.export(self.file_path, format='mp3', tags=self.tags)
        handler.close()
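
# Usage sketch (the id and path are hypothetical): re-export an existing
# library file in place, which rewrites its ID3 tags from the Phrase object.
#
#     phrase = Phrase.from_mp3(7, path.join(AUDIO_ASSETS_DIR, 'phrase_7.mp3'))
#     phrase.save_mp3()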