Example #1
File: test.py Project: jiaaro/pydub
    def test_invert(self):
        s_mono = Sine(100).to_audio_segment()
        s = s_mono.set_channels(2)

        try:
            s_mono.invert_phase(channels=(1, 0))
        except Exception:
            pass
        else:
            raise Exception("AudioSegment.invert_phase() didn't catch a bad input (mono)")

        s_inv = s.invert_phase()
        self.assertFalse(s == s_inv)
        self.assertTrue(s.rms == s_inv.rms)
        self.assertTrue(s == s_inv.invert_phase())

        s_inv_right = s.invert_phase(channels=(0, 1))
        left, right = s_inv_right.split_to_mono()

        self.assertFalse(s_mono == s_inv_right)
        self.assertFalse(s_inv == s_inv_right)
        self.assertTrue(left == s_mono)
        self.assertFalse(right == s_mono)

        s_inv_left = s.invert_phase(channels=(1, 0))
        left, right = s_inv_left.split_to_mono()

        self.assertFalse(s_mono == s_inv_left)
        self.assertFalse(s_inv == s_inv_left)
        self.assertFalse(left == s_mono)
        self.assertTrue(right == s_mono)
Example #2
def mid2wav(file):
    mid = MidiFile(file)
    output = AudioSegment.silent(duration=mid.length * 1000.0)  # mid.length is in seconds

    tempo = 130  # bpm (fixed; set_tempo meta messages are ignored)

    for track in mid.tracks:
        # position of rendering in ms
        current_pos = 0.0
        current_notes = defaultdict(dict)

        for msg in track:
            current_pos += ticks_to_ms(msg.time, tempo, mid)
            if msg.type == 'note_on':
                if msg.note in current_notes[msg.channel]:
                    current_notes[msg.channel][msg.note].append(
                        (current_pos, msg))
                else:
                    current_notes[msg.channel][msg.note] = [(current_pos, msg)]

            if msg.type == 'note_off':
                # guard against an unmatched note_off (no pending note_on)
                if not current_notes[msg.channel].get(msg.note):
                    continue
                start_pos, start_msg = current_notes[msg.channel][
                    msg.note].pop()

                duration = math.ceil(current_pos - start_pos)
                signal_generator = Sine(note_to_freq(msg.note, 500))
                rendered = signal_generator.to_audio_segment(
                    duration=max(duration - 50, 1),  # leave room for the fades
                    volume=-20).fade_out(100).fade_in(30)

                output = output.overlay(rendered, position=start_pos)

    output.export("./output/rendered.wav", format="wav")
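The function leans on two helpers it does not define, ticks_to_ms and note_to_freq. Hypothetical stand-ins, assuming mido's MidiFile (which exposes ticks_per_beat) and that note_to_freq's second argument is the reference pitch for A4:

def ticks_to_ms(ticks, tempo_bpm, mid):
    # one beat lasts 60000/bpm ms; mido's MidiFile knows its ticks per beat
    return ticks * (60000.0 / tempo_bpm) / mid.ticks_per_beat

def note_to_freq(note, concert_a=440.0):
    # equal temperament: MIDI note 69 is A4 at the reference pitch
    return concert_a * 2 ** ((note - 69) / 12.0)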
Example #3
    def __init__(self, char_speed_wpm, inter_character_wpm, inter_word_wpm):
        self.generator = Sine(550)

        self.dit_length = int(1200 / char_speed_wpm)
        self.dah_length = self.dit_length * 3
        self.character_gap = int(1200 / inter_character_wpm) * 3
        self.word_gap = int(1200 / inter_word_wpm) * 7

        self.atom_gap_sample = self.atom_gap()

        dit_sample = self.dit()
        dah_sample = self.dah()
        inter_character_sample = self.inter_character()
        self.inter_word_sample = self.inter_word()

        self.synth = {
            ".": dit_sample,
            "-": dah_sample,
            " ": inter_character_sample,
            "_": self.inter_word_sample
        }

        print(
            f"{self.dit_length} {self.dah_length} {self.character_gap} {self.word_gap}"
        )
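The constructor calls several tone and gap builders (dit(), dah(), atom_gap(), inter_character(), inter_word()) that are not shown. Under the PARIS timing convention already reflected in the arithmetic above (a dit lasts 1200/wpm ms, a dah three dits), they might look like this sketch, assuming pydub's AudioSegment is imported:

    def dit(self):
        return self.generator.to_audio_segment(duration=self.dit_length)

    def dah(self):
        return self.generator.to_audio_segment(duration=self.dah_length)

    def atom_gap(self):
        # one dit of silence between the elements of a character
        return AudioSegment.silent(duration=self.dit_length)

    def inter_character(self):
        return AudioSegment.silent(duration=self.character_gap)

    def inter_word(self):
        return AudioSegment.silent(duration=self.word_gap)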
Example #4
def make_god_song_01(complexity=None,
                     amount=None,
                     seed=None,
                     save_path=None,
                     scale=None):
    logger.info("Generating God song to {}".format(save_path))
    if seed is not None:
        random.seed(seed)
    if not complexity:
        complexity = random.randint(1, 4)
    if not scale:
        scale = random.choice(list(scale_dict))  # choose a random scale
    if not amount:
        amount = random.choice(list_beats)
    result = AudioSegment.silent(duration=0)
    random_list = random_with_complexity(random, scale, complexity, amount)

    for beat in random_list:
        gen = Sine(beat["frequency"])
        sine = gen.to_audio_segment(duration=beat["duration"]).apply_gain(
            beat["velocity"])
        sine = sine.fade_in(beat["fade_in"]).fade_out(beat["fade_out"])
        result += sine

    if save_path:
        result.export(save_path, format="mp3")

    return result
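random_with_complexity is not shown; the loop only requires it to yield dicts carrying a frequency in Hz, a duration in ms, a velocity as dB of gain, and fade times in ms. A hypothetical element:

beat = {"frequency": 440.0, "duration": 250, "velocity": -6,
        "fade_in": 30, "fade_out": 60}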
Example #5
class Audio:
    center_freq = 440  # A4 on a keyboard

    def __init__(self, dims):
        self.dims = dims
        self.sine_generator = Sine(self.center_freq)
        # 5 ms of effective silence (huge negative gain) followed by a 50 ms tone
        self.sine_tone = self.sine_generator.to_audio_segment(5,
                                                              volume=-999999)
        self.sine_tone = self.sine_tone + self.sine_generator.to_audio_segment(
            50)

    def generate1(self, circle):
        # circle is [int, int, int] = [x, y, r]
        self.generate(circle, self.dims)

    def generate(self, circle, dimensions):
        """
        circle is a list of (x, y, r) ?votes
        dimensions is a list of (x, y)
        """
        # sine_generator= Sine(self.center_freq)
        # sine_tone = sine_generator.to_audio_segment(50)
        if circle[0] > dimensions[0] / 2 + 10:
            # target is on the right side of vision
            # TODO: make this parabolic / aka more aggressive panning
            play(self.sine_tone.pan(+(circle[0] / dimensions[0])))
        elif circle[0] < dimensions[0] / 2 - 10:
            # target is on left side of vision
            # TODO: make this parabolic / aka more aggressive panning
            play(
                self.sine_tone.pan(-(
                    (dimensions[0] - circle[0]) / dimensions[0])))
        else:
            play(self.sine_tone)
Example #8
def SpeakLongText(long_text, max_text_length=GOOGLE_MAX_TEXT_LENGTH):
    "Converts a full length long_text text into an mp3"

    # Split the long_text into short_texts small enough to TTS
    long_text_as_short_texts = SplitTextToShortTexts(long_text,
                                                     max_text_length)

    # Allocate a temporary directory
    with tempfile.TemporaryDirectory() as temp_dir:

        # Get the event loop
        loop = asyncio.get_event_loop()
        concurrency_limit = asyncio.Semaphore(
            MAX_CONCURRENT_GOOGLE_API_REQUESTS)

        # NOTE: Google's text-to-speech library creates a TCP connection for each request but does not close it.
        #       The connections stay open in the background even after the Client is de-referenced (?!).
        #       Each one holds a file descriptor, so for a large book we hit the file-descriptor limit and crash.
        #       Running each TTS request in its own process guarantees that, at least at the end of the chapter, all of them are released.

        # Manually create an executor so we can force it to clean up after
        with concurrent.futures.ProcessPoolExecutor(
                max_workers=MAX_CONCURRENT_GOOGLE_API_REQUESTS) as executor:

            # Call to spawn a thread to generate each short text
            async def GenerateShortTextInThread(loop, short_text, temp_dir):
                async with concurrency_limit:
                    return await loop.run_in_executor(executor, SpeakShortText,
                                                      short_text, temp_dir)

            # Call to generate MP3s for all the short texts (concurrently)
            async def SimultaneouslyGenerateSeveralShortTexts(
                    loop, all_short_texts, temp_dir):
                mp3_generation_tasks = [
                    GenerateShortTextInThread(loop, short_text, temp_dir)
                    for short_text in all_short_texts
                ]
                return await asyncio.gather(*mp3_generation_tasks)

            # Generate an MP3 for each short_text
            mp3s_of_short_texts = loop.run_until_complete(
                SimultaneouslyGenerateSeveralShortTexts(
                    loop, long_text_as_short_texts, temp_dir))

            # Attempt to clean up all resources
            executor.shutdown(wait=True)

        # Combine the short_texts into a single mp3
        mp3_long_text = Sine(300).to_audio_segment(duration=500)
        for mp3_short_text in mp3s_of_short_texts:
            mp3_long_text = mp3_long_text.append(
                AudioSegment.from_mp3(mp3_short_text))

        # Return the full Mp3 (as a temporary file)
        temporary_mp3 = tempfile.NamedTemporaryFile(suffix='.mp3',
                                                    delete=False)
        mp3_long_text.export(temporary_mp3.name, format="mp3")

        return temporary_mp3
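Stripped of the TTS specifics, the concurrency pattern above is: a semaphore caps how many blocking jobs occupy the process pool at once. A minimal self-contained sketch (not the project's code; work is a stand-in for a blocking call such as one TTS request):

import asyncio
import concurrent.futures

def work(n):
    return n * n  # stand-in for a blocking call

async def main():
    loop = asyncio.get_running_loop()
    sem = asyncio.Semaphore(4)  # at most four jobs in flight
    with concurrent.futures.ProcessPoolExecutor(max_workers=4) as executor:

        async def bounded(n):
            async with sem:
                return await loop.run_in_executor(executor, work, n)

        results = await asyncio.gather(*(bounded(n) for n in range(10)))
    print(results)

if __name__ == "__main__":
    asyncio.run(main())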
Example #9
def frequency_sweep():
    sweep = AudioSegment.empty()

    for freq in range(0, 10000, 50):
        sine = Sine(float(freq))
        sweep += sine.to_audio_segment(10, VOLUME)  # VOLUME is defined elsewhere (gain in dBFS)

    play(sweep)
Example #10
    def test_duration(self):
        one_sec = Sine(440).to_audio_segment(duration=1000)
        five_sec = Sine(440).to_audio_segment(duration=5000)
        half_sec = Sine(440).to_audio_segment(duration=500)

        self.assertAlmostEqual(len(one_sec), 1000)
        self.assertAlmostEqual(len(five_sec), 5000)
        self.assertAlmostEqual(len(half_sec), 500)
Example #11
def sineDot(value, pan, ms=100, factor=1.0, sampleRate=96000, bitDepth=32):
    cNote = 440  # concert A
    # the tiny offset keeps the frequency nonzero when value == 0
    wave = Sine(cNote * ((value + 0.00001) * factor) + 400,
                sample_rate=sampleRate,
                bit_depth=bitDepth)
    output = wave.to_audio_segment(duration=ms)
    output = output.fade_in(int(ms * 0.1))
    output = output.fade_out(int(ms * 0.1))
    output = output.pan(pan)
    return output
Example #12
def play_scale(scale):
    result = AudioSegment.silent(duration=0)

    for each_note in scale_dict[scale]:
        gen = Sine(pitch_dict_note_num[each_note])
        sine = gen.to_audio_segment(duration=300).apply_gain(-3)
        sine = sine.fade_in(50).fade_out(50)
        result += sine

    return result
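Neither lookup table is shown; the loop only needs a scale name mapped to note names and each note name mapped to a frequency in Hz. Plausible (hypothetical) shapes, using standard equal-temperament values:

scale_dict = {"C_major": ["C4", "D4", "E4", "F4", "G4", "A4", "B4", "C5"]}
pitch_dict_note_num = {"C4": 261.63, "D4": 293.66, "E4": 329.63, "F4": 349.23,
                       "G4": 392.00, "A4": 440.00, "B4": 493.88, "C5": 523.25}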
Example #13
def genStereo(notes,
              audio_file,
              hop_length=128,
              sr=48000,
              crossfade=25,
              silencefade=15):
    C = hop_length * 1000.0 / sr  # milliseconds per analysis frame
    note_start = notes[0]
    start_time = note_start[0] * C
    sound = Sine(0).to_audio_segment(duration=start_time)  # leading silence

    silenceCount = 0
    for i in range(len(notes) - 1):
        noteCurr = notes[i]
        notePost = notes[i + 1]
        # render the current note whether or not a gap follows it
        durationCurr = (noteCurr[1] - noteCurr[0]) * C
        freq = pitch2freq(noteCurr[2])
        tone = Sine(freq, sample_rate=sr).to_audio_segment(
            duration=durationCurr + crossfade)
        sound = sound.append(tone, crossfade=crossfade)

        # if the next note does not start immediately, fill the gap with silence
        gap = (notePost[0] - noteCurr[1]) * C
        if gap > 0:
            silenceCount += 1
            tone = Sine(0, sample_rate=sr).to_audio_segment(
                duration=gap + silencefade)
            sound = sound.append(tone, crossfade=silencefade)

    # add the last note, keeping the same sample rate as the rest of the render
    noteLast = notes[-1]
    durationLast = (noteLast[1] - noteLast[0]) * C
    freq = pitch2freq(noteLast[2])
    tone = Sine(freq, sample_rate=sr).to_audio_segment(
        duration=durationLast + crossfade)
    sound = sound.append(tone, crossfade=crossfade)

    sound2 = AudioSegment.from_wav(audio_file)
    silence = AudioSegment.silent(duration=len(sound2) + 100)
    left = silence.overlay(sound, gain_during_overlay=-8)
    right = silence.overlay(sound2, gain_during_overlay=-8)
    stereo_sound = AudioSegment.from_mono_audiosegments(left, right)
    # strip the input path's leading directory and '.wav' suffix
    filename = './wav2wavmix/' + audio_file[6:-4] + '_mix_conti_v4.wav'
    stereo_sound.export(filename, format="wav", bitrate="48k")
    print('stereo sound file generated!')
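pitch2freq is not shown; if the note pitches are MIDI numbers, the usual equal-temperament conversion would serve as a hypothetical stand-in:

def pitch2freq(pitch, concert_a=440.0):
    # MIDI note 69 is A4
    return concert_a * 2 ** ((pitch - 69) / 12.0)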
Example #14
def makenoise():
    print("Welcome to the generator. What would you like to generate?")
    command = input()

    if command == "sine":
        print("Which frequency?")
        frequency = input()

        sine = Sine(float(frequency))
        out = sine.to_audio_segment(100)
        play(out)
Example #15
def getSinSound(freq, beat, bpm=60):
    # note length in ms: one beat at `bpm`, subdivided into 16ths and scaled by `beat`
    beat_in_mili = (60. / bpm) * 1000. / 4 * beat * 16

    sin = Sine(freq)
    sound = sin.to_audio_segment(beat_in_mili)
    sound = sound.fade_in(int(beat_in_mili * .05))
    sound = sound.fade_out(int(beat_in_mili * .05))
    return sound
Example #16
def __sequential_plot(tones, lines, labels, min_freq, min_value, tic, duration,
                      gains):
    for t in range(lines.shape[1]):
        tones += __tts(labels[t])
        __duration = int(duration / 4)

        for x, y in enumerate(lines[:, t]):
            gen = Sine(min_freq + (y - min_value) * tic)
            sine = gen.to_audio_segment(duration=duration).apply_gain(gains[t])
            # sweep the pan from hard left to hard right as x advances
            sine = sine.fade_in(__duration).fade_out(__duration).pan(
                -1.0 + x / lines.shape[0] * 2)
            tones += sine

    return tones
Example #17
def test_add_data_can_read_files_and_discovers_label(tmpdir_factory):
    label = 'foo'
    file = tmpdir_factory.mktemp(label).join('tmp.wav')
    Sine(440).to_audio_segment().export(str(file), format='wav')
    file2 = tmpdir_factory.mktemp(label).join('tmp.wav')
    Sine(440).to_audio_segment().export(str(file2), format='wav')

    audioData = AudioData()
    audioData.add_data(type='train', data=[str(file), str(file2)])
    assert len(audioData._files['train']) == 2
    assert audioData._files['train'][0]['file'] == file
    assert audioData._files['train'][0]['label'] == str(file).split('/')[-2:-1][0]
    assert audioData._files['train'][1]['file'] == file2
    assert audioData._files['train'][1]['label'] == str(file2).split('/')[-2:-1][0]
Example #18
def test_add_data_can_accept_a_label_for_mixed_files_and_directories(tmpdir_factory):
    file1 = tmpdir_factory.mktemp('dir1').join('tmp.wav')
    Sine(440).to_audio_segment().export(str(file1), format='wav')
    file2 = tmpdir_factory.mktemp('dir2').join('tmp.wav')
    Sine(440).to_audio_segment().export(str(file2), format='wav')

    dir1 = '/'.join(str(file1).split('/')[:-1])

    audioData = AudioData()
    audioData.add_data(type='train', data=[dir1, str(file2)], label='foo')
    assert len(audioData._files['train']) == 2
    assert audioData._files['train'][0]['label'] == 'foo'
    assert audioData._files['train'][1]['file'] == str(file2)
    assert audioData._files['train'][1]['label'] == 'foo'
Example #19
def test_add_data_can_read_directories(tmpdir_factory):
    file1 = tmpdir_factory.mktemp('dir1').join('tmp.wav')
    Sine(440).to_audio_segment().export(str(file1), format='wav')
    file2 = tmpdir_factory.mktemp('dir2').join('tmp.wav')
    Sine(440).to_audio_segment().export(str(file2), format='wav')

    dir1 = '/'.join(str(file1).split('/')[:-1])
    dir2 = '/'.join(str(file2).split('/')[:-1])

    audioData = AudioData()
    audioData.add_data(type='train', data=[dir1, dir2])
    assert len(audioData._files['train']) == 2
    assert audioData._files['train'][0]['label'] == str(dir1).split('/')[-1:][0]
    assert audioData._files['train'][1]['label'] == str(dir2).split('/')[-1:][0]
Example #20
def test_it_transforms_files():
    new_audio = Sine(550).to_audio_segment()
    audio = Sine(440).to_audio_segment()  # `audio` was referenced but never defined; any 1 s segment works here
    files = [{
        'audio': audio,
        'file': 'foo',
        'label': 'foo',
    }]

    transformed = get_transformed_files(files, [
        lambda audio: new_audio,
    ])

    assert len(transformed[0]['audio']) == len(files[0]['audio'])
    assert transformed[0]['audio'].get_array_of_samples() == new_audio.get_array_of_samples()
Example #21
def text_to_audio(text,
                  file_name,
                  export_file_format,  # e.g. "ogg"
                  codec=None,  # e.g. "opus"
                  frequency=700,
                  wpm=10,
                  cross_fade=2):
    unit_length_seconds = wpm_to_unit_length_seconds(wpm)
    intervals = sentence_to_intervals(text)
    segment = Sine(0).to_audio_segment(cross_fade)  # silence at the beginning for cross-fade
    for interval in intervals:
        segment = segment.append(interval_to_wave_data_segment(interval, frequency, unit_length_seconds),
                                 crossfade=cross_fade)
    segment.export(file_name,
                   format=export_file_format,
                   codec=codec)
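Of the helpers this function assumes, wpm_to_unit_length_seconds follows directly from the PARIS convention (the reference word "PARIS" is 50 units long); a hypothetical sketch:

def wpm_to_unit_length_seconds(wpm):
    # 50 units per word, so one unit lasts 60 / (50 * wpm) = 1.2 / wpm seconds
    return 1.2 / wpm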
Example #22
    def notegen(self, note, duration, timbre='marimba'):
        if timbre == 'marimba':
            # layer the fundamental with quieter 4x and 10x partials
            self.sound = (Sine(freq=note).to_audio_segment(
                duration=duration)).overlay(
                    Sine(freq=note * 4).to_audio_segment(
                        duration=duration, volume=-35)).overlay(
                            Sine(freq=note * 10).to_audio_segment(
                                duration=duration, volume=-40))

        elif timbre == 'pure':
            self.sound = Sine(freq=note).to_audio_segment(duration=duration)

        return self.sound
Example #23
 def generate_sound(self, balance, volume, dist_from_center,
                    classification):
     if classification is self.Classification.WIDE_LEFT:
         return self.generate_beeps(Sine(self.center_freq), balance, volume,
                                    dist_from_center,
                                    classification).pan(-1)
     elif classification is self.Classification.WIDE_RIGHT:
         return self.generate_beeps(Sine(self.center_freq), balance, volume,
                                    dist_from_center, classification).pan(1)
     elif classification is self.Classification.TRACK:
         return self.generate_beeps(Sine(self.center_freq), balance, volume,
                                    dist_from_center,
                                    classification).pan(balance)
     elif classification is self.Classification.BULLS:
         return Sine(self.bulls_freq).to_audio_segment(self.cycle_time_min,
                                                       volume=volume / 2)
Example #24
def create_card_segment(audio_files,
                        qa_delay=0,
                        aq_delay=0,
                        countdown=False,
                        repeat=False):
    # Question
    seg_q = AudioSegment.empty()
    for f in audio_files[0]:
        seg_q += AudioSegment.from_file(f)
    # Answer
    seg_a = AudioSegment.empty()
    for f in audio_files[1]:
        seg_a += AudioSegment.from_file(f)
    seg_pre_q_beep = Sine(2000).to_audio_segment(duration=500, volume=-30)
    seg_qa_delay = AudioSegment.silent(qa_delay)
    seg_aq_delay = AudioSegment.silent(aq_delay)
    seg_countdown = create_countdown_segment()

    track = seg_pre_q_beep + seg_q + seg_qa_delay
    if countdown:
        track += seg_countdown
    track += seg_a
    if repeat:
        pause_500 = AudioSegment.silent(500)
        track += pause_500 + seg_q + pause_500 + seg_a + pause_500
    track += seg_aq_delay
    return track
Example #25
def create_countdown_segment():
    track = AudioSegment.empty()
    for i in range(0, 3):
        track += Sine(2000 + (i * 100)).to_audio_segment(duration=200,
                                                         volume=-30)
        track += AudioSegment.silent(duration=800)
    return track
Example #26
    def make_tone(self, freq):
        # map the selected waveform name to its pydub generator class
        generators = {
            "Sine": Sine,
            "Square": Square,
            "Triangle": Triangle,
            "Sawtooth": Sawtooth,
            "Pulse": Pulse,
        }
        gen_cls = generators.get(self.waveEntry.get())
        if gen_cls is None:
            return  # unknown waveform selected; nothing to play
        tone = (gen_cls(freq).to_audio_segment(
            duration=int(self.durEntry.get())).fade_out(
                self.slider.get()).fade_in(self.slider2.get())).apply_gain(
                    self.slider3.get()) + self.slider4.get()
        play(tone)
Example #27
class Audio2:
    center_freq = 440
    sine_generator = Sine(center_freq)
    duration = 100  # ms

    def __init__(self):
        print("todo")

    def generate(self, upTime=0, invert=False):
        # `invert` is currently unused; an inverted variant was commented out upstream
        upSound = self.sine_generator.to_audio_segment(upTime * self.duration,
                                                       volume=0)
        downSound = self.sine_generator.to_audio_segment(
            (1 - upTime) * self.duration, volume=-99999)
        # twelve on/off cycles
        return (upSound + downSound) * 12

    def go(self, sound):
        play(sound)

    def master(self, upTime=0, invert=False, pid=None):
        audio = self.generate(upTime, invert)
        play(audio)
        if pid is not None:
            print("KILLING " + str(pid))
            os.kill(pid, 1)  # signal 1 == SIGHUP
            print("KILLED: " + str(pid))
Example #28
    def test_loudness(self):
        sine_dbfs = Sine(440).to_audio_segment().dBFS
        square_dbfs = Square(440).to_audio_segment().dBFS
        white_noise_dbfs = WhiteNoise().to_audio_segment().dBFS

        self.assertAlmostEqual(sine_dbfs, -3.0, places=1)
        self.assertAlmostEqual(square_dbfs, 0.0, places=1)
        self.assertAlmostEqual(white_noise_dbfs, -5, places=0)
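The expected levels follow from the RMS of each full-scale waveform: a sine's RMS is peak/sqrt(2), a square's equals its peak, and uniform white noise (which pydub's WhiteNoise appears to produce) has RMS peak/sqrt(3). A quick check:

import math
print(20 * math.log10(1 / math.sqrt(2)))  # sine: about -3.01 dBFS
print(20 * math.log10(1.0))               # square: 0.0 dBFS
print(20 * math.log10(1 / math.sqrt(3)))  # uniform noise: about -4.77 dBFS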
Example #29
    def test_with_smoke(self):
        Sine(440).to_audio_segment()
        Square(440).to_audio_segment()
        Triangle(440).to_audio_segment()

        Pulse(440, duty_cycle=0.75).to_audio_segment()
        Sawtooth(440, duty_cycle=0.75).to_audio_segment()

        WhiteNoise().to_audio_segment()
Example #30
def test_add_data_can_accept_a_label(tmpdir_factory):
    file = tmpdir_factory.mktemp('dir').join('tmp.wav')
    Sine(440).to_audio_segment().export(str(file), format='wav')

    audioData = AudioData()
    audioData.add_data(type='train', data=[str(file)], label='foo')
    assert len(audioData._files['train']) == 1
    assert audioData._files['train'][0]['file'] == file
    assert audioData._files['train'][0]['label'] == 'foo'
Example #31
def test_it_returns_one_chunk_for_excess_audio():
    audio = Sine(440).to_audio_segment() * 2
    files = [{
        'audio': audio[0:1500],
        'file': 'foo',
        'label': 'foo',
        'start_index': 0,
    }]

    chunks = audioData.slice_into_single_sample_chunks(files)
    assert len(chunks) == 1
Example #32
File: test.py Project: jiaaro/pydub
 def test_lowpass_filter_cutoff_frequency(self):
     # A sine wave should not be affected by an LPF 3 octaves higher
     s = Sine(100).to_audio_segment()
     less_treble = s.low_pass_filter(800)
     self.assertAlmostEqual(less_treble.dBFS, s.dBFS, places=0)
Example #33
 def test_invert(self):
     s = Sine(100).to_audio_segment()
     s_inv = s.invert_phase()
     self.assertFalse(s == s_inv)
     self.assertTrue(s.rms == s_inv.rms)
     self.assertTrue(s == s_inv.invert_phase())
Example #34
File: test.py Project: jiaaro/pydub
 def test_highpass_filter_cutoff_frequency(self):
     # A sine wave should not be affected by an HPF 3 octaves lower
     s = Sine(800).to_audio_segment()
     less_bass = s.high_pass_filter(100)
     self.assertAlmostEqual(less_bass.dBFS, s.dBFS, places=0)