Exemplo n.º 1
0
    def play_audio(self):
        """Toggle audio playback of the current data.

        When the play button is checked: preprocess the data, optionally
        create a progress line + timer, start playback, and notify other
        audio components to stop.  When unchecked: stop playback and tear
        down the progress line/timer.
        """
        if self.play_button.isChecked():
            data = preprocess_audio(self.data, spikes_only=self.play_spike_audio)

            # Track progress with a moving line, if enabled in settings.
            if settings.show_audio_line:
                self.audio_line = self.addLine(x=0)
                self.audio_timer = pg.QtCore.QTimer()
                self.audio_timer.setInterval(AUDIO_TIMER_UPDATE)
                self.audio_timer.timeout.connect(self.update_audio_line)
            sd.stop()
            sd.play(data)
            if settings.show_audio_line:
                self.audio_timer.start()

            # Notify all other audio components
            self.guard = True
            self.src.s.stop_audio.emit()

        else:
            sd.stop()
            # BUG FIX: audio_timer/audio_line only exist when
            # settings.show_audio_line is set (see the play branch), so
            # stopping the timer unconditionally raised AttributeError
            # when the progress line was disabled.
            if settings.show_audio_line:
                self.audio_timer.stop()
                self.audio_line.hide()
                self.audio_line = None
Exemplo n.º 2
0
def ellis_bpm(fname, start_bpm, hpss=True, hop_length=512, tightness=100.0, plot=False, sound=False):
    """Estimate the tempo of an audio file with librosa's beat tracker.

    When *hpss* is true the percussive component is extracted first and
    used for tracking.  Optionally plots the waveform split into
    PLOT_SPLIT panels with beat markers, and/or sonifies the beats over
    the original audio until the user presses Return.  Returns the
    estimated BPM.
    """
    y, sr = librosa.load(fname, sr=None)
    log.debug(u'Estimating tempo: {}'.format(TERM.cyan(fname)))

    # Choose the tracking source: percussive component or raw signal.
    if hpss:
        log.debug(TERM.magenta("Getting percussive elements"))
        _, percussive = librosa.effects.hpss(y)
        source = percussive
    else:
        source = y

    chunks = np.array_split(source, PLOT_SPLIT)
    log.debug(TERM.magenta("Estimating beats per minute"))
    bpm, beat_frames = librosa.beat.beat_track(y=source, sr=sr, start_bpm=start_bpm, hop_length=hop_length, tightness=tightness)

    log.debug(u'Tempo: {:6.2f} bpm'.format(bpm))

    if plot:
        plt.figure(figsize=(16,10))
        # One stacked subplot per chunk, with a vertical line at each beat.
        offset = 0
        for idx, chunk in enumerate(chunks):
            plt.subplot(PLOT_SPLIT * 100 + 11 + idx)
            plt.plot(offset + np.arange(len(chunk)), chunk, 'g')
            for frame in beat_frames:
                plt.axvline(x=frame*hop_length, color='k')
            plt.xlim([offset, len(chunk) + offset])
            offset += len(chunk)
        plt.show(block=False)

    if sound:
        # Overlay metronome clicks at the detected beats and play.
        beat_times = librosa.frames_to_time(beat_frames, sr=sr, hop_length=hop_length)
        clicks = mir_eval.sonify.clicks(beat_times, sr, length=len(y))
        sd.play(y + clicks, sr)
        input('Press Return key to stop sound')
        sd.stop()

    return bpm
Exemplo n.º 3
0
def get_hpss(filename):
    """Interactively audition the harmonic/percussive parts of a file.

    Loads the file at its native sample rate, low-pass filters it at
    5 kHz, splits it with librosa's HPSS, then repeatedly asks which
    component to play until the user declines (returns None).
    """
    # sr=None keeps the file's native sample rate (no resampling).
    log.debug(u"Loading {}".format(TERM.green(filename)))
    y, sr = librosa.load(filename, sr=None)

    log.debug(TERM.magenta("Filtering high frequencies"))
    filtered = fir_lowpass(y, 5000, sr)

    log.debug(TERM.magenta("Splitting into harmonic and percussive"))
    harmonic, percussive = librosa.effects.hpss(filtered)
    components = {"harmonic": harmonic, "percussive": percussive}

    # Keep playing components until the user opts out.
    while True:
        choice = get_response("Which component do you want to play?", ["harmonic", "percussive"], numbered=True)
        if choice is None:
            return None
        sd.play(components[choice], sr)
        input('Press Return key to stop sound')
        sd.stop()
Exemplo n.º 4
0
 def stop_playback(self):
     """Stop any audio currently playing through sounddevice."""
     sd.stop()
Exemplo n.º 5
0
def stop_tone():
    """Stop any tone currently playing through sounddevice."""
    sd.stop()
Exemplo n.º 6
0
 def stopSound(self):
     """Stop audio playback, but only when a file has been loaded."""
     # Guard clause: nothing to stop if no file was ever loaded.
     if not self.fileName:
         return
     sd.stop()
Exemplo n.º 7
0
    def __init__(self, starterWindow):
        """
        Build and wire up the equalizer UI.
        :param starterWindow: QMainWindow object the UI is installed on
        """
        super(equalizerApp, self).setupUi(starterWindow)
        # Set Main View
        self.tabWidget.setCurrentIndex(0)

        # Setup popup Window
        self.popup_window = QtWidgets.QMainWindow()
        self.pop_ui = Ui_OtherWindow()
        self.pop_ui.setupUi(self.popup_window)

        # Initializations (Ellipsis is used as a "not loaded yet" sentinel)
        self.signalFile = ...  # the file loaded ---> data, Sampling Rate
        self.signalDataType = ...  # contains the data type of the signal
        self.signalFourier = ...  # fourier transform of the signal file data
        self.signalBands = ...  # Contains the signal bands
        self.signalBandsCopy = ...  # contains a copy of the signal bands for modification purposes
        self.signalModification = ...  # Contains the signal with the modified data
        self.signalModificationInv = ...  # Contains the data to be played and written to wave
        self.filename = ...  # contains the file path
        self.format = ...  # contains the file format
        self.loadThread = loaderThread()  # contains the loader thread
        self.sliderValuesClicked = {0:..., 1:..., 2:..., 3:..., 4:..., 5:..., 6:..., 7:..., 8:..., 9:...}  # dict of the last pressed value per slider
        self.results = {1:[], 2:[]}  # saved comparison results, one list per slot
        self.resultCounter = 1  # which result slot (1 or 2) is written next

        # encapsulations
        self.sliders = [self.verticalSlider, self.verticalSlider_2, self.verticalSlider_3, self.verticalSlider_4,
                        self.verticalSlider_5, self.verticalSlider_6, self.verticalSlider_7, self.verticalSlider_8,
                        self.verticalSlider_9, self.verticalSlider_10]

        # Widgets encapsulations
        self.frontWidgets = [self.inputSignalGraph, self.sliderChangedGraph]
        self.outputWidgets = [self.inputTimeOriginal, self.outputTimeModified, self.inputFourierOriginal, self.outputFourierModified]
        self.compareWidgets = [self.result1Plot, self.result2Plot]
        self.differenceWidgets = [self.pop_ui.timeDifference, self.pop_ui.fourierDifference]

        self.allWidgets = [self.frontWidgets, self.outputWidgets, self.compareWidgets, self.differenceWidgets]
        # buttons encapsulations
        self.playerButtons = [self.playButton, self.stopButton]
        self.outputButtons = [self.resetBands, self.showResult, self.playResult]
        self.saveButtons = [self.saveFile_btn, self.showDifference_btn, self.saveResult_btn, self.compareResult_btn]
        self.resultButtons = {1: [self.playCompare, self.stopCompare], 2: [self.playCompare_2, self.stopCompare_2]}
        self.windows = [self.rectangle, self.hanning, self.hamming]

        # Top Titles
        self.widgetTitles = ["Original Signal", "Changes Applied"]
        self.outputWidgetsTitles = ["Original Signal in Time", "Output Signal in Time", "Original Signal Fourier", "Output Signal Fourier"]
        self.compareTitles = ["First Result", "Second Result"]
        self.differenceTitles = ["Time Difference", "Fourier Difference"]

        self.allTitles = [self.widgetTitles, self.outputWidgetsTitles, self.compareTitles, self.differenceTitles]
        # Bottom Titles
        self.widgetsBottomLabels = ["No. of Samples", "Frequencies"]
        self.outputWidgetsBottomLabels = ["No. of Samples", "No. of Samples", "Frequencies", "Frequencies"]
        self.compareBottomLabels = ["Frequencies", "Frequencies"]
        self.differenceBottomLabels = ["No. of Samples", "Frequencies"]

        self.allButtomLabels = [self.widgetsBottomLabels, self.outputWidgetsBottomLabels, self.compareBottomLabels,
                                self.differenceBottomLabels]
        # pens configurations (Plot Colors)
        self.pens = [pg.mkPen(color=(255, 0, 0)), pg.mkPen(color=(0, 255, 0)),
                     pg.mkPen(color=(0, 0, 255)), pg.mkPen(color=(200, 87, 125)),
                     pg.mkPen(color=(123, 34, 203))]

        # Apply title, grid and bottom label to every plot widget group.
        for encap in zip(self.allWidgets, self.allTitles, self.allButtomLabels):
            for widget, title, label in zip(encap[0], encap[1], encap[2]):
                widget.plotItem.setTitle(title)
                widget.plotItem.showGrid(True, True, alpha=0.8)
                widget.plotItem.setLabel("bottom", text=label)

        # Setup Y Range in the two time-domain plot widgets
        for widget in self.outputWidgets[:2]:
            widget.setYRange(-30000, 30000)

        self.inputSignalGraph.setYRange(-30000, 30000)

        # CONNECTIONS
        self.actionload.triggered.connect(self.loadFile)
        # FIX: use enumerate instead of list.index() inside the loop —
        # index() is an O(n) scan per slider and returns the first equal
        # element, which would assign wrong ids if sliders compare equal.
        for index, slider in enumerate(self.sliders):
            slider.id = index
            slider.signal.connect(self.sliderChanged)

        self.playButton.clicked.connect(lambda : sd.play(self.signalFile["data"] ,  self.signalFile['frequency']))
        self.stopButton.clicked.connect(lambda : sd.stop())
        self.playResult.clicked.connect(lambda : sd.play(self.signalModificationInv.astype(self.signalDataType), self.signalFile['frequency']))
        self.resetBands.clicked.connect(self.resetAllBands)

        # Save Output Buttons
        self.showResult.clicked.connect(self.showResultOutput)
        self.saveFile_btn.clicked.connect(lambda: self.saveWaveFile(self.signalFile['frequency'], self.signalModificationInv))

        # Difference Button
        self.showDifference_btn.clicked.connect(self.showDifferenceWindow)

        #Compare Results
        self.saveResult_btn.clicked.connect(self.saveResult)
        self.compareResult_btn.clicked.connect(self.compareResults)
        self.playCompare.clicked.connect(lambda : sd.play(self.results[1][1].astype(self.signalDataType), self.signalFile['frequency']))
        self.playCompare_2.clicked.connect(lambda : sd.play(self.results[2][1].astype(self.signalDataType), self.signalFile['frequency']))
        self.stopCompare.clicked.connect(lambda : sd.stop())
        self.stopCompare_2.clicked.connect(lambda : sd.stop())
Exemplo n.º 8
0
def stop():
    """Stop playing Sound snd (halts all sounddevice playback)."""

    sounddevice.stop()
Exemplo n.º 9
0
    def start(self):
        """Start the live-looping sample program.

        Counts down one measure, then loops: while the layer condition
        holds it plays the current mix and records a new layer on top of
        it (sd.playrec); afterwards it only plays the mix.  Recorded
        layers are filtered and stored round-robin in ``self.recording``.

        NOTE(review): Python 2 code (print statements, ``xrange``).
        """

        # move to local namespace
        bpm = self.settings['bpm']
        ts = self.settings['time_signature']
        measures = self.settings['measures']
        layers = self.settings['layers']
        samplerate = self.settings['samplerate']
        channels = self.settings['channels']
        cannon = self.settings['cannon']  # boolean

        # precalculate useful values
        # samples per entry: rate * measures * beats/measure * secs/beat
        entrysize = samplerate * measures * ts[0] * (60. / bpm)

        # countdown
        for b in xrange(1, ts[0] + 1):
            print 'Down measure {}/{}...'.format(b, ts[0])
            time.sleep(60. / bpm)

        self.recording = np.zeros((layers, int(entrysize), channels))

        print 'Recording...'

        for phrase in xrange(100):  # set with variable

            # define condition for writing new layers of music
            # (cannon mode keeps recording forever; otherwise only the
            # first `layers` phrases are captured)
            if cannon:
                condition = True
            else:
                condition = phrase < layers

            sd.stop()

            # Catch layers...
            if condition:
                print 'RECORDING...'
                # Play the current mix while recording the next layer.
                temp_recording = sd.playrec(np.sum(self.recording, axis=0),
                                            samplerate=samplerate,
                                            channels=channels)

            else:
                sd.play(np.sum(self.recording, axis=0),
                        samplerate,
                        loop=False,
                        blocking=False)

            # Count the measure out loud while playback/recording runs.
            for m in xrange(1, measures + 1):

                #clear_screen()
                print 'Measure {}/{}...'.format(m, measures)

                ### On the beat ###
                for b in xrange(1, ts[0] + 1):
                    print 'Beat {}/{}'.format(b, ts[0])
                    time.sleep(60. / bpm)

            if condition:

                pickle.dump(temp_recording, open('data.p', 'wb'))
                #filtered_recording = butter_highpass_filter(temp_recording,5,samplerate)
                # Clean up the take before storing it as a layer.
                filtered_recording = my_filter(temp_recording)
                # Round-robin overwrite of the oldest layer slot.
                self.recording[phrase % layers, :, :] = filtered_recording

                #plt.plot(filtered_recording)
                #plt.show()

        print 'End recording.'

        # play looped track
        #sd.play(self.recording,loop=False,blocking=False)

        for m in xrange(1, measures + 1):
            print 'Measure {}/{}...'.format(m, measures)

            for b in xrange(1, ts[0] + 1):
                print 'Beat {}/{}'.format(b, ts[0])
                time.sleep(60. / bpm)
Exemplo n.º 10
0
 def playsound(data):
     """Play *data* through sounddevice and block until it finishes.

     NOTE(review): defined among methods but takes no ``self``; ``fs`` is
     presumably a module-level sample rate — confirm against the caller.
     """
     sd.play(data, fs)
     # Block for the clip's duration (samples / sample rate seconds),
     # then stop playback explicitly.
     sleep(data.shape[0] / fs)
     sd.stop()
     # FIX: removed a dead trailing `pass` statement.
 def discard(self):
     """Discard the current take by stopping sounddevice playback."""
     sd.stop()
 def stop_playback(self):
     """Stop playback and save the filtered signal next to the source file.

     The output name replaces the source path's last 4 characters
     (presumably a ``.wav`` extension — confirm) with ``filtered.wav``.
     """
     sd.stop()
     sd.wait()
     # Derive the output path from this instance's source file path.
     filename = self.file_path[:-4] + 'filtered.wav'
     sf.write(filename, self.data, self.fs)
 def stop_playback_button(self):
     """Stop playback, save the filtered audio, and close the popup."""
     sd.stop()
     sd.wait()
     # Output name derived from the class-level source path (last 4 chars
     # — presumably ".wav" — replaced with "filtered.wav").
     filename = AudioTool.file_path[:-4] + 'filtered.wav'
     sf.write(filename, AudioTool.data, AudioTool.fs)
     # BUG FIX: the original referenced the method without calling it
     # (`self.dismiss_popup`), so the popup never closed — compare
     # discard_button, which calls it correctly.
     self.dismiss_popup()
 def discard_button(self):
     """Stop playback without saving, then close the popup."""
     sd.stop()
     self.dismiss_popup()
Exemplo n.º 15
0
def stop():
    """Stop playing Sound snd (halts all sounddevice playback)."""

    sounddevice.stop()
Exemplo n.º 16
0
    def stop(self):
        """Stop playing this Sound (sounddevice stops playback globally)."""

        sounddevice.stop()
Exemplo n.º 17
0
 def __del__(self):
     """Class destructor.

     Stops sounddevice playback if this object was still playing when it
     was garbage-collected.
     """
     if self.isPlaying:
         sd.stop()
Exemplo n.º 18
0
def stop():
    """Stop playback of whichever global buffer is defined.

    Tries the global ``snd`` first and falls back to ``data``.
    NOTE(review): ``sounddevice.stop``'s first parameter is
    ``ignore_errors``, not an audio buffer — passing the buffer is almost
    certainly unintended; confirm and simplify to ``sd.stop()``.
    """
    try:
        sd.stop(snd)
    # FIX: only a missing global should trigger the fallback; the bare
    # `except:` also swallowed KeyboardInterrupt/SystemExit and real bugs.
    except NameError:
        sd.stop(data)
Exemplo n.º 19
0
 def play(self, duration=1.0, blocking=False, loop=False, device=11):
     """Play this object's waveform on the given output device.

     When *blocking* is true, sleep for *duration* seconds and then stop
     playback before returning; otherwise return immediately.
     """
     sd.play(self.wave, loop=loop, mapping=self.channels, device=device)
     # Early return keeps the blocking path flat.
     if not blocking:
         return
     sleep(duration)
     sd.stop()
Exemplo n.º 20
0
def buttonStop():
    """Stop playback and delete the temporary track file, if present."""
    sd.stop()
    try:
        os.remove("selectedTrack.wav")
    # FIX: catch only the expected "file does not exist" case instead of
    # a bare `except:` that also hid permission errors and interrupts.
    except FileNotFoundError:
        print("nothing to delete")
Exemplo n.º 21
0
def on_relaese(sd):
    """Release-event handler: stop playback on the given player object.

    NOTE(review): "relaese" is a typo for "release", but renaming would
    break existing bindings.  The parameter shadows the conventional
    ``sd`` (sounddevice) module name — here it is the object passed in.
    """
    sd.stop()
Exemplo n.º 22
0
def stop():
    """Stop all sounddevice playback."""
    # FIX: dropped the trailing semicolon — a no-op carried over from
    # another language and non-idiomatic in Python.
    sd.stop()
Exemplo n.º 23
0
 def stop(self):
     """Stop sounddevice playback."""
     sd.stop()
Exemplo n.º 24
0
 def stop(self):
     """Stop playback before the sound has finished."""
     sd.stop()
Exemplo n.º 25
0
def stop():
    """Stop playing audio via sounddevice."""
    sd.stop()
Exemplo n.º 26
0
 def stop(self):
     """Stop playing of this (and all other) sounds via sounddevice."""
     sounddevice.stop()
Exemplo n.º 27
0
def stop_music(event=None):
    """Stop playback and reset sounddevice defaults.

    :param event: optional GUI event object (ignored), so this function
        can be bound directly as an event callback.
    """
    sd.stop()
    sd.default.reset()
Exemplo n.º 28
0
    def gread(signal):
        """Dispatch a GUI signal to the appropriate AudioTrack action.

        NOTE(review): takes no ``self`` — presumably registered as a
        static callback; confirm whether a decorator precedes it outside
        this view.

        :param signal: object with a ``type`` (a ``gsignal`` constant)
            and, for SELECT, a ``content`` track index.
        """
        # TODO: I need a better way to order my listeners; for now 0 is
        # the static one and 1 the dynamic one.
        # TODO: this is a failure point — changing the connected nodes
        # can change the established order.
        if signal.type == gsignal.ACTION:
            AudioTrack.play()

        if signal.type == gsignal.ACTION2:
            if (AudioTrack.mode == 0):
                # Idle -> recording: allocate a fresh mono buffer and
                # start capturing into it.
                AudioTrack.mode = 2
                AudioTrack.tick = 0
                AudioTrack.recbuffer = np.zeros((480000, 1))
                sd.rec(out=AudioTrack.recbuffer)
            elif (AudioTrack.mode == 2):
                # Recording -> idle: stop the capture.
                AudioTrack.mode = 0
                sd.stop()
            else:
                # Error message (Portuguese): could not start a
                # recording, the audio controller is busy.
                print(
                    "Erro: Não foi iniciar uma gravação, o controlador de áudio está ocupado"
                )
                # TODO: display the error graphically — something like an
                # Android-style toast?

        if signal.type == gsignal.ACTION3:
            # Modulate the signal selected by tracknames.iterator with
            # the one selected by subtracknames.iterator.
            # TODO: fix an obvious bug where trackname and subtrackname
            # end up swapped.
            # TODO: suggestion — make modulate symmetric with respect to
            # the carrier and the signal.
            track = AudioTrack.modulateamplitude(
                AudioTrack.tracklist[AudioTrack.tracknames.iterator],
                AudioTrack.tracklist[AudioTrack.subtracknames.iterator])

            AudioTrack.savetrack(track, track.name)

        if signal.type == gsignal.ACTION4:
            # Mix (add) the two selected tracks and save the result.
            track = AudioTrack.addtracks(
                AudioTrack.tracklist[AudioTrack.tracknames.iterator],
                AudioTrack.tracklist[AudioTrack.subtracknames.iterator])

            AudioTrack.savetrack(track, track.name)

        if signal.type == gsignal.ACTION5:
            # TODO: eventually update demodulate so the breakin does not
            # have to be passed in.
            track = AudioTrack.demodulateamplitude(
                AudioTrack.tracklist[AudioTrack.tracknames.iterator],
                AudioTrack.breakin.iterator + 1)

            AudioTrack.savetrack(track, track.name)

        if signal.type == gsignal.SELECT4:
            # Swap tracknames with subtracknames.
            hold = AudioTrack.tracknames.iterator
            AudioTrack.tracknames.iterator = AudioTrack.subtracknames.iterator
            AudioTrack.subtracknames.iterator = hold

        if signal.type == gsignal.SAVE:
            if (AudioTrack.mode == 0):
                AudioTrack.save()
            else:
                # Error message (Portuguese): could not save the sound
                # sample, are you sure you are not still recording?
                print(
                    "Erro: Não foi possível salvar a ammostra de som, está certo de que ainda não está gravando?"
                )
                # TODO: display the error graphically — something like an
                # Android-style toast?

        if signal.type == gsignal.SELECT:

            audiotrack = AudioTrack.tracklist[signal.content]

            # Show either the raw track or its Fourier transform,
            # depending on the current display mode.
            if (AudioTrack.displaymode.iterator == 0):
                AudioTrack.trackablegraph.content = audiotrack.track
            else:
                AudioTrack.trackablegraph.content = audiotrack.fourier

            AudioTrack.detectTone(audiotrack.fourier)

        if signal.type == gsignal.SELECT2:
            audiotrack = AudioTrack.tracklist[AudioTrack.tracknames.iterator]

            if (AudioTrack.displaymode.iterator == 0):
                AudioTrack.trackablegraph.content = audiotrack.track
            else:
                AudioTrack.trackablegraph.content = audiotrack.fourier
Exemplo n.º 29
0
def excitation(exc_parameters):
    """Build an excitation waveform, play it while recording, return data.

    The stereo output is a constant "stable" segment followed by a sine
    excitation whose offset ramps from v2 to v3; the left/right channels
    carry the sine in antiphase.  One input channel is recorded during
    playback via sounddevice.

    :param exc_parameters: dict of string-convertible settings:
        ``cfact``, ``amplitude``, ``stable``, ``sample_rate``,
        ``duration``, ``frequency``, ``v1``, ``v2``, ``v3``.
    :returns: ``(df, volt_range)`` — a DataFrame with a zero column and
        the recorded samples (first 4000 blanked), plus the combined
        stable+excitation ramp array.
    """
    # settings
    # This is as a fraction of the maximum amplitude 1 = 2.96 V
    conversion_factor = float(exc_parameters['cfact'])  #-0.845
    amplitude = conversion_factor * float(exc_parameters['amplitude'])
    stable = float(exc_parameters['stable'])  # stable duration in seconds
    # Doesn't necessarily work for other sample rates
    sample_rate = float(exc_parameters['sample_rate'])
    # recording duration in seconds
    duration = float(exc_parameters['duration'])
    frequency = float(exc_parameters['frequency'])  # Frequency
    # Stable "Voltage" actually a fraction of max output positive values only
    v1 = conversion_factor * float(exc_parameters['v1'])
    # Recording Start "Voltage" actually a fraction of max output 0.1 = ~0.045V
    v2 = conversion_factor * float(exc_parameters['v2'])
    # Recording stop "Voltage" actually a fraction of max output 1.0 = ~1.265
    v3 = conversion_factor * float(exc_parameters['v3'])

    # NOTE(review): linspace(v1, v1, n) is a constant array (np.full
    # would be clearer) — left as-is to preserve behavior.
    sramp = np.linspace(v1, v1,
                        int(stable * sample_rate))  # ramp for stable period
    ramp = np.linspace(v2, v3,
                       int(duration * sample_rate))  # ramp for excitation
    volt_range = np.append(sramp, ramp, axis=0)
    # stable duration
    # Left channel wave form
    xls = np.ones(int(stable * sample_rate))
    # Right Channel waveform
    xrs = np.ones(int(stable * sample_rate))

    s_left_channel = xls * v1  # modified to remove amplitude
    s_right_channel = xrs * v1

    # NOTE(review): since sramp is constant v1, the left channel becomes
    # all zeros and the right becomes 2*v1 — confirm this is intended.
    s_left_channel -= sramp
    s_right_channel += sramp

    stable_waveform_stereo = np.vstack(
        (s_left_channel, s_right_channel)).T  # combine left and right channels

    # record duration
    xl = np.linspace(0, duration * 2 * np.pi, int(duration * sample_rate))
    xr = np.linspace(0, duration * 2 * np.pi, int(duration * sample_rate))

    # Antiphase sine pair (right channel shifted by pi).
    left_channel = amplitude * np.sin(frequency * xl)
    right_channel = amplitude * np.sin(frequency * xr + np.pi)

    left_channel -= ramp

    right_channel += ramp

    # combine left and right channels
    waveform_stereo = np.vstack((left_channel, right_channel)).T

    # create total waveform
    total_waveform = (np.append(stable_waveform_stereo,
                                waveform_stereo,
                                axis=0))

    # record data
    # Play the stereo waveform while recording one input channel, then
    # sleep for the full program length so the recording completes.
    rec_data = sd.playrec(total_waveform, sample_rate, channels=1)
    time.sleep(stable + duration)
    sd.stop()

    rec = len(rec_data)
    zeroCol = np.zeros(rec, dtype=int)
    df = pd.DataFrame(zeroCol)
    df.insert(loc=1, column=1, value=rec_data)
    # Blank the first samples — presumably to suppress the record/playback
    # startup transient; confirm the 4000-sample figure.
    blank_samples = 4000
    df.iloc[0:blank_samples, 1] = 0
    return df, volt_range
Exemplo n.º 30
0
              ## Generating the waveform
              print("Synthesizing the waveform:")
              # Synthesizing the waveform is fairly straightforward. Remember that the longer the
              # spectrogram, the more time-efficient the vocoder.
              generated_wav = vocoder.infer_waveform(spec)


              ## Post-generation
              # There's a bug with sounddevice that makes the audio cut one second earlier, so we
              # pad it.
              generated_wav = np.pad(generated_wav, (0, synthesizer.sample_rate), mode="constant")

              # Play the audio (non-blocking)
              if not args.no_sound:
                  sd.stop()
                  sd.play(generated_wav, synthesizer.sample_rate)

              # Save it on the disk
              fpath = "%02d - %s.wav" % (num_generated, line[:10])
              print(generated_wav.dtype)
              librosa.output.write_wav(fpath, generated_wav.astype(np.float32),
                                      synthesizer.sample_rate)
              num_generated += 1
              print("\nSaved output as %s\n\n" % fpath)


        except Exception as e:
            print("Caught exception: %s" % repr(e))
            print("Restarting\n")
Exemplo n.º 31
0
def send_over_sound(message):
    """Encode *message* as audio symbols and play it out the speakers.

    Each character is converted to 7-bit binary; the bit stream is padded
    to a multiple of 3 and every 3-bit group is mapped to a frequency
    multiplier.  A repeated symbol is replaced by the 3.33 escape symbol
    so the receiver always sees a transition, and the stream is framed by
    a 3.33/3.66 preamble and a 3.66/3.33/3.66 postamble.
    """
    # set device to play sound from
    sd.default.device = 3  # int(input("Please select Microphone:\t"))
    # Samples per second
    sps = 44100
    # Samples per symbol
    spp = int(2560 * 2)

    # Base Frequency that must match receiver
    freq_hz = 1200.0

    # Attenuation so the sound is reasonable volume
    atten = 0.1

    # Convert message to binary.
    message_binary = ' '.join(f"{ord(x):07b}" for x in message)
    print(message_binary)
    message_binary = message_binary.replace(" ", "")
    print(len(message_binary))

    # make sure the message is divisible by 3 for encoding
    while (len(message_binary) % 3 != 0):
        message_binary += "0"

    # print message in binary
    print([message_binary[i:i + 7] for i in range(0, len(message_binary), 7)])

    # FIX: the 8-way elif chain is now a lookup table (same mapping);
    # also removed two dead locals (`message_array` built then
    # overwritten, and an unused `a = np.full(...)`).
    symbol_map = {
        "000": 0.66, "001": 1, "010": 1.33, "011": 1.66,
        "100": 2, "101": 2.33, "110": 2.66, "111": 3,
    }

    # Preamble: 3.33/3.66 marks the start of a transmission.
    message_binary_list = [3.33, 3.66]

    # encode message; no two adjacent symbols may be the same
    for i in range(0, len(message_binary), 3):
        message_binary_list.append(symbol_map[message_binary[i:i + 3]])
        # If this symbol repeats the previous one, replace it with the
        # 3.33 escape symbol so the receiver can detect the transition.
        if message_binary_list[-2] == message_binary_list[-1]:
            message_binary_list[-1] = 3.33

    # Postamble marks the end of the transmission.
    message_binary_list.append(3.66)
    message_binary_list.append(3.33)
    message_binary_list.append(3.66)
    print(message_binary_list)

    # NumPy to calculate the waveform
    # Calculate the duration of the message, rounded up to whole seconds
    duration_s = ((len(message_binary_list)) * spp) / sps
    duration_s = math.ceil(duration_s)
    print(duration_s)
    # Calculate the number of samples
    total_samples = sps * duration_s

    each_sample_number = np.arange(total_samples)

    # Scale the message to spp samples per symbol.  The two leading
    # zeros give a brief silence before the preamble.
    scaled_message_array = np.array([0, 0])
    for symbol in message_binary_list:
        scaled_message_array = np.concatenate(
            (scaled_message_array, np.full((1, spp), symbol)), axis=None)

    # pad message with zeros to ensure array is full
    scaled_message_array = np.pad(
        scaled_message_array, (0, total_samples - len(scaled_message_array)),
        "constant")
    # set symbol frequency
    message_array = scaled_message_array * freq_hz
    # calculate waveform
    waveform = np.sin(2 * np.pi * each_sample_number * message_array / sps)
    # lower volume
    waveform_quiet = waveform * atten

    # Play the waveform out the speakers and block for its duration.
    sd.play(waveform_quiet, sps)
    time.sleep(duration_s)
    sd.stop()
    return
Exemplo n.º 32
0
 def play(self, wav, sample_rate):
     """Play *wav* at *sample_rate*, stopping any prior playback first."""
     sd.stop()
     sd.play(wav, sample_rate)
Exemplo n.º 33
0
def DieProgramDie():
    """Stop playback and print a farewell message."""
    sd.stop()
    # Three blank lines before the sign-off.
    for _ in range(3):
        print('')
    print('Thanks for making some dumb music, Panga!')