def loopback_speaker():
    import sys
    if sys.platform == 'win32':
        # must install https://www.vb-audio.com/Cable/index.htm
        return soundcard.get_speaker('Cable')
    elif sys.platform == 'darwin':
        # must install Soundflower
        return soundcard.get_speaker('Soundflower64')
    elif sys.platform == 'linux':
        # pacmd load-module module-null-sink channels=6 rate=48000
        return soundcard.get_speaker('Null')
    else:
        raise RuntimeError('Unknown platform {}'.format(sys.platform))
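A minimal usage sketch for the helper above, assuming `soundcard` and `numpy` are installed and the platform-specific virtual device is set up as noted in the comments; the 440 Hz test tone and one-second duration are illustrative choices, not part of the original snippet.

import numpy
import soundcard

def demo_loopback(samplerate=48000):
    # Route one second of a 440 Hz test tone into the virtual loopback output;
    # whatever captures from the matching virtual input will receive it.
    spk = loopback_speaker()
    t = numpy.arange(samplerate) / samplerate
    tone = 0.1 * numpy.sin(2 * numpy.pi * 440 * t)
    spk.play(tone, samplerate=samplerate)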
def _init_player(self):
    if hasattr(self, '_player'):
        self._player.__exit__(exc_type=None, exc_value=None, traceback=None)
    options = self._options
    speaker = sc.get_speaker(options.sounddevice)
    rate = self._output_sample_rate
    if speaker is None:
        if options.sounddevice is None:
            print('Using default sound device. Specify --sound-device?')
            options.sounddevice = 'default'
        else:
            print('Could not find {}, using default'.format(options.sounddevice))
        speaker = sc.default_speaker()
    # pulseaudio has sporadic failures, retry a few times
    for _ in range(10):
        try:
            self._player = speaker.player(samplerate=rate, blocksize=4096)
            self._player.__enter__()
            break
        except Exception as ex:
            print('speaker.player failed with', ex)
            time.sleep(0.1)
def __init__(self, id: Union[int, str] = None, samplerate: int = config.samplerate,
             blocksize: int = config.blocksize,
             channels: List[int] = config.channels['out'],
             buffersize: int = config.buffersize,
             dtype: _np.dtype = config.dtype):
    _Streamer.__init__(self, samplerate, blocksize, channels, buffersize, dtype)
    self._channels = channels
    # Fall back to the system default speaker when no id is given.
    self._spk = (_sc.default_speaker() if not id
                 else _sc.get_speaker(id))
def __init__(self, ctx, samplerate=44100, buffersize=1024,
             device_in=0, channel_in=0, device_out=0, channel_out=0,
             volume=1.0):
    self.ctx = ctx
    self.samplerate = samplerate
    self.buffersize = buffersize
    self.volume = volume

    # Select audio devices and their channels
    inputs = all_inputs()
    outputs = all_outputs()

    if len(inputs) - 1 < device_in:
        raise IndexError(
            'No input device with index {} given'.format(device_in))

    if len(outputs) - 1 < device_out:
        raise IndexError(
            'No output device with index {} given'.format(device_out))

    self._input = sc.get_microphone(inputs[device_in].id)
    self._output = sc.get_speaker(outputs[device_out].id)

    if self._input.channels - 1 < channel_in:
        raise IndexError(
            'No input channel with index {} given'.format(channel_in))

    if self._output.channels - 1 < channel_out:
        raise IndexError(
            'No output channel with index {} given'.format(channel_out))

    ctx.log('Input device "{}" @ channel {}'.format(
        self._input.name, channel_in))
    ctx.log('Output device "{}" @ channel {}'.format(
        self._output.name, channel_out))

    self._input_ch = channel_in
    self._output_ch = channel_out

    # Prepare reading thread
    self._lock = threading.Lock()
    self._frames = np.array([])
    self.is_running = False

    # Prepare writing thread
    self._buffer = np.array([])
def create_threads(self) -> None:
    """Creates the audio threads. This is called when Update is pushed."""
    # One inner list per input device; a comprehension avoids aliasing the
    # same inner list across rows.
    self.threads = [[None] * len(self.gui.win_vars.output_sound)
                    for _ in self.gui.win_vars.input_sound]
    for x, i in enumerate(self.gui.win_vars.input_sound):
        for y, o in enumerate(self.gui.win_vars.output_sound):
            if i.name == gui_main.DEFAULT_SOUND_NAME or o.name == gui_main.DEFAULT_SOUND_NAME:
                continue
            # Bind i and o now so each thread reads its own device's volume.
            self.threads[x][y] = threading.Thread(
                target=self.output_thread,
                args=(self.run_id,
                      sc.get_microphone(i.name, True),
                      lambda i=i: i.volume,
                      sc.get_speaker(o.name),
                      lambda o=o: o.volume))
            self.threads[x][y].start()
def _process(self):
    speaker = soundcard.get_speaker(self.output_device_index or self.output_device_name)
    print(speaker)
    with speaker.player(samplerate=self.input_wire.RATE,
                        blocksize=self.frames_per_buffer) as player:
        while not self.done:
            data = self.receive_input()
            if data is None:
                self.done = True
            else:
                data = numpy.frombuffer(data, dtype=self.input_wire.FORMAT)
                if not self.stopped and not self.done:
                    try:
                        player.play(data)
                    except Exception as ex:
                        self.print_message("error: {0}".format(str(ex)))
                        self.done = True
    self.stopped = True
def play_media(self, media_type, media_id, **kwargs):
    """Send play command."""
    if media_type != MEDIA_TYPE_MUSIC:
        _LOGGER.error(
            "Invalid media type %s. Only %s is supported",
            media_type,
            MEDIA_TYPE_MUSIC,
        )
        return

    if self._sink == DEFAULT_SINK:
        speaker = sc.default_speaker()
    else:
        speaker = sc.get_speaker(self._sink)

    _LOGGER.info('play_media: %s', media_id)
    self._state = STATE_PLAYING
    self.schedule_update_ha_state()

    try:
        # may raise e.g. urllib.error.HTTPError for non-URL media ids
        local_path, _ = urllib.request.urlretrieve(media_id)
    except Exception:  # pylint: disable=broad-except
        local_path = media_id

    stream = PCMStream(self._manager.binary, loop=self._hass.loop)
    stream_reader = asyncio.run_coroutine_threadsafe(
        stream.PCMStreamReader(input_source=local_path), self._hass.loop).result()
    data = asyncio.run_coroutine_threadsafe(
        stream_reader.read(-1), self._hass.loop).result()

    # Convert signed 16-bit PCM to float in [-1, 1) before handing it to soundcard.
    data = np.frombuffer(data, dtype=np.int16) / pow(2, 15)
    speaker.play(data, samplerate=16000, channels=1)
    urllib.request.urlcleanup()

    self._state = STATE_IDLE
    self.schedule_update_ha_state()
def __init__(self, parent_, controller_):
    ## PAGE ATTRIBUTES ##################
    self.parent = parent_
    self.controller = controller_
    self.samplist = []
    self.entry = tk.StringVar()

    ## tuple to store current signal to be played or to convolve ##
    self.cursignal = ()

    self.recording = np.zeros(recordtime * 44100)
    self.recsamps = np.arange(0, recordtime * 44100, 1)
    self.samplist.append((self.recsamps, self.recording))
    self.RECplot = f.add_subplot(gs[0, 1])
    self.RECplot.title.set_text('(0) Current Waveform')
    self.RECplot.plot(self.recsamps, self.recording)

    self.conv = []
    self.convsamps = []
    self.samplist.append((self.convsamps, self.conv))
    self.CONVplot = f.add_subplot(gs[0, 2])
    self.CONVplot.title.set_text('(1) Convolution Result')
    self.CONVplot.plot(self.convsamps, self.conv)

    self.MLdata = ReadWav('Masonic Lodge.wav')
    self.MLsamps = np.arange(0, len(self.MLdata), 1)
    self.samplist.append((self.MLsamps, self.MLdata))
    self.MLplot = f.add_subplot(gs[1, 0])
    self.MLplot.title.set_text('(2) Masonic Lodge')
    self.MLplot.plot(self.MLsamps, self.MLdata)

    self.NDRdata = ReadWav('Nice Drum Room.wav')
    self.NDRsamps = np.arange(0, len(self.NDRdata), 1)
    self.samplist.append((self.NDRsamps, self.NDRdata))
    self.NDRplot = f.add_subplot(gs[1, 1])
    self.NDRplot.title.set_text('(3) Nice Drum Room')
    self.NDRplot.plot(self.NDRsamps, self.NDRdata)

    self.PGdata = ReadWav('Parking Garage.wav')
    self.PGsamps = np.arange(0, len(self.PGdata), 1)
    self.samplist.append((self.PGsamps, self.PGdata))
    self.PGplot = f.add_subplot(gs[1, 2])
    self.PGplot.title.set_text('(4) Parking Garage')
    self.PGplot.plot(self.PGsamps, self.PGdata)

    self.RGTdata = ReadWav('Right Glass Triangle.wav')
    self.RGTsamps = np.arange(0, len(self.RGTdata), 1)
    self.samplist.append((self.RGTsamps, self.RGTdata))
    self.RGTplot = f.add_subplot(gs[1, 3])
    self.RGTplot.title.set_text('(5) Right Glass Triangle')
    self.RGTplot.plot(self.RGTsamps, self.RGTdata)

    self.SCONVdata = np.zeros(recordtime * 44100)
    self.SCONVsamps = np.arange(0, recordtime * 44100)
    self.samplist.append((self.SCONVsamps, self.SCONVdata))
    self.SCONVplot = f.add_subplot(gs[1, 4])
    self.SCONVplot.title.set_text('(6) Record Your Own!')

    self.speaker = sc.get_speaker('Built-in')
    ####################################

    tk.Frame.__init__(self, parent_)

    ## BUTTONS #################################################################################
    homeret = tk.Button(self, text="Return to Home", activebackground='blue', height=10,
                        command=lambda: self.ret_home())
    homeret.pack()
    entry = tk.Entry(self, textvariable=self.entry)
    entry.pack(side="left")
    playrec = tk.Button(self, text="Play Recording", activebackground='yellow', height=10,
                        command=lambda: self.play_recording())
    playrec.pack(side="left")
    convbut = tk.Button(self, text="Convolve Signals", activebackground='firebrick1', height=10,
                        command=lambda: self.convolve())
    convbut.pack(side="right")
    convrec = tk.Button(self, text="Record Venue Impulse", activebackground='red', height=10,
                        command=lambda: self.record_venue())
    convrec.pack(side="bottom")
    ############################################################################################

    ## PLOTS ############################################
    canvas = FigureCanvasTkAgg(f, self)
    canvas.draw()
    canvas.get_tk_widget().pack()
print("MIC DATA") return None, pyaudio.paContinue input_stream = audio.open( format=pyaudio.paInt16, # The API currently only supports 1-channel (mono) audio # https://goo.gl/z757pE channels=1, rate=16000, input=True, frames_per_buffer=int(16000 / 10), input_device_index=3, # Run the audio stream asynchronously to fill the buffer object. # This is necessary so that the input device's buffer doesn't # overflow while the calling thread makes network requests, etc. stream_callback=fill_buffer ) time.sleep(5) input_stream.stop_stream() input_stream.close() """ wf = wave.open('in_ear_{}_{}.wav'.format('mom', 1), 'r') print(wf.getparams()) mom = sc.get_speaker('2- Trekz Air by AfterShokz') print(mom) #[rate, data] = wavfile.read('in_ear_{}_{}.wav'.format('mom', 1)) [rate, data] = wavfile.read('recordedFile.wav') mom.play(data / np.max(data), samplerate=rate)
import soundcard


def get_interface(cfo):
    # Enumerate available devices (results unused here).
    _test0 = soundcard.all_speakers()
    _test1 = soundcard.all_microphones()
    sp = soundcard.get_speaker(cfo.OUT_CARD_NAME)
    mc = soundcard.get_microphone(cfo.IN_CARD_NAME)
    return sp, mc
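A hedged usage sketch for `get_interface()` above; the namedtuple stand-in for the config object and the 'Scarlett' device-name substrings are assumptions for illustration, not part of the original snippet.

from collections import namedtuple

# Hypothetical config object exposing the two attribute names used above.
Config = namedtuple('Config', ['OUT_CARD_NAME', 'IN_CARD_NAME'])
cfg = Config(OUT_CARD_NAME='Scarlett', IN_CARD_NAME='Scarlett')

sp, mc = get_interface(cfg)

# Simple monitor loop: copy roughly two seconds of microphone input to the speaker.
with mc.recorder(samplerate=48000) as rec, sp.player(samplerate=48000) as play:
    for _ in range(100):
        play.play(rec.record(numframes=1024))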
# if you have PulseAudio
import numpy
import soundcard as sc

speakers = sc.all_speakers()
default_speaker = sc.default_speaker()
mics = sc.all_microphones()
default_mic = sc.default_microphone()

# search by substring:
one_speaker = sc.get_speaker('Scarlett')
one_mic = sc.get_microphone('Scarlett')
# fuzzy-search:
one_speaker = sc.get_speaker('FS2i2')
one_mic = sc.get_microphone('FS2i2')

print(default_speaker)
print(default_mic)

# record and play back one second of audio:
data = default_mic.record(samplerate=44100, numframes=44100)
default_speaker.play(data / numpy.max(data), samplerate=44100)

# alternatively, get a `recorder` and `player` object
# and play or record continuously:
with default_mic.recorder(samplerate=44100) as mic, \
        default_speaker.player(samplerate=44100) as sp:
    for _ in range(100):
        data = mic.record(numframes=1024)
        sp.play(data)
import soundcard as sc

# get a list of all speakers:
speakers = sc.all_speakers()
print("Speakers:")
for s in speakers:
    print(s)

# get the current default speaker on your system:
default_speaker = sc.default_speaker()
print(f"\nDefault speaker is: {default_speaker}\n")

# get a list of all microphones:
mics = sc.all_microphones()
print("Microphones:")
for m in mics:
    print(m)

# get the current default microphone on your system:
default_mic = sc.default_microphone()
print(f"\nDefault mic is: {default_mic}\n")

# search for a sound card by substring:
one_speaker = sc.get_speaker("Mono")
one_mic = sc.get_microphone("Mono")

# fuzzy-search to get the same results:
# one_speaker = sc.get_speaker('FS2i2')
# one_mic = sc.get_microphone('FS2i2')

# record and play back one second of audio:
# data = one_mic.record(samplerate=48000, numframes=48000)
# one_speaker.play(data/numpy.max(data), samplerate=48000)

# alternatively, get a `Recorder` and `Player` object
# and play or record continuously:
with one_mic.recorder(samplerate=48000) as mic, one_speaker.player(
        samplerate=48000) as sp:
    for _ in range(1000):
        data = mic.record(numframes=1024)
        sp.play(data)
spiralOut(sleep)
spiralOutFull(sleep)
blinkDiagonally(sleep)

# Ableton Push 2 pad note numbers (8x8 grid, top row first):
"""
92 93 94 95 96 97 98 99
84 85 86 87 88 89 90 91
76 77 78 79 80 81 82 83
68 69 70 71 72 73 74 75
60 61 62 63 64 65 66 67
52 53 54 55 56 57 58 59
44 45 46 47 48 49 50 51
36 37 38 39 40 41 42 43
"""

speaker = soundcard.get_speaker("Steinberg")
microphone = soundcard.get_microphone("Steinberg", include_loopback=True)

midiout = rtmidi.MidiOut()
available_ports = midiout.get_ports()
for port in available_ports:
    if "Ableton Push 2" in port and "MIDIOUT" not in port:
        device = int(port[-1])
        midiout.open_port(device)

sleep = 0.02
turnOff()
prev = 0
color = randint(0, 127)
print("{} is the color.".format(color))

samples = 11200
milisec = 56