def __init__(self, res=50, lag=.01, lag2=.03):
    self.snd = SndReader()
    self.res = res
    self.lag = lag
    self.lag2 = lag2
    self.dur = self.snd.dur
    self.refreshRate = self.snd.refreshRate
    self.len = int(self.dur * self.res)
    self.tab = pyo.DataTable(self.len)
    # numpy view sharing the DataTable's memory; writing into it updates the table
    self.datar = np.asarray(self.tab.getBuffer())
    self.table = self.snd.table
    self.env = self.table.getEnvelope(self.len)
    self.envAr = np.concatenate(np.round(np.absolute(np.asarray(self.env)), 2))
    self.tableOG = self.snd.tableOG
    self.durOG = self.snd.durOG
    self.sig = None
    self.son = pyo.TableRead(self.table, 1 / self.dur, 1).play()
    self.son = pyo.Mix(self.son, 2)
    self.son.out()
def __init__(self, no_patterns: int, steps: int, bpm: float):
    """
    :param no_patterns: number of patterns (number of samples to trigger)
    :param steps: number of steps
    :param bpm: beats per minute
    """
    # one step is a sixteenth note: 60 / bpm seconds per beat, four steps per beat
    self.time_per_step = 1. / bpm * 60 / 4
    self.duration = steps * self.time_per_step
    self.offsets = [[0.] * steps for _ in range(no_patterns + 1)]
    self.pattern = [[False] * steps for _ in range(no_patterns + 1)]
    self.pattern_tables = [
        pyo.NewTable(self.duration) for _ in range(no_patterns + 1)
    ]
    self.trigger_tables = [
        pyo.TableRead(table, freq=1. / self.duration, interp=1, loop=1)
        for table in self.pattern_tables
    ]
    self.trigger_functions = [
        pyo.TrigFunc(trigger, lambda: None) for trigger in self.trigger_tables
    ]
    self.sampling_rate = self.trigger_tables[0].getSamplingRate()

    # the last pattern is always on to implement a callback at each step
    for step in range(steps):
        self.activate(no_patterns, step)
def __init__(self, son):
    super().__init__()
    self.file = son
    self.son = pyo.SndTable(self.file)
    self.synth = pyo.TableRead(self.son, 1 / self.son.getDur(), 1).play()
    self.filter = pyo.Biquad(self.synth)
    self.verb = pyo.WGVerb(self.filter).mix(2)
def _enter(self):
    if _pyo_server is None:
        # try and init it with defaults
        # print some warning
        init_audio_server()

    # process the vars
    sound_file = val(self.sound_file)
    start = val(self.start)
    stop = val(self.stop)

    # init the sound table (two steps to get mono in both speakers)
    sndtab = pyo.SndTable(initchnls=_pyo_server.getNchnls())
    sndtab.setSound(path=sound_file, start=start, stop=stop)

    # set the duration if not looping
    if val(self.loop):
        # set callback for stopping sound
        raise NotImplementedError("Looping sounds is currently not supported.")
    else:
        self.duration = sndtab.getDur()

    # read in sound info
    self._snd = pyo.TableRead(sndtab, freq=sndtab.getRate(),
                              loop=val(self.loop),
                              mul=val(self.volume))
def _enter(self):
    super(SoundFile, self)._enter()
    default_init_audio_server()
    self._sound_start_time = None

    # init the sound table (two steps to get mono in both speakers)
    sndtab = pyo.SndTable(initchnls=_pyo_server.getNchnls())
    sndtab.setSound(path=self._filename, start=self._start, stop=self._stop)

    # set the end time
    if not self._loop:
        self.cancel(self._start_time + sndtab.getDur())

    # read in sound info
    self.__snd = pyo.TableRead(sndtab, freq=sndtab.getRate(),
                               loop=self._loop,
                               mul=self._volume)
    if self.__snd is None:
        raise RuntimeError("Could not load sound file: %r" % self._filename)

    # schedule playing the sound
    clock.schedule(self._start_sound, event_time=self._start_time)

    # schedule stopping the sound
    if self._end_time is not None:
        clock.schedule(self._stop_sound, event_time=self._end_time)
def _updateSnd(self):
    self.needsUpdate = False
    doLoop = bool(self.loops != 0)  # if True, end it via threading.Timer
    self._snd = pyo.TableRead(self._sndTable,
                              freq=self._sndTable.getRate(),
                              loop=doLoop,
                              mul=self.volume)
def play(self):
    def call():
        self.refreshTable()
        self.datar[:] = self.envAr

    call()  # prime the data table before the pattern starts
    self.pat = pyo.Pattern(call, self.refreshRate).play()
    self.mul = pyo.TableRead(self.tab, 1 / self.dur, 1, 3).play()
    return self.mul
def __init__(self, extractor):
    self.extractor = extractor
    self.table = pyo.NewTable(extractor.table.length)
    self.player = pyo.TableRead(self.table, freq=self.table.getRate())
    self.metro = pyo.Metro()
    self.trig = pyo.Percent(self.metro)
    self.trig_play = pyo.TrigFunc(self.trig, self.play)
    self.samples = np.asarray(self.table.getBuffer())
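# A minimal, self-contained sketch (not taken from the snippets above) of the
# table-to-numpy buffer-sharing pattern used here and in several play() methods
# below: np.asarray(table.getBuffer()) gives a writable view onto the table's
# samples, so assigning into the array changes the audio the TableRead plays.
import numpy as np
import pyo

s = pyo.Server().boot()
size = 44100
tab = pyo.DataTable(size=size)                 # one-channel table of `size` samples
buf = np.asarray(tab.getBuffer())              # numpy view sharing the table memory
buf[:] = np.sin(2 * np.pi * 5 * np.arange(size) / size)  # write a 5-cycle sine directly
player = pyo.TableRead(tab, freq=tab.getRate(), loop=1).out()
s.start()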
def play(self):
    def call():
        self.avList = []
        self.refreshTable()
        self.envarMo = self.envAr
        # mean over a sliding window of `period` envelope values
        for i in range(self.envarMo.size):
            x = i + self.period
            self.avList.append(self.envarMo[x - self.period:x].mean())
        self.datar[:] = np.asarray(self.avList)

    self.pat = pyo.Pattern(call, self.refreshRate).play()
    self.mul = pyo.TableRead(self.tab, 1 / self.dur, 1, 3).play()
    return self.mul
def TableWrap(audio, duration):
    '''
    Records a PyoAudio generator into a sound table, returns a TableRead
    object which can play the audio with .out()
    '''
    # Duration is in ms, so divide by 1000
    audio.play()
    tab = pyo.NewTable(length=(float(duration) / 1000),
                       chnls=rpiset.NUM_CHANNELS)
    tabrec = pyo.TableRec(audio, table=tab, fadetime=0.01)
    tabrec.play()
    sleep(float(duration) / 1000)
    tabread = pyo.TableRead(tab, loop=0)
    return tabread
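# Hypothetical usage of TableWrap above (the 440 Hz tone and the booted server
# are illustrative assumptions; rpiset.NUM_CHANNELS must match the server's
# channel count for the recording to work):
import pyo
from time import sleep

s = pyo.Server().boot().start()
tone = pyo.Sine(freq=440, mul=0.3)   # test signal to capture
reader = TableWrap(tone, 500)        # record 500 ms of the tone into a table
reader.out()                         # play back the captured table
sleep(0.5)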
def play(self, file=None):
    if file is not None and os.path.isfile(file):
        self.CurrentFile = file
        self.AudioTable = self.AudioDecoder.GetTable()
        self.reader = pyo.TableRead(self.AudioTable,
                                    self.AudioTable.getRate()).out()
        return True
    elif file is None:
        return True
    else:
        return False
def play(self):
    self.sectOG = self.sect = self.getSections()
    self.sectDum = np.concatenate(self.sect)
    time = np.size(self.sectDum) / self.dur

    def call():
        self.refreshTable()
        # shuffle the sections, flatten them and fit the result to the data table
        self.sect = self.getSections()
        self.sect = np.random.permutation(self.sect)
        self.sect = np.concatenate(self.sect)
        self.sect = np.resize(self.sect, self.datar.shape)
        self.datar[:] = np.asarray(self.sect)
        self.sect = self.sectOG

    call()  # prime the data table before the pattern starts
    self.patSect = pyo.Pattern(call, self.refreshRate).play()
    self.mul = pyo.TableRead(self.tab, 1 / self.dur, 1, 3).play()
    return self.mul
def play(self):
    def call():
        self.refreshTable()
        self.envList = self.envAr.tolist()
        self.envLen = len(self.envList)
        self.tempEnv = []
        self.playedEnv = []
        for val in self.envList:
            self.tempEnv.append(val)
        # wrap the sequence so transitions at the end loop back to the start
        for i in range(self.order):
            self.tempEnv.append(self.envList[i])
        self.playedEnv = self.envList[self.envLen - self.order:]
        self.newVal = 0
        self.condition = False
        self.probTable = []
        self.markEnv = []
        # Markov chain of order `self.order`: for each output value, collect the
        # continuations that follow the last `order` played values, then pick one
        for val in range(self.envLen):
            for i in range(len(self.tempEnv) - self.order):
                for iord in range(self.order):
                    if (self.playedEnv[len(self.playedEnv) - (iord + 1)]
                            != self.tempEnv[(self.order - 1) + i - iord]):
                        self.condition = False
                        break
                    else:
                        self.condition = True
                if self.condition:
                    self.probTable.append(self.tempEnv[i + self.order])
            self.newVal = self.probTable[random.randint(0, len(self.probTable) - 1)]
            self.markEnv.append(self.newVal)
            self.playedEnv.append(self.newVal)
        self.markEnv = np.asarray(self.markEnv)
        self.datar[:] = np.asarray(self.markEnv)

    call()  # prime the data table before the pattern starts
    self.pat = pyo.Pattern(call, self.refreshRate).play()
    self.mul = pyo.TableRead(self.tab, 1 / self.dur, 1, 3).play()
    return self.mul
def refresh(self):
    self.snd.refresh()
    self.dur = self.snd.dur
    self.refreshRate = self.snd.refreshRate
    self.len = int(self.dur * self.res)
    self.tab = pyo.DataTable(self.len)
    self.datar = np.asarray(self.tab.getBuffer())
    self.table = self.snd.table
    self.env = self.table.getEnvelope(self.len)
    self.envAr = np.concatenate(np.round(np.absolute(np.asarray(self.env)), 2))
    self.tableOG = self.snd.tableOG
    self.durOG = self.snd.durOG
    self.son = pyo.TableRead(self.table, 1 / self.dur, 1).play()
    self.son = pyo.Mix(self.son, 2)
    self.son.out()
def play(self):
    def hist():
        self.refreshTable()
        self.hist, self.bin = np.histogram(self.envAr, density=True, bins=self.nbin)
        self.hist = np.round(self.hist, 8)
        self.hist = self.hist / self.hist.sum()  # normalize so probabilities sum to 1
        self.bins = self.bin[0:self.nbin]

    hist()  # prime the histogram before the patterns start
    self.pat = pyo.Pattern(hist, self.refreshRate).play()

    def call():
        randHist = float(np.random.choice(self.bins, p=self.hist))
        self.datar[:] = randHist

    self.patHist = pyo.Pattern(call, 1 / self.res).play()
    self.mul = pyo.TableRead(self.tab, 1 / self.dur, 1, 3).play()
    return self.mul
def table_wrap(self, audio, duration=None):
    """Records a PyoAudio generator into a sound table and stores a TableRead
    object in self.table, which can play the audio with .out()

    Args:
        audio:
        duration:
    """
    if not duration:
        duration = self.duration

    # Duration is in ms, so divide by 1000
    # See https://groups.google.com/forum/#!topic/pyo-discuss/N-pan7wPF-o
    # TODO: Get chnls to be responsive to NCHANNELS in prefs. hardcoded for now
    tab = pyo.NewTable(length=(float(duration) / 1000),
                       chnls=prefs.NCHANNELS)  # Prefs should always be declared in the global namespace
    tabrec = pyo.TableRec(audio, table=tab, fadetime=0.005).play()
    sleep(float(duration) / 1000)
    self.table = pyo.TableRead(tab, freq=tab.getRate(), loop=0)
def init_sound(self):
    """
    Load the wavfile with :mod:`scipy.io.wavfile`, converting int to float as needed.

    Create a sound table, resampling sound if needed.
    """
    fs, audio = wavfile.read(self.path)
    if audio.dtype in ['int16', 'int32']:
        audio = int_to_float(audio)

    # load file to sound table
    if self.server_type == 'pyo':
        self.dtable = pyo.DataTable(size=audio.shape[0],
                                    chnls=prefs.NCHANNELS,
                                    init=audio.tolist())

        # get server to determine sampling rate modification and duration
        server_fs = self.dtable.getServer().getSamplingRate()
        self.duration = float(self.dtable.getSize()) / float(fs)
        self.table = pyo.TableRead(table=self.dtable,
                                   freq=float(fs) / server_fs,
                                   loop=False,
                                   mul=self.amplitude)

    elif self.server_type == 'jack':
        # attenuate amplitude
        audio = audio * self.amplitude
        self.duration = float(audio.shape[0]) / fs
        # resample to match our audio server's sampling rate
        if fs != self.fs:
            new_samples = int(self.duration * self.fs)  # resample() needs an integer sample count
            audio = resample(audio, new_samples)
        self.table = audio

    self.initialized = True
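# int_to_float above is a project helper; a plausible sketch of what such a
# conversion does (the actual implementation may differ) is to scale integer
# PCM samples into the [-1.0, 1.0] float range expected by the audio server:
import numpy as np

def int_to_float_sketch(audio):
    scale = np.iinfo(audio.dtype).max   # e.g. 32767 for int16
    return audio.astype(np.float64) / scale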
def __init__(self) -> None:
    self.fader = pyo.Fader(fadein=0.005, fadeout=self._fadeout_time)
    self.trigger1 = pyo.Trig()
    self._mul = self.max_vol
    self.spatialisation = [[pyo.Sig(0) for _ in range(4)] for _ in range(2)]
    self.table_reads = [
        pyo.TableRead(table, freq=table.getRate())
        for table in self.snd_tables[0]
    ]
    self.processed_tables = [
        pyo.ButLP(table_read, freq=6000) for table_read in self.table_reads
    ]
    self.spatialised_tables = [
        pyo.Mix(signal, voices=4,
                mul=[self.fader * spat_value for spat_value in spat])
        for signal, spat in zip(self.processed_tables, self.spatialisation)
    ]
    self.generator = self.spatialised_tables[0] + self.spatialised_tables[1]
def __init__(self, input=None, buf_size=SAMPLE_RATE//4, overlap=0, patience=None,
             buffer_count=2, mul=1, add=0):
    """
    Parameters
    ----------
    input : PyoObject
        Parent PyoObject (stub)
    buf_size : int
        Number of samples per buffer
    overlap : int
        Number of overlapping samples between adjacent buffers
    """
    pyo.PyoObject.__init__(self, mul, add)
    self.input = input
    self.buf_size = buf_size
    self.overlap = overlap
    self.patience = patience
    self.fifo = Queue()
    self.is_tfilling = True    # filling tables (upon initial play)
    self.is_qfilling = False   # filling queue (until patience reached)
    self.is_ready = False      # ready to play
    self.is_playing = False    # should be playing when ready

    # Tables and table readers that process grains of audio
    self.curr_buf = 0
    assert overlap <= buf_size / 2
    self.buffer_count = buffer_count
    if self.patience is None:
        self.patience = self.buffer_count
    self.tables = [pyo.DataTable(buf_size) for _ in range(self.buffer_count)]
    self.faders = [pyo.Fader(fadein=overlap/SAMPLE_RATE,
                             fadeout=overlap/SAMPLE_RATE,
                             dur=buf_size/SAMPLE_RATE,
                             mul=mul)
                   for _ in range(self.buffer_count)]
    self.oscs = [pyo.TableRead(t, freq=t.getRate(), mul=f)
                 for t, f in zip(self.tables, self.faders)]
    self.sum = reduce(lambda a, b: a + b, self.oscs) + add

    # Timing mechanism to coordinate the tables
    self.p_metros = [pyo.Metro(time=(self.buffer_count * (buf_size - overlap) / SAMPLE_RATE))
                     for i in range(self.buffer_count)]
    self.p_trigs = [pyo.TrigFunc(m, self._play_table, arg=i)
                    for i, m in enumerate(self.p_metros)]
    self.l_trigs = [pyo.TrigFunc(tbr['trig'], self._load_table, arg=i)
                    for i, tbr in enumerate(self.oscs)]
    self._base_objs = self.sum.getBaseObjects()
def loadFiles(self):
    dir = os.getcwd()
    for path, dirs, files in os.walk(dir):
        # ignore directories whose name starts with '_'
        # (iterate over a copy so pruning dirs in place is safe)
        for d in list(dirs):
            if d.startswith('_'):
                dirs.remove(d)
        for f in files:
            if len(self.filenames) >= MIDI_MAX_NOTE:
                break
            if os.path.splitext(f)[1].lower() in ['.wav', '.aif', '.aiff', '.flac']:
                print(f, "added")
                self.filenames.append(os.path.join(path, f))
    self.filenames.sort()
    for f in self.filenames:
        self.tables.append(pyo.SndTable(f))
        self.readers.append(
            pyo.TableRead(table=self.tables[-1], freq=self.tables[-1].getRate()))
def __init__(self, sound='01-181117_1724.wav'):
    self.sound = sound
    self.table = pyo.SndTable(path=self.sound, stop=5, chnl=0, initchnls=0).normalize()
    self.read = pyo.TableRead(self.table, loop=0).play()
def __init__(self, path):
    self.data, _ = librosa.load(path, sr=SAMPLE_RATE)
    self.table = pyo.DataTable(self.data.shape[0])
    self.player = pyo.TableRead(self.table, freq=self.table.getRate())
    self.table.replace(list(self.data))
import pyo
import numpy as np
from numpy.random import choice
import random

s = pyo.Server().boot()

table = pyo.SndTable('feu.wav')
dur = table.getDur()
read = pyo.TableRead(table, 1 / dur, 1).out()


def out(mul=1):
    vol = mul
    read.setMul(vol)
    return read

# pat = pyo.Pattern(out, .5, 0).play()

s.gui(locals())
def i_spectral_pyo(xv, yv):
    # As i_spectral2 but uses pyo as audio engine
    '''
    # How to use the function
    # NOTE: there is an instability between pyo and matplotlib when using the
    # wx-based GUI - graphics MUST be set to False when running in a jupyter notebook
    import numpy as np
    import sys, os, re, time
    sys.path.append('/Users/marco/Dropbox (Personal)/Musica/Applications/musicntwrk')
    from sonifiPy import *
    path = './'
    infile = 'DOSCAR.dat'
    xv, y = r_1Ddata(path, infile)
    s, a = i_spectral_pyo(xv, y[0], graphics=True)
    s.start()
    time.sleep(5)
    s.stop()
    '''
    nlines = xv.shape[0]
    # shrink nbins until it is a nonzero power of two no larger than nlines
    nbins = int(np.sqrt(nlines) - np.sqrt(nlines) % 1) ** 2
    while nbins > nlines or not (nbins != 0 and ((nbins & (nbins - 1)) == 0)):
        nbins = int((np.sqrt(nbins) - 1) ** 2)
    yfft = np.zeros(nbins, dtype=int)
    for n in range(nbins):
        yfft[n] = n + 1
    xminf = xv[0]
    xmaxf = xv[-1]
    xvf = np.asarray(xv)
    xvs = (xv - xminf) / (xmaxf - xminf) * nbins
    for line in range(nlines):
        if xvs[line] >= nbins:
            xvs[line] = -1
        xvf[line] = yfft[int(xvs[line])]
    # Normalization of the data shape into MIDI velocity
    yminf = min(yv)
    ymaxf = max(yv)
    yvf = np.asarray(yv)
    yvf = (yv - yminf) / (ymaxf - yminf) * 127
    vel = np.zeros(nbins, dtype=float)
    nvel = 0
    for note in range(nbins):
        for line in range(nlines):
            if xvf[line] == yfft[note]:
                vel[nvel] = yvf[line]
                nvel = nvel + 1
                break
    velmax = max(vel)
    vel /= velmax
    # FFT for FIR filter
    ftvel = FFT.irfft(vel)
    ftvel = FFT.fftshift(ftvel)
    # start the pyo server
    s = po.Server().boot()
    # signal to filter
    sf = po.PinkNoise(.5)
    # FIR filter
    bs = ftvel.shape[0]
    # Create a table of length `buffer size`
    t = po.DataTable(size=bs)
    osc = po.TableRead(t)
    # Share the table's memory with a numpy array.
    arr = np.asarray(t.getBuffer())
    # assign ftvel to the table memory buffer
    arr[:] = ftvel
    # do the convolution
    a = po.Convolve(sf, table=t, size=t.getSize(), mul=.5).out()  # mix(2).out()
    return (s, a)
        return self.BufferInfo.decodedbuffer

    @property
    def lastFrame(self):
        return self.BufferInfo.lastFrame

    @lastFrame.setter
    def lastFrame(self, value):
        length = len(self.Buffer_Info.keys())
        print(length)
        if length != value:
            self.BufferInfo.lastFrame = value
            for key in range(value + 1, length):
                self.Buffer_Info.pop(key)


class AudioTable_Reader:
    ...


if __name__ == "__main__":
    from random import randint
    Server = pyo.Server().boot()
    INST = AudioTable("D:\\music\\mosesdt.mp3").decode()
    INST.seek(29)
    tablereader = pyo.TableRead(INST, INST.getRate()).out()
    Server.start()
    Server.setAmp(0.1)
    Server.gui(locals())
def _updateSnd(self):
    self.needsUpdate = False
    self._snd = pyo.TableRead(self._sndTable,
                              freq=self._sndTable.getRate(),
                              loop=self.loop,
                              mul=self.volume)