def __init__(self, count):
    """Preload the send/receive IM notification samples.

    :param count: stored as ``self.even_odd`` — presumably a counter other
        methods use to alternate between the two players (TODO confirm).
    """
    self.even_odd = count
    # speed=[1, 0.995] creates two slightly detuned streams per player,
    # presumably for a subtle widening/chorus effect — verify against playback code.
    self.client_messaged_even = pyo.SfPlayer(
        imsend_sound_path, speed=[1, 0.995], loop=False, mul=0.4)
    self.client_messaged_odd = pyo.SfPlayer(
        imrcv_sound_path, speed=[1, 0.995], loop=False, mul=0.4)
def __init__(self, filename, loop=False):
    """Load *filename* with whichever audio backend the module detected.

    :param filename: path of the sound file to load.
    :param loop: whether playback should loop (honoured by the pyo backend).
    """
    self._filename = filename
    self._is_playing = False
    self._lib = _lib
    self._lib_version = _lib_version
    self._loop = loop
    self._play_timestamp = 0
    self._play_count = 0
    self._thread = -1  # -1: no playback thread created yet
    # Fix: these were read below (`if not self._snd ...`) without ever being
    # assigned first, raising AttributeError on every instantiation unless a
    # class attribute happened to shadow them.
    self._snd = None
    self._ch = None
    # Lazily initialise the module-wide audio server on first use.
    if not _initialized:
        self._server = init()
    else:
        self._server = _server
    if _initialized and not isinstance(_initialized, Exception):
        if not self._lib and _lib:
            self._lib = _lib
            self._lib_version = _lib_version
        if not self._snd and self._filename:
            if self._lib == "pyo":
                self._snd = pyo.SfPlayer(safe_str(self._filename), loop=self._loop)
            elif self._lib == "pyglet":
                # streaming=False: fully decode into memory up front.
                snd = pyglet.media.load(self._filename, streaming=False)
                self._ch = pyglet.media.Player()
                self._snd = snd
            elif self._lib == "SDL":
                rw = sdl.SDL_RWFromFile(safe_str(self._filename, "UTF-8"), "rb")
                # Second argument 1 asks SDL_mixer to free the RWops for us.
                self._snd = self._server.Mix_LoadWAV_RW(rw, 1)
            elif self._lib == "wx":
                self._snd = wx.Sound(self._filename)
def load_sample(self, path: str, sample_pos: int):
    """Load a sample file into the given sampler slot.

    :param path: path to sample file
    :param sample_pos: position in sampler to load sample to
    :raises NotFoundException: if *path* does not point to an existing file
    """
    if os.path.isfile(path):
        self.samples[sample_pos] = pyo.SfPlayer(path, speed=1, loop=False)
    else:
        raise NotFoundException(f'Sample {path} not found.')
def playback5(self, file_path=icq_uh_oh):
    """Play *file_path* once, panned to a random stereo position.

    :param file_path: sound file to play (defaults to the module-level
        ``icq_uh_oh`` sample path).
    """
    print('pb5')
    # Fix: removed `select = random.random()` — the value was never used.
    snd = pyo.SfPlayer(file_path, speed=1, loop=False, mul=5)
    # Keep a local reference so the pyo processing graph stays alive
    # for the duration of the sleep below.
    panner = pyo.Pan(snd, outs=2, pan=random.random(), spread=random.random()).out()
    time.sleep(1.5)
def __init__(self, pack, amp=1.):
    """Pack should simply be a list of filenames.  pyo.SfPlayer objects
    are created upon instantiation, so multiple Sampler instances can
    own independent bitstreams.
    """
    # One dual-stream player (speed=[1, 1]) per file in the pack.
    self.notes = [pyo.SfPlayer(sample_file, speed=[1, 1]) for sample_file in pack]
    self.Amp = amp
def render(self):
    """Render the configured waveguide/filter patch offline to disk.

    Boots an offline pyo server, records for exactly the duration of the
    source sound file, and writes the mix to ``self.complete_output_path``.
    """
    # Offline server renders to file instead of playing in real time.
    s = pyo.Server(audio="offline")
    s.boot()
    # sndinfo[1] is the source duration in seconds — record exactly that long.
    sndinfo = pyo.sndinfo(self.path)
    s.recordOptions(filename=self.complete_output_path, dur=sndinfo[1])
    source = pyo.SfPlayer(self.path)
    # Exponential amplitude envelopes built from (time, level) breakpoints.
    source_envelope = pyo.Expseg(list(
        zip(self.source_envelope.times, self.source_envelope.levels)), exp=5)
    waveguide_envelope = pyo.Expseg(
        list(
            zip(self.waveguide_envelope.times, self.waveguide_envelope.levels)),
        exp=5,
    )
    filter_envelope = pyo.Expseg(
        list(zip(self.filter_envelope.times, self.filter_envelope.levels)),
        exp=5,
    )
    # Filter Q moves linearly rather than exponentially.
    filter_q_envelope = pyo.Linseg(
        list(
            zip(self.filter_q_envelope.times, self.filter_q_envelope.levels)))
    waveguide = pyo.Waveguide(
        source,
        freq=self.frequencies,
        minfreq=self.minfreq,
        mul=self.waveguide_mul,
        dur=self.waveguide_dur,
    )
    # 4-stage resonant bandpass tuned to the same frequencies as the waveguide.
    bandpass = pyo.Resonx(
        waveguide,
        self.frequencies,
        q=filter_q_envelope,
        stages=4,
        mul=self.bandpass_mul,
    )
    # Sine LFO rescaled to 0..1 (mul/add) drives the stereo pan position.
    lfo = pyo.Sine(freq=self.pan_lfo_frequency, mul=0.5, add=0.5)
    bandpass_pan = pyo.Pan(bandpass, outs=2, pan=lfo)
    # Envelopes must be started explicitly before the render begins.
    filter_q_envelope.play()
    filter_envelope.play()
    source_envelope.play()
    waveguide_envelope.play()
    # Three layers summed at the output: filtered+panned, raw waveguide, dry source.
    (bandpass_pan * filter_envelope).out()
    (waveguide * waveguide_envelope).out()
    (source * source_envelope).out()
    # With audio="offline", start() performs the actual render to disk.
    s.start()
def walker(graph, args, length=10):
    """Random walk through a component of the sound graph, playing as we go"""
    num, component = args
    component_size = len(component)
    print('{}--{}'.format(num, component_size))
    # Walk at least 5 steps, otherwise proportional to the component size.
    origin = np.random.choice(list(component))
    walk_files = islice(random_walk(graph, origin), max(5, component_size // length))
    walk_files = printer(walk_files, '~~~~{}~'.format(num))
    # Resolve each walked filename relative to the input directory.
    base_dir = os.path.dirname(sys.argv[1])
    samples = (os.path.join(base_dir, rel_path) for rel_path in walk_files)
    player = pyo.SfPlayer(next(samples), mul=0.1)
    # Keep the trigger alive so the looper fires when each sample finishes.
    trig = pyo.TrigFunc(player['trig'], make_looper(samples, player))
    player.out()
    while player.isPlaying():
        time.sleep(1)
def playback4(self, file_path=imsend_sound_path):
    """Play *file_path* through a large reverb, randomly panned, and wait
    for the reverb tail before stopping."""
    print('pb4')
    in_position = random.random()
    sample = pyo.SfPlayer(file_path, speed=1, loop=False, mul=5)
    reverb = pyo.STRev(sample, inpos=in_position, revtime=4, cutoff=5000,
                       bal=1, roomSize=4)
    # Keep a reference so the graph stays alive while we sleep.
    spatialized = pyo.Pan(reverb, outs=2, pan=random.random(),
                          spread=random.random()).out()
    # Let the full reverb tail play out before silencing it.
    time.sleep(reverb.revtime + .01)
    reverb.stop()
def importSoundfiles(dirpath='./', filepath='./', mult=0.1, gain=1.0):
    """Create a stopped SfPlayer for every file matching ``dirpath + filepath``.

    :param dirpath: directory prefix for the glob pattern.
    :param filepath: glob pattern appended to *dirpath*.
    :param mult: base amplitude for each player.
    :param gain: extra gain factor multiplied into *mult*.
    :return: tuple ``(players, filenames, mult)``; both lists are empty when
        nothing matches or loading fails.
    """
    # Fixes: glob was evaluated three times; manual index bookkeeping replaced
    # by a simple loop; bare `except:` narrowed to Exception (still best-effort,
    # so a partial result is returned rather than raising).
    obj = []
    fil = []
    try:
        fil = glob.glob(dirpath + filepath)
        for file in fil:
            # .stop() returns the player itself, pre-loaded but silent.
            obj.append(po.SfPlayer(file, mul=mult * gain).stop())
    except Exception:
        print('error in file reading')
    return (obj, fil, mult)
def listen(device_num, initial_files):
    """Start listening for MIDI.

    Non-blocking: boots the audio server, builds one stopped player per
    note, routes the mix through compression and reverb, and attaches the
    MIDI listener thread.

    :param device_num: MIDI input device index.
    :param initial_files: mapping of note -> sound file path.
    :return: ``(players, server)`` so the caller can keep both alive.
    """
    audio_server = pyo.Server(nchnls=1)
    audio_server.deactivateMidi()
    audio_server.boot().start()
    # One pre-loaded, silent player per note.
    players = {}
    for note, path in initial_files.items():
        players[note] = pyo.SfPlayer(path, mul=.1).stop()
    # Mixdown -> compressor -> reverb -> output.
    processed = pyo.Freeverb(pyo.Compress(pyo.Mix(list(players.values())))).out()
    listener = pyo.MidiListener(make_callback(players, processed), device_num)
    listener.start()
    return players, audio_server
def init_mixer(self, identifier, file_path, loop=False, amp=0.1, channels=None):
    """Create a file player routed through its own mixer and start playback.

    :param identifier: key under which the mixer/player are registered.
    :param file_path: sound file to load.
    :param loop: whether the player should loop.
    :param amp: amplitude applied to every mixer output.
    :param channels: optional channel routing forwarded to ``set_mixer``.
    """
    player = pyo.SfPlayer(file_path, loop=loop)
    mixer = pyo.Mixer(outs=self.channels, chnls=1)
    mixer.addInput(0, player)
    # Same amplitude on every output channel.
    for out_channel in range(self.channels):
        mixer.setAmp(0, out_channel, amp)
    self.mixers[identifier] = mixer
    self.files[identifier] = player
    if channels is not None:
        self.set_mixer(identifier, channels)
    # Route the mixer to the output before starting the source.
    mixer.out()
    player.play()
# Worker script: plays a looped sample through pitch/delay effects and
# modulates them from integer readings popped off a redis queue.
from time import sleep
import pyo
import json
import redis

with open('config.json', 'r') as fp:
    config = json.load(fp)
r = redis.Redis(host='localhost', port=6379, db=0)
server = pyo.Server().boot()
sf = pyo.SfPlayer(config['wav_file'], loop=True)  # replace <filename>
# Effect chain: all effects read the frequency-shifted signal and go
# straight to the output.
sh = pyo.FreqShift(sf).out()
hr = pyo.Harmonizer(sh).out()
ch = pyo.Chorus(sh).out()
dly = pyo.Delay(sh).out()
server.start()
# Poll the queue forever; idle-sleep when it is empty.
while True:
    temp = r.rpop('temp')
    if temp is None:
        print("Queue is empty")
        sleep(1)
    else:
        print(f'got {temp} on a worker')
        mod = (int(temp) - 19000) / 1000  # range between -14 and 14
        print(f'using mod {mod}')
        # Map the reading onto harmonizer transposition and delay time.
        hr.transpo = mod - 7
        print(f"set transpo to {hr.transpo}")
        dly.delay = (mod + 14) * 0.05
        print(f"set delay to {dly.delay}")
def __init__(self):
    """Preload the 'client left' exit notification sample."""
    # speed=[1, 0.995] creates two slightly detuned streams, presumably
    # for a subtle widening effect — same pattern as the other players.
    self.client_left = pyo.SfPlayer(exit_sound_path, speed=[1, 0.995],
                                    loop=False, mul=0.4)
# starting server SERVER.boot() import gender_player import midi # making final mixer MIXER = pyo.Mixer(outs=8, chnls=1) logging.info("getting inputs") if SIMULATION_VERSE: VERSE_PATH = "{}/{}".format(settings.SIMULATION_PATH, SIMULATION_VERSE) INPUTS = { instrument: pyo.SfPlayer( "{}/{}/synthesis.wav".format(VERSE_PATH, instrument), mul=0.7, ) for instrument, _ in settings.INPUT2INSTRUMENT_MAPPING.items() if instrument != "pianoteq" } INPUTS.update({"pianoteq": pyo.Input(3)}) else: INPUTS = { instrument: pyo.Input(channel).play() for instrument, channel in settings.INPUT2INSTRUMENT_MAPPING.items() } logging.info("adding meter for inputs") INPUT_METER = loclog.Meter(*INPUTS.items())
text = row[1] filename = "text_" + key keybinds[key] = filename + ".aif" print("Loading %s... [%s]" % (key, text)) output = gTTS(text=text, lang=language, slow=False) output.save(filename + ".mp3") os.system("ffmpeg -hide_banner -loglevel panic -y -i %s.mp3 %s.aif" % (filename, filename)) os.remove(filename + ".mp3") s = pyo.Server(nchnls=2) s.boot().start() sf_players = (pyo.SfPlayer(path='text_q.aif', mul=0.5), pyo.SfPlayer(path='text_q.aif', mul=0.5)) def play_file(key): print("playing " + key) sf_players[0].setSound(keybinds[key]) sf_players[1].setSound(keybinds[key]) sf_players[0].out(0) sf_players[1].out(1) running = True def stop(): global running
# Worker script variant: looped sample through chorus + harmonizer,
# polling a redis queue for temperature readings.
from time import sleep
import pyo
import json
import redis

with open('config.json', 'r') as fp:
    config = json.load(fp)
r = redis.Redis(host='localhost', port=6379, db=0)
server = pyo.Server().boot()
sf = pyo.SfPlayer("zoomsamples/lastdrop.wav", loop=True)
# Both effects read the raw player and go straight to the output.
ch = pyo.Chorus(sf).out()
hr = pyo.Harmonizer(sf).out()
server.start()
room_temp = None
temp = None
# Poll the queue forever; idle-sleep when it is empty.
while True:
    temp = r.rpop('temp')
    if temp is None:
        #print("Queue is empty")
        sleep(1)
    else:
        print(f'got {temp} on a worker')
def __init__(self, master=None, experiment=None, logger=None):
    """Build the experiment GUI, boot the pyo audio/MIDI server, preload
    stimuli, and wire up all sequencing triggers.

    :param master: parent Tk widget, passed through to ``Frame.__init__``.
    :param experiment: list of trial definitions; a fresh empty list is
        used when omitted.
    :param logger: logger object stored for use by other methods.
    """
    ###########################
    # INIT EXPERIMENT
    ###########################
    # Fix: the original signature used `experiment=[]`, a mutable default
    # shared between all instances constructed without an argument.
    self.experiment = [] if experiment is None else experiment
    ###########################
    # INIT LOGGING
    ###########################
    self.logger = logger
    self.currentTrial = 0
    self.currentBlock = 0
    self.blockType = 0
    self.mouse = 0
    ###########################
    # INIT TIMING
    ###########################
    # self.t = timr(1, self.runexperiment)  # placeholder to make sure the variable exists
    self.timers = []
    ###########################
    # INIT VISUAL
    ###########################
    self.waitForRatingAnswer = False
    self.waitForRatingAnswer2 = False
    self.numRects = 4
    # list() so the rectangles can later be replaced by index if needed.
    self.rects = list(range(self.numRects))
    self.screenWidth = 640
    self.screenHeight = 480
    Frame.__init__(self, master)
    self.grid()
    self.userPrompt = StringVar()
    # moved these up here so they only happen once
    pianoimage = Image.open(KEYBOARD_IMAGE)
    self.pianoImage = ImageTk.PhotoImage(pianoimage)
    sliderimage = Image.open(SLIDER_IMAGE)
    self.sliderImage = ImageTk.PhotoImage(sliderimage)
    self.fingerString = StringVar()
    self.qString = StringVar()
    self.countString = StringVar()
    self.create_GUI()
    ###########################
    # INIT AUDIO
    ###########################
    self.s = pyo.Server(buffersize=8, nchnls=1)
    # Before booting the server we could prompt the user to choose an input
    # device; the MIDI input device is hard-coded to 3 for now.
    # pyo.pm_list_devices()
    # self.choice = input("Which device will you choose?")
    # self.s.setMidiInputDevice(int(self.choice))
    self.s.setMidiInputDevice(int(3))
    self.s.boot()
    self.s.start()
    # test = pyo.LFO(freq=440.0).out()
    time.sleep(1)  # settling time
    # test.stop()
    # MIDI Stuff
    self.refnote = 72
    self.polynum = 4
    # Fix: was `range(self.polynum)` — a range object does not support item
    # assignment in Python 3, but the preload loop below writes by index.
    self.pianosound = [None] * self.polynum
    self.notes = pyo.Notein(poly=self.polynum, scale=0, mul=0.5)
    self.enablePlayback = False
    self.enableNoteLogging = False
    self.noteTrig = pyo.TrigFunc(self.notes['trigon'], self.onNoteon,
                                 range(self.polynum))
    # note trigger mixer
    self.trigmix = pyo.Mixer(1, self.polynum)
    for p in range(self.polynum):
        self.trigmix.addInput(p, self.notes['trigon'][p])
        self.trigmix.setAmp(p, 0, 1.0)
    self.polyNoteTrig = self.trigmix[0]
    global midikeymapping  # needs to be visible everywhere
    midikeymapping = 1  # set mapping to 1 to start with
    # preload sound files
    self.melodies = []
    self.extract = []
    for i in range(self.polynum):
        self.pianosound[i] = pyo.SfPlayer(
            EXTRACT_DIR + PIANO_FILE[0], speed=1, loop=False, offset=0,
            interp=2, mul=1, add=0)
    for fname in STIM_FILES:
        self.melodies.append(pyo.SfPlayer(STIM_DIR + fname, mul=0.5))
    for fname in EXTRACT_FILES:
        self.extract.append(pyo.SfPlayer(EXTRACT_DIR + fname, mul=0.5))
    self.metronome = pyo.SfPlayer(EXTRACT_DIR + METRO_FILE[0], mul=0.5)
    # prepare sequence and timing triggers
    # metroSeq launches the metronome
    self.trialMetroSeq = pyo.Seq(time=NOTEDUR / 1000.0, seq=[3, 3, 3, 1],
                                 poly=1, onlyonce=True, speed=1)
    self.expectedKeySeq = pyo.Seq(time=NOTEDUR / 1000.0, seq=[9, 1, 1, 1, 1],
                                  poly=1, onlyonce=True, speed=1)
    # trialStartTrigger will be manually launched when we want to start a trial
    self.trialStartTrigger = pyo.Trig().stop()
    self.warmuptrialStartTrigger = pyo.Trig().stop()
    self.dummyTrigger = pyo.Trig().stop()
    self.timerLogsEnabled = False
    # eventTimer will measure the time between trial events.
    # It is initially triggered by the trial start, but will later be
    # switched to measure between note events.
    self.trialEventTimer = pyo.Timer(self.polyNoteTrig, self.trialStartTrigger)
    self.expectedEventTimer = pyo.Timer(self.expectedKeySeq, self.expectedKeySeq)
    self.timerMeasurement = pyo.DataTable(1)
    self.lastTimerMeasurement = 0.0
    self.expectedMeasurement = pyo.DataTable(1)
    self.lastExpectedMeasurement = 0.0
    self.measurementRecorder = pyo.TablePut(self.trialEventTimer,
                                            self.timerMeasurement).play()
    self.expectedRecorder = pyo.TablePut(self.expectedEventTimer,
                                         self.expectedMeasurement).play()
    self.resetAtStim = False
    # triggers for the optimized stim delivery
    self.t1 = pyo.TrigFunc(self.trialStartTrigger, self.playAudioExtract)
    self.t2 = pyo.TrigFunc(self.trialStartTrigger, self.trialMetroSeq.out)
    self.t2b = pyo.TrigFunc(self.trialStartTrigger, self.expectedKeySeq.out)
    self.t3 = pyo.TrigFunc(self.trialMetroSeq, self.playMetronome)
    # self.t3 = pyo.TrigFunc(self.trialMetroSeq,self.metronome.out)
    self.t4 = pyo.TrigFunc(self.polyNoteTrig, self.noteTiming)
    self.t5 = pyo.TrigFunc(self.expectedKeySeq, self.expectedTiming)
    # triggers for the optimized stim delivery in training
    # self.t1 = pyo.TrigFunc(self.warmuptrialStartTrigger,self.playAudioExtract)
    self.t6 = pyo.TrigFunc(self.warmuptrialStartTrigger, self.trialMetroSeq.out)
    self.t7 = pyo.TrigFunc(self.warmuptrialStartTrigger, self.expectedKeySeq.out)
    # self.t3 = pyo.TrigFunc(self.trialMetroSeq,self.playMetronome)
    # self.t3 = pyo.TrigFunc(self.trialMetroSeq,self.metronome.out)
    # self.t4 = pyo.TrigFunc(self.notes['trigon'],self.noteTiming)
    # self.t5 = pyo.TrigFunc(self.expectedKeySeq,self.expectedTiming)
    ###########################
    # INIT INPUT DEVICES
    ###########################
    self.set_keybinds()
    self.waitForSpacebar = True
    ############################
    self.enableAudioFeedback = False
    self.QUIT = False
    self.pause = False