def audio_callback(outdata, frame_count, time_info, status):
    """Sounddevice audio callback: mix active voices into ``outdata``.

    Trims the voice list to MAX_POLYPHONY, renders the mix via the C
    audio module, applies software volume, optionally runs freeverb,
    and prunes voices the mixer marked as finished.
    """
    rmlist = []
    # Keep only the most recent MAX_POLYPHONY voices.
    gv.playingsounds = gv.playingsounds[-gv.MAX_POLYPHONY:]
    b = samplerbox_audio.mixaudiobuffers(
        gv.playingsounds, rmlist, frame_count, gv.FADEOUT, gv.FADEOUTLENGTH,
        gv.PRERELEASE, gv.SPEED, gv.SPEEDRANGE, gv.PITCHBEND, gv.PITCHSTEPS)
    # NOTE(review): with the scaling below enabled, a MIDI volume CC controls
    # this software gain in addition to ALSA; the original author noted that
    # full-volume sounddevice output might cause distortion.
    # if gv.USE_ALSA_MIXER == False:  # Use alsamixer's setvolume instead
    b *= (gv.global_volume * gv.volumeCC)
    if gv.USE_FREEVERB and gv.IS_DEBIAN:
        # b_verb aliases b (no copy): freeverb reads and writes the same
        # buffer in place through the two ctypes pointers.
        b_verb = b
        gv.ac.reverb.freeverbprocess(
            b_verb.ctypes.data_as(gv.ac.reverb.c_float_p),
            b.ctypes.data_as(gv.ac.reverb.c_float_p), frame_count)
        # if gv.USE_ALSA_MIXER == False:  # Use alsamixer's setvolume instead
        b_verb *= (gv.global_volume * gv.volumeCC)
    # Drop finished voices; a voice may already have been removed elsewhere,
    # so only the resulting ValueError is swallowed (was a bare except).
    for e in rmlist:
        try:
            gv.playingsounds.remove(e)
        except ValueError:
            pass
    outdata[:] = b.reshape(outdata.shape)
def AudioCallback(in_data, frame_count, time_info, status):
    """PyAudio callback: mix active voices, return interleaved int16 frames.

    Returns a ``(bytes, pyaudio.paContinue)`` tuple as required by the
    PyAudio stream-callback contract.
    """
    global playingsounds
    rmlist = []
    # Keep only the most recent MAX_POLYPHONY voices.
    playingsounds = playingsounds[-MAX_POLYPHONY:]
    b = samplerbox_audio.mixaudiobuffers(playingsounds, rmlist, frame_count,
                                         FADEOUT, FADEOUTLENGTH, SPEED)
    # Drop finished voices; only ValueError (already removed) is swallowed
    # (was a bare except).
    for e in rmlist:
        try:
            playingsounds.remove(e)
        except ValueError:
            pass
    b *= globalvolume
    odata = (b.astype(numpy.int16)).tostring()
    return (odata, pyaudio.paContinue)
def AudioCallback(outdata, frame_count, time_info, status):
    """Sounddevice callback: mix active voices into ``outdata`` in place."""
    global playingsounds
    rmlist = []
    # Keep only the most recent MAX_POLYPHONY voices.
    playingsounds = playingsounds[-MAX_POLYPHONY:]
    b = samplerbox_audio.mixaudiobuffers(playingsounds, rmlist, frame_count,
                                         FADEOUT, FADEOUTLENGTH, SPEED)
    # Drop finished voices; only ValueError (already removed) is swallowed
    # (was a bare except).
    for e in rmlist:
        try:
            playingsounds.remove(e)
        except ValueError:
            pass
    b *= globalvolume
    outdata[:] = b.reshape(outdata.shape)
def AudioCallback2(in_data, frame_count, time_info, status):
    """PyAudio callback for the second voice pool (``playingsounds2``).

    Unlike the int16 variant, volume is applied inside ``mixaudiobuffers``
    and the float buffer is returned without int16 conversion.
    """
    global playingsounds2
    rmlist = []
    # Keep only the most recent MAX_POLYPHONY voices.
    playingsounds2 = playingsounds2[-MAX_POLYPHONY:]
    b = samplerbox_audio.mixaudiobuffers(playingsounds2, rmlist, frame_count,
                                         FADEOUT, FADEOUTLENGTH, SPEED,
                                         globalvolume)
    # Drop finished voices; only ValueError (already removed) is swallowed
    # (was a bare except).
    for e in rmlist:
        try:
            playingsounds2.remove(e)
        except ValueError:
            pass
    # odata = (b.astype(numpy.int16)).tostring()
    # int16 conversion deliberately disabled: raw float frames are returned.
    odata = b.tostring()
    return (odata, pyaudio.paContinue)
def AudioCallback(outdata, frame_count, time_info, status):
    """Sounddevice callback with polyphony culling, arpeggiator, LFO,
    C effect filters, and LED-blink timing.

    Also serves as the timing source for ``counting``/``countdown`` and the
    LED blinker.
    """
    global counting, countdown
    overflow = len(gv.playingsounds) - MAX_POLYPHONY
    if overflow > 0:
        print("MAX_POLYPHONY %d exceeded with %d notes" % (MAX_POLYPHONY,
                                                           overflow))
        # NOTE(review): deleting by index while walking the same list shifts
        # later indices; kept exactly as the original to preserve behavior —
        # verify intent.
        for i in xrange(overflow + gv.playingbacktracks - 1):
            if gv.playingsounds[i].playingstopmode() != 3:  # let the backtracks be
                del gv.playingsounds[i]  # get it out of the system
    # Run the arpeggiator before sound generation to reduce timing issues
    # at chord/sequence changes.
    if arp.active and not counting:
        arp.process()
    # audio-module:
    rmlist = []
    b = samplerbox_audio.mixaudiobuffers(
        rmlist, frame_count, FADEOUT, FADEOUTLENGTH, SPEED, SPEEDRANGE,
        gv.PITCHBEND + LFO.VIBRvalue + PITCHCORR, PITCHSTEPS)
    for e in rmlist:
        try:
            # Keep track of backtrack/once status before removal.
            if e.sound.stopmode == 3 or e.sound.stopmode == -1:
                gv.playingnotes[e.note + (e.channel * gv.MTCHNOTES)] = []
            gv.playingsounds.remove(e)
        except:
            pass
    # Volume control and audio effects/filters.
    if not counting:
        LFO.process[LFO.effect]()
    # Exponential gain curve: linear doesn't sound natural (this may be
    # overly complicated too, though...).
    b *= (10 ** (LFO.TREMvalue * gv.volumeCC) - 1) / 9
    if Cpp.LFtype > 0:
        Cpp.c_filters.moog(b.ctypes.data_as(c_float_p),
                           b.ctypes.data_as(c_float_p), frame_count)
    if Cpp.AWtype > 0:
        Cpp.c_filters.autowah(b.ctypes.data_as(c_float_p),
                              b.ctypes.data_as(c_float_p), frame_count)
    if Cpp.DLYtype > 0:
        Cpp.c_filters.delay(b.ctypes.data_as(c_float_p),
                            b.ctypes.data_as(c_float_p), frame_count)
    if Cpp.FVtype > 0:
        Cpp.c_filters.reverb(b.ctypes.data_as(c_float_p),
                             b.ctypes.data_as(c_float_p), frame_count)
    outdata[:] = b.reshape(outdata.shape)
    # Use this callback as the timer for LED blinks.
    if gv.LEDblink and not counting:
        gv.LEDsblink()
    if counting:
        counting -= 1
    else:
        counting = countdown
def AudioCallback(outdata, frame_count, time_info, status):
    """Sounddevice callback with channel routing into ``outdata``.

    After mixing and volume, the stereo buffer may have one channel zeroed
    depending on ``currentInput``/``currentOutput`` — presumably routing a
    mono source to left-only or right-only output (TODO confirm semantics
    against the settings code).
    """
    global playingsounds
    rmlist = []
    # Keep only the most recent MAX_POLYPHONY voices.
    playingsounds = playingsounds[-MAX_POLYPHONY:]
    b = samplerbox_audio.mixaudiobuffers(playingsounds, rmlist, frame_count,
                                         FADEOUT, FADEOUTLENGTH, SPEED)
    # Drop finished voices; only ValueError (already removed) is swallowed
    # (was a bare except).
    for e in rmlist:
        try:
            playingsounds.remove(e)
        except ValueError:
            pass
    b *= globalvolume
    if currentInput == 0 and currentOutput < 2:
        # Explicit floor division (b.size is even): identical result in
        # Python 2, and avoids a float index under Python 3.
        frames = b.size // 2
        b = b.reshape(frames, 2)
        if currentOutput == 0:
            b[:, 1] = numpy.zeros(frames)  # silence right channel
        else:
            b[:, 0] = numpy.zeros(frames)  # silence left channel
    outdata[:] = b.reshape(outdata.shape)
def AudioCallback(outdata, frame_count, time_info, status):
    """Audio callback mixing voices with an optional backing/click track.

    With ``CHANNELS == 4`` it returns interleaved 4-channel int16 data in
    PyAudio callback style (main mix on channels 1/2, click on 3/4);
    otherwise it writes the 2-channel mix into ``outdata`` in place.
    NOTE(review): the two branches use different callback conventions
    (return tuple vs. outdata) — kept as found.
    """
    global playingsounds, SampleLoading
    global BackingRunning
    global BackWav, BackIndex, ClickWav, ClickIndex
    global globalvolume, backvolume, clickvolume
    rmlist = []
    # print "sounds: " + str(len(playingsounds)) + " notes: " + str(len(playingnotes)) + " sust: " + str(len(sustainplayingnotes))
    # Keep only the most recent MAX_POLYPHONY voices.
    playingsounds = playingsounds[-MAX_POLYPHONY:]
    b = samplerbox_audio.mixaudiobuffers(playingsounds, rmlist, frame_count,
                                         FADEOUT, FADEOUTLENGTH, SPEED)
    # Drop finished voices; only ValueError (already removed) is swallowed
    # (was a bare except).
    for e in rmlist:
        try:
            playingsounds.remove(e)
        except ValueError:
            pass
    # b *= globalvolume
    if USE_FREEVERB:
        # b_temp aliases b: freeverb processes the buffer in place.
        b_temp = b
        freeverbprocess(b_temp.ctypes.data_as(c_float_p),
                        b.ctypes.data_as(c_float_p), frame_count)
    # IF USE_TONECONTOL
    # b = numpy.array(chain.filter(bb))
    # b=bb
    if CHANNELS == 4:  # 4 channel playback
        # If a backing track is running, pull the next stereo slices of the
        # backing and click tracks (2 samples per frame).
        if BackingRunning:
            BackData = BackWav[BackIndex:BackIndex + 2 * frame_count]
            ClickData = ClickWav[ClickIndex:ClickIndex + 2 * frame_count]
            BackIndex += 2 * frame_count
            ClickIndex += 2 * frame_count
            # Short read means the track ended: stop and rewind.
            if len(b) != len(BackData) or len(b) != len(ClickData):
                BackingRunning = False
                BackData = None
                BackIndex = 0
                ClickData = None
                ClickIndex = 0
        if BackingRunning:
            newdata = (backvolume * BackData + b * globalvolume)
            Click = ClickData * clickvolume
        else:
            Click = numpy.zeros(frame_count * 2, dtype=numpy.float32)
            newdata = b * globalvolume
        # Put both stereo streams into 4-channel audio by magic in
        # numpy reshape/hstack.
        a1 = newdata.reshape(frame_count, 2)
        a2 = Click.reshape(frame_count, 2)
        ch4 = numpy.hstack((a1, a2)).reshape(1, frame_count * 4)
        # Mute while loading a sample or backing track, otherwise there
        # could be dirty hick-ups.
        if SampleLoading or (BackLoadingPerc > 0 and BackLoadingPerc < 100):
            ch4 *= 0
        return (ch4.astype(numpy.int16).tostring(), pyaudio.paContinue)
    else:  # 2 Channel playback
        # If a backing track is running, mix it into the audio.
        if BackingRunning:
            BackData = BackWav[BackIndex:BackIndex + 2 * frame_count]
            BackIndex += 2 * frame_count
            # Short read means the track ended: stop and rewind.
            if len(b) != len(BackData):
                BackingRunning = False
                BackData = None
                BackIndex = 0
        if BackingRunning:
            newdata = (backvolume * BackData + b * globalvolume)
        else:
            newdata = b * globalvolume
        outdata[:] = newdata.reshape(outdata.shape)