def playRegion( adata, rtype, rgnIdx ):
    """Play one analysis region of a song's audio.

    Looks up region ``rgnIdx`` of type ``rtype`` in the song analysis,
    slices that quantum out of ``adata``, encodes it to a temporary wav
    file and plays it with the external ``aplay`` tool.

    Args:
        adata:  audio-data object; indexable by a quantum, with an
                ``.analysis`` accepted by cluster.getRegionsOfType.
        rtype:  region type name (passed through to cluster.getRegionsOfType).
        rgnIdx: integer index into the regions of that type.
    """
    import subprocess
    import tempfile

    # Get this region quantum.
    q = cluster.getRegionsOfType( adata.analysis, rtype )[rgnIdx]

    # No in-process playback path: encode to a wav file and shell out.
    # A unique temp file (instead of the original hard-coded
    # /tmp/noplay.wav) avoids clobbering between concurrent runs and
    # /tmp symlink games; it is removed once playback finishes.
    aout = adata[ q ]
    fd, wavPath = tempfile.mkstemp( suffix='.wav' )
    os.close( fd )
    try:
        aout.encode( wavPath )
        # List-form argv (no shell) instead of os.system: no quoting or
        # injection issues if the path ever contains odd characters.
        subprocess.call( ['aplay', wavPath] )
    finally:
        os.remove( wavPath )
def playRegion(adata, rtype, rgnIdx):
    """Play the rgnIdx-th region of type rtype from adata.

    NOTE(review): this is a verbatim duplicate of the playRegion defined
    immediately above; being defined later, this copy is the one that
    wins at import time.  Consider removing one of them.
    """
    # Look up the requested quantum in the song analysis.
    regions = cluster.getRegionsOfType(adata.analysis, rtype)
    quantum = regions[rgnIdx]

    # No direct playback available right now: dump the audio slice to a
    # wav file and shell out to aplay.
    clip = adata[quantum]
    clip.encode('/tmp/noplay.wav')
    os.system('aplay /tmp/noplay.wav')
print 'Playing cluster %d (of %d), section %d (of %d)' % \ (clSection, clsec.nbClusters(), currSec, \ clsec.sizeOfCluster(clSection) ) currFn = clsec.getFilenameOfRegion(currSec) csong = getSongFromCache(currFn) # must be in the cache assert csong != None if warmUpCounter > 0: warmUpCounter -= 1 if warmUpCounter == 0: print 'MASHING...' else: # a) get the section audio rgnIdx = clsec.getSongRegionIdx(currSec) # an int allSectionsForSong = cluster.getRegionsOfType( \ csong.m_adata.analysis, 'sections' ) # array of quanta secAData = csong.m_adata[allSectionsForSong[rgnIdx]] # b) get the beat concat audio and merge (beatAData, indexBars) = addBarsToAudio( \ clInfo, csong.m_adata, allSectionsForSong[rgnIdx], \ indexBars ) # Let's not always mix, it can sound messy if np.random.random() < 0.25: secnResult = audio.mix( secAData, beatAData ) else: secnResult = beatAData #secAData = addBeatsToAudio( clInfo, csong.m_adata, \ # allSectionsForSong[rgnIdx] ) playAudioData( secnResult ) # todo: keep a history and don't go back too early?
print 'Playing cluster %d (of %d), section %d (of %d)' % \ (clSection, clsec.nbClusters(), currSec, \ clsec.sizeOfCluster(clSection) ) currFn = clsec.getFilenameOfRegion(currSec) csong = getSongFromCache(currFn) # must be in the cache assert csong != None if warmUpCounter > 0: warmUpCounter -= 1 if warmUpCounter == 0: print 'MASHING...' else: # a) get the section audio rgnIdx = clsec.getSongRegionIdx(currSec) # an int allSectionsForSong = cluster.getRegionsOfType( \ csong.m_adata.analysis, 'sections' ) # array of quanta secAData = csong.m_adata[allSectionsForSong[rgnIdx]] # b) get the beat concat audio and merge (beatAData, indexBars) = addBarsToAudio( \ clInfo, csong.m_adata, allSectionsForSong[rgnIdx], \ indexBars ) # Let's not always mix, it can sound messy if np.random.random() < 0.25: secnResult = audio.mix(secAData, beatAData) else: secnResult = beatAData #secAData = addBeatsToAudio( clInfo, csong.m_adata, \ # allSectionsForSong[rgnIdx] ) playAudioData(secnResult) # todo: keep a history and don't go back too early?