def shift_octaves_and_play(self, audio_quantum, octaves):
    import numpy as np
    from pypitch import PyPitch
    ad = audio_quantum.render()
    new_data = PyPitch.shiftPitchOctaves(ad.data, octaves)
    self.stream.write(new_data.astype(np.int16).tostring())
def shift_octaves_and_play(self, audio_quantum, octaves): """ Takes an echonest.remix.audio.AudioQuantum and a number of octaves to shift by. It first shifts the AudioQuantum's semitones by the specified input using pypitch and then writes the modified data to the stream. """ import numpy as np from pypitch import PyPitch ad = audio_quantum.render() # gets AudioData object for AudioQuantum new_data = PyPitch.shiftPitchOctaves( ad.data, octaves) # shifts octaves using pypitch self.stream.write(new_data.astype( np.int16).tostring()) # writes data to stream
import echonest.remix.audio as audio
from pypitch import PyPitch

def main(input, semitones):
    track = audio.LocalAudioFile(input)
    collect = []
    for section in track.analysis.sections:
        section_data = section.render().data
        # shift each section's pitch by the requested number of semitones
        new_data = PyPitch.shiftPitchSemiTones(section_data, semitones)
        ts = audio.AudioData(ndarray=new_data, shape=new_data.shape,
                             sampleRate=track.sampleRate,
                             numChannels=new_data.shape[1])
        collect.append(ts)
    # stitch the shifted sections back together and write an mp3,
    # e.g. song_u3.mp3 for a shift of +3 or song_d3.mp3 for -3
    out = audio.assemble(collect, numChannels=2)
    out.encode(input.split('.')[0] + '_' +
               ('d' if semitones < 0 else 'u') + str(abs(semitones)) + '.mp3')
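# Hypothetical command-line entry point for main(); the argument handling and
# script name are assumptions, not part of the original code.
if __name__ == '__main__':
    import sys
    try:
        input_filename = sys.argv[1]
        semitones = int(sys.argv[2])
    except (IndexError, ValueError):
        print('Usage: python pitchshift.py <input_mp3> <semitones>')
        sys.exit(1)
    main(input_filename, semitones)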
def shift_and_play(self, audio_quantum, ratio, semitones):
    import numpy as np
    import dirac
    from pypitch import PyPitch
    ad = audio_quantum.render()
    # time-stretch by ratio with dirac, then pitch-shift by semitones
    new_data = PyPitch.shiftPitchSemiTones(dirac.timeScale(ad.data, ratio), semitones)
    self.stream.write(new_data.astype(np.int16).tostring())
def shift_semitones_and_play(self, audio_quantum, semitones):
    import numpy as np
    from pypitch import PyPitch
    ad = audio_quantum.render()
    # pitch-shift by the given number of semitones, leaving duration unchanged
    new_data = PyPitch.shiftPitchSemiTones(ad.data, semitones)
    self.stream.write(new_data.astype(np.int16).tostring())
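# Hypothetical driver tying the pieces together: load a track with
# echonest.remix and play every beat pitch-shifted through the player methods
# above. The Player class is the sketch shown earlier, and placing these
# methods on it is an assumption.
import echonest.remix.audio as audio

def play_shifted(input_filename, semitones=3):
    track = audio.LocalAudioFile(input_filename)
    player = Player(sample_rate=track.sampleRate)
    try:
        for beat in track.analysis.beats:
            player.shift_semitones_and_play(beat, semitones)
    finally:
        player.close()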