def runTranscribe(show=True, plot=True, useMic=True,
                  seconds=20.0, useScale=None, saveFile=True):
    '''
    runs all the methods to record from audio for `seconds` length
    (default 20.0) and transcribe the resulting melody returning a
    music21.Score object

    if `show` is True, show the stream.

    if `plot` is True then a Tk graph of the frequencies will be displayed.

    if `useMic` is True then use the microphone.  If False it will load
    the file of `saveFile` or the default temp file to run transcriptions from.

    a different scale besides the chromatic scale can be specified by setting
    `useScale`. See :ref:`moduleScale` for a list of allowable scales.
    (or a custom one can be given). Microtonal scales are totally accepted,
    as are retuned scales where A != 440hz.

    if `saveFile` is False then the recorded audio is not saved to disk.
    If set to `True` then `environLocal.getRootTempDir() + os.path.sep + 'ex.wav'`
    is used as the filename.  If set to anything else then it will use
    that as the filename.
    '''
    from music21 import audioSearch as audioSearchBase

    if useScale is None:
        useScale = scale.ChromaticScale('C4')

    # beginning -- decide whether (and where) the recording is stored on disk.
    # `saveFile` is a tri-state: False (discard), True (default temp file),
    # anything else (explicit filename) -- so identity checks, not truthiness.
    if saveFile is not False:
        if saveFile is True:
            WAVE_FILENAME = environLocal.getRootTempDir() + os.path.sep + 'ex.wav'
        else:
            WAVE_FILENAME = saveFile
    else:
        WAVE_FILENAME = False

    # the rest of the score: acquire raw frequencies either live or from disk
    if useMic is True:
        freqFromAQList = audioSearchBase.getFrequenciesFromMicrophone(
            length=seconds, storeWaveFilename=WAVE_FILENAME)
    else:
        freqFromAQList = audioSearchBase.getFrequenciesFromAudioFile(
            waveFilename=WAVE_FILENAME)

    # frequency list -> pitch estimates -> music21 objects -> notes + durations
    detectedPitchesFreq = audioSearchBase.detectPitchFrequencies(freqFromAQList, useScale)
    detectedPitchesFreq = audioSearchBase.smoothFrequencies(detectedPitchesFreq)
    (detectedPitchObjects, listplot) = audioSearchBase.pitchFrequenciesToObjects(
        detectedPitchesFreq, useScale)
    (notesList, durationList) = audioSearchBase.joinConsecutiveIdenticalPitches(
        detectedPitchObjects)
    myScore, unused_length_part = audioSearchBase.notesAndDurationsToStream(
        notesList, durationList, removeRestsAtBeginning=True)

    if show:
        myScore.show()

    if plot:
        try:
            import matplotlib.pyplot  # for find
        except ImportError:
            raise audioSearchBase.AudioSearchException(
                "Cannot plot without matplotlib installed.")
        matplotlib.pyplot.plot(listplot)
        matplotlib.pyplot.show()
    environLocal.printDebug("* END")
    return myScore
def runGame(): useScale = scale.ChromaticScale('C4') roundNumber = 0 good = True gameNotes = [] print("Welcome to the music21 game!") print("Rules:") print( "The computer generates a note (and it will play them in the future).") print("The player has to play all the notes from the beginning.") time.sleep(2) print("3, 2, 1 GO!") nameNotes = ["A", "B", "C", "D", "E", "F", "G"] while (good == True): randomNumber = random.randint(0, 6) octaveNumber = 4 # I can put a random number here... fullNameNote = "%s%d" % (nameNotes[randomNumber], octaveNumber) gameNotes.append(note.Note(fullNameNote)) roundNumber = roundNumber + 1 print("ROUND %d" % roundNumber) print("NOTES UNTIL NOW: (this will not be shown in the final version)") for k in range(len(gameNotes)): print(gameNotes[k].fullName) seconds = 2 * roundNumber + 2 freqFromAQList = base.getFrequenciesFromMicrophone( length=seconds, storeWaveFilename=None) detectedPitchesFreq = base.detectPitchFrequencies( freqFromAQList, useScale) detectedPitchesFreq = base.smoothFrequencies(detectedPitchesFreq) (detectedPitchObjects, unused_listplot) = base.pitchFrequenciesToObjects( detectedPitchesFreq, useScale) (notesList, unused_durationList ) = base.joinConsecutiveIdenticalPitches(detectedPitchObjects) j = 0 i = 0 while i < len(notesList) and j < len(gameNotes) and good == True: if notesList[i].name == "rest": i = i + 1 elif notesList[i].name == gameNotes[j].name: i = i + 1 j = j + 1 else: print("WRONG NOTE! You played", notesList[i].fullName, "and should have been", gameNotes[j].fullName) good = False if good == True and j != len(gameNotes): good = False print("YOU ARE VERY SLOW!!! PLAY FASTER NEXT TIME!") if good == False: print("GAME OVER! TOTAL ROUNDS: %d" % roundNumber)
def game(self): self.round = self.round + 1 print("self.round %d" % self.round) # print "NOTES UNTIL NOW: (this will not be shown in the final version)" # for k in range(len(self.gameNotes)): # print self.gameNotes[k].fullName seconds = 2 + self.round freqFromAQList = base.getFrequenciesFromMicrophone( length=seconds, storeWaveFilename=None) detectedPitchesFreq = base.detectPitchFrequencies( freqFromAQList, self.useScale) detectedPitchesFreq = base.smoothFrequencies(detectedPitchesFreq) (detectedPitchObjects, unused_listplot) = base.pitchFrequenciesToObjects( detectedPitchesFreq, self.useScale) (notesList, unused_durationList ) = base.joinConsecutiveIdenticalPitches(detectedPitchObjects) j = 0 i = 0 while i < len(notesList) and j < len( self.gameNotes) and self.good == True: if notesList[i].name == "rest": i = i + 1 elif notesList[i].name == self.gameNotes[j].name: i = i + 1 j = j + 1 else: print("WRONG NOTE! You played", notesList[i].fullName, "and should have been", self.gameNotes[j].fullName) self.good = False if self.good == True and j != len(self.gameNotes): self.good = False print("YOU ARE VERY SLOW!!! PLAY FASTER NEXT TIME!") if self.good == False: print("YOU LOSE!! HAHAHAHA") else: while i < len(notesList) and notesList[i].name == "rest": i = i + 1 if i < len(notesList): self.gameNotes.append(notesList[i]) #add a new note print("WELL DONE!") else: print("YOU HAVE NOT ADDED A NEW NOTE! REPEAT AGAIN NOW") self.round = self.round - 1 return self.good
def runGame(): useScale = scale.ChromaticScale('C4') roundNumber = 0 good = True gameNotes = [] print("Welcome to the music21 game!") print("Rules:") print("The computer generates a note (and it will play them in the future).") print("The player has to play all the notes from the beginning.") time.sleep(2) print("3, 2, 1 GO!") nameNotes = ["A", "B", "C", "D", "E", "F", "G"] while(good == True): randomNumber = random.randint(0, 6) octaveNumber = 4 # I can put a random number here... fullNameNote = "%s%d" % (nameNotes[randomNumber], octaveNumber) gameNotes.append(note.Note(fullNameNote)) roundNumber = roundNumber + 1 print("ROUND %d" % roundNumber) print("NOTES UNTIL NOW: (this will not be shown in the final version)") for k in range(len(gameNotes)): print(gameNotes[k].fullName) seconds = 2 * roundNumber + 2 freqFromAQList = base.getFrequenciesFromMicrophone(length=seconds, storeWaveFilename=None) detectedPitchesFreq = base.detectPitchFrequencies(freqFromAQList, useScale) detectedPitchesFreq = base.smoothFrequencies(detectedPitchesFreq) (detectedPitchObjects, unused_listplot) = base.pitchFrequenciesToObjects(detectedPitchesFreq, useScale) (notesList, unused_durationList) = base.joinConsecutiveIdenticalPitches(detectedPitchObjects) j = 0 i = 0 while i < len(notesList) and j < len(gameNotes) and good == True: if notesList[i].name == "rest": i = i + 1 elif notesList[i].name == gameNotes[j].name: i = i + 1 j = j + 1 else: print("WRONG NOTE! You played", notesList[i].fullName, "and should have been", gameNotes[j].fullName) good = False if good == True and j != len(gameNotes): good = False print("YOU ARE VERY SLOW!!! PLAY FASTER NEXT TIME!") if good == False: print("GAME OVER! TOTAL ROUNDS: %d" % roundNumber)
def monophonicStreamFromFile(fileName, useScale=None): ''' Reads in a .wav file and returns a stream representing the transcribed, monophonic audio. `fileName` should be the complete path to a file on the disk. a different scale besides the chromatic scale can be specified by setting `useScale`. See :ref:`moduleScale` for a list of allowable scales. (or a custom one can be given). Microtonal scales are totally accepted, as are retuned scales where A != 440hz. We demonstrate with an audio file beginning with an ascending scale. >>> import os #_DOCS_HIDE >>> taw = 'test_audio.wav' #_DOCS_HIDE >>> waveFile = str(common.getSourceFilePath() / 'audioSearch' / taw) #_DOCS_HIDE >>> #_DOCS_SHOW waveFile = 'test_audio.wav' >>> p = audioSearch.transcriber.monophonicStreamFromFile(waveFile) >>> p <music21.stream.Part ...> >>> p.show('text') {0.0} <music21.note.Note C> {0.25} <music21.note.Note C> {0.75} <music21.note.Note D> {1.75} <music21.note.Note E> {2.75} <music21.note.Note F> {4.25} <music21.note.Note G> {5.25} <music21.note.Note A> {6.25} <music21.note.Note B> {7.25} <music21.note.Note C> ... ''' from music21 import audioSearch as audioSearchBase freqFromAQList = audioSearchBase.getFrequenciesFromAudioFile( waveFilename=fileName) detectedPitchesFreq = audioSearchBase.detectPitchFrequencies( freqFromAQList, useScale) detectedPitchesFreq = audioSearchBase.smoothFrequencies( detectedPitchesFreq) (detectedPitchObjects, unused_listplot) = audioSearchBase.pitchFrequenciesToObjects( detectedPitchesFreq, useScale) (notesList, durationList ) = audioSearchBase.joinConsecutiveIdenticalPitches(detectedPitchObjects) myScore, unused_length_part = audioSearchBase.notesAndDurationsToStream( notesList, durationList, removeRestsAtBeginning=True) return myScore.parts.first()
def monophonicStreamFromFile(fileName, useScale=None): ''' Reads in a .wav file and returns a stream representing the transcribed, monophonic audio. `fileName` should be the complete path to a file on the disk. a different scale besides the chromatic scale can be specified by setting `useScale`. See :ref:`moduleScale` for a list of allowable scales. (or a custom one can be given). Microtonal scales are totally accepted, as are retuned scales where A != 440hz. We demonstrate with an audio file beginning with an ascending scale. >>> import os #_DOCS_HIDE >>> taw = 'test_audio.wav' #_DOCS_HIDE >>> waveFile = os.path.join(common.getSourceFilePath(), 'audioSearch', taw) #_DOCS_HIDE >>> #_DOCS_SHOW waveFile = 'test_audio.wav' >>> p = audioSearch.transcriber.monophonicStreamFromFile(waveFile) >>> p <music21.stream.Part ...> >>> p.show('text') {0.0} <music21.note.Note C> {0.25} <music21.note.Note C> {0.75} <music21.note.Note D> {1.75} <music21.note.Note E> {2.75} <music21.note.Note F> {4.25} <music21.note.Note G> {5.25} <music21.note.Note A> {6.25} <music21.note.Note B> {7.25} <music21.note.Note C> ... ''' from music21 import audioSearch as audioSearchBase freqFromAQList = audioSearchBase.getFrequenciesFromAudioFile(waveFilename=fileName) detectedPitchesFreq = audioSearchBase.detectPitchFrequencies(freqFromAQList, useScale) detectedPitchesFreq = audioSearchBase.smoothFrequencies(detectedPitchesFreq) (detectedPitchObjects, unused_listplot) = audioSearchBase.pitchFrequenciesToObjects(detectedPitchesFreq, useScale) (notesList, durationList) = audioSearchBase.joinConsecutiveIdenticalPitches(detectedPitchObjects) myScore, unused_length_part = audioSearchBase.notesAndDurationsToStream( notesList, durationList, removeRestsAtBeginning=True) return myScore.parts[0]
def game(self): self.round = self.round + 1 print("self.round %d" % self.round) # print "NOTES UNTIL NOW: (this will not be shown in the final version)" # for k in range(len(self.gameNotes)): # print self.gameNotes[k].fullName seconds = 2 + self.round freqFromAQList = base.getFrequenciesFromMicrophone(length=seconds, storeWaveFilename=None) detectedPitchesFreq = base.detectPitchFrequencies(freqFromAQList, self.useScale) detectedPitchesFreq = base.smoothFrequencies(detectedPitchesFreq) (detectedPitchObjects, unused_listplot) = base.pitchFrequenciesToObjects(detectedPitchesFreq, self.useScale) (notesList, unused_durationList) = base.joinConsecutiveIdenticalPitches(detectedPitchObjects) j = 0 i = 0 while i < len(notesList) and j < len(self.gameNotes) and self.good == True: if notesList[i].name == "rest": i = i + 1 elif notesList[i].name == self.gameNotes[j].name: i = i + 1 j = j + 1 else: print("WRONG NOTE! You played", notesList[i].fullName, "and should have been", self.gameNotes[j].fullName) self.good = False if self.good == True and j != len(self.gameNotes): self.good = False print("YOU ARE VERY SLOW!!! PLAY FASTER NEXT TIME!") if self.good == False: print("YOU LOSE!! HAHAHAHA") else: while i < len(notesList) and notesList[i].name == "rest": i = i + 1 if i < len(notesList): self.gameNotes.append(notesList[i]) #add a new note print("WELL DONE!") else: print("YOU HAVE NOT ADDED A NEW NOTE! REPEAT AGAIN NOW") self.round = self.round - 1 return self.good
def repeatTranscription(self):
    '''
    First, it records from the microphone (or from a file if is used for test).
    Later, it processes the signal in order to detect the pitches.
    It converts them into music21 objects and compares them with the score.
    It finds the best matching position of the recorded signal with the score,
    and decides, depending on matching accuracy, the last note predicted and
    some other parameters, in which position the recorded signal is.

    It returns a value that is False if the song has not finished, or true if
    there has been a problem like some consecutive bad matchings or the score
    has finished.

    >>> from music21.audioSearch import scoreFollower
    >>> scoreNotes = ' '.join(['c4', 'd', 'e', 'f', 'g', 'a', 'b', "c'", 'c', 'e',
    ...     'g', "c'", 'a', 'f', 'd', 'c#', 'd#', 'f#', 'c', 'e', 'g', "c'",
    ...     'a', 'f', 'd', 'c#', 'd#', 'f#'])
    >>> scNotes = converter.parse('tinynotation: 4/4 ' + scoreNotes, makeNotation=False)
    >>> ScF = scoreFollower.ScoreFollower(scoreStream=scNotes)
    >>> ScF.useMic = False
    >>> import os #_DOCS_HIDE
    >>> ScF.waveFile = str(common.getSourceFilePath() #_DOCS_HIDE
    ...     / 'audioSearch' / 'test_audio.wav') #_DOCS_HIDE
    >>> #_DOCS_SHOW ScF.waveFile = 'test_audio.wav'
    >>> ScF.seconds_recording = 10
    >>> ScF.useScale = scale.ChromaticScale('C4')
    >>> ScF.currentSample = 0
    >>> exitType = ScF.repeatTranscription()
    >>> print(exitType)
    False
    >>> print(ScF.lastNotePosition)
    10
    '''
    from music21 import audioSearch
    # print('WE STAY AT:',)
    # print(self.lastNotePosition, len(self.scoreNotesOnly),)
    # print('en percent %d %%' % (self.lastNotePosition * 100 / len(self.scoreNotesOnly)),)
    # print(' this search begins at: ', self.startSearchAtSlot,)
    # print('countdown %d' % self.countdown)
    # print('Measure last note', self.scoreStream[self.lastNotePosition].measureNumber)
    environLocal.printDebug('repeat transcription starting')
    if self.useMic is True:
        # live input: record for the configured window
        freqFromAQList = audioSearch.getFrequenciesFromMicrophone(
            length=self.seconds_recording,
            storeWaveFilename=None,
        )
    else:
        # test mode: read the next slice of the wave file; the call also
        # returns the (possibly re-opened) file object and the new sample
        # position, both of which replace our bookkeeping attributes
        getFreqFunc = audioSearch.getFrequenciesFromPartialAudioFile
        freqFromAQList, self.waveFile, self.currentSample = getFreqFunc(
            self.waveFile,
            length=self.seconds_recording,
            startSample=self.currentSample,
        )
        if self.totalFile == 0:
            # lazily cache the total frame count for the EOF check below
            self.totalFile = self.waveFile.getnframes()
    environLocal.printDebug('got Frequencies from Microphone')

    time_start = time()
    # frequency list -> scale-quantized pitches -> music21 objects -> notes
    detectedPitchesFreq = audioSearch.detectPitchFrequencies(
        freqFromAQList, self.useScale)
    detectedPitchesFreq = audioSearch.smoothFrequencies(
        detectedPitchesFreq)
    detectedPitchObjects, unused_listplot = audioSearch.pitchFrequenciesToObjects(
        detectedPitchesFreq, self.useScale)
    notesList, durationList = audioSearch.joinConsecutiveIdenticalPitches(
        detectedPitchObjects)
    self.silencePeriodDetection(notesList)
    environLocal.printDebug('made it to here...')
    # window of the score we expect the recording to match
    scNotes = self.scoreStream[self.lastNotePosition:self.lastNotePosition + len(notesList)]
    # print('1')
    transcribedScore, self.qle = audioSearch.notesAndDurationsToStream(
        notesList,
        durationList,
        scNotes=scNotes,
        qle=self.qle,
    )
    # print('2')
    # align the transcription against the score; updates lastNotePosition
    totalLengthPeriod, self.lastNotePosition, prob, END_OF_SCORE = self.matchingNotes(
        self.scoreStream,
        transcribedScore,
        self.startSearchAtSlot,
        self.lastNotePosition,
    )
    # print('3')
    self.processing_time = time() - time_start
    environLocal.printDebug('and even to here...')
    if END_OF_SCORE is True:
        exitType = 'endOfScore'  # 'endOfScore'
        return exitType

    # estimate position, or exit if we can't at all...
    exitType = self.updatePosition(prob, totalLengthPeriod, time_start)

    if self.useMic is False:  # reading from the disc (only for TESTS)
        # skip ahead the processing time.
        getFreqFunc = audioSearch.getFrequenciesFromPartialAudioFile
        freqFromAQList, junk, self.currentSample = getFreqFunc(
            self.waveFile,
            length=self.processing_time,
            startSample=self.currentSample,
        )

    if self.lastNotePosition > len(self.scoreNotesOnly):
        # print('finishedPerforming')
        exitType = 'finishedPerforming'
    elif (self.useMic is False
            and self.currentSample >= self.totalFile):
        # print('waveFileEOF')
        exitType = 'waveFileEOF'

    environLocal.printDebug('about to return -- exitType: %s ' % exitType)
    return exitType
def repeatTranscription(self):
    '''
    First, it records from the microphone (or from a file if is used for test).
    Later, it processes the signal in order to detect the pitches.
    It converts them into music21 objects and compares them with the score.
    It finds the best matching position of the recorded signal with the score,
    and decides, depending on matching accuracy, the last note predicted and
    some other parameters, in which position the recorded signal is.

    It returns a value that is False if the song has not finished, or true if
    there has been a problem like some consecutive bad matchings or the score
    has finished.

    >>> from music21.audioSearch import scoreFollower
    >>> scoreNotes = " ".join(["c4", "d", "e", "f", "g", "a", "b", "c'", "c", "e",
    ...     "g", "c'", "a", "f", "d", "c#", "d#", "f#","c", "e", "g", "c'",
    ...     "a", "f", "d", "c#", "d#", "f#"])
    >>> scNotes = converter.parse("tinynotation: 4/4 " + scoreNotes, makeNotation=False)
    >>> ScF = scoreFollower.ScoreFollower(scoreStream=scNotes)
    >>> ScF.useMic = False
    >>> import os #_DOCS_HIDE
    >>> ScF.waveFile = os.path.join(common.getSourceFilePath(), #_DOCS_HIDE
    ...     'audioSearch', 'test_audio.wav') #_DOCS_HIDE
    >>> #_DOCS_SHOW ScF.waveFile = 'test_audio.wav'
    >>> ScF.seconds_recording = 10
    >>> ScF.useScale = scale.ChromaticScale('C4')
    >>> ScF.currentSample = 0
    >>> exitType = ScF.repeatTranscription()
    >>> print(exitType)
    False
    >>> print(ScF.lastNotePosition)
    10
    '''
    from music21 import audioSearch
    # print "WE STAY AT:",
    # print self.lastNotePosition, len(self.scoreNotesOnly),
    # print "en percent %d %%" % (self.lastNotePosition * 100 / len(self.scoreNotesOnly)),
    # print " this search begins at: ", self.startSearchAtSlot,
    # print "countdown %d" % self.countdown
    # print "Measure last note", self.scoreStream[self.lastNotePosition].measureNumber
    environLocal.printDebug("repeat transcription starting")
    if self.useMic is True:
        # live input: record for the configured window
        freqFromAQList = audioSearch.getFrequenciesFromMicrophone(
            length=self.seconds_recording,
            storeWaveFilename=None,
        )
    else:
        # test mode: read the next slice of the wave file; the call also
        # returns the (possibly re-opened) file object and new sample index
        freqFromAQList, self.waveFile, self.currentSample = \
            audioSearch.getFrequenciesFromPartialAudioFile(
                self.waveFile,
                length=self.seconds_recording,
                startSample=self.currentSample,
            )
        if self.totalFile == 0:
            # lazily cache the total frame count for the EOF check below
            self.totalFile = self.waveFile.getnframes()
    environLocal.printDebug("got Frequencies from Microphone")

    time_start = time()
    # frequency list -> scale-quantized pitches -> music21 objects -> notes
    detectedPitchesFreq = audioSearch.detectPitchFrequencies(freqFromAQList, self.useScale)
    detectedPitchesFreq = audioSearch.smoothFrequencies(detectedPitchesFreq)
    detectedPitchObjects, unused_listplot = audioSearch.pitchFrequenciesToObjects(
        detectedPitchesFreq, self.useScale)
    notesList, durationList = audioSearch.joinConsecutiveIdenticalPitches(
        detectedPitchObjects)
    self.silencePeriodDetection(notesList)
    environLocal.printDebug("made it to here...")
    # window of the score we expect the recording to match
    scNotes = self.scoreStream[self.lastNotePosition:self.lastNotePosition + len(notesList)]
    #print "1"
    transcribedScore, self.qle = audioSearch.notesAndDurationsToStream(
        notesList,
        durationList,
        scNotes=scNotes,
        qle=self.qle,
    )
    #print "2"
    # align the transcription against the score; updates lastNotePosition
    totalLengthPeriod, self.lastNotePosition, prob, END_OF_SCORE = self.matchingNotes(
        self.scoreStream,
        transcribedScore,
        self.startSearchAtSlot,
        self.lastNotePosition,
    )
    #print "3"
    self.processing_time = time() - time_start
    environLocal.printDebug("and even to here...")
    if END_OF_SCORE is True:
        exitType = "endOfScore"  # "endOfScore"
        return exitType

    # estimate position, or exit if we can't at all...
    exitType = self.updatePosition(prob, totalLengthPeriod, time_start)

    if self.useMic is False:  # reading from the disc (only for TESTS)
        # skip ahead the processing time.
        freqFromAQList, junk, self.currentSample = \
            audioSearch.getFrequenciesFromPartialAudioFile(
                self.waveFile,
                length=self.processing_time,
                startSample=self.currentSample,
            )

    if self.lastNotePosition > len(self.scoreNotesOnly):
        #print "finishedPerforming"
        exitType = "finishedPerforming"
    elif (self.useMic is False
            and self.currentSample >= self.totalFile):
        #print "waveFileEOF"
        exitType = "waveFileEOF"

    environLocal.printDebug("about to return -- exitType: %s " % exitType)
    return exitType
def repeatTranscription(self):
    '''
    First, it records from the microphone (or from a file if is used for test).
    Later, it processes the signal in order to detect the pitches.
    It converts them into music21 objects and compares them with the score.
    It finds the best matching position of the recorded signal with the score,
    and decides, depending on matching accuracy, the last note predicted and
    some other parameters, in which position the recorded signal is.

    It returns a value that is False if the song has not finished, or true if
    there has been a problem like some consecutive bad matchings or the score
    has finished.

    >>> from music21 import common, converter
    >>> from music21.audioSearch import scoreFollower
    >>> scoreNotes = " ".join(["c4", "d", "e", "f", "g", "a", "b", "c'", "c", "e",
    ...     "g", "c'", "a", "f", "d", "c#", "d#", "f#","c", "e", "g", "c'",
    ...     "a", "f", "d", "c#", "d#", "f#"])
    >>> scNotes = converter.parse("tinynotation: 4/4 " + scoreNotes)
    >>> ScF = scoreFollower.ScoreFollower(scoreStream=scNotes)
    >>> ScF.useMic = False
    >>> import os #_DOCS_HIDE
    >>> readPath = os.path.join(common.getSourceFilePath(), 'audioSearch', 'test_audio.wav') #_DOCS_HIDE
    >>> ScF.waveFile = readPath #_DOCS_HIDE
    >>> #_DOCS_SHOW ScF.waveFile = 'test_audio.wav'
    >>> ScF.seconds_recording = 10
    >>> ScF.useScale = scale.ChromaticScale('C4')
    >>> ScF.currentSample = 0
    >>> exitType = ScF.repeatTranscription()
    >>> print(exitType)
    False
    >>> print(ScF.lastNotePosition)
    10
    '''
    from music21 import audioSearch
    environLocal.printDebug("repeat transcription starting")
    if self.useMic is True:
        # live input: record for the configured window
        freqFromAQList = audioSearch.getFrequenciesFromMicrophone(
            length=self.seconds_recording,
            storeWaveFilename=None,
        )
    else:
        # test mode: read the next slice of the wave file; the call also
        # returns the (possibly re-opened) file object and new sample index
        freqFromAQList, self.waveFile, self.currentSample = \
            audioSearch.getFrequenciesFromPartialAudioFile(
                self.waveFile,
                length=self.seconds_recording,
                startSample=self.currentSample,
            )
        if self.totalFile == 0:
            # lazily cache the total frame count for the EOF check below
            self.totalFile = self.waveFile.getnframes()
    environLocal.printDebug("got Frequencies from Microphone")

    time_start = time()
    # frequency list -> scale-quantized pitches -> music21 objects -> notes
    detectedPitchesFreq = audioSearch.detectPitchFrequencies(
        freqFromAQList, self.useScale)
    detectedPitchesFreq = audioSearch.smoothFrequencies(
        detectedPitchesFreq)
    detectedPitchObjects, unused_listplot = \
        audioSearch.pitchFrequenciesToObjects(
            detectedPitchesFreq, self.useScale)
    notesList, durationList = audioSearch.joinConsecutiveIdenticalPitches(
        detectedPitchObjects)
    self.silencePeriodDetection(notesList)
    environLocal.printDebug("made it to here...")
    # window of the score we expect the recording to match
    scNotes = self.scoreStream[
        self.lastNotePosition:self.lastNotePosition + len(notesList)]
    transcribedScore, self.qle = audioSearch.notesAndDurationsToStream(
        notesList,
        durationList,
        scNotes=scNotes,
        qle=self.qle,
    )
    # align the transcription against the score; updates lastNotePosition
    totalLengthPeriod, self.lastNotePosition, prob, END_OF_SCORE = \
        self.matchingNotes(
            self.scoreStream,
            transcribedScore,
            self.startSearchAtSlot,
            self.lastNotePosition,
        )
    self.processing_time = time() - time_start
    environLocal.printDebug("and even to here...")
    if END_OF_SCORE is True:
        exitType = "endOfScore"
        return exitType

    # estimate position, or exit if we can't at all...
    exitType = self.updatePosition(prob, totalLengthPeriod, time_start)

    if self.useMic is False:  # reading from the disc (only for TESTS)
        # skip ahead the processing time.
        freqFromAQList, junk, self.currentSample = \
            audioSearch.getFrequenciesFromPartialAudioFile(
                self.waveFile,
                length=self.processing_time,
                startSample=self.currentSample,
            )

    if self.lastNotePosition > len(self.scoreNotesOnly):
        exitType = "finishedPerforming"
    elif (self.useMic is False
            and self.currentSample >= self.totalFile):
        exitType = "waveFileEOF"

    environLocal.printDebug("about to return -- exitType: %s " % exitType)
    return exitType