def repeatTranscription(self):
    '''
    First, it records from the microphone (or from a file when it is used
    for testing). It then processes the signal to detect the pitches,
    converts them into music21 objects, and compares them with the score.
    It finds the best matching position of the recorded signal within the
    score and decides, based on the matching accuracy, the last note
    predicted, and some other parameters, where in the score the recorded
    signal is.

    It returns False if the piece has not finished, or a truthy string
    describing why transcription stopped, such as several consecutive bad
    matches or reaching the end of the score or of the wave file.

    >>> from music21 import common, converter, scale
    >>> from music21.audioSearch import scoreFollower
    >>> scoreNotes = " ".join(["c4", "d", "e", "f", "g", "a", "b", "c'", "c", "e",
    ...     "g", "c'", "a", "f", "d", "c#", "d#", "f#", "c", "e", "g", "c'",
    ...     "a", "f", "d", "c#", "d#", "f#"])
    >>> scNotes = converter.parse("tinynotation: 4/4 " + scoreNotes, makeNotation=False)
    >>> ScF = scoreFollower.ScoreFollower(scoreStream=scNotes)
    >>> ScF.useMic = False
    >>> import os #_DOCS_HIDE
    >>> ScF.waveFile = os.path.join(common.getSourceFilePath(), #_DOCS_HIDE
    ...     'audioSearch', 'test_audio.wav') #_DOCS_HIDE
    >>> #_DOCS_SHOW ScF.waveFile = 'test_audio.wav'
    >>> ScF.seconds_recording = 10
    >>> ScF.useScale = scale.ChromaticScale('C4')
    >>> ScF.currentSample = 0
    >>> exitType = ScF.repeatTranscription()
    >>> print(exitType)
    False
    >>> print(ScF.lastNotePosition)
    10
    '''
    from music21 import audioSearch

    environLocal.printDebug("repeat transcription starting")

    # Step 1: capture a chunk of frequencies, either live from the
    # microphone or from the (test) wave file on disc.
    if self.useMic is True:
        freqFromAQList = audioSearch.getFrequenciesFromMicrophone(
            length=self.seconds_recording,
            storeWaveFilename=None,
        )
    else:
        freqFromAQList, self.waveFile, self.currentSample = \
            audioSearch.getFrequenciesFromPartialAudioFile(
                self.waveFile,
                length=self.seconds_recording,
                startSample=self.currentSample,
            )
        if self.totalFile == 0:
            self.totalFile = self.waveFile.getnframes()

    environLocal.printDebug("got Frequencies from Microphone")

    # Step 2: detect and smooth pitches, join repeated pitches into notes
    # with durations, and build a transcribed stream.
    time_start = time()
    detectedPitchesFreq = audioSearch.detectPitchFrequencies(freqFromAQList, self.useScale)
    detectedPitchesFreq = audioSearch.smoothFrequencies(detectedPitchesFreq)
    detectedPitchObjects, unused_listplot = audioSearch.pitchFrequenciesToObjects(
        detectedPitchesFreq, self.useScale)
    notesList, durationList = audioSearch.joinConsecutiveIdenticalPitches(
        detectedPitchObjects)
    self.silencePeriodDetection(notesList)
    environLocal.printDebug("made it to here...")

    scNotes = self.scoreStream[self.lastNotePosition:self.lastNotePosition + len(notesList)]
    transcribedScore, self.qle = audioSearch.notesAndDurationsToStream(
        notesList,
        durationList,
        scNotes=scNotes,
        qle=self.qle,
    )

    # Step 3: match the transcription against the score and update the
    # estimated position.
    totalLengthPeriod, self.lastNotePosition, prob, END_OF_SCORE = self.matchingNotes(
        self.scoreStream,
        transcribedScore,
        self.startSearchAtSlot,
        self.lastNotePosition,
    )
    self.processing_time = time() - time_start
    environLocal.printDebug("and even to here...")

    if END_OF_SCORE is True:
        exitType = "endOfScore"
        return exitType

    # estimate position, or exit if we can't at all...
    exitType = self.updatePosition(prob, totalLengthPeriod, time_start)

    if self.useMic is False:  # reading from the disc (only for TESTS)
        # skip ahead the processing time.
        freqFromAQList, junk, self.currentSample = \
            audioSearch.getFrequenciesFromPartialAudioFile(
                self.waveFile,
                length=self.processing_time,
                startSample=self.currentSample,
            )

    if self.lastNotePosition > len(self.scoreNotesOnly):
        exitType = "finishedPerforming"
    elif self.useMic is False and self.currentSample >= self.totalFile:
        exitType = "waveFileEOF"

    environLocal.printDebug("about to return -- exitType: %s " % exitType)
    return exitType
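# The following is a minimal, standalone sketch (an illustration only, not part
# of the ScoreFollower class) of the offline half of the pipeline that
# repeatTranscription() runs on each chunk. The helper name
# _transcribeChunkSketch and its parameters are hypothetical; the
# music21.audioSearch calls are the same ones used in the method body above,
# with the same arguments.

def _transcribeChunkSketch(waveFile, startSample=0, seconds=10.0):
    '''
    Hypothetical helper: read `seconds` of audio from `waveFile` starting at
    `startSample`, detect and smooth pitch frequencies against a chromatic
    scale, and join repeated pitches into (notesList, durationList).
    Returns those two lists plus the sample position to resume from.
    '''
    from music21 import audioSearch, scale
    useScale = scale.ChromaticScale('C4')
    freqs, openedWave, nextSample = audioSearch.getFrequenciesFromPartialAudioFile(
        waveFile, length=seconds, startSample=startSample)
    detected = audioSearch.detectPitchFrequencies(freqs, useScale)
    detected = audioSearch.smoothFrequencies(detected)
    pitchObjects, unused_plot = audioSearch.pitchFrequenciesToObjects(detected, useScale)
    notesList, durationList = audioSearch.joinConsecutiveIdenticalPitches(pitchObjects)
    return notesList, durationList, nextSample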
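# A hypothetical driver loop (illustration only, assuming the same
# configuration as the doctest above): keep calling repeatTranscription()
# until it returns something other than False. The function name
# _followScoreSketch and the loop itself are assumptions about typical usage,
# not part of music21's public API.

def _followScoreSketch(scNotes, waveFilePath, secondsPerChunk=10.0):
    from music21 import scale
    from music21.audioSearch import scoreFollower
    ScF = scoreFollower.ScoreFollower(scoreStream=scNotes)
    ScF.useMic = False                 # read from disc, as in the tests
    ScF.waveFile = waveFilePath
    ScF.seconds_recording = secondsPerChunk
    ScF.useScale = scale.ChromaticScale('C4')
    ScF.currentSample = 0
    exitType = False
    while exitType is False:           # False means the piece has not finished
        exitType = ScF.repeatTranscription()
    # e.g. 'endOfScore', 'finishedPerforming', or 'waveFileEOF'
    return exitType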