def audioVirelaiSearch():
    """Record about ten seconds of audio and rank the virelai cantus
    incipits from the Trecento spreadsheet by how closely they match the
    recording; show the closest match.
    """
    from music21.audioSearch import transcriber
    from music21 import search

    sheet = cadencebook.TrecentoSheet(sheetname='virelais')

    # Collect one titled cantus (the top Part of the incipit) per row.
    cantuses = []
    for rowNumber in range(2, 54):
        work = sheet.makeWork(rowNumber)
        if work.title == "":
            continue
        try:
            cantus = work.incipit.getElementsByClass('Part')[0]
        except IndexError:
            continue  # incipit has no Part at all; skip this row
        cantus.insert(0, metadata.Metadata(title=work.title))
        cantuses.append(cantus)

    # Live microphone input.  (For offline testing, a score built with
    # converter.parse(...) could be substituted here instead.)
    recorded = transcriber.runTranscribe(show=False, plot=False,
                                         seconds=10.0, saveFile=False)

    ranked = search.approximateNoteSearch(recorded, cantuses)
    for candidate in ranked:
        print(candidate.metadata.title, candidate.matchProbability)
    ranked[0].show()
def recognizeScore(scorePart, pageMeasureNumbers, iterations=1):
    """Listen to live audio and guess which page of *scorePart* is playing.

    scorePart: a music21 Part holding the full score.
    pageMeasureNumbers: measure numbers at which each page begins; page i
        spans pageMeasureNumbers[i] .. pageMeasureNumbers[i + 1], so the
        list has one more entry than there are pages.
    iterations: how many 15-second listening rounds to run.

    Prints the best window matches and a weighted per-page score,
    flagging the most likely page with "****".
    """
    # One flat note stream per page.
    pages = []
    for i in range(len(pageMeasureNumbers) - 1):
        pages.append(
            scorePart.measures(pageMeasureNumbers[i],
                               pageMeasureNumbers[i + 1]).flat.notes.stream())

    # Divide each page into 24-note windows stepping by 8 notes (so
    # consecutive windows overlap by 16), tagging each window with the
    # page number and starting measure it came from.
    allStreams = []
    for pgMinus1, thisPage in enumerate(pages):
        print(str(pgMinus1 + 1))
        for i in range(0, len(thisPage), 8):
            startMeasure = thisPage[i].measureNumber
            print(" " + str(startMeasure))
            newStream = stream.Stream(thisPage[i:i + 24])
            newStream.pageNumber = pgMinus1 + 1
            newStream.startMeasure = startMeasure
            allStreams.append(newStream)

    for loopy in range(iterations):
        if loopy > 0:
            print("\n\nstarting again in 3 seconds")
            time.sleep(3)
        searchScore = audioSearch.transcriber.runTranscribe(
            show=False, plot=False, seconds=15.0, saveFile=False)
        matches = search.approximateNoteSearch(searchScore, allStreams)

        # Fold the best matches into a per-page score, weighting earlier
        # (better) ranks more heavily.  Guard with min(): a short score
        # can yield fewer than 8 windows, which previously raised
        # IndexError here.
        scores = [0] * len(pages)
        for rank in range(min(8, len(matches))):
            topStream = matches[rank]
            scorePage = topStream.pageNumber - 1
            scores[scorePage] += (topStream.matchProbability / (rank + 1.5)) * 10

        print("\nBest guesses (pg#, starting measure, probability)")
        for rank, st in enumerate(matches):
            print(st.pageNumber, st.startMeasure, st.matchProbability)
            if rank >= 7:  # show at most the top 8
                break

        print("\nWeighed top scores (pg#, score):")
        indexOfMaxScore = argmax_index(scores)
        for i in range(len(pages)):
            print((i + 1, scores[i]), end="")
            if i == indexOfMaxScore:
                print(" **** ")
            else:
                print("")
def recognizeScore(scorePart, pageMeasureNumbers, iterations=1):
    """Guess, from live audio, which page of *scorePart* is being played.

    pageMeasureNumbers gives the first measure of each page (one extra
    trailing entry closes the last page).  Each of the *iterations*
    rounds records 15 seconds of audio, matches it against overlapping
    note windows of every page, and prints a weighted per-page score,
    marking the most likely page with "****".
    """
    # Flattened notes for every page of the score.
    pages = [
        scorePart.measures(firstM, lastM).flat.notes.stream()
        for firstM, lastM in zip(pageMeasureNumbers, pageMeasureNumbers[1:])
    ]

    # 24-note windows stepping by 8 (overlapping by 16), each tagged with
    # its page number and starting measure.
    allStreams = []
    for pageIndex, pageNotes in enumerate(pages):
        print(str(pageIndex + 1))
        for offset in range(0, len(pageNotes), 8):
            firstMeasure = pageNotes[offset].measureNumber
            print(" " + str(firstMeasure))
            window = stream.Stream(pageNotes[offset:offset + 24])
            window.pageNumber = pageIndex + 1
            window.startMeasure = firstMeasure
            allStreams.append(window)

    for attempt in range(iterations):
        if attempt > 0:
            print("\n\nstarting again in 3 seconds")
            time.sleep(3)
        heard = audioSearch.transcriber.runTranscribe(
            show=False, plot=False, seconds=15.0, saveFile=False)
        ranked = search.approximateNoteSearch(heard, allStreams)

        # Weight the 8 best window matches into per-page scores; better
        # ranks contribute more.
        scores = [0 for _ in pages]
        for rank in range(8):
            best = ranked[rank]
            scores[best.pageNumber - 1] += (best.matchProbability
                                            / (rank + 1.5)) * 10

        print("\nBest guesses (pg#, starting measure, probability)")
        for rank, match in enumerate(ranked):
            print(match.pageNumber, match.startMeasure, match.matchProbability)
            if rank >= 7:
                break

        print("\nWeighed top scores (pg#, score):")
        winner = argmax_index(scores)
        for pageIndex in range(len(pages)):
            print((pageIndex + 1, scores[pageIndex]), end="")
            print(" **** " if pageIndex == winner else "")
def audioVirelaiSearch():
    """Match ten seconds of recorded audio against the virelai incipits
    of the Trecento cadence book and display the best match.
    """
    from music21.audioSearch import transcriber
    from music21 import search

    worksheet = cadencebook.TrecentoSheet(sheetname='virelais')

    incipitParts = []
    # Rows 2-53 of the spreadsheet hold the virelais.
    for rowIndex in range(2, 54):
        piece = worksheet.makeWork(rowIndex)
        if piece.title == "":
            continue
        try:
            topPart = piece.incipit.getElementsByClass('Part')[0]
            topPart.insert(0, metadata.Metadata(title=piece.title))
            incipitParts.append(topPart)
        except IndexError:
            pass  # incipit lacks a Part; skip the row

    # Record from the microphone.  (A converter.parse(...) score can be
    # substituted here for debugging without audio input.)
    audioScore = transcriber.runTranscribe(show=False, plot=False,
                                           seconds=10.0, saveFile=False)

    ranked = search.approximateNoteSearch(audioScore, incipitParts)
    for match in ranked:
        print(match.metadata.title, match.matchProbability)
    ranked[0].show()
def matchingNotes( self, scoreStream, transcribedScore, notePrediction, lastNotePosition, ): from music21 import audioSearch # Analyzing streams tn_recording = int(len(transcribedScore.flat.notesAndRests)) totScores = [] beginningData = [] lengthData = [] END_OF_SCORE = False # take 10% more of samples tn_window = int(math.ceil(tn_recording * 1.1)) hop = int(math.ceil(tn_window / 4)) if hop == 0: iterations = 1 else: iterations = int((math.floor(len(scoreStream) / hop)) - math.ceil(tn_window / hop)) for i in range(iterations): scNotes = scoreStream[i * hop + 1:i * hop + tn_recording + 1] name = '%d' % i beginningData.append(i * hop + 1) lengthData.append(tn_recording) scNotes.id = name totScores.append(scNotes) listOfParts = search.approximateNoteSearchWeighted( transcribedScore.flat.notesAndRests.stream(), totScores) #decision process if notePrediction > len(scoreStream) - tn_recording - hop - 1: notePrediction = len(scoreStream) - tn_recording - hop - 1 END_OF_SCORE = True environLocal.printDebug('LAST PART OF THE SCORE') #lastCountdown = self.countdown position, self.countdown = audioSearch.decisionProcess( listOfParts, notePrediction, beginningData, lastNotePosition, self.countdown, self.firstNotePage, self.lastNotePage, ) totalLength = 0 number = int(listOfParts[position].id) if self.silencePeriod is True and self.silencePeriodCounter < 5: # print(lastCountdown, self.countdown, lastNotePosition, # beginningData[number], lengthData[number]) environLocal.printDebug('All rest period') self.countdown -= 1 if self.countdown != 0: probabilityHit = 0 else: probabilityHit = listOfParts[position].matchProbability unused_listOfParts2 = search.approximateNoteSearch( transcribedScore.flat.notesAndRests.stream(), totScores) unused_listOfParts3 = search.approximateNoteSearchNoRhythm( transcribedScore.flat.notesAndRests.stream(), totScores) unused_listOfParts4 = search.approximateNoteSearchOnlyRhythm( transcribedScore.flat.notesAndRests.stream(), totScores) # 
print('PROBABILITIES:',) # print('pitches and durations weighted (current)', listOfParts[position].matchProbability,) # print('pitches and durations without weighting' , listOfParts2[position].matchProbability,) # print('pitches', listOfParts3[position].matchProbability,) # print('durations', listOfParts4[position].matchProbability) for i in range(len(totScores[number])): totalLength = totalLength + totScores[number][i].quarterLength if self.countdown == 0 and self.silencePeriodCounter == 0: lastNotePosition = beginningData[number] + lengthData[number] return totalLength, lastNotePosition, probabilityHit, END_OF_SCORE
def matchingNotes(
    self,
    scoreStream,
    transcribedScore,
    notePrediction,
    lastNotePosition,
):
    '''
    Work out where in *scoreStream* the performer currently is, by
    matching the transcribed recording against overlapping windows of
    the score.

    Returns (totalLength, lastNotePosition, probabilityHit,
    END_OF_SCORE).  As a side effect self.countdown is updated by the
    decision process, and decremented once more during a silence period.
    '''
    from music21 import audioSearch

    recordedCount = int(len(transcribedScore.flat.notesAndRests))
    candidateWindows = []
    windowStarts = []
    windowLengths = []
    reachedEnd = False

    # Window size: the recorded sample count padded by 10%.
    paddedCount = int(math.ceil(recordedCount * 1.1))
    hopSize = int(math.ceil(paddedCount / 4))
    if hopSize == 0:
        windowTotal = 1
    else:
        windowTotal = int((math.floor(len(scoreStream) / hopSize))
                          - math.ceil(paddedCount / hopSize))

    # One candidate score window per hop position, each as long as the
    # recording and tagged with its index as an id.
    for windowIndex in range(windowTotal):
        begin = windowIndex * hopSize + 1
        windowNotes = scoreStream[begin:begin + recordedCount]
        windowNotes.id = "%d" % windowIndex
        windowStarts.append(begin)
        windowLengths.append(recordedCount)
        candidateWindows.append(windowNotes)

    rankedWindows = search.approximateNoteSearchWeighted(
        transcribedScore.flat.notesAndRests.stream(), candidateWindows)

    # Decision process: clamp the prediction so a full window still fits
    # before the end of the score.
    if notePrediction > len(scoreStream) - recordedCount - hopSize - 1:
        notePrediction = len(scoreStream) - recordedCount - hopSize - 1
        reachedEnd = True
        environLocal.printDebug("LAST PART OF THE SCORE")
    chosen, self.countdown = audioSearch.decisionProcess(
        rankedWindows,
        notePrediction,
        windowStarts,
        lastNotePosition,
        self.countdown,
        self.firstNotePage,
        self.lastNotePage,
    )

    winner = int(rankedWindows[chosen].id)
    if self.silencePeriod is True and self.silencePeriodCounter < 5:
        environLocal.printDebug("All rest period")
        self.countdown -= 1

    # While a countdown is pending, report zero probability.
    if self.countdown != 0:
        probabilityHit = 0
    else:
        probabilityHit = rankedWindows[chosen].matchProbability

    # Alternative search strategies, run for debugging comparison only.
    unused_searchPlain = search.approximateNoteSearch(
        transcribedScore.flat.notesAndRests.stream(), candidateWindows)
    unused_searchPitchOnly = search.approximateNoteSearchNoRhythm(
        transcribedScore.flat.notesAndRests.stream(), candidateWindows)
    unused_searchRhythmOnly = search.approximateNoteSearchOnlyRhythm(
        transcribedScore.flat.notesAndRests.stream(), candidateWindows)

    totalLength = sum(n.quarterLength for n in candidateWindows[winner])
    if self.countdown == 0 and self.silencePeriodCounter == 0:
        lastNotePosition = windowStarts[winner] + windowLengths[winner]
    return totalLength, lastNotePosition, probabilityHit, reachedEnd