def testRichMetadata01(self):
    from music21 import corpus
    from music21 import metadata

    score = corpus.parse('jactatur')
    self.assertEqual(score.metadata.composer, 'Johannes Ciconia')

    richMetadata = metadata.RichMetadata()
    richMetadata.merge(score.metadata)
    self.assertEqual(richMetadata.composer, 'Johannes Ciconia')

    # update richMetadata with stream
    richMetadata.update(score)
    self.assertEqual(
        richMetadata.keySignatureFirst,
        '<music21.key.Key of F major>',
    )
    self.assertEqual(str(richMetadata.timeSignatureFirst), '2/4')

    score = corpus.parse('bwv66.6')
    richMetadata = metadata.RichMetadata()
    richMetadata.merge(score.metadata)
    richMetadata.update(score)
    self.assertEqual(
        str(richMetadata.keySignatureFirst),
        '<music21.key.Key of f# minor>',
    )
    self.assertEqual(str(richMetadata.timeSignatureFirst), '4/4')
def testFictaFeature(): luca = corpus.parse("luca/gloria.mxl") fe = MusicaFictaFeature(luca) print fe.extract().vector mv = corpus.parse("monteverdi/madrigal.3.1.xml") fe.setData(mv) print fe.extract().vector
def parse(self):
    from music21 import corpus
    if self.number is not None:
        return corpus.parse(self.sourcePath, number=self.number)
    else:
        return corpus.parse(self.sourcePath)
def testBvSvS(self):
    import difflib
    from music21 import corpus

    # compare by pitch only
    h = Hasher()
    h.hashDuration = False
    h.hashOffset = False
    s1 = corpus.parse('schoenberg', 6).parts[0]
    s2 = corpus.parse('schoenberg', 2).parts[0]
    s3 = corpus.parse('bwv66.6').parts[0]
    hashes1 = h.hashStream(s1)
    hashes2 = h.hashStream(s2)
    hashes3 = h.hashStream(s3)
    print(difflib.SequenceMatcher(a=hashes1, b=hashes2).ratio())
    print(difflib.SequenceMatcher(a=hashes1, b=hashes3).ratio())
    print(difflib.SequenceMatcher(a=hashes2, b=hashes3).ratio())
    s2.show()

    # compare again, by rhythm only
    h.hashPitch = False
    h.hashDuration = True
    h.hashOffset = True
    hashes1 = h.hashStream(s1)
    hashes2 = h.hashStream(s2)
    hashes3 = h.hashStream(s3)
    print(difflib.SequenceMatcher(a=hashes1, b=hashes2).ratio())
    print(difflib.SequenceMatcher(a=hashes1, b=hashes3).ratio())
    print(difflib.SequenceMatcher(a=hashes2, b=hashes3).ratio())
def testGetAccidentalCountSumAdvanced(self):
    s1 = corpus.parse('bach/bwv7.7')
    s2 = corpus.parse('bach/bwv66.6')
    totalNotes = len(s1.flat.notes) + len(s2.flat.notes)
    tally = getAccidentalCountSum([s1, s2], True)
    self.assertEqual(tally, {'sharp': 195, 'natural': 324})
    self.assertEqual(totalNotes, tally['sharp'] + tally['natural'])
def monteverdiParallels(books=(3,), start=1, end=20, show=True, strict=False):
    '''
    Find all instances of parallel fifths or octaves in Monteverdi madrigals.
    '''
    for book in books:
        for i in range(start, end + 1):
            filename = 'monteverdi/madrigal.%s.%s.xml' % (book, i)
            if strict:
                c = corpus.parse(filename)
                print(book, i)
            else:
                try:
                    c = corpus.parse(filename)
                    print(book, i)
                except Exception:
                    print("Cannot parse %s, maybe it does not exist..." % (filename,))
                    continue
            displayMe = False
            for i in range(len(c.parts) - 1):
                #iName = c.parts[i].id
                ifn = c.parts[i].flat.notesAndRests.stream()
                omi = ifn.offsetMap()
                for j in range(i + 1, len(c.parts)):
                    jName = c.parts[j].id
                    jfn = c.parts[j].flat.notesAndRests.stream()
                    for k in range(len(omi) - 1):
                        n1pi = omi[k]['element']
                        n2pi = omi[k + 1]['element']
                        n1pjAll = jfn.getElementsByOffset(
                            offsetStart=omi[k]['endTime'] - .001,
                            offsetEnd=omi[k]['endTime'] - .001,
                            mustBeginInSpan=False)
                        if len(n1pjAll) == 0:
                            continue
                        n1pj = n1pjAll[0]
                        n2pjAll = jfn.getElementsByOffset(
                            offsetStart=omi[k + 1]['offset'],
                            offsetEnd=omi[k + 1]['offset'],
                            mustBeginInSpan=False)
                        if len(n2pjAll) == 0:
                            continue
                        n2pj = n2pjAll[0]
                        if n1pj is n2pj:
                            continue  # no oblique motion
                        if n1pi.isRest or n2pi.isRest or n1pj.isRest or n2pj.isRest:
                            continue
                        if n1pi.isChord or n2pi.isChord or n1pj.isChord or n2pj.isChord:
                            continue
                        vlq = voiceLeading.VoiceLeadingQuartet(n1pi, n2pi, n1pj, n2pj)
                        if vlq.parallelMotion('P8') is False and vlq.parallelMotion('P5') is False:
                            continue
                        displayMe = True
                        n1pi.addLyric('par ' + str(vlq.vIntervals[0].name))
                        n2pi.addLyric(' w/ ' + jName)
            if displayMe and show:
                c.show()
def demoGraphBach():
    dpi = 300

    # lopping off first measure to avoid pickup
    s1 = corpus.parse("bach/bwv103.6").measures(1, None)
    s2 = corpus.parse("bach/bwv18.5-lz").measures(1, None)

    s1.plot("key", dpi=dpi, title="Windowed Key Analysis, Bach, BWV 103.6",
            windowStep="pow2")
    s2.plot("key", dpi=dpi, title="Windowed Key Analysis, Bach, BWV 18.5",
            windowStep="pow2")
def demoGettingWorks():
    # Can obtain works from an integrated corpus
    s1 = corpus.parse("bach/bwv103.6")  # @UnusedVariable
    s2 = corpus.parse("bach/bwv18.5-lz")  # @UnusedVariable

    # Can parse data stored in MusicXML files locally or online:
    s = converter.parse("http://www.musicxml.org/xml/elite.xml")  # @UnusedVariable

    # Can parse data stored in MIDI files locally or online:
    s = converter.parse("http://www.jsbchorales.net/down/midi/010306b_.mid")  # @UnusedVariable
def demoMakeChords():
    from music21 import corpus, stream, scale, bar, layout

    # wtc no 1
    #src = corpus.parse('bwv65.2').measures(0, 5)
    src = corpus.parse('opus18no1/movement3.xml').measures(0, 10)
    src.flattenParts().makeChords(minimumWindowSize=3).show()

    src = corpus.parse('opus18no1/movement3.xml').measures(0, 10)
    src.chordify().show()
def testRichMetadata01(self):
    from music21 import corpus
    from music21 import freezeThaw
    from music21 import metadata

    score = corpus.parse('jactatur')
    self.assertEqual(score.metadata.composer, 'Johannes Ciconia')

    richMetadata = metadata.RichMetadata()
    richMetadata.merge(score.metadata)
    self.assertEqual(richMetadata.composer, 'Johannes Ciconia')

    # update richMetadata with stream
    richMetadata.update(score)
    self.assertEqual(
        richMetadata.keySignatureFirst,
        '<music21.key.KeySignature of 1 flat, mode major>',
    )
    self.assertEqual(str(richMetadata.timeSignatureFirst), '2/4')

    # round-trip the RichMetadata object through JSON
    rmdNew = metadata.RichMetadata()
    jsonString = freezeThaw.JSONFreezer(richMetadata).json
    freezeThaw.JSONThawer(rmdNew).json = jsonString

    self.assertEqual(rmdNew.composer, 'Johannes Ciconia')
    self.assertEqual(str(rmdNew.timeSignatureFirst), '2/4')
    self.assertEqual(
        str(rmdNew.keySignatureFirst),
        '<music21.key.KeySignature of 1 flat, mode major>',
    )

    score = corpus.parse('bwv66.6')
    richMetadata = metadata.RichMetadata()
    richMetadata.merge(score.metadata)
    richMetadata.update(score)
    self.assertEqual(
        str(richMetadata.keySignatureFirst),
        '<music21.key.KeySignature of 3 sharps, mode minor>',
    )
    self.assertEqual(str(richMetadata.timeSignatureFirst), '4/4')

    jsonString = freezeThaw.JSONFreezer(richMetadata).json
    freezeThaw.JSONThawer(rmdNew).json = jsonString
    self.assertEqual(str(rmdNew.timeSignatureFirst), '4/4')
    self.assertEqual(
        str(rmdNew.keySignatureFirst),
        '<music21.key.KeySignature of 3 sharps, mode minor>',
    )
def testTonalAmbiguityA(self):
    from music21 import corpus, note, scale, stream
    # s = corpus.parse('bwv64.2')
    # k = s.analyze('KrumhanslSchmuckler')
    # k.tonalCertainty(method='correlationCoefficient')

    s = corpus.parse('bwv66.6')
    k = s.analyze('KrumhanslSchmuckler')
    ta = k.tonalCertainty(method='correlationCoefficient')
    self.assertEqual(ta < 2 and ta > 0.1, True)

    s = corpus.parse('schoenberg/opus19', 6)
    k = s.analyze('KrumhanslSchmuckler')
    ta = k.tonalCertainty(method='correlationCoefficient')
    self.assertEqual(ta < 2 and ta > 0.1, True)

    sc1 = scale.MajorScale('g')
    sc2 = scale.MajorScale('d')
    sc3 = scale.MajorScale('a')
    sc5 = scale.MajorScale('f#')

    s = stream.Stream()
    for p in sc1.pitches:
        s.append(note.Note(p))
    k = s.analyze('KrumhanslSchmuckler')
    ta = k.tonalCertainty(method='correlationCoefficient')
    self.assertEqual(ta < 2 and ta > 0.1, True)

    s = stream.Stream()
    for p in sc1.pitches + sc2.pitches + sc2.pitches + sc3.pitches:
        s.append(note.Note(p))
    k = s.analyze('KrumhanslSchmuckler')
    ta = k.tonalCertainty(method='correlationCoefficient')
    self.assertEqual(ta < 2 and ta > 0.1, True)

    s = stream.Stream()
    for p in sc1.pitches + sc5.pitches:
        s.append(note.Note(p))
    k = s.analyze('KrumhanslSchmuckler')
    ta = k.tonalCertainty(method='correlationCoefficient')
    self.assertEqual(ta < 2 and ta > 0.1, True)

    s = stream.Stream()
    for p in ('c', 'g', 'c', 'c', 'e'):
        s.append(note.Note(p))
    k = s.analyze('KrumhanslSchmuckler')
    ta = k.tonalCertainty(method='correlationCoefficient')
    self.assertEqual(ta < 2 and ta > 0.1, True)
def testMuseDataImportTempoA(self):
    from music21 import corpus
    # a small file
    s = corpus.parse('movement2-09.md')
    self.assertEqual(len(s.parts), 5)
    # the tempo is found in the 4th part here
    self.assertEqual(
        str(s.parts[3].flat.getElementsByClass('TempoIndication')[0]),
        '<music21.tempo.MetronomeMark Largo e piano Quarter=46>')
    #s.show()

    s = corpus.parse('movement2-07.md')
    self.assertEqual(
        str(s.flat.getElementsByClass('TempoIndication')[0]),
        '<music21.tempo.MetronomeMark Largo Quarter=46>')
def prepareChinaEurope1():
    featureExtractors = features.extractorsById([
        "r31", "r32", "r33", "r34", "r35",
        "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10",
        "p11", "p12", "p13", "p14", "p15", "p16", "p19", "p20", "p21",
    ])
    # featureExtractors = features.extractorsById('all')

    oChina1 = corpus.parse("essenFolksong/han1")
    oCEurope1 = corpus.parse("essenFolksong/boehme10")

    ds = features.DataSet(classLabel="Region")
    ds.addFeatureExtractors(featureExtractors)

    # add works, defining the class value
    for w in oChina1.scores:
        sid = "essenFolksong/%s-%s" % ("han1", w.metadata.number)
        ds.addData(w, classValue="China", id=sid)
    for w in oCEurope1.scores:
        sid = "essenFolksong/%s-%s" % ("europe1", w.metadata.number)
        ds.addData(w, classValue="CentralEurope", id=sid)

    # process with all feature extractors, store all features
    ds.process()
    ds.write("d:/desktop/folkTrain.tab")
def oldAccent(show=True):
    from music21 import corpus, meter, articulations

    score = corpus.parse("bach/bwv366.xml")
    partBass = score.getElementById("Bass")

    ts = partBass.flat.getElementsByClass(meter.TimeSignature)[0]
    ts.beatSequence.partition(["3/8", "3/8"])
    ts.accentSequence.partition(["3/8", "3/8"])
    ts.setAccentWeight([1, 0.5])

    for m in partBass.getElementsByClass("Measure"):
        lastBeat = None
        for n in m.notes:
            beat, progress = ts.getBeatProgress(n.offset)
            if beat != lastBeat and progress == 0:
                if n.tie is not None and n.tie.type == "stop":
                    continue
                if ts.getAccentWeight(n.offset) == 1:
                    mark = articulations.StrongAccent()
                elif ts.getAccentWeight(n.offset) == 0.5:
                    mark = articulations.Accent()
                n.articulations.append(mark)
                lastBeat = beat
        m = m.sorted

    if show:
        partBass.measures(1, 8).show("musicxml")
def testMultiWorkImported(self):
    from music21 import corpus
    # defines multiple works, will return an opus
    o = corpus.parse('josquin/milleRegrets')
    self.assertEqual(len(o), 4)

    # each score in the opus is a Stream that contains a Part and metadata
    p1 = o.getScoreByNumber(1).parts[0]
    self.assertEqual(p1.offset, 0.0)
    self.assertEqual(len(p1.flat.notesAndRests), 88)

    p2 = o.getScoreByNumber(2).parts[0]
    self.assertEqual(p2.offset, 0.0)
    self.assertEqual(len(p2.flat.notesAndRests), 80)

    p3 = o.getScoreByNumber(3).parts[0]
    self.assertEqual(p3.offset, 0.0)
    self.assertEqual(len(p3.flat.notesAndRests), 82)

    p4 = o.getScoreByNumber(4).parts[0]
    self.assertEqual(p4.offset, 0.0)
    self.assertEqual(len(p4.flat.notesAndRests), 78)

    sMerged = o.mergeScores()
    self.assertEqual(sMerged.metadata.title, 'Mille regrets')
    self.assertEqual(sMerged.metadata.composer, 'Josquin des Prez')
    self.assertEqual(len(sMerged.parts), 4)

    self.assertEqual(sMerged.parts[0].getElementsByClass('Clef')[0].sign, 'G')
    self.assertEqual(sMerged.parts[1].getElementsByClass('Clef')[0].sign, 'G')
    self.assertEqual(sMerged.parts[2].getElementsByClass('Clef')[0].sign, 'G')
    self.assertEqual(sMerged.parts[2].getElementsByClass('Clef')[0].octaveChange, -1)
    self.assertEqual(sMerged.parts[3].getElementsByClass('Clef')[0].sign, 'F')
def indexOnePath(filePath, *args, **kwds):
    # relative paths are treated as corpus paths; absolute paths go to the converter
    if not os.path.isabs(filePath):
        scoreObj = corpus.parse(filePath)
    else:
        scoreObj = converter.parse(filePath)
    scoreDictEntry = indexScoreParts(scoreObj, *args, **kwds)
    return scoreDictEntry
def januaryThankYou():
    names = ['opus132', 'opus133', 'opus18no3', 'opus18no4', 'opus18no5', 'opus74']
    names += ['opus59no1', 'opus59no2', 'opus59no3']

    for workName in names:
        beethovenScore = corpus.parse('beethoven/' + workName, 1)
        for partNum in range(4):
            print(workName, str(partNum))
            thisPart = beethovenScore[partNum]
            display = stream.Stream()
            notes = thisPart.flat.findConsecutiveNotes(skipUnisons=True,
                                                       skipChords=True,
                                                       skipOctaves=True,
                                                       skipRests=True,
                                                       noNone=True)
            for i in range(len(notes) - 4):
                # if (notes[i].name == 'E-' or notes[i].name == "D#") and notes[i+1].name == 'E' and notes[i+2].name == 'A':
                if notes[i].name == 'E-' and notes[i+1].name == 'E' and notes[i+2].name == 'A':
                    measureNumber = 0
                    for site in notes[i].sites.get():
                        if isinstance(site, stream.Measure):
                            measureNumber = site.number
                            display.append(site)
                    notes[i].lyric = workName + " " + str(thisPart.id) + " " + str(measureNumber)
                    m = stream.Measure()
                    m.append(notes[i])
                    m.append(notes[i+1])
                    m.append(notes[i+2])
                    m.append(notes[i+3])
                    m.insert(0, m.bestClef())
                    display.append(m)
            display.show()
def testStreams02(self):
    # based on Stream.testAddSlurByMelisma(self):
    from music21 import corpus, spanner

    nStart = None
    nEnd = None
    ex = corpus.parse('luca/gloria').parts['cantus'].measures(1, 11)
    exFlatNotes = ex.flat.notesAndRests
    nLast = exFlatNotes[-1]

    for i, n in enumerate(exFlatNotes):
        if i < len(exFlatNotes) - 1:
            nNext = exFlatNotes[i + 1]
        else:
            continue

        if n.hasLyrics():
            nStart = n
        # if next is a begin, then this is an end
        elif nStart is not None and nNext.hasLyrics() and n.tie is None:
            nEnd = n
        elif nNext is nLast:
            nEnd = n

        if nStart is not None and nEnd is not None:
            nStart.addLyric(nStart.beatStr)
            ex.insert(spanner.Slur(nStart, nEnd))
            nStart = None
            nEnd = None

    for sp in ex.spanners.getElementsByClass('Slur'):
        #environLocal.printDebug(['sp', n.nameWithOctave, sp])
        unused_dur = sp.getDurationBySite(exFlatNotes)
        n = sp.getFirst()
def runMusicxmlOutPartsBeethoven(self):
    '''
    Loading file and rendering musicxml output for each part:
    beethoven/opus59no2/movement3
    '''
    x = corpus.parse('beethoven/opus59no2/movement3', forceSource=True)
    # problem: doing each part is much faster than the whole score
    for p in x.parts:
        junk = GEX().parse(p)
def simple4f(show=True):
    # question 19: Calculate pitch-class sets for melodic passages segmented by rests.
    work = 'opus18no1'
    movementNumber = 3
    s = corpus.parse(work, movementNumber)  #, extList=['xml'])

    foundSets = []
    candidateSet = []
    for part in s.getElementsByClass(stream.Part):
        eventStream = part.flat.notesAndRests
        for i in range(len(eventStream)):
            e = eventStream[i]
            if isinstance(e, note.Rest) or i == len(eventStream) - 1:
                if len(candidateSet) > 0:
                    candidateSet.sort()
                    # this removes redundancies for simplicity
                    if candidateSet not in foundSets:
                        foundSets.append(candidateSet)
                    candidateSet = []
            elif isinstance(e, note.Note):
                if e.pitchClass not in candidateSet:
                    candidateSet.append(e.pitchClass)

    foundSets.sort()
    if show:
        print(foundSets)
def run(self):
    score = corpus.parse(self.filename)
    self.debug('PARSED')
    if 2 < len(score.parts):
        self.debug('MORE THAN TWO PARTS')
        self.results = None
        return
    chordifiedScore = score.chordify()
    self.debug('CHORDIFIED')
    try:
        chordReducer = analysis.reduceChords.ChordReducer()
        reducedScore = chordReducer(score).parts[0]
    except AssertionError as e:
        self.debug('REDUCTION ERROR')
        print(e)
        return
    self.debug('REDUCED')
    self.results['chordified'] = []
    self.results['reduced'] = []
    for i in range(1, 5):
        ngrams = self.computeNGrams(reducedScore, nGramLength=i)
        self.results['chordified'].append(ngrams)
        self.debug('NGRAMS: {}'.format(i))
    for i in range(1, 5):
        ngrams = self.computeNGrams(chordifiedScore, nGramLength=i)
        self.results['reduced'].append(ngrams)
        self.debug('NGRAMS: {}'.format(i))
    self.debug('DONE!')
def runMusicxmlOutScoreBeethoven(self):
    '''
    Loading file and rendering musicxml output of complete score:
    beethoven/opus59no2/movement3
    '''
    x = corpus.parse('beethoven/opus59no2/movement3', forceSource=True)
    # problem: doing each part is much faster than the whole score
    junk = GEX().parse(x)
def testExtractionC(self):
    from music21 import analysis, corpus
    # http://solomonsmusic.net/schenker.htm
    # shows extracting an Ursatz line

    # Bach, Prelude 1, WTC
    src = corpus.parse('bwv846')
    chords = src.flattenParts().makeChords(minimumWindowSize=4,
                                           makeRests=False)
    for c in chords.flat.notes:
        c.quarterLength = 4
    for m in chords.getElementsByClass('Measure'):
        m.clef = m.bestClef()

    chords.measure(1).notes[0].addLyric('::/p:e/o:5/nf:no/ta:3/g:Ursatz')
    chords.measure(1).notes[0].addLyric('::/p:c/o:4/nf:no/tb:I')

    chords.measure(24).notes[0].addLyric('::/p:d/o:5/nf:no/ta:2')
    chords.measure(24).notes[0].addLyric('::/p:g/o:3/nf:no/tb:V')

    chords.measure(30).notes[0].addLyric('::/p:f/o:4/tb:7')

    chords.measure(34).notes[0].addLyric('::/p:c/o:5/nf:no/v:1/ta:1')
    chords.measure(34).notes[0].addLyric('::/p:g/o:4/nf:no/v:2')
    chords.measure(34).notes[0].addLyric('::/p:c/o:4/nf:no/v:1/tb:I')

    sr = analysis.reduction.ScoreReduction()
    sr.chordReduction = chords
    #sr.score = src
    unused_post = sr.reduce()
def testDataSet():
    fes = features.extractorsById(["ql1", "ql2", "ql3"])
    ds = features.DataSet(classLabel="Composer")
    ds.addFeatureExtractors(fes)

    b1 = corpus.parse("bwv1080", 7).measures(0, 50)
    ds.addData(b1, classValue="Bach", id="artOfFugue")
    ds.addData("bwv66.6.xml", classValue="Bach")
    # ds.addData('c:/handel/hwv56/movement3-05.md',
    ds.addData("hwv56/movement3-05.md", classValue="Handel")
    ds.addData("http://www.midiworld.com/midis/other/handel/gfh-jm01.mid")

    ds.process()

    print(ds.getAttributeLabels())
    ds.write("d:/desktop/baroqueQLs.csv")
    fList = ds.getFeaturesAsList()
    print(fList[0])
    print(features.OutputTabOrange(ds).getString())

    for i in range(len(fList)):
        # display scores as pngs generated by Lilypond
        # if the most common note is an eighth note (0.5)
        # (finds the two Handel scores)
        if fList[i][2] == 0.5:
            pass
            # ds.streams[i].show('lily.png')

    p = graph.PlotFeatures(ds.streams, fes[1:], roundDigits=2)
    p.process()
def initializeScore(self):
    try:
        score = converter.parse(self.nameRecordedSong).parts[0]
    except converter.ConverterException:
        score = corpus.parse(self.nameRecordedSong).parts[0]
    self.scorePart = score

    self.pageMeasureNumbers = []
    for e in score.flat:
        if 'PageLayout' in e.classes:
            self.pageMeasureNumbers.append(e.measureNumber)
    lastMeasure = score.getElementsByClass('Measure')[-1].measureNumber
    self.pageMeasureNumbers.append(lastMeasure)
    self.totalPagesScore = len(self.pageMeasureNumbers) - 1

    scNotes = score.flat.notesAndRests
    noteCounter = 1
    pageCounter = 0
    middlePagesCounter = 0
    self.middlePages = []
    self.beginningPages = []
    for i in scNotes:
        imn = i.measureNumber
        if pageCounter <= self.totalPagesScore and imn >= self.pageMeasureNumbers[pageCounter]:
            self.beginningPages.append(noteCounter)
            pageCounter += 1
        if middlePagesCounter < self.totalPagesScore and imn == math.floor(
                (self.pageMeasureNumbers[middlePagesCounter + 1]
                 + self.pageMeasureNumbers[middlePagesCounter]) / 2):
            self.middlePages.append(noteCounter)
            middlePagesCounter += 1
        noteCounter += 1

    environLocal.printDebug("beginning of the pages %s" % str(self.beginningPages))
    environLocal.printDebug("middles of the pages %s" % str(self.middlePages))
    environLocal.printDebug("initializeScore finished")
def xtestSimplePickle(self):
    from music21 import freezeThaw
    from music21 import corpus

    c = corpus.parse('bwv66.6').parts[0].measure(0).notes
    #c.show('t')
    #
    # for el in c:
    #     storedIds.append(el.id)
    #     storedSitesIds.append(id(el.sites))
    #
    # return

    n1 = c[0]
    n2 = c[1]

    sf = freezeThaw.StreamFreezer(c, fastButUnsafe=True)
    sf.setupSerializationScaffold()

    for dummy in n1.sites.siteDict:
        pass
        #print idKey
        #print n1.sites.siteDict[idKey]['obj']
    for dummy in n2.sites.siteDict:
        pass
        #print idKey
        #print n2.sites.siteDict[idKey]['obj']

    dummy = pickle.dumps(c, protocol=-1)
def xtestBusyCallback(self):
    '''
    tests to see if the busyCallback function is called properly
    '''
    from music21 import corpus
    import random

    def busyCounter(timeList):
        timeCounter = timeList[0]
        timeCounter.times += timeCounter.updateTime
        print("hi! waited %d milliseconds" % (timeCounter.times))

    class Mock():
        times = 0

    timeCounter = Mock()
    timeCounter.updateTime = 500

    b = corpus.parse('bach/bwv66.6')
    keyDetune = []
    for i in range(0, 127):
        keyDetune.append(random.randint(-30, 30))
    for n in b.flat.notes:
        n.microtone = keyDetune[n.midi]

    sp = StreamPlayer(b)
    sp.play(busyFunction=busyCounter, busyArgs=[timeCounter],
            busyWaitMilliseconds=500)
def xtestTrecentoMadrigal(self):
    from music21 import corpus

    c = corpus.parse('beethoven/opus18no1', 2).measures(1, 19)
    #c = corpus.parse('PMFC_06_Giovanni-05_Donna').measures(1, 30)
    #c = corpus.parse('PMFC_06_Giovanni-05_Donna').measures(90, 118)
    #c = corpus.parse('PMFC_06_Piero_1').measures(1, 10)
    #c = corpus.parse('PMFC_06-Jacopo').measures(1, 30)
    #c = corpus.parse('PMFC_12_13').measures(1, 40)

    # fix clef
    # from music21 import clef
    # startClefs = c.parts[1].getElementsByClass('Measure')[0].getElementsByClass('Clef')
    # if len(startClefs):
    #     clef1 = startClefs[0]
    #     c.parts[1].getElementsByClass('Measure')[0].remove(clef1)
    # c.parts[1].getElementsByClass('Measure')[0].insert(0, clef.Treble8vbClef())

    cr = ChordReducer()
    #cr.printDebug = True
    p = cr.multiPartReduction(c, maxChords=3, closedPosition=True)
    #p = cr.multiPartReduction(c, closedPosition=True)
    c.insert(0, p)
    c.show()
def testBestTimeSignatureB(self):
    '''
    Correct the TimeSignatures (4/4 in m. 1; no others) in a 4-measure score
    of 12, 11.5, 12, 13 quarters, where one of the parts is a PartStaff with
    multiple voices.
    '''
    from music21 import corpus
    faulty = corpus.parse('demos/incorrect_time_signature_pv')
    for m in faulty.recurse().getElementsByClass('Measure'):
        m.timeSignature = m.bestTimeSignature()
    p1 = faulty.parts[1]
    tsReps = []
    for m in p1.getElementsByClass('Measure'):
        tsReps.append(repr(m.timeSignature))
    self.assertEqual(tsReps, ['<music21.meter.TimeSignature 12/4>',
                              '<music21.meter.TimeSignature 23/8>',
                              '<music21.meter.TimeSignature 12/4>',
                              '<music21.meter.TimeSignature 13/4>'])
def xtestFindConsecutiveScaleB(self):
    from music21 import corpus

    scGMajor = scale.MajorScale('g4')
    scAMajor = scale.MajorScale('a4')
    scDMajor = scale.MajorScale('d4')

    s = corpus.parse('mozart/k80/movement1').measures(1, 28)

    for sc in [scGMajor, scDMajor, scAMajor]:
        for part in s.parts:  # just first part
            # must provide flat version
            post = findConsecutiveScale(part.flat, sc, degreesRequired=5,
                                        comparisonAttribute='name')
            for g, group in enumerate(post):
                for n in group:
                    n.addLyric(f'{sc.getTonic().name}{g + 1}')
def spliceAnalysis(book=3, madrigal=1):
    '''
    Splice an analysis of the madrigal beneath the madrigal itself.
    '''
    #mad = corpus.parse('monteverdi/madrigal.%s.%s.xml' % (book, madrigal))
    analysis = corpus.parse('monteverdi/madrigal.%s.%s.rntxt' % (book, madrigal))

    # these are multiple parts in a score stream
    #excerpt = mad.measures(1, 20)

    # get from first part
    aMeasures = analysis.parts[0].measures(1, 20)
    aMeasures.getElementsByClass('Measure')[0].clef = clef.TrebleClef()
    for myN in aMeasures.flat.notesAndRests:
        myN.hideObjectOnPrint = True
    x = aMeasures.write()
    print(x)
def add_composer(composer, filetype, path, explicit_repeats):
    """
    Add music21 corpus pieces by composer.
    Use `firms_cli.py composers` to see a list of composers.
    """
    start = time.time()
    sqlIRSystem = connect(path)
    paths = corpus.getComposer(composer, filetype)
    if len(paths) == 0:
        print("Error: no pieces found matching composer %s" % composer)
    else:
        print("Found %s pieces" % (len(paths)))
    for idx, path in enumerate(paths):
        print("\tProcessing piece %s: %s" % (idx, path))
        stream = corpus.parse(path)
        for piece in stream.recurse(classFilter=m21stream.Score, skipSelf=False):
            sqlIRSystem.add_piece(piece, path, explicit_repeats)
    print("Elapsed time: %s sec" % (time.time() - start))
def x_testFreezeThawPickle(self):
    from music21 import freezeThaw
    from music21 import corpus

    c = corpus.parse('luca/gloria')
    # c.show('t')

    sf = freezeThaw.StreamFreezer(c, fastButUnsafe=True)
    d = sf.writeStr()
    # print(d)

    st = freezeThaw.StreamThawer()
    st.openStr(d)
    s = st.stream

    # test to see if we can find everything
    for dummy in s.recurse():
        pass
def simple1():
    '''
    show correlations (if any) between note length and pitch in several
    pieces coded in musicxml or humdrum, also including the trecento cadences.
    '''
    for work in ['opus18no1', 'opus59no3']:
        movementNumber = 3
        score = corpus.parse(work, movementNumber)  #, extList=['xml'])

        for part in score:
            instrumentName = part.flat.getElementsByClass(
                instrument.Instrument)[0].bestName()
            title = '%s, Movement %s, %s' % (work, movementNumber, instrumentName)
            g = graph.PlotScatterPitchSpaceQuarterLength(part.flat.sorted, title=title)
            g.process()
def testCuthbertLocal(self):
    '''
    test a local version of this mess...
    '''
    from music21 import corpus, environment
    environLocal = environment.Environment()

    s = corpus.parse('luca/gloria').measures(1, 19)
    #s = corpus.parse('beethoven/opus18no1', 2).parts[0].measures(4, 10)

    vfp = VexflowPickler()
    vfp.defaults['m21URI'] = 'file:///Users/Cuthbert/git/music21j/src/music21'
    vfp.defaults['requireURI'] = 'file:///Users/Cuthbert/git/music21j/ext/require/require.js'
    data = vfp.fromObject(s)

    fp = environLocal.getTempFile('.html')
    with open(fp, 'w') as f:
        f.write(data)
    environLocal.launch('vexflow', fp)
def getQJ():
    '''
    loads Quod Jactatur from the corpus, transposes it to an easy-to-view
    range and stores it in the cache.

    >>> from music21.alpha.trecento import quodJactatur
    >>> qj = quodJactatur.getQJ()
    >>> qj.flat.notesAndRests[0]
    <music21.note.Note C>
    '''
    qj = corpus.parse("ciconia/quod_jactatur")
    qjPart = qj.getElementsByClass(stream.Part)[0]
    qjPart.transpose("P-8", inPlace=True)
    qjPart.replace(qjPart.flat.getElementsByClass(clef.Clef)[0],
                   clef.BassClef())
    cachedParts['1-0-False-False'] = copy.deepcopy(qjPart)
    return qjPart
def testFreezeThawVariant(self):
    from music21 import freezeThaw
    from music21 import corpus
    from music21 import variant
    from music21 import stream
    from music21 import note

    c = corpus.parse('luca/gloria')

    data2M2 = [('f', 'eighth'), ('c', 'quarter'), ('a', 'eighth'), ('a', 'quarter')]
    stream2 = stream.Stream()
    m = stream.Measure()
    for pitchName, durType in data2M2:
        n = note.Note(pitchName)
        n.duration.type = durType
        m.append(n)
        # stream2.append(n)
    stream2.append(m)
    # c.show('t')
    variant.addVariant(c.parts[0], 6.0, stream2,
                       variantName='rhythmic_switch', replacementDuration=3.0)

    # test Variant is in stream
    unused_v1 = c.parts[0].getElementsByClass('Variant')[0]

    sf = freezeThaw.StreamFreezer(c, fastButUnsafe=True)
    # sf.v = v
    d = sf.writeStr()
    # print(d)

    # print('thawing.')
    st = freezeThaw.StreamThawer()
    st.openStr(d)
    s = st.stream
    # s.show('lily.pdf')

    p0 = s.parts[0]
    variants = p0.getElementsByClass('Variant')
    v2 = variants[0]
    self.assertEqual(v2._stream[0][1].offset, 0.5)
def melodicChordExpression(show=True):
    '''
    This method not only searches the entire second violin part of a complete
    string quartet for a seventh chord expressed melodically, but creates new
    notation to display the results with analytical markup.
    '''
    #from music21 import *
    #from music21 import corpus, stream, chord
    beethovenScore = corpus.parse('beethoven/opus133.xml')
    # parts are given IDs by the MusicXML part name
    violin2 = beethovenScore.getElementById('2nd Violin')

    # create an empty container for storing found notes
    display = stream.Stream()

    # iterate over all measures
    for measure in violin2.getElementsByClass('Measure'):
        notes = measure.findConsecutiveNotes(skipUnisons=True,
                                             skipChords=True,
                                             skipOctaves=True,
                                             skipRests=True,
                                             noNone=True)
        pitches = [n.pitch for n in notes]
        # examine four-note groups, where i is the first of four
        for i in range(len(pitches) - 3):
            # create a chord from four pitches
            testChord = chord.Chord(pitches[i:i + 4])
            # modify duration for final presentation
            testChord.duration.type = "whole"
            if testChord.isDominantSeventh():
                # append the found pitches as chord
                testChord.lyric = "m. " + str(measure.number)
                # store the chord in a measure
                emptyMeasure = stream.Measure()
                emptyMeasure.append(testChord.closedPosition())
                display.append(emptyMeasure)
                # append the source measure, tagging
                # the first note with the pitch classes used in the measure
                measure.notesAndRests[0].lyric = chord.Chord(
                    measure.pitches).orderedPitchClassesString
                display.append(measure)

    # showing the complete Stream will produce output
    if show:
        display.show('musicxml')
def getStreamAndmd5(corpusFilepath):
    '''
    returns a list of all the (score, md5hash) pairs associated with this file.
    Typically this is a list of one tuple, but if the file path contains an
    opus file, the scores are parsed into tuples with
    :meth:`music21.demos.bhadley.aws.unbundleOpus` and the list is returned.

    >>> from music21.demos.bhadley.mrjobaws.awsutility import getStreamAndmd5
    >>> #_DOCS_SHOW getStreamAndmd5('airdsAirs/book3.abc')[12:14]
    [(<music21.stream.Score ...>, 'c1666c19d63fc0940f111008e2269f75.413'), (<music21.stream.Score ...>, 'c1666c19d63fc0940f111008e2269f75.414')]

    >>> getStreamAndmd5('bach/bwv412.mxl')
    [(<music21.stream.Score ...>, 'f9d5807477e61f03b66c99ce825a3b5f')]
    '''
    s = corpus.parse(corpusFilepath)
    if s.isClassOrSubclass(['Opus']):
        return unbundleOpus(s)
    else:
        return [(s, md5OfCorpusFile(corpusFilepath))]
def add_music21(filetype, path, explicit_repeats):
    """
    Add all pieces from the music21 corpus. Note: this results in over four
    thousand pieces and may take a significant amount of time.
    """
    start = time.time()
    sqlIRSystem = connect(path)
    paths = corpus.getPaths(filetype)
    num_pieces = len(paths)
    for idx, path in enumerate(paths):
        print("Adding piece %s of %s" % (idx, num_pieces))
        try:
            stream = corpus.parse(path)
            for piece in stream.recurse(classFilter=m21stream.Score, skipSelf=False):
                sqlIRSystem.add_piece(piece, path, explicit_repeats)
        except Exception:
            print("\tUnable to process piece %s" % path)
    print("Elapsed time: %s sec" % (time.time() - start))
def newDots(show=True):
    # alternative chorales:
    # 26.6 : no pickup, eighth notes
    # bach/bwv30.6

    # load a Bach Chorale from the music21 corpus of supplied pieces
    bwv281 = corpus.parse('bach/bwv281.xml')

    # get just the bass part using DOM-like method calls
    bass = bwv281.getElementById('Bass')

    # apply a Lerdahl/Jackendoff-style metrical analysis to the piece.
    music21.analysis.metrical.labelBeatDepth(bass)

    # display measure 0 (pickup) to measure 6 in the default viewer
    # (here Finale Reader 2009)
    if show is True:
        bass.measures(0, 6).show()
def chordifyAnalysisBrief():
    #sSrc = corpus.parseWork('josquin/milleRegrets').mergeScores()
    from music21 import stream, interval
    #sExcerpt = corpus.parseWork('bwv1080', 8).measures(10, 12)  # 128, 134
    #o = corpus.parseWork('josquin/milleRegrets')  # remove number

    o = corpus.parse('josquin/laDeplorationDeLaMorteDeJohannesOckeghem')
    excerpt = o.mergeScores().measures(126, 134)

    reduction = excerpt.chordify()
    for c in reduction.flat.getElementsByClass('Chord'):
        c.closedPosition(forceOctave=4, inPlace=True)
        c.removeRedundantPitches(inPlace=True)
        c.annotateIntervals()
    excerpt.insert(0, reduction)
    excerpt.show()
def ex04(show=True, *arguments, **keywords):
    '''
    This example, by graphing pitch class over note offset, shows the usage of
    pitch classes in the violoncello part over the duration of the composition.
    While the display is coarse, it is clear that the part gets less chromatic
    towards the end of the work.
    '''
    if 'op133' in keywords:
        sStream = keywords['op133']
    else:
        sStream = corpus.parse('opus133.xml')  # load a MusicXML file

    # note: measure numbers are not being shown correctly
    # need to investigate
    part = sStream.parts[3]

    g = graph.plot.ScatterPitchClassOffset(part.flat, title=part.partName)
    if show:
        g.run()
def testM21ToTsv(self):
    import os
    from music21 import corpus
    bachHarmony = corpus.parse('bach/choraleAnalyses/riemenschneider001.rntxt')
    initial = M21toTSV(bachHarmony)
    tsvData = initial.tsvData
    self.assertEqual(bachHarmony.parts[0].measure(1)[0].figure, 'I')  # NB pickup measure 0.
    self.assertEqual(tsvData[1][0], 'I')

    # Test .write
    envLocal = environment.Environment()
    tempF = envLocal.getTempFile()
    initial.write(tempF)
    handler = TsvHandler(tempF)
    self.assertEqual(handler.tsvData[0][0], 'I')

    os.remove(tempF)
def demoBasic():
    # A score can be represented as a Stream of Parts and Metadata
    s1 = corpus.parse('bach/bwv103.6')

    # We can show() a Stream in a variety of forms
    #s1.show()
    #s1.show('midi')  # has errors!
    #s1.show('text')  # too long here

    # Can get the number of Elements as a length, and iterate over Elements
    len(s1)

    # Can grab polyphonic Measure range;
    # Can get sub-components through class or id filtering
    soprano = s1.getElementById('soprano')

    # Can employ the same show() method on any Stream or Stream subclass
    #soprano.show()
    #soprano.show('midi')  # problem is here: offset is delayed

    # A Part might contain numerous Measure Streams
    len(soprano.getElementsByClass('Measure'))
    unused_mRange = soprano.measures(14, 16)  # @UnusedVariable
    #mRange.show()
    #mRange.sorted.show('text')  # here we can see this

    sNew = soprano.measures(14, 16).flat.notesAndRests.transpose('p-5')
    sNew.makeAccidentals(overrideStatus=True)
    ts1 = meter.TimeSignature('3/4')
    ts2 = meter.TimeSignature('5/8')
    sNew.insert(0, ts1)
    sNew.insert(3, ts2)
    #sNew.show()

    sNew.augmentOrDiminish(2, inPlace=True)
    for n in sNew.notesAndRests:
        if n.pitch.name == 'G' and n.quarterLength == 2:
            n.addLyric('%s (2 QLs)' % n.name)
    sNew.show()
def testScalesPy10(self):
    # look through
    s = corpus.parse('bwv1080/06')
    #part = corpus.parse('bwv1080/03').measures(24, 29).parts[0]
    #part = corpus.parse('bwv1080/03').parts[0]

    #from music21 import corpus, scale, note
    from music21 import analysis

    scDMelodicMinor = scale.MelodicMinorScale('d4')
    scGMelodicMinor = scale.MelodicMinorScale('g4')

    part = corpus.parse('bwv1080/03').parts[0].measures(46, 53)

    for sc in [scDMelodicMinor, scGMelodicMinor]:
        groups = analysis.search.findConsecutiveScale(
            part.flat, sc, degreesRequired=4, comparisonAttribute='name')
        for group in groups:
            for n in group['stream'].notes:
                n.addLyric('%s^%s' % (sc.getTonic().name.lower(),
                                      sc.getScaleDegreeFromPitch(
                                          n.pitch, group['direction'])))
def main():
    parser = get_cmd_line_parser(description=__doc__)
    ParserArguments.filename(parser)
    ParserArguments.tempo(parser)
    ParserArguments.framerate(parser)
    ParserArguments.set_defaults(parser)
    args = parser.parse_args()
    defaults.framerate = args.framerate

    print('Generating Signal:')
    work = corpus.parse(numpy.random.choice(corpus.getCorePaths()))
    notes = work.flat.notes
    waveform = audify(notes, args.tempo, args.verbose)

    print('Writing Song {} to file {}...'.format(work.corpusFilepath, args.filename))
    with wav_file_context(args.filename) as fout:
        fout.write_frames(waveform.frames)

    return 0
def testTranslateA(self):
    # this tests a few files in this collection, some of which are hard to parse
    from music21 import corpus
    for fn in (
        'ToCashellImGoingJig.abc',
        'SundayIsMyWeddingDayJig.abc',
        'SinkHimDoddieHighlandFling.abc',
        'RandyWifeOfGreenlawReel.abc',
        'PassionFlowerHornpipe.abc',
        'NightingaleClog.abc',
        'MountainRangerHornpipe.abc',
        'LadiesPandelettsReel.abc',
        'JauntingCarHornpipe.abc',
        'GoodMorrowToYourNightCapJig.abc',
        'ChandlersHornpipe.abc',
        'AlistairMaclalastairStrathspey.abc',
    ):
        s = corpus.parse(fn)
        assert s is not None
def testTunePythagorean(self):
    '''
    Applies a Pythagorean tuning to a section of D. Luca's Gloria and then
    uses Marchetto da Padova's very high sharps and very low flats (except
    B-flat) to inflect the accidentals
    '''
    s = corpus.parse('luca/gloria').measures(70, 79)
    for p in s.parts:
        inst = p[instrument.Instrument].first()
        inst.midiProgram = 52

    sc = scale.ScalaScale('F2', 'pyth_12.scl')
    sc.tune(s)

    for p in s.flatten().pitches:
        if p.accidental is not None:
            if p.accidental.name == 'sharp':
                p.microtone = p.microtone.cents + 45
            elif p.accidental.name == 'flat' and p.step == 'B':
                p.microtone = p.microtone.cents - 20
            elif p.accidental.name == 'flat':
                p.microtone = p.microtone.cents - 45
def testM21ToTsv(self):
    import os
    from music21 import corpus
    bachHarmony = corpus.parse('bach/choraleAnalyses/riemenschneider001.rntxt')
    initial = M21toTSV(bachHarmony)
    tsvData = initial.tsvData
    self.assertEqual(bachHarmony.parts[0].measure(1)[0].figure, 'I')  # NB pickup measure 0.
    self.assertEqual(tsvData[1][0], 'I')

    # Test .write
    envLocal = environment.Environment()
    tempF = envLocal.getTempFile()
    tempDir = os.path.split(tempF)[0]
    newFileName = 'TestTsvFile'
    newFilePath = os.path.join(tempDir, newFileName)
    initial.write(newFilePath)
    handler = TsvHandler(newFilePath)
    self.assertEqual(handler.tsvData[0][0], 'I')
def chordifyAnalysis():
    o = corpus.parse('josquin/milleRegrets')
    sSrc = o.mergeScores()
    #sSrc = corpus.parse('bwv1080', 1)
    sExcerpt = sSrc.measures(0, 20)

    display = stream.Score()
    display.metadata = sSrc.metadata
    for p in sExcerpt.parts:
        display.insert(0, p)

    reduction = sExcerpt.chordify()
    for c in reduction.flat.getElementsByClass('Chord'):
        c.annotateIntervals()
        c.closedPosition(forceOctave=4, inPlace=True)
        c.removeRedundantPitches(inPlace=True)
    display.insert(0, reduction)
    display.show()
def findRaisedSevenths(show=True):
    from music21 import corpus, meter, stream, clef

    score = corpus.parse('bach/bwv366.xml')
    ts = score.flat.getElementsByClass(meter.TimeSignature)[0]
    #ts.beatSequence.partition(3)

    found = stream.Stream()
    count = 0
    for part in score.iter.getElementsByClass(stream.Part):
        found.insert(count, part.flat.iter.getElementsByClass(clef.Clef)[0])
        for i, m in enumerate(part.iter.getElementsByClass('Measure')):
            for n in m.iter.notes:
                if n.name == 'C#':
                    n.addLyric('%s, m. %s' % (part.partName[0], m.number))
                    n.addLyric('beat %s' % ts.getBeat(n.offset))
                    found.insert(count, n)
                    count += 4
    if show:
        found.show('musicxml')
def testScalesPy06(self):
    #from music21 import corpus, scale, note
    #from music21 import analysis
    scGMajor = scale.MajorScale('g4')
    scDMajor = scale.MajorScale('d4')

    s = corpus.parse('mozart/k80/movement1').measures(21, 25)
    s.remove(s['cello'])
    s.remove(s['viola'])

    for part in s.parts:
        for sc in [scGMajor, scDMajor]:
            groups = alpha.analysis.search.findConsecutiveScale(
                part.flat, sc, degreesRequired=5, comparisonAttribute='name')
            for group in groups:
                for n in group['stream'].notesAndRests:
                    n.addLyric('%s^%s' % (sc.getTonic().name,
                                          sc.getScaleDegreeFromPitch(n.pitch)))
def testMuseDataStage1A(self):
    from music21 import corpus
    s = corpus.parse('k168', 1)
    self.assertEqual(len(s.parts), 4)
    self.assertEqual(
        str(s.parts[0].flat.getElementsByClass('TimeSignature')[0]),
        '<music21.meter.TimeSignature 4/4>')
    self.assertEqual(
        [n.offset for n in s.parts[0].getElementsByClass('Measure')[0].notes],
        [0.0, 3.0, 3.5, 3.75])
    self.assertEqual(
        [n.nameWithOctave for n in s.parts[0].getElementsByClass('Measure')[0].notes],
        ['F5', 'F5', 'E5', 'D5'])
    self.assertEqual(
        [n.offset for n in s.parts[1].getElementsByClass('Measure')[0].notes],
        [1.0, 2.0, 3.0])
def testMuseDataStage1B(self):
    from music21 import corpus
    s = corpus.parse('k169', 3)
    self.assertEqual(len(s.parts), 4)
    self.assertEqual(
        str(s.parts[0].flat.getElementsByClass('TimeSignature')[0]),
        '<music21.meter.TimeSignature 3/4>')
    self.assertEqual(
        [n.offset for n in s.parts[0].getElementsByClass('Measure')[0].notes],
        [0.0, 2.0])
    self.assertEqual(
        [n.nameWithOctave for n in s.parts[0].getElementsByClass('Measure')[0].notes],
        ['A4', 'B4'])
    self.assertEqual(
        [n.offset for n in s.parts[2].getElementsByClass('Measure')[0].notes],
        [0.0, 1.0, 2.0])
def testChordSymbols(self):
    from music21 import corpus, pitch
    # noinspection SpellCheckingInspection
    o = corpus.parse('nottingham-dataset/reelsa-c')
    self.assertEqual(len(o), 2)

    # each score in the opus is a Stream that contains a Part and metadata
    p1 = o.getScoreByNumber(81).parts[0]
    self.assertEqual(p1.offset, 0.0)
    self.assertEqual(len(p1.flat.notesAndRests), 77)
    self.assertEqual(len(list(p1.flat.getElementsByClass('ChordSymbol'))), 25)

    # Am/C
    self.assertEqual(list(p1.flat.getElementsByClass('ChordSymbol'))[7].root(),
                     pitch.Pitch('A3'))
    self.assertEqual(list(p1.flat.getElementsByClass('ChordSymbol'))[7].bass(),
                     pitch.Pitch('C3'))

    # G7/B
    self.assertEqual(list(p1.flat.getElementsByClass('ChordSymbol'))[14].root(),
                     pitch.Pitch('G3'))
    self.assertEqual(list(p1.flat.getElementsByClass('ChordSymbol'))[14].bass(),
                     pitch.Pitch('B2'))
def testMetadataSearch(self):
    import re
    from music21 import corpus
    score = corpus.parse('ciconia')
    self.assertEqual(
        score.metadata.search(
            'quod',
            field='title',
        ),
        (True, 'title'),
    )
    self.assertEqual(
        score.metadata.search(
            'qu.d',
            field='title',
        ),
        (True, 'title'),
    )
    self.assertEqual(
        score.metadata.search(
            re.compile('(.*)canon(.*)'),
        ),
        (True, 'movementName'),
    )
def showDots(show=True):
    from music21 import corpus, meter

    score = corpus.parse('bach/bwv281.xml')
    partBass = score.getElementById('Bass')

    ts = partBass.flat.getElementsByClass(meter.TimeSignature)[0]
    ts.beatSequence.partition(1)
    for h in range(len(ts.beatSequence)):
        ts.beatSequence[h] = ts.beatSequence[h].subdivide(2)
        for i in range(len(ts.beatSequence[h])):
            ts.beatSequence[h][i] = ts.beatSequence[h][i].subdivide(2)
            for j in range(len(ts.beatSequence[h][i])):
                ts.beatSequence[h][i][j] = ts.beatSequence[h][i][j].subdivide(2)

    for m in partBass.getElementsByClass('Measure'):
        for n in m.notes:
            for i in range(ts.getBeatDepth(n.offset)):
                n.addLyric('*')

    if show:
        partBass.getElementsByClass('Measure')[0:7].show()