Example #1
def demoGraphBach():

    from music21 import corpus

    dpi = 300

    # lopping off the first measure to avoid the pickup
    s1 = corpus.parseWork('bach/bwv103.6').measures(1,None)
    s2 = corpus.parseWork('bach/bwv18.5-lz').measures(1,None)

    s1.plot('key', dpi=dpi, title='Windowed Key Analysis, Bach, BWV 103.6', windowStep='pow2')
    s2.plot('key', dpi=dpi, title='Windowed Key Analysis, Bach, BWV 18.5', windowStep='pow2')
Example #2
def demoGraphBach():

    from music21 import corpus

    dpi = 300

    # lopping off the first measure to avoid the pickup
    s1 = corpus.parseWork("bach/bwv103.6").measures(1, None)
    s2 = corpus.parseWork("bach/bwv18.5-lz").measures(1, None)

    s1.plot("key", dpi=dpi, title="Windowed Key Analysis, Bach, BWV 103.6", windowStep="pow2")
    s2.plot("key", dpi=dpi, title="Windowed Key Analysis, Bach, BWV 18.5", windowStep="pow2")
Example #3
def demoGettingWorks():
    

    from music21 import corpus, converter

    # Can obtain works from an integrated corpus
    s1 = corpus.parseWork('bach/bwv103.6') # @UnusedVariable
    s2 = corpus.parseWork('bach/bwv18.5-lz') # @UnusedVariable

    # Can parse data stored in MusicXML files locally or online:
    s = converter.parse('http://www.musicxml.org/xml/elite.xml') # @UnusedVariable

    # Can parse data stored in MIDI files locally or online:
    s = converter.parse('http://www.jsbchorales.net/down/midi/010306b_.mid') # @UnusedVariable

    # Can parse data stored in Kern files locally or online:
    s = converter.parse('http://kern.ccarh.org/cgi-bin/ksdata?l=cc/bach/371chorales&file=chor120.krn') # @UnusedVariable
    def testMultiWorkImported(self):

        from music21 import corpus
        # defines multiple works, will return an opus
        o = corpus.parseWork('josquin/milleRegrets')
        self.assertEqual(len(o), 4)
        # each score in the opus is a Stream that contains a Part and metadata
        p1 = o.getScoreByNumber(1).parts[0] 
        self.assertEqual(p1.offset, 0.0)
        self.assertEqual(len(p1.flat.notes), 89)

        p2 = o.getScoreByNumber(2).parts[0] 
        self.assertEqual(p2.offset, 0.0)
        self.assertEqual(len(p2.flat.notes), 81)

        p3 = o.getScoreByNumber(3).parts[0] 
        self.assertEqual(p3.offset, 0.0)
        self.assertEqual(len(p3.flat.notes), 83)

        p4 = o.getScoreByNumber(4).parts[0] 
        self.assertEqual(p4.offset, 0.0)
        self.assertEqual(len(p4.flat.notes), 79)


        sMerged = o.mergeScores()
        self.assertEqual(sMerged.metadata.title, 'Mille regrets')
        self.assertEqual(sMerged.metadata.composer, 'Josquin des Prez')
        self.assertEqual(len(sMerged.parts), 4)


        self.assertEqual(sMerged.parts[0].getElementsByClass('Clef')[0].sign, 'G')
        self.assertEqual(sMerged.parts[1].getElementsByClass('Clef')[0].sign, 'G')
        self.assertEqual(sMerged.parts[2].getElementsByClass('Clef')[0].sign, 'G')
        self.assertEqual(sMerged.parts[2].getElementsByClass('Clef')[0].octaveChange, -1)
        self.assertEqual(sMerged.parts[3].getElementsByClass('Clef')[0].sign, 'F')
    def testExamplesD(self):
        from music21 import corpus
        # Parse an Opus, a collection of Scores
        o = corpus.parseWork('josquin/laDeplorationDeLaMorteDeJohannesOckeghem')
        # Create a Score from a Measure range
        sExcerpt = o.mergeScores().measures(127, 133)
        # Create a reduction of Chords
        reduction = sExcerpt.chordify()
        # Iterate over the Chords and prepare presentation
        for c in reduction.flat.getElementsByClass('Chord'):
            c.closedPosition(forceOctave=4, inPlace=True)
            c.removeRedundantPitches(inPlace=True)
            c.annotateIntervals()
        # Add the reduction and display the results
        sExcerpt.insert(0, reduction)
        #sExcerpt.show()

        self.assertEqual(len(sExcerpt.flat.getElementsByClass('Chord')), 13)

        self.assertEqual(str(sExcerpt.flat.getElementsByClass('Chord')[1]), '<music21.chord.Chord E4 G4 B4 E5>')

        self.assertEqual(str(sExcerpt.flat.getElementsByClass('Chord')[2]), '<music21.chord.Chord E4 G4 E5>')
        self.assertEqual(str(sExcerpt.flat.getElementsByClass('Chord')[3]), '<music21.chord.Chord D4 F4 A4 D5>')
        self.assertEqual(str(sExcerpt.flat.getElementsByClass('Chord')[4]), '<music21.chord.Chord D4 F4 A4 D5>')
        self.assertEqual(str(sExcerpt.flat.getElementsByClass('Chord')[5]), '<music21.chord.Chord D4 F4 A4 D5>')
        self.assertEqual(str(sExcerpt.flat.getElementsByClass('Chord')[6]), '<music21.chord.Chord A4 C5 E5>')
        self.assertEqual(str(sExcerpt.flat.getElementsByClass('Chord')[7]), '<music21.chord.Chord A4 C5>')

        self.assertEqual(str(sExcerpt.flat.getElementsByClass('Chord')[8]), '<music21.chord.Chord G4 A4 B4 C5>')
        self.assertEqual(str(sExcerpt.flat.getElementsByClass('Chord')[9]), '<music21.chord.Chord F4 A4 D5>')

        self.assertEqual(str(sExcerpt.flat.getElementsByClass('Chord')[10]), '<music21.chord.Chord F4 G4 A4 D5>')

        self.assertEqual(str(sExcerpt.flat.getElementsByClass('Chord')[11]), '<music21.chord.Chord F4 A4 D5>')
        self.assertEqual(str(sExcerpt.flat.getElementsByClass('Chord')[12]), '<music21.chord.Chord E4 G4 B4 E5>')
    def testOverviewMeterB(self):

        sSrc = corpus.parseWork('bach/bwv13.6.xml')

        sPart = sSrc.getElementById('Alto')
        ts = meter.TimeSignature('6/8')

        sMeasures = sPart.flat.notes.makeMeasures(ts)
        #sMeasures.show('t')

        sMeasures.makeTies(inPlace=True)

        # we have the same time signature value, but not the same object
        self.assertEquals(sMeasures[0].timeSignature.numerator, ts.numerator)
        self.assertEquals(sMeasures[0].timeSignature.denominator,
                         ts.denominator)
        # only have ts in first bar
        self.assertEquals(sMeasures[1].timeSignature, None)

        beatStrList = []
        for n in sMeasures.flat.notes:
            bs = n.beatStr
            n.addLyric(bs)
            beatStrList.append(bs)
            #environLocal.printDebug(['offset/parent', n, n.offset, n.parent, beatStr, 'bestMeasure:', beatMeasure])

        self.assertEquals(beatStrList[:10], ['1', '1 2/3', '2 1/3', '1', '1 2/3', '2 1/3', '1', '1 2/3', '2 1/3', '2 2/3'] )

        # TODO: there is a problem here with tied notes
        # the tied note gets the same offset as its origin
        # need to investigate
        #self.assertEquals(beatStrList, ['1', '1 2/3', '2 1/3', '1', '1 2/3', '2 1/3', '1', '1 2/3', '2 1/3', '2 2/3', '1', '1 1/3', '1 2/3', '2', '2 1/3', '1', '1 2/3', '1', '1 2/3', '2 1/3', '1', '1 2/3', '2 1/3', '1', '1', '1 2/3', '2 1/3', '1', '1 2/3', '2 1/3', '1', '1 2/3', '2 1/3', '1', '1 1/3', '1 2/3', '2', '2 1/3', '2 2/3', '1', '1 1/3', '1 2/3', '2 1/3', '1', '1 2/3', '2 1/3', '1', '1 1/3', '1 2/3', '2 1/3', '2 2/3', '1', '1 2/3', '2', '2 1/3'])

        #sMeasures.show()
        post = sMeasures.musicxml
    def testIntervalDiversity(self):
        from music21 import note, stream, corpus
        
        s = stream.Stream()
        s.append(note.Note('g#3'))
        s.append(note.Note('a3'))
        s.append(note.Note('g4'))

        id = MelodicIntervalDiversity()
        self.assertEqual(str(id.countMelodicIntervals(s)), "{'m7': [<music21.interval.Interval m7>, 1], 'm2': [<music21.interval.Interval m2>, 1]}")


        s = stream.Stream()
        s.append(note.Note('c3'))
        s.append(note.Note('d3'))
        s.append(note.Note('c3'))
        s.append(note.Note('d3'))

        id = MelodicIntervalDiversity()
        self.assertEqual(str(id.countMelodicIntervals(s)), "{'M2': [<music21.interval.Interval M2>, 3]}")

        self.assertEqual(str(id.countMelodicIntervals(s, ignoreDirection=False)), """{'M-2': [<music21.interval.Interval M-2>, 1], 'M2': [<music21.interval.Interval M2>, 2]}""")

        id = MelodicIntervalDiversity()
        s = corpus.parseWork('hwv56', '1-08')
        #s.show()

        self.assertEqual(str(id.countMelodicIntervals(s.parts[1])), "{'P5': [<music21.interval.Interval P5>, 1], 'P4': [<music21.interval.Interval P4>, 1], 'm3': [<music21.interval.Interval m3>, 1], 'M2': [<music21.interval.Interval M2>, 2]}")

        self.assertEqual(str(id.countMelodicIntervals(s)), "{'M3': [<music21.interval.Interval M3>, 1], 'P4': [<music21.interval.Interval P4>, 5], 'P5': [<music21.interval.Interval P5>, 2], 'M2': [<music21.interval.Interval M2>, 8], 'm3': [<music21.interval.Interval m3>, 3], 'm2': [<music21.interval.Interval m2>, 1]}")
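In the assertions above, ignoreDirection (True by default) folds the ascending and descending forms of an interval together, which is why c3-d3-c3-d3 counts as three M2s but as two M2s and one M-2 when direction is kept. A minimal sketch of the same bookkeeping over plain MIDI note numbers, counting semitone distances instead of interval names:

from collections import Counter

def countMelodicIntervals(midiNumbers, ignoreDirection=True):
    # Count the interval between each pair of adjacent notes, in semitones.
    counts = Counter()
    for a, b in zip(midiNumbers, midiNumbers[1:]):
        delta = b - a
        counts[abs(delta) if ignoreDirection else delta] += 1
    return dict(counts)

# c3-d3-c3-d3 (MIDI 48, 50, 48, 50)
print(countMelodicIntervals([48, 50, 48, 50]))                         # {2: 3}
print(countMelodicIntervals([48, 50, 48, 50], ignoreDirection=False))  # {2: 2, -2: 1}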
Example #8
def ex02(show=True, *arguments, **keywords):

    
    # This example searches the second violin part for adjacent non-redundant pitch classes that form dominant seventh chords.
    
    from music21 import corpus, chord, stream
    
    if 'op133' in keywords.keys():
        sStream = keywords['op133']
    else:
        sStream = corpus.parseWork('opus133.xml') # load a MusicXML file

    v2Part = sStream[1].getElementsByClass('Measure') # get all measures from the second violin
    
    # First, collect all non-redundant adjacent pitch classes, and store these pitch classes in a list. 
    pitches = []
    for i in range(len(v2Part.pitches)):
        pn = v2Part.pitches[i].name
        if i > 0 and pitches[-1] == pn: continue
        else: pitches.append(pn)
    
    # Second, compare all adjacent four-note groups of pitch classes and determine which are dominant sevenths; store this in a list and display the results. 
    found = stream.Stream()
    for i in range(len(pitches)-3):
        testChord = chord.Chord(pitches[i:i+4])
        if testChord.isDominantSeventh():
            found.append(testChord)
    if show:
        found.show()
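The two steps spelled out in the comments above (collapse immediately repeated pitch classes, then test every four-note window) do not depend on the Beethoven score; a minimal sketch of the same scan over plain pitch-name strings, reusing music21's chord.Chord only for the final dominant-seventh test:

from music21 import chord

def findDominantSevenths(pitchNames):
    # Step 1: drop immediate repetitions so only changes of pitch class remain.
    collapsed = []
    for name in pitchNames:
        if not collapsed or collapsed[-1] != name:
            collapsed.append(name)
    # Step 2: test every group of four adjacent pitch classes.
    hits = []
    for i in range(len(collapsed) - 3):
        if chord.Chord(collapsed[i:i + 4]).isDominantSeventh():
            hits.append(collapsed[i:i + 4])
    return hits

# G-B-D-F (with a repeated G) forms a dominant seventh on G.
print(findDominantSevenths(['G', 'G', 'B', 'D', 'F', 'E']))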
def januaryThankYou():
    from music21 import corpus, stream

    names = ['opus132', 'opus133', 'opus18no3', 'opus18no4', 'opus18no5', 'opus74']
    names += ['opus59no1', 'opus59no2', 'opus59no3']

    for workName in names:
        beethovenScore = corpus.parseWork('beethoven/' + workName, 1)
        for partNum in range(4):
            print(workName, str(partNum))
            thisPart = beethovenScore[partNum]
            display = stream.Stream()
            notes = thisPart.flat.findConsecutiveNotes(skipUnisons = True, skipChords = True,
                       skipOctaves = True, skipRests = True, noNone = True )
            for i in range(len(notes) - 4):
#                if (notes[i].name == 'E-' or notes[i].name == "D#") and notes[i+1].name == 'E' and notes[i+2].name == 'A':
                if notes[i].name == 'E-' and notes[i+1].name == 'E' and notes[i+2].name == 'A':
                        measureNumber = 0
                        for site in notes[i]._definedContexts.getSites():
                            if isinstance(site, stream.Measure):
                                measureNumber = site.number
                                display.append(site)
                        notes[i].lyric = workName + " " + str(thisPart.id) + " " + str(measureNumber)
                        m = stream.Measure()
                        m.append(notes[i])
                        m.append(notes[i+1])
                        m.append(notes[i+2])
                        m.append(notes[i+3])
                        m.insert(0, m.bestClef())
                        display.append(m)
            try:
                display.show()
            except:
                pass
    def runMusicxmlOutPartsBeethoven(self):
        '''Loading file and rendering musicxml output for each part: beethoven/opus59no2/movement3
        '''
        x = corpus.parseWork('beethoven/opus59no2/movement3', forceSource=True)
        # problem: doing each part is much faster than the whole score
        for p in x.parts:
            post = p.musicxml
Example #11
def schumann(show = True):
    from music21 import corpus, converter

    streamObject = corpus.parseWork('schumann/opus41no1', 3)
    streamObject.plot('pitch')

    from music21.humdrum import testFiles as tf
    streamObject = converter.parse(tf.mazurka6)
    streamObject.plot('pitch')
Example #12
def ex01(show=True, *arguments, **keywords):
    # This example first extracts a part and then a measure from a complete score. Next, the pitches in that measure are isolated as pitch classes. Finally, consecutive pitches from the measure are extracted, made into a chord, and shown to be a dominant seventh chord.
    
    from music21 import corpus, chord

    if 'op133' in keywords.keys():
        sStream = keywords['op133']
    else:
        sStream = corpus.parseWork('opus133.xml') # load a MusicXML file

    v2Part = sStream[1].getElementsByClass('Measure') # get all measures from the second violin
    if show:
        v2Part[48].show() # render the 48th measure as notation
    
    # create a list of pitch classes in this measure
    pcGroup = [n.pitchClass for n in v2Part[48].pitches] 

    if show:
        print(pcGroup) # display the collected pitch classes as a list
    # extract from the third pitch until just before the end
    pnGroup = [n.nameWithOctave for n in v2Part[48].pitches[2:-1]] 
    qChord = chord.Chord(pnGroup) # create a chord from these pitches
    
    if show:
        qChord.show() # render this chord as notation
        print(qChord.isDominantSeventh()) # find if this chord is a dominant
Example #13
def findRaisedSevenths(show=True):
    import music21
    from music21 import corpus, meter, stream

    score = corpus.parseWork('bach/bwv366.xml')  
    ts = score.flat.getElementsByClass(
        meter.TimeSignature)[0]
    #ts.beat.partition(3)

    found = stream.Stream()
    count = 0
    for part in score.getElementsByClass(stream.Part):
        found.insert(count, 
            part.flat.getElementsByClass(
            music21.clef.Clef)[0])
        for i in range(len(part.measures)):
            m = part.measures[i]
            for n in m.notes:
                if n.name == 'C#': 
                    n.addLyric('%s, m. %s' % (
                        part.getInstrument().partName[0],
                        m.measureNumber))
                    n.addLyric('beat %s' % ts.getBeat(n.offset))
                    found.insert(count, n)
                    count += 4
    if show:
        found.show('musicxml')
def simple4e(show=True):
    # 250.    Identify the longest note in a score
    from music21 import corpus, stream
    
    qLenMax = 0
    beethovenQuartet = corpus.parseWork('opus18no1', 3, extList=['xml'])
    maxNote = None
    for part in beethovenQuartet.getElementsByClass(stream.Part):
#         lily.LilyString("{ \\time 2/4 " + str(part.bestClef().lily) + " " + str(part.lily) + "}").showPNG()

        # note: this probably is not re-joining tied notes
        pf = part.flat.notes
        for n in pf:
            if n.quarterLength >= qLenMax and n.isNote==True:
                qLenMax = n.quarterLength
                maxNote = n
        maxNote.color = 'red'

        offset = part.flat.getOffsetByElement(maxNote)
        if offset == None:
            raise Exception('cannot find this note in the Stream: %s' % offset)
        display = part.flat.extractContext(maxNote, before = 4.0, after = 6.0)
               
    if show:
        print('longest duration was: %s quarters long' % (qLenMax))
        lily.LilyString("{ \\time 2/4 " + str(display.bestClef().lily) + " " + str(display.lily) + "}").showPNG()
        display.show()
def simple4f(show=True):
    # question 19: Calculate pitch-class sets for melodic passages segmented by rests.
    import music21
    from music21 import corpus, stream

    work = 'opus18no1'
    movementNumber = 3
    s = corpus.parseWork(work, movementNumber, extList=['xml'])

    foundSets = []
    candidateSet = []
    for part in s.getElementsByClass(stream.Part):
        eventStream = part.flat.notes
        for i in range(len(eventStream)):
            e = eventStream[i]
            if isinstance(e, music21.note.Rest) or i == len(eventStream)-1:
                if len(candidateSet) > 0:
                    candidateSet.sort()
                    # this removes redundancies for simplicity
                    if candidateSet not in foundSets:
                        foundSets.append(candidateSet)
                    candidateSet = []
            elif isinstance(e, music21.note.Note):      
                if e.pitchClass not in candidateSet:
                    candidateSet.append(e.pitchClass)
    foundSets.sort()

    if show:
        print(foundSets)
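The segmentation logic above (grow a set of pitch classes until a rest or the end of the part, then flush it) is independent of music21; a minimal sketch over a plain list in which None stands in for a rest:

def pitchClassSetsBetweenRests(events):
    # events: pitch classes (0-11) for notes, None for rests.
    foundSets = []
    candidate = []
    for pc in events:
        if pc is None:
            if candidate:
                foundSets.append(sorted(candidate))
                candidate = []
        elif pc not in candidate:
            candidate.append(pc)
    if candidate:
        # flush the final segment, mirroring the end-of-stream case above
        foundSets.append(sorted(candidate))
    unique = []
    for pcSet in foundSets:
        if pcSet not in unique:  # remove redundancies, as the example does
            unique.append(pcSet)
    return sorted(unique)

print(pitchClassSetsBetweenRests([0, 4, 7, None, 2, 5, 2, 9, None, 0, 7, 4]))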
Example #16
def findPotentialPassingTones(show=True):
    g = corpus.parseWork("gloria")
    gcn = g.parts["cantus"].measures(1, 126).flat.notesAndRests

    gcn[0].lyric = ""
    gcn[-1].lyric = ""
    for i in range(1, len(gcn) - 1):
        prev = gcn[i - 1]
        cur = gcn[i]
        next = gcn[i + 1]

        cur.lyric = ""

        if "Rest" in prev.classes or "Rest" in cur.classes or "Rest" in next.classes:
            continue

        int1 = interval.notesToInterval(prev, cur)
        if int1.isStep is False:
            continue

        int2 = interval.notesToInterval(cur, next)
        if int2.isStep is False:
            continue

        cma = cur.beatStrength
        if cma < 1 and cma <= prev.beatStrength and cma <= next.beatStrength:

            if int1.direction == int2.direction:
                cur.lyric = "pt"  # neighbor tone
            else:
                cur.lyric = "nt"  # passing tone
    if show:
        g.parts["cantus"].show()
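The rule applied above, leaving aside the beat-strength test: a note approached and left by step is a passing tone when both steps continue in the same direction, and a neighbor tone when the second step reverses. A minimal sketch over diatonic scale-degree numbers, assuming a step is a move of exactly one degree:

def classifyMiddleNote(prevDegree, curDegree, nextDegree):
    # Returns 'pt' (passing tone), 'nt' (neighbor tone) or None.
    step1 = curDegree - prevDegree
    step2 = nextDegree - curDegree
    if abs(step1) != 1 or abs(step2) != 1:
        return None  # not approached and left by step
    if (step1 > 0) == (step2 > 0):
        return 'pt'  # both steps in the same direction: passing tone
    return 'nt'      # direction reverses: neighbor tone

print(classifyMiddleNote(1, 2, 3))  # 'pt'
print(classifyMiddleNote(1, 2, 1))  # 'nt'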
def simple4b(show=True):
    from music21 import corpus
    from music21 import dynamics
    from music21 import stream

    # question 8: Are dynamic swells (crescendo-diminuendo pairs) more common than dips (diminuendo-crescendo pairs)?
    # So we need to compute the average distance from a crescendo to the following diminuendo and see whether it is higher or lower than the reverse. Any dynamic marking in between resets the count.

    work = 'opus41no1'
    movementNumber = 2
    s = corpus.parseWork(work, movementNumber, extList='xml')
    countCrescendo = 0
    countDiminuendo = 0
    for part in s.getElementsByClass(stream.Part):
        wedgeMap = [] # create a list of (symbol, offset) pairs for this part
        wedgeStream = part.flat.getElementsByClass(dynamics.Wedge)
        for wedge in wedgeStream:
            if wedge.type == 'crescendo':
                countCrescendo += 1
                wedgeMap.append(('<', wedge.offset))
            elif wedge.type == 'diminuendo':
                countDiminuendo += 1
                wedgeMap.append(('>', wedge.offset))
        if show:
            print(wedgeMap)

    if show:
        print('total crescendi: %s' % countCrescendo) 
        print('total diminuendi: %s' % countDiminuendo)
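The question posed at the top of simple4b needs the spans between consecutive opposite wedges, which the function above collects but never compares. A minimal sketch of that comparison over one part's (symbol, offset) list, assuming '<' marks a crescendo, '>' a diminuendo, and that only directly adjacent opposite pairs count:

def averageWedgeSpans(wedgeList):
    # wedgeList: (symbol, offset) tuples in score order.
    swellSpans = []  # crescendo followed directly by a diminuendo
    dipSpans = []    # diminuendo followed directly by a crescendo
    for (sym1, off1), (sym2, off2) in zip(wedgeList, wedgeList[1:]):
        if sym1 == '<' and sym2 == '>':
            swellSpans.append(off2 - off1)
        elif sym1 == '>' and sym2 == '<':
            dipSpans.append(off2 - off1)
    average = lambda spans: sum(spans) / len(spans) if spans else 0.0
    return average(swellSpans), average(dipSpans)

# Two swells (4.0 and 2.0 quarters long) and one dip (8.0 quarters long).
print(averageWedgeSpans([('<', 0.0), ('>', 4.0), ('<', 12.0), ('>', 14.0)]))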
    def testAnacrusisTiming(self):

        from music21 import corpus

        s = corpus.parseWork('bach/bwv103.6')

        # get just the soprano part
        soprano = s.parts['soprano']
        mts = streamToMidiTrack(soprano)

        # first note-on is not delayed, even w anacrusis
        match = """[<MidiEvent DeltaTime, t=0, track=1, channel=None>, <MidiEvent SEQUENCE_TRACK_NAME, t=0, track=1, channel=None, data=u'Soprano'>, <MidiEvent DeltaTime, t=0, track=1, channel=None>, <MidiEvent KEY_SIGNATURE, t=None, track=1, channel=1, data='\\x02\\x01'>, <MidiEvent DeltaTime, t=0, track=1, channel=None>, <MidiEvent TIME_SIGNATURE, t=None, track=1, channel=1, data='\\x04\\x02\\x18\\x08'>, <MidiEvent DeltaTime, t=0, track=1, channel=None>, <MidiEvent NOTE_ON, t=None, track=1, channel=1, pitch=66, velocity=90>, <MidiEvent DeltaTime, t=512, track=1, channel=None>, <MidiEvent NOTE_OFF, t=None, track=1, channel=1, pitch=66, velocity=0>]"""
       

        self.assertEqual(str(mts.events[:10]), match)

        # first note-on is not delayed, even w anacrusis
        match = """[<MidiEvent DeltaTime, t=0, track=1, channel=None>, <MidiEvent SEQUENCE_TRACK_NAME, t=0, track=1, channel=None, data=u'Alto'>, <MidiEvent DeltaTime, t=0, track=1, channel=None>, <MidiEvent KEY_SIGNATURE, t=None, track=1, channel=1, data='\\x02\\x01'>, <MidiEvent DeltaTime, t=0, track=1, channel=None>, <MidiEvent TIME_SIGNATURE, t=None, track=1, channel=1, data='\\x04\\x02\\x18\\x08'>, <MidiEvent DeltaTime, t=0, track=1, channel=None>, <MidiEvent NOTE_ON, t=None, track=1, channel=1, pitch=62, velocity=90>, <MidiEvent DeltaTime, t=1024, track=1, channel=None>, <MidiEvent NOTE_OFF, t=None, track=1, channel=1, pitch=62, velocity=0>]"""

        alto = s.parts['alto']
        mta = streamToMidiTrack(alto)

        self.assertEqual(str(mta.events[:10]), match)


        # try streams to midi tracks
        # get just the soprano part
        soprano = s.parts['soprano']
        mtList = streamsToMidiTracks(soprano)
        self.assertEqual(len(mtList), 1)

        # it's the same as before
        match = """[<MidiEvent DeltaTime, t=0, track=1, channel=None>, <MidiEvent SEQUENCE_TRACK_NAME, t=0, track=1, channel=None, data=u'Soprano'>, <MidiEvent DeltaTime, t=0, track=1, channel=None>, <MidiEvent KEY_SIGNATURE, t=None, track=1, channel=1, data='\\x02\\x01'>, <MidiEvent DeltaTime, t=0, track=1, channel=None>, <MidiEvent TIME_SIGNATURE, t=None, track=1, channel=1, data='\\x04\\x02\\x18\\x08'>, <MidiEvent DeltaTime, t=0, track=1, channel=None>, <MidiEvent NOTE_ON, t=None, track=1, channel=1, pitch=66, velocity=90>, <MidiEvent DeltaTime, t=512, track=1, channel=None>, <MidiEvent NOTE_OFF, t=None, track=1, channel=1, pitch=66, velocity=0>]"""

        self.assertEqual(str(mtList[0].events[:10]), match)
Example #19
def demoJesse(show=True):
    from music21 import corpus

    luca = corpus.parseWork('luca/gloria')
    for n in luca.measures(2, 20).flat.notesAndRests:
        if n.isRest is False:
            n.lyric = n.pitch.german
    if show:   
        luca.show()
Example #20
def oldAccent(show=True):
    from music21 import corpus, meter, articulations
    
    score = corpus.parseWork('bach/bwv366.xml')
    partBass = score.getElementById('Bass')
    
    ts = partBass.flat.getElementsByClass(meter.TimeSignature)[0]
    ts.beat.partition(['3/8', '3/8'])
    ts.accent.partition(['3/8', '3/8'])
    ts.setAccentWeight([1, .5])
    
    for m in partBass.measures:
        lastBeat = None
        for n in m.notes:
            beat, progress = ts.getBeatProgress(n.offset)
            if beat != lastBeat and progress == 0:
                if n.tie != None and n.tie.type == 'stop':
                    continue
                if ts.getAccentWeight(n.offset) == 1:
                    mark = articulations.StrongAccent()
                elif ts.getAccentWeight(n.offset) == .5:
                    mark = articulations.Accent()
                n.articulations.append(mark)
                lastBeat = beat
            m = m.sorted
    if show:
        partBass.getMeasureRange(1,8).show('musicxml')
    else:
        post = partBass.musicxml
Example #21
def beethovenSearch():

    from music21 import chord, corpus, stream

    op133 = corpus.parseWork('beethoven/opus133.xml')
    violin2 = op133.getElementById('2nd Violin')
    
    # an empty container for later display
    display = stream.Stream() 
    
    for m in violin2.getElementsByClass('Measure'):
        notes = m.findConsecutiveNotes(
            skipUnisons=True, skipOctaves=True,
            skipRests=True, noNone=True)

        pitches = stream.Stream(notes).pitches
        for i in range(len(pitches) - 3):
            # makes every set of 4 notes into a whole-note chord
            testChord = chord.Chord(pitches[i:i+4])
            testChord.duration.type = "whole"

            if testChord.isDominantSeventh():
                testChord.lyric = "m. " + str(m.number)
                m.notesAndRests[0].lyric = chord.Chord(m.pitches).primeFormString

                chordMeasure = stream.Measure()
                chordMeasure.append(testChord.closedPosition())
                display.append(chordMeasure)
                display.append(m)
    display.show()
Example #22
def ex1_revised(show=True, *arguments, **keywords):
    from music21 import chord, corpus, note, stream

    if 'op133' in keywords.keys():
        beethovenScore = keywords['op133']
    else:
        beethovenScore = corpus.parseWork('opus133.xml') # load a MusicXML file

    violin2 = beethovenScore[1]      # most programming languages start counting from 0, 
    #  so part 0 = violin 1, part 1 = violin 2, etc.
    display = stream.Stream() # an empty container for filling with found notes
    for thisMeasure in violin2.getElementsByClass('Measure'):
        notes = thisMeasure.findConsecutiveNotes(skipUnisons = True, 
                      skipChords = True,
                       skipOctaves = True, skipRests = True, noNone = True )
        pitches = [n.pitch for n in notes]
        for i in range(len(pitches) - 3):
            testChord = chord.Chord(pitches[i:i+4])
            testChord.duration.type = "whole"
            if testChord.isDominantSeventh() is True:
                # since a chord was found in this measure, append the found pitches in closed position
                testChord.lyric = "m. " + str(thisMeasure.number)
                emptyMeasure = stream.Measure()
                emptyMeasure.append(testChord.closedPosition())
                display.append(emptyMeasure)
    
                # append the whole measure as well, tagging the first note of the measure with an
                # ordered list of all the pitch classes used in the measure.
                pcGroup = [p.pitchClass for p in thisMeasure.pitches]
                firstNote = thisMeasure.getElementsByClass(note.Note)[0]
                firstNote.lyric = str(sorted(set(pcGroup)))
                thisMeasure.setRightBarline("double")
                display.append(thisMeasure)
    
    if show:
        display.write('musicxml')
def simple4a(show=True):
    '''
    find at least 5 questions that are difficult to solve in Humdrum which are simple in music21; (one which just uses Python)
    '''

# 4a: in addition to the graphs as they are can we have a graph showing average
# dynamic for a given pitch, and a single number for the Correlation Coefficient
# between dynamic level and pitch -- the sort of super scientific. I imagine
# it'd be something like 0.55, so no, not a connection between pitch and dynamic.
    from music21 import graph, corpus, instrument

    # question 1: Above G4 do higher pitches tend to be louder?
    work = 'opus18no1'
    movementNumber = 3
    #movement = corpus.getWork(work, movementNumber)
    #s = converter.parse(movement)

    s = corpus.parseWork('opus18no1', movementNumber, extList=['xml'])

    #s[0].show()

    for movement in [0]:
        sPart = s[movement]
        iObj = sPart.getElementsByClass(instrument.Instrument)[0]
        titleStr = '%s, Movement %s, %s' % (work, movementNumber, iObj.bestName())
    
        if not show:
            doneAction = None
        else:
            doneAction = 'write'

        p = graph.PlotScatterWeightedPitchSpaceDynamicSymbol(s[0].flat,
             doneAction=doneAction, title=titleStr)
        p.process()
Example #24
def findHighestNotes(show=True, *arguments, **keywords):
    import copy
    import music21
    from music21 import corpus, meter, stream
    
    score = corpus.parseWork('bach/bwv366.xml')
    ts = score.flat.getElementsByClass(meter.TimeSignature)[0]
    # use default partitioning
    #ts.beatSequence.partition(3)
    
    found = stream.Stream()
    for part in score.getElementsByClass(stream.Part):
        found.append(part.flat.getElementsByClass(music21.clef.Clef)[0])
        highestNoteNum = 0
        for m in part.getElementsByClass('Measure'):
            for n in m.notes:
                if n.midi > highestNoteNum:
                    highestNoteNum = n.midi
                    highestNote = copy.deepcopy(n) # optional
    
                    # These two lines will keep the look of the original
                    # note values but make each note 1 4/4 measure long:
    
                    highestNote.duration.components[0].unlink()
                    highestNote.quarterLength = 4
                    highestNote.lyric = '%s: M. %s: beat %s' % (
                        part.getInstrument().partName[0], m.number, ts.getBeat(n.offset))
        found.append(highestNote)

    if show:
        print (found.write('musicxml'))
    else:
        mx = found.musicxml
def annotateWithGerman():
    '''
    annotates a score with the German notes for each note
    '''
    from music21 import corpus

    bwv295 = corpus.parseWork('bach/bwv295')
    for thisNote in bwv295.flat.notes:
        thisNote.addLyric(thisNote.pitch.german)
    bwv295.show()
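The .german property used here and in demoJesse follows the standard German naming convention: B natural is written H, B-flat is written B, sharps take an -is suffix and flats an -es suffix, with the contractions Es and As. A minimal sketch of that rule for single accidentals only (music21 spells a flat as '-' in pitch names):

def germanName(pitchName):
    # pitchName like 'C', 'F#' or 'E-'; double accidentals are not handled here.
    letter = pitchName[0].upper()
    accidental = pitchName[1:]
    if letter == 'B':
        if accidental == '':
            return 'H'    # B natural is H
        if accidental == '-':
            return 'B'    # B-flat is plain B
        if accidental == '#':
            return 'His'
    if accidental == '#':
        return letter + 'is'     # Cis, Fis, ...
    if accidental == '-':
        if letter in ('E', 'A'):
            return letter + 's'  # contractions Es, As
        return letter + 'es'     # Des, Ges, ...
    return letter

for p in ['C', 'B', 'B-', 'F#', 'E-', 'A-']:
    print(p, '->', germanName(p))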
Example #26
def demoCombineTransform():
    from music21 import corpus, interval, pitch, stream

    s1 = corpus.parseWork('bach/bwv103.6')
    s2 = corpus.parseWork('bach/bwv18.5-lz')

    keyPitch1 = s1.analyze('key')[0]
    gap1 = interval.Interval(keyPitch1, pitch.Pitch('C'))

    keyPitch2 = s2.analyze('key')[0]
    gap2 = interval.Interval(keyPitch2, pitch.Pitch('C'))

    sCompare = stream.Stream()
    sCompare.insert(0, s1.parts['bass'])
    sCompare.insert(0, s2.parts['bass'])

    sCompare.show()
Example #27
def altDots(show=True):
    '''This adds a syncopated bass line.
    '''
    from music21 import corpus
    import music21.analysis.metrical

    bwv30_6 = corpus.parseWork('bach/bwv30.6.xml')
    bass = bwv30_6.getElementById('Bass')
    excerpt = bass.measures(1,10)
    music21.analysis.metrical.labelBeatDepth(excerpt)
    if (show is True):
        excerpt.show()


    bwv11_6 = corpus.parseWork('bach/bwv11.6.xml')
    alto = bwv11_6.getElementById('Alto')
    excerpt = alto.measures(13,20)
    music21.analysis.metrical.labelBeatDepth(excerpt)
    if (show is True):
        excerpt.show()
Example #28
def demoBeethoven133():

    from music21 import corpus

    dpi = 300

    sStream = corpus.parseWork("opus133.xml")  # load a MusicXML file
    part = sStream["cello"].stripTies()

    part.plot("scatter", values=["pitchclass", "offset"], title="Beethoven, Opus 133, Cello", dpi=dpi)
Example #29
def demoCombineTransform():
    from music21 import corpus, interval, pitch, stream

    s1 = corpus.parseWork("bach/bwv103.6")
    s2 = corpus.parseWork("bach/bwv18.5-lz")

    keyPitch1 = s1.analyze("key")[0]
    unused_gap1 = interval.Interval(keyPitch1, pitch.Pitch("C"))

    keyPitch2 = s2.analyze("key")[0]
    unused_gap2 = interval.Interval(keyPitch2, pitch.Pitch("C"))

    sCompare = stream.Stream()
    sCompare.insert(0, s1.parts["bass"])
    sCompare.insert(0, s2.parts["bass"])

    sCompare.show()
    def testGetBeams(self):
        from music21 import corpus

        # try single character conversion
        post = _musedataBeamToBeams('=')
        self.assertEqual(str(post), '<music21.beam.Beams <music21.beam.Beam 1/continue>>')

        post = _musedataBeamToBeams(']\\')
        self.assertEqual(str(post), '<music21.beam.Beams <music21.beam.Beam 1/stop>/<music21.beam.Beam 2/partial/left>>')

        post = _musedataBeamToBeams(']/')
        self.assertEqual(str(post), '<music21.beam.Beams <music21.beam.Beam 1/stop>/<music21.beam.Beam 2/partial/right>>')


        s = corpus.parseWork('hwv56', '1-18')
        self.assertEqual(len(s.parts), 5)
        # the fourth part is vocal, and has no beams defined
        self.assertEqual(str(s.parts[3].getElementsByClass(
            'Measure')[3].notes[0].beams), '<music21.beam.Beams >')
        self.assertEqual(str(s.parts[3].getElementsByClass(
            'Measure')[3].notes[0].lyric), 'sud-')

        # the bottom part has 8ths beamed two to a bar
        self.assertEqual(str(s.parts[4].getElementsByClass(
            'Measure')[3].notes[0].beams), '<music21.beam.Beams <music21.beam.Beam 1/start>>')
        self.assertEqual(str(s.parts[4].getElementsByClass(
            'Measure')[3].notes[1].beams), '<music21.beam.Beams <music21.beam.Beam 1/continue>>')
        self.assertEqual(str(s.parts[4].getElementsByClass(
            'Measure')[3].notes[2].beams), '<music21.beam.Beams <music21.beam.Beam 1/continue>>')
        self.assertEqual(str(s.parts[4].getElementsByClass(
            'Measure')[3].notes[3].beams), '<music21.beam.Beams <music21.beam.Beam 1/stop>>')

        #s.show()
        # test that stage1 files continue to have makeBeams called
        s = corpus.parseWork('bwv1080', '16')
        # measure two has 9/16 beamed in three beats of 16ths
        self.assertEqual(len(s.parts), 2)

        #s.parts[0].getElementsByClass('Measure')[1].show()

        self.assertEqual(str(s.parts[0].getElementsByClass(
            'Measure')[1].notes[0].beams), '<music21.beam.Beams <music21.beam.Beam 1/start>/<music21.beam.Beam 2/start>>')
        self.assertEqual(str(s.parts[0].getElementsByClass(
            'Measure')[1].notes[1].beams), '<music21.beam.Beams <music21.beam.Beam 1/continue>/<music21.beam.Beam 2/continue>>')
        self.assertEqual(str(s.parts[0].getElementsByClass(
            'Measure')[1].notes[2].beams), '<music21.beam.Beams <music21.beam.Beam 1/stop>/<music21.beam.Beam 2/stop>>')