Example 1
def demoGraphBach():

    dpi = 300

    # lopping off first measure to avoid pickup
    s1 = corpus.parseWork("bach/bwv103.6").measures(1, None)
    s2 = corpus.parseWork("bach/bwv18.5-lz").measures(1, None)

    s1.plot("key", dpi=dpi, title="Windowed Key Analysis, Bach, BWV 103.6", windowStep="pow2")
    s2.plot("key", dpi=dpi, title="Windowed Key Analysis, Bach, BWV 18.5", windowStep="pow2")
Example 2
def demoGraphBach():

    dpi = 300

    # lopping off first measure to avoid pickup
    s1 = corpus.parseWork('bach/bwv103.6').measures(1,None)
    s2 = corpus.parseWork('bach/bwv18.5-lz').measures(1,None)

    s1.plot('key', dpi=dpi, title='Windowed Key Analysis, Bach, BWV 103.6', windowStep='pow2')
    s2.plot('key', dpi=dpi, title='Windowed Key Analysis, Bach, BWV 18.5', windowStep='pow2')
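Note: corpus.parseWork is the older music21 spelling of this lookup; more recent releases expose the same behaviour as corpus.parse. A minimal sketch of the same windowed key analysis, assuming a current music21 installation (not part of the original example), might look like:

from music21 import corpus

s1 = corpus.parse('bach/bwv103.6').measures(1, None)  # skip the pickup measure
s1.plot('key', windowStep='pow2',
        title='Windowed Key Analysis, Bach, BWV 103.6')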
Example 4
def demoGettingWorks():
    

    # Can obtain works from an integrated corpus 
    s1 = corpus.parseWork('bach/bwv103.6') # @UnusedVariable
    s2 = corpus.parseWork('bach/bwv18.5-lz') # @UnusedVariable

    # Can parse data stored in MusicXML files locally or online:
    s = converter.parse('http://www.musicxml.org/xml/elite.xml') # @UnusedVariable

    # Can parse data stored in MIDI files locally or online:
    s = converter.parse('http://www.jsbchorales.net/down/midi/010306b_.mid') # @UnusedVariable

    # Can parse data stored in Kern files locally or online:
    s = converter.parse('http://kern.ccarh.org/cgi-bin/ksdata?l=cc/bach/371chorales&file=chor120.krn') # @UnusedVariable
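The URLs above may no longer resolve; converter.parse accepts a local file path in exactly the same way. A minimal sketch (the path below is a placeholder, not taken from the original):

from music21 import converter

# hypothetical local file; any MusicXML, MIDI, or Kern file will do
s = converter.parse('/path/to/score.xml')
print(len(s.parts))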
Example 6
    def testMultiWorkImported(self):

        from music21 import corpus
        # defines multiple works, will return an opus
        o = corpus.parseWork('josquin/milleRegrets')
        self.assertEqual(len(o), 4)
        # each score in the opus is a Stream that contains a Part and metadata
        p1 = o.getScoreByNumber(1).parts[0] 
        self.assertEqual(p1.offset, 0.0)
        self.assertEqual(len(p1.flat.notes), 89)

        p2 = o.getScoreByNumber(2).parts[0] 
        self.assertEqual(p2.offset, 0.0)
        self.assertEqual(len(p2.flat.notes), 81)

        p3 = o.getScoreByNumber(3).parts[0] 
        self.assertEqual(p3.offset, 0.0)
        self.assertEqual(len(p3.flat.notes), 83)

        p4 = o.getScoreByNumber(4).parts[0] 
        self.assertEqual(p4.offset, 0.0)
        self.assertEqual(len(p4.flat.notes), 79)


        sMerged = o.mergeScores()
        self.assertEqual(sMerged.metadata.title, 'Mille regrets')
        self.assertEqual(sMerged.metadata.composer, 'Josquin des Prez')
        self.assertEqual(len(sMerged.parts), 4)


        self.assertEqual(sMerged.parts[0].getElementsByClass('Clef')[0].sign, 'G')
        self.assertEqual(sMerged.parts[1].getElementsByClass('Clef')[0].sign, 'G')
        self.assertEqual(sMerged.parts[2].getElementsByClass('Clef')[0].sign, 'G')
        self.assertEqual(sMerged.parts[2].getElementsByClass('Clef')[0].octaveChange, -1)
        self.assertEqual(sMerged.parts[3].getElementsByClass('Clef')[0].sign, 'F')
    def testExamplesD(self):
        from music21 import corpus
        # Parse an Opus, a collection of Scores
        o = corpus.parseWork('josquin/laDeplorationDeLaMorteDeJohannesOckeghem')
        # Create a Score from a Measure range
        sExcerpt = o.mergeScores().measures(127, 133)
        # Create a reduction of Chords
        reduction = sExcerpt.chordify()
        # Iterate over the Chords and prepare presentation
        for c in reduction.flat.getElementsByClass('Chord'):
            c.closedPosition(forceOctave=4, inPlace=True)
            c.removeRedundantPitches(inPlace=True)
            c.annotateIntervals()
        # Add the reduction and display the results
        sExcerpt.insert(0, reduction)
        #sExcerpt.show()

        self.assertEqual(len(sExcerpt.flat.getElementsByClass('Chord')), 13)

        self.assertEqual(str(sExcerpt.flat.getElementsByClass('Chord')[1]), '<music21.chord.Chord E4 G4 B4 E5>')

        self.assertEqual(str(sExcerpt.flat.getElementsByClass('Chord')[2]), '<music21.chord.Chord E4 G4 E5>')
        self.assertEqual(str(sExcerpt.flat.getElementsByClass('Chord')[3]), '<music21.chord.Chord D4 F4 A4 D5>')
        self.assertEqual(str(sExcerpt.flat.getElementsByClass('Chord')[4]), '<music21.chord.Chord D4 F4 A4 D5>')
        self.assertEqual(str(sExcerpt.flat.getElementsByClass('Chord')[5]), '<music21.chord.Chord D4 F4 A4 D5>')
        self.assertEqual(str(sExcerpt.flat.getElementsByClass('Chord')[6]), '<music21.chord.Chord A4 C5 E5>')
        self.assertEqual(str(sExcerpt.flat.getElementsByClass('Chord')[7]), '<music21.chord.Chord A4 C5>')

        self.assertEqual(str(sExcerpt.flat.getElementsByClass('Chord')[8]), '<music21.chord.Chord G4 A4 B4 C5>')
        self.assertEqual(str(sExcerpt.flat.getElementsByClass('Chord')[9]), '<music21.chord.Chord F4 A4 D5>')

        self.assertEqual(str(sExcerpt.flat.getElementsByClass('Chord')[10]), '<music21.chord.Chord F4 G4 A4 D5>')

        self.assertEqual(str(sExcerpt.flat.getElementsByClass('Chord')[11]), '<music21.chord.Chord F4 A4 D5>')
        self.assertEqual(str(sExcerpt.flat.getElementsByClass('Chord')[12]), '<music21.chord.Chord E4 G4 B4 E5>')
    def testOverviewMeterB(self):

        sSrc = corpus.parseWork('bach/bwv13.6.xml')

        sPart = sSrc.getElementById('Alto')
        ts = meter.TimeSignature('6/8')

        sMeasures = sPart.flat.notes.makeMeasures(ts)
        #sMeasures.show('t')

        sMeasures.makeTies(inPlace=True)

        # we have the same time signature value, but not the same object
        self.assertEquals(sMeasures[0].timeSignature.numerator, ts.numerator)
        self.assertEquals(sMeasures[0].timeSignature.denominator,
                         ts.denominator)
        # only have ts in first bar
        self.assertEquals(sMeasures[1].timeSignature, None)

        beatStrList = []
        for n in sMeasures.flat.notes:
            bs = n.beatStr
            n.addLyric(bs)
            beatStrList.append(bs)
            #environLocal.printDebug(['offset/parent', n, n.offset, n.parent, beatStr, 'bestMeasure:', beatMeasure])

        self.assertEquals(beatStrList[:10], ['1', '1 2/3', '2 1/3', '1', '1 2/3', '2 1/3', '1', '1 2/3', '2 1/3', '2 2/3'] )

        # TODO: there is a problem here with tied notes
        # the tied note gets the same offset as its origin
        # need to investigate
        #self.assertEquals(beatStrList, ['1', '1 2/3', '2 1/3', '1', '1 2/3', '2 1/3', '1', '1 2/3', '2 1/3', '2 2/3', '1', '1 1/3', '1 2/3', '2', '2 1/3', '1', '1 2/3', '1', '1 2/3', '2 1/3', '1', '1 2/3', '2 1/3', '1', '1', '1 2/3', '2 1/3', '1', '1 2/3', '2 1/3', '1', '1 2/3', '2 1/3', '1', '1 1/3', '1 2/3', '2', '2 1/3', '2 2/3', '1', '1 1/3', '1 2/3', '2 1/3', '1', '1 2/3', '2 1/3', '1', '1 1/3', '1 2/3', '2 1/3', '2 2/3', '1', '1 2/3', '2', '2 1/3'])

        #sMeasures.show()
        post = sMeasures.musicxml
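As a companion to the test above, the makeMeasures/beatStr idea can be tried on a small hand-built stream; a minimal sketch (the note choices are illustrative only, not from the original):

from music21 import stream, note, meter

s = stream.Stream()
for name in ['C4', 'D4', 'E4', 'F4', 'G4', 'A4']:
    n = note.Note(name)
    n.quarterLength = 0.5  # eighth notes in 6/8
    s.append(n)
measured = s.makeMeasures(meter.TimeSignature('6/8'))
for n in measured.flat.notes:
    n.addLyric(n.beatStr)  # label each note with its beat position
# measured.show('text')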
Example 9
def simple4f(show=True):
    # question 19: Calculate pitch-class sets for melodic passages segmented by rests.
    work = 'opus18no1'
    movementNumber = 3
    s = corpus.parseWork(work, movementNumber, extList=['xml'])

    foundSets = []
    candidateSet = []
    for part in s.getElementsByClass(stream.Part):
        eventStream = part.flat.notesAndRests
        for i in range(len(eventStream)):
            e = eventStream[i]
            if isinstance(e, music21.note.Rest) or i == len(eventStream) - 1:
                if len(candidateSet) > 0:
                    candidateSet.sort()
                    # this removes redundancies for simplicity
                    if candidateSet not in foundSets:
                        foundSets.append(candidateSet)
                    candidateSet = []
            elif isinstance(e, music21.note.Note):
                if e.pitchClass not in candidateSet:
                    candidateSet.append(e.pitchClass)
    foundSets.sort()

    if show:
        print(foundSets)
    def runMusicxmlOutPartsBeethoven(self):
        '''Loading file and rendering musicxml output for each part: beethoven/opus59no2/movement3
        '''
        x = corpus.parseWork('beethoven/opus59no2/movement3', forceSource=True)
        # problem: doing each part is much faster than the whole score
        for p in x.parts:
            post = p.musicxml
Example 11
def findPotentialPassingTones(show=True):
    g = corpus.parseWork("gloria")
    gcn = g.parts["cantus"].measures(1, 126).flat.notesAndRests

    gcn[0].lyric = ""
    gcn[-1].lyric = ""
    for i in range(1, len(gcn) - 1):
        prev = gcn[i - 1]
        cur = gcn[i]
        next = gcn[i + 1]

        cur.lyric = ""

        if "Rest" in prev.classes or "Rest" in cur.classes or "Rest" in next.classes:
            continue

        int1 = interval.notesToInterval(prev, cur)
        if int1.isStep is False:
            continue

        int2 = interval.notesToInterval(cur, next)
        if int2.isStep is False:
            continue

        cma = cur.beatStrength
        if cma < 1 and cma <= prev.beatStrength and cma <= next.beatStrength:

            if int1.direction == int2.direction:
                cur.lyric = "pt"  # passing tone (steps continue in the same direction)
            else:
                cur.lyric = "nt"  # neighbor tone (steps change direction)
    if show:
        g.parts["cantus"].show()
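The step-and-direction test at the heart of this example can be checked in isolation; a small sketch with three hand-built notes (not part of the original code):

from music21 import note, interval

prev, cur, nxt = note.Note('C4'), note.Note('D4'), note.Note('E4')
int1 = interval.notesToInterval(prev, cur)
int2 = interval.notesToInterval(cur, nxt)
print(int1.isStep, int2.isStep)          # True True: both motions are steps
print(int1.direction == int2.direction)  # True: same direction, the passing-tone shape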
Example 12
def findHighestNotes(show=True, *arguments, **keywords):
    import copy
    import music21
    from music21 import corpus, meter, stream

    score = corpus.parseWork('bach/bwv366.xml')
    ts = score.flat.getElementsByClass(meter.TimeSignature)[0]
    # use default partitioning
    #ts.beatSequence.partition(3)

    found = stream.Stream()
    for part in score.getElementsByClass(stream.Part):
        found.append(part.flat.getElementsByClass(music21.clef.Clef)[0])
        highestNoteNum = 0
        for m in part.getElementsByClass('Measure'):
            for n in m.notes:
                if n.midi > highestNoteNum:
                    highestNoteNum = n.midi
                    highestNote = copy.deepcopy(n)  # optional

                    # These two lines will keep the look of the original
                    # note values but make each note 1 4/4 measure long:

                    highestNote.duration.components[0].unlink()
                    highestNote.quarterLength = 4
                    highestNote.lyric = '%s: M. %s: beat %s' % (
                        part.getInstrument().partName[0], m.number,
                        ts.getBeat(n.offset))
        found.append(highestNote)

    if show:
        found.show('musicxml')
Example 13
def ex02(show=True, *arguments, **keywords):

    # This example searches the second violin part for adjacent non-redundant pitch classes that form dominant seventh chords.

    from music21 import corpus, chord, stream

    if 'op133' in keywords.keys():
        sStream = keywords['op133']
    else:
        sStream = corpus.parseWork('opus133.xml')  # load a MusicXML file

    v2Part = sStream[1].getElementsByClass(
        'Measure')  # get all measures from the second violin

    # First, collect all non-redundant adjacent pitch classes, and store these pitch classes in a list.
    pitches = []
    for i in range(len(v2Part.pitches)):
        pn = v2Part.pitches[i].name
        if i > 0 and pitches[-1] == pn: continue
        else: pitches.append(pn)

    # Second, compare all adjacent four-note groups of pitch classes and determine which are dominant sevenths; store this in a list and display the results.
    found = stream.Stream()
    for i in range(len(pitches) - 3):
        testChord = chord.Chord(pitches[i:i + 4])
        if testChord.isDominantSeventh():
            found.append(testChord)
    if show:
        found.show()
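The isDominantSeventh test used above can also be exercised on a hand-built chord; a minimal sketch (the pitches are arbitrary, not from the quartet):

from music21 import chord

c = chord.Chord(['G3', 'B3', 'D4', 'F4'])
print(c.isDominantSeventh())  # True: a G dominant seventh
print(c.closedPosition())     # the same pitches packed into close position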
Example 14
def simple4e(show=True):
    # 250.    Identify the longest note in a score
    from music21 import stream
    
    qLenMax = 0
    beethovenQuartet = corpus.parseWork('opus18no1', 3, extList=['xml'])
    maxNote = None
    for part in beethovenQuartet.getElementsByClass(stream.Part):
#         lily.LilyString("{ \\time 2/4 " + str(part.bestClef().lily) + " " + str(part.lily) + "}").showPNG()

        # note: this probably is not re-joining tied notes
        pf = part.flat.notes
        for n in pf:
            if n.quarterLength >= qLenMax and n.isNote:
                qLenMax = n.quarterLength
                maxNote = n
        maxNote.color = 'red'

        offset = part.flat.getOffsetByElement(maxNote)
        if offset is None:
            raise Exception('cannot find this note in the Stream: %s' % offset)
        display = part.flat.extractContext(maxNote, before = 4.0, after = 6.0)
               
    if show:
        print('longest duration was: %s quarters long' % (qLenMax))
        lily.LilyString("{ \\time 2/4 " + str(display.bestClef().lily) + " " + str(display.lily) + "}").showPNG()
        display.show()
Example 15
def demoJesse(show=True):
    luca = corpus.parseWork('luca/gloria')
    for n in luca.measures(2, 20).flat.notesAndRests:
        if n.isRest is False:
            n.lyric = n.pitch.german
    if show:   
        luca.show()
Example 16
    def testAnacrusisTiming(self):

        from music21 import corpus

        s = corpus.parseWork('bach/bwv103.6')

        # get just the soprano part
        soprano = s.parts['soprano']
        mts = streamToMidiTrack(soprano)

        # first note-on is not delayed, even with an anacrusis
        match = """[<MidiEvent DeltaTime, t=0, track=1, channel=None>, <MidiEvent SEQUENCE_TRACK_NAME, t=0, track=1, channel=None, data=u'Soprano'>, <MidiEvent DeltaTime, t=0, track=1, channel=None>, <MidiEvent KEY_SIGNATURE, t=None, track=1, channel=1, data='\\x02\\x01'>, <MidiEvent DeltaTime, t=0, track=1, channel=None>, <MidiEvent TIME_SIGNATURE, t=None, track=1, channel=1, data='\\x04\\x02\\x18\\x08'>, <MidiEvent DeltaTime, t=0, track=1, channel=None>, <MidiEvent NOTE_ON, t=None, track=1, channel=1, pitch=66, velocity=90>, <MidiEvent DeltaTime, t=512, track=1, channel=None>, <MidiEvent NOTE_OFF, t=None, track=1, channel=1, pitch=66, velocity=0>]"""
       

        self.assertEqual(str(mts.events[:10]), match)

        # first note-on is not delayed, even with an anacrusis
        match = """[<MidiEvent DeltaTime, t=0, track=1, channel=None>, <MidiEvent SEQUENCE_TRACK_NAME, t=0, track=1, channel=None, data=u'Alto'>, <MidiEvent DeltaTime, t=0, track=1, channel=None>, <MidiEvent KEY_SIGNATURE, t=None, track=1, channel=1, data='\\x02\\x01'>, <MidiEvent DeltaTime, t=0, track=1, channel=None>, <MidiEvent TIME_SIGNATURE, t=None, track=1, channel=1, data='\\x04\\x02\\x18\\x08'>, <MidiEvent DeltaTime, t=0, track=1, channel=None>, <MidiEvent NOTE_ON, t=None, track=1, channel=1, pitch=62, velocity=90>, <MidiEvent DeltaTime, t=1024, track=1, channel=None>, <MidiEvent NOTE_OFF, t=None, track=1, channel=1, pitch=62, velocity=0>]"""

        alto = s.parts['alto']
        mta = streamToMidiTrack(alto)

        self.assertEqual(str(mta.events[:10]), match)


        # try streams to midi tracks
        # get just the soprano part
        soprano = s.parts['soprano']
        mtList = streamsToMidiTracks(soprano)
        self.assertEqual(len(mtList), 1)

        # it's the same as before
        match = """[<MidiEvent DeltaTime, t=0, track=1, channel=None>, <MidiEvent SEQUENCE_TRACK_NAME, t=0, track=1, channel=None, data=u'Soprano'>, <MidiEvent DeltaTime, t=0, track=1, channel=None>, <MidiEvent KEY_SIGNATURE, t=None, track=1, channel=1, data='\\x02\\x01'>, <MidiEvent DeltaTime, t=0, track=1, channel=None>, <MidiEvent TIME_SIGNATURE, t=None, track=1, channel=1, data='\\x04\\x02\\x18\\x08'>, <MidiEvent DeltaTime, t=0, track=1, channel=None>, <MidiEvent NOTE_ON, t=None, track=1, channel=1, pitch=66, velocity=90>, <MidiEvent DeltaTime, t=512, track=1, channel=None>, <MidiEvent NOTE_OFF, t=None, track=1, channel=1, pitch=66, velocity=0>]"""

        self.assertEqual(str(mtList[0].events[:10]), match)
Example 17
def beethovenSearch():
    op133 = corpus.parseWork('beethoven/opus133.xml')
    violin2 = op133.getElementById('2nd Violin')

    # an empty container for later display
    display = stream.Stream()

    for m in violin2.getElementsByClass('Measure'):
        notes = m.findConsecutiveNotes(skipUnisons=True,
                                       skipOctaves=True,
                                       skipRests=True,
                                       noNone=True)

        pitches = stream.Stream(notes).pitches
        for i in range(len(pitches) - 3):
            # makes every set of 4 notes into a whole-note chord
            testChord = chord.Chord(pitches[i:i + 4])
            testChord.duration.type = "whole"

            if testChord.isDominantSeventh():
                testChord.lyric = "m. " + str(m.number)
                m.notesAndRests[0].lyric = chord.Chord(
                    m.pitches).primeFormString

                chordMeasure = stream.Measure()
                chordMeasure.append(testChord.closedPosition())
                display.append(chordMeasure)
                display.append(m)
    display.show()
Example 18
def oldAccent(show=True):
    from music21 import corpus, meter, articulations
    
    score = corpus.parseWork('bach/bwv366.xml')
    partBass = score.getElementById('Bass')
    
    ts = partBass.flat.getElementsByClass(meter.TimeSignature)[0]
    ts.beat.partition(['3/8', '3/8'])
    ts.accent.partition(['3/8', '3/8'])
    ts.setAccentWeight([1, .5])
    
    for m in partBass.measures:
        lastBeat = None
        for n in m.notes:
            beat, progress = ts.getBeatProgress(n.offset)
            if beat != lastBeat and progress == 0:
                if n.tie != None and n.tie.type == 'stop':
                    continue
                if ts.getAccentWeight(n.offset) == 1:
                    mark = articulations.StrongAccent()
                elif ts.getAccentWeight(n.offset) == .5:
                    mark = articulations.Accent()
                n.articulations.append(mark)
                lastBeat = beat
            m = m.sorted
    if show:
        partBass.getMeasureRange(1,8).show('musicxml')
    else:
        post = partBass.musicxml
Example 19
def schumann(show=True):
    streamObject = corpus.parseWork('schumann/opus41no1', 3)
    streamObject.plot('pitch')

    from music21.humdrum import testFiles as tf
    streamObject = converter.parse(tf.mazurka6)
    streamObject.plot('pitch')
Example 20
def findRaisedSevenths(show=True):
    import music21
    from music21 import corpus, meter, stream

    score = corpus.parseWork('bach/bwv366.xml')  
    ts = score.flat.getElementsByClass(
        meter.TimeSignature)[0]
    #ts.beat.partition(3)

    found = stream.Stream()
    count = 0
    for part in score.getElementsByClass(stream.Part):
        found.insert(count, 
            part.flat.getElementsByClass(
            music21.clef.Clef)[0])
        for i in range(len(part.measures)):
            m = part.measures[i]
            for n in m.notes:
                if n.name == 'C#': 
                    n.addLyric('%s, m. %s' % (          
        part.getInstrument().partName[0], 
        m.measureNumber))
                    n.addLyric('beat %s' %
        ts.getBeat(n.offset))
                    found.insert(count, n)
                    count += 4
    if show:
        found.show('musicxml')
Example 21
def januaryThankYou():
    names = ['opus132', 'opus133', 'opus18no3', 'opus18no4', 'opus18no5', 'opus74']
    names += ['opus59no1', 'opus59no2', 'opus59no3']

    for workName in names:
        beethovenScore = corpus.parseWork('beethoven/' + workName, 1)
        for partNum in range(4):
            print(workName, str(partNum))
            thisPart = beethovenScore[partNum]
            display = stream.Stream()
            notes = thisPart.flat.findConsecutiveNotes(skipUnisons = True, skipChords = True,
                       skipOctaves = True, skipRests = True, noNone = True )
            for i in range(len(notes) - 4):
#                if (notes[i].name == 'E-' or notes[i].name == "D#") and notes[i+1].name == 'E' and notes[i+2].name == 'A':
                if notes[i].name == 'E-' and notes[i+1].name == 'E' and notes[i+2].name == 'A':
                        measureNumber = 0
                        for site in notes[i].sites.getSites():
                            if isinstance(site, stream.Measure):
                                measureNumber = site.number
                                display.append(site)
                        notes[i].lyric = workName + " " + str(thisPart.id) + " " + str(measureNumber)
                        m = stream.Measure()
                        m.append(notes[i])
                        m.append(notes[i+1])
                        m.append(notes[i+2])
                        m.append(notes[i+3])
                        m.insert(0, m.bestClef())
                        display.append(m)
            try:
                display.show()
            except:
                pass
Example 22
def simple4b(show=True):
    from music21 import corpus
    from music21 import dynamics

    # question 8: Are dynamic swells (crescendo-diminuendos) more common than dips (diminuendos-crescendos)?
    # so we need to compute the average distance between < and > and see if it's higher or lower than > to <. Any dynamic marking in between resets the count.

    work = 'opus41no1'
    movementNumber = 2
    s = corpus.parseWork(work, movementNumber, extList='xml')
    countCrescendo = 0
    countDiminuendo = 0
    for part in s.getElementsByClass(stream.Part):
        map = []  # create a list of (hairpin symbol, offset) pairs
        wedgeStream = part.flat.getElementsByClass(dynamics.DynamicWedge)
        for wedge in wedgeStream:
            if wedge.type == 'crescendo':
                countCrescendo += 1
                map.append(('<', wedge.offset))
            elif wedge.type == 'diminuendo':
                countDiminuendo += 1
                map.append(('>', wedge.offset))
        if show:
            print(map)

    if show:
        print('total crescendi: %s' % countCrescendo)
        print('total diminuendi: %s' % countDiminuendo)
Example 23
def simple4a(show=True):
    '''
    Find at least five questions that are difficult to solve in Humdrum but simple in music21 (one using just Python).
    '''

    # 4a: in addition to the graphs as they are can we have a graph showing average
    # dynamic for a given pitch, and a single number for the Correlation Coefficient
    # between dynamic level and pitch -- the sort of super scientific. I imagine
    # it'd be something like 0.55, so no, not a connection between pitch and dynamic.
    from music21 import graph, corpus

    # question 1: Above G4 do higher pitches tend to be louder?
    work = 'opus18no1'
    movementNumber = 3
    #movement = corpus.getWork(work, movementNumber)
    #s = converter.parse(movement)

    s = corpus.parseWork('opus18no1', movementNumber, extList=['xml'])

    #s[0].show()

    for movement in [0]:
        sPart = s.parts[movement]
        iObj = sPart.getElementsByClass(instrument.Instrument)[0]
        titleStr = '%s, Movement %s, %s' % (work, movementNumber,
                                            iObj.bestName())

        if not show:
            doneAction = None
        else:
            doneAction = 'write'

        p = graph.PlotScatterWeightedPitchSpaceDynamicSymbol(
            s.parts[0].flat, doneAction=doneAction, title=titleStr)
        p.process()
Example 24
def beethovenSearch():

    op133 = corpus.parseWork('beethoven/opus133.xml') 
    violin2 = op133.getElementById('2nd Violin')
    
    # an empty container for later display
    display = stream.Stream() 
    
    for m in violin2.getElementsByClass('Measure'):
      notes = m.findConsecutiveNotes(
        skipUnisons=True, skipOctaves=True, 
        skipRests=True, noNone=True )
     
      pitches = stream.Stream(notes).pitches  
      for i in range(len(pitches) - 3):
        # makes every set of 4 notes into a whole-note chord
        testChord = chord.Chord(pitches[i:i+4])       
        testChord.duration.type = "whole" 
        
        if testChord.isDominantSeventh():
          testChord.lyric = "m. " + str(m.number)
          m.notesAndRests[0].lyric = chord.Chord(m.pitches).primeFormString
               
          chordMeasure = stream.Measure()
          chordMeasure.append(testChord.closedPosition())
          display.append(chordMeasure)
          display.append(m)    
    display.show()
Example 25
def ex01(show=True, *arguments, **keywords):
    # This example extracts first a part, then a measure from a complete score. Next, pitches are isolated from this score as pitch classes. Finally, consecutive pitches from this measure are extracted, made into a chord, and shown to be a dominant seventh chord.

    from music21 import corpus, chord

    if 'op133' in keywords.keys():
        sStream = keywords['op133']
    else:
        sStream = corpus.parseWork('opus133.xml')  # load a MusicXML file

    v2Part = sStream[1].getElementsByClass(
        'Measure')  # get all measures from the second violin
    if show:
        v2Part[48].show()  # render the 48th measure as notation

    # create a list of pitch classes in this measure
    pcGroup = [n.pitchClass for n in v2Part[48].pitches]

    if show:
        print(pcGroup)  # display the collected pitch classes as a list
    # extract from the third pitch until just before the end
    pnGroup = [n.nameWithOctave for n in v2Part[48].pitches[2:-1]]
    qChord = chord.Chord(pnGroup)  # create a chord from these pitches

    if show:
        qChord.show()  # render this chord as notation
        print(qChord.isDominantSeventh())  # test whether this chord is a dominant seventh
Example 26
def schumann(show = True):
    streamObject = corpus.parseWork('schumann/opus41no1', 3)
    streamObject.plot('pitch')

    from music21.humdrum import testFiles as tf
    streamObject = converter.parse(tf.mazurka6)
    streamObject.plot('pitch')
Example 27
def januaryThankYou():
    names = ['opus132', 'opus133', 'opus18no3', 'opus18no4', 'opus18no5', 'opus74']
    names += ['opus59no1', 'opus59no2', 'opus59no3']

    for workName in names:
        beethovenScore = corpus.parseWork('beethoven/' + workName, 1)
        for partNum in range(4):
            print(workName, str(partNum))
            thisPart = beethovenScore[partNum]
            display = stream.Stream()
            notes = thisPart.flat.findConsecutiveNotes(skipUnisons = True, skipChords = True,
                       skipOctaves = True, skipRests = True, noNone = True )
            for i in range(len(notes) - 4):
#                if (notes[i].name == 'E-' or notes[i].name == "D#") and notes[i+1].name == 'E' and notes[i+2].name == 'A':
                if notes[i].name == 'E-' and notes[i+1].name == 'E' and notes[i+2].name == 'A':
                        measureNumber = 0
                        for site in notes[i]._definedContexts.getSites():
                            if isinstance(site, stream.Measure):
                                measureNumber = site.number
                                display.append(site)
                        notes[i].lyric = workName + " " + str(thisPart.id) + " " + str(measureNumber)
                        m = stream.Measure()
                        m.append(notes[i])
                        m.append(notes[i+1])
                        m.append(notes[i+2])
                        m.append(notes[i+3])
                        m.insert(0, m.bestClef())
                        display.append(m)
            try:
                display.show()
            except:
                pass
Example 28
def simple4a(show=True):
    '''
    Find at least five questions that are difficult to solve in Humdrum but simple in music21 (one using just Python).
    '''

# 4a: in addition to the graphs as they are can we have a graph showing average
# dynamic for a given pitch, and a single number for the Correlation Coefficient
# between dynamic level and pitch -- the sort of super scientific. I imagine
# it'd be something like 0.55, so no, not a connection between pitch and dynamic.
    from music21 import graph, corpus  

    # question 1: Above G4 do higher pitches tend to be louder?
    work = 'opus18no1'
    movementNumber = 3
    #movement = corpus.getWork(work, movementNumber)
    #s = converter.parse(movement)

    s = corpus.parseWork('opus18no1', movementNumber, extList=['xml'])

    #s[0].show()

    for movement in [0]:
        sPart = s[movement]
        iObj = sPart.getElementsByClass(instrument.Instrument)[0]
        titleStr = '%s, Movement %s, %s' % (work, movementNumber, iObj.bestName())
    
        if not show:
            doneAction = None
        else:
            doneAction = 'write'

        p = graph.PlotScatterWeightedPitchSpaceDynamicSymbol(s[0].flat,
             doneAction=doneAction, title=titleStr)
        p.process()
Example 29
def findHighestNotes(show=True, *arguments, **keywords):
    import copy
    import music21
    from music21 import corpus, meter, stream
    
    score = corpus.parseWork('bach/bwv366.xml')
    ts = score.flat.getElementsByClass(meter.TimeSignature)[0]
    # use default partitioning
    #ts.beatSequence.partition(3)
    
    found = stream.Stream()
    for part in score.getElementsByClass(stream.Part):
        found.append(part.flat.getElementsByClass(music21.clef.Clef)[0])
        highestNoteNum = 0
        for m in part.getElementsByClass('Measure'):
            for n in m.notes:
                if n.midi > highestNoteNum:
                    highestNoteNum = n.midi
                    highestNote = copy.deepcopy(n) # optional
    
                    # These two lines will keep the look of the original
                    # note values but make each note 1 4/4 measure long:
    
                    highestNote.duration.components[0].unlink()
                    highestNote.quarterLength = 4
                    highestNote.lyric = '%s: M. %s: beat %s' % (
                        part.getInstrument().partName[0], m.number, ts.getBeat(n.offset))
        found.append(highestNote)

    if show:
        print (found.write('musicxml'))
    else:
        mx = found.musicxml
Example 30
def simple4f(show=True):
    # question 19: Calculate pitch-class sets for melodic passages segmented by rests.
    work = 'opus18no1'
    movementNumber = 3
    s = corpus.parseWork(work, movementNumber, extList=['xml'])

    foundSets = []
    candidateSet = []
    for part in s.getElementsByClass(stream.Part):
        eventStream = part.flat.notesAndRests  # include rests so the segmentation below can find them
        for i in range(len(eventStream)):
            e = eventStream[i]
            if isinstance(e, music21.note.Rest) or i == len(eventStream)-1:
                if len(candidateSet) > 0:
                    candidateSet.sort()
                    # this removes redundancies for simplicity
                    if candidateSet not in foundSets:
                        foundSets.append(candidateSet)
                    candidateSet = []
            elif isinstance(e, music21.note.Note):      
                if e.pitchClass not in candidateSet:
                    candidateSet.append(e.pitchClass)
    foundSets.sort()

    if show:
        print(foundSets)
Example 31
    def testIntervalDiversity(self):
        from music21 import note, stream, corpus
        
        s = stream.Stream()
        s.append(note.Note('g#3'))
        s.append(note.Note('a3'))
        s.append(note.Note('g4'))

        id = MelodicIntervalDiversity()
        self.assertEqual(str(id.countMelodicIntervals(s)), "{'m7': [<music21.interval.Interval m7>, 1], 'm2': [<music21.interval.Interval m2>, 1]}")


        s = stream.Stream()
        s.append(note.Note('c3'))
        s.append(note.Note('d3'))
        s.append(note.Note('c3'))
        s.append(note.Note('d3'))

        id = MelodicIntervalDiversity()
        self.assertEqual(str(id.countMelodicIntervals(s)), "{'M2': [<music21.interval.Interval M2>, 3]}")

        self.assertEqual(str(id.countMelodicIntervals(s, ignoreDirection=False)), """{'M-2': [<music21.interval.Interval M-2>, 1], 'M2': [<music21.interval.Interval M2>, 2]}""")

        id = MelodicIntervalDiversity()
        s = corpus.parseWork('hwv56', '1-08')
        #s.show()

        self.assertEqual(str(id.countMelodicIntervals(s.parts[1])), "{'P5': [<music21.interval.Interval P5>, 1], 'P4': [<music21.interval.Interval P4>, 1], 'm3': [<music21.interval.Interval m3>, 1], 'M2': [<music21.interval.Interval M2>, 2]}")

        self.assertEqual(str(id.countMelodicIntervals(s)), "{'M3': [<music21.interval.Interval M3>, 1], 'P4': [<music21.interval.Interval P4>, 5], 'P5': [<music21.interval.Interval P5>, 2], 'M2': [<music21.interval.Interval M2>, 8], 'm3': [<music21.interval.Interval m3>, 3], 'm2': [<music21.interval.Interval m2>, 1]}")
Example 32
def simple4b(show=True):
    from music21 import corpus
    from music21 import dynamics

    # question 8: Are dynamic swells (crescendo-diminuendos) more common than dips (diminuendos-crescendos)?
    # so we need to compute the average distance between < and > and see if it's higher or lower than > to <. Any dynamic marking in between resets the count.

    work = 'opus41no1'
    movementNumber = 2
    s = corpus.parseWork(work, movementNumber, extList='xml')
    countCrescendo = 0
    countDiminuendo = 0
    for part in s.getElementsByClass(stream.Part):
        map = [] # create a list of (hairpin symbol, offset) pairs
        wedgeStream = part.flat.getElementsByClass(dynamics.Wedge)
        for wedge in wedgeStream:
            if wedge.type == 'crescendo':
                countCrescendo += 1
                map.append(('<', wedge.offset))
            elif wedge.type == 'diminuendo':
                countDiminuendo += 1
                map.append(('>', wedge.offset))
        if show:
            print(map)

    if show:
        print('total crescendi: %s' % countCrescendo) 
        print('total diminuendi: %s' % countDiminuendo)
Example 33
def ex01(show=True, *arguments, **keywords):
    # This example extracts first a part, then a measure from a complete score. Next, pitches are isolated from this score as pitch classes. Finally, consecutive pitches from this measure are extracted, made into a chord, and shown to be a dominant seventh chord. 
    
    from music21 import corpus, chord

    if 'op133' in keywords.keys():
        sStream = keywords['op133']
    else:
        sStream = corpus.parseWork('opus133.xml') # load a MusicXML file

    v2Part = sStream[1].getElementsByClass('Measure') # get all measures from the second violin
    if show:
        v2Part[48].show() # render the 48th measure as notation
    
    # create a list of pitch classes in this measure
    pcGroup = [n.pitchClass for n in v2Part[48].pitches] 

    if show:
        print(pcGroup) # display the collected pitch classes as a list
    # extract from the third pitch until just before the end
    pnGroup = [n.nameWithOctave for n in v2Part[48].pitches[2:-1]] 
    qChord = chord.Chord(pnGroup) # create a chord from these pitches
    
    if show:
        qChord.show() # render this chord as notation
        print(qChord.isDominantSeventh()) # test whether this chord is a dominant seventh
Example 34
def ex1_revised(show=True, *arguments, **keywords):
    if 'op133' in keywords.keys():
        beethovenScore = keywords['op133']
    else:
        beethovenScore = corpus.parseWork('opus133.xml') # load a MusicXML file

    violin2 = beethovenScore[1]      # most programming languages start counting from 0, 
    #  so part 0 = violin 1, part 1 = violin 2, etc.
    display = stream.Stream() # an empty container for filling with found notes
    for thisMeasure in violin2.getElementsByClass('Measure'):
        notes = thisMeasure.findConsecutiveNotes(skipUnisons = True, 
                      skipChords = True,
                       skipOctaves = True, skipRests = True, noNone = True )
        pitches = [n.pitch for n in notes]
        for i in range(len(pitches) - 3):
            testChord = chord.Chord(pitches[i:i+4])
            testChord.duration.type = "whole"
            if testChord.isDominantSeventh() is True:
                # since a chord was found in this measure, append the found pitches in closed position
                testChord.lyric = "m. " + str(thisMeasure.number)
                emptyMeasure = stream.Measure()
                emptyMeasure.append(testChord.closedPosition())
                display.append(emptyMeasure)
    
                # append the whole measure as well, tagging the first note of the measure with an
                # ordered list of all the pitch classes used in the measure.
                pcGroup = [p.pitchClass for p in thisMeasure.pitches]
                firstNote = thisMeasure.getElementsByClass(note.Note)[0]
                firstNote.lyric = str(sorted(set(pcGroup)))
                thisMeasure.setRightBarline("double")
                display.append(thisMeasure)
    
    if show:
        display.write('musicxml')
Example 35
def demoJesse(show=True):
    luca = corpus.parseWork('luca/gloria')
    for n in luca.measures(2, 20).flat.notesAndRests:
        if n.isRest is False:
            n.lyric = n.pitch.german
    if show:
        luca.show()
Example 36
def ex1_revised(show=True, *arguments, **keywords):
    if 'op133' in keywords.keys():
        beethovenScore = keywords['op133']
    else:
        beethovenScore = corpus.parseWork('opus133.xml') # load a MusicXML file

    violin2 = beethovenScore[1]      # most programming languages start counting from 0, 
    #  so part 0 = violin 1, part 1 = violin 2, etc.
    display = stream.Stream() # an empty container for filling with found notes
    for thisMeasure in violin2.getElementsByClass('Measure'):
        notes = thisMeasure.findConsecutiveNotes(skipUnisons = True, 
                      skipChords = True,
                       skipOctaves = True, skipRests = True, noNone = True )
        pitches = [n.pitch for n in notes]
        for i in range(len(pitches) - 3):
            testChord = chord.Chord(pitches[i:i+4])
            testChord.duration.type = "whole"
            if testChord.isDominantSeventh() is True:
                # since a chord was found in this measure, append the found pitches in closed position
                testChord.lyric = "m. " + str(thisMeasure.number)
                emptyMeasure = stream.Measure()
                emptyMeasure.append(testChord.closedPosition())
                display.append(emptyMeasure)
    
                # append the whole measure as well, tagging the first note of the measure with an
                # ordered list of all the pitch classes used in the measure.
                pcGroup = [p.pitchClass for p in thisMeasure.pitches]
                firstNote = thisMeasure.getElementsByClass(note.Note)[0]
                firstNote.lyric = str(sorted(set(pcGroup)))
                thisMeasure.setRightBarline("double")
                display.append(thisMeasure)
    
    if show:
        display.show('musicxml')
Example 37
def ex02(show=True, *arguments, **keywords):

    
    # This example searches the second violin part for adjacent non-redundant pitch classes that form dominant seventh chords.
    
    from music21 import corpus, chord, stream
    
    if 'op133' in keywords.keys():
        sStream = keywords['op133']
    else:
        sStream = corpus.parseWork('opus133.xml') # load a MusicXML file

    v2Part = sStream[1].getElementsByClass('Measure') # get all measures from the second violin
    
    # First, collect all non-redundant adjacent pitch classes, and store these pitch classes in a list. 
    pitches = []
    for i in range(len(v2Part.pitches)):
        pn = v2Part.pitches[i].name
        if i > 0 and pitches[-1] == pn: continue
        else: pitches.append(pn)
    
    # Second, compare all adjacent four-note groups of pitch classes and determine which are dominant sevenths; store this in a list and display the results. 
    found = stream.Stream()
    for i in range(len(pitches)-3):
        testChord = chord.Chord(pitches[i:i+4])
        if testChord.isDominantSeventh():
            found.append(testChord)
    if show:
        found.show()
Example 38
def altDots(show=True):
    '''This adds a syncopated bass line.
    '''
    bwv30_6 = corpus.parseWork('bach/bwv30.6.xml')
    bass = bwv30_6.getElementById('Bass')
    excerpt = bass.measures(1,10)
    music21.analysis.metrical.labelBeatDepth(excerpt)
    if (show is True):
        excerpt.show()


    bwv11_6 = corpus.parseWork('bach/bwv11.6.xml')
    alto = bwv11_6.getElementById('Alto')
    excerpt = alto.measures(13,20)
    music21.analysis.metrical.labelBeatDepth(excerpt)
    if (show is True):
        excerpt.show()
Example 39
    def testGetBeams(self):
        from music21 import corpus

        # try single character conversion
        post = _musedataBeamToBeams('=')
        self.assertEqual(str(post), '<music21.beam.Beams <music21.beam.Beam 1/continue>>')

        post = _musedataBeamToBeams(']\\')
        self.assertEqual(str(post), '<music21.beam.Beams <music21.beam.Beam 1/stop>/<music21.beam.Beam 2/partial/left>>')

        post = _musedataBeamToBeams(']/')
        self.assertEqual(str(post), '<music21.beam.Beams <music21.beam.Beam 1/stop>/<music21.beam.Beam 2/partial/right>>')


        s = corpus.parseWork('hwv56', '1-18')
        self.assertEqual(len(s.parts), 5)
        # the fourth part is vocal, and has no beams defined
        self.assertEqual(str(s.parts[3].getElementsByClass(
            'Measure')[3].notes[0].beams), '<music21.beam.Beams >')
        self.assertEqual(str(s.parts[3].getElementsByClass(
            'Measure')[3].notes[0].lyric), 'sud-')

        # the bottom part has 8ths beamed two to a bar
        self.assertEqual(str(s.parts[4].getElementsByClass(
            'Measure')[3].notes[0].beams), '<music21.beam.Beams <music21.beam.Beam 1/start>>')
        self.assertEqual(str(s.parts[4].getElementsByClass(
            'Measure')[3].notes[1].beams), '<music21.beam.Beams <music21.beam.Beam 1/continue>>')
        self.assertEqual(str(s.parts[4].getElementsByClass(
            'Measure')[3].notes[2].beams), '<music21.beam.Beams <music21.beam.Beam 1/continue>>')
        self.assertEqual(str(s.parts[4].getElementsByClass(
            'Measure')[3].notes[3].beams), '<music21.beam.Beams <music21.beam.Beam 1/stop>>')

        #s.show()
        # test that stage1 files continue to have makeBeams called
        s = corpus.parseWork('bwv1080', '16')
        # measure two has 9/16 beamed in three beats of 16ths
        self.assertEqual(len(s.parts), 2)

        #s.parts[0].getElementsByClass('Measure')[1].show()

        self.assertEqual(str(s.parts[0].getElementsByClass(
            'Measure')[1].notes[0].beams), '<music21.beam.Beams <music21.beam.Beam 1/start>/<music21.beam.Beam 2/start>>')
        self.assertEqual(str(s.parts[0].getElementsByClass(
            'Measure')[1].notes[1].beams), '<music21.beam.Beams <music21.beam.Beam 1/continue>/<music21.beam.Beam 2/continue>>')
        self.assertEqual(str(s.parts[0].getElementsByClass(
            'Measure')[1].notes[2].beams), '<music21.beam.Beams <music21.beam.Beam 1/stop>/<music21.beam.Beam 2/stop>>')
Example 40
def demoCombineTransform():
    from music21 import interval

    s1 = corpus.parseWork('bach/bwv103.6')
    s2 = corpus.parseWork('bach/bwv18.5-lz')

    keyPitch1 = s1.analyze('key')[0]
    unused_gap1 = interval.Interval(keyPitch1, pitch.Pitch('C'))

    keyPitch2 = s2.analyze('key')[0]
    unused_gap2 = interval.Interval(keyPitch2, pitch.Pitch('C'))

    sCompare = stream.Stream()
    sCompare.insert(0, s1.parts['bass'])
    sCompare.insert(0, s2.parts['bass'])

    sCompare.show()
Example 41
def demoCombineTransform():
    from music21 import corpus, interval

    s1 = corpus.parseWork('bach/bwv103.6')
    s2 = corpus.parseWork('bach/bwv18.5-lz')

    keyPitch1 = s1.analyze('key')[0]
    gap1 = interval.Interval(keyPitch1, pitch.Pitch('C'))

    keyPitch2 = s2.analyze('key')[0]
    gap2 = interval.Interval(keyPitch2, pitch.Pitch('C'))

    sCompare = stream.Stream()
    sCompare.insert(0, s1.parts['bass'])
    sCompare.insert(0, s2.parts['bass'])

    sCompare.show()
Example 42
def annotateWithGerman():
    '''
    annotates a score with the German name of each note
    '''
    bwv295 = corpus.parseWork('bach/bwv295')
    for thisNote in bwv295.flat.notes:
        thisNote.addLyric(thisNote.pitch.german)
    bwv295.show()
Example 43
def demoCombineTransform():
    from music21 import interval

    s1 = corpus.parseWork("bach/bwv103.6")
    s2 = corpus.parseWork("bach/bwv18.5-lz")

    keyPitch1 = s1.analyze("key")[0]
    unused_gap1 = interval.Interval(keyPitch1, pitch.Pitch("C"))

    keyPitch2 = s2.analyze("key")[0]
    unused_gap2 = interval.Interval(keyPitch2, pitch.Pitch("C"))

    sCompare = stream.Stream()
    sCompare.insert(0, s1.parts["bass"])
    sCompare.insert(0, s2.parts["bass"])

    sCompare.show()
Example 44
def demoBeethoven133():

    dpi = 300

    sStream = corpus.parseWork("opus133.xml")  # load a MusicXML file
    part = sStream["cello"].stripTies()

    part.plot("scatter", values=["pitchclass", "offset"], title="Beethoven, Opus 133, Cello", dpi=dpi)
Example 45
def demoBeethoven133():

    dpi = 300

    sStream = corpus.parseWork('opus133.xml') # load a MusicXML file
    part = sStream['cello'].stripTies()

    part.plot('scatter', values=['pitchclass', 'offset'],
                 title='Beethoven, Opus 133, Cello', dpi=dpi)
Example 46
def ex01Alt(show=True, *arguments, **keywords):
    # measure here is a good test of dynamics positioning:
    from music21 import corpus, chord
    if 'op133' in keywords.keys():
        sStream = keywords['op133']
    else:
        sStream = corpus.parseWork('opus133.xml') # load a MusicXML file
    v2Part = sStream[1].getElementsByClass('Measure') # get all measures from the second violin

    if show:
        v2Part[45].show() # render the measure at index 45 as notation
Example 47
def simple4e(show=True):
    # 250.    Identify the longest note in a score
    qLenMax = 0
    beethovenQuartet = corpus.parseWork('beethoven/opus18no1/movement4.xml')
    maxMeasure = 0
    for part in beethovenQuartet.parts:
        partStripped = part.stripTies()
        for n in partStripped.flat.notesAndRests:
            if n.quarterLength > qLenMax and n.isRest is False:
                qLenMax = n.quarterLength
                maxMeasure = n.measureNumber
    if show:
        beethovenQuartet.measures(maxMeasure - 2, maxMeasure + 2).show()
Example 48
def pitchDensity(show=True):

    #from music21 import corpus, graph
    
    beethovenScore = corpus.parseWork('opus133.xml')
    celloPart = beethovenScore.getElementById('Cello')
    
    #First, we take a "flat" view of the Stream, which removes nested containers such as Measures. Second, we combine tied notes into single notes with summed durations.
    
    notes = celloPart.flat.stripTies()
    g = graph.PlotScatterPitchClassOffset(notes, 
        title='Beethoven Opus 133, Cello', alpha=.2)
    g.process()
Example 49
def chordifyAnalysisHandel():
    from music21 import stream, interval

    sExcerpt = corpus.parseWork('hwv56', '3-03')
    sExcerpt = sExcerpt.measures(0,10)
    display = stream.Score()
    for p in sExcerpt.parts: display.insert(0, p)
    reduction = sExcerpt.chordify()
    for c in reduction.flat.getElementsByClass('Chord'):
        c.annotateIntervals()
        c.closedPosition(forceOctave=4, inPlace=True)
        c.removeRedundantPitches(inPlace=True)
    display.insert(0, reduction)
    display.show()
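The chordify/closedPosition/annotateIntervals sequence also works on smaller material; a minimal sketch on a Bach chorale from the corpus (a shorter input chosen here only for illustration, not part of the original):

from music21 import corpus

chorale = corpus.parse('bach/bwv66.6')
reduction = chorale.measures(1, 4).chordify()
for c in reduction.flat.getElementsByClass('Chord'):
    c.closedPosition(forceOctave=4, inPlace=True)
    c.removeRedundantPitches(inPlace=True)
    c.annotateIntervals()
# reduction.show()  # uncomment to display the annotated reduction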
Example 50
def ex03(show=True, *arguments, **keywords):
    
    # This example graphs the usage of pitch classes in the first and second violin parts.
    
    from music21 import corpus, graph
    if 'op133' in keywords.keys():
        sStream = keywords['op133']
    else:
        sStream = corpus.parseWork('opus133.xml') # load a MusicXML file

    # Create a graph of pitch class for the first and second part
    for part in [sStream[0], sStream[1]]:
        g = graph.PlotHistogramPitchClass(part, title=part.getInstrument().partName)
        if show:
            g.process()
Example 51
def altDots(show=True):
    '''This adds a syncopated bass line.
    '''
    bwv30_6 = corpus.parseWork('bach/bwv30.6.xml')
    bass = bwv30_6.getElementById('Bass')
    excerpt = bass.measures(1, 10)
    music21.analysis.metrical.labelBeatDepth(excerpt)
    if (show is True):
        excerpt.show()

    bwv11_6 = corpus.parse('bach/bwv11.6.xml')
    alto = bwv11_6.getElementById('Alto')
    excerpt = alto.measures(13, 20)
    music21.analysis.metrical.labelBeatDepth(excerpt)
    if (show is True):
        excerpt.show()
Example 52
def demoBasic():

    # A score can be represented as a Stream of Parts and Metadata
    s1 = corpus.parseWork('bach/bwv103.6')

    # We can show() a Stream in a variety of forms
    #s1.show()
    #s1.show('midi') # has errors!
    #s1.show('text') # too long here

    # Can get the number of Elements as a length, and iterate over Elements
    len(s1)

    # Can grab a polyphonic Measure range

    # Can get sub-components through class or id filtering
    soprano = s1.getElementById('soprano')


    # Can employ the same show() method on any Stream or Stream subclass
    #soprano.show()
    #soprano.show('midi') # problem is here: offset is delayed

    # A Part might contain numerous Measure Streams
    len(soprano.getElementsByClass('Measure'))
    mRange = soprano.measures(14,16) # @UnusedVariable
    #mRange.show()
    # mRange.sorted.show('text') # here we can see this



    sNew = soprano.measures(14,16).flat.notesAndRests.transpose('p-5')
    sNew.makeAccidentals(overrideStatus=True)
    ts1 = meter.TimeSignature('3/4')
    ts2 = meter.TimeSignature('5/8')
    sNew.insert(0, ts1)
    sNew.insert(3, ts2)

    #sNew.show()
    

    sNew.augmentOrDiminish(2, inPlace=True)  
    for n in sNew.notesAndRests:
        if n.pitch.name == 'G' and n.quarterLength == 2:
            n.addLyric('%s (2 QLs)' % n.name)
    sNew.show()
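The transpose and augmentOrDiminish calls used in demoBasic can also be seen on a tiny hand-built melody; a minimal sketch (the notes are arbitrary, not from the chorale):

from music21 import stream, note

melody = stream.Stream([note.Note('C4'), note.Note('E4'), note.Note('G4')])
down = melody.transpose('P-5')       # new stream, a perfect fifth lower
slower = down.augmentOrDiminish(2)   # new stream with every duration doubled
for n in slower.notes:
    print(n.nameWithOctave, n.quarterLength)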
Example 53
def simple1():
    '''
    Show correlations (if any) between note length and pitch in several pieces encoded in MusicXML or Humdrum, including the Trecento cadences.
    '''
    
    for work in ['opus18no1', 'opus59no3']:
        movementNumber = 3
        score = corpus.parseWork(work, movementNumber, extList=['xml'])
    
        for part in score:
            instrumentName = part.flat.getElementsByClass(
                instrument.Instrument)[0].bestName()
            title='%s, Movement %s, %s' % (work, movementNumber, instrumentName)

            g = graph.PlotScatterPitchSpaceQuarterLength(part.flat.sorted, 
                title=title)
            g.process()
Example 54
def ex04(show=True, *arguments, **keywords):

    # This example, by graphing pitch class over note offset, shows the usage of pitch classes in the violoncello part over the duration of the composition. While the display is coarse, it is clear that the part gets less chromatic towards the end of the work.

    from music21 import corpus
    if 'op133' in keywords.keys():
        sStream = keywords['op133']
    else:
        sStream = corpus.parseWork('opus133.xml')  # load a MusicXML file

    # note: measure numbers are not being shown correctly
    # need to investigate
    part = sStream[3]

    g = graph.PlotScatterPitchClassOffset(part.flat,
                                          title=part.getInstrument().partName)
    if show:
        g.process()
Example 55
def melodicChordExpression(show=True):
    '''This method not only searches the entire second violin part of a complete string quartet for a seventh chord expressed melodically, but also creates new notation to display the results with analytical markup.
    '''
    #from music21 import *
    #from music21 import corpus, stream, chord
    beethovenScore = corpus.parseWork(
                  'beethoven/opus133.xml') 
    # parts are given IDs by the MusicXML part name 
    violin2 = beethovenScore.getElementById(
                            '2nd Violin')
    # create an empty container for storing found notes
    display = stream.Stream() 
    
    # iterate over all measures
    for measure in violin2.getElementsByClass('Measure'):
        notes = measure.findConsecutiveNotes(
            skipUnisons=True, skipChords=True, 
            skipOctaves=True, skipRests=True, 
            noNone=True)
        pitches = [n.pitch for n in notes]
        # examine four-note groups, where i is the first of four
        for i in range(len(pitches) - 3):
            # create a chord from four pitches
            testChord = chord.Chord(pitches[i:i+4])           
            # modify duration for final presentation
            testChord.duration.type = "whole" 
            if testChord.isDominantSeventh():
                # append the found pitches as chord
                testChord.lyric = "m. " + str(
                    measure.number)
                # store the chord in a measure
                emptyMeasure = stream.Measure()
                emptyMeasure.append(
                   testChord.closedPosition())
                display.append(emptyMeasure)
                # append the source measure, tagging 
                # the first note with the pitch classes used in the measure
                measure.notesAndRests[0].lyric = chord.Chord(
                    measure.pitches).orderedPitchClassesString
                display.append(measure)
    # showing the complete Stream will produce output
    if show:
        display.show('musicxml')
Example 56
def chordifyAnalysis():
    from music21 import stream, interval

    o = corpus.parseWork('josquin/milleRegrets')
    sSrc = o.mergeScores()
    #sSrc = corpus.parseWork('bwv1080', 1)

    sExcerpt = sSrc.measures(0, 20)

    display = stream.Score()
    display.metadata = sSrc.metadata
    for p in sExcerpt.parts:
        display.insert(0, p)

    reduction = sExcerpt.chordify()
    for c in reduction.flat.getElementsByClass('Chord'):
        c.annotateIntervals()
        c.closedPosition(forceOctave=4, inPlace=True)
        c.removeRedundantPitches(inPlace=True)
    display.insert(0, reduction)
    display.show()
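A related pattern: instead of merging, the Opus returned for milleRegrets can be iterated score by score; a minimal sketch (assuming the same corpus entry and that each score carries a metadata title):

from music21 import corpus

o = corpus.parse('josquin/milleRegrets')
for score in o.scores:  # each element of the Opus is a Score
    print(score.metadata.title, len(score.parts))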
Example 57
def findPotentialPassingTones(show=True):
    g = corpus.parseWork('gloria')
    gcn = g.parts['cantus'].measures(1, 126).flat.notesAndRests

    gcn[0].lyric = ""
    gcn[-1].lyric = ""
    for i in range(1, len(gcn) - 1):
        prev = gcn[i - 1]
        cur = gcn[i]
        next = gcn[i + 1]

        cur.lyric = ""

        if "Rest" in prev.classes or "Rest" in cur.classes \
            or "Rest" in next.classes:
            continue

        int1 = interval.notesToInterval(prev, cur)
        if int1.isStep is False:
            continue

        int2 = interval.notesToInterval(cur, next)
        if int2.isStep is False:
            continue

        cma = cur.beatStrength
        if cma < 1 and \
            cma <= prev.beatStrength and \
            cma <= next.beatStrength:

            if int1.direction == int2.direction:
                cur.lyric = 'pt'  # passing tone (steps continue in the same direction)
            else:
                cur.lyric = 'nt'  # neighbor tone (steps change direction)
    if show:
        g.parts['cantus'].show()
Example 58
    def _parse(self, xml):
        if self._collectionName == 'pmlp':
            return corpus.parseWork(xml)
        else:
            return music21.corpus.parseWork(xml)