Example #1
    def runCorrectingMeasures(self,dirname):
        path = dirname
        resultS1File=self.getFileName(dirname, "result.S1.xml")
        resultS1=converter.parse(resultS1File)
        resultS2File=self.getFileName(dirname, "result.S2.xml")
        resultS2=converter.parse(resultS2File)
        
        oProcess=[]
        oProcess.append(resultS1)
        oProcess.append(resultS2)
        processOMR=ProcessOMR()
        alignedArrays=processOMR.align(oProcess)
        resultS1=alignedArrays[0]
        resultS2=alignedArrays[1]

        cm=CorrectingMeasures()
        measuresIndex=processOMR.flagIncorrectMeasures(resultS2)[0]
#         measuresIndex=cm.getWrongMeasures(resultS2)
        print(measuresIndex)
        for mIndex in measuresIndex:
#             if(resultS2.parts[0].getElementsByClass(stream.Measure)[mIndex].duration.quarterLength<4):
            resultS2=cm.correctNotesInMeasure(resultS1,resultS2,mIndex)
        m21F=Music21Functions()
        resultS3=m21F.filterExtraMeasures(resultS2)
        resultS3.write("musicxml", path+'/result.S3.xml')
Example #2
def pitchQuarterLengthUsageWeightedScatter(show=True):
    
    from music21 import converter, graph
    from music21.musicxml import testFiles as xml
    from music21.humdrum import testFiles as kern
    
    mozartStream = converter.parse(xml.mozartTrioK581Excerpt)
    notes = mozartStream.flat.stripTies()
    g = graph.PlotScatterWeightedPitchSpaceQuarterLength(notes, 
        title='Mozart Trio K. 581 Excerpt')
    g.process()
    
    g = graph.PlotScatterWeightedPitchClassQuarterLength(notes, 
        title='Mozart Trio K. 581 Excerpt')
    g.process()
    
    
    chopinStream = converter.parse(kern.mazurka6) 
    notes = chopinStream.flat.stripTies()
    g = graph.PlotScatterWeightedPitchSpaceQuarterLength(notes,
        title='Chopin Mazurka 6 Excerpt')
    g.process()
    
    g = graph.PlotScatterWeightedPitchClassQuarterLength(notes,
        title='Chopin Mazurka 6 Excerpt')
    g.process()
Example #3
def makeExampleScore():
    r'''
    Makes example score for use in stream-to-timespan conversion docs.

    >>> score = timespans.makeExampleScore()
    >>> score.show('text')
    {0.0} <music21.stream.Part ...>
        {0.0} <music21.instrument.Instrument PartA: : >
        {0.0} <music21.stream.Measure 1 offset=0.0>
            {0.0} <music21.clef.BassClef>
            {0.0} <music21.meter.TimeSignature 2/4>
            {0.0} <music21.note.Note C>
            {1.0} <music21.note.Note D>
        {2.0} <music21.stream.Measure 2 offset=2.0>
            {0.0} <music21.note.Note E>
            {1.0} <music21.note.Note F>
        {4.0} <music21.stream.Measure 3 offset=4.0>
            {0.0} <music21.note.Note G>
            {1.0} <music21.note.Note A>
        {6.0} <music21.stream.Measure 4 offset=6.0>
            {0.0} <music21.note.Note B>
            {1.0} <music21.note.Note C>
            {2.0} <music21.bar.Barline style=final>
    {0.0} <music21.stream.Part ...>
        {0.0} <music21.instrument.Instrument PartB: : >
        {0.0} <music21.stream.Measure 1 offset=0.0>
            {0.0} <music21.clef.BassClef>
            {0.0} <music21.meter.TimeSignature 2/4>
            {0.0} <music21.note.Note C>
        {2.0} <music21.stream.Measure 2 offset=2.0>
            {0.0} <music21.note.Note G>
        {4.0} <music21.stream.Measure 3 offset=4.0>
            {0.0} <music21.note.Note E>
        {6.0} <music21.stream.Measure 4 offset=6.0>
            {0.0} <music21.note.Note D>
            {2.0} <music21.bar.Barline style=final>

    '''
    from music21 import converter
    from music21 import stream
    streamA = converter.parse('tinynotation: 2/4 C4 D E F G A B C')
    streamB = converter.parse('tinynotation: 2/4 C2 G E D')
    streamA.makeMeasures(inPlace=True)
    streamB.makeMeasures(inPlace=True)
    partA = stream.Part()
    for x in streamA:
        partA.append(x)
    instrumentA = partA.getInstrument()
    instrumentA.partId = 'PartA'
    partA.insert(0, instrumentA)
    partB = stream.Part()
    for x in streamB:
        partB.append(x)
    instrumentB = partB.getInstrument()
    instrumentB.partId = 'PartB'
    partB.insert(0, instrumentB)
    score = stream.Score()
    score.insert(0, partA)
    score.insert(0, partB)
    return score
Example #4
    def testMeasureCopyingB(self):
        from music21 import converter
        from music21 import pitch

        src = """m1 G: IV || b3 d: III b4 ii
m2 v b2 III6 b3 iv6 b4 ii/o6/5
m3 i6/4 b3 V
m4-5 = m2-3
m6-7 = m4-5
"""
        s = converter.parse(src, format='romantext')
        rnStream = s.flat.getElementsByClass('RomanNumeral')

        for elementNumber in [0, 6, 12]:
            self.assertEqual(rnStream[elementNumber + 4].figure, 'III6')
            self.assertEqual(str(rnStream[elementNumber + 4].pitches), '[A4, C5, F5]')

            x = rnStream[elementNumber + 4].pitches[2].accidental
            if x is None: x = pitch.Accidental('natural')
            self.assertEqual(x.alter, 0)

            self.assertEqual(rnStream[elementNumber + 5].figure, 'iv6')
            self.assertEqual(str(rnStream[elementNumber + 5].pitches), '[B-4, D5, G5]')

            self.assertEqual(rnStream[elementNumber + 5].pitches[0].accidental.displayStatus, True)



        from music21.romanText import testFiles
        s = converter.parse(testFiles.monteverdi_3_13)
        m25 = s.measure(25)
        rn = m25.flat.getElementsByClass('RomanNumeral')
        self.assertEqual(rn[1].figure, 'III')
        self.assertEqual(str(rn[1].key), 'd minor')
Example #5
    def testBasic(self):
        from music21 import converter, corpus

        a = converter.parse(corpus.getWork('haydn/opus74no2/movement4.xml'))
        post = assembleLyrics(a)
        self.assertEqual(post, '') # no lyrics!

        a = converter.parse(corpus.getWork('luca/gloria'))
        post = assembleLyrics(a)
        self.assertEqual(post.startswith('Et in terra pax hominibus bone voluntatis'), True) 
Example #6
def loadTrainSample(sampleName):
    scoreName = sampleName + ".score.xml"
    metaName = sampleName + ".meta"
    perfName = sampleName + ".perf.mid"

    score = converter.parse(scoreName)
    meta = loadMetadata(metaName)
    perf = converter.parse(perfName)
    name = os.path.basename(sampleName)
    return {'name': name, 'score': score, 'meta': meta, 'perf': perf}
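A usage sketch for the loader above; the "data/sample01" prefix is made up and assumes matching .score.xml, .meta, and .perf.mid files exist.

# Hypothetical usage of loadTrainSample; the path prefix is illustrative only.
sample = loadTrainSample("data/sample01")
print(sample['name'])                   # "sample01"
print(len(sample['score'].flat.notes))  # notes in the parsed score
print(len(sample['perf'].flat.notes))   # notes in the parsed performance MIDI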
Example #7
def pitchQuarterLengthUsage3D(show=True):
    from music21.musicxml import testFiles as xml
    from music21.humdrum import testFiles as kern

    mozartStream = converter.parse(xml.mozartTrioK581Excerpt)  # @UndefinedVariable
    g = graph.Plot3DBarsPitchSpaceQuarterLength(mozartStream.flat.stripTies(), colors=["r"])
    g.process()

    chopinStream = converter.parse(kern.mazurka6)
    g = graph.Plot3DBarsPitchSpaceQuarterLength(chopinStream.flat.stripTies(), colors=["b"])
    g.process()
Example #8
def demoGettingWorks():

    # Can obtain works from an integrated corpus
    s1 = corpus.parse("bach/bwv103.6")  # @UnusedVariable
    s2 = corpus.parse("bach/bwv18.5-lz")  # @UnusedVariable

    # Can parse data stored in MusicXML files locally or online:
    s = converter.parse("http://www.musicxml.org/xml/elite.xml")  # @UnusedVariable

    # Can parse data stored in MIDI files locally or online:
    s = converter.parse("http://www.jsbchorales.net/down/midi/010306b_.mid")  # @UnusedVariable
Example #9
def threeDimBoth():
    from music21.musicxml.testFiles import mozartTrioK581Excerpt # @UnresolvedImport
    from music21.humdrum import testFiles as kernTest  

    mozartStream = converter.parse(mozartTrioK581Excerpt)
    g = graph.Plot3DBarsPitchSpaceQuarterLength(mozartStream.flat)
    g.process()
    
    chopinStream = converter.parse(kernTest.mazurka6) 
    g = graph.Plot3DBarsPitchSpaceQuarterLength(chopinStream.flat)
    g.process()
Example #10
def demoGraphMozartChopin():
    from music21.musicxml import testFiles as xmlTest
    from music21.humdrum import testFiles as kernTest  

    dpi = 300

    mozartStream = converter.parse(xmlTest.mozartTrioK581Excerpt) # @UndefinedVariable
    g = graph.Plot3DBarsPitchSpaceQuarterLength(mozartStream.stripTies(), dpi=dpi, title='Mozart Trio K. 581, Excerpt', colors=['#CD4F39'], alpha=.8)
    g.process()
    
    chopinStream = converter.parse(kernTest.mazurka6) 
    g = graph.Plot3DBarsPitchSpaceQuarterLength(chopinStream.stripTies(), dpi=dpi, title='Chopin Mazurka 6, Excerpt', colors=['#6495ED'], alpha=.8)
    g.process()
Example #11
    def testLandiniCadence(self):
        from music21 import converter, features, corpus, graph

        s = converter.parse(['f#4 f# e g2', '3/4'])
        fe = features.native.LandiniCadence(s)
        self.assertEqual(fe.extract().vector[0], 1)        
        
        s = converter.parse(['f#4 f# f# g2', '3/4'])
        fe = features.native.LandiniCadence(s)
        self.assertEqual(fe.extract().vector[0], 0)        

        s = converter.parse(['f#4 e a g2', '3/4'])
        fe = features.native.LandiniCadence(s)
        self.assertEqual(fe.extract().vector[0], 0)        
Example #12
    def testLandiniCadence(self):
        from music21 import converter, features

        s = converter.parse('tinynotation: 3/4 f#4 f# e g2')
        fe = features.native.LandiniCadence(s)
        self.assertEqual(fe.extract().vector[0], 1)        
        
        s = converter.parse('tinynotation: 3/4 f#4 f# f# g2')
        fe = features.native.LandiniCadence(s)
        self.assertEqual(fe.extract().vector[0], 0)        

        s = converter.parse('tinynotation: 3/4 f#4 e a g2')
        fe = features.native.LandiniCadence(s)
        self.assertEqual(fe.extract().vector[0], 0)        
Example #13
    def testRepeatBracketsB(self):
        from music21.abcFormat import testFiles
        from music21 import converter
        from music21 import corpus
        s = converter.parse(testFiles.morrisonsJig)
        # TODO: get
        self.assertEqual(len(s.flat.getElementsByClass('RepeatBracket')), 2)
        #s.show()
        # four repeat brackets here; 2 at beginning, 2 at end
        s = converter.parse(testFiles.hectorTheHero)
        self.assertEqual(len(s.flat.getElementsByClass('RepeatBracket')), 4)

        s = corpus.parse('JollyTinkersReel')
        self.assertEqual(len(s.flat.getElementsByClass('RepeatBracket')), 4)
Example #14
def searchSegment(querymidi):
    qstream = converter.parse(querymidi)
    qmelo = melody_extractor.extractMelody(qstream)
    results = []

    # get similarity scores for each segment
    for filename in os.listdir(segments_folder):
        if filename.endswith('.mid'):
            filepath = os.path.join(segments_folder, filename)
            stream = converter.parse(filepath)
            melo = melody_extractor.extractMelody(stream)
            score = similarity_scores.ioir_edit_distance_norm(qmelo,melo)
            results.append((score, filename))
    return sorted(results)
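A sketch of how the search might be driven; the query file name and the five-result cutoff are assumptions, not part of the original.

# Hypothetical driver for searchSegment: results are (score, filename) pairs
# sorted ascending, so the most similar segments come first.
matches = searchSegment("query.mid")
for score, filename in matches[:5]:
    print(filename, score)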
Example #15
def demoGettingWorks():
    

    # Can obtain works from an integrated corpus 
    s1 = corpus.parseWork('bach/bwv103.6') # @UnusedVariable
    s2 = corpus.parseWork('bach/bwv18.5-lz') # @UnusedVariable

    # Can parse data stored in MusicXML files locally or online:
    s = converter.parse('http://www.musicxml.org/xml/elite.xml') # @UnusedVariable

    # Can parse data stored in MIDI files locally or online:
    s = converter.parse('http://www.jsbchorales.net/down/midi/010306b_.mid') # @UnusedVariable

    # Can parse data stored in Kern files locally or online:
    s = converter.parse('http://kern.ccarh.org/cgi-bin/ksdata?l=cc/bach/371chorales&file=chor120.krn') # @UnusedVariable
Example #16
    def testMeasureCopyingB(self):
        from music21 import converter
        from music21 import pitch

        src = """m1 G: IV || b3 d: III b4 ii
m2 v b2 III6 b3 iv6 b4 ii/o6/5
m3 i6/4 b3 V
m4-5 = m2-3
m6-7 = m4-5
"""
        s = converter.parse(src, format='romantext')
        rnStream = s.flat.getElementsByClass('RomanNumeral')

        for elementNumber in [0, 6, 12]:
            self.assertEqual(rnStream[elementNumber + 4].figure, 'III6')
            self.assertEqual(str([str(p) for p in rnStream[elementNumber + 4].pitches]), "['A4', 'C5', 'F5']")

            x = rnStream[elementNumber + 4].pitches[2].accidental
            if x is None: x = pitch.Accidental('natural')
            self.assertEqual(x.alter, 0)

            self.assertEqual(rnStream[elementNumber + 5].figure, 'iv6')
            self.assertEqual(str([str(p) for p in rnStream[elementNumber + 5].pitches]), "['B-4', 'D5', 'G5']")

            self.assertEqual(rnStream[elementNumber + 5].pitches[0].accidental.displayStatus, True)
Example #17
    def xtest003(self):
        '''Add key velocities to some MIDI data that reflect accent levels arising from the meter.

        Modify this to just adjust dynamics based on meter; this should be 
        reflected in musical output
        '''
        from music21 import articulations
        from music21 import converter

        partStream = converter.parse("dicterliebe1.xml")
        #for part in partStream.partData:
        # a part stream could have an iterator that partitions itself
        # into measure-length part streams
        for measure in partStream.getElementsByClass('Measure')():  # () ? 
            # measure is a partStream isolated for just the desired measure
            # assuming only one meter per measure
            meterObj = measure['meter']
            # get a list of pairs, specifying offset and accent
            for offset, accent in meterObj.accentPattern():
                if accent > 'mf': # assuming symbolic representation
                    # get all relevant elements
                    subStream = measure.getElementsByOffset(offset, 
                                    offset + meterObj.denominator)
                    # get a stream of just dynamics
                    dynamics = subStream.filterClass(articulations.DynamicArticulation)
                    for obj in dynamics:
                        # can we increment dynamics by dynamics?
                        obj += 'pppp'
Example #18
    def testJSONSerializationMetadata(self):
        from music21 import converter
        from music21.musicxml import testFiles as mTF
        from music21 import metadata

        md = metadata.Metadata(
            title='Concerto in F',
            date='2010',
            composer='Frank',
            )
        # environLocal.printDebug([str(md.json)])
        self.assertEqual(md.composer, 'Frank')
        self.assertEqual(md.date, '2010/--/--')
        self.assertEqual(md.composer, 'Frank')
        self.assertEqual(md.title, 'Concerto in F')

        # test getting meta data from an imported source
        c = converter.parse(mTF.mozartTrioK581Excerpt)  # @UndefinedVariable
        md = c.metadata

        self.assertEqual(md.movementNumber, '3')
        self.assertEqual(
            md.movementName, 'Menuetto (Excerpt from Second Trio)')
        self.assertEqual(md.title, 'Quintet for Clarinet and Strings')
        self.assertEqual(md.number, 'K. 581')
        self.assertEqual(md.composer, 'Wolfgang Amadeus Mozart')
Example #19
def schumann(show = True):
    streamObject = corpus.parseWork('schumann/opus41no1', 3)
    streamObject.plot('pitch')

    from music21.humdrum import testFiles as tf
    streamObject = converter.parse(tf.mazurka6)
    streamObject.plot('pitch')
Example #20
def testMIDIParse():
    from music21 import converter, common
    from music21 import freezeThaw

    #a = 'https://github.com/ELVIS-Project/vis/raw/master/test_corpus/prolationum-sanctus.midi'
    #c = converter.parse(a)
#     c = corpus.parse('bwv66.6', forceSource=True)
#     v = freezeThaw.StreamFreezer(c)
#     v.setupSerializationScaffold()
#     return v.writeStr() # returns a string
    import os
    a = os.path.join(common.getSourceFilePath(),
                     'midi',
                     'testPrimitive',
                     'test03.mid')

    #a = 'https://github.com/ELVIS-Project/vis/raw/master/test_corpus/prolationum-sanctus.midi'
    c = converter.parse(a)
    v = freezeThaw.StreamFreezer(c)
    v.setupSerializationScaffold()


    mockType = lambda x: x.__class__.__name__ == 'weakref'
    ty = TreeYielder(mockType)
    for val in ty.run(c):
        print(val, ty.currentLevel())
Example #21
    def testTrillExtensionA(self):
        from music21 import converter
        from music21.musicxml import testPrimitive

        s = converter.parse(testPrimitive.notations32a)
        raw = fromMusic21Object(s)  # test roundtrip output
        self.assertEqual(raw.count("<wavy-line"), 4)
Example #22
def demoBachSearchBrief():    
    choraleList = corpus.getBachChorales()
    results = stream.Stream()
    for filePath in choraleList:
      fileName = os.path.split(filePath)[1]
      pieceName = fileName.replace('.xml', '')
      chorale = converter.parse(filePath)
      print(fileName)
      key = chorale.analyze('key')
      if key.mode == 'minor':
        lastChordPitches = []
        for part in chorale.parts:
          lastChordPitches.append(part.flat.pitches[-1])
        lastChord = chord.Chord(lastChordPitches)
        lastChord.duration.type = "whole"
        lastChord.transpose("P8", inPlace=True)
        if lastChord.isMinorTriad() is False and lastChord.isIncompleteMinorTriad() is False:
          continue
        lastChord.lyric = pieceName
        m = stream.Measure()
        m.keySignature = chorale.flat.getElementsByClass(
          'KeySignature')[0]
        m.append(lastChord)
        m.makeAccidentals(inPlace=True)
        results.append(m)
    results.show()
Example #23
 def initializeScore(self):
     try:
         score = converter.parse(self.nameRecordedSong).parts[0]
     except converter.ConverterException:
         score = corpus.parse(self.nameRecordedSong).parts[0]
     self.scorePart = score
     self.pageMeasureNumbers = []
     for e in score.flat:
         if 'PageLayout' in e.classes:
             self.pageMeasureNumbers.append(e.measureNumber)
     lastMeasure = score.getElementsByClass('Measure')[-1].measureNumber
     self.pageMeasureNumbers.append(lastMeasure)
     self.totalPagesScore = len(self.pageMeasureNumbers) - 1
     scNotes = score.flat.notesAndRests
     noteCounter = 1
     pageCounter = 0
     middlePagesCounter = 0
     self.middlePages = []
     self.beginningPages = []
     for i in scNotes:
         imn = i.measureNumber
         if pageCounter <= self.totalPagesScore and imn >= self.pageMeasureNumbers[pageCounter]:
             self.beginningPages.append(noteCounter)
             pageCounter += 1
         if middlePagesCounter < self.totalPagesScore and imn == math.floor(
                         (self.pageMeasureNumbers[middlePagesCounter + 1] + 
                                 self.pageMeasureNumbers[middlePagesCounter]) 
                                                                 / 2):
             self.middlePages.append(noteCounter)
             middlePagesCounter += 1
         noteCounter += 1
     environLocal.printDebug("beginning of the pages %s" % str(self.beginningPages))
     environLocal.printDebug("middles of the pages %s" % str(self.middlePages))
     environLocal.printDebug("initializeScore finished")
Example #24
def indexOnePath(filePath, *args, **kwds):
    if not os.path.isabs(filePath):
        scoreObj = corpus.parse(filePath)
    else:
        scoreObj = converter.parse(filePath)
    scoreDictEntry = indexScoreParts(scoreObj, *args, **kwds)
    return scoreDictEntry
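Both branches can be exercised like this; the paths are hypothetical (a relative path is looked up in the music21 corpus, an absolute path is parsed from disk).

# Hypothetical calls showing the two branches of indexOnePath.
corpusEntry = indexOnePath('bach/bwv66.6.mxl')            # relative path -> corpus.parse
localEntry = indexOnePath('/home/user/scores/piece.xml')  # absolute path -> converter.parse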
Example #25
def fbFeatureExtraction():
    exampleFB = converter.parse("ismir2011_fb_example1b.xml")
    fe1 = features.jSymbolic.PitchClassDistributionFeature(exampleFB)
    print(fe1.extract().vector)
    # [0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.6666666666666666, 0.0, 0.0, 1.0, 0.0, 0.0]
    n1 = exampleFB.parts[0][1][5]
    n1.expressions.append(expressions.Turn())
    x = expressions.realizeOrnaments(n1)
    n2 = exampleFB.parts[0][2][2]
    n2.expressions.append(expressions.Mordent())
    y = expressions.realizeOrnaments(n2)

    exampleFB.parts[0][1].elements = [exampleFB.parts[0][1][4]]
    exampleFB.parts[0][1].append(x)
    exampleFB.parts[0][2].elements = [exampleFB.parts[0][2][0], exampleFB.parts[0][2][1]]
    exampleFB.parts[0][2].append(y)

    fb1 = figuredBass.realizer.figuredBassFromStream(exampleFB.parts[1])
    # realization = fb1.realize()
    sol1 = fb1.generateRandomRealization()

    exampleFBOut = stream.Score()
    exampleFBOut.insert(0, exampleFB.parts[0])
    exampleFBOut.insert(0, sol1.parts[0])
    exampleFBOut.insert(0, sol1.parts[1])

    fe1.setData(exampleFBOut)
    print(fe1.extract().vector)
Example #26
    def xtestEx04(self):
        # what

        scSrc = scale.MajorScale()

        niederlande = corpus.search('niederlande', field='locale')

        results = {}
        for unused_name, group in [('niederlande', niederlande)]:
            workCount = 0

            for fp, n in group:
                workCount += 1
    
                s = converter.parse(fp, number=n)
    
                # derive a best-fit concrete major scale
                scFound = scSrc.derive(s)

                # if we find a scale with no unmatched pitches
                if len(scFound.match(s)['notMatched']) == 0:
                    # find out what pitches in major scale are not used
                    post = scFound.findMissing(s)
                    for p in post:
                        degree = scFound.getScaleDegreeFromPitch(p)
                        if degree not in results.keys():
                            results[degree] = 0
                        results[degree] += 1

        print ('Of %s works, the following major scale degrees are not used the following number of times:' % workCount)
        print (results)
Example #27
    def testBracketA(self):
        from music21 import converter
        from music21.musicxml import testPrimitive

        s = converter.parse(testPrimitive.directions31a)
        raw = fromMusic21Object(s)
        self.assertEqual(raw.count("<bracket"), 4)
Example #28
    def testGlissandoA(self):
        from music21 import converter
        from music21.musicxml import testPrimitive

        s = converter.parse(testPrimitive.spanners33a)
        raw = fromMusic21Object(s)  # test roundtrip
        self.assertEqual(raw.count("<glissando"), 2)
Example #29
    def testBracketB(self):
        from music21 import converter
        from music21.musicxml import testPrimitive

        s = converter.parse(testPrimitive.spanners33a)
        raw = fromMusic21Object(s)  # test roundtrip output
        self.assertEqual(raw.count("<bracket"), 12)
Example #30
    def get_note_rep_array(self, file_name, one_hot):
        """
        Converts a MIDI file into a suitable representation for use in the neural network.

        :param file_name: path and file name for a MIDI file that will be used for training.
        :param one_hot: determines if the representation used for notes will be one-hot (1-of-n)
        :return: an array with the appropriate number of steps to represent each note.
        """
        midi_stream = converter.parse(file_name)
        note_list_arr = []

        for note in self.get_note_objects(midi_stream):

            duration = self.get_note_duration(note)

            if duration < self.T_STEP_LENGTH:
                steps = 1
            else:
                steps = int(duration / self.T_STEP_LENGTH)

            temp_note = [self.get_midi_representation(note)]
            for num_steps in range(steps):
                if one_hot:
                    note_list_arr.append(self.to_onehot_1d(temp_note))
                else:
                    note_list_arr.append(temp_note)
        return note_list_arr
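The `steps` computation above is simple duration quantization; here is a standalone sketch (the step size of 0.25 quarter lengths, a sixteenth note, is an assumption and not taken from the original).

# Standalone quantization sketch; the 0.25 quarter-length step size is assumed.
T_STEP_LENGTH = 0.25
for duration in (0.1, 0.25, 1.0, 2.0):
    steps = 1 if duration < T_STEP_LENGTH else int(duration / T_STEP_LENGTH)
    print(duration, '->', steps)  # 0.1 -> 1, 0.25 -> 1, 1.0 -> 4, 2.0 -> 8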
Example #31
def get_notes_chords_rests(path):
    """ Get all the notes, chords and rests from the midi files in the song directory """
    note_list = []
    try:
        midi_file = converter.parse(path)
        for el in midi_file.recurse().notes:
            if isinstance(el, note.Note):
                check_rest_amount(el, note_list)
                note_list.append(str(el.pitch))
            elif isinstance(el, chord.Chord):
                check_rest_amount(el, note_list)
                note_list.append('.'.join(str(n) for n in el.normalOrder))
            elif isinstance(el, note.Rest):
                check_rest_amount(el, note_list)
                note_list.append('Rest')
    except Exception as e:
        print("failed on ", path, e)
        pass
    return note_list
Example #32
def italianA6ResolutionExample():
    '''
    The Italian augmented sixth chord (It+6) is the only
    augmented sixth chord to consist of only three
    pitch names, and when represented in four parts, the
    tonic is doubled. The tonic can resolve up, down or
    stay the same, and in four parts, the two tonics always
    resolve differently, resulting in two equally
    acceptable resolutions. An alternate approach to resolving
    the It+6 chord was taken, such that an It+6
    chord could map internally to two different resolutions.
    Every other special resolution in fbRealizer
    consists of a 1:1 mapping of special chords to resolutions.


    Here, the It+6 chord is resolving to the dominant, minor tonic,
    and major tonic, respectively. In the
    dominant resolution shown, the tonics (D) are resolving inward,
    but they can resolve outward as well. In
    the minor tonic resolution, the higher tonic is resolving up to F,
    and the lower tonic remains the same.
    In the major tonic resolution, the higher tonic remains the same,
    while the lower tonic resolves up to the F#.

    >>> from music21.figuredBass import examples
    >>> from music21.figuredBass import rules
    >>> fbLine = examples.italianA6ResolutionExample()
    >>> fbRules = rules.Rules()
    >>> fbRules.upperPartsMaxSemitoneSeparation = None
    >>> fbRules.partMovementLimits.append([1, 4])
    >>> fbRealization = fbLine.realize(fbRules)
    >>> fbRealization.keyboardStyleOutput = False
    >>> #_DOCS_SHOW fbRealization.generateRandomRealization().show()

    .. image:: images/figuredBass/fbExamples_it+6.*
        :width: 700
    '''
    from music21 import converter, key
    s = converter.parse(
        "tinynotation: D4 BB-4_#6,3 AA2_# D4 BB-4_#6,3 AA2_6,4 D4 BB-4_#6,3 AA2_#6,4",
        makeNotation=False)
    s.insert(0, key.Key('d'))
    return realizer.figuredBassFromStream(s)
Example #33
    def OnViewMusicXimple(self, e):
        self.filename = ''
        dlg = wx.FileDialog(self,
                            message="Choose a file",
                            defaultDir=os.getcwd(),
                            defaultFile="",
                            wildcard="",
                            style=wx.OPEN | wx.MULTIPLE | wx.CHANGE_DIR)

        # Show the dialog and retrieve the user response. If it is the OK response,
        # process the data.
        if dlg.ShowModal() == wx.ID_OK:
            # This returns a Python list of files that were selected.
            paths = dlg.GetPaths()

            for path in paths:
                xml = converter.parse(path)
                xml.show()
        dlg.Destroy()
Example #34
    def testNoteheadWithTies(self):
        #what happens when you have notes with two different noteheads tied together?
        from music21 import tie
        from music21 import converter

        n1 = note.Note('c3')
        n1.notehead = 'diamond'
        n1.tie = tie.Tie('start')
        n2 = note.Note('c3')
        n2.notehead = 'cross'
        n2.tie = tie.Tie('end')
        p = stream.Part()
        p.append(n1)
        p.append(n2)

        xml = fromMusic21Object(p)
        m = converter.parse(xml)
        self.assertEqual(m.flat.notes[0].notehead, 'diamond')
        self.assertEqual(m.flat.notes[1].notehead, 'cross')
Example #35
    def testRomanTextString(self):
        from music21 import converter
        s = converter.parse(
            'm1 KS1 I \n m2 V6/5 \n m3 I b3 V7 \n m4 KS-3 vi \n m5 a: i b3 V4/2 \n m6 I',
            format='romantext')

        rnStream = s.flat.getElementsByClass('RomanNumeral')
        self.assertEqual(rnStream[0].figure, 'I')
        self.assertEqual(rnStream[1].figure, 'V6/5')
        self.assertEqual(rnStream[2].figure, 'I')
        self.assertEqual(rnStream[3].figure, 'V7')
        self.assertEqual(rnStream[4].figure, 'vi')
        self.assertEqual(rnStream[5].figure, 'i')
        self.assertEqual(rnStream[6].figure, 'V4/2')
        self.assertEqual(rnStream[7].figure, 'I')

        rnStreamKey = s.flat.getElementsByClass('KeySignature')
        self.assertEqual(rnStreamKey[0].sharps, 1)
        self.assertEqual(rnStreamKey[1].sharps, -3)
Example #36
    def testPivotInCopyMultiple2(self):
        '''
        test whether a chord in a pivot situation outside of copying affects copying
        '''

        from music21 import converter
        testCase = '''
m1 G: I
m2 V D: I
m3 G: IV
m4 V
m5 I
m6-7 = m4-5
m8 I
'''
        s = converter.parse(testCase, format='romanText')
        m = s.measure(5).flat
        self.assertEqual(
            m.getElementsByClass('RomanNumeral')[0].key.name, 'G major')
Example #37
def get_notes():
    notes = []
    for file in glob.glob("midi_songs/*.mid"):
        midi = converter.parse(file)
        notes_to_parse = None
        parts = instrument.partitionByInstrument(midi)
        if parts: # file has instrument parts
            notes_to_parse = parts.parts[0].recurse()
        else: # file has notes in a flat structure
            notes_to_parse = midi.flat.notes
        for element in notes_to_parse:
            if isinstance(element, note.Note):
                notes.append(str(element.pitch))
            elif isinstance(element, chord.Chord):
                notes.append('.'.join(str(n) for n in element.normalOrder))

    print('Attained all notes in string format')

    return notes
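A sketch of the usual next step (assumed workflow, not part of the original function): mapping the note/chord strings to integers so they can feed a sequence model.

# Follow-up sketch: encode the strings returned by get_notes() as integers.
notes = get_notes()
pitchnames = sorted(set(notes))
note_to_int = {name: number for number, name in enumerate(pitchnames)}
encoded = [note_to_int[n] for n in notes]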
Example #38
def test_subtractive_process_nonlinear(example_stream):
    result = minimalism.subtractive_process(
        example_stream,
        step_value=sequences.kolakoski(),
        step_mode=minimalism.StepMode.ABSOLUTE,
        iterations_end=8,
    )
    intended_result = converter.parse("""tinyNotation: 
        C D E F G A B c d e f g      
        D E F G A B c d e f g  
        E F G A B c d e f g   
        E F G A B c d e f g   
        D E F G A B c d e f g  
        D E F G A B c d e f g  
        E F G A B c d e f g   
        D E F G A B c d e f g  
        E F G A B c d e f g   
        """)
    assert list(result.flat.notes) == list(intended_result.flat.notes)
Example #39
    def get_notes(self):
        """ Get all the notes and chords from the midi files in the ./midi_songs directory """
        notes = []

        for file in self.songs:
            print("Parsing %s" % file)
            try:
                midi = converter.parse(file)
            except IndexError as e:
                print(f"Could not parse {file}")
                print(e)
                continue

            notes_to_parse = None

            try:  # file has instrument parts
                s2 = instrument.partitionByInstrument(midi)
                notes_to_parse = s2.parts[0].recurse()
            except:  # file has notes in a flat structure
                notes_to_parse = midi.flat.notes

            prev_offset = 0.0
            for element in notes_to_parse:
                if isinstance(element, note.Note) or isinstance(
                        element, chord.Chord):
                    duration = element.duration.quarterLength
                    if isinstance(element, note.Note):
                        name = element.pitch
                    elif isinstance(element, chord.Chord):
                        name = ".".join(str(n) for n in element.normalOrder)
                    notes.append(f"{name}${duration}")

                    rest_notes = int((element.offset - prev_offset) /
                                     TIMESTEP - 1)
                    for _ in range(0, rest_notes):
                        notes.append("NULL")

                prev_offset = element.offset

        with open("notes/" + self.model_name, "wb") as filepath:
            pickle.dump(notes, filepath)

        return notes
Example #40
def get_notes():
    """ Get all the notes and chords from the midi files in the ./midi_songs directory """
    notes = []
    n = ["C","C#","D","E-","E","F","F#","G","G#","A","B-","B"]
    pitchnames = [note.Note(no + str(octave)).nameWithOctave for octave in range(8) for no in n ]
    pitchnames.append("C8")
    n_vocab = len(pitchnames)
    file = "midi_songs/Cids.mid"

    midi = converter.parse(file)

    print("Parsing %s" % file)

    notes_to_parse = None

    try: # file has instrument parts
        s2 = instrument.partitionByInstrument(midi)
        notes_to_parse = s2.parts[0].recurse() 
    except: # file has notes in a flat structure
        notes_to_parse = midi.flat.notes

    for element in notes_to_parse:
        if isinstance(element, note.Note):
            d = element.duration.quarterLength
            c = [one_hot_encoded(pitchnames.index(element.nameWithOctave), n_vocab,d)]
            for i in range(1,5):
                c.append([0 for _ in range(n_vocab)])
            notes.append(c)
        #elif isinstance(element,  note.Rest):
           #notes.append(one_hot_encoded([], n_vocab))
        elif isinstance(element, chord.Chord):
            d = element.duration.quarterLength
            c = []
            for i,nota in enumerate(element._notes):
                if i < 5:
                    c.append(one_hot_encoded(pitchnames.index(nota.nameWithOctave), n_vocab,d))
            while i < 5:
                c.append([0 for _ in range(n_vocab)])
                i += 1
            notes.append(c)
                
    
    return notes
Example #41
 def runTest(self, filename):
     # Music21 by default caches parses as pickle files, we disable that here
     origChant = convertGABC(filename)
     chson = origChant.toCHSON()
     chant = converter.parse(chson, format='chson',  forceSource=True, storePickle=False)
     for orig1, copy1 in zip(origChant, chant):
         self.assertIsInstance(copy1, type(orig1))
         if hasattr(orig1, 'elements'):
             self.assertTrue(hasattr(copy1, 'elements'))
             for orig2, copy2 in zip(orig1, copy1):
                 self.assertIsInstance(copy2, type(orig2))
                 if hasattr(orig2, 'elements'):
                     self.assertTrue(hasattr(copy2, 'elements'))
                     for orig3, copy3 in zip(orig2, copy2):
                         self.assertIsInstance(copy3, type(orig3))
                         if hasattr(orig3, 'elements'):
                             self.assertTrue(hasattr(copy3, 'elements'))
                             for orig4, copy4 in zip(orig3, copy3):
                                 self.assertIsInstance(copy4, type(orig4))
Example #42
    def parse_import(self, src: str):
        """ Attempt to parse the given source as input to be
            converted to a shawzin macro.\n
            Accepts raw string data, filename, or URL, etc.
        """
        # use midi parser to load MIDI content from the selected file
        try:
            pieceStream = converter.parse(src)
        except IndexError:
            print("Error: no valid file was selected for import")
            return

        imported_piece = []
        # current offset from beginning as we parse through the stream
        # `offset` in the stream is absolute; I want to find relative offsets for now
        time_pos = 0

        # for each thing in the m21 stream object
        for element in pieceStream.flat:
            # figure out if we care about it
            # (It can have like rests, clefs, key sig, style markings, etc.)
            # TODO care about finer points later on e.g. tempo, maybe dynamics
            # if that thing is a m21 Note object
            if isinstance(element, note.Note):
                # find the wait/rest length since last note (or beginning)
                waittime = element.offset - time_pos
                # add to the piece in a similar manner to how the button GUI does it
                # TODO `waittime/5` should just be `waittime` or configurable per-piece or something
                imported_piece.append((waittime / 5, (element.pitch, )))
                # advance time to this note for the next note
                time_pos = element.offset

            # if the element is a m21 Chord object
            elif isinstance(element, chord.Chord):
                # find the wait time since last note/chord/the beginning
                waittime = element.offset - time_pos
                # add each note's pitch to the piece list
                imported_piece.append((waittime / 5, element.pitches))
                # advance time to this chord for next note/chord
                time_pos = element.offset

        # generate a lua script to copy/paste with Logitech G-Suite
        generate_lua(imported_piece)
Example #43
def exampleB():
    '''
    This example was retrieved from page 114 of *The Music Theory Handbook* by Marjorie Merryman.

    >>> from music21.figuredBass import examples
    >>> fbLine = examples.exampleB()
    >>> #_DOCS_SHOW fbLine.generateBassLine().show()

    .. image:: images/figuredBass/fbExamples_bassLineB.*
        :width: 700

    First, fbLine is realized with the default rules set.


    >>> fbRealization1 = fbLine.realize()
    >>> fbRealization1.getNumSolutions()
    422
    >>> #_DOCS_SHOW fbRealization1.generateRandomRealization().show()

    .. image:: images/figuredBass/fbExamples_sol1B.*
        :width: 700


    Now, a Rules object is created, and the restriction that the chords
    need to be complete is lifted. fbLine is realized once again.


    >>> from music21.figuredBass import rules
    >>> fbRules = rules.Rules()
    >>> fbRules.forbidIncompletePossibilities = False
    >>> fbRealization2 = fbLine.realize(fbRules)
    >>> fbRealization2.getNumSolutions()
    188974
    >>> #_DOCS_SHOW fbRealization2.generateRandomRealization().show()

    .. image:: images/figuredBass/fbExamples_sol2B.*
        :width: 700
    '''
    from music21 import converter, key
    s = converter.parse("tinynotation: 4/4 D4 A4_7,5,#3 B-4 F4_6 G4_6 AA4_7,5,#3 D2",
                        makeNotation=False)
    s.insert(0, key.Key('d'))
    return realizer.figuredBassFromStream(s)
Example #44
def generateBoogieVamp(blRealization = None, numRepeats = 5):
    '''
    Turns whole notes in twelve bar blues bass line to blues boogie woogie bass line. Takes
    in numRepeats, which is the number of times to repeat the bass line. Also, takes in a 
    realization of :meth:`~music21.figuredBass.examples.twelveBarBlues`. If none is provided, 
    a default realization with :attr:`~music21.figuredBass.rules.Rules.forbidVoiceOverlap`
    set to False and :attr:`~music21.figuredBass.rules.Rules.partMovementLimits` set to
    [(1,4),(2,12),(3,12)] is used.     

    >>> from music21.figuredBass import examples
    >>> #_DOCS_SHOW examples.generateBoogieVamp(numRepeats = 1).show()
    
    .. image:: images/figuredBass/fbExamples_boogieVamp.*
        :width: 700   
    '''
    from music21 import converter, stream, interval
    if blRealization is None:
        bluesLine = twelveBarBlues()
        fbRules = rules.Rules()
        fbRules.partMovementLimits = [(1, 4), (2, 12), (3, 12)]
        fbRules.forbidVoiceOverlap = False
        blRealization = bluesLine.realize(fbRules)

    sampleScore = blRealization.generateRandomRealizations(numRepeats)
    boogieBassLine = converter.parse("tinynotation: BB-8. D16 F8. G16 A-8. G16 F8. D16", makeNotation=False)

    newBassLine = stream.Part()
    newBassLine.append(sampleScore[1][0]) #Time signature
    newBassLine.append(sampleScore[1][1]) #Key signature

    for n in sampleScore[1].notes:
        i = interval.notesToInterval(boogieBassLine[0], n)
        tp = boogieBassLine.transpose(i)
        for lyr in n.lyrics:
            tp.notes[0].addLyric(lyr.text)
        for m in tp.notes:
            newBassLine.append(m)
    
    newScore = stream.Score()
    newScore.insert(0, sampleScore[0])
    newScore.insert(newBassLine)
    
    return newScore
Example #45
def recognizeLuca():
    '''
    Can music21's audio search recognize which page I am playing
    if I play a passage from the OMR'd version of dLuca's Gloria?

    Works great!

    OMR was done with SharpEye Light on Finale.
    '''
    # first page begins on m1, second on m23, etc. --
    # not needed when we import page objects in music21.
    pageMeasureNumbers = [1, 23, 50, 81, 104, 126]  #126 is end of document
    dlucaAll = converter.parse('dluca_scanned.xml')
    #dlucaAll = corpus.parse('luca/gloria')
    dlucaCantus = dlucaAll.parts[0]
    recognizeScore(dlucaCantus, pageMeasureNumbers)
Example #46
    def extract_chords_duration_time_from_song(self, song):

        score = converter.parse(song)
        score_chordify = score.chordify()
        chords_duration_time = []
        chords,durations,times = [],[],[]
        for element in score_chordify:
            if isinstance(element, note.Note):
                chords.append(element.pitch)
                durations.append(round(float(element.duration.quarterLength),3))
                times.append(float(element.offset))
                # chords_duration_time.append((element.pitch, round(float(element.duration.quarterLength),3), float(element.offset)))
            elif isinstance(element, chord.Chord):
                chords.append('.'.join(str(n) for n in sorted(element.pitches)))
                durations.append(round(float(element.duration.quarterLength),3))
                times.append(float(element.offset))
                # chords_duration_time.append(('.'.join(str(n) for n in element.pitches), round(float(element.duration.quarterLength),3), float(element.offset)))
        # return chords_duration_time
        return [times,chords,durations]
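The three returned lists are parallel, so they can be zipped back into (offset, chord, duration) triples; a usage sketch (the extractor instance and the file name are assumptions).

# Hypothetical usage on an instance of the class above; "song.mid" is illustrative.
times, chords, durations = extractor.extract_chords_duration_time_from_song("song.mid")
for offset, symbol, quarter_length in zip(times, chords, durations):
    print(offset, symbol, quarter_length)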
Example #47
def pitchedPhase(cycles=None, show=False):
    '''
    Creates a phase composition in the style of 
    1970s minimalism, but bitonally.
    
    The source code describes how this works.
    
    
    >>> #_DOCS_SHOW composition.phasing.pitchedPhase(cycles = 4, show = True)
    
    .. image:: images/phasingDemo.*
            :width: 576

    '''

    sSrc = converter.parse("""tinynotation: 12/16 E16 F# B c# d F# E c# B F# d c# 
                              E16 F# B c# d F# E c# B F# d c#""", makeNotation=False)
    sPost = stream.Score()
    sPost.title = 'phasing experiment'
    sPost.insert(0, stream.Part())
    sPost.insert(0, stream.Part())

    durationToShift = duration.Duration('64th')
    increment = durationToShift.quarterLength
    if cycles is None:
        cycles = int(round(1/increment)) + 1

    for i in range(cycles):
        sPost.parts[0].append(copy.deepcopy(sSrc))
        sMod = copy.deepcopy(sSrc)
        # increment last note
        sMod.notesAndRests[-1].quarterLength += increment
        
        #randInterval = random.randint(-12,12)
        #sMod.transpose(randInterval, inPlace=True)
        sPost.parts[1].append(sMod)


    if show:
        sPost.show('midi')
        sPost.show()
    else: # get musicxml
        pass
Example #48
    def testCountingAxisFormat(self):
        def countingAxisFormatter(n, formatDict):
            if n.pitch.accidental is not None:
                formatDict['color'] = 'red'
            return n.pitch.diatonicNoteNum

        from music21.graph.plot import Histogram
        from music21 import converter
        s = converter.parse('tinynotation: 4/4 C4 D E F C D# E F#')
        hist = Histogram(s)
        hist.doneAction = None
        hist.axisX = Axis(hist, 'x')
        hist.axisX.extractOneElement = countingAxisFormatter
        hist.run()
        self.assertEqual(hist.data, [(1, 2, {}), (2, 2, {
            'color': 'red'
        }), (3, 2, {}), (4, 2, {
            'color': 'red'
        })])
Example #49
def V43ResolutionExample():
    '''
    The dominant 4,3 can resolve to either the tonic 5,3 or tonic 6,3. The proper resolution
    is dependent on the bass note of the tonic, and is determined in context, as shown in the
    following figured bass realization.

    >>> from music21.figuredBass import examples
    >>> fbLine = examples.V43ResolutionExample()
    >>> fbRealization = fbLine.realize()
    >>> #_DOCS_SHOW fbRealization.generateRandomRealization().show()
    
    .. image:: images/figuredBass/fbExamples_V43.*
        :width: 350
    '''
    from music21 import converter, key
    s = converter.parse("tinynotation: 4/4 D2 E2_4,3 D2_5,3 E2_4,3 F#1_6,3",
                        makeNotation=False)
    s.insert(0, key.Key('D'))
    return realizer.figuredBassFromStream(s)
Example #50
    def testNoChord(self):
        from music21 import converter

        src = """m1 G: IV || b3 d: III b4 NC
m2 b2 III6 b3 iv6 b4 ii/o6/5
m3 NC b3 G: V
"""
        s = converter.parse(src, format='romantext')
        p = s.parts[0]
        m1 = p.getElementsByClass('Measure')[0]
        r1 = m1[-1]
        self.assertIn('Rest', r1.classes)
        self.assertEqual(r1.quarterLength, 1.0)
        m2 = p.getElementsByClass('Measure')[1]
        r2 = m2[0]
        self.assertIn('Rest', r2.classes)
        self.assertEqual(r2.quarterLength, 1.0)
        rn1 = m2[1]
        self.assertIn('RomanNumeral', rn1.classes)
Example #51
def exampleC():
    '''
    This example was retrieved from page 114 of *The Music Theory Handbook* by Marjorie Merryman.

    >>> from music21.figuredBass import examples
    >>> fbLine = examples.exampleC()
    >>> #_DOCS_SHOW fbLine.generateBassLine().show()

    .. image:: images/figuredBass/fbExamples_bassLineC.*
        :width: 700

    First, fbLine is realized with the default rules set.

    >>> fbRealization1 = fbLine.realize()
    >>> fbRealization1.getNumSolutions()
    833
    >>> #_DOCS_SHOW fbRealization1.generateRandomRealization().show()

    .. image:: images/figuredBass/fbExamples_sol1C.*
        :width: 700


    Now, parallel fifths are allowed in realizations. The image below
    shows one of them. There is a parallel fifth between the bass and
    alto parts going from the half-diminished 6,5 (B,F#) to the dominant
    seventh (C#,G#) in the second measure.

    >>> from music21.figuredBass import rules
    >>> fbRules = rules.Rules()
    >>> fbRules.forbidParallelFifths = False
    >>> fbRealization2 = fbLine.realize(fbRules)
    >>> fbRealization2.getNumSolutions()
    2427
    >>> #_DOCS_SHOW fbRealization2.generateRandomRealization().show()

    .. image:: images/figuredBass/fbExamples_sol2C.*
        :width: 700
    '''
    from music21 import converter, key
    s = converter.parse("tinynotation: 4/4 FF#4 GG#4_#6 AA4_6 FF#4 BB4_6,5 C#4_7,5,#3 F#2",
                        makeNotation=False)
    s.insert(0, key.Key('f#'))
    return realizer.figuredBassFromStream(s)
Example #52
def get_notes():
    """
    从 music_midi 目录中的所有 MIDI 文件里提取 note(音符)和 chord(和弦)
    Note 样例: A, B, A#, B#, G#, E, ...
    Chord 样例: [B4 E5 G#5], [C5 E5], ...
    因为 Chord 就是多个 Note 的集合,所以把它们简单地统称为 “Note”
    """
    # 确保包含所有 MIDI 文件的 music_midi 文件夹在所有 Python 文件的同级目录下
    if not os.path.exists("music_midi"):
        raise Exception("包含所有 MIDI 文件的 music_midi 文件夹不在此目录下,请添加")

    notes = []

    # glob : 匹配所有符合条件的文件,并以 List 的形式返回
    for midi_file in glob.glob("music_midi/*.mid"):
        stream = converter.parse(midi_file)

        parts = instrument.partitionByInstrument(stream)

        if parts:  # 如果有乐器部分, 取第一个乐器部分
            notes_to_parse = parts.parts[0].recurse()
        else:
            notes_to_parse = stream.flat.notes

        for element in notes_to_parse:
            # 如果是 Note 类型,那么取它的音调
            if isinstance(element, note.Note):
                # 格式例如: E6
                notes.append(str(element.pitch))
            # 如果是 Chord 类型,那么取它各个音调的序号
            elif isinstance(element, chord.Chord):
                # 转换后格式例如: 4.15.7
                notes.append('.'.join(str(n) for n in element.normalOrder))

    # 如果 data 目录不存在,创建此目录
    if not os.path.exists("data"):
        os.mkdir("data")
    # 将数据写入 data 目录下的 notes 文件
    with open('data/notes', 'wb') as filepath:
        pickle.dump(notes, filepath)

    return notes
Example #53
def process(file):
    print("Extracting file ", file)
    try:
        stream=conv.parse(file)
    except Exception as e:
        print("[ERROR] Can not open ", file)
        rename(file, "./Errori/" + basename(file))
        return None
    currentpiece = {'title' : file} #todo: basename(file)
    for feature in extractors:
        try:
            currentpiece[feature.__name__] = feature(stream).extract().vector
        except Exception as e:
            print("[ERROR] ", e)
            print("          Occurred while processing ", file)
            print("          Was looking for ", feature.__name__)
            rename(file, "./Errori/" + basename(file))
            return None
    rename(file, "./Crunched/" + basename(file))
    return currentpiece
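A possible batch driver for process(); the glob pattern is an assumption, and extractors is defined elsewhere in the module.

# Hypothetical batch driver for process(); failed files are moved to ./Errori
# inside process() itself, successful ones end up in the pieces list.
import glob
pieces = []
for path in glob.glob("*.mid"):
    piece = process(path)
    if piece is not None:
        pieces.append(piece)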
Example #54
    def update(self):
        """ Get all the notes and chords from the midi files in the dataset directory """
        for file in glob.glob(self.path_to_data):
            midi = converter.parse(file)

            print("Parsing %s" % file)

            notes_to_parse = None

            try: # file has instrument parts
                s2 = instrument.partitionByInstrument(midi)
                notes_to_parse = s2.parts[0].recurse() 
            except: # file has notes in a flat structure
                notes_to_parse = midi.flat.notes

            for element in notes_to_parse:
                if isinstance(element, note.Note):
                    self.array.append(str(element.pitch))
                elif isinstance(element, chord.Chord):
                    self.array.append('.'.join(str(n) for n in element.normalOrder))
Example #55
    def retrieve_notes(self):
        notes = []
        for file in os.listdir(self.data_folder):
            midi = converter.parse(self.data_folder + file)
            try:
                s2 = instrument.partitionByInstrument(midi)
                notes_to_parse = s2.parts[0].recurse()
            except:
                notes_to_parse = midi.flat.notes

            for element in notes_to_parse:
                if isinstance(element, note.Note):
                    notes.append(str(element.pitch))
                elif isinstance(element, chord.Chord):
                    notes.append('.'.join(str(n) for n in element.normalOrder))

        with open('data/notes', 'wb') as filepath:
            pickle.dump(notes, filepath)

        return notes
Example #56
def get_notes(tag):
    filepath = 'music_train/{}/'.format(tag)
    files = os.listdir(filepath)
    Notes = []
    for file in files:
        st = converter.parse(filepath + file)
        parts = instrument.partitionByInstrument(st)
        if parts:
            for part in parts:
                if 'Piano' in str(part):
                    notes = part.recurse()
                    for element in notes:
                        if isinstance(element, note.Note):
                            Notes.append(str(element.pitch))
                        elif isinstance(element, chord.Chord):
                            Notes.append('.'.join(
                                str(n) for n in element.normalOrder))
    with open('data/notes/{}'.format(tag), 'w') as f:
        f.write(str(Notes))
    return Notes
Example #57
def ch2_writing_V_A(show=True, *arguments, **keywords):
    '''p. 18

    Using the meter signature given, add bar lines to the following melodies. 
    '''
    from music21 import key

    # note: tiny is not encoding C#s for c'#4 properly (it seems)
    ex = converter.parse("tinynotation: 3/2 g#1 f#4 g#4 a1 g#2 f#1 g#4. en8 g#2 f#4 r4 f#4 d#8 B8 e2 r4 e4 a4. a8 a2 g#4 g# b4. e8 a2~ a4 a4 d'n4. d'8 d'n2 c'#4 c'# c'# c'#")
    
    ex.insert(0, key.KeySignature(4))
    # presently, this only works if makeAccidentals is called before make measures
    ex.makeAccidentals(inPlace=True)
    ex = ex.makeMeasures() 
    ex.makeBeams(inPlace=True)

    if show:
        ex.show()
    else:
        unused_post = musicxml.m21ToString.fromMusic21Object(ex)
Example #58
def convert_midi_to_notes(midi_file):
    """ Converts midi to a list of notes """
    notes = list()
    midi = converter.parse(midi_file)
    notes_to_parse = None

    try:
        s2 = instrument.partitionByInstrument(midi)
        notes_to_parse = s2.parts[0].recurse()
    except:
        notes_to_parse = midi.flat.notesAndRests

    for element in notes_to_parse:
        if isinstance(element, note.Note):
            notes.append(str(element.pitch))
        elif isinstance(element, chord.Chord):
            notes.append(".".join(str(n) for n in element.normalOrder))
        elif isinstance(element, note.Rest):
            notes.append(element.name)
    return notes
Example #59
def get_notes():
    """ Get all the notes and chords from the midi files in the ./midi_songs directory """
    notes = []

    for file in glob.glob("rammstein/*.mid*"):
        midi = converter.parse(file)

        print("Parsing %s" % file)

        notes_to_parse = None

        try:  # file has instrument parts
            s2 = instrument.partitionByInstrument(midi)
            notes_to_parse = s2.parts[0].recurse()
        except:  # file has notes in a flat structure
            notes_to_parse = midi.flat.notes

        notes.extend(parse_notes(notes_to_parse))

    return notes
Example #60
    def test_first_measures_of_bach(self):
        # first two measures of soprano part
        the_settings = LilyPondSettings()
        the_score = converter.parse('test_corpus/bwv77.mxl')
        actual = _process_stream(the_score.parts[0][:3], the_settings)
        actual = actual[8:]  # remove the randomized part name
        expect = u""" =
{
\t%% Soprano
\t\set Staff.instrumentName = \markup{ "Soprano" }
\t\set Staff.shortInstrumentName = \markup{ "Sop." }
\t\partial 4
\t\clef treble
\t\key b \minor
\t\\time 4/4
\te'8 fis'8 |
\tg'4 a'4 b'4 a'4 |
}
"""
        self.assertEqual(actual, expect)