Example no. 1
    def testStreams01(self):
        '''
        Basic stream issues
        '''
        #from music21 import note, stream, clef, metadata, spanner

        #==== "fig-df02"
        # Storing, Ordering, and Timing Elements

        n1 = note.Note('g3', type='half')
        n2 = note.Note('d4', type='half')
        cf1 = clef.AltoClef()

        m1 = stream.Measure(number=1)
        m1.append([n1, n2])
        m1.insert(0, cf1)

        # the measure has three elements
        assert len(m1) == 3
        # the offset returned is the most-recently set
        assert n2.offset == 2.0
        # automatic sorting positions Clef first
        assert m1[0] == cf1
        # list-like indices follow sort order
        assert m1.index(n2) == 2
        # can find an element based on a given offset
        assert m1.getElementAtOrBefore(3) == n2

        n3 = note.Note('g#3', quarterLength=0.5)
        n4 = note.Note('d-4', quarterLength=3.5)

        m2 = stream.Measure(number=2)
        m2.append([n3, n4])

        # appended position is after n3
        assert n4.offset == .5
        assert m2.highestOffset == .5
        # can access objects on elements
        assert m2[1].duration.quarterLength == 3.5
        # the Stream duration is the highest offset + duration
        assert m2.duration.quarterLength == 4

        p1 = stream.Part()
        p1.append([m1, m2])

        # the part has 2 components
        assert len(p1) == 2
        # the Stream duration is the highest offset + duration
        assert p1.duration.quarterLength == 8
        # can access Notes from Part using multiple indices
        assert p1[1][0].pitch.nameWithOctave == 'G#3'

        s1 = stream.Score()
        s1.append(p1)
        md1 = metadata.Metadata(title='The music21 Stream')
        s1.insert(0, md1)
        # calling show by default renders musicxml output
        #s1.show()

        #==== "fig-df02" end

        #==== "fig-df03"
        # Positioning the Same Element in Multiple Containers
        # show positioning the same element in multiple containers
        # do not yet use a flat representation
        s2 = stream.Stream()
        s3 = stream.Stream()
        s2.insert(10, n2)
        s3.insert(40, n2)

        # the offset attribute returns the last assigned
        assert n2.offset == 40
        # we can provide a site to find a location-specific offset
        assert n2.getOffsetBySite(m1) == 2.0
        assert n2.getOffsetBySite(s2) == 10
        # the None site provides a default offset
        assert set(n2.sites.get()) == set([None, m1, s2, s3])
        # the same instance is found in all Streams
        assert m1.hasElement(n2) == True
        assert s2.hasElement(n2) == True
        assert s3.hasElement(n2) == True

        # only the offset is independent for each location
        n2.pitch.transpose('-M2', inPlace=True)
        assert s2[s2.index(n2)].nameWithOctave == 'C4'
        assert s3[s3.index(n2)].nameWithOctave == 'C4'
        assert m1[m1.index(n2)].nameWithOctave == 'C4'

        # the transposition is maintained in the original context
        #s1.show()

        #==== "fig-df03" end

        #==== "fig-df04"
        # Simultaneous Access to Hierarchical and Flat Representations
        #s1.flat.show('t')

        # lengths show the number of elements; indices are sequential
        s1Flat = s1.flat
        assert len(s1) == 2
        assert len(s1Flat) == 6
        assert s1Flat[4] == n3
        assert s1Flat[5] == n4

        # adding another Part to the Score results in a different flat representation
        n5 = note.Note('a#1', quarterLength=2.5)
        n6 = note.Note('b2', quarterLength=1.5)
        m4 = stream.Measure(number=2)
        m4.append([n5, n6])

        r1 = note.Rest(type='whole')
        cf2 = m4.bestClef()  # = BassClef
        m3 = stream.Measure(number=1)
        m3.append([cf2, r1])

        p2 = stream.Part()
        p2.append([m3, m4])
        s1.insert(0, p2)

        assert 'BassClef' in cf2.classes

        # objects are sorted by offset
        s1Flat = s1.flat
        assert len(s1) == 3
        assert len(s1.flat) == 10
        assert s1Flat[6] == n3
        assert s1Flat[7] == n5
        assert s1Flat[8] == n4
        assert s1Flat[9] == n6

        # the G-sharp in m. 2 now has offsets for both flat and non-flat sites
        assert n3.getOffsetBySite(m2) == 0
        assert n3.getOffsetBySite(s1Flat) == 4
        # the B in m. 2 now has offsets for both flat and non-flat sites
        assert n6.getOffsetBySite(m4) == 2.5
        assert n6.getOffsetBySite(s1Flat) == 6.5

        #s1.show()

        #==== "fig-df04" end

        #==== "fig-df05"
        # Iterating and Filtering Elements by Class

        # get the Clef object, and report its sign, from Measure 1
        assert m1.getElementsByClass('Clef').stream()[0].sign == 'C'
        # collect into a list the sign of all clefs in the flat Score
        assert [cf.sign
                for cf in s1.flat.getElementsByClass('Clef')] == ['C', 'F']

        # collect the offsets of the Measures in the first part
        assert [e.offset for e in p1.elements] == [0.0, 4.0]
        # collect the offsets of Notes in the flattened first part
        assert [e.offset
                for e in p1.flat.notesAndRests] == [0.0, 2.0, 4.0, 4.5]
        # collect the offsets of Notes in all parts flattened
        assert [e.offset for e in s1.flat.notesAndRests
                ] == [0.0, 0.0, 2.0, 4.0, 4.0, 4.5, 6.5]

        # get all pitch names
        match = []
        for e in s1.flat.getElementsByClass('Note').stream():
            match.append(e.pitch.nameWithOctave)
        assert match == ['G3', 'C4', 'G#3', 'A#1', 'D-4', 'B2']

        # collect all Notes and transpose up a perfect fifth
        for n in s1.flat.getElementsByClass('Note').stream():
            n.transpose('P5', inPlace=True)

        # check that all pitches are correctly transposed
        match = []
        for e in s1.flat.getElementsByClass('Note').stream():
            match.append(e.pitch.nameWithOctave)
        assert match == ['D4', 'G4', 'D#4', 'E#2', 'A-4', 'F#3']

        #s1.show()

        #==== "fig-df05" end

        #==== "fig-df06"
        # Searching by Locations and Contexts

        # a Note can always find a Clef
        self.assertIs(n4.getContextByClass('Clef'), cf1)
        # must search oldest sites first
        assert n6.getContextByClass('Clef',
                                    sortByCreationTime='reverse') == cf2

        #        # a Note can find their Measure number from a flat Part
        #        match = []
        #        for e in p1.flat.getElementsByClass('Note'):
        #            match.append(e.getContextByClass('Measure').number)
        #        assert match == [1, 1, 2, 2]

        # all Notes can find their Measure number from a flat Score
        match = []
        for e in s1.flat.notesAndRests:
            match.append([e.name, e.getContextByClass('Measure').number])
        assert match == [['D', 1], ['rest', 1], ['G', 1], ['D#', 2], ['E#', 2],
                         ['A-', 2], ['F#', 2]]
        #==== "fig-df06" end

        #==== "fig-df06"
        # Non-Hierarchical Object Associations
        #oldIds = []
        #for idKey in n1.sites.siteDict:
        #    print (idKey, n1.sites.siteDict[idKey].isDead)
        #    oldIds.append(idKey)
        #print("-------")

        # Spanners can be positioned in Parts or Measures
        sp1 = spanner.Slur([n1, n4])
        p1.append(sp1)
        sp2 = spanner.Slur([n5, n6])
        m4.insert(0, sp2)

        #print(id(sp1), id(sp1.spannerStorage), n1.sites.siteDict[id(sp1.spannerStorage)].isDead)
        #if id(sp1.spannerStorage) in oldIds:
        #    print ("******!!!!!!!!!*******")

        # Elements can report on what Spanner they belong to
        ss1 = n1.getSpannerSites()
        self.assertTrue(sp1 in ss1, (ss1, sp1))

        ss6 = n6.getSpannerSites()
        assert sp2 in ss6

        p1Flat = p1.flat
        assert sp1.getDurationSpanBySite(p1Flat) == [0.0, 8.0]

        p2Flat = p2.flat
        assert sp2.getDurationSpanBySite(p2Flat) == [4.0, 8.0]

        #s1.show()
        #==== "fig-df06" end

        # additional tests
        self.assertEqual(m1.clef, cf1)
Example no. 2
    def asScore(self):
        '''
        returns all snippets as a score chunk

        
        >>> deduto = alpha.trecento.cadencebook.BallataSheet().workByTitle('deduto')
        >>> deduto.title
        'Deduto sey a quel'
        >>> dedutoScore = deduto.asScore()
        >>> dedutoScore
        <music21.stream.Score ...>
        >>> #_DOCS_HIDE dedutoScore.show()

        Changes made to a snippet are reflected in the asScore() score object:
        
        >>> deduto.snippets[0].parts[0].flat.notes[0].name = "C###"
        >>> deduto.asScore().parts[0].flat.notes[0].name
        'C###'
        '''
        s = stream.Score()
        md = metadata.Metadata()
        s.insert(0, md)
        s.metadata.composer = self.composer
        s.metadata.title = self.title

        for dummy in range(self.totalVoices):
            s.insert(0, stream.Part())
        bs = self.snippets
        for thisSnippet in bs:
            if thisSnippet is None:
                continue
            if (thisSnippet.tenor is None and thisSnippet.cantus is None
                    and thisSnippet.contratenor is None):
                continue
            for partNumber, snippetPart in enumerate(
                    thisSnippet.getElementsByClass('Stream')):
                if thisSnippet.snippetName != "" and partNumber == self.totalVoices - 1:
                    textEx = expressions.TextExpression(
                        thisSnippet.snippetName)
                    textEx.positionVertical = 'below'
                    if 'FrontPaddedSnippet' in thisSnippet.classes:
                        if snippetPart.hasMeasures():
                            snippetPart.getElementsByClass(
                                'Measure')[-1].insert(0, textEx)
                        else:
                            snippetPart.append(textEx)
                    else:
                        if snippetPart.hasMeasures():
                            snippetPart.getElementsByClass(
                                'Measure')[0].insert(0, textEx)
                        else:
                            snippetPart.insert(0, textEx)
#                if currentTs is None or timeSig != currentTs:
#                    s.append(timeSig)
#                    currentTs = timeSig
                try:
                    currentScorePart = s.parts[partNumber]
                except IndexError:
                    continue  # error in coding
                for thisElement in snippetPart:
                    if 'TimeSignature' in thisElement.classes:
                        continue
                    currentScorePart.append(thisElement)

        return s
Example no. 3
def split_part(part, max_length, part_index=-1):
    new_part = stream.Part(id='part' + str(part_index))
    for n in part.notesAndRests:
        for new_note in split_note(n, max_length):
            new_part.append(new_note)
    return new_part
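# `split_note` is not shown above; the following is a minimal sketch consistent
# with how `split_part` uses it, assuming it chops a note or rest into pieces no
# longer than `max_length` quarter notes and ties note fragments together.
# Only the helper's name comes from the call above; everything inside it is an
# assumption, not the original implementation.
import copy

from music21 import note, tie


def split_note(n, max_length):
    """Yield copies of n, none longer than max_length quarter notes."""
    remaining = n.duration.quarterLength
    first = True
    while remaining > 0:
        piece = copy.deepcopy(n)
        piece.duration.quarterLength = min(max_length, remaining)
        remaining -= piece.duration.quarterLength
        # tie consecutive fragments of a Note together (rests need no ties)
        if isinstance(n, note.Note):
            if first and remaining > 0:
                piece.tie = tie.Tie('start')
            elif not first:
                piece.tie = tie.Tie('stop' if remaining <= 0 else 'continue')
        first = False
        yield piece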
Example no. 4
    def systemFromSystem(self, systemElement, systemObj=None):
        r'''
        returns a :class:`~music21.stream.System` object from a <system> tag.
        The System object will contain :class:`~music21.stream.Part` objects
        which will have the notes, etc. contained in it.

        TODO: Handle multiple <voices>
        '''
        if systemObj is None:
            systemObj = stream.System()

        stavesList = systemElement.findall('staves')
        if not stavesList:
            raise CapellaImportException(
                'No <staves> tag found in this <system> element')
        if len(stavesList) > 1:
            raise CapellaImportException(
                'More than one <staves> tag found in this <system> element')
        stavesElement = stavesList[0]
        staffList = stavesElement.findall('staff')
        if not staffList:
            raise CapellaImportException(
                'No <staff> tag found in the <staves> element for this <system> element'
            )
        for thisStaffElement in staffList:
            # do something with defaultTime
            partId = 'UnknownPart'
            if 'layout' in thisStaffElement.attrib:
                partId = thisStaffElement.attrib['layout']
            partObj = stream.Part()
            partObj.id = partId

            voicesList = thisStaffElement.findall('voices')
            if not voicesList:
                raise CapellaImportException(
                    'No <voices> tag found in the <staff> tag for the <staves> element '
                    + 'for this <system> element')
            voicesElement = voicesList[0]
            voiceList = voicesElement.findall('voice')
            if not voiceList:
                raise CapellaImportException(
                    'No <voice> tag found in the <voices> tag for the <staff> tag for the '
                    + '<staves> element for this <system> element')
            if len(voiceList) == 1:  # single voice staff... perfect!
                thisVoiceElement = voiceList[0]
                noteObjectsList = thisVoiceElement.findall('noteObjects')
                if not noteObjectsList:
                    raise CapellaImportException(
                        'No <noteObjects> tag found in the <voice> tag found in the '
                        +
                        '<voices> tag for the <staff> tag for the <staves> element for '
                        + 'this <system> element')
                if len(noteObjectsList) > 1:
                    raise CapellaImportException(
                        'More than one <noteObjects> tag found in the <voice> tag found '
                        +
                        'in the <voices> tag for the <staff> tag for the <staves> element '
                        + 'for this <system> element')
                thisNoteObject = noteObjectsList[0]
                self.streamFromNoteObjects(thisNoteObject, partObj)
            systemObj.insert(0, partObj)
        return systemObj
Example no. 5
    def run(self,
            inputScore,
            allowableChords=None,
            closedPosition=False,
            forbiddenChords=None,
            maximumNumberOfChords=3):
        if 'Score' not in inputScore.classes:
            raise ChordReducerException("Must be called on a stream.Score")

        if allowableChords is not None:
            if not all(isinstance(x, chord.Chord) for x in allowableChords):
                raise ChordReducerException(
                    "All allowableChords must be Chords")
            intervalClassSets = []
            for x in allowableChords:
                intervalClassSet = self._getIntervalClassSet(x.pitches)
                intervalClassSets.append(intervalClassSet)
            allowableChords = frozenset(intervalClassSets)

        if forbiddenChords is not None:
            if not all(isinstance(x, chord.Chord) for x in forbiddenChords):
                raise ChordReducerException(
                    "All forbiddenChords must be Chords")
            intervalClassSets = []
            for x in forbiddenChords:
                intervalClassSet = self._getIntervalClassSet(x.pitches)
                intervalClassSets.append(intervalClassSet)
            forbiddenChords = frozenset(intervalClassSets)

        scoreTree = tree.fromStream.asTimespans(inputScore,
                                                flatten=True,
                                                classList=(note.Note,
                                                           chord.Chord))

        self.removeZeroDurationTimespans(scoreTree)
        self.splitByBass(scoreTree)
        self.removeVerticalDissonances(scoreTree=scoreTree,
                                       allowableChords=allowableChords,
                                       forbiddenChords=forbiddenChords)

        partwiseTrees = scoreTree.toPartwiseTimespanTrees()

        self.fillBassGaps(scoreTree, partwiseTrees)

        self.removeShortTimespans(scoreTree, partwiseTrees, duration=0.5)
        self.fillBassGaps(scoreTree, partwiseTrees)
        self.fillMeasureGaps(scoreTree, partwiseTrees)

        self.removeShortTimespans(scoreTree, partwiseTrees, duration=1.0)
        self.fillBassGaps(scoreTree, partwiseTrees)
        self.fillMeasureGaps(scoreTree, partwiseTrees)

        reduction = stream.Score()
        #partwiseReduction = tree.toPartwiseScore()
        #for part in partwiseReduction:
        #    reduction.append(part)
        chordifiedReduction = tree.toStream.chordified(
            scoreTree,
            templateStream=inputScore,
        )
        chordifiedPart = stream.Part()
        for measure in chordifiedReduction.getElementsByClass('Measure'):
            reducedMeasure = self.reduceMeasureToNChords(
                measure,
                maximumNumberOfChords=maximumNumberOfChords,
                weightAlgorithm=self.qlbsmpConsonance,
                trimBelow=0.25,
            )
            chordifiedPart.append(reducedMeasure)
        reduction.append(chordifiedPart)

        if closedPosition:
            for x in reduction.recurse().getElementsByClass('Chord'):
                x.closedPosition(forceOctave=4, inPlace=True)

        return reduction
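# The run() method above matches the ChordReducer in music21's
# analysis.reduceChords module; assuming that is where this class lives, a
# minimal usage sketch looks like this ('bwv66.6' is just a convenient corpus
# chorale, not part of the original example):
from music21 import corpus
from music21.analysis import reduceChords

bach = corpus.parse('bwv66.6')
reducer = reduceChords.ChordReducer()
reduction = reducer.run(bach, closedPosition=True, maximumNumberOfChords=2)
#reduction.show()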
Example no. 6
def abcToStreamPart(abcHandler, inputM21=None, spannerBundle=None):
    '''
    Handler conversion of a single Part of a multi-part score.
    Results are added into the provided inputM21 object
    or a newly created Part object

    The part object is then returned.
    '''
    from music21 import abcFormat

    if inputM21 is None:
        p = stream.Part()
    else:
        p = inputM21

    if spannerBundle is None:
        #environLocal.printDebug(['mxToMeasure()', 'creating SpannerBundle'])
        spannerBundle = spanner.SpannerBundle()


    # need to call on the entire handler, as this looks for special criteria,
    # like that at least 2 regular bars are used, not just double bars
    if abcHandler.definesMeasures():
        # first, split into a list of Measures; if there is only metadata and
        # one measure, that means that no measures are defined
        barHandlers = abcHandler.splitByMeasure()
        #environLocal.printDebug(['barHandlers', len(barHandlers)])
        # merge leading metadata with each bar that precedes it
        mergedHandlers = abcFormat.mergeLeadingMetaData(barHandlers)
        #environLocal.printDebug(['mergedHandlers', len(mergedHandlers)])
    else: # simply stick in a single list
        mergedHandlers = [abcHandler]

    # if only one merged handler, do not create measures
    if len(mergedHandlers) <= 1:
        useMeasures = False
    else:
        useMeasures = True

    # each unit in mergedHandlers possibly defines a Measure (w/ or w/o metadata),
    # trailing meta data, or a single collection of metadata and note data

    barCount = 0
    measureNumber = 1
    # merged handlers are ABCHandlerBar objects, defining attributes for barlines

    for mh in mergedHandlers:
        # if using measures and the handler has notes, build a Measure; otherwise add to the part
        #environLocal.printDebug(['abcToStreamPart', 'handler', 'left:', mh.leftBarToken,
        #    'right:', mh.rightBarToken, 'len(mh)', len(mh)])

        if useMeasures and mh.hasNotes():
            #environLocal.printDebug(['abcToStreamPart', 'useMeasures',
            #    useMeasures, 'mh.hasNotes()', mh.hasNotes()])
            dst = stream.Measure()
            # bar tokens are already extracted from the token list and are available
            # as attributes on the handler object
            # may return None for a regular barline

            if mh.leftBarToken is not None:
                # this may be Repeat Bar subclass
                bLeft = mh.leftBarToken.getBarObject()
                if bLeft is not None:
                    dst.leftBarline = bLeft
                if mh.leftBarToken.isRepeatBracket():
                    # get any open spanners of RepeatBracket type
                    rbSpanners = spannerBundle.getByClass('RepeatBracket'
                                        ).getByCompleteStatus(False)
                    # this indication is most likely an opening, as ABC does
                    # not encode second ending boundaries
                    # we can still check though:
                    if not rbSpanners:
                        # add this measure as a component
                        rb = spanner.RepeatBracket(dst)
                        # set number, returned here
                        rb.number = mh.leftBarToken.isRepeatBracket()
                        # only append if created; otherwise, already stored
                        spannerBundle.append(rb)
                    else: # close it here
                        rb = rbSpanners[0] # get RepeatBracket
                        rb.addSpannedElements(dst)
                        rb.completeStatus = True
                        # this returns 1 or 2 depending on the repeat
                    # in ABC, second repeats close immediately; that is
                    # they never span more than one measure
                    if mh.leftBarToken.isRepeatBracket() == 2:
                        rb.completeStatus = True

            if mh.rightBarToken is not None:
                bRight = mh.rightBarToken.getBarObject()
                if bRight is not None:
                    dst.rightBarline = bRight
                # above returns bars and repeats; we need to look if we just
                # have repeats
                if mh.rightBarToken.isRepeat():
                    # if we have a right bar repeat, and a spanner repeat
                    # bracket is open (even if just assigned above) we need
                    # to close it now.
                    # presently, no right-bar conditions start a repeat bracket
                    rbSpanners = spannerBundle.getByClass(
                                        'RepeatBracket').getByCompleteStatus(False)
                    if any(rbSpanners):
                        rb = rbSpanners[0] # get RepeatBracket
                        rb.addSpannedElements(dst)
                        rb.completeStatus = True
                        # this returns 1 or 2 depending on the repeat
                        # do not need to append; already in bundle
            barCount += 1
        else:
            dst = p # store directly in a part instance

        #environLocal.printDebug([mh, 'dst', dst])
        #ql = 0 # might not be zero if there is a pickup

        postTransposition, clefSet = parseTokens(mh, dst, p, useMeasures)

        # append measure to part; in the case of trailing meta data
        # dst may be part, even though useMeasures is True
        if useMeasures and 'Measure' in dst.classes:
            # check for incomplete bars
            # must have a time signature in this bar, or defined recently
            # could use getTimeSignatures() on Stream

            if barCount == 1 and dst.timeSignature is not None: # easy case
                # can only do this b/c ts is defined
                if dst.barDurationProportion() < 1.0:
                    dst.padAsAnacrusis()
                    dst.number = 0
                    #environLocal.printDebug([
                    #    'incompletely filled Measure found on abc import; ',
                    #    'interpreting as an anacrusis:', 'paddingLeft:', dst.paddingLeft])
            else:
                dst.number = measureNumber
                measureNumber += 1
            p.coreAppend(dst)

    try:
        reBar(p, inPlace=True)
    except (ABCTranslateException, meter.MeterException, ZeroDivisionError):
        pass
    # clefs are not typically defined, but if so, are set to the first measure
    # following the meta data, or in the open stream
    if not clefSet and not p.recurse().getElementsByClass('Clef'):
        if useMeasures:  # assume at start of measures
            p.getElementsByClass('Measure')[0].clef = clef.bestClef(p, recurse=True)
        else:
            p.coreInsert(0, clef.bestClef(p, recurse=True))

    if postTransposition != 0:
        p.transpose(postTransposition, inPlace=True)

    if useMeasures and p.recurse().getElementsByClass('TimeSignature'):
        # call make beams for now; later, import beams
        #environLocal.printDebug(['abcToStreamPart: calling makeBeams'])
        try:
            p.makeBeams(inPlace=True)
        except (meter.MeterException, stream.StreamException) as e:
            environLocal.warn("Error in beaming...ignoring: %s" % str(e))

    # copy spanners into topmost container; here, a part
    rm = []
    for sp in spannerBundle.getByCompleteStatus(True):
        p.coreInsert(0, sp)
        rm.append(sp)
    # remove from original spanner bundle
    for sp in rm:
        spannerBundle.remove(sp)
    p.coreElementsChanged()
    return p
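# A rough usage sketch for abcToStreamPart(). In the full converter this
# function is normally driven by the higher-level score translation, so calling
# it directly on a hand-built handler, as below, is an assumption; the ABC
# fragment itself is illustrative data.
from music21 import abcFormat

abcText = '''X:1
T:Example
M:4/4
L:1/4
K:C
C D E F | G A B c |]
'''

handler = abcFormat.ABCHandler()
handler.process(abcText)
part = abcToStreamPart(handler)
#part.show('text')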
Example no. 7
    def testNoteheadSmorgasbord(self):
        # tests the rendering of many different types of noteheads
        from music21 import expressions

        n = note.Note('c3')
        n.notehead = 'diamond'
        
        p = stream.Part()
        tn = expressions.TextExpression('diamond')
        m = note.Note('c3')
        m.notehead = 'cross'
        tm = expressions.TextExpression('cross')
        l = note.Note('c3')
        l.notehead = 'triangle'
        tl = expressions.TextExpression('triangle')
        k = note.Note('c3')
        k.notehead = 'circle-x'
        tk = expressions.TextExpression('circle-x')
        j = note.Note('c3')
        j.notehead = 'x'
        tj = expressions.TextExpression('x')
        i = note.Note('c3')
        i.notehead = 'slash'
        ti = expressions.TextExpression('slash')
        h = note.Note('c3')
        h.notehead = 'square'
        th = expressions.TextExpression('square')
        g = note.Note('c3')
        g.notehead = 'arrow down'
        tg = expressions.TextExpression('arrow down')
        f = note.Note('c3')
        f.notehead = 'inverted triangle'
        tf = expressions.TextExpression('inverted triangle')
        f.addLyric('inverted triangle')
        e = note.Note('c3')
        e.notehead = 'back slashed'
        te = expressions.TextExpression('back slashed')
        d = note.Note('c3')
        d.notehead = 'fa'
        td = expressions.TextExpression('fa')
        c = note.Note('c3')
        c.notehead = 'normal'
        tc = expressions.TextExpression('normal')

        noteList = [tc, c, tn, n, th, h, tl, l, tf, f, tg, g, te, e, ti, i, tj, j, tm, m, tk, k, td, d]
        for thisNote in noteList:
            p.append(thisNote)

        #p.show()
        raw = fromMusic21Object(p)

        self.assertEqual(raw.find('<notehead>diamond</notehead>') > 0, True)
        self.assertEqual(raw.find('<notehead>square</notehead>') > 0, True)
        self.assertEqual(raw.find('<notehead>triangle</notehead>') > 0, True)
        self.assertEqual(raw.find('<notehead>inverted triangle</notehead>') > 0, True)
        self.assertEqual(raw.find('<notehead>arrow down</notehead>') > 0, True)
        self.assertEqual(raw.find('<notehead>back slashed</notehead>') > 0, True)
        self.assertEqual(raw.find('<notehead>slash</notehead>') > 0, True)
        self.assertEqual(raw.find('<notehead>x</notehead>') > 0, True)
        self.assertEqual(raw.find('<notehead>cross</notehead>') > 0, True)
        self.assertEqual(raw.find('<notehead>circle-x</notehead>') > 0, True)
        self.assertEqual(raw.find('<notehead>fa</notehead>') > 0, True)
Example no. 8
    def convertM21(self, outVote, arrError, ground):
        errorColor = "#ff0000"
        missingColor = "#00ff00"
        #         sOut=stream.Stream()
        sOut = stream.Score()
        sPart = stream.Part()
        measureIndex = 1
        measure = stream.Measure()
        measure.number = measureIndex
        indexS = 0

        for symbol in outVote:
            mytie = ""
            realDuration = None
            s = symbol
            isError = False
            isMissing = False
            if (len(ground) > indexS):
                sGround = ground[indexS]

            if (indexS in arrError):
                isError = True
                if s == "*":
                    s = sGround
                    isMissing = True

            if isinstance(s, list):
                s = s[0]
            if s.find('TS:') != -1:
                ts = meter.TimeSignature(s[3:])
                if isError:
                    ts.color = errorColor
                if isMissing:
                    ts.color = missingColor
                measure.append(ts)
            if s.find('KS:') != -1:
                k = key.KeySignature(int(s[3:]))
                if isError:
                    k.color = errorColor
                if isMissing:
                    k.color = missingColor
                measure.append(k)
            if s.find('CL:') != -1:
                c = clef.clefFromString(str(s[3:]))
                if isError:
                    c.color = errorColor
                if isMissing:
                    c.color = missingColor
                measure.append(c)
            if s.find('N:') != -1:
                try:
                    if isinstance(symbol, list):
                        realDuration = symbol[1]
                        mytie = symbol[2]

                    sep = s.index("_")
                    duration = s[sep + 1:]
                    #                     if realDuration!=None:
                    #                         duration=realDuration
                    if (float(duration) > 0):
                        n = note.Note(s[2:sep], quarterLength=float(duration))
                        if isError:
                            n.color = errorColor
                        if isMissing:
                            n.color = missingColor
                        if mytie != "":
                            n.tie = tie.Tie(mytie)
                        measure.append(n)
                except:
                    print("error" + s)

            if s.find('R:') != -1:
                try:
                    if isinstance(symbol, list):
                        realDuration = symbol[1]
                        mytie = symbol[2]
                    duration = s[2:]
                    #                     if realDuration!=None:
                    #                         duration=realDuration
                    n = note.Rest(quarterLength=float(duration))
                    if isError:
                        n.color = errorColor
                    if isMissing:
                        n.color = missingColor
                    measure.append(n)
                except:
                    print("error" + s)

            if s.find('C:') != -1:
                notes = s.split("[:")
                cPitch = []
                for n in notes:
                    if n != 'C:':
                        sep = n.index("_")
                        duration = n[sep + 1:]
                        pitch = n[0:sep]
                        cPitch.append(pitch)
                c = chord.Chord(cPitch)
                c.duration.quarterLength = float(duration)
                if isError:
                    c.color = errorColor
                if isMissing:
                    c.color = missingColor
                measure.append(c)
            if s.find('!') != -1:

                if isinstance(symbol, list):
                    barType = symbol[1]
                    barRepeat = symbol[2]
                    if barType != "":
                        mybartype = bar.styleToMusicXMLBarStyle(barType)
                        myBar = bar.Barline(style=mybartype)
                        measure.rightBarline = myBar

                    if barRepeat != "":
                        myBar = bar.Repeat(direction=barRepeat)
                        if barRepeat == "start":
                            measure.leftBarline = myBar
                        if barRepeat == "end":
                            measure.rightBarline = myBar
                sPart.append(measure)
                measureIndex += 1
                measure = stream.Measure()
                measure.number = measureIndex
            indexS += 1

        sOut.append(sPart)
        return sOut
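    # Illustrative input for convertM21(), reconstructed from the token formats
    # parsed above ('CL:' clef, 'KS:' key signature, 'TS:' time signature,
    # 'N:<pitch>_<quarterLength>' note, 'R:<quarterLength>' rest, '!' barline).
    # The data is hypothetical and only meant to show the expected shape.
outVote = ['CL:G2', 'KS:0', 'TS:4/4',
           'N:C4_1.0', 'N:E4_1.0', 'N:G4_1.0', 'R:1.0', '!']
ground = list(outVote)   # reference transcription (hypothetical)
arrError = []            # indices of symbols that disagree with ground
# `converter` would be an instance of the class defining convertM21 (not shown):
# score = converter.convertM21(outVote, arrError, ground)
# score.show()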
Example no. 9
def weave_data_frame_to_midi(data_frame,
                             midi_file_directory=os.getcwd(),
                             save_midi_file=True):
    if isinstance(data_frame, pd.DataFrame):

        # one Score per distinct value in the first column
        score_ids = data_frame.iloc[:, 0:1].drop_duplicates()
        score_dict = {}
        for idx in range(len(score_ids)):
            score = stream.Score()
            score_dict[score_ids.iloc[idx, 0]] = score

        # one Part per distinct (score, part) pair in the first two columns
        part_rows = data_frame.iloc[:, 0:2].drop_duplicates()
        part_dict = {}
        for idx in range(len(part_rows)):
            if not math.isnan(part_rows.iloc[idx, 1]):
                part = stream.Part()
                part_dict[part_rows.iloc[idx, 1]] = part
                score_dict[part_rows.iloc[idx, 0]].append(part)

        # instruments: columns 0-3 (score, part, instrument name, offset)
        instrument_rows = data_frame.iloc[:, 0:4].drop_duplicates()
        for idx in range(len(instrument_rows)):
            if not math.isnan(instrument_rows.iloc[idx, 3]):
                if instrument_rows.iloc[idx, 2] == 'StringInstrument':
                    instrument_element = instrument.StringInstrument()
                else:
                    instrument_element = instrument.fromString(
                        instrument_rows.iloc[idx, 2])
                part_dict[instrument_rows.iloc[idx, 1]].append(
                    instrument_element)
                instrument_element.offset = instrument_rows.iloc[idx, 3]

        # metronome marks: columns [0, 1, 4, 5, 6]
        metronome_rows = data_frame.iloc[:, [0, 1, 4, 5, 6]].drop_duplicates()
        for idx in range(len(metronome_rows)):
            if not math.isnan(metronome_rows.iloc[idx, 3]):
                metronome_element = tempo.MetronomeMark(
                    metronome_rows.iloc[idx, 2], metronome_rows.iloc[idx, 3])
                part_dict[metronome_rows.iloc[idx, 1]].append(
                    metronome_element)
                metronome_element.offset = metronome_rows.iloc[idx, 4]

        # key signatures: columns [0, 1, 7, 8, 9]
        key_rows = data_frame.iloc[:, [0, 1, 7, 8, 9]].drop_duplicates()
        for idx in range(len(key_rows)):
            if not math.isnan(key_rows.iloc[idx, 4]):
                key_element = key.Key(key_rows.iloc[idx, 2],
                                      key_rows.iloc[idx, 3])
                part_dict[key_rows.iloc[idx, 1]].append(key_element)
                key_element.offset = key_rows.iloc[idx, 4]

        # time signatures: columns [0, 1, 10, 11]
        time_rows = data_frame.iloc[:, [0, 1, 10, 11]].drop_duplicates()
        for idx in range(len(time_rows)):
            if not math.isnan(time_rows.iloc[idx, 3]):
                time_signature_element = meter.TimeSignature(
                    time_rows.iloc[idx, 2])
                part_dict[time_rows.iloc[idx, 1]].append(
                    time_signature_element)
                time_signature_element.offset = time_rows.iloc[idx, 3]

        # voices: columns [0, 1, 12, 13]
        voice_rows = data_frame.iloc[:, [0, 1, 12, 13]].drop_duplicates()
        voice_dict = {}
        for idx in range(len(voice_rows)):
            if not math.isnan(voice_rows.iloc[idx, 2]):
                voice = stream.Voice()
                voice_dict[voice_rows.iloc[idx, 2]] = voice
                part_dict[voice_rows.iloc[idx, 1]].append(voice)
                voice.offset = voice_rows.iloc[idx, 3]

        # notes, rests, and chords: columns [0, 1, 12, 14, 15, 16, 17, 18, 19, 20]
        note_rows = data_frame.iloc[:, [0, 1, 12, 14, 15, 16, 17, 18, 19, 20]]
        for idx in range(len(note_rows)):
            try:
                if not math.isnan(note_rows.iloc[idx, 9]):
                    if note_rows.iloc[idx, 3] == "Note":
                        note_element = note.Note()
                        voice_dict[note_rows.iloc[idx, 2]].append(note_element)
                        note_element.pitch.name = note_rows.iloc[idx, 4]
                        note_element.pitch.octave = note_rows.iloc[idx, 6]
                        note_element.volume.velocity = note_rows.iloc[idx, 7]
                        note_element.duration.quarterLength = note_rows.iloc[idx, 8]
                        note_element.offset = note_rows.iloc[idx, 9]

                    elif note_rows.iloc[idx, 3] == "Rest":
                        rest_element = note.Rest()
                        voice_dict[note_rows.iloc[idx, 2]].append(rest_element)
                        rest_element.duration.quarterLength = note_rows.iloc[idx, 8]
                        rest_element.offset = note_rows.iloc[idx, 9]

                    elif note_rows.iloc[idx, 3] == "Chord":
                        # start a new Chord whenever the offset changes
                        if (note_rows.iloc[idx - 1, 9] != note_rows.iloc[idx, 9]
                                or note_rows.iloc[idx, 3] != 'Chord'):
                            chord_element = chord.Chord()
                            voice_dict[note_rows.iloc[idx, 2]].append(chord_element)

                        if len(note_rows.iloc[idx, 4]) > 2:
                            print("A chord given in a single row is still under development.")
                            return False
                        else:
                            pitch_element = note.Note()
                            pitch_element.pitch.name = note_rows.iloc[idx, 4]
                            pitch_element.pitch.octave = note_rows.iloc[idx, 6]
                            pitch_element.volume.velocity = note_rows.iloc[idx, 7]
                            pitch_element.duration.quarterLength = note_rows.iloc[idx, 8]
                            chord_element.add(pitch_element)
                            chord_element.offset = note_rows.iloc[idx, 9]
                    else:
                        print(str(idx) + "th row cannot be converted to a MIDI file")
            except KeyError:
                pass

            print_progress_bar_weaving(idx, data_frame)

        if score_dict:
            for _midi_file_name, score in zip(score_dict.keys(),
                                              score_dict.values()):
                if score:
                    midi_file = midi.translate.streamToMidiFile(score)

                    if save_midi_file and midi_file:
                        midi_file.open(
                            midi_file_directory + '/' + _midi_file_name +
                            '_encoded.mid', 'wb')
                        midi_file.write()
                        midi_file.close()
                        print(midi_file_directory + '/' + _midi_file_name +
                              '_encoded.mid is saved')

    else:
        print("The input data isn't a pandas DataFrame")
        return False

    return score_dict
Example no. 10
def indexed_chorale_to_score(seq):
    """
    Convert an indexed chorale (one sequence of note indices per voice) into a music21 Score.

    :param seq: voice-major sequence; each voice is a list of indices into index2notes
    :return: music21.stream.Score
    """
    #_, _, _, index2notes, note2indexes, _ = pickle.load(open(pickled_dataset, 'rb'))
    index2notes = [{
        0: 'B4',
        1: 'D-5',
        2: 'F#4',
        3: '__',
        4: 'G-4',
        5: 'C5',
        6: 'E-4',
        7: 'A-4',
        8: 'rest',
        9: 'A#4',
        10: 'G5',
        11: 'G4',
        12: 'D#4',
        13: 'F5',
        14: 'C#5',
        15: 'B-4',
        16: 'A4',
        17: 'F4',
        18: 'E#4',
        19: 'C#4',
        20: 'END',
        21: 'E5',
        22: 'A-5',
        23: 'F#5',
        24: 'E#5',
        25: 'E-5',
        26: 'G-5',
        27: 'A5',
        28: 'START',
        29: 'D4',
        30: 'D5',
        31: 'E4',
        32: 'G#4',
        33: 'C4',
        34: 'D#5',
        35: 'B#4',
        36: 'C-5',
        37: 'G#5',
        38: 'F-4',
        39: 'D-4',
        40: 'F-5',
        41: 'B--4',
        42: 'B#3',
        43: 'F##4',
        44: 'C##4',
        45: 'G##4',
        46: 'C##5',
        47: 'D##4',
        48: 'E--5',
        49: 'B--5',
        50: 'F##5',
        51: 'D##5'
    }, {
        0: 'A3',
        1: 'B4',
        2: 'F#4',
        3: 'D-5',
        4: '__',
        5: 'A-3',
        6: 'D-4',
        7: 'F3',
        8: 'G-4',
        9: 'C5',
        10: 'E-4',
        11: 'A-4',
        12: 'rest',
        13: 'A#4',
        14: 'D#4',
        15: 'G4',
        16: 'A#3',
        17: 'C#5',
        18: 'B-4',
        19: 'G#3',
        20: 'A4',
        21: 'F4',
        22: 'E#4',
        23: 'C#4',
        24: 'END',
        25: 'B-3',
        26: 'F#3',
        27: 'START',
        28: 'B3',
        29: 'D4',
        30: 'E4',
        31: 'D5',
        32: 'F##4',
        33: 'G#4',
        34: 'C4',
        35: 'G3',
        36: 'F-4',
        37: 'B#3',
        38: 'B#4',
        39: 'C-4',
        40: 'C##4',
        41: 'D##4',
        42: 'F##3',
        43: 'E#3',
        44: 'G##3',
        45: 'G##4',
        46: 'A##3',
        47: 'G-3',
        48: 'C-5',
        49: 'B--4',
        50: 'E--4',
        51: 'A--4',
        52: 'E##4',
        53: 'B--3'
    }, {
        0: 'A3',
        1: 'F#4',
        2: '__',
        3: 'D3',
        4: 'F3',
        5: 'A-3',
        6: 'D-4',
        7: 'B#3',
        8: 'C-4',
        9: 'G-4',
        10: 'E-4',
        11: 'A-4',
        12: 'rest',
        13: 'D#4',
        14: 'G4',
        15: 'C#3',
        16: 'A#3',
        17: 'G#3',
        18: 'A4',
        19: 'F4',
        20: 'E#4',
        21: 'C#4',
        22: 'END',
        23: 'B-3',
        24: 'E3',
        25: 'F#3',
        26: 'START',
        27: 'B3',
        28: 'D4',
        29: 'C3',
        30: 'E4',
        31: 'D#3',
        32: 'G#4',
        33: 'C4',
        34: 'G3',
        35: 'E-3',
        36: 'E#3',
        37: 'G-3',
        38: 'B--3',
        39: 'F-3',
        40: 'F-4',
        41: 'C##4',
        42: 'B#2',
        43: 'D##3',
        44: 'F##3',
        45: 'C##3',
        46: 'G##3',
        47: 'D-3',
        48: 'D##4',
        49: 'F##4',
        50: 'A##3',
        51: 'B--4',
        52: 'E--4',
        53: 'E##3'
    }, {
        0: 'F2',
        1: 'A3',
        2: '__',
        3: 'D3',
        4: 'F3',
        5: 'A-3',
        6: 'G-3',
        7: 'D-4',
        8: 'B#3',
        9: 'D#2',
        10: 'C-3',
        11: 'E-4',
        12: 'rest',
        13: 'G-2',
        14: 'E2',
        15: 'D#4',
        16: 'C#3',
        17: 'A2',
        18: 'F#2',
        19: 'A#3',
        20: 'D2',
        21: 'E-2',
        22: 'G2',
        23: 'G#3',
        24: 'C2',
        25: 'C#2',
        26: 'C#4',
        27: 'E#2',
        28: 'END',
        29: 'A#2',
        30: 'G#2',
        31: 'B-3',
        32: 'D-3',
        33: 'E3',
        34: 'B#2',
        35: 'F##3',
        36: 'F#3',
        37: 'START',
        38: 'B3',
        39: 'C3',
        40: 'D4',
        41: 'E4',
        42: 'B-2',
        43: 'D#3',
        44: 'C4',
        45: 'G3',
        46: 'A-2',
        47: 'E-3',
        48: 'E#3',
        49: 'B2',
        50: 'C-4',
        51: 'B--3',
        52: 'F-3',
        53: 'B--2',
        54: 'C##3',
        55: 'F##2',
        56: 'D##2',
        57: 'G##2',
        58: 'C##2',
        59: 'D##3',
        60: 'B#1',
        61: 'G##3',
        62: 'D-2',
        63: 'A##2',
        64: 'E##2',
        65: 'F-4',
        66: 'F-2',
        67: 'E--3',
        68: 'E--4',
        69: 'A--3',
        70: 'C##4',
        71: 'E##3',
        72: 'A##3'
    }]

    note2indexes = [{
        'B4': 0,
        'D-5': 1,
        'F#4': 2,
        '__': 3,
        'G-4': 4,
        'C5': 5,
        'E-4': 6,
        'A-4': 7,
        'rest': 8,
        'A#4': 9,
        'G5': 10,
        'G4': 11,
        'D#4': 12,
        'F5': 13,
        'C#5': 14,
        'B-4': 15,
        'A4': 16,
        'F4': 17,
        'E#4': 18,
        'C#4': 19,
        'END': 20,
        'E5': 21,
        'A-5': 22,
        'F#5': 23,
        'E#5': 24,
        'E-5': 25,
        'G-5': 26,
        'A5': 27,
        'START': 28,
        'D4': 29,
        'D5': 30,
        'E4': 31,
        'G#4': 32,
        'C4': 33,
        'D#5': 34,
        'B#4': 35,
        'C-5': 36,
        'G#5': 37,
        'F-4': 38,
        'D-4': 39,
        'F-5': 40,
        'B--4': 41,
        'B#3': 42,
        'F##4': 43,
        'C##4': 44,
        'G##4': 45,
        'C##5': 46,
        'D##4': 47,
        'E--5': 48,
        'B--5': 49,
        'F##5': 50,
        'D##5': 51
    }, {
        'A3': 0,
        'B4': 1,
        'F#4': 2,
        'D-5': 3,
        '__': 4,
        'A-3': 5,
        'D-4': 6,
        'F3': 7,
        'G-4': 8,
        'C5': 9,
        'E-4': 10,
        'A-4': 11,
        'rest': 12,
        'A#4': 13,
        'D#4': 14,
        'G4': 15,
        'A#3': 16,
        'C#5': 17,
        'B-4': 18,
        'G#3': 19,
        'A4': 20,
        'F4': 21,
        'E#4': 22,
        'C#4': 23,
        'END': 24,
        'B-3': 25,
        'F#3': 26,
        'START': 27,
        'B3': 28,
        'D4': 29,
        'E4': 30,
        'D5': 31,
        'F##4': 32,
        'G#4': 33,
        'C4': 34,
        'G3': 35,
        'F-4': 36,
        'B#3': 37,
        'B#4': 38,
        'C-4': 39,
        'C##4': 40,
        'D##4': 41,
        'F##3': 42,
        'E#3': 43,
        'G##3': 44,
        'G##4': 45,
        'A##3': 46,
        'G-3': 47,
        'C-5': 48,
        'B--4': 49,
        'E--4': 50,
        'A--4': 51,
        'E##4': 52,
        'B--3': 53
    }, {
        'A3': 0,
        'F#4': 1,
        '__': 2,
        'D3': 3,
        'F3': 4,
        'A-3': 5,
        'D-4': 6,
        'B#3': 7,
        'C-4': 8,
        'G-4': 9,
        'E-4': 10,
        'A-4': 11,
        'rest': 12,
        'D#4': 13,
        'G4': 14,
        'C#3': 15,
        'A#3': 16,
        'G#3': 17,
        'A4': 18,
        'F4': 19,
        'E#4': 20,
        'C#4': 21,
        'END': 22,
        'B-3': 23,
        'E3': 24,
        'F#3': 25,
        'START': 26,
        'B3': 27,
        'D4': 28,
        'C3': 29,
        'E4': 30,
        'D#3': 31,
        'G#4': 32,
        'C4': 33,
        'G3': 34,
        'E-3': 35,
        'E#3': 36,
        'G-3': 37,
        'B--3': 38,
        'F-3': 39,
        'F-4': 40,
        'C##4': 41,
        'B#2': 42,
        'D##3': 43,
        'F##3': 44,
        'C##3': 45,
        'G##3': 46,
        'D-3': 47,
        'D##4': 48,
        'F##4': 49,
        'A##3': 50,
        'B--4': 51,
        'E--4': 52,
        'E##3': 53
    }, {
        'F2': 0,
        'A3': 1,
        '__': 2,
        'D3': 3,
        'F3': 4,
        'A-3': 5,
        'G-3': 6,
        'D-4': 7,
        'B#3': 8,
        'D#2': 9,
        'C-3': 10,
        'E-4': 11,
        'rest': 12,
        'G-2': 13,
        'E2': 14,
        'D#4': 15,
        'C#3': 16,
        'A2': 17,
        'F#2': 18,
        'A#3': 19,
        'D2': 20,
        'E-2': 21,
        'G2': 22,
        'G#3': 23,
        'C2': 24,
        'C#2': 25,
        'C#4': 26,
        'E#2': 27,
        'END': 28,
        'A#2': 29,
        'G#2': 30,
        'B-3': 31,
        'D-3': 32,
        'E3': 33,
        'B#2': 34,
        'F##3': 35,
        'F#3': 36,
        'START': 37,
        'B3': 38,
        'C3': 39,
        'D4': 40,
        'E4': 41,
        'B-2': 42,
        'D#3': 43,
        'C4': 44,
        'G3': 45,
        'A-2': 46,
        'E-3': 47,
        'E#3': 48,
        'B2': 49,
        'C-4': 50,
        'B--3': 51,
        'F-3': 52,
        'B--2': 53,
        'C##3': 54,
        'F##2': 55,
        'D##2': 56,
        'G##2': 57,
        'C##2': 58,
        'D##3': 59,
        'B#1': 60,
        'G##3': 61,
        'D-2': 62,
        'A##2': 63,
        'E##2': 64,
        'F-4': 65,
        'F-2': 66,
        'E--3': 67,
        'E--4': 68,
        'A--3': 69,
        'C##4': 70,
        'E##3': 71,
        'A##3': 72
    }]
    num_pitches = list(map(len, index2notes))
    slur_indexes = list(map(lambda d: d[SLUR_SYMBOL], note2indexes))

    score = stream.Score()
    for voice_index, v in enumerate(seq):
        part = stream.Part(id='part' + str(voice_index))
        dur = 0
        f = note.Rest()
        for k, n in enumerate(v):
            # if it is a played note
            if not n == slur_indexes[voice_index]:
                # add previous note
                if dur > 0:
                    f.duration = duration.Duration(dur / SUBDIVISION)
                    part.append(f)

                dur = 1
                f = standard_note(index2notes[voice_index][n])
            else:
                dur += 1
        # add last note
        f.duration = duration.Duration(dur / SUBDIVISION)
        part.append(f)
        score.insert(part)
    return score
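# SLUR_SYMBOL, SUBDIVISION, and standard_note() are defined elsewhere in the
# module this function comes from. A minimal sketch of what they might look
# like, inferred only from how they are used above; the SUBDIVISION value and
# the handling of the START/END markers are assumptions.
from music21 import note

SLUR_SYMBOL = '__'   # the "hold previous note" token that appears in index2notes
SUBDIVISION = 4      # assumed number of time steps per quarter note


def standard_note(note_or_rest_string):
    """Map a token from index2notes back to a music21 Note or Rest."""
    if note_or_rest_string in ('rest', 'START', 'END', SLUR_SYMBOL):
        # structural markers are rendered as rests in this sketch
        return note.Rest()
    return note.Note(note_or_rest_string)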
Example no. 11
def lowerLines():
    restLengths = [0, 16, 12, 11, 10, 7, 6, 7, 6, 5, 4, 3, 8, 10, 12, 14, 16, 17, 18, 19, 20]
    correctTranspositions = [-1, 2, -3, -3, 1, 1, 6, 3, -2] # correct the first note of rotations 13-21
    fixLastNoteLengths = {11: 4.5, 12: 3, 13: 2.5, 14: 2, 15: 1.5, 20: 10.5}
    
    currentNote = 0
    rotationNumber = 1
    myRow = stream.Part()
    for phraseNumber in range(1,21):
        myRow.append(note.Rest(quarterLength=restLengths[phraseNumber]/2.0))
        if phraseNumber == 8: ## inconsistency in RCS's scheme
            currentNote += 2
        for addNote in range(21 - phraseNumber):
            if rotationNumber <= 10 or rotationNumber >= 20:
                #default
                appendNote = copy.deepcopy(rowNotes[currentNote % 10])
            else: # second set of rotations is up a step:
                appendNote = rowNotes[currentNote % 10].transpose(2)
#                if phraseNumber == 8 and addNote == 9: # mistaken transpositions by RCS
#                    appendNote = appendNote.transpose(-1)
#                    appendNote.lyrics.append(note.Lyric(text="*", number=3))
#
#                elif phraseNumber == 9 and addNote == 6:
#                    appendNote = appendNote.transpose(2)
#                    appendNote.lyrics.append(note.Lyric(text="*", number=3))

            if addNote == 0:
                if phraseNumber != 8:
                    appendNote.lyrics.append(note.Lyric(text="p" + str(phraseNumber), number=1))
                else:
                    appendNote.lyrics.append(note.Lyric(text="p8*", number=1))
            if (currentNote % 10 == (rotationNumber + 8) % 10) and (currentNote != 0):
                currentNote += 2
                rotationNumber += 1
            else:
                if (currentNote % 10 == (rotationNumber + 9) % 10):
                    appendNote.lyrics.append(note.Lyric(text="r" + str(rotationNumber), number=2))
                    if rotationNumber in range(13, 22):
                        appendNote.transpose(correctTranspositions[rotationNumber-13], inPlace = True)
                        appendNote.pitch.simplifyEnharmonic(inPlace = True)
                        appendNote.lyrics.append(note.Lyric(text="*", number=3))
                        
                currentNote += 1
            if addNote == 20-phraseNumber: # correct Last Notes
                #if phraseNumber == 12: # bug in Finale for accidental display?
                #    appendNote.pitch.accidental.displayStatus = True
                if phraseNumber in fixLastNoteLengths:
                    appendNote.quarterLength = fixLastNoteLengths[phraseNumber]
            myRow.append(appendNote)

    #retrograde
    totalNotes = len(myRow)
    for i in range(2, totalNotes+1): #skip last note
        el = myRow[totalNotes-i]
        if 'Note' in el.classes:
            elNote = el.transpose('A1')
            elNote.pitch.simplifyEnharmonic(inPlace=True)
            elNote.lyrics = []
            myRow.append(elNote)
        else:
            elRest = copy.deepcopy(el) # rests
            if i == 2:
                elRest.quarterLength = 11.5
            myRow.append(elRest)
    
    myRow.insert(0, meter.TimeSignature('2/2'))

    myRow.show()
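lowerLines() also depends on a rowNotes list (a ten-note row, indexed modulo 10) and on copy, stream, note, and meter imports that are not shown here. A minimal setup sketch with placeholder pitches rather than the actual row:

import copy
from music21 import meter, note, stream

# Assumed setup: ten row Notes; the real pitch content of the row is not
# given in this excerpt, so these pitches are placeholders only.
rowNotes = [note.Note(p, type='quarter')
            for p in ['C4', 'C#4', 'D4', 'E-4', 'E4',
                      'F4', 'F#4', 'G4', 'A-4', 'A4']]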
Example no. 12
0
    def tensor_to_score(self, tensor_score, fermata_tensor=None):
        """
        :param tensor_score: (num_voices, length)
        :return: music21 score object
        """
        slur_indexes = [
            note2index[SLUR_SYMBOL] for note2index in self.note2index_dicts
        ]

        score = music21.stream.Score()
        num_voices = tensor_score.size(0)
        name_parts = (num_voices == 4)
        part_names = ['Soprano', 'Alto', 'Tenor', 'Bass']

        for voice_index, (voice, index2note, slur_index) in enumerate(
                zip(tensor_score, self.index2note_dicts, slur_indexes)):
            add_fermata = False
            if name_parts:
                part = stream.Part(id=part_names[voice_index],
                                   partName=part_names[voice_index],
                                   partAbbreviation=part_names[voice_index],
                                   instrumentName=part_names[voice_index])
            else:
                part = stream.Part(id='part' + str(voice_index))
            dur = 0
            total_duration = 0
            f = music21.note.Rest()
            for note_index in [n.item() for n in voice]:
                # if it is a played note
                if note_index != slur_indexes[voice_index]:
                    # add previous note
                    if dur > 0:
                        f.duration = music21.duration.Duration(
                            dur / self.subdivision)

                        if add_fermata:
                            f.expressions.append(music21.expressions.Fermata())
                            add_fermata = False

                        part.append(f)

                    dur = 1
                    f = standard_note(index2note[note_index])
                    if fermata_tensor is not None and voice_index == 0:
                        if fermata_tensor[0, total_duration] == 1:
                            add_fermata = True
                        else:
                            add_fermata = False
                    total_duration += 1

                else:
                    dur += 1
                    total_duration += 1
            # add last note
            f.duration = music21.duration.Duration(dur / self.subdivision)
            if add_fermata:
                f.expressions.append(music21.expressions.Fermata())
                add_fermata = False

            part.append(f)
            score.insert(part)
        return score
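The decoding pattern used here (keep a pending note, extend it while slur/hold symbols repeat, flush it when a new token arrives) can be shown standalone. A minimal runnable sketch, assuming '__' as the hold symbol, 'rest' as the rest token, and four frames per quarter note (all assumptions, not the original constants):

from music21 import duration, note, stream

SUBDIVISION = 4     # assumed: frames per quarter note
SLUR_SYMBOL = '__'  # assumed: token meaning "hold the previous event"

def frames_to_part(tokens):
    # Decode a frame-wise token list into a Part: every new token starts a
    # note or rest, every SLUR_SYMBOL extends the pending event by one frame.
    part = stream.Part()
    pending, dur = note.Rest(), 0
    for token in tokens:
        if token != SLUR_SYMBOL:
            if dur > 0:
                pending.duration = duration.Duration(dur / SUBDIVISION)
                part.append(pending)
            pending = note.Rest() if token == 'rest' else note.Note(token)
            dur = 1
        else:
            dur += 1
    pending.duration = duration.Duration(dur / SUBDIVISION)
    part.append(pending)
    return part

# a quarter-note C4, an eighth-note E4, and an eighth rest
frames_to_part(['C4', '__', '__', '__', 'E4', '__', 'rest', '__']).show('text')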
Example no. 13
0
def abcToStreamPart(abcHandler, inputM21=None, spannerBundle=None):
    '''
    Handler conversion of a single Part of a multi-part score.
    Results are added into the provided inputM21 object
    or a newly created Part object

    The part object is then returned.
    '''
    from music21 import abcFormat

    if inputM21 is None:
        p = stream.Part()
    else:
        p = inputM21

    if spannerBundle is None:
        #environLocal.printDebug(['mxToMeasure()', 'creating SpannerBundle'])
        spannerBundle = spanner.SpannerBundle()

    # need to call on entire handlers, as it looks for special criteria,
    # like that at least 2 regular bars are used, not just double bars
    if abcHandler.definesMeasures():
        # first, split into a list of Measures; if there is only metadata and
        # one measure, that means that no measures are defined
        barHandlers = abcHandler.splitByMeasure()
        #environLocal.printDebug(['barHandlers', len(barHandlers)])
        # merge leading metadata with each bar that precedes it
        mergedHandlers = abcFormat.mergeLeadingMetaData(barHandlers)
        #environLocal.printDebug(['mergedHandlers', len(mergedHandlers)])
    else:  # simply stick in a single list
        mergedHandlers = [abcHandler]

    # if only one merged handler, do not create measures
    if len(mergedHandlers) <= 1:
        useMeasures = False
    else:
        useMeasures = True

    # each unit in merged handlers possibly defines a Measure (with or without metadata), trailing metadata, or a single collection of metadata and note data

    barCount = 0
    measureNumber = 1
    # merged handlers are ABCHandlerBar objects, defining attributes for barlines

    for mh in mergedHandlers:
        # if use measures and the handler has notes; otherwise add to part
        #environLocal.printDebug(['abcToStreamPart', 'handler', 'left:', mh.leftBarToken, 'right:', mh.rightBarToken, 'len(mh)', len(mh)])

        if useMeasures and mh.hasNotes():
            #environLocal.printDebug(['abcToStreamPart', 'useMeasures', useMeasures, 'mh.hasNotes()', mh.hasNotes()])
            dst = stream.Measure()
            # bar tokens are already extracted from the token list and are available
            # as attributes on the handler object
            # may return None for a regular barline

            if mh.leftBarToken is not None:
                # this may be Repeat Bar subclass
                bLeft = mh.leftBarToken.getBarObject()
                if bLeft is not None:
                    dst.leftBarline = bLeft
                if mh.leftBarToken.isRepeatBracket():
                    # get any open spanners of RepeatBracket type
                    rbSpanners = spannerBundle.getByClassComplete(
                        'RepeatBracket', False)
                    # this indication is most likely an opening, as ABC does
                    # not encode second ending boundaries
                    # we can still check though:
                    if len(rbSpanners) == 0:
                        # add this measure as a component
                        rb = spanner.RepeatBracket(dst)
                        # set number, returned here
                        rb.number = mh.leftBarToken.isRepeatBracket()
                        # only append if created; otherwise, already stored
                        spannerBundle.append(rb)
                    else:  # close it here
                        rb = rbSpanners[0]  # get RepeatBracket
                        rb.addSpannedElements(dst)
                        rb.completeStatus = True
                        # this returns 1 or 2 depending on the repeat
                    # in ABC, second repeats close immediately; that is
                    # they never span more than one measure
                    if mh.leftBarToken.isRepeatBracket() == 2:
                        rb.completeStatus = True

            if mh.rightBarToken is not None:
                bRight = mh.rightBarToken.getBarObject()
                if bRight is not None:
                    dst.rightBarline = bRight
                # above returns bars and repeats; we need to look if we just
                # have repeats
                if mh.rightBarToken.isRepeat():
                    # if we have a right bar repeat, and a spanner repeat
                    # bracket is open (even if just assigned above) we need
                    # to close it now.
                    # presently, no right-bar conditions start a repeat bracket
                    rbSpanners = spannerBundle.getByClassComplete(
                        'RepeatBracket', False)
                    if len(rbSpanners) > 0:
                        rb = rbSpanners[0]  # get RepeatBracket
                        rb.addSpannedElements(dst)
                        rb.completeStatus = True
                        # this returns 1 or 2 depending on the repeat
                        # do not need to append; already in bundle
            barCount += 1
        else:
            dst = p  # store directly in a part instance

        #environLocal.printDebug([mh, 'dst', dst])
        #ql = 0 # might not be zero if there is a pickup
        # in case need to transpose due to clef indication
        postTransposition = 0
        clefSet = False
        for t in mh.tokens:
            if isinstance(t, abcFormat.ABCMetadata):
                if t.isMeter():
                    ts = t.getTimeSignatureObject()
                    if ts is not None:  # can be None
                        # should append at the right position
                        if useMeasures:  # assume at start of measures
                            dst.timeSignature = ts
                        else:
                            dst._appendCore(ts)
                elif t.isKey():
                    ks = t.getKeySignatureObject()
                    if useMeasures:  # assume at start of measures
                        dst.keySignature = ks
                    else:
                        dst._appendCore(ks)
                    # check for clef information sometimes stored in key
                    clefObj, transposition = t.getClefObject()
                    if clefObj is not None:
                        clefSet = False
                        #environLocal.printDebug(['found clef in key token:', t, clefObj, transposition])
                        if useMeasures:  # assume at start of measures
                            dst.clef = clefObj
                        else:
                            dst._appendCore(clefObj)
                        postTransposition = transposition
                elif t.isTempo():
                    mmObj = t.getMetronomeMarkObject()
                    dst._appendCore(mmObj)

            # as ABCChord is subclass of ABCNote, handle first
            elif isinstance(t, abcFormat.ABCChord):
                # may have more than notes?
                pitchNameList = []
                accStatusList = []  # accidental display status list
                for tSub in t.subTokens:
                    # notes are contained as subtokens and are already parsed
                    if isinstance(tSub, abcFormat.ABCNote):
                        pitchNameList.append(tSub.pitchName)
                        accStatusList.append(tSub.accidentalDisplayStatus)
                c = chord.Chord(pitchNameList)
                c.quarterLength = t.quarterLength
                # adjust accidental display for each contained pitch
                for pIndex in range(len(c.pitches)):
                    if c.pitches[pIndex].accidental is None:
                        continue
                    c.pitches[pIndex].accidental.displayStatus = accStatusList[
                        pIndex]
                dst._appendCore(c)

                #ql += t.quarterLength

            elif isinstance(t, abcFormat.ABCNote):
                if t.isRest:
                    n = note.Rest()
                else:
                    n = note.Note(t.pitchName)
                    if n.accidental is not None:
                        n.accidental.displayStatus = t.accidentalDisplayStatus

                n.quarterLength = t.quarterLength

                # start or end a tie at note n
                if t.tie is not None:
                    if t.tie == "start":
                        n.tie = tie.Tie(t.tie)
                        n.tie.style = "normal"
                    elif t.tie == "stop":
                        n.tie = tie.Tie(t.tie)
                ### Was: Extremely Slow for large Opus files... why?
                ### Answer: some pieces didn't close all their spanners, so
                ###         everything was in a Slur/Diminuendo, etc.
                for span in t.applicableSpanners:
                    span.addSpannedElements(n)

                if t.inGrace:
                    n = n.getGrace()

                n.articulations = []
                while len(t.artic) > 0:
                    tmp = t.artic.pop()
                    if tmp == "staccato":
                        n.articulations.append(articulations.Staccato())
                    if tmp == "upbow":
                        n.articulations.append(articulations.UpBow())
                    if tmp == "downbow":
                        n.articulations.append(articulations.DownBow())
                    if tmp == "accent":
                        n.articulations.append(articulations.Accent())
                    if tmp == "strongaccent":
                        n.articulations.append(articulations.StrongAccent())
                    if tmp == "tenuto":
                        n.articulations.append(articulations.Tenuto())

                dst._appendCore(n)
            elif isinstance(t, abcFormat.ABCSlurStart):
                p._appendCore(t.slurObj)
            elif isinstance(t, abcFormat.ABCCrescStart):
                p._appendCore(t.crescObj)
            elif isinstance(t, abcFormat.ABCDimStart):
                p._appendCore(t.dimObj)
        dst._elementsChanged()

        # append measure to part; in the case of trailing meta data
        # dst may be part, even though useMeasures is True
        if useMeasures and 'Measure' in dst.classes:
            # check for incomplete bars
            # must have a time signature in this bar, or defined recently
            # could use getTimeSignatures() on Stream

            if barCount == 1 and dst.timeSignature is not None:  # easy case
                # can only do this b/c ts is defined
                if dst.barDurationProportion() < 1.0:
                    dst.padAsAnacrusis()
                    dst.number = 0
                    #environLocal.printDebug(['incompletely filled Measure found on abc import; interpreting as a anacrusis:', 'padingLeft:', dst.paddingLeft])
            else:
                dst.number = measureNumber
                measureNumber += 1
            p._appendCore(dst)

    try:
        reBar(p, inPlace=True)
    except (ABCTranslateException, meter.MeterException, ZeroDivisionError):
        pass
    # clefs are not typically defined, but if so, are set to the first measure
    # following the meta data, or in the open stream
    if not clefSet:
        if useMeasures:  # assume at start of measures
            p.getElementsByClass('Measure')[0].clef = p.flat.bestClef()
        else:
            p._insertCore(0, p.bestClef())

    if postTransposition != 0:
        p.transpose(postTransposition, inPlace=True)

    if useMeasures and len(
            p.flat.getTimeSignatures(searchContext=False,
                                     returnDefault=False)) > 0:
        # call make beams for now; later, import beams
        #environLocal.printDebug(['abcToStreamPart: calling makeBeams'])
        try:
            p.makeBeams(inPlace=True)
        except meter.MeterException as e:
            environLocal.warn("Error in beaming...ignoring: %s" % str(e))

    # copy spanners into topmost container; here, a part
    rm = []
    for sp in spannerBundle.getByCompleteStatus(True):
        p._insertCore(0, sp)
        rm.append(sp)
    # remove from original spanner bundle
    for sp in rm:
        spannerBundle.remove(sp)
    p._elementsChanged()
    return p
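In ordinary use this function is not called directly; it is reached through the converter module when an ABC file or string is parsed. A small usage sketch, assuming an inline ABC tune (the tune itself is made up for illustration):

from music21 import converter

abcTune = '''X:1
T:Illustrative tune
M:4/4
L:1/8
K:G
G2 A2 B2 c2 | d4 B4 | c2 B2 A2 G2 | G8 |]
'''

# converter.parse tokenizes the ABC data and builds Parts/Measures through
# the abc translation routines shown above.
score = converter.parse(abcTune, format='abc')
score.show('text')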
Example no. 14
0
    def testScales01(self):
        from music21 import pitch

        #==== "fig-py01"

        # Providing a tonic makes this concrete
        sc1 = scale.MajorScale('g4')
        sc2 = scale.MajorScale('e-3')

        # Comparing Concrete and Abstract Scales
        assert (sc1 == sc2) == False
        assert (sc1.abstract == sc2.abstract) == True

        # Without arguments, getPitches() returns a single span
        assert common.pitchList(
            sc1.getPitches()) == '[G4, A4, B4, C5, D5, E5, F#5, G5]'
        assert common.pitchList(sc2.getPitches(
            'c2', 'c3')) == '[C2, D2, E-2, F2, G2, A-2, B-2, C3]'

        # As a Chord, Scale pitches gain additional functionality
        assert sc1.getChord().forteClass == '7-35'

        # Given a degree, get the pitch
        assert str(sc1.pitchFromDegree(5)) == 'D5'
        assert common.pitchList(
            sc2.pitchesFromScaleDegrees([7, 2], 'e-6',
                                        'e-9')) == '[F6, D7, F7, D8, F8, D9]'

        # Get a scale degree from a pitch
        assert sc1.getScaleDegreeFromPitch('d') == 5
        assert sc2.getScaleDegreeFromPitch('d') == 7

        # Get the next pitch given step directions
        match = [pitch.Pitch('g2')]
        for direction in [1, 1, 1, -2, 4, -1, 1, 1, 1]:
            # Append the next pitch based on the last-added pitch
            match.append(sc1.next(match[-1], direction))
        assert common.pitchList(
            match) == '[G2, A2, B2, C3, A2, E3, D3, E3, F#3, G3]'

        # Derive new scales based on a provided collection or degree
        assert str(sc1.derive(['c4', 'g4', 'b8',
                               'f2'])) == '<music21.scale.MajorScale C major>'
        assert str(sc1.deriveByDegree(
            7, 'C#')) == '<music21.scale.MajorScale D major>'

        # Methods unique to DiatonicScale subclasses
        assert str(
            sc2.getRelativeMinor()) == '<music21.scale.MinorScale C minor>'
        #==== "fig-py01" end

        #==== "fig-py02"
        sc1 = scale.PhrygianScale('g4')
        assert common.pitchList(
            sc1.getPitches()) == '[G4, A-4, B-4, C5, D5, E-5, F5, G5]'
        assert str(
            sc1.getRelativeMajor()) == '<music21.scale.MajorScale E- major>'
        assert (str(sc1.getTonic()), str(sc1.getDominant())) == ('G4', 'D5')

        sc2 = scale.HypodorianScale('a6')
        assert common.pitchList(sc2.getPitches(
            'e2', 'e3')) == '[E2, F#2, G2, A2, B2, C3, D3, E3]'
        assert str(
            sc2.getRelativeMajor()) == '<music21.scale.MajorScale G major>'
        assert (str(sc2.getTonic()), str(sc2.getDominant())) == ('A6', 'C7')

        #==== "fig-py02" end

        #==== "fig-py06"
        # see below
        #==== "fig-py06" end

        #==== "fig-py03"
        #print('\n\nfig-py03')

        sc1 = scale.HarmonicMinorScale('a3')
        assert common.pitchList(
            sc1.getPitches()) == '[A3, B3, C4, D4, E4, F4, G#4, A4]'
        assert (str(sc1.getTonic()), str(sc1.getDominant())) == ('A3', 'E4')

        s = stream.Stream()
        for d in [1, 3, 2, 1, 6, 5, 8, 7, 8]:
            s.append(
                note.Note(sc1.pitchFromDegree(d, equateTermini=False),
                          type='eighth'))
        #s.show()
        #==== "fig-py03" end

        #==== "fig-py04"
        import random

        sc1 = scale.MelodicMinorScale('c4')
        assert common.pitchList(sc1.getPitches(
            direction='ascending')) == '[C4, D4, E-4, F4, G4, A4, B4, C5]'
        assert common.pitchList(
            sc1.getPitches('c3', 'c5', direction='descending')
        ) == '[C5, B-4, A-4, G4, F4, E-4, D4, C4, B-3, A-3, G3, F3, E-3, D3, C3]'
        assert (str(sc1.getTonic()), str(sc1.getDominant())) == ('C4', 'G4')

        s = stream.Stream()
        p = None
        for i in range(8):  # was 16, but sometimes exceeded scale length.
            direction = random.choice([-1, 1])
            for j in range(2):
                p = sc1.next(p, direction)
                s.append(note.Note(p, quarterLength=.25))
        #s.show()
        #==== "fig-py04" end

        #==== "fig-py05"
        sc1 = scale.OctatonicScale('e3', 'm2')
        assert common.pitchList(
            sc1.getPitches()) == '[E3, F3, G3, A-3, B-3, C-4, D-4, D4, E4]'
        sc2 = scale.OctatonicScale('e3', 'M2')
        assert common.pitchList(
            sc2.getPitches()) == '[E3, F#3, G3, A3, B-3, C4, D-4, E-4, F-4]'

        part1 = stream.Part()
        part2 = stream.Part()
        durPart1 = [1, 1, .5, .5, 1]
        durPart2 = [3, 1]
        degrees = list(range(1, 9))
        for unused in range(4):
            random.shuffle(degrees)
            random.shuffle(durPart1)
            random.shuffle(durPart2)
            i = 0
            for dur in durPart1:
                part1.append(
                    note.Note(sc2.pitchFromDegree(degrees[i]),
                              quarterLength=dur))
                i += 1
            for dur in durPart2:
                part2.append(
                    note.Note(sc2.pitchFromDegree(degrees[i],
                                                  minPitch='c2',
                                                  maxPitch='c3'),
                              quarterLength=dur))
                i += 1
        s = stream.Score()
        s.insert(0, part1)
        s.insert(0, part2)
        #s.show()

        # add notation example; perhaps create tri-chords from scale-completing selections
        #==== "fig-py05" end

        #sc = scale.SieveScale('c2', '(-3@2 & 4) | (-3@1 & 4@1) | (3@2 & 4@2) | (-3 & 4@3)')

        #==== "fig-py07"
        # add examples
        sc1 = scale.SieveScale('c4', '3@0|4@0')
        self.assertEqual(common.pitchList(sc1.getPitches()),
                         '[C4, E-4, E4, F#4, G#4, A4, C5]')

        sc2 = scale.SieveScale('c4', '5@0|7@0')
        self.assertEqual(
            common.pitchList(sc2.getPitches()),
            '[C4, F4, G4, B-4, D5, E-5, G#5, A5, C#6, E6, F#6, B6]')

        s = stream.Stream()
        pCollection = sc2.getPitches('c3', 'c7')
        random.shuffle(pCollection)
        for p in pCollection:
            s.append(note.Note(p, type='16th'))
        #s.show()
        #==== "fig-py07" end

        #==== "fig-py08"

        sc1 = scale.RagAsawari('g3')
        self.assertEqual(
            common.pitchList(sc1.getPitches(direction='ascending')),
            '[G3, A3, C4, D4, E-4, G4]')
        self.assertEqual(
            common.pitchList(sc1.getPitches(direction='descending')),
            '[G4, F4, E-4, D4, C4, B-3, A3, G3]')

        sc2 = scale.RagMarwa('g3')
        assert common.pitchList(sc2.getPitches(direction='ascending')
                                ) == '[G3, A-3, B3, C#4, E4, F#4, E4, G4, A-4]'
        assert common.pitchList(
            sc2.getPitches(direction='descending'
                           )) == '[A-4, G4, A-4, F#4, E4, C#4, B3, A-3, G3]'

        p1 = None
        s = stream.Stream()
        for direction in ([1] * 10) + ([-1] * 8) + ([1] * 4) + ([-1] * 3) + ([1] * 4):
            p1 = sc1.next(p1, direction)
            s.append(note.Note(p1, quarterLength=.25))
        #s.show()

        p1 = None
        s = stream.Stream()
        for direction in ([1] * 10) + ([-1] * 8) + ([1] * 4) + ([-1] * 3) + ([1] * 4):
            p1 = sc2.next(p1, direction)
            s.append(note.Note(p1, quarterLength=.25))
        #s.show()

        #==== "fig-py08" end

        #==== "fig-py09"
        #import random
        sc1 = scale.WeightedHexatonicBlues('c3')
        p = 'c3'
        s = stream.Stream()
        for unused in range(32):
            p = sc1.next(p, random.choice([-1, 1]))
            n = note.Note(p, quarterLength=random.choice([.5, .25, .25]))
            s.append(n)
Example no. 15
0
    def generateRealizationFromPossibilityProgression(self, possibilityProgression):
        '''
        Generates a realization as a :class:`~music21.stream.Score` given a possibility progression.        
        '''
        sol = stream.Score()
        
        bassLine = stream.Part()
        bassLine.append([copy.deepcopy(self._keySig), copy.deepcopy(self._inTime)])
        r = None
        if self._paddingLeft != 0.0:
            r = note.Rest(quarterLength = self._paddingLeft)
            bassLine.append(copy.deepcopy(r))
            
        if self.keyboardStyleOutput:
            rightHand = stream.Part()
            sol.insert(0.0, rightHand)
            rightHand.append([copy.deepcopy(self._keySig), copy.deepcopy(self._inTime)])
            if r is not None:
                rightHand.append(copy.deepcopy(r))

            for segmentIndex in range(len(self._segmentList)):
                possibA = possibilityProgression[segmentIndex]
                bassNote = self._segmentList[segmentIndex].bassNote
                bassLine.append(copy.deepcopy(bassNote))  
                rhPitches = possibA[0:-1]                           
                rhChord = chord.Chord(rhPitches)
                rhChord.quarterLength = self._segmentList[segmentIndex].quarterLength
                rightHand.append(rhChord)
            rightHand.insert(0.0, clef.TrebleClef())
            
            rightHand.makeNotation(inPlace=True, cautionaryNotImmediateRepeat=False)
            if r is not None:
                rightHand[0].pop(3)
                rightHand[0].padAsAnacrusis()
                
        else: # Chorale-style output
            upperParts = []
            for partNumber in range(len(possibilityProgression[0]) - 1):
                fbPart = stream.Part()
                sol.insert(0.0, fbPart)
                fbPart.append([copy.deepcopy(self._keySig), copy.deepcopy(self._inTime)])
                if r is not None:
                    fbPart.append(copy.deepcopy(r))
                upperParts.append(fbPart)

            for segmentIndex in range(len(self._segmentList)):
                possibA = possibilityProgression[segmentIndex]
                bassNote = self._segmentList[segmentIndex].bassNote
                bassLine.append(copy.deepcopy(bassNote))  

                for partNumber in range(len(possibA) - 1):
                    n1 = note.Note(possibA[partNumber])
                    n1.quarterLength = self._segmentList[segmentIndex].quarterLength
                    upperParts[partNumber].append(n1)
                    
            for upperPart in upperParts:
                upperPart.insert(0.0, upperPart.bestClef(True))
                upperPart.makeNotation(inPlace=True, cautionaryNotImmediateRepeat=False)
                if r is not None:
                    upperPart[0].pop(3)
                    upperPart[0].padAsAnacrusis()

                     
        bassLine.insert(0.0, clef.BassClef())
        bassLine.makeNotation(inPlace=True, cautionaryNotImmediateRepeat=False)
        if r is not None:
            bassLine[0].pop(3)
            bassLine[0].padAsAnacrusis()           
        sol.insert(0.0, bassLine)
        return sol
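This method is normally invoked indirectly: a figured-bass line is built, realized, and the resulting Realization object calls generateRealizationFromPossibilityProgression for the chosen possibility progression. A hedged sketch of that workflow, assuming the music21.figuredBass.realizer API:

from music21 import key, meter, note
from music21.figuredBass import realizer

fbLine = realizer.FiguredBassLine(key.Key('C'), meter.TimeSignature('4/4'))
fbLine.addElement(note.Note('C3'))
fbLine.addElement(note.Note('D3'), '4,3')
fbLine.addElement(note.Note('E3'), '6')
fbLine.addElement(note.Note('F3'), '6')

fbRealization = fbLine.realize()
fbRealization.keyboardStyleOutput = True   # False exercises the chorale-style branch
solution = fbRealization.generateRandomRealization()  # a stream.Score
solution.show('text')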
Example no. 16
0
def PIG2Stream(fname, beam=0, time_unit=.5, fixtempo=0):
    """
    Convert a PIG text file to a music21 Stream object.
    time_unit must be a multiple of 2.
    beam = 0, right hand
    beam = 1, left hand.
    """
    from music21 import stream, note, chord
    from music21.articulations import Fingering
    import numpy as np

    with open(fname, "r") as f:
        lines = f.readlines()

    #work out note type from distribution of durations
    # triplets are squashed to the closest figure
    durations = []
    firstonset = 0
    blines=[]
    for l in lines:
        if l.startswith('//'): continue
        _, onset, offset, name, _, _, channel, _ = l.split()
        onset, offset = float(onset), float(offset)
        if beam != int(channel): continue
        if not firstonset:
            firstonset = onset
        if offset-onset<0.0001: continue
        durations.append(offset-onset)
        blines.append(l)
    durations = np.array(durations)
    logdurs = -np.log2(durations)
    mindur = np.min(logdurs)
    expos = (logdurs-mindur).astype(int)
    if np.max(expos) > 3:
        mindur = mindur + 1
    #print(durations, '\nexpos=',expos, '\nmindur=', mindur)

    sf = stream.Part()
    sf.id = beam

    # first rest
    if not fixtempo and firstonset:
        r = note.Rest()
        logdur = -np.log2(firstonset)
        r.duration.quarterLength = 1.0/time_unit/pow(2, int(logdur-mindur))
        sf.append(r)

    n = len(blines)
    for i in range(n):
        if blines[i].startswith('//'): continue
        _, onset, offset, name, _, _, _, finger = blines[i].split()
        onset, offset = float(onset), float(offset)
        name = name.replace('b', '-')

        chordnotes = [name]
        for j in range(1, 5):
            if i+j<n:
                noteid1, onset1, offset1, name1, _, _, _, finger1 = blines[i+j].split()
                onset1 = float(onset1)
                if onset1 == onset:
                    name1 = name1.replace('b', '-')
                    chordnotes.append(name1)

        if len(chordnotes) > 1:
            an = chord.Chord(chordnotes)
        else:
            an = note.Note(name)
            if '_' not in finger:
                x = Fingering(abs(int(finger)))
                x.style.absoluteY = 20
                an.articulations.append(x)  # only attach when a finger number exists

        if fixtempo:
            an.duration.quarterLength = fixtempo
        else:
            logdur = -np.log2(offset - onset)
            an.duration.quarterLength = 1.0/time_unit/pow(2, int(logdur-mindur))
        #print('note/chord:', an, an.duration.quarterLength, an.duration.type, 't=',onset)

        sf.append(an)

        # rest up to the next
        if i+1<n:
            _, onset1, _, _, _, _, _, _ = blines[i+1].split()
            onset1 = float(onset1)
            if onset1 - offset > 0:
                r = note.Rest()
                if fixtempo:
                    r.duration.quarterLength = fixtempo
                logdur = -np.log2(onset1 - offset)
                d = int(logdur-mindur)
                if d<4:
                    r.duration.quarterLength = 1.0/time_unit/pow(2, d)
                    sf.append(r)
    return sf
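A usage sketch: the file name below is hypothetical, and each hand (PIG channel) is converted separately and combined into one two-part Score.

from music21 import stream

# 'fingering_001.txt' is a placeholder for a real PIG-format file.
rightHand = PIG2Stream('fingering_001.txt', beam=0, time_unit=0.5)
leftHand = PIG2Stream('fingering_001.txt', beam=1, time_unit=0.5)

score = stream.Score()
score.insert(0, rightHand)
score.insert(0, leftHand)
score.write('musicxml', fp='fingering_001.xml')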
Example no. 17
0
    def prepStream(self):
        '''
        Prepares a music21 stream for the harmonic analysis to go into.
        Specifically: creates the score, part, and measure streams,
        as well as some (the available) metadata based on the original TSV data.
        Works like the .template() method,
        except that we don't have a score to base the template on as such.
        '''
        s = stream.Score()
        p = stream.Part()

        s.insert(0, metadata.Metadata())

        firstEntry = self.chordList[0]  # Any entry will do
        s.metadata.opusNumber = firstEntry.op
        s.metadata.number = firstEntry.no
        s.metadata.movementNumber = firstEntry.mov
        s.metadata.title = 'Op' + firstEntry.op + '_No' + firstEntry.no + '_Mov' + firstEntry.mov

        startingKeySig = str(self.chordList[0].global_key)
        ks = key.Key(startingKeySig)
        p.insert(0, ks)

        currentTimeSig = str(self.chordList[0].timesig)
        ts = meter.TimeSignature(currentTimeSig)
        p.insert(0, ts)

        currentMeasureLength = ts.barDuration.quarterLength

        currentOffset = 0

        previousMeasure: int = self.chordList[0].measure - 1  # Covers pickups
        for entry in self.chordList:
            if entry.measure == previousMeasure:
                continue
            elif entry.measure != previousMeasure + 1:  # Not every measure has a chord change.
                for mNo in range(previousMeasure + 1, entry.measure):
                    m = stream.Measure(number=mNo)
                    m.offset = currentOffset + currentMeasureLength
                    p.insert(m)

                    currentOffset = m.offset
                    previousMeasure = mNo
            else:  # entry.measure == previousMeasure + 1
                m = stream.Measure(number=entry.measure)
                m.offset = entry.totbeat
                p.insert(m)
                if entry.timesig != currentTimeSig:
                    newTS = meter.TimeSignature(entry.timesig)
                    m.insert(entry.beat - 1, newTS)

                    currentTimeSig = entry.timesig
                    currentMeasureLength = newTS.barDuration.quarterLength

                previousMeasure = entry.measure
                currentOffset = entry.totbeat

        s.append(p)

        self.preparedStream = s

        return s
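The same skeleton can be sketched standalone, without the TSV-derived chordList; the metadata values, key, meter, and measure count below are placeholders only:

from music21 import key, metadata, meter, stream

s = stream.Score()
s.insert(0, metadata.Metadata())
s.metadata.title = 'Op1_No1_Mov1'   # placeholder title

p = stream.Part()
p.insert(0, key.Key('C'))
ts = meter.TimeSignature('3/4')
p.insert(0, ts)

# four empty measures positioned by offset, as prepStream does
for i in range(4):
    m = stream.Measure(number=i + 1)
    m.offset = i * ts.barDuration.quarterLength
    p.insert(m)

s.append(p)
s.show('text')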
Example no. 18
0
def toPart(volpianoText, *, breaksToLayout=False):
    '''
    Returns a music21 Part from volpiano text.

    >>> veniSancti = volpiano.toPart('1---c--d---f--d---ed--c--d---f'
    ...                              + '---g--h--j---hgf--g--h---')
    >>> veniSancti.show('text')
    {0.0} <music21.stream.Measure 0 offset=0.0>
        {0.0} <music21.clef.TrebleClef>
        {0.0} <music21.note.Note C>
        {1.0} <music21.note.Note D>
        {2.0} <music21.note.Note F>
        {3.0} <music21.note.Note D>
        {4.0} <music21.note.Note E>
        {5.0} <music21.note.Note D>
        {6.0} <music21.volpiano.Neume <music21.note.Note E><music21.note.Note D>>
        {6.0} <music21.note.Note C>
        {7.0} <music21.note.Note D>
        {8.0} <music21.note.Note F>
        {9.0} <music21.note.Note G>
        {10.0} <music21.note.Note A>
        {11.0} <music21.note.Note B>
        {12.0} <music21.note.Note A>
        {13.0} <music21.note.Note G>
        {14.0} <music21.note.Note F>
        {15.0} <music21.volpiano.Neume <music21.note.Note A><music21.note.Note G>>
        {15.0} <music21.note.Note G>
        {16.0} <music21.note.Note A>

    Clefs!

    >>> clefTest = volpiano.toPart('1---c--2---c')
    >>> clefTest.show('text')
    {0.0} <music21.stream.Measure 0 offset=0.0>
        {0.0} <music21.clef.TrebleClef>
        {0.0} <music21.note.Note C>
        {1.0} <music21.clef.BassClef>
        {1.0} <music21.note.Note E>
    >>> for n in clefTest.recurse().notes:
    ...     n.nameWithOctave
    'C4'
    'E2'

    Flats and Naturals:

    >>> accTest = volpiano.toPart('1---e--we--e--We--e')
    >>> [n.name for n in accTest.recurse().notes]
    ['E', 'E-', 'E-', 'E', 'E']

    Breaks and barlines

    >>> breakTest = volpiano.toPart('1---e-7-e-77-e-777-e-3-e-4')
    >>> breakTest.show('text')
    {0.0} <music21.stream.Measure 0 offset=0.0>
        {0.0} <music21.clef.TrebleClef>
        {0.0} <music21.note.Note E>
        {1.0} <music21.volpiano.LineBreak object at 0x105250fd0>
        {1.0} <music21.note.Note E>
        {2.0} <music21.volpiano.PageBreak object at 0x105262128>
        {2.0} <music21.note.Note E>
        {3.0} <music21.volpiano.ColumnBreak object at 0x105262240>
        {3.0} <music21.note.Note E>
        {4.0} <music21.bar.Barline type=regular>
    {4.0} <music21.stream.Measure 0 offset=4.0>
        {0.0} <music21.note.Note E>
        {1.0} <music21.bar.Barline type=double>


    As layout objects using breaksToLayout=True

    >>> breakTest = volpiano.toPart('1---e-7-e-77-e-777-e-3-e-4', breaksToLayout=True)
    >>> breakTest.show('text')
    {0.0} <music21.stream.Measure 0 offset=0.0>
        {0.0} <music21.clef.TrebleClef>
        {0.0} <music21.note.Note E>
        {1.0} <music21.layout.SystemLayout>
        {1.0} <music21.note.Note E>
        {2.0} <music21.layout.PageLayout>
        {2.0} <music21.note.Note E>
        {3.0} <music21.volpiano.ColumnBreak object at 0x105262240>
        {3.0} <music21.note.Note E>
        {4.0} <music21.bar.Barline type=regular>
    {4.0} <music21.stream.Measure 0 offset=4.0>
        {0.0} <music21.note.Note E>
        {1.0} <music21.bar.Barline type=double>


    Liquescence test:

    >>> breakTest = volpiano.toPart('1---e-E-')
    >>> breakTest.recurse().notes[0].editorial.liquescence
    False
    >>> breakTest.recurse().notes[0].notehead
    'normal'
    >>> breakTest.recurse().notes[1].editorial.liquescence
    True
    >>> breakTest.recurse().notes[1].notehead
    'x'

    Changed in v5.7 -- corrected spelling of liquescence.
    '''
    p = stream.Part()
    m = stream.Measure()

    currentMeasure = m
    currentNeumeSpanner = None
    noteThatWouldGoInSpanner = None
    lastClef = clef.TrebleClef()
    continuousNumberOfBreakTokens = 0

    bIsFlat = False
    eIsFlat = False

    for token in volpianoText:
        if token == '7':
            continuousNumberOfBreakTokens += 1
            continue
        elif continuousNumberOfBreakTokens > 0:
            if not breaksToLayout:  # default
                breakClass = classByNumBreakTokens[
                    continuousNumberOfBreakTokens]
                breakToken = breakClass()  # pylint: disable=not-callable
            else:
                breakClass = classByNumBreakTokensLayout[
                    continuousNumberOfBreakTokens]
                if continuousNumberOfBreakTokens < 3:
                    breakToken = breakClass(isNew=True)  # pylint: disable=not-callable
                else:
                    breakToken = breakClass()  # pylint: disable=not-callable

            currentMeasure.append(breakToken)

        continuousNumberOfBreakTokens = 0

        if token == '-':
            noteThatWouldGoInSpanner = None
            if currentNeumeSpanner:
                currentMeasure.append(currentNeumeSpanner)
                currentNeumeSpanner = None
            continue

        if token in '1234':
            noteThatWouldGoInSpanner = None
            currentNeumeSpanner = None

        if token in '12':
            if token == '1':
                c = clef.TrebleClef()
            else:
                c = clef.BassClef()

            lastClef = c
            m.append(c)

        elif token in '34':
            bl = bar.Barline()
            if token == '4':
                bl.type = 'double'
            m.rightBarline = bl
            p.append(m)
            m = stream.Measure()

        elif token in normalPitches or token in liquescentPitches:
            n = note.Note()
            n.stemDirection = 'noStem'

            if token in normalPitches:
                distanceFromLowestLine = normalPitches.index(token) - 5
                n.editorial.liquescence = False
            else:
                distanceFromLowestLine = liquescentPitches.index(token) - 5
                n.notehead = 'x'
                n.editorial.liquescence = True

            clefLowestLine = lastClef.lowestLine
            diatonicNoteNum = clefLowestLine + distanceFromLowestLine

            n.pitch.diatonicNoteNum = diatonicNoteNum
            if n.pitch.step == 'B' and bIsFlat:
                n.pitch.accidental = pitch.Accidental('flat')
            elif n.pitch.step == 'E' and eIsFlat:
                n.pitch.accidental = pitch.Accidental('flat')

            m.append(n)

            if noteThatWouldGoInSpanner is not None:
                currentNeumeSpanner = Neume([noteThatWouldGoInSpanner, n])
                noteThatWouldGoInSpanner = None
            else:
                noteThatWouldGoInSpanner = n

        elif token in accidentalTokens:
            if token.lower() in eflatTokens and token in naturalTokens:
                eIsFlat = False
            elif token.lower() in bflatTokens and token in naturalTokens:
                bIsFlat = False
            elif token.lower() in eflatTokens and token in flatTokens:
                eIsFlat = True
            elif token.lower() in bflatTokens and token in flatTokens:
                bIsFlat = True
            else:  # pragma: no cover
                raise VolpianoException('Unknown accidental: ' + token +
                                        ': Should not happen')

    if continuousNumberOfBreakTokens > 0:
        breakClass = classByNumBreakTokens[continuousNumberOfBreakTokens]
        breakToken = breakClass()  # pylint: disable=not-callable
        currentMeasure.append(breakToken)

    if m:
        p.append(m)

    return p
Example no. 19
0
    def testShowAllTypes(self):
        '''
        show all known types to display
        
        tests fromMusic21Object()
        '''
        from music21 import scale
        from music21 import chord
        from music21 import duration
        from music21 import dynamics
        from music21 import meter
        from music21 import pitch
        
        m = stream.Measure()
        n = note.Note("D#6")
        m.repeatAppend(n, 6)
        m.show()

        s = stream.Stream()
        s.repeatAppend(n, 6)
        s.show()
        
        s = stream.Score()
        s.repeatAppend(n, 6)
        s.show()
        
        s = stream.Score()
        p = stream.Part()
        p.repeatAppend(n, 6)
        p2 = stream.Part()
        p2.repeatAppend(n, 6)
        s.insert(0, p)
        s.insert(0, p2)
        s.show()
        #emptyStream
        s = stream.Stream()
        s.show()
        p2.show()

        n.show()
        
        c = chord.Chord(['C3','D4','E5'])
        c.show()
        
        r = note.Rest()
        r.show()
        
        p = pitch.Pitch()
        p.show()
        
        d = duration.Duration(2.0)
        d.show()
        
        #empty duration! shows blank 4/4 measure, maybe with default rest.
        d = duration.Duration()
        d.show()

        mf = dynamics.Dynamic('mf')
        mf.show()
        
        cm = scale.MajorScale('C')
        cm.show()
        
        o = scale.OctatonicScale("C#4")
        o.show()
        
        ts = meter.TimeSignature('3/4')
        ts.show()
Example no. 20
0
    def matchingNotes(
        self,
        scoreStream,
        transcribedScore,
        notePrediction,
        lastNotePosition,
    ):
        from music21 import audioSearch

        # Analyzing streams
        tn_recording = int(len(transcribedScore.flatten().notesAndRests))
        totScores = []
        beginningData = []
        lengthData = []
        END_OF_SCORE = False
        # take 10% more of samples
        tn_window = int(math.ceil(tn_recording * 1.1))
        hop = int(math.ceil(tn_window / 4))
        if hop == 0:
            iterations = 1
        else:
            iterations = int((math.floor(len(scoreStream) / hop))
                             - math.ceil(tn_window / hop))

        for i in range(iterations):
            excerpt = scoreStream[i * hop + 1:i * hop + tn_recording + 1]
            scNotes = stream.Part(excerpt)
            name = str(i)
            beginningData.append(i * hop + 1)
            lengthData.append(tn_recording)
            scNotes.id = name
            totScores.append(scNotes)
        listOfParts = search.approximateNoteSearchWeighted(
            transcribedScore.flatten().notesAndRests.stream(), totScores)

        # decision process
        if notePrediction > len(scoreStream) - tn_recording - hop - 1:
            notePrediction = len(scoreStream) - tn_recording - hop - 1
            END_OF_SCORE = True
            environLocal.printDebug('LAST PART OF THE SCORE')

        # lastCountdown = self.countdown
        position, self.countdown = audioSearch.decisionProcess(
            listOfParts,
            notePrediction,
            beginningData,
            lastNotePosition,
            self.countdown,
            self.firstNotePage,
            self.lastNotePage,
        )

        totalLength = 0
        number = int(listOfParts[position].id)

        if self.silencePeriod is True and self.silencePeriodCounter < 5:
            # print(lastCountdown, self.countdown, lastNotePosition,
            #    beginningData[number], lengthData[number])
            environLocal.printDebug('All rest period')
            self.countdown -= 1

        if self.countdown != 0:
            probabilityHit = 0
        else:
            probabilityHit = listOfParts[position].matchProbability

        unused_listOfParts2 = search.approximateNoteSearch(
            transcribedScore.flatten().notesAndRests.stream(), totScores)
        unused_listOfParts3 = search.approximateNoteSearchNoRhythm(
            transcribedScore.flatten().notesAndRests.stream(), totScores)
        unused_listOfParts4 = search.approximateNoteSearchOnlyRhythm(
            transcribedScore.flatten().notesAndRests.stream(), totScores)
        # print('PROBABILITIES:',)
        # print('pitches and durations weighted (current)',
        #     listOfParts[position].matchProbability,)
        # print('pitches and durations without weighting',
        #     listOfParts2[position].matchProbability,)
        # print('pitches', listOfParts3[position].matchProbability,)
        # print('durations', listOfParts4[position].matchProbability)

        for i in range(len(totScores[number])):
            totalLength = totalLength + totScores[number][i].quarterLength

        if self.countdown == 0 and self.silencePeriodCounter == 0:
            lastNotePosition = beginningData[number] + lengthData[number]

        return totalLength, lastNotePosition, probabilityHit, END_OF_SCORE
Example no. 21
0
    def partScoreFromSystemScore(self, systemScore):
        '''
        Take a :class:`~music21.stream.Score` object which is organized
        by Systems and return a new `Score` object which is organized by
        Parts.
        '''
        # this line is redundant currently, since all we have in systemScore
        # are Systems, but later there will be other things.
        systemStream = systemScore.getElementsByClass('System')
        partDictById = {}
        for thisSystem in systemStream:
            # this line is redundant currently, since all we have in
            # thisSystem are Parts, but later there will be other things.
            systemOffset = systemScore.elementOffset(thisSystem)
            partStream = thisSystem.getElementsByClass('Part')
            for j, thisPart in enumerate(partStream):
                if thisPart.id not in partDictById:
                    newPart = stream.Part()
                    newPart.id = thisPart.id
                    partDictById[thisPart.id] = {'part': newPart, 'number': j}
                else:
                    newPart = partDictById[thisPart.id]['part']
                for el in thisPart:  # no need for recurse...
                    newPart.coreInsert(common.opFrac(el.offset + systemOffset),
                                       el)
                newPart.coreElementsChanged()
        newScore = stream.Score()
        # ORDERED DICT
        parts = [None for i in range(len(partDictById))]
        for partId in partDictById:
            partDict = partDictById[partId]
            parts[partDict['number']] = partDict['part']
        for p in parts:
            # remove redundant Clef and KeySignatures
            if p is None:
                print('part entries do not match partDict!')
                continue
            clefs = p.getElementsByClass('Clef')
            keySignatures = p.getElementsByClass('KeySignature')
            lastClef = None
            lastKeySignature = None
            for c in clefs:
                if c == lastClef:
                    p.remove(c)
                else:
                    lastClef = c
            for ks in keySignatures:
                if ks == lastKeySignature:
                    p.remove(ks)
                else:
                    lastKeySignature = ks
            p.makeMeasures(inPlace=True)
            # for m in p.getElementsByClass('Measure'):
            #    barLines = m.getElementsByClass('Barline')
            #    for bl in barLines:
            #        blOffset = bl.offset
            #        if blOffset == 0.0:
            #            m.remove(bl)
            #            m.leftBarline = bl
            #        elif blOffset == m.highestTime:
            #            m.remove(bl)
            #            m.rightBarline = bl  # will not yet work for double repeats!

            newScore.coreInsert(0, p)
        newScore.coreElementsChanged()
        return newScore
Example no. 22
0
    def repeatTranscription(self):
        '''
        First, it records from the microphone (or from a file when used for
        testing). Later, it processes the signal in order to detect the pitches.
        It converts them into music21 objects and compares them with the score.
        It finds the best matching position of the recorded signal with the
        score, and decides, depending on matching accuracy, the last note
        predicted and some other parameters, in which position the recorded
        signal is.

        It returns a value that is False if the song has not finished, or a
        true value if there has been a problem, such as several consecutive bad
        matches, or if the score has finished.

        >>> from music21.audioSearch import scoreFollower
        >>> scoreNotes = ' '.join(['c4', 'd', 'e', 'f', 'g', 'a', 'b', "c'", 'c', 'e',
        ...     'g', "c'", 'a', 'f', 'd', 'c#', 'd#', 'f#', 'c', 'e', 'g', "c'",
        ...     'a', 'f', 'd', 'c#', 'd#', 'f#'])
        >>> scNotes = converter.parse('tinynotation: 4/4 ' + scoreNotes, makeNotation=False)
        >>> ScF = scoreFollower.ScoreFollower(scoreStream=scNotes)
        >>> ScF.useMic = False
        >>> import os #_DOCS_HIDE
        >>> ScF.waveFile = str(common.getSourceFilePath() #_DOCS_HIDE
        ...                 / 'audioSearch' / 'test_audio.wav') #_DOCS_HIDE
        >>> #_DOCS_SHOW ScF.waveFile = 'test_audio.wav'
        >>> ScF.seconds_recording = 10
        >>> ScF.useScale = scale.ChromaticScale('C4')
        >>> ScF.currentSample = 0
        >>> exitType = ScF.repeatTranscription()
        >>> print(exitType)
        False
        >>> print(ScF.lastNotePosition)
        10

        '''
        from music21 import audioSearch

        # print('WE STAY AT:',)
        # print(self.lastNotePosition, len(self.scoreNotesOnly),)
        # print('en percent %s %%' % (self.lastNotePosition * 100 / len(self.scoreNotesOnly)),)
        # print(' this search begins at: ', self.startSearchAtSlot,)
        # print('countdown %s' % self.countdown)
        # print('Measure last note', self.scoreStream[self.lastNotePosition].measureNumber)

        environLocal.printDebug('repeat transcription starting')

        if self.useMic is True:
            freqFromAQList = audioSearch.getFrequenciesFromMicrophone(
                length=self.seconds_recording,
                storeWaveFilename=None,
            )
        else:
            getFreqFunc = audioSearch.getFrequenciesFromPartialAudioFile
            freqFromAQList, self.waveFile, self.currentSample = getFreqFunc(
                self.waveFile,
                length=self.seconds_recording,
                startSample=self.currentSample,
            )
            if self.totalFile == 0:
                self.totalFile = self.waveFile.getnframes()

        environLocal.printDebug('got Frequencies from Microphone')

        time_start = time()
        detectedPitchesFreq = audioSearch.detectPitchFrequencies(freqFromAQList, self.useScale)
        detectedPitchesFreq = audioSearch.smoothFrequencies(detectedPitchesFreq)
        detectedPitchObjects, unused_listplot = audioSearch.pitchFrequenciesToObjects(
            detectedPitchesFreq, self.useScale)
        notesList, durationList = audioSearch.joinConsecutiveIdenticalPitches(
            detectedPitchObjects)
        self.silencePeriodDetection(notesList)
        environLocal.printDebug('made it to here...')
        excerpt = self.scoreStream[self.lastNotePosition:self.lastNotePosition + len(notesList)]
        scNotes = stream.Part(excerpt)
        # print('1')
        transcribedScore, self.qle = audioSearch.notesAndDurationsToStream(
            notesList,
            durationList,
            scNotes=scNotes,
            qle=self.qle,
        )
        # print('2')
        totalLengthPeriod, self.lastNotePosition, prob, END_OF_SCORE = self.matchingNotes(
            self.scoreStream,
            transcribedScore,
            self.startSearchAtSlot,
            self.lastNotePosition,
        )
        # print('3')
        self.processing_time = time() - time_start
        environLocal.printDebug('and even to here...')
        if END_OF_SCORE is True:
            exitType = 'endOfScore'  # 'endOfScore'
            return exitType

        # estimate position, or exit if we can't at all...
        exitType = self.updatePosition(prob, totalLengthPeriod, time_start)

        if self.useMic is False:  # reading from the disc (only for TESTS)
            # skip ahead the processing time.
            getFreqFunc = audioSearch.getFrequenciesFromPartialAudioFile
            freqFromAQList, junk, self.currentSample = getFreqFunc(
                self.waveFile,
                length=self.processing_time,
                startSample=self.currentSample,
            )

        if self.lastNotePosition > len(self.scoreNotesOnly):
            # print('finishedPerforming')
            exitType = 'finishedPerforming'
        elif (self.useMic is False and self.currentSample >= self.totalFile):
            # print('waveFileEOF')
            exitType = 'waveFileEOF'

        environLocal.printDebug(f'about to return -- exitType: {exitType} ')
        return exitType
Example no. 23
0
def musedataPartToStreamPart(museDataPart, inputM21=None):
    '''Translate a musedata part to a :class:`~music21.stream.Part`.
    '''
    from music21 import stream
    from music21 import note
    from music21 import tempo

    if inputM21 is None:
        s = stream.Score()
    else:
        s = inputM21

    p = stream.Part()
    p.id = museDataPart.getPartName()
    p.partName = p.id

    # create and store objects
    mdmObjs = museDataPart.getMeasures()

    #environLocal.printDebug(['first measure parent', mdmObjs[0].parent])

    barCount = 0
    # get each measure
    # store last Note/Chord/Rest for tie comparisons; span measures
    eLast = None
    for mIndex, mdm in enumerate(mdmObjs):
        #environLocal.printDebug(['processing:', mdm.src])
        if not mdm.hasNotes():
            continue

        if mdm.hasVoices():
            hasVoices = True
            vActive = stream.Voice()
        else:
            hasVoices = False
            vActive = None

        #m = stream.Measure()
        # get a measure object with a left configured bar line
        if mIndex <= len(mdmObjs) - 2:
            mdmNext = mdmObjs[mIndex + 1]
        else:
            mdmNext = None

        m = mdm.getMeasureObject()

        # conditions for a final measure definition defining the last bar
        if mdmNext is not None and not mdmNext.hasNotes():
            #environLocal.printDebug(['got mdmNext not none and not has notes'])
            # get bar from next measure definition
            m.rightBarline = mdmNext.getBarObject()

        if barCount == 0:  # only for when no bars are defined
            # the parent of the measure is the part
            c = mdm.parent.getClefObject()
            if c is not None:
                m.clef = c
            m.timeSignature = mdm.parent.getTimeSignatureObject()
            m.keySignature = mdm.parent.getKeySignature()
            # look for a tempo indication
            directive = mdm.parent.getDirective()
            if directive is not None:
                tt = tempo.TempoText(directive)
                # if this appears to be a tempo indication, then get a metronome mark
                if tt.isCommonTempoText():
                    mm = tt.getMetronomeMark()
                    m.insert(0, mm)

        # get all records; may be notes or note components
        mdrObjs = mdm.getRecords()
        # store pairs of pitches and durations for chording after a
        # new note has been found
        pendingRecords = []

        # get notes in each record
        for i in range(len(mdrObjs)):
            mdr = mdrObjs[i]
            #environLocal.printDebug(['processing:', mdr.src])

            if mdr.isBack():
                # the current use of back assumes that we always
                # return to the start of the measure; this may not be the case
                if pendingRecords != []:
                    eLast = _processPending(hasVoices, pendingRecords, eLast,
                                            m, vActive)
                    pendingRecords = []

                # every time we encounter a back, we need to store
                # our existing voice and create a new one
                m.insert(0, vActive)
                vActive = stream.Voice()

            if mdr.isRest():
                #environLocal.printDebug(['got mdr rest, parent:', mdr.parent])
                # check for pending records first
                if pendingRecords != []:
                    eLast = _processPending(hasVoices, pendingRecords, eLast,
                                            m, vActive)
                    pendingRecords = []
                # create rest after clearing pending records
                r = note.Rest()
                r.quarterLength = mdr.getQuarterLength()
                if hasVoices:
                    vActive.coreAppend(r)
                else:
                    m.coreAppend(r)
                eLast = r
                continue
            # a note is not a chord, but may have chord tones
            # attached to it that follow
            elif mdr.isChord():
                # simply append if a chord; do not clear or change pending
                pendingRecords.append(mdr)

            elif mdr.isNote():
                # either this is a standalone note, or it is the first note of
                # a new group that may have chord tones following; any pending
                # records must be processed (as a Note or Chord) first
                if pendingRecords:
                    # this could be a Chord or Note
                    eLast = _processPending(hasVoices, pendingRecords, eLast,
                                            m, vActive)
                    pendingRecords = []
                # need to append this record for the current note
                pendingRecords.append(mdr)

        # process any remaining records (a final single note or chord)
        if pendingRecords:
            eLast = _processPending(hasVoices, pendingRecords, eLast, m,
                                    vActive)

        # there may be pending elements in a voice to insert into the measure
        if vActive is not None and vActive:
            vActive.coreElementsChanged()
            m.coreInsert(0, vActive)

        m.coreElementsChanged()

        if barCount == 0 and m.timeSignature is not None:  # easy case
            # can only do this b/c ts is defined
            if m.barDurationProportion() < 1.0:
                m.padAsAnacrusis()
                #environLocal.printDebug(['incompletely filled Measure found on musedata import; ',
                #   'interpreting as an anacrusis:', 'paddingLeft:', m.paddingLeft])
        p.coreAppend(m)
        barCount += 1

    p.coreElementsChanged()
    # for now, make all imports a C score (concert pitch) on import
    tInterval = museDataPart.getTranspositionIntervalObject()
    #environLocal.printDebug(['got transposition interval', p.id, tInterval])
    if tInterval is not None:
        p.flat.transpose(tInterval,
                         classFilterList=['Note', 'Chord', 'KeySignature'],
                         inPlace=True)
        # need to call make accidentals to correct new issues
        p.makeAccidentals()

    if museDataPart.stage == 1:
        # cannot yet get stage 1 clef data
        p.getElementsByClass('Measure')[0].clef = clef.bestClef(p,
                                                                recurse=True)
        p.makeBeams(inPlace=True)
        # will call overridden method on Part
        p.makeAccidentals()
    # assume that beams and clefs are defined in all stage 2

    s.insert(0, p)
    return s
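
A side note on the pattern above (not from the original source): the importer uses music21's low-level core API -- coreAppend / coreInsert followed by a single coreElementsChanged -- so the stream is not re-sorted after every element. A minimal sketch of that pattern, with made-up notes:

from music21 import stream, note

m = stream.Measure(number=1)
v = stream.Voice()
for name in ('c4', 'e4', 'g4'):
    v.coreAppend(note.Note(name, quarterLength=1.0))  # fast path: no cache updates yet
v.coreElementsChanged()   # rebuild the Voice's caches once, after all appends
m.coreInsert(0, v)        # place the Voice at offset 0 of the Measure
m.coreElementsChanged()   # rebuild the Measure's caches
assert len(m.flat.notes) == 3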
Example n. 24
from music21 import stream, note, clef

if __name__ == '__main__':
    p = stream.Part()
    p2 = stream.Part()
    m1 = stream.Measure()
    m2 = stream.Measure()
    m1.insert(0, note.Note("C5", type="whole"))
    m2.insert(0, note.Note("D3", type="whole"))
    m1.insert(0, clef.TrebleClef())
    m2.insert(0, clef.BassClef())
    p.insert(0, m1)
    p2.insert(0, m2)
    s = stream.Score()
    s.insert(0, p)
    s.insert(0, p2)
    s.show('vexflow')
Example n. 25
def __parse_midi(data_fn):
    ''' Helper function to parse a MIDI file into its measures and chords '''
    # Parse the MIDI data for separate melody and accompaniment parts.
    midi_data = converter.parse(data_fn)
    # Get melody part, compress into single voice.
    melody_stream = midi_data[5]  # For Metheny piece, Melody is Part #5.
    melody1, melody2 = melody_stream.getElementsByClass(stream.Voice)
    for j in melody2:
        melody1.insert(j.offset, j)
    melody_voice = melody1

    for i in melody_voice:
        if i.quarterLength == 0.0:
            i.quarterLength = 0.25

    # Change key signature to adhere to comp_stream (1 sharp, mode = major).
    # Also add Electric Guitar.
    melody_voice.insert(0, instrument.ElectricGuitar())
    # melody_voice.insert(0, key.KeySignature(sharps=1, mode='major'))
    melody_voice.insert(0, key.KeySignature(sharps=1))

    # The accompaniment parts. Take only the best subset of parts from
    # the original data. Maybe add more parts, hand-add valid instruments.
    # Should at least add a string part (for sparse solos).
    # Verified good parts: 0, 1, 6, 7
    partIndices = [0, 1, 6, 7]
    comp_stream = stream.Voice()
    comp_stream.append(
        [j.flat for i, j in enumerate(midi_data) if i in partIndices])

    # Full stream containing both the melody and the accompaniment.
    # All parts are flattened.
    full_stream = stream.Voice()
    for i in range(len(comp_stream)):
        full_stream.append(comp_stream[i])
    full_stream.append(melody_voice)

    # Extract the solo stream, assuming you know which offsets to pass to
    # getElementsByOffset(i, j). Note that to keep instruments separate when
    # flattening (stream.flat), you need to use stream.Part(), not stream.Voice().
    # Accompanied solo is in range [478, 548)
    solo_stream = stream.Voice()
    for part in full_stream:
        curr_part = stream.Part()
        curr_part.append(part.getElementsByClass(instrument.Instrument))
        curr_part.append(part.getElementsByClass(tempo.MetronomeMark))
        curr_part.append(part.getElementsByClass(key.KeySignature))
        curr_part.append(part.getElementsByClass(meter.TimeSignature))
        curr_part.append(
            part.getElementsByOffset(476, 548, includeEndBoundary=True))
        cp = curr_part.flat
        solo_stream.insert(cp)

    # Group by measure so you can classify.
    # Note that measure 0 is for the time signature, metronome, etc. which have
    # an offset of 0.0.
    melody_stream = solo_stream[-1]
    measures = OrderedDict()
    offsetTuples = [(int(n.offset / 4), n) for n in melody_stream]
    measureNum = 0  # for now, don't use real m. nums (119, 120)
    for key_x, group in groupby(offsetTuples, lambda x: x[0]):
        measures[measureNum] = [n[1] for n in group]
        measureNum += 1

    # Get the stream of chords.
    # offsetTuples_chords: group chords by measure number.
    chordStream = solo_stream[0]
    chordStream.removeByClass(note.Rest)
    chordStream.removeByClass(note.Note)
    offsetTuples_chords = [(int(n.offset / 4), n) for n in chordStream]

    # Generate the chord structure. Use just track 1 (piano) since it is
    # the only instrument that has chords.
    # Group into 4s, just like before.
    chords = OrderedDict()
    measureNum = 0
    for key_x, group in groupby(offsetTuples_chords, lambda x: x[0]):
        chords[measureNum] = [n[1] for n in group]
        measureNum += 1

    # Fix for the below problem.
    #   1) Find out why len(measures) != len(chords).
    #   ANSWER: resolves at end but melody ends 1/16 before last measure so doesn't
    #           actually show up, while the accompaniment's beat 1 right after does.
    #           Actually on second thought: melody/comp start on Ab, and resolve to
    #           the same key (Ab) so could actually just cut out last measure to loop.
    #           Decided: just cut out the last measure.
    del chords[len(chords) - 1]
    assert len(chords) == len(measures)

    return measures, chords
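
# Aside (illustration only, not part of the original routine): the grouping
# above simply buckets elements by int(offset / 4), i.e. it assumes 4/4 bars
# four quarter notes long. A minimal, self-contained sketch of that idea:
def _demo_group_by_measure():
    from collections import OrderedDict
    from itertools import groupby

    from music21 import note, stream

    s = stream.Stream()
    for i in range(8):  # eight quarter notes = two 4/4 measures
        s.insert(i * 1.0, note.Note('C4'))

    offsetTuples = [(int(n.offset / 4), n) for n in s.notes]
    measures = OrderedDict()
    for measureIndex, group in groupby(offsetTuples, lambda x: x[0]):
        measures[measureIndex] = [pair[1] for pair in group]

    assert [len(v) for v in measures.values()] == [4, 4]
    return measures
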
def main(args):
    messenger = mes.Messenger()

    file_input = utils.parse_arg(args.file_input)

    file_output = utils.parse_arg(args.file_output)

    name_part = utils.parse_arg(args.name_part)

    score = converter.parse(file_input)

    part_new = stream.Part()

    for p in score:
        if isinstance(p, stream.Part):
            for i in range(num_measures_lead_in + 1,
                           p.measure(-1).measureNumber + 1):
                m = p.measure(i)

                chord_symbols = [
                    c for c in m if isinstance(c, harmony.ChordSymbol)
                ]

                if len(chord_symbols) == 0:
                    if name_part == 'chord':
                        chord_new = chord.Chord(
                            [p.midi for p in chord_sym_last.pitches
                             ],  # NB: we want to fail in this case
                            duration=duration.Duration(4))
                        part_new.append(chord_new)
                        chord_sym_last = chord_new
                    elif name_part == 'root':
                        note_new = chord.Chord(
                            [[p.midi for p in chord_sym_last.pitches][0]
                             ],  # NB: we want to fail in this case
                            duration=duration.Duration(4))
                        part_new.append(note_new)
                        chord_sym_last = chord.Chord(
                            [p.midi for p in chord_sym_last.pitches],
                            duration=duration.Duration(4))
                    else:
                        raise Exception(
                            'cannot parse name_part from BIAB musicxml')
                else:
                    for sym in chord_symbols:
                        if name_part == 'chord':
                            chord_new = chord.Chord(
                                [p.midi for p in sym.pitches],
                                duration=duration.Duration(4 /
                                                           len(chord_symbols)))
                            part_new.append(chord_new)
                            chord_sym_last = chord_new
                        elif name_part == 'root':
                            note_new = note.Note([p.midi
                                                  for p in sym.pitches][0],
                                                 duration=duration.Duration(
                                                     4 / len(chord_symbols)))

                            part_new.append(note_new)
                            chord_sym_last = chord.Chord(
                                [p.midi for p in sym.pitches],
                                duration=duration.Duration(4 /
                                                           len(chord_symbols)))
                        else:
                            raise Exception(
                                'cannot parse name_part from BIAB musicxml')

    if name_part == 'chord':
        part_new = postp_mxl.force_texture(part_new, num_voices=4)

    part_new.write('midi', fp=file_output)

    messenger.message(['done', 'bang'])
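
A hedged aside on the chord handling above: harmony.ChordSymbol realizes its pitches, so each per-symbol branch reduces to reading sym.pitches and re-wrapping the MIDI numbers in a chord.Chord of the desired duration. A minimal sketch (the symbol and duration here are arbitrary):

from music21 import chord, duration, harmony

sym = harmony.ChordSymbol('Cm7')
midiPitches = [p.midi for p in sym.pitches]      # realized chord tones as MIDI numbers
c = chord.Chord(midiPitches, duration=duration.Duration(4))
print(midiPitches, c.quarterLength)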
Example n. 27
def notesAndDurationsToStream(notesList,
                              durationList,
                              scNotes=None,
                              removeRestsAtBeginning=True,
                              qle=None):
    '''
    Take a list of :class:`~music21.note.Note` objects or rests
    and an equally long list of how long
    each one lasts in terms of samples, and return a
    Stream using the information from quarterLengthEstimation
    and quantizeDuration.

    returns a :class:`~music21.stream.Score` object, containing
    a metadata object and a single :class:`~music21.stream.Part` object, which in turn
    contains the notes, etc.  Does not run :meth:`~music21.stream.Stream.makeNotation`
    on the Score.


    >>> durationList = [20, 19, 10, 30, 6, 21]
    >>> n = note.Note
    >>> noteList = [n('C#4'), n('D5'), n('B4'), n('F#5'), n('C5'), note.Rest()]
    >>> s,lengthPart = audioSearch.notesAndDurationsToStream(noteList, durationList)
    >>> s.show('text')
    {0.0} <music21.metadata.Metadata object at ...>
    {0.0} <music21.stream.Part ...>
        {0.0} <music21.note.Note C#>
        {1.0} <music21.note.Note D>
        {2.0} <music21.note.Note B>
        {2.5} <music21.note.Note F#>
        {4.0} <music21.note.Note C>
        {4.25} <music21.note.Rest rest>
    '''
    # rounding lengths
    p2 = stream.Part()

    # If the score is available, the quarter estimation is better:
    # It could take into account the changes of tempo during the song, but it
    # would take more processing time
    if scNotes is not None:
        fe = features.native.MostCommonNoteQuarterLength(scNotes)
        mostCommon = fe.extract().vector[0]
        qle = quarterLengthEstimation(durationList, mostCommon)
    elif scNotes is None:  # this is for the transcriber
        qle = quarterLengthEstimation(durationList)

    for i in range(len(durationList)):
        actualDuration = quantizeDuration(durationList[i] / qle)
        notesList[i].quarterLength = actualDuration
        if not (removeRestsAtBeginning and (notesList[i].name == 'rest')):
            p2.append(notesList[i])
            removeRestsAtBeginning = False

    sc = stream.Score()
    sc.metadata = metadata.Metadata()
    sc.metadata.title = 'Automatic Music21 Transcription'
    sc.insert(0, p2)

    if scNotes is None:  # Case transcriber
        return sc, len(p2)
    else:  # case follower
        return sc, qle
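
The loop above boils down to: divide each raw duration (in samples) by an estimated quarter-note length, then snap the result to a nearby standard quarterLength. A tiny self-contained sketch of that quantization step; the candidate grid here is an assumption, not what audioSearch.quantizeDuration actually uses:

def snapToGrid(samples, samplesPerQuarter,
               grid=(0.25, 0.5, 1.0, 1.5, 2.0, 4.0)):
    '''Snap a duration measured in samples to the nearest quarterLength in grid.'''
    raw = samples / samplesPerQuarter
    return min(grid, key=lambda q: abs(q - raw))

durationList = [20, 19, 10, 30, 6, 21]
print([snapToGrid(d, 20) for d in durationList])
# -> [1.0, 1.0, 0.5, 1.5, 0.25, 1.0]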
Example n. 28
def generateChords(numChords, kind=''):
    '''
    Randomly generate a score of chords for use with the perceived-dissonance
    app. These chords may be dissonant or consonant. If kind == 'diatonicTriads',
    only diatonic triads will be generated.
        
    
    >>> sc = webapps.commands.generateChords(4,'diatonicTriads')
    >>> a = webapps.commands.runPerceivedDissonanceAnalysis(sc,[1.2,3.2,5.2])
    >>> chords = a['fullScore']['stream'].flat.getElementsByClass('Chord')
    >>> chords[0].color != None
    True
    >>> chords[1].color != None
    True
    >>> chords[2].color != None
    True
    >>> chords[3].color in [None, '#cc3300']
    True
    >>> sc2 = webapps.commands.generateChords(4)
    >>> a = webapps.commands.runPerceivedDissonanceAnalysis(sc2,[1.2,3.2])
    >>> chords = a['fullScore']['stream'].flat.getElementsByClass('Chord')
    >>> chords[0].color != None
    True
    >>> chords[1].color != None
    True
    >>> chords[2].color in [None, '#cc3300']
    True
    >>> chords[3].color in [None, '#cc3300']
    True
    '''
    sc = stream.Score()
    p = stream.Part()
    scl = scale.MajorScale('C')
    #possibleChordTypes = [l[0] for l in harmony.CHORD_TYPES.values()]
    possibleChordTypes = ['53', '63', '64']
    if kind == 'diatonicTriads':
        for i in range(numChords):
            startDegree = random.randrange(0, 8)
            inversion = random.randrange(0, 3)
            #testDegrees = [d+startDegree-1 for d in traidInversions[inversion] ]
            chordPitches = [
                scl.pitchFromDegree(d + startDegree - 1)
                for d in traidInversions[inversion]
            ]
            chordType = possibleChordTypes[random.randrange(
                0, len(possibleChordTypes))]
            c = chord.Chord(chordPitches)
            c.quarterLength = 2
            p.append(c)
        p.makeMeasures(inPlace=True)
        sc.append(p)
        return sc
    else:
        for i in range(numChords):
            loPs = int(pitch.Pitch("C4").ps)
            hiPs = int(pitch.Pitch("C#5").ps)
            startPs = random.randrange(loPs, hiPs)
            startPitch = pitch.Pitch(ps=startPs)
            startPitchName = startPitch.name
            chordType = possibleChordTypes[random.randrange(
                0, len(possibleChordTypes))]
            c = harmony.ChordSymbol(startPitchName + ',' + chordType)
            c.writeAsChord = True
            c.quarterLength = 2
            c.volume.velocity = 127
            p.append(c)
        p.makeMeasures(inPlace=True)
        sc.append(p)
        return sc
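
In the diatonic branch above, traidInversions is a table defined elsewhere in webapps.commands, so its exact contents are not shown here. The underlying idea -- stacking every other pitch of a major scale to get a diatonic triad -- can be sketched without it (this is an illustrative variant, not the original's code):

import random

from music21 import chord, scale

scl = scale.MajorScale('C')
scalePitches = scl.getPitches('C4', 'C6')          # two octaves of C major
rootIndex = random.randrange(0, 7)                 # scale degree 1..7, zero-based
chordPitches = [scalePitches[rootIndex + step] for step in (0, 2, 4)]
c = chord.Chord(chordPitches)
c.quarterLength = 2
print(c.pitches)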
Example n. 29
def romanTextToStreamScore(rtHandler, inputM21=None):
    '''The main processing module for single-movement RomanText works.

    Given a romanText handler or string, return or fill a Score Stream.
    '''
    # accept a string directly; mostly for testing
    from music21 import romanText as romanTextModule

    if common.isStr(rtHandler):
        rtf = romanTextModule.RTFile()
        rtHandler = rtf.readstr(rtHandler)  # return handler, processes tokens

    # this could be just a Stream, but because we are creating metadata,
    # perhaps better to match the presentation of other scores.

    from music21 import metadata
    from music21 import stream
    from music21 import note
    from music21 import meter
    from music21 import key
    from music21 import roman
    from music21 import tie

    if inputM21 is None:
        s = stream.Score()
    else:
        s = inputM21

    # metadata can be first
    md = metadata.Metadata()
    s.insert(0, md)

    p = stream.Part()
    # time signature indications are found in the header, and also elsewhere
    tsCurrent = meter.TimeSignature('4/4')  # create default 4/4
    tsSet = False  # whether the current time signature has been placed in a measure
    lastMeasureToken = None
    lastMeasureNumber = 0
    previousRn = None
    keySigCurrent = None
    keySigSet = True  # whether the current key signature has been placed in a measure
    foundAKeySignatureSoFar = False
    kCurrent, unused_prefixLyric = _getKeyAndPrefix(
        'C')  # default if none defined
    prefixLyric = ''

    repeatEndings = {}
    rnKeyCache = {}

    for t in rtHandler.tokens:
        try:

            # environLocal.printDebug(['token', t])
            if t.isTitle():
                md.title = t.data

            elif t.isWork():
                md.alternativeTitle = t.data

            elif t.isPiece():
                md.alternativeTitle = t.data

            elif t.isComposer():
                md.composer = t.data

            elif t.isMovement():
                md.movementNumber = t.data

            elif t.isTimeSignature():
                tsCurrent = meter.TimeSignature(t.data)
                tsSet = False
                # environLocal.printDebug(['tsCurrent:', tsCurrent])

            elif t.isKeySignature():
                if t.data == "":
                    keySigCurrent = key.KeySignature(0)
                elif t.data == "Bb":
                    keySigCurrent = key.KeySignature(-1)
                else:
                    pass
                    # better to print a message
                    # environLocal.printDebug(['still need to write a generic RomanText KeySignature routine.  this is just temporary'])
                    # raise RomanTextTranslateException("still need to write a generic RomanText KeySignature routine.  this is just temporary")
                keySigSet = False
                # environLocal.printDebug(['keySigCurrent:', keySigCurrent])
                foundAKeySignatureSoFar = True

            elif t.isMeasure():
                # environLocal.printDebug(['handling measure token:', t])
                #if t.number[0] % 10 == 0:
                #    print "at number " + str(t.number[0])
                if t.variantNumber is not None:
                    # environLocal.printDebug(['skipping variant: %s' % t])
                    continue
                if t.variantLetter is not None:
                    # environLocal.printDebug(['skipping variant: %s' % t])
                    continue

                # if this measure number is more than 1 greater than the last
                # defined measure number, and the previous chord is not None,
                # then fill with copies of the last-defined measure
                if ((t.number[0] > lastMeasureNumber + 1)
                        and (previousRn is not None)):
                    for i in range(lastMeasureNumber + 1, t.number[0]):
                        mFill = stream.Measure()
                        mFill.number = i
                        newRn = copy.deepcopy(previousRn)
                        newRn.lyric = ""
                        # set to entire bar duration and tie
                        newRn.duration = copy.deepcopy(tsCurrent.barDuration)
                        if previousRn.tie is None:
                            previousRn.tie = tie.Tie('start')
                        else:
                            previousRn.tie.type = 'continue'
                        # set to stop for now; may extend on next iteration
                        newRn.tie = tie.Tie('stop')
                        previousRn = newRn
                        mFill.append(newRn)
                        appendMeasureToRepeatEndingsDict(
                            lastMeasureToken, mFill, repeatEndings, i)
                        p._appendCore(mFill)
                    lastMeasureNumber = t.number[0] - 1
                    lastMeasureToken = t

                # create a new measure or copy a past measure
                if len(t.number) == 1 and t.isCopyDefinition:  # if not a range
                    p._elementsChanged()
                    m, kCurrent = _copySingleMeasure(t, p, kCurrent)
                    p._appendCore(m)
                    lastMeasureNumber = m.number
                    lastMeasureToken = t
                    romans = m.getElementsByClass(roman.RomanNumeral,
                                                  returnStreamSubClass='list')
                    if len(romans) > 0:
                        previousRn = romans[-1]

                elif len(t.number) > 1:
                    p._elementsChanged()
                    measures, kCurrent = _copyMultipleMeasures(t, p, kCurrent)
                    p.append(measures)  # appendCore does not work with list
                    lastMeasureNumber = measures[-1].number
                    lastMeasureToken = t
                    romans = measures[-1].getElementsByClass(
                        roman.RomanNumeral, returnStreamSubClass='list')
                    if len(romans) > 0:
                        previousRn = romans[-1]

                else:
                    m = stream.Measure()
                    m.number = t.number[0]
                    appendMeasureToRepeatEndingsDict(t, m, repeatEndings)
                    lastMeasureNumber = t.number[0]
                    lastMeasureToken = t

                    if not tsSet:
                        m.timeSignature = tsCurrent
                        tsSet = True  # only set when changed
                    if not keySigSet and keySigCurrent is not None:
                        m.insert(0, keySigCurrent)
                        keySigSet = True  # only set when changed

                    o = 0.0  # start offsets at zero
                    previousChordInMeasure = None
                    pivotChordPossible = False
                    numberOfAtoms = len(t.atoms)
                    setKeyChangeToken = False  # first RomanNumeral object after a key change should have this set to True

                    for i, a in enumerate(t.atoms):
                        if (isinstance(a, romanTextModule.RTKey)
                                or (not foundAKeySignatureSoFar
                                    and isinstance(a, romanTextModule.RTAnalyticKey))):
                            # found a change of Key+KeySignature or
                            # just found a change of analysis but no keysignature so far

                            # environLocal.printDebug(['handling key token:', a])
                            try:  # this sets the key and the keysignature
                                kCurrent, pl = _getKeyAndPrefix(a)
                                prefixLyric += pl
                            except:
                                raise RomanTextTranslateException(
                                    'cannot get key from %s in line %s' %
                                    (a.src, t.src))
                            # insert at beginning of measure if at beginning -- for things like pickups.
                            if m.number < 2:
                                m._insertCore(0, kCurrent)
                            else:
                                m._insertCore(o, kCurrent)
                            foundAKeySignatureSoFar = True
                            setKeyChangeToken = True

                        elif isinstance(a, romanTextModule.RTKeySignature):
                            try:  # this sets the keysignature but not the prefix text
                                thisSig = a.getKeySignature()
                            except:
                                raise RomanTextTranslateException(
                                    'cannot get key from %s in line %s' %
                                    (a.src, t.src))
                            #insert at beginning of measure if at beginning -- for things like pickups.
                            if m.number < 2:
                                m._insertCore(0, thisSig)
                            else:
                                m._insertCore(o, thisSig)
                            foundAKeySignatureSoFar = True

                        elif isinstance(a, romanTextModule.RTAnalyticKey):
                            # just a change in analyzed key, not a change in anything else
                            #try: # this sets the key, not the keysignature
                            kCurrent, pl = _getKeyAndPrefix(a)
                            prefixLyric += pl
                            setKeyChangeToken = True

                        #except:
                        #    raise RomanTextTranslateException('cannot get key from %s in line %s' % (a.src, t.src))

                        elif isinstance(a, romanTextModule.RTBeat):
                            # set new offset based on beat
                            try:
                                o = a.getOffset(tsCurrent)
                            except ValueError:
                                raise RomanTextTranslateException(
                                    "cannot properly get an offset from beat data %s under timeSignature %s in line %s"
                                    % (a.src, tsCurrent, t.src))
                            if (previousChordInMeasure is None
                                    and previousRn is not None and o > 0):
                                # setting a new beat before giving any chords
                                firstChord = copy.deepcopy(previousRn)
                                firstChord.quarterLength = o
                                firstChord.lyric = ""
                                if previousRn.tie is None:
                                    previousRn.tie = tie.Tie('start')
                                else:
                                    previousRn.tie.type = 'continue'
                                firstChord.tie = tie.Tie('stop')
                                previousRn = firstChord
                                previousChordInMeasure = firstChord
                                m._insertCore(0, firstChord)
                            pivotChordPossible = False

                        elif isinstance(a, romanTextModule.RTNoChord):
                            # use the source to evaluate the roman numeral
                            rn = note.Rest()
                            if not pivotChordPossible:
                                # probably best to find duration
                                if previousChordInMeasure is None:
                                    pass  # use default duration
                                else:  # update duration of previous chord in Measure
                                    oPrevious = previousChordInMeasure.getOffsetBySite(
                                        m)
                                    newQL = o - oPrevious
                                    if newQL <= 0:
                                        raise RomanTextTranslateException(
                                            'too many notes in this measure: %s'
                                            % t.src)
                                    previousChordInMeasure.quarterLength = newQL
                                prefixLyric = ""
                                m._insertCore(o, rn)
                                previousChordInMeasure = rn
                                previousRn = rn
                                pivotChordPossible = False

                        elif isinstance(a, romanTextModule.RTChord):
                            # use the source to evaluate the roman numeral
                            try:
                                asrc = a.src
                                #                            if kCurrent.mode == 'minor':
                                #                                if asrc.lower().startswith('vi'): #vi or vii w/ or w/o o
                                #                                    if asrc.upper() == a.src: # VI or VII to bVI or bVII
                                #                                        asrc = 'b' + asrc
                                cacheTuple = (asrc,
                                              kCurrent.tonicPitchNameWithCase)
                                if cacheTuple in rnKeyCache:
                                    #print "Got a match: " + str(cacheTuple)
                                    rn = copy.deepcopy(rnKeyCache[cacheTuple])
                                else:
                                    #print "No match for: " + str(cacheTuple)
                                    rn = roman.RomanNumeral(
                                        asrc, copy.deepcopy(kCurrent))
                                    rnKeyCache[cacheTuple] = rn
                                # surprisingly, not faster... and more dangerous
                                #rn = roman.RomanNumeral(asrc, kCurrent)
                                ## SLOWEST!!!
                                #rn = roman.RomanNumeral(asrc, kCurrent.tonicPitchNameWithCase)

                                #>>> from timeit import timeit as t
                                #>>> t('roman.RomanNumeral("IV", "c#")', 'from music21 import roman', number=1000)
                                #45.75
                                #>>> t('roman.RomanNumeral("IV", k)', 'from music21 import roman, key; k = key.Key("c#")', number=1000)
                                #16.09
                                #>>> t('roman.RomanNumeral("IV", copy.deepcopy(k))', 'from music21 import roman, key; import copy; k = key.Key("c#")', number=1000)
                                #22.49

                                if setKeyChangeToken is True:
                                    rn.followsKeyChange = True
                                    setKeyChangeToken = False
                                else:
                                    rn.followsKeyChange = False
                            except (roman.RomanNumeralException,
                                    common.Music21CommonException):
                                #environLocal.printDebug('cannot create RN from: %s' % a.src)
                                rn = note.Note()  # create placeholder

                            if not pivotChordPossible:
                                # probably best to find duration
                                if previousChordInMeasure is None:
                                    pass  # use default duration
                                else:  # update duration of previous chord in Measure
                                    oPrevious = previousChordInMeasure.getOffsetBySite(
                                        m)
                                    newQL = o - oPrevious
                                    if newQL <= 0:
                                        raise RomanTextTranslateException(
                                            'too many notes in this measure: %s'
                                            % t.src)
                                    previousChordInMeasure.quarterLength = newQL

                                rn.addLyric(prefixLyric + a.src)
                                prefixLyric = ""
                                m._insertCore(o, rn)
                                previousChordInMeasure = rn
                                previousRn = rn
                                pivotChordPossible = True
                            else:
                                previousChordInMeasure.lyric += "//" + prefixLyric + a.src
                                previousChordInMeasure.pivotChord = rn
                                prefixLyric = ""
                                pivotChordPossible = False

                        elif isinstance(a, romanTextModule.RTRepeat):
                            if o == 0:
                                if isinstance(a,
                                              romanTextModule.RTRepeatStart):
                                    m.leftBarline = bar.Repeat(
                                        direction='start')
                                else:
                                    rtt = RomanTextUnprocessedToken(a)
                                    m._insertCore(o, rtt)
                            elif tsCurrent is not None and (
                                    tsCurrent.barDuration.quarterLength == o
                                    or i == numberOfAtoms - 1):
                                if isinstance(a, romanTextModule.RTRepeatStop):
                                    m.rightBarline = bar.Repeat(
                                        direction='end')
                                else:
                                    rtt = RomanTextUnprocessedToken(a)
                                    m._insertCore(o, rtt)
                            else:  # mid measure repeat signs
                                rtt = RomanTextUnprocessedToken(a)
                                m._insertCore(o, rtt)

                        else:
                            rtt = RomanTextUnprocessedToken(a)
                            m._insertCore(o, rtt)
                            #environLocal.warn("Got an unknown token: %r" % a)

                    # may need to adjust duration of last chord added
                    if tsCurrent is not None:
                        previousRn.quarterLength = tsCurrent.barDuration.quarterLength - o
                    m._elementsChanged()
                    p._appendCore(m)

        except Exception:
            import traceback
            tracebackMessage = traceback.format_exc()
            raise RomanTextTranslateException(
                "At line %d for token %r, an exception was raised: \n%s" %
                (t.lineNumber, t, tracebackMessage))

    p._elementsChanged()
    fixPickupMeasure(p)
    p.makeBeams(inPlace=True)
    p.makeAccidentals(inPlace=True)
    _addRepeatsFromRepeatEndings(p, repeatEndings)  # 1st and second endings...
    s.insert(0, p)

    return s
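
A hedged usage sketch for the translator above: the docstring notes that a RomanText string is accepted directly (mostly for testing), so a tiny made-up analysis is enough to exercise it:

rtExample = '''Composer: Anonymous
Time Signature: 4/4
m1 C: I b3 V
m2 I
'''

sc = romanTextToStreamScore(rtExample)
sc.show('text')   # one Part containing two measures of RomanNumeral objects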
Example n. 30
    def asOpus(self):
        '''
        returns all snippets as a :class:`~music21.stream.Opus` object

        
        >>> deduto = alpha.trecento.cadencebook.BallataSheet().workByTitle('deduto')
        >>> deduto.title
        'Deduto sey a quel'
        >>> dedutoScore = deduto.asOpus()
        >>> dedutoScore
        <music21.stream.Opus ...>
        >>> #_DOCS_SHOW dedutoScore.show('lily.png')

        '''
        o = stream.Opus()
        md = metadata.Metadata()
        o.insert(0, md)
        o.metadata.composer = self.composer
        o.metadata.title = self.title

        bs = self.snippets
        for thisSnippet in bs:
            if thisSnippet is None:
                continue
            if (thisSnippet.tenor is None
                    and thisSnippet.cantus is None
                    and thisSnippet.contratenor is None):
                continue
            s = stream.Score()
            for dummy in range(self.totalVoices):
                s.insert(0, stream.Part())

            for partNumber, snippetPart in enumerate(
                    thisSnippet.getElementsByClass('TrecentoCadenceStream')):
                if thisSnippet.snippetName != "" and partNumber == self.totalVoices - 1:
                    textEx = expressions.TextExpression(
                        thisSnippet.snippetName)
                    textEx.positionVertical = 'below'
                    if 'FrontPaddedSnippet' in thisSnippet.classes:
                        if snippetPart.hasMeasures():
                            snippetPart.getElementsByClass(
                                'Measure')[-1].insert(0, textEx)
                        else:
                            snippetPart.append(textEx)
                    else:
                        if snippetPart.hasMeasures():
                            snippetPart.getElementsByClass(
                                'Measure')[0].insert(0, textEx)
                        else:
                            snippetPart.insert(0, textEx)
#                if currentTs is None or timeSig != currentTs:
#                    s.append(timeSig)
#                    currentTs = timeSig
                try:
                    currentScorePart = s.parts[partNumber]
                except IndexError:
                    continue  # error in coding
                for thisElement in snippetPart:
                    if 'TimeSignature' in thisElement.classes:
                        continue
                    currentScorePart.append(thisElement)
            o.insert(0, s)
        return o
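
A short follow-up sketch (using a generic Opus rather than the BallataSheet data above): an Opus like the one asOpus returns exposes its Scores through the scores property:

from music21 import stream

o = stream.Opus()
o.insert(0, stream.Score())
o.insert(0, stream.Score())
print(len(o.scores))   # 2 -- each snippet built by asOpus() would appear here as a Score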