Example #1
def __get_abstract_grammars(measures, chords):
    # extract grammars
    abstract_grammars = []
    for ix in range(1, len(measures)):
        m = stream.Voice()
        for i in measures[ix]:
            m.insert(i.offset, i)
        c = stream.Voice()
        for j in chords[ix]:
            c.insert(j.offset, j)
        parsed = parse_melody(m, c)
        abstract_grammars.append(parsed)

    return abstract_grammars
Example #2
    def testTypeParses(self):
        '''
        Tests successful init on a range of supported objects (score, part, even RomanNumeral).
        '''

        s = stream.Score()
        romanText.writeRoman.RnWriter(s)  # Works on a score

        p = stream.Part()
        romanText.writeRoman.RnWriter(p)  # or on a part

        s.insert(p)
        romanText.writeRoman.RnWriter(s)  # or on a score with part

        m = stream.Measure()
        RnWriter(m)  # or on a measure

        v = stream.Voice()
        # or theoretically on a voice, but will be empty for lack of measures
        emptyWriter = RnWriter(v)
        self.assertEqual(emptyWriter.combinedList, [
            'Composer: Composer unknown',
            'Title: Title unknown',
            'Analyst: ',
            'Proofreader: ',
            '',
        ])

        rn = roman.RomanNumeral('viio6', 'G')
        RnWriter(rn)  # and even (perhaps dubiously) directly on other music21 objects
Example #3
def merge_streams(
    *streams: stream.Stream,
    stream_class: Optional[Type[Union[stream.Voice, stream.Part,
                                      stream.Score]]] = None
) -> stream.Stream:
    """

    Creates a new stream by combining streams vertically.

    Args:
        *streams: Streams to merge.
        stream_class: Optional; The type of stream to convert to (Score, Part or Voice). By
        default, a generic Stream is returned.

    Returns:

    """
    if stream_class is None:
        post_stream = stream.Stream()
    elif stream_class is stream.Score:
        post_stream = stream.Score()
    elif stream_class is stream.Part:
        post_stream = stream.Part()
    elif stream_class is stream.Voice:
        post_stream = stream.Voice()
    for stream_ in streams:
        post_stream.insert(0, stream_)
    return post_stream
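
# Usage sketch (illustrative, not part of the original example); assumes
# `from music21 import note, stream` at module level.
upper = stream.Part([note.Note('C5', quarterLength=2.0)])
lower = stream.Part([note.Note('C3', quarterLength=2.0)])
score = merge_streams(upper, lower, stream_class=stream.Score)
assert isinstance(score, stream.Score) and len(score.parts) == 2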
Example #4
    def testLowVoiceNumbers(self):
        n = note.Note()
        v1 = stream.Voice([n])
        m = stream.Measure([v1])
        # Unnecessary voice is removed by makeNotation
        xmlOut = self.getXml(m)
        self.assertNotIn('<voice>1</voice>', xmlOut)
        n2 = note.Note()
        v2 = stream.Voice([n2])
        m.insert(0, v2)
        xmlOut = self.getXml(m)
        self.assertIn('<voice>1</voice>', xmlOut)
        self.assertIn('<voice>2</voice>', xmlOut)
        v1.id = 234
        xmlOut = self.getXml(m)
        self.assertIn('<voice>234</voice>', xmlOut)
        self.assertIn('<voice>1</voice>', xmlOut)  # is v2 now!
        v2.id = 'hello'
        xmlOut = self.getXml(m)
        self.assertIn('<voice>hello</voice>', xmlOut)
Example #5
def _left_hand_interlude():
    lh_interlude = stream.Voice()
    lh_interlude.append(meter.TimeSignature("6/4"))
    for _ in range(2):
        lh_interlude.append(note.Rest(duration=duration.Duration(2.75)))
        note_1 = note.Note("E1", duration=duration.Duration(0.25))
        note_2 = note.Note("A0", duration=duration.Duration(3))
        ottava = spanner.Ottava()
        ottava.type = (8, "down")
        ottava.addSpannedElements([note_1, note_2])
        lh_interlude.append(ottava)
        lh_interlude.append(note_1)
        lh_interlude.append(note_2)
        lh_interlude.makeMeasures(inPlace=True, finalBarline=None)
    return lh_interlude
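
# Quick inspection sketch (illustrative, not part of the original example);
# assumes the music21 submodule imports used above (stream, meter, note,
# duration, spanner) are in scope.
interlude = _left_hand_interlude()
interlude.show('text')  # text rendering of the measures, rests, notes, and Ottava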
Example #6
def play_melody(gen_melody):
    v = stream.Voice()
    last_note_duration = 0
    for n in gen_melody:
        if n[0] == 0:
            new_note = note.Rest()
        else:
            new_pitch = pitch.Pitch()
            # new_pitch.midi = 59.0 + n[0] - 24
            new_pitch.midi = n[0]
            new_note = note.Note(new_pitch)
        new_note.offset = v.highestOffset + last_note_duration
        new_note.duration.quarterLength = n[2]
        last_note_duration = new_note.duration.quarterLength
        v.insert(new_note)
    s = stream.Stream()
    part = stream.Part()
    part.clef = clef.BassClef()
    part.append(instrument.Harpsichord())
    part.insert(v)
    s.insert(part)

    return s
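
# Usage sketch (illustrative, not part of the original example). `gen_melody`
# is assumed to be a sequence of (midi_pitch, _, quarter_length) tuples, with
# midi_pitch == 0 meaning a rest (the middle slot is unused by play_melody).
# Assumes the music21 submodule imports used above are in scope.
demo_melody = [(60, None, 1.0), (0, None, 0.5), (62, None, 0.5)]  # C4, rest, D4
demo_stream = play_melody(demo_melody)
demo_stream.show('text')  # text rendering of the part, voice, and notes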
Example #7
def convert_stream(
    original_stream: stream.Stream,
    stream_class: Type[Union[stream.Voice, stream.Part, stream.Score]],
) -> stream.Stream:
    """Converts a stream to a the specified type

    Args:
        original_stream: The Stream to convert.
        stream_class: The type of stream to convert to (Score, Part or Voice).

    Returns:
        Converted stream.
    """
    if stream_class is stream.Score:
        post_stream = stream.Score()
    elif stream_class is stream.Part:
        post_stream = stream.Part()
    elif stream_class is stream.Voice:
        post_stream = stream.Voice()
    else:
        # guard against an unbound post_stream for unsupported types
        raise ValueError('unsupported stream_class: %s' % stream_class)

    for element in original_stream.elements:
        post_stream.append(element)
    return post_stream
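
# Usage sketch (illustrative): convert a generic Stream into a Part.
# Assumes `from music21 import note, stream`.
generic = stream.Stream([note.Note('G4'), note.Note('A4')])
as_part = convert_stream(generic, stream.Part)
assert isinstance(as_part, stream.Part)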
Example #8
def unparse_grammar(m1_grammar, m1_chords):
    m1_elements = stream.Voice()
    currOffset = 0.0  # running offset, used to locate the last chord
    prevElement = None
    for ix, grammarElement in enumerate(m1_grammar.split(" ")):
        terms = grammarElement.split(",")
        currOffset += float(terms[1])  # advance the running offset by this element's duration

        # Case 1: it's a rest. Just append
        if terms[0] == "R":
            rNote = note.Rest(quarterLength=float(terms[1]))
            m1_elements.insert(currOffset, rNote)
            continue

        # Get the last chord first so you can find chord note, scale note, etc.
        try:
            lastChord = [n for n in m1_chords if n.offset <= currOffset][-1]
        except IndexError:
            m1_chords[0].offset = 0.0
            lastChord = [n for n in m1_chords if n.offset <= currOffset][-1]

        # Case 1: no < > to indicate the next note's range. Usually this lack
        # of < > is for the first note (no precedent) or for rests: generate
        # from the range of the lowest chord note to the highest chord note
        # (or, if it is a chord note, just generate one of the actual chord
        # notes).
        if len(terms) == 2:
            insertNote = note.Note()  # default is C

            # Case C: chord note.
            if terms[0] == "C":
                insertNote = __generate_chord_tone(lastChord)

            # Case S: scale note.
            elif terms[0] == "S":
                insertNote = __generate_scale_tone(lastChord)

            # Case A: approach note.
            # Handle both A and X notes here for now.
            else:
                insertNote = __generate_approach_tone(lastChord)

            # Update the stream of generated notes
            insertNote.quarterLength = float(terms[1])
            if insertNote.octave < 4:
                insertNote.octave = 4
            m1_elements.insert(currOffset, insertNote)
            prevElement = insertNote

        # Case #2: if < > for the increment. Usually for notes after the first one.
        else:
            # Get lower, upper intervals and notes.
            interval1 = interval.Interval(terms[2].replace("<", ""))
            interval2 = interval.Interval(terms[3].replace(">", ""))
            if interval1.cents > interval2.cents:
                upperInterval, lowerInterval = interval1, interval2
            else:
                upperInterval, lowerInterval = interval2, interval1
            lowPitch = interval.transposePitch(prevElement.pitch,
                                               lowerInterval)
            highPitch = interval.transposePitch(prevElement.pitch,
                                                upperInterval)
            numNotes = int(highPitch.ps - lowPitch.ps + 1)  # for range(s, e)

            # Case C: chord note, must be within increment (terms[2]).
            # First, transpose note with lowerInterval to get note that is
            # the lower bound. Then iterate over, and find valid notes. Then
            # choose randomly from those.

            if terms[0] == "C":
                relevantChordTones = []
                for i in range(0, numNotes):
                    currNote = note.Note(
                        lowPitch.transpose(i).simplifyEnharmonic())
                    if __is_chord_tone(lastChord, currNote):
                        relevantChordTones.append(currNote)
                if len(relevantChordTones) > 1:
                    insertNote = random.choice([
                        i for i in relevantChordTones
                        if i.nameWithOctave != prevElement.nameWithOctave
                    ])
                elif len(relevantChordTones) == 1:
                    insertNote = relevantChordTones[0]
                else:  # if no choices, set to prev element +-1 whole step
                    insertNote = prevElement.transpose(random.choice([-2, 2]))
                if insertNote.octave < 3:
                    insertNote.octave = 3
                insertNote.quarterLength = float(terms[1])
                m1_elements.insert(currOffset, insertNote)

            # Case S: scale note, must be within increment.
            elif terms[0] == "S":
                relevantScaleTones = []
                for i in range(0, numNotes):
                    currNote = note.Note(
                        lowPitch.transpose(i).simplifyEnharmonic())
                    if __is_scale_tone(lastChord, currNote):
                        relevantScaleTones.append(currNote)
                if len(relevantScaleTones) > 1:
                    insertNote = random.choice([
                        i for i in relevantScaleTones
                        if i.nameWithOctave != prevElement.nameWithOctave
                    ])
                elif len(relevantScaleTones) == 1:
                    insertNote = relevantScaleTones[0]
                else:  # if no choices, set to prev element +-1 whole step
                    insertNote = prevElement.transpose(random.choice([-2, 2]))
                if insertNote.octave < 3:
                    insertNote.octave = 3
                insertNote.quarterLength = float(terms[1])
                m1_elements.insert(currOffset, insertNote)

            # Case A: approach tone, must be within increment.
            # For now: handle both A and X cases.
            else:
                relevantApproachTones = []
                for i in range(0, numNotes):
                    currNote = note.Note(
                        lowPitch.transpose(i).simplifyEnharmonic())
                    if __is_approach_tone(lastChord, currNote):
                        relevantApproachTones.append(currNote)
                if len(relevantApproachTones) > 1:
                    insertNote = random.choice([
                        i for i in relevantApproachTones
                        if i.nameWithOctave != prevElement.nameWithOctave
                    ])
                elif len(relevantApproachTones) == 1:
                    insertNote = relevantApproachTones[0]
                else:  # if no choices, set to prev element +-1 whole step
                    insertNote = prevElement.transpose(random.choice([-2, 2]))
                if insertNote.octave < 3:
                    insertNote.octave = 3
                insertNote.quarterLength = float(terms[1])
                m1_elements.insert(currOffset, insertNote)

            # update the previous element.
            prevElement = insertNote

    return m1_elements
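
# Grammar element format handled above (illustrative summary; see also the
# docstrings in the later examples): each space-separated element is
# "<category>,<duration>", optionally followed by "<low,high>" interval bounds,
# e.g. "C,0.250,<P4,m-2>". Categories: C (chord tone), S (scale tone),
# A/X (approach tone), R (rest).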
Example #9
def musedataPartToStreamPart(museDataPart, inputM21=None):
    '''Translate a musedata part to a :class:`~music21.stream.Part`.
    '''
    from music21 import stream
    from music21 import note
    from music21 import tempo

    if inputM21 is None:
        s = stream.Score()
    else:
        s = inputM21

    p = stream.Part()
    p.id = museDataPart.getPartName()

    # create and store objects
    mdmObjs = museDataPart.getMeasures()

    #environLocal.printDebug(['first measure parent', mdmObjs[0].parent])

    barCount = 0
    # get each measure
    # store last Note/Chord/Rest for tie comparisons; span measures
    eLast = None
    for mIndex, mdm in enumerate(mdmObjs):
        #environLocal.printDebug(['processing:', mdm.src])
        if not mdm.hasNotes():
            continue

        if mdm.hasVoices():
            hasVoices = True
            vActive = stream.Voice()
        else:
            hasVoices = False
            vActive = None

        #m = stream.Measure()
        # get a measure object with a left configured bar line
        if mIndex <= len(mdmObjs) - 2:
            mdmNext = mdmObjs[mIndex + 1]
        else:
            mdmNext = None

        m = mdm.getMeasureObject()

        # conditions for a final measure definition defining the last bar
        if mdmNext is not None and not mdmNext.hasNotes():
            #environLocal.printDebug(['got mdmNext not none and not has notes'])
            # get bar from next measure definition
            m.rightBarline = mdmNext.getBarObject()

        if barCount == 0:  # only for when no bars are defined
            # the parent of the measure is the part
            c = mdm.parent.getClefObject()
            if c is not None:
                m.clef = c
            m.timeSignature = mdm.parent.getTimeSignatureObject()
            m.keySignature = mdm.parent.getKeySignature()
            # look for a tempo indication
            directive = mdm.parent.getDirective()
            if directive is not None:
                tt = tempo.TempoText(directive)
                # if this appears to be a tempo indication, then get a metronome mark
                if tt.isCommonTempoText():
                    mm = tt.getMetronomeMark()
                    m.insert(0, mm)

        # get all records; may be notes or note components
        mdrObjs = mdm.getRecords()
        # store pairs of pitches and durations for chording after a
        # new note has been found
        pendingRecords = []

        # get notes in each record
        for i in range(len(mdrObjs)):
            mdr = mdrObjs[i]
            #environLocal.printDebug(['processing:', mdr.src])

            if mdr.isBack():
                # the current use of 'back' assumes that we always return to
                # the start of the measure; this may not be the case
                if pendingRecords != []:
                    eLast = _processPending(hasVoices, pendingRecords, eLast,
                                            m, vActive)
                    pendingRecords = []

                # every time we encounter a back, we need to store
                # our existing voice and create a new one
                m.insert(0, vActive)
                vActive = stream.Voice()

            if mdr.isRest():
                #environLocal.printDebug(['got mdr rest, parent:', mdr.parent])
                # check for pending records first
                if pendingRecords != []:
                    eLast = _processPending(hasVoices, pendingRecords, eLast,
                                            m, vActive)
                    pendingRecords = []
                # create rest after clearing pending records
                r = note.Rest()
                r.quarterLength = mdr.getQuarterLength()
                if hasVoices:
                    vActive._appendCore(r)
                else:
                    m._appendCore(r)
                eLast = r
                continue
            # a note is not a chord, but may have chord tones
            # attached to it that follow
            elif mdr.isChord():
                # simply append if a chord; do not clear or change pending
                pendingRecords.append(mdr)

            elif mdr.isNote():
                # either this is a note alone, or this is the first
                # note found that is not a chord; if first not a chord
                # need to append immediately
                if pendingRecords != []:
                    # this could be a Chord or Note
                    eLast = _processPending(hasVoices, pendingRecords, eLast,
                                            m, vActive)
                    pendingRecords = []
                # need to append this record for the current note
                pendingRecords.append(mdr)

        # check for any remaining single notes (if last) or chords
        if pendingRecords != []:
            eLast = _processPending(hasVoices, pendingRecords, eLast, m,
                                    vActive)

        # there may be pending elements in a voice to append to the measure
        if vActive is not None and vActive:
            vActive.elementsChanged()
            m._insertCore(0, vActive)

        m.elementsChanged()

        if barCount == 0 and m.timeSignature is not None:  # easy case
            # can only do this b/c ts is defined
            if m.barDurationProportion() < 1.0:
                m.padAsAnacrusis()
                #environLocal.printDebug(['incompletely filled Measure found on musedata import; ',
                #   'interpreting as an anacrusis:', 'paddingLeft:', m.paddingLeft])
        p._appendCore(m)
        barCount += 1

    p.elementsChanged()
    # for now, make all imports a c-score on import;
    tInterval = museDataPart.getTranspositionIntervalObject()
    #environLocal.printDebug(['got transposition interval', p.id, tInterval])
    if tInterval is not None:
        p.flat.transpose(tInterval,
                         classFilterList=['Note', 'Chord', 'KeySignature'],
                         inPlace=True)
        # need to call make accidentals to correct new issues
        p.makeAccidentals()

    if museDataPart.stage == 1:
        # cannot yet get stage 1 clef data
        p.getElementsByClass('Measure')[0].clef = p.flat.bestClef()
        p.makeBeams(inPlace=True)
        # will call overridden method on Part
        p.makeAccidentals()
    # assume that beams and clefs are defined in all stage 2

    s.insert(0, p)
    return s
Example #10
def generate_music(inference_model,
                   corpus=corpus,
                   abstract_grammars=abstract_grammars,
                   tones=tones,
                   tones_indices=tones_indices,
                   indices_tones=indices_tones,
                   T_y=10,
                   max_tries=1000,
                   diversity=0.5):
    """
    使用训练的模型生成音乐
    Arguments:
    model -- 训练的模型
    corpus -- 音乐语料库, 193个音调作为字符串的列表(ex: 'C,0.333,<P1,d-5>')
    abstract_grammars -- grammars列表: 'S,0.250,<m2,P-4> C,0.250,<P4,m-2> A,0.250,<P4,m-2>'
    tones -- set of unique tones, ex: 'A,0.250,<M2,d-4>' is one element of the set.
    tones_indices -- a python dictionary mapping unique tone (ex: A,0.250,< m2,P-4 >) into their corresponding indices (0-77)
    indices_tones -- a python dictionary mapping indices (0-77) into their corresponding unique tone (ex: A,0.250,< m2,P-4 >)
    Tx -- integer, number of time-steps used at training time
    temperature -- scalar value, defines how conservative/creative the model is when generating music
    Returns:
    predicted_tones -- python list containing predicted tones
    """

    # set up audio stream
    out_stream = stream.Stream()

    # Initialize chord variables
    curr_offset = 0.0  # variable used to write sounds to the Stream.
    num_chords = int(len(chords) / 3)  # number of different sets of chords

    print("Predicting new values for different set of chords.")
    # Loop over all 18 set of chords. At each iteration generate a sequence of tones
    # and use the current chords to convert it into actual sounds
    for i in range(1, num_chords):

        # Retrieve current chord from stream
        curr_chords = stream.Voice()

        # Loop over the chords of the current set of chords
        for j in chords[i]:
            # Insert the chord at its offset within the current 4-beat measure
            curr_chords.insert((j.offset % 4), j)

        # Generate a sequence of tones using the model
        _, indices = predict_and_sample(inference_model, x_initializer,
                                        a_initializer, c_initializer)
        indices = list(indices.squeeze())
        pred = [indices_tones[p] for p in indices]

        predicted_tones = 'C,0.25 '
        for k in range(len(pred) - 1):
            predicted_tones += pred[k] + ' '

        predicted_tones += pred[-1]

        #### POST PROCESSING OF THE PREDICTED TONES ####
        # We will consider "A" and "X" as "C" tones. It is a common choice.
        predicted_tones = predicted_tones.replace(' A',
                                                  ' C').replace(' X', ' C')

        # Pruning #1: smoothing measure
        predicted_tones = qa.prune_grammar(predicted_tones)

        # Use predicted tones and current chords to generate sounds
        sounds = qa.unparse_grammar(predicted_tones, curr_chords)

        # Pruning #2: removing repeated and too close together sounds
        sounds = qa.prune_notes(sounds)

        # Quality assurance: clean up sounds
        sounds = qa.clean_up_notes(sounds)

        # Print number of tones/notes in sounds
        print(
            'Generated %s sounds using the predicted values for the set of chords ("%s") and after pruning'
            % (len([k for k in sounds if isinstance(k, note.Note)]), i))

        # Insert sounds into the output stream
        for m in sounds:
            out_stream.insert(curr_offset + m.offset, m)
        for mc in curr_chords:
            out_stream.insert(curr_offset + mc.offset, mc)

        curr_offset += 4.0

    # Initialize tempo of the output stream at 130 beats per minute
    out_stream.insert(0.0, tempo.MetronomeMark(number=130))

    # Save audio stream to file
    mf = midi.translate.streamToMidiFile(out_stream)
    mf.open("output/my_music.midi", 'wb')
    mf.write()
    print("Your generated music is saved in output/my_music.midi")
    mf.close()

    return out_stream
Example #11
def piano_texture(token_seq, duration=1):
    rh = stream.Voice(melody_texture(token_seq, duration / 4))
    lh = stream.Voice(chord_texture(token_seq, duration))
    # Iterating the Score yields its top-level elements (here, the two voices);
    # use `element` as the loop variable to avoid shadowing the `note` module.
    for element in stream.Score([rh, lh]):
        yield element
Example #12
def generate(data_fn, out_fn, N_epochs):
    # model settings
    max_len = 20
    max_tries = 1000
    diversity = 0.5

    # musical settings
    bpm = 130

    # get data
    chords, abstract_grammars = get_musical_data(data_fn)
    corpus, values, val_indices, indices_val = get_corpus_data(
        abstract_grammars)
    print("corpus length:", len(corpus))
    print("total # of values:", len(values))

    # build model
    model = lstm.build_model(corpus=corpus,
                             val_indices=val_indices,
                             max_len=max_len,
                             N_epochs=N_epochs)

    # set up audio stream
    out_stream = stream.Stream()

    # generation loop
    curr_offset = 0.0
    loopEnd = len(chords)
    for loopIndex in range(1, loopEnd):
        # get chords from file
        curr_chords = stream.Voice()
        for j in chords[loopIndex]:
            curr_chords.insert((j.offset % 4), j)

        # generate grammar
        curr_grammar = __generate_grammar(
            model=model,
            corpus=corpus,
            abstract_grammars=abstract_grammars,
            values=values,
            val_indices=val_indices,
            indices_val=indices_val,
            max_len=max_len,
            max_tries=max_tries,
            diversity=diversity,
        )

        curr_grammar = curr_grammar.replace(" A", " C").replace(" X", " C")

        # Pruning #1: smoothing measure
        curr_grammar = prune_grammar(curr_grammar)

        # Get notes from grammar and chords
        curr_notes = unparse_grammar(curr_grammar, curr_chords)

        # Pruning #2: removing repeated and too close together notes
        curr_notes = prune_notes(curr_notes)

        # quality assurance: clean up notes
        curr_notes = clean_up_notes(curr_notes)

        # print # of notes in curr_notes
        print("After pruning: %s notes" %
              (len([i for i in curr_notes if isinstance(i, note.Note)])))

        # insert into the output stream
        for m in curr_notes:
            out_stream.insert(curr_offset + m.offset, m)
        for mc in curr_chords:
            out_stream.insert(curr_offset + mc.offset, mc)

        curr_offset += 4.0

    out_stream.insert(0.0, tempo.MetronomeMark(number=bpm))

    # Play the final stream through output (see 'play' lambda function above)
    # play = lambda x: midi.realtime.StreamPlayer(x).play()
    # play(out_stream)

    # save stream
    mf = midi.translate.streamToMidiFile(out_stream)
    mf.open(out_fn, "wb")
    mf.write()
    mf.close()
Example #13
def __parse_midi(data_fn):
    ''' Helper function to parse a MIDI file into its measures and chords '''
    # Parse the MIDI data for separate melody and accompaniment parts.
    midi_data = converter.parse(data_fn)
    # Get melody part, compress into single voice.
    melody_stream = midi_data[5]  # For Metheny piece, Melody is Part #5.
    melody1, melody2 = melody_stream.getElementsByClass(stream.Voice)
    for j in melody2:
        melody1.insert(j.offset, j)
    melody_voice = melody1

    for i in melody_voice:
        if i.quarterLength == 0.0:
            i.quarterLength = 0.25

    # Change key signature to adhere to comp_stream (1 sharp, mode = major).
    # Also add Electric Guitar.
    melody_voice.insert(0, instrument.ElectricGuitar())
    # melody_voice.insert(0, key.KeySignature(sharps=1, mode='major'))
    melody_voice.insert(0, key.KeySignature(sharps=1))

    # The accompaniment parts. Take only the best subset of parts from
    # the original data. Maybe add more parts, hand-add valid instruments.
    # Should at least add a string part (for sparse solos).
    # Verified good parts: 0, 1, 6, 7
    partIndices = [0, 1, 6, 7]
    comp_stream = stream.Voice()
    comp_stream.append(
        [j.flat for i, j in enumerate(midi_data) if i in partIndices])

    # Full stream containing both the melody and the accompaniment.
    # All parts are flattened.
    full_stream = stream.Voice()
    for i in range(len(comp_stream)):
        full_stream.append(comp_stream[i])
    full_stream.append(melody_voice)

    # Extract solo stream, assuming you know the positions: getElementsByOffset(i, j).
    # Note that for different instruments (with stream.flat), you NEED to use
    # stream.Part(), not stream.Voice().
    # Accompanied solo is in range [478, 548)
    solo_stream = stream.Voice()
    for part in full_stream:
        curr_part = stream.Part()
        curr_part.append(part.getElementsByClass(instrument.Instrument))
        curr_part.append(part.getElementsByClass(tempo.MetronomeMark))
        curr_part.append(part.getElementsByClass(key.KeySignature))
        curr_part.append(part.getElementsByClass(meter.TimeSignature))
        curr_part.append(
            part.getElementsByOffset(476, 548, includeEndBoundary=True))
        cp = curr_part.flat
        solo_stream.insert(cp)

    # Group by measure so you can classify.
    # Note that measure 0 is for the time signature, metronome, etc. which have
    # an offset of 0.0.
    melody_stream = solo_stream[-1]
    measures = OrderedDict()
    offsetTuples = [(int(n.offset / 4), n) for n in melody_stream]
    measureNum = 0  # for now, don't use real m. nums (119, 120)
    for key_x, group in groupby(offsetTuples, lambda x: x[0]):
        measures[measureNum] = [n[1] for n in group]
        measureNum += 1

    # Get the stream of chords.
    # offsetTuples_chords: group chords by measure number.
    chordStream = solo_stream[0]
    chordStream.removeByClass(note.Rest)
    chordStream.removeByClass(note.Note)
    offsetTuples_chords = [(int(n.offset / 4), n) for n in chordStream]

    # Generate the chord structure. Use just track 1 (piano) since it is
    # the only instrument that has chords.
    # Group into 4s, just like before.
    chords = OrderedDict()
    measureNum = 0
    for key_x, group in groupby(offsetTuples_chords, lambda x: x[0]):
        chords[measureNum] = [n[1] for n in group]
        measureNum += 1

    # Fix for the below problem.
    #   1) Find out why len(measures) != len(chords).
    #   ANSWER: resolves at end but melody ends 1/16 before last measure so doesn't
    #           actually show up, while the accompaniment's beat 1 right after does.
    #           Actually on second thought: melody/comp start on Ab, and resolve to
    #           the same key (Ab) so could actually just cut out last measure to loop.
    #           Decided: just cut out the last measure.
    del chords[len(chords) - 1]
    assert len(chords) == len(measures)

    return measures, chords
Example #14
def weave_data_frame_to_midi(data_frame,
                             midi_file_directory=os.getcwd(),
                             save_midi_file=True):
    if isinstance(data_frame, pd.DataFrame):

        # Each block below selects the relevant columns once, de-duplicates the
        # rows, and then builds the corresponding music21 objects.

        # One Score per distinct value in column 0.
        score_dict = {}
        score_rows = data_frame.iloc[:, 0:1].drop_duplicates()
        for idx in range(len(score_rows)):
            score_dict[score_rows.iloc[idx, 0]] = stream.Score()

        # One Part per distinct (column 0, column 1) pair, attached to its Score.
        part_dict = {}
        part_rows = data_frame.iloc[:, 0:2].drop_duplicates()
        for idx in range(len(part_rows)):
            if not math.isnan(part_rows.iloc[idx, 1]):
                part = stream.Part()
                part_dict[part_rows.iloc[idx, 1]] = part
                score_dict[part_rows.iloc[idx, 0]].append(part)

        # Instruments from columns 2-3.
        instrument_rows = data_frame.iloc[:, 0:4].drop_duplicates()
        for idx in range(len(instrument_rows)):
            if not math.isnan(instrument_rows.iloc[idx, 3]):
                if instrument_rows.iloc[idx, 2] == 'StringInstrument':
                    instrument_element = instrument.StringInstrument()
                else:
                    instrument_element = instrument.fromString(
                        instrument_rows.iloc[idx, 2])
                part_dict[instrument_rows.iloc[idx, 1]].append(instrument_element)
                instrument_element.offset = instrument_rows.iloc[idx, 3]

        # Metronome marks from columns 4-6.
        tempo_rows = data_frame.iloc[:, [0, 1, 4, 5, 6]].drop_duplicates()
        for idx in range(len(tempo_rows)):
            if not math.isnan(tempo_rows.iloc[idx, 3]):
                metronome_element = tempo.MetronomeMark(
                    tempo_rows.iloc[idx, 2], tempo_rows.iloc[idx, 3])
                part_dict[tempo_rows.iloc[idx, 1]].append(metronome_element)
                metronome_element.offset = tempo_rows.iloc[idx, 4]

        # Key signatures from columns 7-9.
        key_rows = data_frame.iloc[:, [0, 1, 7, 8, 9]].drop_duplicates()
        for idx in range(len(key_rows)):
            if not math.isnan(key_rows.iloc[idx, 4]):
                key_element = key.Key(key_rows.iloc[idx, 2],
                                      key_rows.iloc[idx, 3])
                part_dict[key_rows.iloc[idx, 1]].append(key_element)
                key_element.offset = key_rows.iloc[idx, 4]

        # Time signatures from columns 10-11.
        meter_rows = data_frame.iloc[:, [0, 1, 10, 11]].drop_duplicates()
        for idx in range(len(meter_rows)):
            if not math.isnan(meter_rows.iloc[idx, 3]):
                time_signature_element = meter.TimeSignature(
                    meter_rows.iloc[idx, 2])
                part_dict[meter_rows.iloc[idx, 1]].append(time_signature_element)
                time_signature_element.offset = meter_rows.iloc[idx, 3]

        # Voices from columns 12-13, attached to their Parts.
        voice_dict = {}
        voice_rows = data_frame.iloc[:, [0, 1, 12, 13]].drop_duplicates()
        for idx in range(len(voice_rows)):
            if not math.isnan(voice_rows.iloc[idx, 2]):
                voice = stream.Voice()
                voice_dict[voice_rows.iloc[idx, 2]] = voice
                part_dict[voice_rows.iloc[idx, 1]].append(voice)
                voice.offset = voice_rows.iloc[idx, 3]

        # All note/rest/chord rows use columns [0, 1, 12, 14, 15, 16, 17, 18, 19, 20];
        # select them once instead of repeating the iloc expression on every access.
        note_rows = data_frame.iloc[:, [0, 1, 12, 14, 15, 16, 17, 18, 19, 20]]
        for idx in range(len(note_rows)):
            try:
                if not math.isnan(note_rows.iloc[idx, 9]):
                    element_type = note_rows.iloc[idx, 3]
                    if element_type == "Note":
                        note_element = note.Note()
                        voice_dict[note_rows.iloc[idx, 2]].append(note_element)
                        note_element.pitch.name = note_rows.iloc[idx, 4]
                        note_element.pitch.octave = note_rows.iloc[idx, 6]
                        note_element.volume.velocity = note_rows.iloc[idx, 7]
                        note_element.duration.quarterLength = note_rows.iloc[idx, 8]
                        note_element.offset = note_rows.iloc[idx, 9]

                    elif element_type == "Rest":
                        rest_element = note.Rest()
                        voice_dict[note_rows.iloc[idx, 2]].append(rest_element)
                        rest_element.duration.quarterLength = note_rows.iloc[idx, 8]
                        rest_element.offset = note_rows.iloc[idx, 9]

                    elif element_type == "Chord":
                        # Start a new Chord when this row does not continue the
                        # previous one (different offset, or the previous row
                        # was not a Chord row).
                        if (note_rows.iloc[idx - 1, 9] != note_rows.iloc[idx, 9]
                                or note_rows.iloc[idx - 1, 3] != 'Chord'):
                            chord_element = chord.Chord()
                            voice_dict[note_rows.iloc[idx, 2]].append(chord_element)

                        if len(note_rows.iloc[idx, 4]) > 2:
                            print("Chords given as multiple pitches in one row "
                                  "are still under development.")
                            return False
                        else:
                            pitch_element = note.Note()
                            pitch_element.pitch.name = note_rows.iloc[idx, 4]
                            pitch_element.pitch.octave = note_rows.iloc[idx, 6]
                            pitch_element.volume.velocity = note_rows.iloc[idx, 7]
                            pitch_element.duration.quarterLength = note_rows.iloc[idx, 8]
                            chord_element.add(pitch_element)
                            chord_element.offset = note_rows.iloc[idx, 9]
                    else:
                        print(str(idx) + "th row cannot be converted to a MIDI file")
            except KeyError:
                pass

            print_progress_bar_weaving(idx, data_frame)

        if score_dict:
            for _midi_file_name, score in score_dict.items():
                if score:
                    midi_file = midi.translate.streamToMidiFile(score)

                    if save_midi_file and midi_file:
                        midi_file.open(
                            midi_file_directory + '/' + _midi_file_name +
                            '_encoded.mid', 'wb')
                        midi_file.write()
                        midi_file.close()
                        print(midi_file_directory + '/' + _midi_file_name +
                              '_encoded.mid is saved')

    else:
        print("The inputted data isn't data frame")
        return False

    return score_dict
Example #15
                        tools.merge_streams(m_voice, t_voice).chordify())

    # Append right hand interlude
    tools.append_stream(right_hand, _right_hand_interlude())
    if section_index < 6:
        right_hand.append(
            clef.TrebleClef())  # Add treble clef except for last section

    # Left hand
    # ----------------------------------------------------------------------------------------------
    # Create the second m-voice
    m_voice2 = transformations.scalar_transposition(m_voice, -9,
                                                    reference_scale)

    # Create the pedal
    bottom_pedal = stream.Voice()
    top_pedal = stream.Stream()

    for i in range(6):
        # Define notes to use depending on the section
        if section_index in (0, 2):
            bottom_pedal_note = chord.Chord(["A3", "E4"])
        elif section_index in (1, 3):
            bottom_pedal_note = note.Note("A3")
            top_pedal_note = note.Note("E4")
        elif 3 < section_index < 7:
            bottom_pedal_note = note.Note("A2")
            top_pedal_note = note.Note("E3")
        else:
            bottom_pedal_note = note.Note("A2")
Example #16
def makeMeasures(
    s,
    meterStream=None,
    refStreamOrTimeRange=None,
    searchContext=False,
    innerBarline=None,
    finalBarline='final',
    bestClef=False,
    inPlace=False,
    ):
    '''
    Takes a stream and places all of its elements into
    measures (:class:`~music21.stream.Measure` objects)
    based on the :class:`~music21.meter.TimeSignature` objects
    placed within
    the stream. If no TimeSignatures are found in the
    stream, a default of 4/4 is used.

    If `inPlace` is True, the original Stream is modified and lost;
    if `inPlace` is False, this returns a modified deep copy.

    Many advanced features are available:

    (1) If a `meterStream` is given, the TimeSignatures in this
    stream are used instead of any found in the Stream.
    Alternatively, a single TimeSignature object
    can be provided in lieu of the stream. This feature lets you
    test out how a group of notes might be interpreted as measures
    in a number of different metrical schemes.

    (2) If `refStreamOrTimeRange` is provided, this Stream or List
    is used to give the span that you want to make measures for
    necessary to fill empty rests at the ends or beginnings of
    Streams, etc.  Say for instance you'd like to make a complete
    score from a short ossia section, then you might use another
    Part from the Score as a `refStreamOrTimeRange` to make sure
    that the appropriate measures of rests are added at either side.

    (3) If `innerBarline` is not None, the specified Barline object
    or string-specification of Barline style will be used to create
    Barline objects between every created Measure. The default is None.

    (4) If `finalBarline` is not None, the specified Barline object or
    string-specification of Barline style will be used to create a Barline
    objects at the end of the last Measure. The default is 'final'.

    The `searchContext` parameter determines whether or not context
    searches are used to find Clef and other notation objects.

    Here is a simple example of makeMeasures:

    A single measure of 4/4 is created from a Stream
    containing only three quarter notes:

    ::

        >>> from music21 import articulations
        >>> from music21 import clef
        >>> from music21 import meter
        >>> from music21 import note
        >>> from music21 import stream

    ::

        >>> sSrc = stream.Stream()
        >>> sSrc.append(note.QuarterNote('C4'))
        >>> sSrc.append(note.QuarterNote('D4'))
        >>> sSrc.append(note.QuarterNote('E4'))
        >>> sMeasures = sSrc.makeMeasures()
        >>> sMeasures.show('text')
        {0.0} <music21.stream.Measure 1 offset=0.0>
            {0.0} <music21.clef.TrebleClef>
            {0.0} <music21.meter.TimeSignature 4/4>
            {0.0} <music21.note.Note C>
            {1.0} <music21.note.Note D>
            {2.0} <music21.note.Note E>
            {3.0} <music21.bar.Barline style=final>

    Notice that the last measure is incomplete -- makeMeasures
    does not fill up incomplete measures.

    We can also check that the measure created has
    the correct TimeSignature:

    ::

        >>> sMeasures[0].timeSignature
        <music21.meter.TimeSignature 4/4>

    Now let's redo this work in 2/4 by putting a TimeSignature
    of 2/4 at the beginning of the stream and rerunning
    makeMeasures. Now we will have two measures, each with
    correct measure numbers:

    ::

        >>> sSrc.insert(0.0, meter.TimeSignature('2/4'))
        >>> sMeasuresTwoFour = sSrc.makeMeasures()
        >>> sMeasuresTwoFour.show('text')
        {0.0} <music21.stream.Measure 1 offset=0.0>
            {0.0} <music21.clef.TrebleClef>
            {0.0} <music21.meter.TimeSignature 2/4>
            {0.0} <music21.note.Note C>
            {1.0} <music21.note.Note D>
        {2.0} <music21.stream.Measure 2 offset=2.0>
            {0.0} <music21.note.Note E>
            {1.0} <music21.bar.Barline style=final>

    Let us put 10 quarter notes in a Part.

    After we run makeMeasures, we will have
    3 measures of 4/4 in a new Part object. This experiment
    demonstrates that running makeMeasures does not
    change the type of Stream you are using:

    ::

        >>> sSrc = stream.Part()
        >>> n = note.Note('E-4')
        >>> n.quarterLength = 1
        >>> sSrc.repeatAppend(n, 10)
        >>> sMeasures = sSrc.makeMeasures()
        >>> len(sMeasures.getElementsByClass('Measure'))
        3
        >>> sMeasures.__class__.__name__
        'Part'

    Demonstrate what makeMeasures will do when inPlace is True:

    ::

        >>> sScr = stream.Stream()
        >>> sScr.insert(0, clef.TrebleClef())
        >>> sScr.insert(0, meter.TimeSignature('3/4'))
        >>> sScr.append(note.Note('C4', quarterLength = 3.0))
        >>> sScr.append(note.Note('D4', quarterLength = 3.0))
        >>> sScr.makeMeasures(inPlace = True)
        >>> sScr.show('text')
        {0.0} <music21.stream.Measure 1 offset=0.0>
            {0.0} <music21.clef.TrebleClef>
            {0.0} <music21.meter.TimeSignature 3/4>
            {0.0} <music21.note.Note C>
        {3.0} <music21.stream.Measure 2 offset=3.0>
            {0.0} <music21.note.Note D>
            {3.0} <music21.bar.Barline style=final>

    If after running makeMeasures you run makeTies, it will also split
    long notes into smaller notes with ties.  Lyrics and articulations
    are attached to the first note.  Expressions (fermatas,
    etc.) will soon be attached to the last note but this is not yet done:

    ::

        >>> p1 = stream.Part()
        >>> p1.append(meter.TimeSignature('3/4'))
        >>> longNote = note.Note("D#4")
        >>> longNote.quarterLength = 7.5
        >>> longNote.articulations = [articulations.Staccato()]
        >>> longNote.lyric = "hi"
        >>> p1.append(longNote)
        >>> partWithMeasures = p1.makeMeasures()
        >>> dummy = partWithMeasures.makeTies(inPlace = True)
        >>> partWithMeasures.show('text')
        {0.0} <music21.stream.Measure 1 offset=0.0>
            {0.0} <music21.clef.TrebleClef>
            {0.0} <music21.meter.TimeSignature 3/4>
            {0.0} <music21.note.Note D#>
        {3.0} <music21.stream.Measure 2 offset=3.0>
            {0.0} <music21.note.Note D#>
        {6.0} <music21.stream.Measure 3 offset=6.0>
            {0.0} <music21.note.Note D#>
            {1.5} <music21.bar.Barline style=final>

    ::

        >>> allNotes = partWithMeasures.flat.notes
        >>> allNotes[0].articulations
        [<music21.articulations.Staccato>]

    ::

        >>> allNotes[1].articulations
        []

    ::

        >>> allNotes[2].articulations
        []

    ::

        >>> [allNotes[0].lyric, allNotes[1].lyric, allNotes[2].lyric]
        ['hi', None, None]

    '''
    from music21 import spanner
    from music21 import stream

    #environLocal.printDebug(['calling Stream.makeMeasures()'])

    # the srcObj should not be modified or changed
    # removed element copying below and now making a deepcopy of entire stream
    # must take a flat representation, as we need to be able to
    # position components, and sub-streams might hide elements that
    # should be contained

    if s.hasVoices():
        #environLocal.printDebug(['make measures found voices'])
        # cannot make flat here, as this would destroy stream partitions
        srcObj = copy.deepcopy(s.sorted)
        voiceCount = len(srcObj.voices)
    else:
        #environLocal.printDebug(['make measures found no voices'])
        # take flat and sorted version
        srcObj = copy.deepcopy(s.flat.sorted)
        voiceCount = 0

    #environLocal.printDebug([
    #    'Stream.makeMeasures(): passed in meterStream', meterStream,
    #    meterStream[0]])

    # may need to look in activeSite if no time signatures are found
    if meterStream is None:
        # get from this Stream, or search the contexts
        meterStream = srcObj.flat.getTimeSignatures(returnDefault=True,
                        searchContext=False,
                        sortByCreationTime=False)
        #environLocal.printDebug([
        #    'Stream.makeMeasures(): found meterStream', meterStream[0]])
    # if meterStream is a TimeSignature, use it
    elif isinstance(meterStream, meter.TimeSignature):
        ts = meterStream
        meterStream = stream.Stream()
        meterStream.insert(0, ts)

    #assert len(meterStream), 1

    #environLocal.printDebug([
    #    'makeMeasures(): meterStream', 'meterStream[0]', meterStream[0],
    #    'meterStream[0].offset',  meterStream[0].offset,
    #    'meterStream.elements[0].activeSite',
    #    meterStream.elements[0].activeSite])

    # need a SpannerBundle to store any found spanners and place
    # at the part level
    spannerBundleAccum = spanner.SpannerBundle()

    # get a clef for the entire stream; this will use bestClef
    # presently, this only gets the first clef
    # may need to store a clefStream and access changes in clefs
    # as is done with meterStream
    #clefStream = srcObj.getClefs(searchActiveSite=True,
    #                searchContext=searchContext,
    #                returnDefault=True)
    #clefObj = clefStream[0]
    #del clefStream
    clefObj = srcObj.getContextByClass('Clef') 
    if clefObj is None:
        clefObj = srcObj.bestClef()

    #environLocal.printDebug([
    #    'makeMeasures(): first clef found after copying and flattening',
    #    clefObj])

    # for each element in stream, need to find max and min offset
    # assume that flat/sorted options will be set before processing
    # list of start, start+dur, element
    offsetMap = srcObj.offsetMap
    #environLocal.printDebug(['makeMeasures(): offset map', offsetMap])
    #offsetMap.sort() not necessary; just get min and max
    if len(offsetMap) > 0:
        oMax = max([x['endTime'] for x in offsetMap])
    else:
        oMax = 0

    # if a ref stream is provided, get highest time from there
    # only if it is greater than the highest time yet encountered
    if refStreamOrTimeRange is not None:
        if isinstance(refStreamOrTimeRange, stream.Stream):
            refStreamHighestTime = refStreamOrTimeRange.highestTime
        else:  # assume its a list
            refStreamHighestTime = max(refStreamOrTimeRange)
        if refStreamHighestTime > oMax:
            oMax = refStreamHighestTime

    # create a stream of measures to contain the offsets range defined
    # create as many measures as needed to fit in oMax
    post = s.__class__()
    post.derivation.origin = s
    post.derivation.method = 'makeMeasures'

    o = 0.0  # initial position of first measure is assumed to be zero
    measureCount = 0
    lastTimeSignature = None
    while True:
        m = stream.Measure()
        m.number = measureCount + 1
        #environLocal.printDebug([
        #    'handling measure', m, m.number, 'current offset value', o,
        #    meterStream._reprTextLine()])
        # get active time signature at this offset
        # make a copy and assign it to the measure
        thisTimeSignature = meterStream.getElementAtOrBefore(o)
        #environLocal.printDebug([
        #    'm.number', m.number, 'meterStream.getElementAtOrBefore(o)',
        #    meterStream.getElementAtOrBefore(o), 'lastTimeSignature',
        #    lastTimeSignature, 'thisTimeSignature', thisTimeSignature ])

        if thisTimeSignature is None and lastTimeSignature is None:
            raise stream.StreamException(
                'failed to find TimeSignature in meterStream; '
                'cannot process Measures')
        if thisTimeSignature is not lastTimeSignature \
            and thisTimeSignature is not None:
            lastTimeSignature = thisTimeSignature
            # this seems redundant
            #lastTimeSignature = meterStream.getElementAtOrBefore(o)
            m.timeSignature = copy.deepcopy(thisTimeSignature)
            #environLocal.printDebug(['assigned time sig', m.timeSignature])

        # only add a clef for the first measure when automatically
        # creating Measures; this clef is from getClefs, called above
        if measureCount == 0:
            m.clef = clefObj
            #environLocal.printDebug(
            #    ['assigned clef to measure', measureCount, m.clef])

        # add voices if necessary (voiceCount > 0)
        for voiceIndex in range(voiceCount):
            v = stream.Voice()
            v.id = voiceIndex  # id is voice index, starting at 0
            m._insertCore(0, v)

        # avoid an infinite loop; use lastTimeSignature, which is guaranteed
        # non-None here (thisTimeSignature may be None between changes)
        if lastTimeSignature.barDuration.quarterLength == 0:
            raise stream.StreamException(
                'time signature {0!r} has no duration'.format(
                    lastTimeSignature))
        post._insertCore(o, m)  # insert measure
        # increment by meter length
        o += lastTimeSignature.barDuration.quarterLength
        if o >= oMax:  # may be zero
            break  # if length of this measure exceeds the last offset
        else:
            measureCount += 1

    # populate measures with elements
    for ob in offsetMap:
        start, end, e, voiceIndex = (
            ob['offset'],
            ob['endTime'],
            ob['element'],
            ob['voiceIndex'],
            )

        #environLocal.printDebug(['makeMeasures()', start, end, e, voiceIndex])
        # iterate through all measures, finding a measure that
        # can contain this element

        # collect all spanners and move to outer Stream
        if e.isSpanner:
            spannerBundleAccum.append(e)
            continue

        match = False
        lastTimeSignature = None
        for i in range(len(post)):
            m = post[i]
            if m.timeSignature is not None:
                lastTimeSignature = m.timeSignature
            # get start and end offsets for each measure
            # seems like should be able to use m.duration.quarterLengths
            mStart = m.getOffsetBySite(post)
            mEnd = mStart + lastTimeSignature.barDuration.quarterLength
            # if the element's start fits within this measure, break and use it;
            # an element's offset cannot start on the measure's end
            if start >= mStart and start < mEnd:
                match = True
                #environLocal.printDebug([
                #    'found measure match', i, mStart, mEnd, start, end, e])
                break
        if not match:
            raise stream.StreamException(
                'cannot place element %s with start/end %s/%s '
                'within any measures' % (e, start, end))

        # find offset in the temporal context of this measure
        # i is the index of the measure that this element starts at
        # mStart, mEnd are correct
        oNew = start - mStart  # remove measure offset from element offset

        # insert element at this offset in the measure
        # not copying elements here!

        # in the case of a Clef, and possibly other measure attributes,
        # the element may have already been placed in this measure
        # we need to only exclude elements that are placed in the special
        # first position
        if m.clef is e:
            continue
        # do not accept another time signature at the zero position: this
        # is handled above
        if oNew == 0 and 'TimeSignature' in e.classes:
            continue

        #environLocal.printDebug(['makeMeasures()', 'inserting', oNew, e])
        # NOTE: cannot use _insertCore here for some reason
        if voiceIndex is None:
            m.insert(oNew, e)
        else:  # insert into voice specified by the voice index
            m.voices[voiceIndex].insert(oNew, e)

    # add found spanners to higher-level; could insert at zero
    for sp in spannerBundleAccum:
        post.append(sp)

    post._elementsChanged()

    # clean up temporary streams to avoid extra site accumulation
    del srcObj

    # set barlines if necessary
    lastIndex = len(post.getElementsByClass('Measure')) - 1
    for i, m in enumerate(post.getElementsByClass('Measure')):
        if i != lastIndex:
            if innerBarline not in ['regular', None]:
                m.rightBarline = innerBarline
        else:
            if finalBarline not in ['regular', None]:
                m.rightBarline = finalBarline
        if bestClef:
            m.clef = m.bestClef()  # may need flat for voices

    if not inPlace:
        return post  # returns a new stream populated w/ new measure streams
    else:  # clear the stored elements list of this Stream and repopulate
        # with Measures created above
        s._elements = []
        s._endElements = []
        s._elementsChanged()
        for e in post.sorted:
            # may need to handle spanners; already have s as site
            s.insert(e.getOffsetBySite(post), e)
Example #17
    tbl = pickle.load(handle)

with open('inv_tbl_entre_dos_aguas.pickle', 'rb') as handle:
    inv_tbl = pickle.load(handle)

with open('lcm.pickle', 'rb') as handle:
    lcm = pickle.load(handle)

midi_stream = stream.Stream()
guitar_part = stream.Voice()
midi_stream.append(instrument.Guitar())

for index, row in encoded_part.iterrows():
    note_name = inv_tbl[row['Note']]
    if note_name == 'REST':
        nt = note.Rest()
    elif ' ' in note_name:
        nt = chord.Chord(note_name)
    else:
        nt = note.Note(note_name)
    nt.duration.quarterLength = float(row['Duration']) / lcm
    nt.offset = float(row['Offset']) / lcm
    # insert() honours the offset set above; append() would place the element
    # at the end of the voice and override it
    guitar_part.insert(nt)
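
# Likely continuation (a sketch; the original snippet ends here): attach the
# voice to the stream and write a MIDI file, mirroring the saving pattern in
# the examples above. Assumes `from music21 import midi`; the output path is
# hypothetical.
midi_stream.insert(0, guitar_part)
mf = midi.translate.streamToMidiFile(midi_stream)
mf.open('decoded_output.mid', 'wb')
mf.write()
mf.close()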