Example #1
    def testTrillExtensionA(self):
        '''Test basic wave line creation and output, as well as passing
        objects through make measure calls. 
        '''
        from music21 import stream, note, chord, expressions
        from music21.musicxml import m21ToString
        s = stream.Stream()
        s.repeatAppend(note.Note(), 12)
        n1 = s.notes[0]
        n2 = s.notes[-1]
        sp1 = expressions.TrillExtension(n1, n2)
        s.append(sp1)
        raw = m21ToString.fromMusic21Object(s)
        self.assertEqual(raw.count('wavy-line'), 2)

        s = stream.Stream()
        s.repeatAppend(chord.Chord(['c-3', 'g4']), 12)
        n1 = s.notes[0]
        n2 = s.notes[-1]
        sp1 = expressions.TrillExtension(n1, n2)
        s.append(sp1)
        raw = m21ToString.fromMusic21Object(s)
        #s.show()
        self.assertEqual(raw.count('wavy-line'), 2)
Example #2
    def testOneOffDeletionStream(self):
        '''
        Two streams that are identical except that one has an extra note
        should have a similarity score of 0.75.
        '''
        from music21 import stream
        from music21 import note

        target = stream.Stream()
        source = stream.Stream()

        note1 = note.Note("C4")
        note2 = note.Note("D4")
        note3 = note.Note("E4")
        note4 = note.Note("F4")

        target.append([note1, note2, note3, note4])
        source.append([note1, note2, note3])

        sa = StreamAligner(target, source)
        sa.align()
        sa.showChanges()

        self.assertEqual(sa.similarityScore, .75)
Example #3
    def testCompound(self):
        a = stream.Stream()
        meterStrDenominator = [1, 2, 4, 8, 16, 32]
        meterStrNumerator = [2, 3, 4, 5, 6, 7, 9, 11, 12, 13]

        for i in range(30):
            msg = []
            for j in range(1, random.choice([2, 4])):
                msg.append('%s/%s' % (random.choice(meterStrNumerator),
                                      random.choice(meterStrDenominator)))
            ts = TimeSignature('+'.join(msg))
            m = stream.Measure()
            m.timeSignature = ts
            a.insert(m.timeSignature.barDuration.quarterLength, m)
        a.show()
Example #4
    def testReferences(self):
        s = stream.Stream()
        note1 = note.Note('C4')
        note2 = note.Note('G4')
        s.append([note1, note2])

        h = Hasher()
        h.includeReference = True
        hashes = h.hashStream(s)

        note1ref = hashes[0].reference
        note2ref = hashes[1].reference

        self.assertEqual(note1.id, note1ref.id)
        self.assertEqual(note2.id, note2ref.id)
Example #5
    def testRealizeVolumeC(self):
        from music21 import stream, note, articulations

        s = stream.Stream()
        s.repeatAppend(note.Note('g3'), 16)

        for i in range(0, 16, 3):
            s.notes[i].articulations.append(articulations.Accent())
        for i in range(0, 16, 4):
            s.notes[i].articulations.append(articulations.StrongAccent())

        match = [n.volume.cachedRealizedStr for n in s.notes]
        self.assertEqual(match, ['0.96', '0.71', '0.71', '0.81', '0.86', '0.71', '0.81', 
                                 '0.71', '0.86', '0.81', '0.71', '0.71', '0.96', '0.71', 
                                 '0.71', '0.81'])
Example #6
def convert_to_midi(sequence, bpm=60, output_file='./midi_output/music.mid'):
    """Save sequence as a midi file (with path = output_file). sequence
    can be from the original dataset or a new sequence generated by a
    trained model"""
    offset = 0  # the distance in quarter-notes of the note/chord/rest
    # being written from the beginning
    output_notes = [instrument.Piano(), tempo.MetronomeMark(number=bpm)]

    bps = bpm / 60  # beats per second
    # create note, chord, and rest objects
    for vector in sequence:
        # convert from seconds to beats; use a fresh Duration per event so
        # that every note/chord keeps its own length (sharing one mutable
        # Duration object would give them all the final duration)
        converted_duration = duration.Duration(vector[-1] * bps)

        if np.sum(vector[:-1]) > 1:  # chord
            num_active = int(np.sum(vector[:-1]))
            indices_in_chord = np.argsort(vector[:-1])[-num_active:]
            notes_in_chord = [piano_idx_to_note(i) for i in indices_in_chord]
            notes = []
            for cur_note in notes_in_chord:
                new_note = note.Note(cur_note)
                new_note.storedInstrument = instrument.Piano()
                notes.append(new_note)
            new_chord = chord.Chord(notes)
            new_chord.offset = offset
            new_chord.duration = converted_duration
            output_notes.append(new_chord)

        elif np.sum(vector[:-1]) == 1:  # note
            index = np.argmax(vector[:-1])
            new_note = piano_idx_to_note(index)
            new_note = note.Note(new_note)
            new_note.offset = offset
            new_note.storedInstrument = instrument.Piano()
            new_note.duration = converted_duration
            output_notes.append(new_note)

        elif np.sum(vector[:-1]) == 0:  # rest
            new_rest = note.Rest()
            new_rest.offset = offset
            new_rest.duration = converted_duration
            output_notes.append(new_rest)

        offset += vector[-1]

    midi_stream = stream.Stream(output_notes)
    midi_stream.write('midi', fp=output_file)
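
A quick way to smoke-test convert_to_midi is a tiny hand-built sequence. This sketch assumes the vector layout implied by the function body (one slot per piano key plus a trailing duration in seconds; the 88-key width is an assumption) and that piano_idx_to_note is in scope:

# A minimal sketch; 88 key slots + 1 duration slot per vector is an
# assumption, as is piano_idx_to_note() being defined in the same module.
import os
import numpy as np

os.makedirs('./midi_output', exist_ok=True)
v_note = np.zeros(89)
v_note[40] = 1.0    # exactly one active key -> the "note" branch
v_note[-1] = 0.5    # half a second long
v_rest = np.zeros(89)
v_rest[-1] = 0.25   # no active keys -> the "rest" branch
convert_to_midi([v_note, v_rest], bpm=120)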
Example #7
    def image2midi(self, outdir="output_midi"):
        """ convert the output from the prediction to notes and create a midi file
            from the notes """
        offset = 0
        output_notes = []

        # create note and chord objects based on the values generated by the model

        prev_notes = updateNotes(self.im_arr.T[0, :], {})
        for column in self.im_arr.T[1:, :]:
            notes = column2notes(column)
            # pattern is a chord
            notes_in_chord = notes
            old_notes = prev_notes.keys()
            for old_note in old_notes:
                if old_note not in notes_in_chord:
                    new_note = note.Note(old_note,
                                         quarterLength=prev_notes[old_note])
                    new_note.storedInstrument = instrument.Piano()
                    if offset - prev_notes[old_note] >= 0:
                        new_note.offset = offset - prev_notes[old_note]
                        output_notes.append(new_note)
                    elif offset == 0:
                        new_note.offset = offset
                        output_notes.append(new_note)
                    else:
                        print(offset, prev_notes[old_note], old_note)

            prev_notes = updateNotes(notes_in_chord, prev_notes)

            # increase offset each iteration so that notes do not stack
            offset += resolution

        for old_note in prev_notes.keys():
            new_note = note.Note(old_note, quarterLength=prev_notes[old_note])
            new_note.storedInstrument = instrument.Piano()
            new_note.offset = offset - prev_notes[old_note]

            output_notes.append(new_note)

        midi_stream = stream.Stream(output_notes)
        Path(outdir).mkdir(parents=True, exist_ok=True)
        out_path = outdir + "/" + self.image_path.split("/")[-1].replace(
            ".png", ".mid")
        midi_stream.write('midi', fp=out_path)
        return out_path
Example #8
    def xtestRealize(self):
        from music21 import note
        from music21 import stream
        n1 = note.Note("D4")
        n1.quarterLength = 4
        n1.expressions.append(WholeStepMordent())
        expList = realizeOrnaments(n1)
        st1 = stream.Stream()
        st1.append(expList)
        st1n = st1.notes
        self.assertEqual(st1n[0].name, "D")
        self.assertEqual(st1n[0].quarterLength, 0.125)
        self.assertEqual(st1n[1].name, "C")
        self.assertEqual(st1n[1].quarterLength, 0.125)
        self.assertEqual(st1n[2].name, "D")
        self.assertEqual(st1n[2].quarterLength, 3.75)
Example #9
def noteArrayToStream(note_array):
    """
    Convert a numpy array containing a Melody-RNN sequence into a music21 stream.
    """
    df = noteArrayToDataFrame(note_array)
    melody_stream = stream.Stream()
    for index, row in df.iterrows():
        if row.code == MELODY_NO_EVENT:
            new_note = note.Rest() # bit of an oversimplification, doesn't produce long notes.
        elif row.code == MELODY_NOTE_OFF:
            new_note = note.Rest()
        else:
            new_note = note.Note(row.code)
        new_note.quarterLength = row.duration
        melody_stream.append(new_note)
    return melody_stream
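
A hedged usage sketch, assuming MELODY_NO_EVENT, MELODY_NOTE_OFF, and noteArrayToDataFrame come from the same Melody-RNN helper module as this function:

# MELODY_NO_EVENT and MELODY_NOTE_OFF are assumed module-level constants
# from the same helper module; codes are MIDI pitch numbers.
import numpy as np

arr = np.array([60, 62, MELODY_NO_EVENT, 64, MELODY_NOTE_OFF])
noteArrayToStream(arr).show('text')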
Example #10
    def runStreamIterationByElements(self):
        '''Stream iteration by .elements access
        '''
        from music21 import note, stream
        # create a stream with 1000 notes and 500 rests
        s = stream.Stream()
        for i in range(1000):
            n = note.Note()
            s.append(n)
        for i in range(500):
            r = note.Rest()
            s.append(r)

        for i in range(100):
            for j in s.elements:  # this will create an iterator instance
                pass
Example #11
    def testGetContextSearchA(self):
        from music21 import stream, note, volume, dynamics

        s = stream.Stream()
        d1 = dynamics.Dynamic('mf')
        s.insert(0, d1)
        d2 = dynamics.Dynamic('f')
        s.insert(2, d2)

        n1 = note.Note('g')
        v1 = volume.Volume(client=n1)
        s.insert(4, n1)

        # can get dynamics from volume object
        self.assertEqual(v1.client.getContextByClass('Dynamic'), d2)
        self.assertEqual(v1.getDynamicContext(), d2)
Example #12
def export_midi(notes):
    s = stream.Stream()
    for nt in notes:
        if '.' in nt[0]:
            s.append(chord.Chord(nt[0].split('.'), quarterLength=nt[1]))
        else:
            s.append(note.Note(nt[0], quarterLength=nt[1]))

    try:
        o_file = sys.argv[1]
        filepath = 'output/' + o_file + '.mid'
        path = s.write('midi', fp=filepath)
    except IndexError:
        path = s.write('midi', fp='output/song.mid')

    print(f"Song written to {path}")
Example #13
def create_midi(prediction_output):
    """ Convert the output from the prediction to notes and create a midi file from the notes.
    
    Args:
        prediction_output (list): Generated sequence of notes, rests, and chords.
        
    Returns:
        None.
    """
    
    # Set offset to zero
    offset = 0
    
    # Create an empty list to record music21 note, chord, and rest objects
    output_notes = []

    # Create note and chord objects based on the values generated by the model
    for pattern in prediction_output:
        if ('.' in pattern) or pattern.isdigit():
            # Pattern is a chord
            notes_in_chord = pattern.split('.')
            notes = []
            for current_note in notes_in_chord:
                new_note = note.Note(int(current_note))
                new_note.storedInstrument = instrument.Piano()
                notes.append(new_note)
            new_chord = chord.Chord(notes)
            new_chord.offset = offset
            output_notes.append(new_chord)
        elif pattern == 'Rest':
            # Pattern is a rest; give it an offset like the other branches
            new_rest = note.Rest()
            new_rest.offset = offset
            output_notes.append(new_rest)
        else:
            # Pattern is a note
            new_note = note.Note(pattern)
            new_note.offset = offset
            new_note.storedInstrument = instrument.Piano()
            output_notes.append(new_note)

        # Increase offset each iteration so that notes do not stack
        offset += 0.5

    # Create music21 Stream object from the list of music21 notes, chords, and rests
    midi_stream = stream.Stream(output_notes)

    # Write the music21 Stream to a midi file
    midi_stream.write('midi', fp='generations/generated_song.mid')
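
A usage sketch under the encoding this function expects: dot-joined MIDI numbers form chords, the literal string 'Rest' a rest, and anything else a pitch name; the generations/ output directory is an assumption:

import os

os.makedirs('generations', exist_ok=True)
create_midi(['C4', '60.64.67', 'Rest', 'E4'])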
Example #14
def save_song(song, folder):
    # (unused MidiTrack/DeltaTime scaffolding removed)
    new_stream = stream.Stream()
    save_dir = params['save_dir'] + folder + '\\'

    for element in song:
        if ('.' in element) or element.isdigit():
            chord_component = element.split('-')
            duration_ = 1.0
            if '/' in chord_component[1]:
                duration_components = chord_component[1].split('/')
                duration_ = (int(duration_components[0])) / (int(
                    duration_components[1]))
            else:
                duration_ = chord_component[1]

            notes_in_chord = chord_component[0].split('.')
            notes = []
            for current_note in notes_in_chord:
                new_note = note.Note(int(current_note))
                notes.append(new_note)
            new_chord = chord.Chord(notes)
            new_chord.quarterLength = float(duration_)
            new_stream.append(new_chord)

        elif '.' not in element and (len(element) != 0):
            note_component = element.split('-')
            duration_ = None
            if '/' in note_component[1]:
                duration_components = note_component[1].split('/')
                duration_ = (int(duration_components[0])) / (int(
                    duration_components[1]))
            else:
                duration_ = note_component[1]
            new_note = note.Note(int(note_component[0]))
            new_note.quarterLength = float(duration_)
            new_stream.append(new_note)
        elif element is "":
            new_stream.append(rest.Rest())

    name = folder + 'file_'
    count = len(os.listdir(save_dir)) + 1
    midi_out = new_stream.write('midi',
                                fp=save_dir + name + str(count) + ".mid")
Example #15
    def create_midi(self, prediction_output):
        """ convert the output from the prediction to notes and create a midi file
            from the notes """
        offset = 0
        output_notes = []

        # create note and chord objects based on the values generated by the model
        for pattern in prediction_output:
            if "$" in pattern:
                pattern, dur = pattern.split("$")
                if "/" in dur:
                    a, b = dur.split("/")
                    dur = float(a) / float(b)
                else:
                    dur = float(dur)

            # pattern is a chord
            if ("." in pattern) or pattern.isdigit():
                notes_in_chord = pattern.split(".")
                notes = []
                for current_note in notes_in_chord:
                    new_note = note.Note(int(current_note))
                    new_note.storedInstrument = instrument.Piano()
                    notes.append(new_note)
                new_chord = chord.Chord(notes)
                new_chord.offset = offset
                new_chord.duration = duration.Duration(dur)
                output_notes.append(new_chord)
            # pattern is a rest
            elif pattern is "NULL":
                offset += TIMESTEP
            # pattern is a note
            else:
                new_note = note.Note(pattern)
                new_note.offset = offset
                new_note.storedInstrument = instrument.Piano()
                new_note.duration = duration.Duration(dur)
                output_notes.append(new_note)

            # increase offset each iteration so that notes do not stack
            offset += TIMESTEP

        midi_stream = stream.Stream(output_notes)

        output_file = os.path.basename(self.weights) + ".mid"
        print("output to " + output_file)
        midi_stream.write("midi", fp=output_file)
Example #16
    def create_midi(self, prediction_output):
        """ convert the output from the prediction to notes and create a midi file
            from the notes """
        offset = 0
        output_notes = []

        # create note and chord objects based on the values generated by the model
        for pattern in prediction_output:
            if '$' in pattern:
                pattern, dur = pattern.split('$')
                if '/' in dur:
                    a, b = dur.split('/')
                    dur = float(a) / float(b)
                else:
                    dur = float(dur)

            # pattern is a chord
            if ('.' in pattern) or pattern.isdigit():
                notes_in_chord = pattern.split('.')
                notes = []
                for current_note in notes_in_chord:
                    new_note = note.Note(int(current_note))
                    new_note.storedInstrument = instrument.Piano()
                    notes.append(new_note)
                new_chord = chord.Chord(notes)
                new_chord.offset = offset
                new_chord.duration = duration.Duration(dur)
                output_notes.append(new_chord)
            # pattern is a rest
            elif pattern == 'NULL':
                offset += TIMESTEP
            # pattern is a note
            else:
                new_note = note.Note(pattern)
                new_note.offset = offset
                new_note.storedInstrument = instrument.Piano()
                new_note.duration = duration.Duration(dur)
                output_notes.append(new_note)

            # increase offset each iteration so that notes do not stack
            offset += TIMESTEP

        midi_stream = stream.Stream(output_notes)

        output_file = MODEL_NAME + '.mid'
        print('output to ' + output_file)
        midi_stream.write('midi', fp=output_file)
Example #17
def corpusFindMelodicSevenths(show=True):
    # find and display melodic sevenths
    import os
    from music21 import corpus
    from music21.analysis import discrete

    mid = discrete.MelodicIntervalDiversity()
    groupEast = corpus.search('shanxi', 'locale')
    groupWest = corpus.search('niederlande', 'locale')

    found = []
    for name, group in [('shanxi', groupEast), ('niederlande', groupWest)]:
        for fp, n in group:
            s = converter.parse(fp, number=n)
            intervalDict = mid.countMelodicIntervals(s)

            for key in sorted(intervalDict.keys()):
                if key in ['m7', 'M7']:
                    found.append([fp, n, s])

    results = stream.Stream()
    for fp, num, s in found:
        environLocal.printDebug(['working with found', fp, num])
        # this assumes these are all monophonic
        noteStream = s.flat.getElementsByClass('Note')
        for i, n in enumerate(noteStream):
            if i <= len(noteStream) - 2:
                nNext = noteStream[i + 1]
            else:
                nNext = None

            if nNext is not None:
                #environLocal.printDebug(['creating interval from notes:', n, nNext, i])
                intv = interval.notesToInterval(n, nNext)
                environLocal.printDebug(['got interval', intv.name])
                if intv.name in ['m7', 'M7']:
                    #n.addLyric(s.metadata.title)
                    junk, fn = os.path.split(fp)
                    n.addLyric('%s: %s' % (fn, num))

                    m = noteStream.extractContext(
                        n, 1, 2, forceOutputClass=stream.Measure)
                    m.makeAccidentals()
                    m.timeSignature = m.bestTimeSignature()
                    results.append(m)
    if show:
        results.show()
Example #18
def produce_midi(model_output, file):
    """ convert to notes and make a midi file """
    offset = 0
    output_notes = []

    # create note, chord, rests with note durations
    for element in model_output:
        # chord
        if (':' in element) or element[0].isdigit():
            notes_in_chord = element.split(':')
            notes = []
            dur = ''
            for current_note in notes_in_chord:
                if '~' in current_note:
                    # last element of chord will have duration appended
                    get_duration = current_note.split('~')
                    new_note = note.Note(int(get_duration[0]))
                    new_note.storedInstrument = instrument.Piano()
                    notes.append(new_note)
                    dur = get_duration[1]
                else:
                    new_note = note.Note(int(current_note))
                    new_note.storedInstrument = instrument.Piano()
                    notes.append(new_note)
            new_chord = chord.Chord(notes)
            new_chord.offset = offset
            new_chord.quarterLength = float(dur)
            output_notes.append(new_chord)
        # rest
        elif 'r' in element:
            new_rest = note.Rest()
            new_rest.offset = offset
            output_notes.append(new_rest)
        # note
        else:
            get_duration = element.split('~')
            new_note = note.Note(get_duration[0])
            new_note.offset = offset
            new_note.storedInstrument = instrument.Piano()
            new_note.quarterLength = float(get_duration[1])
            output_notes.append(new_note)

        offset += 0.5

    midi_stream = stream.Stream(output_notes)

    midi_stream.write('midi', fp=f'test_output_{file[0:17]}.mid')
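
A usage sketch of the encoding read off the branches above: ':' joins chord tones, '~' appends the quarter-length, and 'r' marks a rest; the file argument only feeds the output filename:

# A minimal sketch; only the first 17 characters of `file` end up in
# the output filename, so any string works here.
produce_midi(['60:64:67~1.0', 'r', 'C4~0.5'], 'weights-demo-0001.hdf5')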
Example #19
def part_to_codestring(part):
    notes = list(part.notes)
    codestring = ''
    for i in range(len(notes) - 2):
        n_0 = notes[i]
        n_1 = notes[i + 1]
        if isinstance(n_0, chord.Chord):
            n_0 = n_0[0]
        if isinstance(n_1, chord.Chord):
            n_1 = n_1[0]

        interval_obj = interval.notesToGeneric(n_0, n_1)
        interval_num = interval_obj.directed
        # print(interval_num)
        codestring += interval_to_codestring(interval_num)
    return codestring
Example #20
def convert_pred_to_midi(prediction_output, params):

    midi_stream = stream.Stream()

    for pattern in prediction_output:
        n_pattern, d_pattern = pattern

        if ('.' in n_pattern):

            notes_in_chord = n_pattern.split('.')
            chord_notes = []

            for curr_note in notes_in_chord:

                new_note = note.Note(curr_note)
                new_note.duration = duration.Duration(d_pattern)
                new_note.storedInstrument = instrument.Violoncello()
                chord_notes.append(new_note)

            new_chord = chord.Chord(chord_notes)
            midi_stream.append(new_chord)

        elif n_pattern == 'rest':

            new_note = note.Rest()
            new_note.duration = duration.Duration(d_pattern)
            new_note.storedInstrument = instrument.Violoncello()
            midi_stream.append(new_note)

        elif n_pattern != 'START':

            new_note = note.Note(n_pattern)
            new_note.duration = duration.Duration(d_pattern)
            new_note.storedInstrument = instrument.Violoncello()
            midi_stream.append(new_note)

    midi_stream = midi_stream.chordify()
    timestr = time.strftime('%Y%m%d-%H%M%S')
    out = params['output_folder']
    fp = os.path.join(out, f'output-{timestr}.mid')
    midi_stream.write('midi', fp=fp)

    print(f'Midi generated\nSaved at: {fp}')

    return None
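
A usage sketch: each pattern is a (pitch, duration) tuple, 'START' tokens are skipped, and params['output_folder'] must already exist:

import os

# 'out' is a hypothetical output folder created just for this sketch.
os.makedirs('out', exist_ok=True)
convert_pred_to_midi(
    [('START', 0), ('C4', 1.0), ('E4.G4', 0.5), ('rest', 0.5), ('D4', 1.0)],
    {'output_folder': 'out'})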
Example #21
    def testDynamicsPositionB(self):
        import random
        from music21 import stream, note, layout
        s = stream.Stream()
        for i in range(6):
            m = stream.Measure(number=i + 1)
            m.append(layout.SystemLayout(isNew=True))
            m.append(note.Rest(type='whole'))
            s.append(m)
        for m in s.getElementsByClass('Measure'):
            offsets = [x * 0.25 for x in range(16)]
            random.shuffle(offsets)
            offsets = offsets[:4]
            for o in offsets:
                d = Dynamic('mf')
                d.positionVertical = 20
                m.insert(o, d)
Example #22
    def runGetElementsByClassString(self):
        '''Getting elements by string
        '''
        from music21 import note, stream

        # create a stream with 1000 notes and 500 rests
        s = stream.Stream()
        for i in range(1000):
            n = note.Note()
            s.append(n)
        for i in range(500):
            r = note.Rest()
            s.append(r)

        for i in range(2):
            post = s.flat.getElementsByClass(['Rest', 'Note'])
            self.assertEqual(len(post), 1500)
Example #23
def create_midi(prediction_output):
    """ convert the output from the prediction to notes and create a midi file
        from the notes """
    offset_plus = 0
    output_notes = []
    offset = 0
    # reduce the number of simultaneously playing notes
    zero_counter = 0
    # create note and chord objects based on the values generated by the model
    for pattern in prediction_output:
        # pattern is a chord
        s = pattern.split("|")
        pattern = s[0]
        octave = s[2]
        try:
            offset += float(s[1])
            if float(s[1]) == 0:
                zero_counter += 1
            else:
                zero_counter = 0
        except (ValueError, IndexError):
            print("error", s[1])
            continue
        if ('.' in pattern) or pattern.isdigit():
            notes_in_chord = pattern.split('.')
            notes = []
            for current_note in notes_in_chord:
                new_note = note.Note(int(current_note))
                new_note.storedInstrument = instrument.PipeOrgan()
                new_note.volume.velocity = 60
                notes.append(new_note)
            new_chord = chord.Chord(notes)
            new_chord.offset = offset
            output_notes.append(new_chord)
        # pattern is a note
        else:
            new_note = note.Note(pattern)
            new_note.offset = offset
            new_note.volume.velocity = 60
            new_note.octave = int(octave)
            new_note.storedInstrument = instrument.PipeOrgan()
            output_notes.append(new_note)

    midi_stream = stream.Stream(output_notes)

    midi_stream.write('midi', fp='uploads/out.mid')
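
A usage sketch of the 'pitch|delta|octave' encoding this function parses; the uploads/ output directory is assumed to exist:

import os

os.makedirs('uploads', exist_ok=True)  # the function writes uploads/out.mid
create_midi(['C|0.5|4', '60.64|0.5|4', 'D|1.0|5'])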
Example #24
def notelist_to_midi(notes, filename='test.mid'):
    s = stream.Stream()

    for n in notes:
        if n.midi == 0:
            midinote = note.Rest()
            print('REST!!')
        else:
            print('NOTE: midi=', n.midi)
            midinote = note.Note(midinote_to_pc_octave(n.midi))
        midinote.quarterLength = dur_string_to_quarterlength(n.dur_string)
        s.append(midinote)

    mf = streamToMidiFile(s)
    mf.open(filename, 'wb')
    mf.write()
    mf.close()
Example #25
def demoCombineTransform():
    from music21 import interval

    s1 = corpus.parse('bach/bwv103.6')
    s2 = corpus.parse('bach/bwv18.5-lz')

    keyPitch1 = s1.analyze('key')[0]
    unused_gap1 = interval.Interval(keyPitch1, pitch.Pitch('C'))

    keyPitch2 = s2.analyze('key')[0]
    unused_gap2 = interval.Interval(keyPitch2, pitch.Pitch('C'))

    sCompare = stream.Stream()
    sCompare.insert(0, s1.parts['bass'])
    sCompare.insert(0, s2.parts['bass'])

    sCompare.show()
Example #26
def save_mat2_mid(mat, fname='output/test.mid'):
    music_stream = stream.Stream()

    for dense_line in np.array(mat):
        (notes,) = np.where(dense_line[:-1] > 0.5)

        pitches = []
        for n in notes:
            pitches.append(pitch.Pitch(midi=n))

        crd = chord.Chord(notes=pitches,
                          quarterLength=np.round(dense_line[-1] * 2048) / 2048)
        music_stream.append(crd)

    md = midi.translate.streamToMidiFile(music_stream)
    md.open(fname, 'wb')
    md.write()
    md.close()
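
A usage sketch: each row is read as thresholded (> 0.5) MIDI-pitch slots plus a trailing quarterLength that gets quantized to 1/2048; the 128-slot width and the output/ directory are assumptions:

import os
import numpy as np

os.makedirs('output', exist_ok=True)
row = np.zeros(129)                  # 128 pitch slots + 1 duration slot (assumed)
row[60] = row[64] = row[67] = 1.0    # C major triad
row[-1] = 1.0                        # one quarter note
save_mat2_mid([row, row])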
Example #27
    def runGetElementsByClassType(self):
        '''Getting elements by class type
        '''
        from music21 import note
        from music21 import stream

        # create a stream with 1000 notes and 500 rests
        s = stream.Stream()
        for i in range(1000):
            n = note.Note()
            s.append(n)
        for i in range(500):
            r = note.Rest()
            s.append(r)

        for i in range(2):
            post = s.recurse().getElementsByClass([note.Rest, note.Note])
            self.assertEqual(len(post), 1500)
Example #28
def ch1_basic_II_C(data, intervalShift):
    '''Function for C1, C2, C3, and C4
    '''
    from music21 import stream, common, chord, note
    ex = stream.Stream()
    for chunk in data:
        m = stream.Measure()    
        for e in chunk:
            if common.isStr(e):
                n1 = note.Note(e)
                n1.quarterLength = 4
                n2 = n1.transpose(intervalShift)
                m.append(chord.Chord([n1, n2])) # chord to show both
            else:
                m.append(e)
        m.timeSignature = m.bestTimeSignature()
        ex.append(m)
    return ex
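
A usage sketch: each chunk holds pitch strings (or ready-made music21 objects, which pass through unchanged), and intervalShift is anything Note.transpose() accepts:

# Each string becomes a two-note chord a major third apart.
ex = ch1_basic_II_C([['c4', 'd4'], ['e4', 'f4']], 'M3')
ex.show('text')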
Example #29
def gliss_ratio(glissandi, length=0.25, bpm=60):
    """Build a stream of note-sequence parts, one per glissando."""
    parts = []
    longest = longest_gliss(glissandi)
    for gliss in glissandi:
        part = build_note_sequence(gliss, longest, length)
        parts.append(part)

    s = stream.Stream(parts)
    return s
Example #30
    def testGraceB(self):
        ng1 = note.Note('c4', quarterLength=.5).getGrace()
        ng1.duration.stealTimeFollowing = .5
        ng1.duration.slash = False
        n1 = note.Note('g4', quarterLength=2)

        ng2 = note.Note('c4', quarterLength=.5).getGrace()
        ng2.duration.stealTimePrevious = .25
        n2 = note.Note('d4', quarterLength=2)

        s = stream.Stream()
        s.append([ng1, n1, ng2, n2])
        #s.show()

        # test roundtrip output
        raw = fromMusic21Object(s)
        self.assertEqual(raw.count('slash="no"'), 1)
        self.assertEqual(raw.count('slash="yes"'), 1)