Example #1
0
 def test_noterest_ind_func_1(self):
     """Verify noterest_ind_func on a Note, a Rest, and a Chord object."""
     a_flat = note.Note('A-4')
     d_sharp = note.Note('D#5')
     f_sharp = note.Note('F#5')
     a_rest = note.Rest()
     triad = chord.Chord([f_sharp, d_sharp, a_flat])
     series_in = pandas.Series((a_flat, a_rest, triad))
     result = series_in.apply(noterest.noterest_ind_func)
     expected = pandas.Series(('A-4', 'Rest', 'F#5'))
     self.assertTrue(result.equals(expected))
Example #2
0
def create_note(length, is_rest):
    """Create a music21 note or rest of a given length.

    :param length: duration of the new object in quarter lengths
    :param is_rest: if truthy, create a ``note.Rest``; otherwise a ``note.Note``
    :return: the configured ``note.Rest`` or ``note.Note``
    """
    # Truthiness test instead of `== True` (PEP 8 discourages comparisons
    # to True); also removed the dead commented-out offset assignment.
    new_note = note.Rest() if is_rest else note.Note()

    new_note.duration.quarterLength = length
    new_note.storedInstrument = instrument.Piano()
    return new_note
Example #3
0
def save_song(song):
    '''
    Takes in the converted sequence and creates a midi stream, saves each
    note/chord and rest object to the stream then saves as a midi file in
    the VAE_generated_sequences fdir.
    '''
    # Removed unused locals (MidiTrack/DeltaTime were created but never
    # attached to anything).
    new_stream = stream.Stream()

    for element in song:
        # Chord tokens look like "60.64.67-1/2": dot-joined pitches, then
        # a '-' and a (possibly fractional) duration.
        if ('.' in element) or element.isdigit():
            chord_component = element.split('-')
            if '/' in chord_component[1]:
                num, denom = chord_component[1].split('/')
                duration_ = float(num) / float(denom)
            else:
                duration_ = chord_component[1]

            notes_in_chord = chord_component[0].split('.')
            notes = [note.Note(int(n)) for n in notes_in_chord]

            new_chord = chord.Chord(notes)
            new_chord.quarterLength = float(duration_)
            new_stream.append(new_chord)

        # Single-note tokens look like "60-1/2".
        elif '.' not in element and (len(element) != 0):
            note_component = element.split('-')
            if '/' in note_component[1]:
                num, denom = note_component[1].split('/')
                duration_ = float(num) / float(denom)
            else:
                duration_ = note_component[1]
            new_note = note.Note(int(note_component[0]))
            new_note.quarterLength = float(duration_)
            new_stream.append(new_note)
        # BUG FIX: the original used `element is ""`, an identity check
        # that is not guaranteed to be true for an empty string built at
        # runtime; compare with equality instead.
        elif element == "":
            new_stream.append(note.Rest())

    count = len(os.listdir(params['save_dir'])) + 1
    new_stream.write('midi',
                     fp=params['save_dir'] + str(count) + '_' +
                     dataset_name + '_' + str(params['input_dim']) +
                     '_' + str(timestamp) + ".mid")
    print("--------------------------------------------------")
    print("Generated Sequence Saved")
    print("--------------------------------------------------")
Example #4
0
    def generateBassLine(self):
        '''
        Generates the bass line as a :class:`~music21.stream.Score`.

        >>> from music21.figuredBass import realizer
        >>> from music21 import key
        >>> from music21 import meter
        >>> from music21 import note
        >>> fbLine = realizer.FiguredBassLine(key.Key('B'), meter.TimeSignature('3/4'))
        >>> fbLine.addElement(note.Note('B2'))
        >>> fbLine.addElement(note.Note('C#3'), '6')
        >>> fbLine.addElement(note.Note('D#3'), '6')
        >>> #_DOCS_SHOW fbLine.generateBassLine().show()

        .. image:: images/figuredBass/fbRealizer_bassLine.*
            :width: 200


        >>> from music21 import corpus
        >>> sBach = corpus.parse('bach/bwv307')
        >>> sBach['bass'].measure(0).show('text')
        {0.0} ...
        {0.0} <music21.clef.BassClef>
        {0.0} <music21.key.Key of B- major>
        {0.0} <music21.meter.TimeSignature 4/4>
        {0.0} <music21.note.Note B->
        {0.5} <music21.note.Note C>

        >>> fbLine = realizer.figuredBassFromStream(sBach['bass'])
        >>> fbLine.generateBassLine().measure(1).show('text')
        {0.0} <music21.clef.BassClef>
        {0.0} <music21.key.KeySignature of 2 flats>
        {0.0} <music21.meter.TimeSignature 4/4>
        {3.0} <music21.note.Note B->
        {3.5} <music21.note.Note C>
        '''
        bassLine = stream.Part()
        bassLine.append(clef.BassClef())
        bassLine.append(key.KeySignature(self.inKey.sharps))
        bassLine.append(copy.deepcopy(self.inTime))
        r = None
        # A nonzero left padding means the line starts with a pickup:
        # temporarily fill the gap with a rest so makeNotation barlines it.
        if self._paddingLeft != 0.0:
            r = note.Rest(quarterLength=self._paddingLeft)
            bassLine.append(r)

        for (bassNote, unused_notationString) in self._fbList:
            bassLine.append(bassNote)

        bl2 = bassLine.makeNotation(inPlace=False, cautionaryNotImmediateRepeat=False)
        # If a padding rest was inserted, strip it back out of the first
        # measure and mark that measure as an anacrusis (pickup) instead.
        if r is not None:
            m0 = bl2.getElementsByClass('Measure')[0]
            m0.remove(m0.getElementsByClass('Rest')[0])
            m0.padAsAnacrusis()
        return bl2
def standard_note(note_or_rest_string):
    """
    Map a token string to a music21 object.

    :param note_or_rest_string: string representation of a note, a rest,
        or one of the special sequence symbols
    :return: a music21 ``note.Note`` or ``note.Rest``
    """
    token = note_or_rest_string
    if token == 'rest':
        return note.Rest()
    # Sequence markers render as fixed quarter-tone pitches, one per symbol.
    if token == END_SYMBOL:
        return note.Note('D~3', quarterLength=1)
    if token == START_SYMBOL:
        return note.Note('C~3', quarterLength=1)
    if token == PAD_SYMBOL:
        return note.Note('E~3', quarterLength=1)
    # Slur and out-of-range tokens are treated as silence.
    if token in (SLUR_SYMBOL, OUT_OF_RANGE):
        return note.Rest()
    return note.Note(token)
Example #6
0
 def __init__(self):
     """Build a benchmark stream: 300 notes, 300 rests and 300 chords,
     with time signatures and bass clefs repeated at fixed offsets."""
     from music21 import stream
     from music21 import note
     from music21 import clef
     from music21 import meter
     from music21 import chord
     self.s = stream.Stream()
     for element in (note.Note(), note.Rest(), chord.Chord()):
         self.s.repeatAppend(element, 300)
     for marker in (meter.TimeSignature(), clef.BassClef()):
         self.s.repeatInsert(marker, [0, 50, 100, 150])
Example #7
0
def melody_texture(token_seq, duration=0.25, use_last=False):
    """Yield music21 objects for each token in *token_seq*.

    Rest tokens become rests, start/end tokens become double barlines,
    and other tokens become notes of the given *duration*.  When
    *use_last* is true only the final pitch of each token is emitted.
    """
    for token in token_seq:
        head = token[0]
        if head == REST_WORD:
            yield note.Rest()
            continue
        if head in (START_WORD, END_WORD):
            yield bar.Barline('double')
            continue
        pitches = token[-1:] if use_last else token
        for pitch in pitches:
            yield note.Note(pitch, quarterLength=duration)
Example #8
0
def create_midi(prediction_output):
    """ convert the output from the prediction to notes and create a midi file
        from the notes """
    offset = 0
    output_notes = []

    # create note and chord objects based on the values generated by the model
    for pattern in prediction_output:
        # pattern is a chord
        if ('.' in pattern) or pattern.isdigit():
            notes_in_chord = pattern.split('.')
            notes = []
            for current_note in notes_in_chord:
                new_note = note.Note(int(current_note))
                new_note.storedInstrument = instrument.Piano()
                notes.append(new_note)
            new_chord = chord.Chord(notes)
            new_chord.offset = offset
            output_notes.append(new_chord)
        elif pattern == 'Rest':
            # pattern is a rest.  BUG FIX: give the rest its offset like
            # the other branches do — the original left every rest at
            # offset 0, stacking them all at the start of the stream.
            new_rest = note.Rest()
            new_rest.offset = offset
            output_notes.append(new_rest)
        # pattern is a note
        else:
            new_note = note.Note(pattern)
            new_note.offset = offset
            new_note.storedInstrument = instrument.Piano()
            output_notes.append(new_note)

        # increase offset each iteration so that notes do not stack
        offset += 0.5

    midi_stream = stream.Stream(output_notes)

    counter = 0

    # Lengthen runs of repeated pitches and soften most of the repeats.
    # NOTE(review): `midi_stream.pitches` flattens chords into multiple
    # pitches, so these indices can drift from `midi_stream.notes` when
    # chords are present — confirm this is the intended behavior.
    for note_index in range(1, len(midi_stream.notes)):
        if midi_stream.pitches[note_index] == midi_stream.pitches[note_index -
                                                                  1]:
            counter += 1
        else:

            for between_notes in range(1, counter + 1):
                randomizer = random.uniform(0, 1)
                print(randomizer)
                if randomizer < .85:
                    midi_stream.notes[note_index - between_notes].volume = 0.1
            midi_stream.notes[note_index -
                              (counter +
                               1)].duration.quarterLength = float(1 + counter)
            counter = 0

    midi_stream.write('midi', fp='duration_accounted_output.mid')
Example #9
0
def chordsToAnalysis(chordStream, manifest, scale):
    '''
    manifest is a list of tuples in the following form:
    (measureNumber, chordNumberOrNone, scaleDegree, octaveDisplay,
    durationTypeDisplay, textDisplay)
    '''
    from music21 import note, bar

    chordMeasures = chordStream.getElementsByClass('Measure')
    # Blank copy of the measures: keeps layout/attributes, drops the notes.
    measureTemplate = copy.deepcopy(chordMeasures)
    for i, m in enumerate(measureTemplate):
        m.removeByClass(['GeneralNote'])
        # assuming we have measure numbers

    for (measureNumber, chordNumberOrNone, scaleDegree, octaveDisplay,
         durationTypeDisplay, textDisplay) in manifest:
        # assume measures are in order; replace with different method
        m = chordMeasures[measureNumber - 1]
        mPost = measureTemplate[measureNumber - 1]
        if chordNumberOrNone is None:
            c = m.notes[0]
        else:
            c = m.notes[chordNumberOrNone - 1]  # assume counting from 1

        # Look for a chord pitch whose name matches the requested degree.
        pTarget = scale.pitchFromDegree(scaleDegree)
        match = False
        p = None
        for p in c.pitches:
            if p.name == pTarget.name:
                match = True
                break
        if not match:
            print('no scale degree found in specified chord', p, pTarget)
        pTarget.octave = octaveDisplay
        n = note.Note(pTarget)
        # Whole notes are drawn hollow; everything else gets a filled head.
        if durationTypeDisplay in ['whole']:
            n.noteheadFill = False
        else:
            n.noteheadFill = True
        n.stemDirection = 'noStem'
        n.addLyric(textDisplay)
        mPost.insert(c.getOffsetBySite(m), n)

    # fill with rests
    for m in measureTemplate:
        m.rightBarline = bar.Barline('none')
        # need to hide rests
        if len(m.notes) == 0:
            r = note.Rest(quarterLength=4)
            r.hideObjectOnPrint = True
            m.append(r)

    return measureTemplate
Example #10
0
 def mix_note_chord(self, chord_list, groove):
     """For each duration in *groove*, randomly emit a rest, a single
     note drawn from *chord_list*, or a chord of all but its last pitch."""
     choices = [0, 1, 2]
     result = []
     for dur in groove:
         roll = random.choice(choices)
         if roll == 0:
             result.append(note.Rest(quarterLength=dur))
         elif roll == 1:
             picked = random.choice(chord_list)
             result.append(note.Note(picked, quarterLength=dur))
         else:
             result.append(chord.Chord(chord_list[:-1], quarterLength=dur))
     return result
Example #11
0
def train_network():
    """ Train a Neural Network to generate music """
    notes = parser()
    # Ensure the rest token is always part of the vocabulary.
    notes.append(str(note.Rest().fullName))

    vocabulary = set(notes)
    print(vocabulary)
    n_vocab = len(vocabulary)

    network_input, network_output = prepare_sequences(notes, n_vocab)
    model = create_network(network_input, n_vocab)
    train(model, network_input, network_output)
Example #12
0
def decode_to_stream(song_data, filename=None):
    """Decode an array of (pitch, duration) rows into a music21 stream.

    :param song_data: numpy song data accepted by ``np_to_df``
    :param filename: when given, also write the stream as a MIDI file at
        ``./music_data/output/<filename>``
    :return: the assembled ``stream.Stream``
    """
    df = np_to_df(song_data)
    melody_stream = stream.Stream()
    for _, row in df.iterrows():
        # Sentinel pitches (no event / note off) decode to rests.
        if row.pitch == MELODY_NO_EVENT or row.pitch == MELODY_NOTE_OFF:
            new_note = note.Rest()
        else:
            new_note = note.Note(row.pitch)
        new_note.quarterLength = row.duration * 0.25
        melody_stream.append(new_note)
    if filename:
        # BUG FIX: the output path never interpolated `filename`, so the
        # parameter was silently ignored and every call overwrote the
        # same file.
        melody_stream.write('midi', fp=f'./music_data/output/{filename}')
    return melody_stream
Example #13
0
def standard_note(note_or_rest_string):
    """
    Convert a string token into the corresponding music21 object.

    :param note_or_rest_string: note name, 'rest', or a special symbol
    :return: ``note.Note`` or ``note.Rest``
    """
    # 'rest', every sequence marker, slurs and out-of-range tokens all
    # decode to silence; anything else is parsed as a note name.
    silent_tokens = {'rest', END_SYMBOL, START_SYMBOL, PAD_SYMBOL,
                     SLUR_SYMBOL, OUT_OF_RANGE}
    if note_or_rest_string in silent_tokens:
        return note.Rest()
    return note.Note(note_or_rest_string)
def standard_note(string):
    '''
    Converts str (name) representing a music21 object to the corresponding object
    :param string : str (name) representing a music21 object
    :return : corresponding music21 object
    '''
    rest_like = (
        "rest",
        START_SYMBOL,
        END_SYMBOL,
        PAD_SYMBOL,
        SLUR_SYMBOL,
        OUT_OF_RANGE,
    )
    # Explicit rests and every special sequence symbol decode to a rest.
    if string in rest_like:
        return note.Rest()
    return note.Note(string)
Example #15
0
def mergeLeadSheetAndBassLine(leadsheet, bassLine):
    '''
    method to combine the lead sheet (i.e. from the ex-wikifonia) with just the melody line
    and chord symbols with the newly realized bassLine (i.e. from fbRealizer) which
    consists of two parts, the treble line and bass line.
    '''
    s = stream.Score()

    s.insert(metadata.Metadata())
    s.metadata.title = leadsheet.metadata.title
    cs = leadsheet.flat.getElementsByClass(harmony.ChordSymbol)
    # ROBUSTNESS FIX: guard against a lead sheet with no chord symbols,
    # which previously raised an IndexError on cs[0].
    if len(cs) > 0 and cs[0].offset > 0:
        # Pad both piano staves so the realized bass line starts under
        # the first chord symbol rather than at offset 0.
        bassLine.parts[0].insertAndShift(0, note.Rest(quarterLength=cs[0].offset))
        bassLine.parts[1].insertAndShift(0, note.Rest(quarterLength=cs[0].offset))
    voicePart = leadsheet.parts[0]
    pianoTreble = bassLine.parts[0]
    pianoBass = bassLine.parts[1]
    s.insert(0, voicePart)
    s.insert(0, pianoTreble)
    s.insert(0, pianoBass)

    return s
Example #16
0
	def make_base(self, base):
		"""Translate (name, duration) pairs into a stream; name 'r' is a rest."""
		base_melody = stream.Stream()
		for entry in base:
			pitch_name, dur = entry[0], entry[1]
			if pitch_name == 'r':
				base_melody.append(note.Rest(type='quarter'))
			else:
				base_melody.append(note.Note(pitch_name, quarterLength=dur))
		return base_melody
Example #17
0
def create_part(prediction_output, instrumentType):
    """Convert predicted tokens into a music21 Part.

    :param prediction_output: iterable of tokens; 0 is a rest, 1 and 2 are
        start/end markers (skipped), anything else maps through ``get_note``
    :param instrumentType: 'piano', 'bass', or anything else for saxophone
    :return: a ``stream.Part`` with consecutive repeated notes merged
    """
    offset = 0
    output_notes = []

    for pattern in prediction_output:

        # pattern is invalid (start or end symbol)
        if pattern == 1 or pattern == 2:
            continue

        # pattern is a rest
        if pattern == 0:
            new_note = note.Rest()
        else:
            new_note = note.Note(get_note(pattern))

        # assign instrument
        if instrumentType == 'piano':
            new_note.storedInstrument = instrument.Piano()
        elif instrumentType == 'bass':
            new_note.storedInstrument = instrument.Bass()
        else:
            new_note.storedInstrument = instrument.Saxophone()

        # fixed sixteenth-note grid
        new_note.duration.quarterLength = 0.25
        new_note.offset = offset
        offset += .25

        output_notes.append(new_note)

    # ROBUSTNESS FIX: an empty or all-marker prediction previously raised
    # IndexError on output_notes[0]; return an empty part instead.
    if not output_notes:
        return stream.Part()

    # merge consecutive identical notes into one longer note
    output = [output_notes[0]]
    for i in range(1, len(output_notes)):
        if isinstance(output_notes[i], note.Rest):
            output.append(output_notes[i])
        else:
            current_note = output_notes[i].nameWithOctave

            if not isinstance(
                    output[-1],
                    note.Rest) and current_note == output[-1].nameWithOctave:
                output[-1].duration.quarterLength += .25
            else:
                output.append(output_notes[i])

    return stream.Part(output)
Example #18
0
    def undo_full_chords_no_duration(self, chords):
        """Rebuild a Part from encoded chord strings.

        Each entry is either the literal 'r' (skipped) or a space-separated
        pitch list whose trailing token is dropped; a leading 'r' pitch
        yields a rest, otherwise a Chord of the integer pitches.
        """
        part = stream.Part()

        for encoded in chords:
            if encoded == 'r':
                continue
            pitches = encoded.split(' ')
            if pitches[0] == 'r':
                element = note.Rest()
            else:
                element = chord.Chord([int(p) for p in pitches[:-1]])
            part.append(element)

        return part
Example #19
0
    def tabToM21(self):
        '''
        Creates and returns a music21.roman.RomanNumeral() object
        from a TabChord with all shared attributes.
        NB: call changeRepresentation() first if .representationType is not 'm21'
        but you plan to process it with m21 (e.g. moving it into a stream).

        >>> tabCd = romanText.tsvConverter.TabChord()
        >>> tabCd.numeral = 'vii'
        >>> tabCd.global_key = 'F'
        >>> tabCd.local_key = 'V'
        >>> tabCd.representationType = 'm21'
        >>> m21Ch = tabCd.tabToM21()

        Now we can check it's a music21 RomanNumeral():

        >>> m21Ch.figure
        'vii'
        '''

        if self.numeral:
            # Assemble the figure string: numeral, then the optional form
            # and figured-bass suffixes when present.
            if self.form:
                if self.figbass:
                    combined = ''.join([self.numeral, self.form, self.figbass])
                else:
                    combined = ''.join([self.numeral, self.form])
            else:
                combined = self.numeral

            if self.relativeroot:  # special case requiring '/'.
                combined = ''.join([combined, '/', self.relativeroot])

            # Resolve the local key against the global key before building
            # the RomanNumeral.
            localKeyNonRoman = getLocalKey(self.local_key, self.global_key)

            thisEntry = roman.RomanNumeral(combined, localKeyNonRoman)
            thisEntry.quarterLength = self.length

            # Carry the source-location metadata across onto the new object.
            thisEntry.op = self.op
            thisEntry.no = self.no
            thisEntry.mov = self.mov

            thisEntry.pedal = self.pedal

            thisEntry.phraseend = None

        else:  # handling case of '@none'
            thisEntry = note.Rest()
            thisEntry.quarterLength = self.length

        return thisEntry
Example #20
0
def seqs_to_stream(seqs):
    """
    :param seqs: list of sequences
    a sequence is a list (one for each voice) of list of (pitch, articulation)
    add rests between sequences
    :return:
    """
    score = stream.Score()
    for voice_index in range(len(seqs[0])):
        part = stream.Part(id='part' + str(voice_index))
        for s_index, seq in enumerate(seqs):
            # print(voice_index, s_index)
            voice = seq[voice_index]
            # `dur` counts subdivision ticks held by the pending object `f`;
            # articulation n[1] == 1 marks a new note onset, anything else
            # sustains the current one.
            dur = 0
            f = note.Rest()
            for k, n in enumerate(voice):
                if n[1] == 1:
                    # flush the previous note before starting a new one
                    # (the initial placeholder Rest is deliberately dropped)
                    if not f.name == 'rest':
                        f.duration = duration.Duration(dur / SUBDIVISION)
                        part.append(f)

                    dur = 1
                    f = note.Note()
                    f.pitch.midi = n[0]
                else:
                    dur += 1
            # flush whatever is still pending (a note, or the initial rest
            # if the voice contained no onset at all)
            f.duration = duration.Duration(dur / SUBDIVISION)
            part.append(f)
            # add rests (8 beats)
            # NOTE(review): this Duration is SUBDIVISION * 8 quarter lengths
            # and is NOT divided by SUBDIVISION like the ones above —
            # confirm the intended rest length.
            f = note.Rest()
            f.duration = duration.Duration(SUBDIVISION * 8)
            part.append(f)

        score.insert(part)
    return score
Example #21
0
    def _convert_to_midi_file(self, notes):
        """Convert encoded input notes array into a midi file.

        Each pattern is "pitches:duration" where pitches is a comma-separated
        chord, the word "rest", or a single note name; durations may be
        fractional strings like "3/2".  Returns the written file's path.
        """
        offset = 0
        output_notes = []
        # create note and chord objects based on the values generated by the model
        for pattern in notes:
            # pattern is a chord
            # NOTE(review): a purely numeric pattern with no ':' would raise
            # IndexError on the duration split — confirm the encoder always
            # emits "pitches:duration".
            if (',' in pattern) or pattern.isdigit():
                pitch = pattern.split(":")[0]
                # renamed from `duration` to avoid shadowing music21's
                # duration module
                dur_str = pattern.split(":")[1]
                notes_in_chord = pitch.split(',')
                chord_notes = []
                for current_note in notes_in_chord:
                    new_note = note.Note(int(current_note))
                    new_note.storedInstrument = instrument.AltoSaxophone()
                    chord_notes.append(new_note)
                new_chord = chord.Chord(chord_notes)
                new_chord.offset = offset
                output_notes.append(new_chord)
            # pattern is a rest
            elif "rest" in pattern:
                dur_str = pattern.split(":")[1]
                new_note = note.Rest()
                new_note.offset = offset

                output_notes.append(new_note)
            # pattern is a note
            else:
                pitch = pattern.split(":")[0]
                dur_str = pattern.split(":")[1]

                try:
                    new_note = note.Note(pitch)
                except Exception:
                    # BUG FIX: narrowed the bare `except:` so that
                    # KeyboardInterrupt/SystemExit are no longer swallowed.
                    continue
                new_note.offset = offset
                new_note.storedInstrument = instrument.AltoSaxophone()
                output_notes.append(new_note)

            # durations may be fractions such as "3/2"
            if '/' in dur_str:
                dur_str = float(int(dur_str.split('/')[0]) / int(dur_str.split('/')[1]))

            # increase offset each iteration so that notes do not stack
            offset += float(dur_str)

        midi_stream = stream.Stream(output_notes)
        output_fp = os.path.join(SCRATCH_DIR, f"{uuid1().hex}_{self.composer.get_name()}.midi")
        midi_stream.write('midi', fp=output_fp)
        return output_fp
Example #22
0
def predict_and_create_midi(model, X, unique_info, unique_size):
    """Generate 300 musical events with *model* and write them to a midi file.

    :param model: trained model exposing ``.predict``
    :param X: list of integer-encoded seed sequences
    :param unique_info: ordered vocabulary used to decode predictions
    :param unique_size: vocabulary size used to normalize the input
    """
    # renamed the loop variable so the genexp no longer shadows the
    # music21 `note` module
    int_to_note = dict((number, info) for number, info in enumerate(unique_info))
    Y_pred = []

    start_idx = np.random.randint(0, len(X) - 1)
    # BUG FIX: copy the seed so the append below cannot mutate X in place.
    seq = list(X[start_idx])  # sequence of music information

    for number in range(300):
        X_pred = np.reshape(seq, (1, len(seq), 1))
        X_pred = X_pred / float(unique_size)
        pred = model.predict(X_pred, verbose=0)
        best = np.argmax(pred)  # take the class with the highest probability
        best_info = int_to_note[best]  # un-encoding
        Y_pred.append(best_info)
        seq.append(best)
        seq = seq[1:len(seq)]  # slide the window forward one step

    song = []
    offset = 0.5
    # create note and chord objects based on the values generated by the model
    # BUG FIX: the original iterated over an undefined name `predictions`
    # (NameError); the generated tokens live in Y_pred.
    for info in Y_pred:
        # info = chord
        if ('.' in info) or info.isdigit():
            chord_notes = info.split('.')
            notes = []
            for current in chord_notes:
                new_note = note.Note(int(current))
                new_note.offset = offset
                new_note.storedInstrument = instrument.SnareDrum()
                notes.append(new_note)
            new_chord = chord.Chord(notes)
            song.append(new_chord)
        # info = Rest
        elif ('Re' in info):
            new_rest = note.Rest()
            new_rest.offset = offset
            # NOTE(review): info[1:] is parsed as the rest length — verify
            # the rest token format actually puts a number after the first
            # character.
            new_rest.quarterLength = float(info[1:])
            new_rest.storedInstrument = instrument.SnareDrum()
            song.append(new_rest)
        # info = note
        else:
            new_note = note.Note(info)
            new_note.offset = offset
            new_note.storedInstrument = instrument.SnareDrum()
            song.append(new_note)
        offset += 0.5
    midi = stream.Stream(song)
    midi.write('midi', fp='C:/Users/PycharmProjects/AI-music/generate-music/generated_song.mid')
def translate(int_note, dur):
    """
    Given an integer value of a note, gets a corresponding music21.note object
    :param int_note: integer value of the note
    :param dur: duration of desired note
    :return music21.note
    """
    pitch_classes = ("C", "C#", "D", "D#", "E", "F",
                     "F#", "G", "G#", "A", "A#", "B")
    octave = int(2 + int_note / 12)
    pitch = pitch_classes[int_note % 12] + str(octave)

    # Negative durations encode rests of the corresponding length.
    if dur < 0:
        return note.Rest(duration=duration.Duration(quarterLength=-dur))
    return note.Note(pitch, duration=duration.Duration(quarterLength=dur))
Example #24
0
def _left_hand_interlude():
    """Build the two-bar left-hand interlude voice in 6/4."""
    lh_interlude = stream.Voice()
    lh_interlude.append(meter.TimeSignature("6/4"))
    for _ in range(2):
        lh_interlude.append(note.Rest(duration=duration.Duration(2.75)))
        low_e = note.Note("E1", duration=duration.Duration(0.25))
        low_a = note.Note("A0", duration=duration.Duration(3))
        # Both notes sound an octave lower under an 8vb ottava line; the
        # spanner is appended before the notes it spans.
        ottava = spanner.Ottava()
        ottava.type = (8, "down")
        ottava.addSpannedElements([low_e, low_a])
        lh_interlude.append(ottava)
        lh_interlude.append(low_e)
        lh_interlude.append(low_a)
        lh_interlude.makeMeasures(inPlace=True, finalBarline=None)
    return lh_interlude
Example #25
0
def convert_measure_to_music21_measure(m: Measure):
    """Convert a project Measure into a music21 ``stream.Measure``.

    Pitches equal to 'REST' become rests; every element's duration is
    scaled from the project's ``duration_value`` units to quarter lengths.
    """
    measure = stream.Measure(1)
    for j in m.notes:
        # Hoisted: the Duration construction was duplicated verbatim in
        # both branches.  (Also removed the no-op `m.notes: [Note]`
        # annotation statement.)
        dur = duration.Duration(
            quarterLength=j.duration.duration_value / 0.25)
        if j.pitch == 'REST':
            element = note.Rest()
        else:
            element = note.Note(j.pitch)
        element.duration = dur
        measure.append(element)

    return measure
Example #26
0
    def testOutOfBoundsExpressionDoesNotCreateForward(self):
        '''
        A metronome mark at an offset exceeding the bar duration was causing
        <forward> tags, i.e. hidden rests. Prefer <offset> instead.
        '''
        m = stream.Measure()
        m.append(meter.TimeSignature('1/4'))
        m.append(note.Rest())
        # Offset 2 lies beyond the 1/4 bar: the regression case under test.
        m.insert(2, tempo.MetronomeMark('slow', 40))

        gex = GeneralObjectExporter()
        tree = self.getET(gex.fromGeneralObject(m))
        # The mark must be exported via <direction>/<offset>, never <forward>.
        self.assertFalse(tree.findall('.//forward'))
        self.assertEqual(int(tree.findall('.//direction/offset')[0].text),
                         defaults.divisionsPerQuarter)
def convert_to_midi(sequence, bpm=60, output_file='./midi_output/music.mid'):
    """Save sequence as a midi file (with path = output_file). sequence
    can be from the original dataset or a new sequence generated by a
    trained model.

    Each vector's last entry is the duration in seconds; the remaining
    entries are a (multi-)hot encoding over piano keys: sum > 1 is a
    chord, sum == 1 a single note, sum == 0 a rest.
    """
    offset = 0  # the distance in quarter-notes of the note/chord/rest
    # being written from the beginning
    output_notes = [instrument.Piano(), tempo.MetronomeMark(number=bpm)]

    bps = bpm / 60  # beats per second
    # create note, chord, and rest objects
    for vector in sequence:
        # BUG FIX: the original reused a single Duration object for every
        # element; music21 stores it by reference, so each iteration
        # retroactively rewrote the durations of all earlier elements.
        # A fresh Duration per vector keeps them independent.
        converted_duration = duration.Duration()
        # convert from seconds to beats
        converted_duration.quarterLength = vector[-1] * bps

        if (np.sum(vector[:-1]) > 1):  # chord
            indices_in_chord = np.argsort(vector[:-1])[-int(np.sum(\
                vector[:-1])):]
            notes_in_chord = [piano_idx_to_note(i) for i in indices_in_chord]
            notes = []
            for cur_note in notes_in_chord:
                new_note = note.Note(cur_note)
                new_note.storedInstrument = instrument.Piano()
                notes.append(new_note)
            new_chord = chord.Chord(notes)
            new_chord.offset = offset
            new_chord.duration = converted_duration
            output_notes.append(new_chord)

        elif (np.sum(vector[:-1]) == 1):  # note
            index = np.argmax(vector[:-1])
            new_note = piano_idx_to_note(index)
            new_note = note.Note(new_note)
            new_note.offset = offset
            new_note.storedInstrument = instrument.Piano()
            new_note.duration = converted_duration
            output_notes.append(new_note)

        elif (np.sum(vector[:-1]) == 0):  # rest
            new_rest = note.Rest()
            new_rest.offset = offset
            new_rest.duration = converted_duration
            output_notes.append(new_rest)

        offset += vector[-1]

    midi_stream = stream.Stream(output_notes)
    midi_stream.write('midi', fp=output_file)
Example #28
0
    def testExportChordSymbolsWithRealizedDurations(self):
        """Check which MusicXML <forward>/<offset> tags chord-symbol export
        emits under several measure layouts."""
        gex = GeneralObjectExporter()

        def realizeDurationsAndAssertTags(mm: stream.Measure,
                                          forwardTag=False,
                                          offsetTag=False):
            # Works on a copy so the caller's measure can be reused/mutated
            # between scenarios.
            mm = copy.deepcopy(mm)
            harmony.realizeChordSymbolDurations(mm)
            obj = gex.fromGeneralObject(mm)
            tree = self.getET(obj)
            self.assertIs(bool(tree.findall('.//forward')), forwardTag)
            self.assertIs(bool(tree.findall('.//offset')), offsetTag)

        # Two consecutive chord symbols, no rests
        cs1 = harmony.ChordSymbol('C7')
        cs2 = harmony.ChordSymbol('F7')
        m = stream.Measure()
        m.insert(0, cs1)
        m.insert(2, cs2)
        realizeDurationsAndAssertTags(m, forwardTag=True, offsetTag=False)

        # Two consecutive chord symbols, rest coinciding with first one
        r1 = note.Rest(type='half')
        m.insert(0, r1)
        realizeDurationsAndAssertTags(m, forwardTag=False, offsetTag=False)

        # One chord symbol midmeasure, no rests
        m.remove(cs1)
        m.remove(r1)
        realizeDurationsAndAssertTags(m, forwardTag=True, offsetTag=False)

        # One chord symbol midmeasure coinciding with whole note
        n1 = note.Note(type='whole')
        m.insert(0, n1)
        # Need an offset tag to show the -2.0 offset to get from end to midmeasure
        realizeDurationsAndAssertTags(m, forwardTag=False, offsetTag=True)

        # One chord symbol at beginning of measure coinciding with whole note
        m.remove(cs2)
        m.insert(0, cs1)
        realizeDurationsAndAssertTags(m, forwardTag=False, offsetTag=False)

        # One chord symbol at beginning of measure with writeAsChord=True
        # followed by a half note
        cs1.writeAsChord = True
        n1.offset = 2
        n1.quarterLength = 2
        realizeDurationsAndAssertTags(m, forwardTag=False, offsetTag=False)
Example #29
0
    def restFromRest(self, restElement):
        '''
        Returns a :class:`~music21.rest.Rest` object from a <rest> tag.

        >>> ci = capella.fromCapellaXML.CapellaImporter()
        >>> restElement = ci.domElementFromText('<rest><duration base="1/2"/></rest>')
        >>> r = ci.restFromRest(restElement)
        >>> r
        <music21.note.Rest rest>
        >>> r.duration.type
        'half'
        '''
        r = note.Rest()
        # A <rest> carries its length in a child <duration> element;
        # only the first one is consulted.
        durationList = restElement.findall('duration')
        r.duration = self.durationFromDuration(durationList[0])
        return r
Example #30
0
    def runStreamIterationByElements(self):
        '''Stream iteration by .elements access
        '''
        from music21 import note, stream
        # create a stream with 1000 notes and 500 rests
        # (fixed comment: it previously claimed 750/250, contradicting the code)
        s = stream.Stream()
        for i in range(1000):
            n = note.Note()
            s.append(n)
        for i in range(500):
            r = note.Rest()
            s.append(r)

        # benchmark loop: repeatedly iterate via .elements
        for i in range(100):
            for j in s.elements:  # this will create an iterator instances
                pass