def indexed_seq_to_score(seq, index2note, note2index):
    """Convert an indexed note sequence into a single-part music21 Score.

    :param seq: voice-major sequence of note indices; the index mapped from
        SLUR_SYMBOL extends the previous note's duration instead of starting
        a new note
    :param index2note: mapping from index -> note name string
    :param note2index: mapping from note name string -> index
    :return: music21 stream.Score containing one Part
    """
    slur_index = note2index[SLUR_SYMBOL]
    score = stream.Score()
    voice_index = SOP_INDEX
    part = stream.Part(id='part' + str(voice_index))
    dur = 0
    f = note.Rest()
    for n in seq:
        # if it is a played note (not a slur continuation)
        if not n == slur_index:
            # flush the previous note, if any has been started
            if dur > 0:
                f.duration = duration.Duration(dur / SUBDIVISION)
                part.append(f)
            dur = 1
            f = standard_note(index2note[n])
        else:
            # slur symbol: sustain the current note one more subdivision
            dur += 1
    # flush the last pending note
    f.duration = duration.Duration(dur / SUBDIVISION)
    part.append(f)
    score.insert(part)
    return score
def make_midi_file(sequence):
    """Create a MIDI file for the given sequence.

    :param sequence: iterable of (note_pattern, duration_pattern) pairs;
        a '.'-separated pattern is a chord, 'rest' a rest, and the 'START'
        sentinel is skipped
    Writes output-<model>-<HHMMSS>.mid under data/<model>/output next to
    this file, creating the folder if needed.
    """
    output_folder = Path(
        __file__
    ).parent / 'data' / main.load_files_window.model_name_string / 'output'
    output_folder.mkdir(parents=True, exist_ok=True)

    def _piano_element(pitch_name, dur):
        # Build one Note (or Rest for 'rest') of the given duration, tagged
        # with a Piano instrument — this was triplicated in the original.
        if pitch_name == 'rest':
            new_note = note.Rest()
        else:
            new_note = note.Note(pitch_name)
        new_note.duration = duration.Duration(dur)
        new_note.storedInstrument = instrument.Piano()
        return new_note

    midi_stream = stream.Stream()
    for note_pattern, duration_pattern in sequence:
        if '.' in note_pattern:
            # dotted pattern encodes a chord: one note per '.'-separated pitch
            chord_notes = [_piano_element(p, duration_pattern)
                           for p in note_pattern.split('.')]
            midi_stream.append(chord.Chord(chord_notes))
        elif note_pattern == 'rest':
            midi_stream.append(_piano_element('rest', duration_pattern))
        elif note_pattern != 'START':  # the 'START' sentinel emits nothing
            midi_stream.append(_piano_element(note_pattern, duration_pattern))
    midi_stream = midi_stream.chordify()
    file_name_string = f'output-{main.load_files_window.model_name_string}-{time.strftime("%H%M%S")}.mid'
    midi_stream.write('midi', fp=str(output_folder / file_name_string))
def test_note():
    """Exercise 7:4 tuplet durations: two dotted eighths plus a sixteenth."""
    parts = [stream.Part() for _ in range(3)]
    note1 = m21note.Note(pitch.Pitch("C4"))
    note2 = m21note.Note(pitch.Pitch("C4"))
    note3 = m21note.Note(pitch.Pitch("C4"))
    print(f"type !! {note1.duration.type}")
    # first two notes are dotted eighths, the third a plain sixteenth;
    # all three carry a 7-in-the-time-of-4 sixteenth tuplet
    note1.duration = duration.Duration(type='eighth', dots=1)
    note2.duration = duration.Duration(type='eighth', dots=1)
    note3.duration = duration.Duration(type='16th')
    for tuplet_note in (note1, note2, note3):
        tuplet_note.duration.appendTuplet(duration.Tuplet(7, 4, '16th'))
        parts[0].append(tuplet_note)
    parts.append(tempo.MetronomeMark(number=60))
    s = stream.Stream(parts)
    s.write("musicxml", "dur_test" + ".musicxml")
    return True
def seq_to_stream(seq):
    """
    :param seq: list (one for each voice) of list of (pitch, articulation)
    :return: music21 stream.Score, one Part per voice
    """
    score = stream.Score()
    for voice_number, voice_seq in enumerate(seq):
        part = stream.Part(id='part' + str(voice_number))
        held = 0
        current = note.Rest()
        for event in voice_seq:
            if event[1] == 1:
                # new articulation: flush the running note (the initial
                # Rest placeholder is never emitted here)
                if not current.name == 'rest':
                    current.duration = duration.Duration(held / SUBDIVISION)
                    part.append(current)
                held = 1
                current = note.Note()
                current.pitch.midi = event[0]
            else:
                held += 1
        # flush whatever is still sounding at the end of the voice
        current.duration = duration.Duration(held / SUBDIVISION)
        part.append(current)
        score.insert(part)
    return score
def createNonTrillMeasure():
    '''
    Returns a dictionary with the following keys

    returnDict = {
        'name': string,
        'midi': measure stream,
        'omr': measure stream,
        'expected': measure stream,
    }
    '''
    # OMR side: a single quarter-note A.
    omrNote = note.Note('A')  # omr
    omrNote.duration = duration.Duration('quarter')
    omrMeasure = stream.Measure()
    omrMeasure.append(omrNote)

    # MIDI side: C-D-C-D sixteenth-length notes — NOT a trill on A.
    upperNote = note.Note('C')
    upperNote.duration = duration.Duration(.25)
    lowerNote = note.Note('D')
    lowerNote.duration = duration.Duration(.25)
    midiMeasure = stream.Measure()
    midiMeasure.append(
        [upperNote, lowerNote, deepcopy(upperNote), deepcopy(lowerNote)])

    # no ornament applies, so the expectation is the untouched OMR measure
    return {
        'name': 'Non-Trill Measure Wrong Notes',
        'midi': midiMeasure,
        'omr': omrMeasure,
        'expected': deepcopy(omrMeasure),
    }
def indexed_chorale_to_score(seq, pickled_dataset):
    """Convert an indexed chorale (one index sequence per voice) into a Score.

    :param seq: voice-major list of index sequences; the SLUR_SYMBOL index of
        a voice extends the previous note instead of starting a new one
    :param pickled_dataset: path to a pickle whose 4th/5th entries are the
        per-voice index2note / note2index mappings
    :return: music21 stream.Score with one Part per voice
    """
    # Use a context manager so the dataset file is closed deterministically
    # (the original `pickle.load(open(...))` leaked the file handle).
    with open(pickled_dataset, 'rb') as dataset_file:
        _, _, _, index2notes, note2indexes, _ = pickle.load(dataset_file)
    slur_indexes = [d[SLUR_SYMBOL] for d in note2indexes]

    score = stream.Score()
    for voice_index, v in enumerate(seq):
        part = stream.Part(id='part' + str(voice_index))
        dur = 0
        f = note.Rest()
        for n in v:
            # if it is a played note (not a slur continuation)
            if not n == slur_indexes[voice_index]:
                # flush the previous note, if any has been started
                if dur > 0:
                    f.duration = duration.Duration(dur / SUBDIVISION)
                    part.append(f)
                dur = 1
                f = standard_note(index2notes[voice_index][n])
            else:
                # slur symbol: sustain the current note one more subdivision
                dur += 1
        # flush the last pending note
        f.duration = duration.Duration(dur / SUBDIVISION)
        part.append(f)
        score.insert(part)
    return score
def createNonTrillMeasure():
    '''
    Returns a dictionary with the following keys

    returnDict = {
        "name": string,
        "midi": measure stream,
        "omr": measure stream,
        "expected": measure stream,
    }
    '''
    # OMR measure holds a single quarter-note A.
    omrMeasure = stream.Measure()
    quarterA = note.Note("A")
    quarterA.duration = duration.Duration('quarter')
    omrMeasure.append(quarterA)

    # MIDI measure holds C-D-C-D as sixteenth-length notes (not a trill on A).
    wrongNotes = []
    for pitchName in ("C", "D", "C", "D"):
        wrongNote = note.Note(pitchName)
        wrongNote.duration = duration.Duration(.25)
        wrongNotes.append(wrongNote)
    midiMeasure = stream.Measure()
    midiMeasure.append(wrongNotes)

    # nothing should change, so expected == copy of the OMR measure
    return {
        "name": "Non-Trill Measure Wrong Notes",
        "midi": midiMeasure,
        "omr": omrMeasure,
        "expected": deepcopy(omrMeasure),
    }
def getPopulationScore(population: [individual.Individual]):
    """Render a population of individuals as a two-part music21 Score.

    Part 'part0' holds the melodies, 'part1' the accompanying chords (one
    chord measure per source measure). Note durations are stored as fractions
    of a whole note, hence the division by 0.25 to get quarterLengths.

    :param population: individuals, each exposing a ``measures`` list
    :return: music21 stream.Score
    """
    s = stream.Score(id='mainScore')
    part = stream.Part(id='part0')
    part1 = stream.Part(id='part1')
    for indiv in population:  # was: for i in range(len(population))
        # For each measure
        for m in indiv.measures:
            measure = stream.Measure()
            chord_measure = stream.Measure()
            if m.chord is not None:
                chord_measure.append(chord.Chord(m.chord, quarterLength=4.0))
            duration_count = 0.0
            # For each note — Rest and Note share the duration construction
            for j in m.notes:
                quarter_length = j.duration.duration_value / 0.25
                if j.pitch == 'REST':
                    n = note.Rest()
                else:
                    n = note.Note(j.pitch)
                n.duration = duration.Duration(quarterLength=quarter_length)
                measure.append(n)
                duration_count += j.duration.duration_value
            # Pad the last note so the measure sums to a whole note
            if duration_count < 1.0:
                measure[-1].duration.quarterLength += (1.0 - duration_count) / 0.25
            part.append(measure)
            part1.append(chord_measure)
    s.append(part)
    s.append(part1)
    return s
def testExportMetronomeMarksC(self):
    """Metronome marks at several offsets export the right MusicXML units."""
    from music21 import tempo
    from music21 import duration
    # set metronome positions at different offsets in a measure or part
    p = stream.Part()
    p.repeatAppend(note.Note('g#3'), 8)  # default quarter assumed
    insertions = (
        (0, tempo.MetronomeMark(
            number=222.2, referent=duration.Duration(quarterLength=.75))),
        (3, tempo.MetronomeMark(number=106, parentheses=True)),
        (7, tempo.MetronomeMark(
            number=93, referent=duration.Duration(quarterLength=.25))),
    )
    for insert_offset, mark in insertions:
        p.insert(insert_offset, mark)
    #p.show()

    raw = fromMusic21Object(p)
    # each of these fragments must appear somewhere in the output
    for fragment in ('<beat-unit>eighth</beat-unit>',
                     '<beat-unit-dot/>',
                     '<per-minute>222.2</per-minute>'):
        self.assertEqual(raw.find(fragment) > 0, True)
    # exactly one parenthesized mark and two plain ones
    self.assertEqual(raw.count('<metronome parentheses="yes">') == 1, True)
    self.assertEqual(raw.count('<metronome parentheses="no">') == 2, True)
def postProcess(
    output,
    n_tracks=4,
    n_bars=2,
    n_steps_per_bar=16,
):
    """Turn raw model output into a music21 Score, one Part per track.

    Runs of equal pitches are merged into longer notes, flushing at every
    pitch change and at each beat boundary; one step == 0.25 quarterLength.
    """
    parts = stream.Score()
    parts.append(tempo.MetronomeMark(number=66))
    max_pitches = binarise_output(output)
    midi_note_score = np.vstack([
        max_pitches[i].reshape([n_bars * n_steps_per_bar, n_tracks])
        for i in range(len(output))
    ])
    for track in range(n_tracks):
        column = midi_note_score[:, track]
        part = stream.Part()
        previous_pitch = int(column[0])
        held = 0
        for step, raw_pitch in enumerate(column):
            current_pitch = int(raw_pitch)
            # flush the held note at a pitch change or beat boundary
            if step > 0 and (current_pitch != previous_pitch or step % 4 == 0):
                flushed = note.Note(previous_pitch)
                flushed.duration = duration.Duration(held)
                part.append(flushed)
                held = 0
                previous_pitch = current_pitch
            held = held + 0.25
        tail = note.Note(previous_pitch)
        tail.duration = duration.Duration(held)
        part.append(tail)
        parts.append(part)
    return parts
def make_music(model_file):
    """Walk a Markov chain loaded from *model_file* and show a 4-voice score."""
    with open(model_file) as f:
        model = json.load(f)
    chain = Chain.from_json(model)

    score = stream.Score()
    voice_instruments = (
        (Voice.Soprano, instrument.Soprano()),
        (Voice.Alto, instrument.Alto()),
        (Voice.Tenor, instrument.Tenor()),
        (Voice.Bass, instrument.Bass()),
    )
    parts = {}
    for voice, voice_instrument in voice_instruments:
        voice_part = stream.Part()
        voice_part.insert(0, voice_instrument)
        parts[voice] = voice_part

    # elapsed musical time and the latest (pitch, duration) for each voice
    counter = {voice: Decimal(0.) for voice, _ in voice_instruments}
    current_state = {voice: None for voice, _ in voice_instruments}

    for state in chain.walk():
        S, A, T, B = make_tuple(state)
        current_state[Voice.Soprano] = S
        current_state[Voice.Alto] = A
        current_state[Voice.Tenor] = T
        current_state[Voice.Bass] = B
        # advance only the voices that are furthest behind in time
        min_value = min(counter.values())
        min_voices = [k for k in counter if counter[k] == min_value]
        for voice in min_voices:
            pitch, d = current_state[voice]
            if pitch == 'rest':
                n = note.Rest(duration=duration.Duration(d))
            else:
                n = note.Note(pitch, duration=duration.Duration(d))
            parts[voice].append(n)
            counter[voice] += Decimal(d)

    for voice, voice_part in parts.items():
        score.insert(Voice.order(voice), voice_part)
    score.show()
def createDoubleInvertedTurnMeasure():
    '''
    Returns a dictionary with the following keys

    returnDict = {
        "name": string,
        "midi": measure stream,
        "omr": measure stream,
        "expected": measure stream,
    }
    '''
    # OMR: B-flat, G, B-flat — each outer note should gain an inverted turn.
    omrNote1 = note.Note("B-")
    middleNote = note.Note("G")
    omrNote2 = note.Note("B-")  # enharmonic to trill
    omrMeasure = stream.Measure()
    omrMeasure.append([omrNote1, middleNote, omrNote2])

    # Expected fix: copies of the outer notes with InvertedTurn expressions.
    annotated = []
    for outer in (omrNote1, omrNote2):
        fixedNote = deepcopy(outer)
        fixedNote.expressions.append(expressions.InvertedTurn())
        annotated.append(fixedNote)
    expectedFixedOmrMeasure = stream.Stream()
    expectedFixedOmrMeasure.append(
        [annotated[0], deepcopy(middleNote), annotated[1]])

    # MIDI: the two turns written out as sixteenth-length notes.
    turn1 = [note.Note(p) for p in ("A", "B-", "C5", "B-")]
    turn2 = [note.Note(p) for p in ("G#", "A#", "B", "A#")]
    for ornamentNote in turn1 + turn2:
        ornamentNote.duration = duration.Duration(.25)
    midiMeasure = stream.Measure()
    midiMeasure.append([*turn1, deepcopy(middleNote), *turn2])

    return {
        "name":
        "Inverted turns with accidentals separated By non-ornament Note",
        "midi": midiMeasure,
        "omr": omrMeasure,
        "expected": expectedFixedOmrMeasure,
    }
def createDoubleInvertedTurnMeasure():
    '''
    Returns a dictionary with the following keys

    returnDict = {
        'name': string,
        'midi': measure stream,
        'omr': measure stream,
        'expected': measure stream,
    }
    '''
    omrNote1 = note.Note('B-')
    middleNote = note.Note('G')
    omrNote2 = note.Note('B-')  # enharmonic to trill
    omrMeasure = stream.Measure()
    omrMeasure.append([omrNote1, middleNote, omrNote2])

    # expected result: both outer notes carry an InvertedTurn expression
    firstExpected = deepcopy(omrNote1)
    firstExpected.expressions.append(expressions.InvertedTurn())
    secondExpected = deepcopy(omrNote2)
    secondExpected.expressions.append(expressions.InvertedTurn())
    expectedFixedOmrMeasure = stream.Stream()
    expectedFixedOmrMeasure.append(
        [firstExpected, deepcopy(middleNote), secondExpected])

    # MIDI realization: each turn is four sixteenth-length notes
    sixteenthLength = .25
    turn1 = []
    for pitchName in ('A', 'B-', 'C5', 'B-'):
        realized = note.Note(pitchName)
        realized.duration = duration.Duration(sixteenthLength)
        turn1.append(realized)
    turn2 = []
    for pitchName in ('G#', 'A#', 'B', 'A#'):
        realized = note.Note(pitchName)
        realized.duration = duration.Duration(sixteenthLength)
        turn2.append(realized)
    midiMeasure = stream.Measure()
    midiMeasure.append([*turn1, deepcopy(middleNote), *turn2])

    return {
        'name':
        'Inverted turns with accidentals separated By non-ornament Note',
        'midi': midiMeasure,
        'omr': omrMeasure,
        'expected': expectedFixedOmrMeasure,
    }
def createNachschlagTrillMeasure():
    '''
    Returns a dictionary with the following keys

    returnDict = {
        "name": string,
        "midi": measure stream,
        "omr": measure stream,
        "expected": measure stream,
    }
    '''
    noteDuration = duration.Duration('quarter')
    trillDuration = duration.Duration(.125)

    # OMR side: a single quarter-note E.
    n0 = note.Note("E")
    n0.duration = noteDuration
    omrMeasure = stream.Measure()
    omrMeasure.append(n0)

    # MIDI side: the trill written out, ending on D (the nachschlag turn-out).
    tn1 = note.Note("E")
    tn1.duration = trillDuration
    tn2 = note.Note("F")
    tn2.duration = trillDuration
    tn3 = note.Note("D")
    tn3.duration = trillDuration
    expandedTrill = [
        tn1, tn2, deepcopy(tn1), deepcopy(tn2),
        deepcopy(tn1), deepcopy(tn2), deepcopy(tn1), tn3
    ]
    midiMeasure = stream.Measure()
    midiMeasure.append(expandedTrill)

    # Expected: the OMR note annotated with a nachschlag trill.
    nachschlagTrill = expressions.Trill()
    nachschlagTrill.nachschlag = True
    nachschlagTrill.quarterLength = trillDuration.quarterLength
    noteWithTrill = deepcopy(n0)
    noteWithTrill.expressions.append(deepcopy(nachschlagTrill))
    expectedFixedOmrMeasure = stream.Measure()
    expectedFixedOmrMeasure.append(noteWithTrill)

    return {
        "name": "Nachschlag Trill",
        "midi": midiMeasure,
        "omr": omrMeasure,
        "expected": expectedFixedOmrMeasure,
    }
def convert_measure_to_music21_measure(m: Measure):
    """Convert a project Measure into a music21 stream.Measure.

    Note durations are stored as fractions of a whole note, hence the
    division by 0.25 to obtain quarterLengths; a pitch of 'REST' becomes
    a music21 Rest.

    :param m: source Measure whose ``notes`` carry pitch and duration info
    :return: music21 stream.Measure numbered 1
    """
    # (removed the original's stray no-op annotation statement `m.notes: [Note]`)
    measure = stream.Measure(1)
    for j in m.notes:
        quarter_length = j.duration.duration_value / 0.25
        if j.pitch == 'REST':
            element = note.Rest()
        else:
            element = note.Note(j.pitch)
        element.duration = duration.Duration(quarterLength=quarter_length)
        measure.append(element)
    return measure
def _left_hand_interlude():
    """Build the left-hand interlude voice: 6/4, the same phrase twice."""
    voice = stream.Voice()
    voice.append(meter.TimeSignature("6/4"))
    for _ in range(2):
        # long rest, then a short E1 into a held A0, both an octave lower
        voice.append(note.Rest(duration=duration.Duration(2.75)))
        low_e = note.Note("E1", duration=duration.Duration(0.25))
        low_a = note.Note("A0", duration=duration.Duration(3))
        octave_line = spanner.Ottava()
        octave_line.type = (8, "down")
        octave_line.addSpannedElements([low_e, low_a])
        voice.append(octave_line)
        voice.append(low_e)
        voice.append(low_a)
    voice.makeMeasures(inPlace=True, finalBarline=None)
    return voice
def translate(int_note, dur):
    """
    Given an integer value of a note, gets a corresponding music21.note object

    :param int_note: integer value of the note (0 maps to C2, 12 per octave)
    :param dur: duration of desired note in quarterLengths; a negative value
        yields a Rest of length ``-dur``
    :return music21.note (note.Rest when dur < 0)
    """
    pitch_names = [
        "C", "C#", "D", "D#", "E", "F", "F#", "G", "G#", "A", "A#", "B"
    ]
    # renamed from `pitch` to avoid shadowing the music21 `pitch` module
    # used elsewhere in this file; arithmetic kept identical to the original
    pitch_name = pitch_names[int_note % 12] + str(int(2 + int_note / 12))
    if dur < 0:
        return note.Rest(duration=duration.Duration(quarterLength=-dur))
    return note.Note(pitch_name, duration=duration.Duration(quarterLength=dur))
def notes_to_midi(self, run_folder, output, filename=None):
    """Write each generated score in *output* to a MIDI file.

    :param run_folder: base folder containing the samples/ directory
    :param output: raw generator output, one entry per score
    :param filename: optional explicit file stem; by default files are named
        sample_<epoch>_<score_num>.midi
    """
    # binarise once — the result does not depend on score_num, but the
    # original recomputed it on every loop iteration
    max_pitches = self.binarise_output(output)
    for score_num in range(len(output)):
        midi_note_score = max_pitches[score_num].reshape(
            [self.n_bars * self.n_steps_per_bar, self.n_tracks])
        parts = stream.Score()
        parts.append(tempo.MetronomeMark(number=66))
        for i in range(self.n_tracks):
            last_x = int(midi_note_score[:, i][0])
            s = stream.Part()
            dur = 0
            for idx, x in enumerate(midi_note_score[:, i]):
                x = int(x)
                # flush the held note at a pitch change or beat boundary;
                # each note is preceded by a Trumpet marker as before
                if (x != last_x or idx % 4 == 0) and idx > 0:
                    n = note.Note(last_x)
                    n.duration = duration.Duration(dur)
                    s.append(instrument.Trumpet())
                    s.append(n)
                    dur = 0
                    last_x = x
                dur = dur + 0.25
            # flush the final held note of the track
            n = note.Note(last_x)
            n.duration = duration.Duration(dur)
            s.append(instrument.Trumpet())
            s.append(n)
            parts.append(s)
        if filename is None:
            parts.write('midi',
                        fp=os.path.join(
                            run_folder,
                            "samples/sample_{}_{}.midi".format(
                                self.epoch, score_num)))
        else:
            parts.write(
                'midi',
                fp=os.path.join(run_folder,
                                "samples/{}.midi".format(filename)))
def createSingleTurnMeasure():
    '''
    Returns a dictionary with the following keys

    returnDict = {
        "name": string,
        "midi": measure stream,
        "omr": measure stream,
        "expected": measure stream,
    }
    '''
    # OMR: a single whole-note F.
    wholeNote = note.Note("F")
    wholeNote.duration = duration.Duration("whole")
    omrMeasure = stream.Measure()
    omrMeasure.append(wholeNote)

    # Expected: the same note with a Turn expression attached.
    annotatedNote = deepcopy(wholeNote)
    annotatedNote.expressions.append(expressions.Turn())
    expectedFixedOmrMeasure = stream.Stream()
    expectedFixedOmrMeasure.append(annotatedNote)

    # MIDI: the turn written out as G-F-E-F.
    midiMeasure = stream.Measure()
    midiMeasure.append([note.Note(p) for p in ("G", "F", "E", "F")])

    return {
        "name": "Single Turn Measure",
        "midi": midiMeasure,
        "omr": omrMeasure,
        "expected": expectedFixedOmrMeasure,
    }
def createMidiFromMat(noteMat):
    """Reconstruct a music21 Stream from a binary piano-roll matrix.

    Rows are pitches (row i maps to pitch class ((i % 12) + 4) % 12 in octave
    (i + 4) // 12 + 2), columns are sixteenth-note time steps; a horizontal
    run of 1s is one sustained note.

    :param noteMat: 2-D matrix of 0/1 cell values
    :return: music21 stream.Stream containing the reconstructed notes
    """
    new_notes_lst = []
    num_steps = noteMat.shape[1]
    for i in range(noteMat.shape[0]):
        j = 0
        while j < num_steps:
            count = 1
            if int(noteMat[i, j]) == 1:
                # Bounds-guard the lookahead: the original indexed
                # noteMat[i, j + count] unchecked and raised IndexError
                # whenever a note run extended to the last column.
                while j + count < num_steps and int(noteMat[i, j + count]) == 1:
                    count += 1
                durations = breakInstances(count)
                newDuration = duration.Duration()
                qlength = 0.0
                for d in durations:
                    qlength += d / 4.0
                newDuration.quarterLength = qlength
                octave = (i + 4) // 12 + 2
                pitchClass = ((i % 12) + 4) % 12
                newNote = note.Note(pitchClass)
                newNote.octave = octave
                newNote.duration = newDuration
                newNote.offset = j / 4.0
                new_notes_lst.append(newNote)
            j = j + count
    reconstructedMidi = stream.Stream(new_notes_lst)
    return reconstructedMidi
def durations_to_stream(durations: Sequence[Union[numbers.Number, duration.Duration, note.Note]]):
    """Converts a sequence of durations to a Stream containing note objects of that duration.

    Args:
      durations: Sequence of durations to convert to a stream. Sequence can
        consist of numeric values (1 = quarter note), music21 Duration
        objects or music21 Note objects.

    Returns:
      Stream containing a sequence of notes with the corresponding durations.
    """
    post_stream = stream.Stream()
    for item in durations:
        # The three accepted types are disjoint, so branch order is free.
        if isinstance(item, note.Note):
            # already a Note: append as-is
            post_stream.append(item)
        elif isinstance(item, duration.Duration):
            held = note.Note()
            held.duration = item
            post_stream.append(held)
        elif isinstance(item, numbers.Number):
            held = note.Note()
            held.duration = duration.Duration(item)
            post_stream.append(held)
        # anything else is silently skipped, matching the original behavior
    return post_stream
def createNonTurnMeasure():
    '''
    Returns a dictionary with the following keys

    returnDict = {
        'name': string,
        'midi': measure stream,
        'omr': measure stream,
        'expected': measure stream,
    }
    '''
    # OMR: a whole-note A; the MIDI figure below is NOT a turn on it.
    heldNote = note.Note('A')
    heldNote.duration = duration.Duration('whole')
    omrMeasure = stream.Measure()
    omrMeasure.append(heldNote)

    midiMeasure = stream.Measure()
    midiMeasure.append([note.Note(p) for p in ('B', 'A', 'G', 'F')])

    # no ornament detected, so the expectation equals the OMR input
    return {
        'name': 'Non-Turn Measure',
        'midi': midiMeasure,
        'omr': omrMeasure,
        'expected': deepcopy(omrMeasure),
    }
def createSingleTurnMeasure():
    '''
    Returns a dictionary with the following keys

    returnDict = {
        'name': string,
        'midi': measure stream,
        'omr': measure stream,
        'expected': measure stream,
    }
    '''
    omrMeasure = stream.Measure()
    omrNote = note.Note('F')
    omrNote.duration = duration.Duration('whole')
    omrMeasure.append(omrNote)

    # expected result: the same whole note carrying a Turn ornament
    ornamented = deepcopy(omrNote)
    ornamented.expressions.append(expressions.Turn())
    expectedFixedOmrMeasure = stream.Stream()
    expectedFixedOmrMeasure.append(ornamented)

    # MIDI realization of the turn: upper neighbor, main, lower, main
    turnNotes = []
    for pitchName in ('G', 'F', 'E', 'F'):
        turnNotes.append(note.Note(pitchName))
    midiMeasure = stream.Measure()
    midiMeasure.append(turnNotes)

    return {
        'name': 'Single Turn Measure',
        'midi': midiMeasure,
        'omr': omrMeasure,
        'expected': expectedFixedOmrMeasure,
    }
def create_midi(prediction_output):
    """ convert the output from the prediction to notes and create a midi file
        from the notes """
    offset = 0
    output_notes = []
    x = 0
    # create note and chord objects based on the values generated by the model
    while x < len(prediction_output):
        print(prediction_output[x])
        if prediction_output[x] != "Z":
            #write note if not empty
            notelength = 0.25
            new_note = note.Note(prediction_output[x])
            new_note.offset = offset
            new_note.storedInstrument = instrument.Piano()
            # Scan ahead for repeats of the same symbol to extend this note.
            # NOTE(review): the loop condition `y < 128 or y == int(offset*4)+3`
            # looks suspicious — the `or` arm is unreachable once y >= 128, and
            # `y` can exceed len(prediction_output), raising IndexError for
            # sequences shorter than 128; presumably a bounded two-step
            # lookahead (`and`) was intended — confirm against the encoding.
            y = int(offset * 4) + 1
            while y < 128 or y == int(offset * 4) + 3:
                if (prediction_output[y] == prediction_output[x]):
                    # same symbol repeats: lengthen the note by a sixteenth
                    notelength += 0.25
                    offset += 0.25
                    x += 1
                y += 1
            new_note.duration = duration.Duration(notelength)
            output_notes.append(new_note)
        # increase offset each iteration so that notes do not stack
        x += 1
        offset += 0.25
    midi_stream = stream.Stream(output_notes)
    midi_stream.write('midi', fp='test_output.mid')
def createNonTurnMeasure():
    '''
    Returns a dictionary with the following keys

    returnDict = {
        "name": string,
        "midi": measure stream,
        "omr": measure stream,
        "expected": measure stream,
    }
    '''
    # OMR side: a single whole-note A
    omrMeasure = stream.Measure()
    sustained = note.Note("A")
    sustained.duration = duration.Duration("whole")
    omrMeasure.append(sustained)

    # MIDI side: B-A-G-F, which is not a turn figure on A
    figure = [note.Note("B"), note.Note("A"), note.Note("G"), note.Note("F")]
    midiMeasure = stream.Measure()
    midiMeasure.append(figure)

    # nothing to fix, so the expectation is an untouched copy of the OMR
    return {
        "name": "Non-Turn Measure",
        "midi": midiMeasure,
        "omr": omrMeasure,
        "expected": deepcopy(omrMeasure),
    }
def to_tuple(letter_note, octave, length):
    """Return (frequency_hz, quarter_length) for a pitch name, octave, length.

    :param letter_note: pitch letter (e.g. 'C', 'F#')
    :param octave: octave number appended to the pitch letter
    :param length: note length, convertible to float quarterLengths
    :return: tuple of (frequency in Hz, duration in quarterLengths)
    """
    from music21 import note, duration
    # Use fresh local names: the original rebound the `letter_note` parameter
    # and then shadowed the imported `duration` module with a float local.
    m21_note = note.Note(str(letter_note) + str(octave))
    m21_note.duration = duration.Duration(float(length))
    freq = m21_note.pitch.frequency
    quarter_length = m21_note.duration.quarterLength
    return freq, quarter_length
def generate_cycle_pairs_for_all_string_sets(root_scale, tonic, pair_type, voicing=Voicing.Closed):
    """Generate one titled cycle-pair score per 3-string set.

    :param root_scale: scale object supplying the key (must expose ``.name``)
    :param tonic: tonic pitch for the triads
    :param pair_type: cycle label used in the title (e.g. a cycle number)
    :param voicing: triad voicing; determines which string sets are iterated
    :return: list of formatted music21 scores, one per string set
    """
    cycle_pairs = []
    for strings in iteration_function(voicing):
        string_set = (GuitarRange.get_string(strings[0]),
                      GuitarRange.get_string(strings[1]),
                      GuitarRange.get_string(strings[2]))
        tonic_triad = generate_tonic_triad(root_scale, tonic, string_set, voicing)
        cycle_pair = generate_cycle_pair(root_scale, tonic_triad, pair_type, string_set, voicing)

        # populate all the metadata to make the titling and everything automatic
        cycle_pair.metadata = metadata.Metadata()
        cycle_pair.metadata.title = "Cycle " + pair_type + " Progression in " + root_scale.name + "\nString Set: " + \
            str(string_set[0].number.value) + "-" + str(string_set[1].number.value) + "-" + \
            str(string_set[2].number.value) + "; " + Voicing.to_string(voicing) + " Triads"
        cycle_pair.metadata.composer = "Graham Smith"
        cycle_pair.metadata.date = "2020"

        # add system breaks at the end each measure to make it one measure per line
        cycle_pair.definesExplicitSystemBreaks = True
        for s in cycle_pair.getElementsByClass(stream.Stream):
            measures = s.getElementsByClass(stream.Measure)
            for m in measures:
                m.append(layout.SystemLayout(isNew=True))
        cycle_pair.definesExplicitSystemBreaks = True

        # add notation to make the score easier to read
        # NOTE(review): the index paths below assume a fixed score layout
        # (part 1 / measure 0 / element 1, part 2 / measure 0 / element 0)
        # produced by generate_cycle_pair — confirm if that layout changes.
        cycle_pair[1][0][1].lyric = "First cycle starts"
        cycle_pair[2][0][0].lyric = "Second cycle starts"

        # add an ending measure to make the line breaks and formatting a bit cleaner/more consistent
        m = stream.Measure()
        last_chord_as_list = list(tonic_triad.pitches)
        check_note_ranges_and_transpose(last_chord_as_list, string_set)
        last_chord = chord.Chord(last_chord_as_list)
        last_chord.duration = duration.Duration(4.0)
        m.append(last_chord)
        r = note.Rest()
        r.duration = duration.Duration(2.0)
        m.append(r)
        cycle_pair[2].append(m)
        ensure_unique_chords(cycle_pair)
        # cycle_pair.show()
        cycle_pairs.append(cycle_pair)
    return cycle_pairs
def create_midi(self, prediction_output):
    """ convert the output from the prediction to notes and create a midi
        file from the notes """
    offset = 0
    output_notes = []
    # create note and chord objects based on the values generated by the model
    for pattern in prediction_output:
        # token format: <pitch-or-chord>$<duration>, duration possibly "a/b"
        if "$" in pattern:
            pattern, dur = pattern.split("$")
            if "/" in dur:
                a, b = dur.split("/")
                dur = float(a) / float(b)
            else:
                dur = float(dur)
        # NOTE(review): a token without '$' reuses `dur` from the previous
        # iteration (NameError on the very first token) — confirm that every
        # token carries a duration.
        # pattern is a chord
        if ("." in pattern) or pattern.isdigit():
            notes_in_chord = pattern.split(".")
            notes = []
            for current_note in notes_in_chord:
                new_note = note.Note(int(current_note))
                new_note.storedInstrument = instrument.Piano()
                notes.append(new_note)
            new_chord = chord.Chord(notes)
            new_chord.offset = offset
            new_chord.duration = duration.Duration(dur)
            output_notes.append(new_chord)
        # pattern is a rest
        # BUGFIX: was `pattern is "NULL"` — identity comparison against a
        # string literal is unreliable (split() returns fresh objects) and a
        # SyntaxWarning since Python 3.8; equality is the intended test.
        elif pattern == "NULL":
            offset += TIMESTEP
        # pattern is a note
        else:
            new_note = note.Note(pattern)
            new_note.offset = offset
            new_note.storedInstrument = instrument.Piano()
            new_note.duration = duration.Duration(dur)
            output_notes.append(new_note)
        # increase offset each iteration so that notes do not stack
        offset += TIMESTEP
    midi_stream = stream.Stream(output_notes)
    output_file = os.path.basename(self.weights) + ".mid"
    print("output to " + output_file)
    midi_stream.write("midi", fp=output_file)
def create_midi(self, prediction_output):
    """ convert the output from the prediction to notes and create a midi
        file from the notes """
    offset = 0
    output_notes = []
    # create note and chord objects based on the values generated by the model
    for pattern in prediction_output:
        # token format: <pitch-or-chord>$<duration>, duration possibly 'a/b'
        if '$' in pattern:
            pattern, dur = pattern.split('$')
            if '/' in dur:
                a, b = dur.split('/')
                dur = float(a) / float(b)
            else:
                dur = float(dur)
        # NOTE(review): a token without '$' reuses `dur` from the previous
        # iteration (NameError on the very first token) — confirm that every
        # token carries a duration.
        # pattern is a chord
        if ('.' in pattern) or pattern.isdigit():
            notes_in_chord = pattern.split('.')
            notes = []
            for current_note in notes_in_chord:
                new_note = note.Note(int(current_note))
                new_note.storedInstrument = instrument.Piano()
                notes.append(new_note)
            new_chord = chord.Chord(notes)
            new_chord.offset = offset
            new_chord.duration = duration.Duration(dur)
            output_notes.append(new_chord)
        # pattern is a rest
        # BUGFIX: was `pattern is 'NULL'` — identity comparison against a
        # string literal is unreliable (split() returns fresh objects) and a
        # SyntaxWarning since Python 3.8; equality is the intended test.
        elif pattern == 'NULL':
            offset += TIMESTEP
        # pattern is a note
        else:
            new_note = note.Note(pattern)
            new_note.offset = offset
            new_note.storedInstrument = instrument.Piano()
            new_note.duration = duration.Duration(dur)
            output_notes.append(new_note)
        # increase offset each iteration so that notes do not stack
        offset += TIMESTEP
    midi_stream = stream.Stream(output_notes)
    output_file = MODEL_NAME + '.mid'
    print('output to ' + output_file)
    midi_stream.write('midi', fp=output_file)
def test_duration_to_lily_17(self):
    # This should be rounded to qL==8.0 ... but I don't know how to make a
    # single-component duration with this qL, so I can't run this test as it
    # gets rounded, only as it produces an error.
    #self.assertEqual(_functions.duration_to_lily(duration.Duration(7.99609375)), '\\breve')
    # 7.99609375 is 8 - 1/256: just under a breve and not representable as a
    # single-component duration, so duration_to_lily must raise.
    self.assertRaises(problems.ImpossibleToProcessError,
                      functions.duration_to_lily,
                      duration.Duration(7.99609375))