def create_midi(prediction_output):
    """Convert predicted tokens into music21 notes/chords and write a MIDI file.

    Each token is either a chord encoding like "60.64.67" (dot-separated MIDI
    pitch numbers) or a single note name. Successive events are placed 0.5
    quarter-lengths apart so they do not stack. Output goes to
    'test_violin3.mid'.
    """
    events = []
    position = 0
    for token in prediction_output:
        if '.' in token or token.isdigit():
            # Dot-separated pitch numbers -> build a Chord from Note objects.
            members = []
            for pitch_num in token.split('.'):
                member = note.Note(int(pitch_num))
                member.storedInstrument = instrument.Violin()
                members.append(member)
            event = chord.Chord(members)
        else:
            # Anything else is a plain note name.
            event = note.Note(token)
            event.storedInstrument = instrument.Violin()
        event.offset = position
        events.append(event)
        # Advance the offset so consecutive events do not overlap.
        position += 0.5
    stream.Stream(events).write('midi', fp='test_violin3.mid')
def deextract_notes(prediction_output):
    """Rebuild music21 note/chord objects from predicted tokens and save MIDI.

    A token containing '.' or consisting only of digits is treated as a chord
    of MIDI pitch numbers; any other token is a single note name. Events are
    spaced 0.5 quarter-lengths apart and written to GENERATED_MIDI.
    """
    parsed = []
    for step, token in enumerate(prediction_output):
        # Offset grows by 0.5 per token, so it is simply step * 0.5.
        when = step * 0.5
        if '.' in token or token.isdigit():
            # Chord token: each dot-separated field is a MIDI pitch number.
            members = []
            for num in token.split('.'):
                member = note.Note(int(num))
                member.storedInstrument = instrument.Violin()
                members.append(member)
            event = chord.Chord(members)
        else:
            # Single note token.
            event = note.Note(token)
            event.storedInstrument = instrument.Violin()
        event.offset = when
        parsed.append(event)
    stream.Stream(parsed).write('midi', fp=GENERATED_MIDI)
def generate_music(result_data, instr, filename):
    """Generate a MIDI file from a sequence of predicted notes and save it.

    :param result_data: sequence of note/chord tokens (converted to str);
        a token containing '.' or made only of digits is a chord of MIDI
        pitch numbers, anything else is a note name.
    :type result_data: list
    :param instr: instrument name ('Flute', 'Piano', 'Bass', 'Guitar',
        'Saxophone' or 'Violin'); unrecognised names add no instrument marker,
        matching the original if/elif behaviour.
    :type instr: str
    :param filename: output file name, without the '.mid' extension.
    :type filename: str
    """
    # Dispatch table replaces the duplicated six-way if/elif blocks; a fresh
    # instance is created per event, exactly as the original did.
    instrument_classes = {
        'Flute': instrument.Flute,
        'Piano': instrument.Piano,
        'Bass': instrument.Bass,
        'Guitar': instrument.Guitar,
        'Saxophone': instrument.Saxophone,
        'Violin': instrument.Violin,
    }
    tokens = [str(data) for data in result_data]
    offset = 0
    output_notes = []
    # Build Note (single pitch) or Chord objects from each token.
    for token in tokens:
        instr_cls = instrument_classes.get(instr)
        if instr_cls is not None:
            # Tag the upcoming event with the chosen instrument.
            output_notes.append(instr_cls())
        if ('.' in token) or token.isdigit():
            # Chord: dot-separated MIDI pitch numbers.
            notes = [note.Note(int(p)) for p in token.split('.')]
            new_chord = chord.Chord(notes)
            new_chord.offset = offset
            output_notes.append(new_chord)
        else:
            # Single note name.
            new_note = note.Note(token)
            new_note.offset = offset
            output_notes.append(new_note)
        offset += 1
    # Assemble the stream and write the MIDI file.
    midi_stream = stream.Stream(output_notes)
    midi_stream.write('midi', fp=filename + '.mid')
def test():
    """Build a 12-part score, one instrument per MIDI channel, and show it.

    MIDI channel 9 is skipped because General MIDI reserves it for
    percussion. Each part is transposed by whole octaves according to its
    entry in `instrument_octaves`. Relies on the module-level `stream` and
    `addPart` helpers.
    """
    from music21 import instrument as j
    sc1 = stream.Score()
    # Renamed from `instrument` to avoid shadowing the music21 module name
    # that the rest of this file imports.
    instruments = [
        j.Piccolo(), j.Xylophone(), j.Clarinet(), j.Oboe(), j.Violin(),
        j.ElectricGuitar(), j.Harp(), j.Piano(), j.Piano(), j.Piano(),
        j.Piano(), j.ElectricBass()
    ]
    instrument_octaves = [3, 2, 2, 2, 1, 1, 1, 2, 1, 0, -1, -2]
    for i, inst in enumerate(instruments):
        # Channels 0-8 map directly; 9 is the GM percussion channel, so
        # later instruments are shifted up by one.
        inst.midiChannel = i if i < 9 else i + 1
        part = addPart(instrument=inst)
        if instrument_octaves[i] != 0:
            part.transpose(12 * instrument_octaves[i], inPlace=True)
        sc1.insert(0, part)
    sc1.show()
class Instrument:
    '''
    Represents an instrument; associated with midi channel etc
    (see music21 docs).

    # TODO detector for whether instrument is capable of specific technique,
    # double-stop, etc.
    # TODO instruments like cello may use multiple clefs
    '''

    # Map to the music21 *classes* and instantiate in __init__: the original
    # stored shared instances here, so every Instrument('violin') aliased the
    # same mutable music21 Violin object.
    name_to_instrument21 = {
        'violin': instrument21.Violin,
        'viola': instrument21.Viola,
        'cello': instrument21.Violoncello
    }
    name_to_clef21 = {
        'violin': clef21.TrebleClef,
        'viola': clef21.AltoClef,
        'cello': clef21.BassClef
    }

    def __init__(self, name):
        """Create the instrument for `name` ('violin', 'viola' or 'cello').

        Raises KeyError for unknown names.
        """
        self.instrument = Instrument.name_to_instrument21[name]()
        self.clef = Instrument.name_to_clef21[name]()
        # there isn't a precise to_music21 analogue

    def check_pitch(self, pitch):
        """Return True if `pitch` lies within the instrument's range.

        `pitch` must provide a to_music21() conversion; a bound of None is
        treated as unbounded on that side.
        """
        low = self.instrument.lowestNote  # assume these are always music21 Pitch objects
        high = self.instrument.highestNote
        pitch_21 = pitch.to_music21()
        if low is not None and low > pitch_21:
            return False
        if high is not None and high < pitch_21:
            return False
        return True
def change_instrument(self, choice):
    """Switch the melody to one of five preset instruments.

    `choice` indexes the presets: 0 Piano, 1 AcousticGuitar, 2 Violin,
    3 Flute, 4 Mandolin. Updates both `actual_instrument` and slot 0 of
    `melody_stream`.
    """
    presets = (
        instrument.Piano,
        instrument.AcousticGuitar,
        instrument.Violin,
        instrument.Flute,
        instrument.Mandolin,
    )
    selected = presets[choice]()
    self.actual_instrument = selected
    self.melody_stream[0] = selected
def create_midi(prediction_output):
    """Convert predicted tokens into music21 note/chord objects.

    Tokens containing '.' or made only of digits are chords of MIDI pitch
    numbers; other tokens are note names. Events are spaced 0.5
    quarter-lengths apart.

    Fix: the original built `output_notes` and then silently discarded it
    (no return and no stream write); the list is now returned so callers
    can write it out. Returning a value is backward-compatible with callers
    that ignored the previous implicit None.
    """
    offset = 0
    output_notes = []
    for pattern in prediction_output:
        if ('.' in pattern) or pattern.isdigit():
            # Chord token: dot-separated MIDI pitch numbers.
            notes_in_chord = pattern.split('.')
            notes = []
            for current_note in notes_in_chord:
                new_note = note.Note(int(current_note))
                new_note.storedInstrument = instrument.Violin()
                notes.append(new_note)
            new_chord = chord.Chord(notes)
            new_chord.offset = offset
            output_notes.append(new_chord)
        else:
            # Single note token.
            new_note = note.Note(pattern)
            new_note.offset = offset
            new_note.storedInstrument = instrument.Violin()
            output_notes.append(new_note)
        # Increase the offset each iteration so notes do not stack.
        offset += 0.5
    return output_notes
def matrix_to_score(self, matrix, verbose=False):
    '''
    Takes a matrix of (P, T, 2) and turn it into a music21.stream.Score
    object, where P is the number of parts (must be 4 — a string quartet),
    T is the number of time slices, and dim is the note vector.
    '''
    assert len(matrix.shape) == 3, \
        "Input matrix needs to have 3-dimensions."
    num_parts, num_ticks, num_dim = matrix.shape
    assert num_parts == 4, "Input matrix needs to have 4 parts."
    assert num_ticks > 0, "No time slices in this matrix."
    assert num_dim == 2, "Note vector size mismatch."

    # Every piece must open with an articulated event (even a rest), so
    # force the articulation flag of the first tick in each part.
    matrix[:, 0, 1] = [1, 1, 1, 1]

    parts = [self._matrix_to_part(part_matrix) for part_matrix in matrix]

    # (instrument, part name, clef) per quartet voice, in score order;
    # the viola and cello parts carry no explicit part name.
    voices = [
        (instrument.Violin(), "Violin I", clef.TrebleClef()),
        (instrument.Violin(), "Violin II", clef.TrebleClef()),
        (instrument.Viola(), None, clef.AltoClef()),
        (instrument.Violoncello(), None, clef.BassClef()),
    ]
    score = Score()
    for part, (instr, part_name, part_clef) in zip(parts, voices):
        part.insert(0, instr)
        if part_name is not None:
            part.partName = part_name
        part.clef = part_clef
        score.append(part)
    return score
def generate(genre, instr, duration):
    """Generate a melody in the given genre/instrument and return the MIDI.

    :param genre: genre key; model and notes live under './encoding/<genre>/'.
    :param instr: 'guitar', 'violin' or 'piano'; anything else falls back to
        a bass drum, matching the original else branch.
    :param duration: requested length (string or int); converted to a note
        count via the empirical 250/63 factor.
    :return: whatever `gen_midi` returns for the generated notes.
    """
    duration = int(duration)
    note_len = int(250 / 63 * duration)
    print(genre)
    # Bug fix: the model path was hard-coded to the 'rock' directory for
    # every genre; the commented-out per-genre branch in the original shows
    # the intended path is './encoding/<genre>/<genre>_model.h5'.
    model = load_model('./encoding/' + genre + '/' + genre + '_model.h5')
    notes_dir = './encoding/' + genre + '/notes'

    # Instrument dispatch table; unknown names default to a bass drum.
    instrument_by_name = {
        'guitar': instrument.ElectricGuitar,
        'violin': instrument.Violin,
        'piano': instrument.Piano,
    }
    instrmt = instrument_by_name.get(instr, instrument.BassDrum)()

    with open(notes_dir, 'rb') as filepath:
        notes = pickle.load(filepath)

    # Vocabulary of distinct pitches, sorted for a stable index mapping.
    pitchnames = sorted(set(item for item in notes))
    n_vocab = len(set(notes))

    network_input, normalized_input = prepare_sequences1(
        notes, pitchnames, n_vocab)
    int_to_note = dict(
        (number, note) for number, note in enumerate(pitchnames))
    prediction_output = generate_notes(model, network_input, int_to_note,
                                       n_vocab, note_len)
    return gen_midi(prediction_output, genre, instrmt)
def get_instruments(genre, ontology):
    """Return the MIDI program numbers of a genre's characteristic band.

    :param genre: object whose label[0] names the genre.
    :param ontology: unused here; kept for interface compatibility.
    :return: list of MIDI program numbers; empty for unknown genres, as in
        the original if/elif chain.
    """
    # Characteristic instrument line-up per genre; replaces the duplicated
    # if/elif branches with a single lookup.
    genre_instruments = {
        "Blues": [instrument.AcousticGuitar, instrument.Harmonica,
                  instrument.TomTom],
        "Folk": [instrument.Banjo, instrument.AcousticBass,
                 instrument.Piano],
        "Rock": [instrument.ElectricGuitar, instrument.ElectricBass,
                 instrument.BassDrum],
        "Classical": [instrument.Violin, instrument.Oboe,
                      instrument.Flute, instrument.Viola],
        "Country": [instrument.AcousticGuitar, instrument.Banjo,
                    instrument.TomTom],
    }
    return [cls().midiProgram
            for cls in genre_instruments.get(genre.label[0], [])]
def convertor(out, s):
    """Insert the chosen instrument into every part of `s` and save a MIDI.

    :param out: instrument name ('Flute', 'Violin', 'Piano' or 'Celesta');
        any other value does nothing, matching the original if/elif chain.
    :param s: a music21 stream with a .parts collection.
    """
    # Supported instruments; replaces four duplicated branches.
    choices = {
        'Flute': instrument.Flute,
        'Violin': instrument.Violin,
        'Piano': instrument.Piano,
        'Celesta': instrument.Celesta,
    }
    instr_cls = choices.get(out)
    if instr_cls is None:
        return
    print(out)
    for p in s.parts:
        # Fresh instance per part, as the original created one per insert.
        p.insert(0, instr_cls())
    # NOTE(review): absolute Windows path preserved from the original —
    # consider making the output directory configurable.
    s.write('midi',
            'C:/Users/Ciprian/PycharmProjects/AI-music/static/midi/'
            + out + '.mid')
# NOTE(review): this chunk appears truncated — the chord branch at the end
# builds `new_chord` but never appends it to `output_notes`, and the
# single-note else-branch is missing; the remainder presumably follows in
# the original file. Relies on `model`, `sequence`, `num_vocab`,
# `int_to_pitch` and `prediction_output` defined earlier (not visible here).
for note_index in range(400):
    prediction_input = np.reshape(sequence, (1, len(sequence), 1))
    # normalize as in training
    prediction_input = prediction_input / float(num_vocab)
    prediction = model.predict(prediction_input, verbose=0)
    # Greedy decoding: pick the highest-probability vocabulary index.
    index = np.argmax(prediction)
    result = int_to_pitch[index]
    prediction_output.append(result)
    # append the prediction note and shift sequence over by one
    sequence.append(index)
    sequence = sequence[1:len(sequence)]
print('prediction sequence complete')
output_notes = []
output_notes.append(instrument.Violin())
offset = 0
# create note and chord objects based on the values generated by the model
for pattern in prediction_output:
    # Each token is "<pitch>,<duration>"; the duration may be a fraction
    # string (e.g. "1/3"), hence the Fraction round-trip.
    pitch_and_duration = pattern.split(',')
    pitch = pitch_and_duration[0]
    pitch_duration = float(Fraction((pitch_and_duration[1])))
    # pitch is a chord
    if ('.' in pitch) or pitch.isdigit():
        notes_in_chord = pitch.split('.')
        notes = []
        for current_note in notes_in_chord:
            new_note = note.Note(int(current_note))
            notes.append(new_note)
        new_chord = chord.Chord(notes)
        new_chord.duration = duration.Duration(pitch_duration)
import pathlib

from mido import MidiFile
from music21 import converter, instrument

from format import convert_to_axillary_representation, convert_to_midi_track, count_up_steps, count_down_steps
import pretty_midi

# Supported instrument names mapped to prototype music21 instances.
instruments_dictionary = {
    "Piano": instrument.Piano(),
    "EnglishHorn": instrument.EnglishHorn(),
    "ElectricOrgan": instrument.ElectricOrgan(),
    "Harpsichord": instrument.Harpsichord(),
    "PipeOrgan": instrument.PipeOrgan(),
    "Violin": instrument.Violin()
}


def change_instrument_in_mid(mid, instrument_name, buffer_file_path="./data/buffer/temporary.mid"):
    """Replace every instrument in `mid` with `instrument_name` and return it.

    The mido MidiFile is round-tripped through a buffer file because music21
    parses from disk. Raises KeyError if `instrument_name` is not a key of
    `instruments_dictionary`.

    Fix: the original inserted one shared instrument instance for every
    replaced element (and reused it across calls); a fresh instance is now
    created per replacement, avoiding aliasing the same mutable music21
    object in multiple stream sites.
    """
    pathlib.Path(buffer_file_path).parent.mkdir(parents=True, exist_ok=True)
    mid.save(buffer_file_path)
    s = converter.parse(buffer_file_path)
    # Derive the class from the prototype so the public dictionary keeps
    # its original instance values.
    instrument_cls = type(instruments_dictionary[instrument_name])
    for el in s.recurse():
        if 'Instrument' in el.classes:
            el.activeSite.replace(el, instrument_cls())
    s.write('midi', buffer_file_path)
    return MidiFile(buffer_file_path, clip=True)