def create_midi(prediction_output, fileName, BPM=120, offset=0, cycles=2, timeSignature=4):
    #convert the output from the prediction to notes and create a midi file
    #from the notes
    Offset = 0
    output_notes = []
    if not offset:
        offset = 480 / (BPM * cycles * timeSignature)
    # create note and chord objects based on the values generated by the model
    for pattern in prediction_output:
        # pattern is a chord
        if ('.' in pattern) or pattern.isdigit():
            notes_in_chord = pattern.split('.')
            notes = []
            for current_note in notes_in_chord:
                new_note = note.Note(int(current_note))
                new_note.storedInstrument = instrument.Guitar()
                notes.append(new_note)
            new_chord = chord.Chord(notes)
            new_chord.offset = Offset
            output_notes.append(new_chord)
        # pattern is a note
        else:
            new_note = note.Note(pattern)
            new_note.offset = Offset
            new_note.storedInstrument = instrument.Guitar()
            output_notes.append(new_note)
        # increase offset each iteration so that notes do not stack
        Offset += offset
        #offset += 0.5
    midi_stream = stream.Stream(output_notes)
    midi_stream.write('midi', fp=f'./converted/{fileName}.mid')
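# --- usage sketch ---
# These snippets assume music21 objects (note, chord, stream, instrument, scale,
# converter, duration) plus numpy/random are imported at module level; below is a
# minimal sketch of those imports and a toy call to the create_midi variant above.
# The prediction values, file name, and tempo are illustrative assumptions only.
import random

import numpy as np
from music21 import chord, converter, duration, instrument, note, scale, stream

toy_output = ['60', 'C4', '60.64.67', 'E4']  # MIDI numbers, note names, a '.'-joined chord
create_midi(toy_output, fileName='example', BPM=100)  # writes ./converted/example.mid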
def generate_music(result_data, instr, filename):
    """Generate MIDI music and save it.

    :param result_data: list of notes
    :type result_data: list
    :param instr: instrument name
    :type instr: str
    :param filename: output file name
    :type filename: str
    """
    result_data = [str(data) for data in result_data]
    offset = 0
    output_notes = []
    instrument_map = {
        'Flute': instrument.Flute,
        'Piano': instrument.Piano,
        'Bass': instrument.Bass,
        'Guitar': instrument.Guitar,
        'Saxophone': instrument.Saxophone,
        'Violin': instrument.Violin,
    }
    # create Note or Chord objects
    for data in result_data:
        if instr in instrument_map:
            output_notes.append(instrument_map[instr]())
        if ('.' in data) or data.isdigit():
            # '.'-separated pitches form a chord
            notes_in_chord = data.split('.')
            notes = []
            for current_note in notes_in_chord:
                new_note = note.Note(int(current_note))
                #new_note.storedInstrument = instrument.Flute()
                notes.append(new_note)
            new_chord = chord.Chord(notes)
            new_chord.offset = offset
            output_notes.append(new_chord)
        else:
            new_note = note.Note(data)
            new_note.offset = offset
            #new_note.storedInstrument = instrument.Flute()
            output_notes.append(new_note)
        offset += 1
    # create the music Stream
    midi_stream = stream.Stream(output_notes)
    # write the MIDI file
    midi_stream.write('midi', fp=filename + '.mid')
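# Hypothetical call to generate_music above; the note sequence, instrument label,
# and file name are made up for illustration only.
sample_notes = ['C4', '60.64.67', 'E4', 'G4']
generate_music(sample_notes, 'Guitar', 'generated_guitar')  # writes generated_guitar.mid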
def changeInstrument():
    s = converter.parse("music/forGuitar.mid")
    for i, p in enumerate(s.parts):
        if i == 0:
            p.insert(i, instrument.Guitar())
    s.write('midi', 'music/Guitar1.mid')
def create_midi(prediction_output, Scale, fileName, BPM=120, offset=0, cycles=1, timeSignature=4):
    #convert the output from the prediction to notes and create a midi file
    #from the notes
    Offset = 0
    output_notes = []
    if offset == 0:
        offset = 480 / (BPM * cycles * timeSignature)
    mode = Scale.split()[-1]
    if mode == 'Major':
        key = scale.MajorScale(Scale.split()[0])
    # pitch names that belong to the chosen scale
    scaleNotes = list({p.name for p in key.getPitches()})
    # create note and chord objects based on the values generated by the model
    for pattern in prediction_output:
        # pattern is a chord
        if ('.' in pattern) or pattern.isdigit():
            notes_in_chord = pattern.split('.')
            notes = []
            for current_note in notes_in_chord:
                new_note = note.Note(int(current_note))
                if new_note.name not in scaleNotes:
                    # out-of-scale pitch: drop it a semitone so it lands in the scale
                    # (an alternative would be to replace it with a rest)
                    new_note = note.Note(int(current_note) - 1)
                notes.append(new_note)
            new_chord = chord.Chord(notes)
            new_chord.offset = Offset
            output_notes.append(new_chord)
        # pattern is a note
        else:
            new_note = note.Note(pattern)
            if new_note.name not in scaleNotes:
                # out-of-scale pitch: drop it a semitone so it lands in the scale
                new_note = new_note.transpose(-1)
            new_note.offset = Offset
            new_note.storedInstrument = instrument.Guitar()
            output_notes.append(new_note)
        # increase offset each iteration so that notes do not stack
        Offset += offset / random.randint(1, 2)
        #offset += 0.5
    midi_stream = stream.Stream(output_notes)
    midi_stream.write(
        'midi',
        fp=f'/content/drive/My Drive/Colab Notebooks/music gen/data/{fileName}.mid'
    )
def create_midi(prediction_output, Scale, fileName, BPM=120, offset=0, cycles=1, timeSignature=4):
    #convert the output from the prediction to notes and create a midi file from the notes
    Offset = 0
    output_notes = []
    if not offset:
        offset = 480 / (BPM * cycles * timeSignature)
    mode = Scale.split()[-1]
    if mode == 'Major':
        key = scale.MajorScale(Scale.split()[0])
        diff = majors[key.tonic.name]
    elif mode == 'Minor':
        key = scale.MinorScale(Scale.split()[0])
        diff = minors[key.tonic.name]
    # pitch names that belong to the chosen scale
    scaleNotes = list({p.name for p in key.getPitches()})
    # create note and chord objects based on the values generated by the model
    for pattern in prediction_output:
        # pattern is a chord
        if ('.' in pattern) or pattern.isdigit():
            notes_in_chord = pattern.split('.')
            notes = []
            for current_note in notes_in_chord:
                new_note = note.Note(int(current_note) - diff)
                if new_note.name not in scaleNotes:
                    # still out of scale after the key shift: drop one more semitone
                    new_note = note.Note(int(current_note) - diff - 1)
                notes.append(new_note)
            new_chord = chord.Chord(notes)
            new_chord.offset = Offset
            output_notes.append(new_chord)
        # pattern is a note
        else:
            new_note = note.Note(pattern)
            new_note = new_note.transpose(-diff)
            if new_note.name not in scaleNotes:
                # still out of scale after the key shift: drop one more semitone
                new_note = new_note.transpose(-1)
            new_note.offset = Offset
            new_note.storedInstrument = instrument.Guitar()
            output_notes.append(new_note)
        # increase offset each iteration so that notes do not stack;
        # the modulo check is arbitrary and mostly picks the half step
        seed = random.randint(1, 1000000000)
        if seed % 32:
            Offset += offset / 2
        else:
            Offset += offset
        #offset += 0.5
    midi_stream = stream.Stream(output_notes)
    midi_stream.write('midi', fp=f'./static/userMIDIs/{fileName}.mid')
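# The create_midi variant above looks up module-level `majors` / `minors` tables
# that are not shown here. From the way `diff` is subtracted from each MIDI number,
# they presumably map a tonic name to the semitone shift that normalises the key;
# the entries below are only a guessed sketch of that shape, not the original
# project's tables.
majors = {'C': 0, 'C#': 1, 'D': 2, 'E-': 3, 'E': 4, 'F': 5}   # ...remaining tonics
minors = {'A': 0, 'B-': 1, 'B': 2, 'C': 3, 'C#': 4, 'D': 5}   # ...remaining tonics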
def create_midi2(prediction_output):
    """ convert the output from the prediction to notes and create a midi file
        from the notes """
    offset = 0
    output_notes = [instrument.Guitar()]
    # create note and chord objects based on the values generated by the model
    for pattern in prediction_output:
        # pattern is a chord
        if ('.' in pattern) or pattern.isdigit():
            notes_in_chord = pattern.split('.')
            notes = []
            for current_note in notes_in_chord:
                new_note = note.Note(int(current_note))
                notes.append(new_note)
            new_chord = chord.Chord(notes)
            new_chord.offset = offset
            output_notes.append(instrument.Guitar())
            output_notes.append(new_chord)
        # pattern is a note
        else:
            new_note = note.Note(pattern)
            new_note.offset = offset
            output_notes.append(instrument.Bass())
            output_notes.append(new_note)
        # increase offset each iteration so that notes do not stack
        offset += 0.5
    midi_stream = stream.Stream(output_notes)
    print('Saving Output file as midi....')
    midi_stream.write('midi', fp='test_output6.mid')
def matrix_to_midi(matrix, instName):
    first_touch = 1.0
    continuation = 0.0
    y_axis, x_axis = matrix.shape
    output_notes = []
    offset = 0
    matrix = matrix.astype(float)
    print(y_axis, x_axis)
    for y_axis_num in range(y_axis):
        one_freq_interval = matrix[y_axis_num, :]  # one pitch row over time
        # freq_val = 0  # to keep track of which position in the row we are looking at
        one_freq_interval_norm = converter_func(one_freq_interval)
        # print (one_freq_interval)
        i = 0
        offset = 0
        while i < len(one_freq_interval):
            how_many_repetitive = 0
            temp_i = i
            if one_freq_interval_norm[i] == first_touch:
                how_many_repetitive = how_many_repetitive_func(one_freq_interval_norm,
                                                               from_where=i + 1,
                                                               continuation=continuation)
                i += how_many_repetitive
            if how_many_repetitive > 0:
                new_note = note.Note(int_to_note(y_axis_num),
                                     duration=duration.Duration(0.25 * how_many_repetitive))
                new_note.offset = 0.25 * temp_i
                if instName == "Bass":
                    new_note.storedInstrument = instrument.Bass()
                elif instName == "Guitar":
                    new_note.storedInstrument = instrument.Guitar()
                elif instName == "Drums":
                    # hackish: no percussion class is used, fall back to an organ patch
                    new_note.storedInstrument = instrument.ElectricOrgan()
                else:
                    new_note.storedInstrument = instrument.Piano()
                output_notes.append(new_note)
            else:
                i += 1
    return output_notes
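# matrix_to_midi above relies on three helpers that are not shown here:
# converter_func, how_many_repetitive_func and int_to_note. The sketches below
# are assumptions about what they appear to do (mark note onsets in a piano-roll
# row, count how long a note is held, and map a row index to a pitch name); the
# threshold and LOWEST_MIDI_PITCH values are illustrative guesses.
import numpy as np
from music21 import note

LOWEST_MIDI_PITCH = 24  # assumed MIDI pitch of row 0


def converter_func(row, threshold=0.5, first_touch=1.0, continuation=0.0, silence=-1.0):
    # Threshold a piano-roll row: the first active frame of a run becomes an
    # onset, later active frames a continuation, inactive frames silence.
    active = np.asarray(row) > threshold
    norm = np.full(len(row), silence, dtype=float)
    for idx in range(len(row)):
        if active[idx]:
            norm[idx] = first_touch if (idx == 0 or not active[idx - 1]) else continuation
    return norm


def how_many_repetitive_func(norm_row, from_where=0, continuation=0.0):
    # Count the onset frame plus every consecutive continuation frame after it,
    # so a one-frame note still reports a length of 1.
    count = 1
    idx = from_where
    while idx < len(norm_row) and norm_row[idx] == continuation:
        count += 1
        idx += 1
    return count


def int_to_note(row_index):
    # Map a piano-roll row index to a pitch name via music21.
    return note.Note(row_index + LOWEST_MIDI_PITCH).nameWithOctave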
prediction_output.append(result)
pattern = np.append(pattern, index)
pattern = pattern[1:len(pattern)]

offlen = len(offset)
DifferentialOffset = (max(offset) - min(offset)) / len(offset)
offset2 = offset.copy()
output_notes = []
i = 0
offset = []
initial = 0
# rebuild the offset list as an evenly spaced ramp covering roughly the same span
for i in range(len(offset2)):
    offset.append(initial)
    initial = initial + DifferentialOffset
i = 0
for pattern in prediction_output:
    if ('.' in pattern) or pattern.isdigit():
        notes_in_chord = pattern.split('.')
        notes = []
        for check_note in notes_in_chord:
            gen_note = note.Note(int(check_note))
            gen_note.storedInstrument = instrument.Guitar()
autoencoder = tf.keras.Model(encoderInput, decoder(encoded))
autoencoder.compile(loss='binary_crossentropy', optimizer='rmsprop')

# Train autoencoder
autoencoder.fit(trainChordsFlat, trainChordsFlat, epochs=500)

# Sample the decoder and map the generated indices back to chord names
generatedChords = decoder(np.random.normal(size=(1, latentDim))).numpy().reshape(
    nChords, sequenceLength).argmax(0)
chordSequence = [intToChord[c] for c in generatedChords]

generated_dir = '../Output/'

# Generate stream with guitar as instrument
generatedStream = stream.Stream()
generatedStream.append(instrument.Guitar())

# Append notes and chords to stream object
for j in range(len(chordSequence)):
    try:
        generatedStream.append(note.Note(chordSequence[j].replace('.', ' ')))
    except:
        generatedStream.append(chord.Chord(chordSequence[j].replace('.', ' ')))

generatedStream.write('midi', fp=generated_dir + 'Beethoven.mid')
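# The sampling step above assumes an intToChord lookup (and the related nChords,
# sequenceLength, latentDim values) built while preparing the training data. A
# plausible sketch of that mapping, with `allChords` standing in for whatever flat
# list of '.'-joined chord strings the original preprocessing produced:
chordNames = sorted(set(allChords))
chordToInt = {c: i for i, c in enumerate(chordNames)}
intToChord = {i: c for c, i in chordToInt.items()}
nChords = len(chordNames)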
with open('inv_tbl_entre_dos_aguas.pickle', 'rb') as handle:
    inv_tbl = pickle.load(handle)

with open('lcm.pickle', 'rb') as handle:
    lcm = pickle.load(handle)


# In[ ]:

display(midi_stream)


# In[8]:

midi_stream = stream.Stream()
guitar_part = stream.Voice()
midi_stream.append(instrument.Guitar())

for index, row in encoded_part.iterrows():
    note_name = inv_tbl[row['Note']]
    if note_name == 'REST':
        nt = note.Rest()
    else:
        if ' ' in note_name:
            nt = chord.Chord(note_name)
        else:
            nt = note.Note(note_name)
    nt.duration.quarterLength = float(row['Duration']) / lcm
    nt.offset = float(row['Offset']) / lcm
    guitar_part.append(nt)

# switch params to being loaded from pickle, instead of being hardcoded
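# The cell above stops after filling guitar_part; presumably the Voice is then
# inserted into the stream and the result written out, roughly as below. The
# output path is a placeholder, not taken from the original notebook.
midi_stream.insert(0, guitar_part)
midi_stream.write('midi', fp='decoded_output.mid')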