from music21 import chord, instrument, note, stream


def create_midi(prediction_output):
    """Convert the output from the prediction to notes and create a MIDI file from them."""
    offset = 0
    output_notes = []

    # Create note and chord objects based on the values generated by the model.
    for pattern in prediction_output:
        # Pattern is a chord: '.'-separated MIDI pitch numbers.
        if ('.' in pattern) or pattern.isdigit():
            notes_in_chord = pattern.split('.')
            notes = []
            for current_note in notes_in_chord:
                new_note = note.Note(int(current_note))
                new_note.storedInstrument = instrument.ElectricGuitar()
                notes.append(new_note)
            new_chord = chord.Chord(notes)
            new_chord.offset = offset
            output_notes.append(new_chord)
        # Pattern is a single note (e.g. 'E4').
        else:
            new_note = note.Note(pattern)
            new_note.offset = offset
            new_note.storedInstrument = instrument.ElectricGuitar()
            output_notes.append(new_note)
        # Increase offset each iteration so that notes do not stack.
        offset += 0.5

    midi_stream = stream.Stream(output_notes)
    midi_stream.write('midi', fp='../tmp/created_music.mid')
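# Usage sketch (my assumption about the token format, not stated in the
# source): single notes are pitch names like 'E4'; chords are '.'-joined
# MIDI pitch numbers like '52.55.59'. The ../tmp directory must exist.
sample_output = ['E4', '52.55.59', 'G4', '60.64.67', 'B4']
create_midi(sample_output)  # writes ../tmp/created_music.mid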
from music21 import chord, instrument, note, stream

# Map instrument names to music21 instrument classes; this replaces the
# original's duplicated if-chains without changing which instruments match.
INSTRUMENT_CLASSES = {
    "KeyboardInstrument": instrument.KeyboardInstrument,
    "Piano": instrument.Piano,
    "Harpsichord": instrument.Harpsichord,
    "Clavichord": instrument.Clavichord,
    "Celesta": instrument.Celesta,
    "ElectricBass": instrument.ElectricBass,
    "ElectricGuitar": instrument.ElectricGuitar,
    "StringInstrument": instrument.StringInstrument,
}


def create_midi(prediction, instrument_type, i):
    # Creates the .mid files.
    offset = 0
    output_notes = []
    instrument_class = INSTRUMENT_CLASSES.get(str(instrument_type[i]))

    for pattern in prediction:
        if pattern != 'Rest':
            # Pattern is a chord: '.'-separated MIDI pitch numbers.
            if ('.' in pattern) or pattern.isdigit():
                notes_in_chord = pattern.split('.')
                notes = []
                for current_note in notes_in_chord:
                    new_note = note.Note(int(current_note))
                    if instrument_class is not None:
                        new_note.storedInstrument = instrument_class()
                    notes.append(new_note)
                new_chord = chord.Chord(notes)
                new_chord.offset = offset
                output_notes.append(new_chord)
            # Pattern is a single note.
            else:
                new_note = note.Note(pattern)
                new_note.offset = offset
                if instrument_class is not None:
                    new_note.storedInstrument = instrument_class()
                output_notes.append(new_note)
        # Advance the offset even for rests so that silences are preserved.
        offset += 0.5

    midi_stream = stream.Stream(output_notes)
    midi_stream.write('midi', fp='music/' + str(instrument_type[i]) + '.mid')
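# Usage sketch (my assumption about the calling convention, inferred from the
# (prediction, instrument_type, i) signature): one call per predicted part.
# Passing the instrument names as strings keeps the str() comparisons above
# working; the music/ directory must already exist.
predictions = [['E4', '52.55.59', 'Rest', 'G4'],
               ['E2', 'Rest', 'A2', 'B2']]
instrument_types = ["ElectricGuitar", "ElectricBass"]
for i, prediction in enumerate(predictions):
    create_midi(prediction, instrument_types, i)
# -> writes music/ElectricGuitar.mid and music/ElectricBass.mid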
def test():
    from music21 import instrument as j
    sc1 = stream.Score()
    # instruments = [Piccolo(), Glockenspiel(), 72, 69, 41, 27, 47, 1, 1, 1, 1, 34]
    instrument = [
        j.Piccolo(), j.Xylophone(), j.Clarinet(), j.Oboe(),
        j.Violin(), j.ElectricGuitar(), j.Harp(),
        j.Piano(), j.Piano(), j.Piano(), j.Piano(), j.ElectricBass()
    ]
    instrumentOctave = [3, 2, 2, 2, 1, 1, 1, 2, 1, 0, -1, -2]
    for i in range(12):
        inst = instrument[i]
        # Skip MIDI channel 9, which General MIDI reserves for percussion.
        if i < 9:
            inst.midiChannel = i
        else:
            inst.midiChannel = i + 1
        part = addPart(instrument=inst)
        if instrumentOctave[i] != 0:
            part.transpose(12 * instrumentOctave[i], inPlace=True)
        sc1.insert(0, part)
    sc1.show()
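# addPart is not defined in the snippet above; a minimal stand-in (my
# assumption, not the original helper) that builds a Part carrying the given
# instrument plus a short figure so every channel is audible. Define it before
# calling test().
from music21 import note, stream


def addPart(instrument=None):
    part = stream.Part()
    if instrument is not None:
        part.insert(0, instrument)
    # A simple eighth-note figure as placeholder content.
    for step, name in enumerate(['C4', 'E4', 'G4', 'C5']):
        part.insert(step * 0.5, note.Note(name, quarterLength=0.5))
    return part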
def __init__(self):
    super(MultiInstrumentTransformer, self).__init__(
        'TransformerMultiInstrumentModel',
        "ml_models/Transformer_guitar.h5;ml_models/Transformer_bass_short.h5")
    self.target_instruments_str = ['Electric Guitar', 'Electric Bass']
    self.target_instruments = [
        instrument.ElectricGuitar(),
        instrument.ElectricBass()
    ]
    self.instrument_name = "guitar+bass"
    self.slice_len = 20
def __init__(self, instrument_str):
    if 'bass' in instrument_str.lower():
        self.target_instrument_str = 'Electric Bass'
        self.target_instrument = instrument.ElectricBass()
        self.instrument_name = 'bass'
        db_name = "MarkovBass"
    elif 'guitar' in instrument_str.lower():
        self.target_instrument_str = 'Electric Guitar'
        self.target_instrument = instrument.ElectricGuitar()
        self.instrument_name = 'guitar'
        db_name = "MarkovGuitar"
    else:
        # Guard against an unbound db_name: the original fell through to a
        # NameError when neither instrument matched.
        raise ValueError("unsupported instrument: " + instrument_str)
    super(MarkovModel, self).__init__(db_name, "")
def __init__(self, instrument_str): if "guitar" in instrument_str: self.target_instrument_str = 'Electric Guitar' self.target_instrument = instrument.ElectricGuitar() self.instrument_name = "guitar" super(LSTMModel, self).__init__("LSTMModel", "ml_models/LSTM_guitar.h5") elif "bass" in instrument_str: self.target_instrument_str = 'Electric Bass' self.target_instrument = instrument.ElectricBass() self.instrument_name = "bass" super(LSTMModel, self).__init__("LSTMBassModel", "ml_models/LSTM_bass.h5") self.slice_len = 10
import pickle

from keras.models import load_model  # or tensorflow.keras.models
from music21 import instrument


def generate(genre, instr, duration):
    duration = int(duration)
    # Map the requested duration to a number of notes to generate
    # (empirical ratio from the original code).
    note_len = int(250 / 63 * duration)
    print(genre)
    # Each genre keeps its own model and note vocabulary under
    # ./encoding/<genre>/; the original hard-coded the 'rock' directory,
    # which only worked when genre == 'rock'.
    model = load_model('./encoding/' + genre + '/' + genre + '_model.h5')
    notes_dir = './encoding/' + genre + '/notes'

    if instr == 'guitar':
        instrmt = instrument.ElectricGuitar()
    elif instr == 'violin':
        instrmt = instrument.Violin()
    elif instr == 'piano':
        instrmt = instrument.Piano()
    else:
        instrmt = instrument.BassDrum()

    with open(notes_dir, 'rb') as filepath:
        notes = pickle.load(filepath)

    # Get all pitch names.
    pitchnames = sorted(set(item for item in notes))
    # Vocabulary size: the number of distinct pitches.
    n_vocab = len(set(notes))

    # prepare_sequences1, generate_notes and gen_midi are defined elsewhere
    # in this module.
    network_input, normalized_input = prepare_sequences1(
        notes, pitchnames, n_vocab)
    int_to_note = dict(
        (number, note) for number, note in enumerate(pitchnames))
    prediction_output = generate_notes(model, network_input, int_to_note,
                                       n_vocab, note_len)
    return gen_midi(prediction_output, genre, instrmt)
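# Usage sketch (assumes ./encoding/rock/rock_model.h5 and ./encoding/rock/notes
# exist, as the path scheme above implies):
result_midi = generate('rock', 'guitar', 30)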
def __init__(self, instrument_str): if "bass" in instrument_str.lower(): self.target_instrument_str = "Electric Bass" self.target_instrument = instrument.ElectricBass() self.instrument_name = "bass" super(MusicVAE, self).__init__("MusicVAEBass", "ml_models/VAE_bassdecoder.h5") if "guitar" in instrument_str.lower(): self.target_instrument_str = "Electric Guitar" self.target_instrument = instrument.ElectricGuitar() self.instrument_name = "guitar" super(MusicVAE, self).__init__("MusicVAEGuitar", "ml_models/VAE_guitar_long_decoder.h5") self.slice_len = 256 self.latent_dim = 256
def __init__(self, instrument_str): if "guitar" in instrument_str: self.target_instrument_str = 'Electric Guitar' self.target_instrument = instrument.ElectricGuitar() self.instrument_name = "guitar" super(AttentionModel, self).__init__("TransformerModel", "ml_models/Transformer_guitar.h5") self.slice_len = 20 elif "bass" in instrument_str: self.target_instrument_str = 'Electric Bass' self.target_instrument = instrument.ElectricBass() self.instrument_name = "bass" super(AttentionModel, self).__init__("TransformerModelBass", "ml_models/Transformer_bass_short.h5") self.slice_len = 20
from music21 import converter, instrument


def convertSong():
    s = converter.parse('Disney_Themes_-_Whole_New_World.mid')
    count = 0
    for el in s.recurse():
        # print(el.classes)
        # Swap piano parts to electric guitar and string parts to bass.
        if 'Piano' in el.classes:
            el.activeSite.replace(el, instrument.ElectricGuitar())
            print(1)
        elif 'StringInstrument' in el.classes:
            # music21 has no instrument.Bass class (the original used one);
            # ElectricBass is the closest match to the apparent intent.
            el.activeSite.replace(el, instrument.ElectricBass())
            print(2)
        count += 1
    print(count)
    s.write('midi', 'newfilename.mid')
from music21 import instrument


def get_instruments(genre, ontology):
    # Return a list of General MIDI program numbers for the given genre.
    programs = []
    if genre.label[0] == "Blues":
        programs.append(instrument.AcousticGuitar().midiProgram)
        programs.append(instrument.Harmonica().midiProgram)
        programs.append(instrument.TomTom().midiProgram)
    elif genre.label[0] == "Folk":
        programs.append(instrument.Banjo().midiProgram)
        programs.append(instrument.AcousticBass().midiProgram)
        programs.append(instrument.Piano().midiProgram)
    elif genre.label[0] == "Rock":
        programs.append(instrument.ElectricGuitar().midiProgram)
        programs.append(instrument.ElectricBass().midiProgram)
        programs.append(instrument.BassDrum().midiProgram)
    elif genre.label[0] == "Classical":
        programs.append(instrument.Violin().midiProgram)
        programs.append(instrument.Oboe().midiProgram)
        programs.append(instrument.Flute().midiProgram)
        programs.append(instrument.Viola().midiProgram)
    elif genre.label[0] == "Country":
        programs.append(instrument.AcousticGuitar().midiProgram)
        programs.append(instrument.Banjo().midiProgram)
        programs.append(instrument.TomTom().midiProgram)
    return programs
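# Usage sketch: mapping the returned program numbers back to music21
# instruments. instrumentFromMidiProgram is available in recent music21
# releases; unpitched percussion (TomTom, BassDrum) may report a midiProgram
# of None, hence the filter. `blues_genre` and `ontology` are hypothetical
# objects matching the signature above.
from music21 import instrument

programs = get_instruments(blues_genre, ontology)
insts = [instrument.instrumentFromMidiProgram(p)
         for p in programs if p is not None]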
def __init__(self):
    super(Music_GPT_2, self).__init__("GPT-2Model", "")
    self.target_instrument = instrument.ElectricGuitar()
    self.instrument_name = "guitar"
def __init__(self):
    super(MultiInstrumentLSTM, self).__init__('LSTMMultiInstrumentModel',
                                              "ml_models/LSTM_multi.h5")
    self.target_instruments_str = ['Electric Guitar', 'Electric Bass', 'Piano']
    # instrument.Piano() matches the 'Piano' entry above; the original used
    # instrument.Percussion() here, contradicting target_instruments_str.
    self.target_instruments = [instrument.ElectricGuitar(),
                               instrument.ElectricBass(),
                               instrument.Piano()]
    self.instrument_name = "guitar+bass"
    self.slice_len = 20
from collections import OrderedDict
from itertools import groupby

from music21 import converter, instrument, key, meter, note, stream, tempo


def __parse_midi(data_fn):
    ''' Helper function to parse a MIDI file into its measures and chords '''
    # Parse the MIDI data for separate melody and accompaniment parts.
    midi_data = converter.parse(data_fn)

    # Get melody part, compress into single voice.
    melody_stream = midi_data[5]  # For Metheny piece, Melody is Part #5.
    melody1, melody2 = melody_stream.getElementsByClass(stream.Voice)
    for j in melody2:
        melody1.insert(j.offset, j)
    melody_voice = melody1

    for i in melody_voice:
        if i.quarterLength == 0.0:
            i.quarterLength = 0.25

    # Change key signature to adhere to comp_stream (1 sharp, mode = major).
    # Also add Electric Guitar.
    melody_voice.insert(0, instrument.ElectricGuitar())
    # melody_voice.insert(0, key.KeySignature(sharps=1, mode='major'))
    melody_voice.insert(0, key.KeySignature(sharps=1))

    # The accompaniment parts. Take only the best subset of parts from
    # the original data. Maybe add more parts, hand-add valid instruments.
    # Should at least add a string part (for sparse solos).
    # Verified good parts: 0, 1, 6, 7.
    partIndices = [0, 1, 6, 7]
    comp_stream = stream.Voice()
    comp_stream.append(
        [j.flat for i, j in enumerate(midi_data) if i in partIndices])

    # Full stream containing both the melody and the accompaniment.
    # All parts are flattened.
    full_stream = stream.Voice()
    for i in range(len(comp_stream)):
        full_stream.append(comp_stream[i])
    full_stream.append(melody_voice)

    # Extract solo stream, assuming you know the positions ..ByOffset(i, j).
    # Note that for different instruments (with stream.flat), you NEED to use
    # stream.Part(), not stream.Voice().
    # Accompanied solo is in range [476, 548).
    solo_stream = stream.Voice()
    for part in full_stream:
        curr_part = stream.Part()
        curr_part.append(part.getElementsByClass(instrument.Instrument))
        curr_part.append(part.getElementsByClass(tempo.MetronomeMark))
        curr_part.append(part.getElementsByClass(key.KeySignature))
        curr_part.append(part.getElementsByClass(meter.TimeSignature))
        curr_part.append(
            part.getElementsByOffset(476, 548, includeEndBoundary=True))
        cp = curr_part.flat
        solo_stream.insert(cp)

    # Group by measure so you can classify.
    # Note that measure 0 is for the time signature, metronome, etc., which
    # have an offset of 0.0.
    melody_stream = solo_stream[-1]
    measures = OrderedDict()
    offsetTuples = [(int(n.offset / 4), n) for n in melody_stream]
    measureNum = 0  # for now, don't use real measure numbers (119, 120)
    for key_x, group in groupby(offsetTuples, lambda x: x[0]):
        measures[measureNum] = [n[1] for n in group]
        measureNum += 1

    # Get the stream of chords; group chords by measure number.
    chordStream = solo_stream[0]
    chordStream.removeByClass(note.Rest)
    chordStream.removeByClass(note.Note)
    offsetTuples_chords = [(int(n.offset / 4), n) for n in chordStream]

    # Generate the chord structure. Use just track 1 (piano) since it is
    # the only instrument that has chords. Group into 4s, just like before.
    chords = OrderedDict()
    measureNum = 0
    for key_x, group in groupby(offsetTuples_chords, lambda x: x[0]):
        chords[measureNum] = [n[1] for n in group]
        measureNum += 1

    # Why len(measures) != len(chords): the melody ends 1/16 before the last
    # measure, so its final measure never shows up, while the accompaniment's
    # beat 1 right after does. Melody and accompaniment start on Ab and
    # resolve to the same key, so just cut out the last chord measure.
    del chords[len(chords) - 1]
    assert len(chords) == len(measures)

    return measures, chords
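# Usage sketch (the filename is hypothetical; the function hard-codes a layout
# like the Metheny transcription it mentions: melody in part 5, solo in
# offsets [476, 548)):
measures, chords = __parse_midi('original_metheny.mid')
print(len(measures), 'measures;', len(chords), 'chord groups')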