import argparse
import sys

import matplotlib.pyplot as plt
import numpy as np
from keras.layers import Activation, Dense, LSTM
from keras.models import Sequential
from scipy import stats

import essen

# Command-line interface: path to the notes vocabulary and trained weights.
parser = argparse.ArgumentParser()
parser.add_argument("vocab_filename")
parser.add_argument("weights_file")
args = parser.parse_args()

# Length of the note window fed to the LSTM at each step.
maxlen = 3

# Vocabulary maps between indices and note tuples; reserve one extra slot
# past the real notes for an explicit stop symbol.
idx_to_notes, notes_to_idx = essen.load_notes_vocab(args.vocab_filename)
stop_note = len(idx_to_notes)
vocab_size = len(idx_to_notes) + 1
idx_to_notes[stop_note] = (-1, "STOP_NOTE")

# Rebuild the architecture used at training time, then load its weights.
model = Sequential()
model.add(LSTM(200, return_sequences=False, input_shape=(maxlen, vocab_size)))
model.add(Dense(vocab_size))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
model.load_weights(args.weights_file)

# Seed sequence: (pitch, duration) note tuples converted to vocabulary
# indices — presumably MIDI pitch numbers; verify against essen's vocab.
starting_notes = [
    notes_to_idx[(72, "1")],
    notes_to_idx[(60, "1/3")],
    notes_to_idx[(73, "1/3")],
]
# NOTE(review): this line is a near-duplicate of the script above, extended
# with the start of the generation loop. Left byte-identical because the
# `for` statement at the end has no body in this view — the loop body is
# truncated (or lives past this chunk), so any rewrite would have to guess.
# NOTE(review): `np.bool` is a deprecated alias removed in NumPy >= 1.24;
# this should become plain `bool` once the full loop is visible to edit.
import argparse import essen import sys import numpy as np import matplotlib.pyplot as plt from scipy import stats from keras.models import Sequential from keras.layers import Dense, Activation, LSTM parser = argparse.ArgumentParser() parser.add_argument("vocab_filename") parser.add_argument("weights_file") args = parser.parse_args() maxlen = 3 idx_to_notes, notes_to_idx = essen.load_notes_vocab(args.vocab_filename) stop_note = len(idx_to_notes) vocab_size = len(idx_to_notes) + 1 idx_to_notes[stop_note] = (-1, "STOP_NOTE") model = Sequential() model.add(LSTM(200, return_sequences=False, input_shape=(maxlen, vocab_size))) model.add(Dense(vocab_size)) model.add(Activation('softmax')) model.compile(loss='categorical_crossentropy', optimizer='rmsprop') model.load_weights(args.weights_file) starting_notes = [notes_to_idx[(72, "1")], notes_to_idx[(60, "1/3")], notes_to_idx[(73, "1/3")]] x = np.zeros((1, maxlen, vocab_size), dtype=np.bool) sys.stdout.write("Starting notes: ") for i, note in enumerate(starting_notes):
def load_vocab(file):
    """Load the notes vocabulary from *file*.

    Thin wrapper around ``essen.load_notes_vocab``; returns the
    ``(idx_to_notes, notes_to_idx)`` mapping pair unchanged.
    """
    return essen.load_notes_vocab(file)