# NOTE(review): this chunk was collapsed onto one physical line; the line
# breaks and indentation below are reconstructed and should be confirmed
# against the original file. In particular, the extent of the
# ``if mode == '2x':`` body is a best guess from context.
# Generate one song (piano-roll-like float array) from latent ``code``.
song = model.generate(code, img=True)[0]

if mode == '2x':
    # Build a 5-segment A-B-A-B-A latent sequence: each sampled code is
    # repeated 128 times along a new axis, then flattened back to
    # (n*128, code_dim) so every timestep of a segment shares one code.
    codeA = sample(1)
    codeA = np.reshape(
        np.tile(np.expand_dims(codeA, 1), (1, 128, 1)),
        (-1, codeA.shape[-1]))
    codeB = sample(1)
    codeB = np.reshape(
        np.tile(np.expand_dims(codeB, 1), (1, 128, 1)),
        (-1, codeB.shape[-1]))
    code = np.concatenate([codeA, codeB, codeA, codeB, codeA], axis=0)
    song = model.generate([code], code_img=True, img=True)[0]

# Python 2 print statement: mean activation over non-zero entries.
print song[song.nonzero()].mean()
# Binarize with a fixed threshold; -0.3 is an empirically chosen cutoff
# (presumably tuned by listening/inspection — TODO confirm).
final = song > -0.3
coder.decode(final, speed=1.).write('midi', 'example.mid')

# Round-trip through a duration-aware coder to produce a quantized version.
coder = NoteDurationCoder(first_voice=False)
note, dura = coder.encode(ms.converter.parse('example.mid'), force=True)
coder.decode(note, 2).write('midi', 'quantized.mid')

# Visualize raw output (top) vs thresholded piano roll (bottom);
# transpose + [::-1] puts pitch on the y-axis, low notes at the bottom.
import matplotlib.pyplot as plt
plt.subplot(211)
plt.imshow(song.T[::-1, :])
plt.subplot(212)
plt.imshow(final.T[::-1, :])
# plt.colorbar(orientation='horizontal')
plt.savefig('example.png')
plt.show()
# t, p = (song > 0.80).nonzero()
# song = []
# NOTE(review): this chunk was collapsed onto one physical line and begins
# mid-way through a hyper-parameter constructor call — the call head
# (presumably something like ``hparam = ...HParam(``) was lost in
# extraction, so the first parenthesis below is intentionally unbalanced.
# Line breaks/indentation are reconstructed; confirm against the original.
    learning_rate=1e-3,
    iterations=2000,
    continued=False,
    only_train_quantized_rec=False,
    vocab_size=128 + 1,      # 128 MIDI pitches plus one extra symbol
    debug=False,
    overwrite_workdir=True,
    # workdir='./temp/SeqGAN_ablation/',
    clip_norm=1.,
    alpha=5e-3,  # 5-->2, alpha*con_loss
    beta=1.00,
    gamma=1e-3)  # gamma*q_loss

model = ContinuousSeqAE(hparam)
model.build()

# coder = ExampleCoder()
coder = NoteDurationCoder(normalize_key='C5', )
# single=True,
# first_voice=True)

# Load cached encoded dataset; on any failure, re-encode every MIDI file
# under the dataset directory and cache the result.
# NOTE(review): bare ``except:`` hides real errors (e.g. a corrupt cache
# would silently trigger a full re-encode) — a narrower except would be
# safer, left unchanged here.
try:
    data = np.load('temp/easy.npz')['data']
except:
    data = np.array(
        map_dir(lambda fn: coder.encode(ms.converter.parse(fn))[0],
                './datasets/easymusicnotes/'))
    np.savez('temp/easy.npz', data=data)

# Python 2 semantics: map/filter return lists here.
print(len(data), map(lambda x: len(x), data))
# Keep only sequences long enough for one training window.
data = filter(lambda x: len(x) > hparam.timesteps, data)
data = map(remove_con_dup, data)
print(len(data), map(lambda x: len(x), data))
# NOTE(review): this chunk was collapsed onto one physical line and begins
# mid-way through a hyper-parameter constructor call — the call head was
# lost in extraction, so the first parenthesis below is intentionally
# unbalanced. Line breaks/indentation are reconstructed; confirm against
# the original file.
    output_size=16,
    timesteps=10,
    iterations=5000,
    learning_rate=1e-4,
    continued=False,
    overwrite_workdir=True,
    workdir='./temp/DurationNet/', )

# NOTE(review): successive rebinds — only the last assignment takes
# effect; the earlier values look like quick manual mode switching.
mode = 'train'
mode = 'refine'
mode = 'oneshot'

model = RefineNet(hparam)
model.build()

coder = NoteDurationCoder(normalize_key='C5', first_voice=False)

# Load cached (notes, durations) pairs; on any failure, re-encode the
# dataset directory and cache the result.
# NOTE(review): bare ``except:`` swallows all errors — a narrower except
# (e.g. IOError/KeyError) would be safer, left unchanged here.
try:
    data = np.load('temp/easy_with_duration.npz')
    notes = data['notes']
    durations = data['durations']
except:
    data = np.array(
        map_dir(lambda fn: coder.encode(ms.converter.parse(fn)),
                './datasets/easymusicnotes/'))
    notes, durations = zip(*data)
    np.savez('temp/easy_with_duration.npz', notes=notes, durations=durations)

# Keep only note sequences long enough for one training window
# (Python 2: filter returns a list).
notes = filter(lambda x: len(x) > hparam.timesteps, notes)
basic_cell=rnn.GRUCell, # hparam D_lr=1e-4, D_boost=0, G_lr=1e-3, G_k=5, G_clipnorm=1.0, # train batch_size=25, continued=False, overwrite_workdir=True, iterations=50000, workdir='./temp/RhythmGAN/') model = SeqGAN(hparam) model.build() coder = NoteDurationCoder(normalize_key='C5', first_voice=True) try: # raise Exception data = np.load('temp/easy.npz')['data'] except: data = np.array( map_dir(lambda fn: coder.encode(ms.converter.parse(fn))[0], './datasets/easymusicnotes/')) np.savez('temp/easy.npz', data=data) print(len(data), map(lambda x: len(x), data)) data = filter(lambda x: len(x) > hparam.timesteps, data) print(len(data), map(lambda x: len(x), data)) train_data, test_data = train_test_split(data,
# NOTE(review): this chunk was collapsed onto one physical line; line
# breaks and indentation below are reconstructed — confirm against the
# original file. It appears to be the head of a preprocessing script for
# the piano-midi.de dataset; the chunk may be truncated.
from DeepSymphony.utils.Music21Coder import NoteDurationCoder
from keras.optimizers import Adam, SGD
import music21 as ms


def handle(fn):
    # Parse one MIDI file and encode it with the module-level ``coder``
    # (bound in the __main__ block below); module-level so it can be
    # pickled for the multi-core ``map_dir`` call.
    return coder.encode(ms.converter.parse(fn))


if __name__ == '__main__':
    timesteps = 128
    batch_size = 32
    # NOTE(review): successive rebinds — only 'test' takes effect; the
    # earlier value looks like quick manual mode switching.
    mode = 'train'
    mode = 'test'

    # resolution=1/16 presumably quantizes durations to sixteenth notes —
    # TODO confirm against NoteDurationCoder.
    coder = NoteDurationCoder(normalize_key='C5',
                              resolution=1. / 16.)

    # Load cached (notes, durations) pairs; on any failure, re-encode the
    # whole dataset directory on 8 cores and cache the result.
    # NOTE(review): bare ``except:`` swallows all errors; the commented
    # ``raise Exception`` is a manual switch to force re-encoding.
    try:
        # raise Exception
        data = np.load('temp/piano-midi_duration.npz')
        notes = data['notes']
        durations = data['durations']
    except:
        data = np.array(map_dir(handle, './datasets/piano-midi.de/',
                                cores=8))
        # Python 2 print statements; drop pieces that encoded to nothing.
        print map(lambda x: len(x[0]), data)
        data = filter(lambda x: len(x[0]) > 0, data)
        print map(lambda x: len(x[0]), data)
        notes, durations = zip(*data)
        np.savez('temp/piano-midi_duration.npz',
                 notes=notes, durations=durations)