# --- Generation: build the model, run inference, and write the result MIDI. ---
# NOTE(review): relies on module-level names defined elsewhere in this file
# (current_time, mode, beam, length, load_path, save_path, par, tf,
# encode_midi, decode_midi) — confirm against the surrounding script.
gen_log_dir = 'logs/mt_decoder/generate_' + current_time + '/generate'
gen_summary_writer = tf.summary.create_file_writer(gen_log_dir)

use_enc_dec = mode == 'enc-dec'
if use_enc_dec:
    # Full encoder-decoder (seq2seq) variant.
    print(">> generate with original seq2seq wise... beam size is {}".format(beam))
    mt = MusicTransformer(
        embedding_dim=256,
        vocab_size=par.vocab_size,
        num_layer=6,
        max_seq=2048,
        dropout=0.2,
        debug=False,
        loader_path=load_path)
else:
    # Decoder-only variant.
    print(">> generate with decoder wise... beam size is {}".format(beam))
    mt = MusicTransformerDecoder(loader_path=load_path)

# Primer: the first events of a reference MIDI file seed the generation.
inputs = encode_midi('dataset/midi/BENABD10.mid')

with gen_summary_writer.as_default():
    result = mt.generate(inputs[:10], beam=beam, length=length, tf_board=True)

for i in result:
    print(i)

if use_enc_dec:
    # Concatenate the last max_seq primer events with the generated
    # continuation (dropping result[0]) before decoding to MIDI —
    # presumably so the output is continuous with the primer; confirm.
    decode_midi(list(inputs[-1 * par.max_seq:]) + list(result[1:]), file_path=save_path)
else:
    decode_midi(result, file_path=save_path)
loader_path=load_path) mt.compile(optimizer=opt, loss=callback.transformer_dist_train_loss) # Train Start for e in range(epochs): mt.reset_metrics() for b in range(len(dataset.files) // batch_size): try: batch_x, batch_y = dataset.seq2seq_batch(batch_size, max_seq) except: continue result_metrics = mt.train_on_batch(batch_x, batch_y) if b % 100 == 0: eval_x, eval_y = dataset.seq2seq_batch(batch_size, max_seq, 'eval') print('eval_x', len(eval_x[0]), eval_x) print('eval_y', len(eval_y[0]), eval_y) # print('generating ...',len(eval_x[0])) gen_res = mt.generate(eval_x[0][:1024], beam=3, length=1024) print('generated sequence: ', gen_res) midi0 = decode_midi( gen_res[0], file_path='result/midi/result-{}-{}.mid'.format(e, b)) eval_result_metrics = mt.evaluate(eval_x, eval_y) mt.save(save_path, e) print('\n====================================================') print('Epoch/Batch: {}/{}'.format(e, b)) print('Train >>>> Loss: {:6.6}, Accuracy: {}'.format( result_metrics[0], result_metrics[1])) print('Eval >>>> Loss: {:6.6}, Accuracy: {}'.format( eval_result_metrics[0], eval_result_metrics[1]))