# Imports required by this snippet; the encode_midi import path is an
# assumption and should be adjusted to the project's actual module layout.
import pickle
from pathlib import Path

import click

from midi_processor.processor import encode_midi  # assumed location


def preprocess(midi_dir, preproc_dir):
    midi_dir = Path(midi_dir)
    preproc_dir = Path(preproc_dir)
    preproc_dir.mkdir(exist_ok=True, parents=True)
    if list(preproc_dir.iterdir()):
        print(f"'{preproc_dir}' is not empty. Continuing anyway.")

    files = []
    files.extend(midi_dir.glob("*.mid"))
    files.extend(midi_dir.glob("*.midi"))
    files.sort()

    # TODO: make it output only one file, in tf format
    # item_show_func receives None once the bar finishes, so guard against it
    # instead of printing the literal string "None".
    with click.progressbar(files, label="Processing",
                           item_show_func=lambda p: str(p) if p else "") as prog:
        for file in prog:
            outfile = preproc_dir / (file.name + ".pickle")
            with file.open("rb") as f:
                data = encode_midi(f)
            with outfile.open("wb") as f:
                pickle.dump(data, f)
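A minimal usage sketch for the function above; the directory paths are illustrative, not taken from the source:

if __name__ == "__main__":
    # Read every .mid/.midi file under dataset/midi and write one .pickle of
    # encoded events per input file into dataset/preprocessed.
    preprocess("dataset/midi", "dataset/preprocessed")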
def preprocess_midi(path):
    return encode_midi(path)
Example #3
# The snippet begins mid-way through a device check; the `if` branch below is
# a reconstruction of the truncated CUDA test, not verbatim source.
if torch.cuda.is_available():
    config.device = torch.device('cuda')
else:
    config.device = torch.device('cpu')

current_time = datetime.datetime.now().strftime('%Y%m%d-%H%M%S')
gen_log_dir = 'logs/mt_decoder/generate_' + current_time + '/generate'
gen_summary_writer = SummaryWriter(gen_log_dir)

mt = MusicTransformer(embedding_dim=config.embedding_dim,
                      vocab_size=config.vocab_size,
                      num_layer=config.num_layers,
                      max_seq=config.max_seq,
                      dropout=0,
                      debug=False)
mt.load_state_dict(torch.load(args.model_dir + '/final.pth'))
mt.test()

print(config.condition_file)
if config.condition_file is not None:
    # NOTE: the conditioning path is hard-coded here rather than read from
    # config.condition_file printed above.
    inputs = np.array([encode_midi('dataset/midi/BENABD10.mid')[:500]])
else:
    inputs = np.array([[24, 28, 31]])
inputs = torch.from_numpy(inputs)
result = mt(inputs, config.length, gen_summary_writer)

for i in result:
    print(i)

decode_midi(result, file_path=config.save_path)

gen_summary_writer.close()
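The fragment above assumes an argparse `args` object and a populated `config`; a minimal stand-in for the fields it reads, with illustrative values not taken from the original project, could look like:

from types import SimpleNamespace

# Hypothetical stand-in for the `config` object used above; the values are
# illustrative defaults, not the project's real settings.
config = SimpleNamespace(
    embedding_dim=256,
    vocab_size=390,        # assumed size of the MIDI-event vocabulary
    num_layers=6,
    max_seq=2048,
    length=1024,
    condition_file=None,   # set to a .mid path to condition generation
    save_path='result/generated.mid',
    device=None,           # filled in by the CUDA check above
)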
Example #4
def preprocess_midi(path):
    #   return encode_midi(path, augumentation=False)
    return encode_midi(path)
gen_log_dir = 'logs/mt_decoder/generate_'+current_time+'/generate'
gen_summary_writer = tf.summary.create_file_writer(gen_log_dir)


if mode == 'enc-dec':
    print(">> generate with original seq2seq wise... beam size is {}".format(beam))
    mt = MusicTransformer(
            embedding_dim=256,
            vocab_size=par.vocab_size,
            num_layer=6,
            max_seq=2048,
            dropout=0.2,
            debug=False, loader_path=load_path)
else:
    print(">> generate with decoder wise... beam size is {}".format(beam))
    mt = MusicTransformerDecoder(loader_path=load_path)

inputs = encode_midi(args.inputs)


with gen_summary_writer.as_default():
    result = mt.generate(inputs[:10], beam=beam, length=length, tf_board=True)

for i in result:
    print(i)

if mode == 'enc-dec':
    decode_midi(list(inputs[-1*par.max_seq:]) + list(result[1:]), file_path=save_path)
else:
    decode_midi(result, file_path=save_path)
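The TensorFlow fragments above and below rely on pre-set names (`mode`, `beam`, `length`, `load_path`, `save_path`, `current_time`); a hedged sketch of how they might be wired up, with illustrative flag names and defaults, is:

import argparse
import datetime

# Hypothetical CLI wiring; flag names and defaults are illustrative only.
parser = argparse.ArgumentParser()
parser.add_argument('--mode', choices=['enc-dec', 'decoder'], default='decoder')
parser.add_argument('--beam', type=int, default=None)
parser.add_argument('--length', type=int, default=2048)
parser.add_argument('--load_path', type=str, default='bin/models')
parser.add_argument('--save_path', type=str, default='bin/generated.mid')
args = parser.parse_args()

mode, beam, length = args.mode, args.beam, args.length
load_path, save_path = args.load_path, args.save_path
current_time = datetime.datetime.now().strftime('%Y%m%d-%H%M%S')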
Example #6
gen_log_dir = 'logs/mt_decoder/generate_' + current_time + '/generate'
gen_summary_writer = tf.summary.create_file_writer(gen_log_dir)

if mode == 'enc-dec':
    print(">> generate with original seq2seq wise... beam size is {}".format(
        beam))
    mt = MusicTransformer(embedding_dim=256,
                          vocab_size=par.vocab_size,
                          num_layer=6,
                          max_seq=2048,
                          dropout=0.2,
                          debug=False,
                          loader_path=load_path)
else:
    print(">> generate with decoder wise... beam size is {}".format(beam))
    mt = MusicTransformerDecoder(loader_path=load_path)

inputs = encode_midi('dataset/midi/BENABD10.mid')

with gen_summary_writer.as_default():
    result = mt.generate(inputs[:10], beam=beam, length=length, tf_board=True)

for i in result:
    print(i)

if mode == 'enc-dec':
    decode_midi(list(inputs[-1 * par.max_seq:]) + list(result[1:]),
                file_path=save_path)
else:
    decode_midi(result, file_path=save_path)
if mode == 'enc-dec':
    print(">> generate with original seq2seq wise... beam size is {}".format(
        beam))
    mt = MusicTransformer(embedding_dim=256,
                          vocab_size=par.vocab_size,
                          num_layer=6,
                          max_seq=2048,
                          dropout=0.2,
                          debug=False,
                          loader_path=load_path)
else:
    print(">> generate with decoder wise... beam size is {}".format(beam))
    mt = MusicTransformerDecoder(loader_path=load_path, max_seq=max_seq)

inputs = encode_midi(midi_path)

with gen_summary_writer.as_default():
    result = mt.generate(inputs[:crop_length],
                         beam=beam,
                         length=length,
                         tf_board=True)

for i in result:
    print(i)

if mode == 'enc-dec':
    decode_midi(list(inputs[-1 * par.max_seq:]) + list(result[1:]),
                file_path=save_path)
else:
    decode_midi(result, file_path=save_path)
gen_summary_writer = tf.summary.create_file_writer(gen_log_dir)

if mode == 'enc-dec':
    print(">> generate with original seq2seq wise... beam size is {}".format(
        beam))
    mt = MusicTransformer(embedding_dim=256,
                          vocab_size=par.vocab_size,
                          num_layer=6,
                          max_seq=2048,
                          dropout=0.2,
                          debug=False,
                          loader_path=load_path)
else:
    print(">> generate with decoder wise... beam size is {}".format(beam))
    mt = MusicTransformerDecoder(loader_path=load_path)

#inputs = encode_midi('dataset/midi/BENABD10.mid')
inputs = encode_midi(prior_midi)

with gen_summary_writer.as_default():
    result = mt.generate(inputs[:10], beam=beam, length=length, tf_board=True)

for i in result:
    print(i)

if mode == 'enc-dec':
    decode_midi(list(inputs[-1 * par.max_seq:]) + list(result[1:]),
                file_path=save_path)
else:
    decode_midi(result, file_path=save_path)
Example #9
mt = MusicTransformer(embedding_dim=config.embedding_dim,
                      vocab_size=config.vocab_size,
                      num_layer=config.num_layers,
                      max_seq=config.max_seq,
                      dropout=0,
                      debug=False)
mt.load_state_dict(torch.load(join(args.model_dir, args.ckpt)))
mt.test()
mt.cuda()
#%%
## %%time
if condition_file is not None:
    print("use condition file %s, first %d notes" %
          (condition_file, args.condition_len))
    inputs = np.array([encode_midi(condition_file)[:args.condition_len]])
    print(inputs[0])
else:
    inputs = np.array([[24, 28, 31]])
inputs = torch.from_numpy(inputs).cuda()
with torch.no_grad():
    result = mt(inputs, 2048, gen_summary_writer)
# mid = decode_midi(result.cpu(), file_path=None) #
mid = decode_midi(result,
                  file_path='result/generated_mod%s_cond%s.mid' %
                  (args.ckpt.split('.')[0], condition_fn))
gen_summary_writer.close()
ps_roll = mid.get_piano_roll()
figh = plot_piano_roll(ps_roll)
figh.savefig('result/generated_mod%s_cond%s.jpg' %
             (args.ckpt.split('.')[0], condition_fn))
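`plot_piano_roll` is not defined in the fragment above; a minimal sketch of such a helper using matplotlib (the function name and signature are assumptions) might be:

import matplotlib
matplotlib.use('Agg')  # render off-screen so savefig works without a display
import matplotlib.pyplot as plt

def plot_piano_roll(piano_roll, fs=100):
    """Draw a (128, T) piano-roll array and return the matplotlib figure."""
    fig, ax = plt.subplots(figsize=(12, 4))
    ax.imshow(piano_roll, aspect='auto', origin='lower', cmap='gray_r')
    ax.set_xlabel('frame ({} Hz)'.format(fs))
    ax.set_ylabel('MIDI pitch')
    fig.tight_layout()
    return fig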
Example #10
gen_log_dir = 'logs/mt_decoder/generate_' + current_time + '/generate'
gen_summary_writer = tf.summary.create_file_writer(gen_log_dir)

if mode == 'enc-dec':
    print(">> generate with original seq2seq wise... beam size is {}".format(
        beam))
    mt = MusicTransformer(embedding_dim=256,
                          vocab_size=par.vocab_size,
                          num_layer=6,
                          max_seq=2048,
                          dropout=0.2,
                          debug=False,
                          loader_path=load_path)
else:
    print(">> generate with decoder wise... beam size is {}".format(beam))
    mt = MusicTransformerDecoder(loader_path=load_path)

inputs = encode_midi('./dataset/midi/chopin_canon_in_F_minor.mid')

with gen_summary_writer.as_default():
    result = mt.generate(inputs[:500], beam=beam, length=length, tf_board=True)

for i in result:
    print(i)

if mode == 'enc-dec':
    decode_midi(list(inputs[-1 * par.max_seq:]) + list(result[1:]),
                file_path='bin/generated.mid')
else:
    decode_midi(result, file_path='bin/generated.mid')
def preprocess_midi(path):
    # revised code
    return encode_midi(path)