Example no. 1
0
def main(_):
    """Train a CharRNN on the event/control dataset.

    Creates ``./model/<FLAGS.name>`` if needed, resumes from the latest
    checkpoint found there (if any), then runs the training loop.
    """
    model_path = os.path.join('./model/', FLAGS.name)
    if not os.path.exists(model_path):
        os.makedirs(model_path)

    # tf.train.latest_checkpoint returns None for a directory that holds
    # no checkpoint yet (e.g. on the very first run, right after
    # makedirs), so only restore weights when one actually exists.
    checkpoint_path = None
    if os.path.isdir(model_path):
        checkpoint_path = tf.train.latest_checkpoint(model_path)

    dataset = load_dataset()
    batch_gen = dataset.batches(FLAGS.num_seqs, FLAGS.num_steps, 10)
    print(EventSeq.dim())
    model = CharRNN(EventSeq.dim(),
                    ControlSeq.dim(),
                    num_seqs=FLAGS.num_seqs,
                    num_steps=FLAGS.num_steps,
                    lstm_size=FLAGS.lstm_size,
                    num_layers=FLAGS.num_layers,
                    learning_rate=FLAGS.learning_rate,
                    train_keep_prob=FLAGS.train_keep_prob,
                    use_embedding=FLAGS.use_embedding,
                    embedding_size=FLAGS.embedding_size)
    model.sess.run(tf.global_variables_initializer())
    if checkpoint_path is not None:
        model.load(checkpoint_path)

    model.train(
        batch_gen,
        FLAGS.max_steps,
        model_path,
        FLAGS.save_every_n,
        FLAGS.log_every_n,
    )
Example no. 2
0
def main(_):
    """Sample event indices from a trained CharRNN and write a MIDI file.

    ``FLAGS.control`` may point to a preprocessed ``(events, controls)``
    file, or to a directory of such files (one is chosen at random).
    """
    # Default sampling length; a loaded control sequence may override it
    # below. Initialised up front so the length assert cannot NameError
    # when no control path is supplied.
    max_len = FLAGS.max_length

    if os.path.isfile(FLAGS.control) or os.path.isdir(FLAGS.control):
        control_path = FLAGS.control
        if os.path.isdir(control_path):
            files = list(utils.find_files_by_extensions(control_path))
            assert len(files) > 0, 'no file in "{control}"'.format(
                control=FLAGS.control)
            # Fix: the original picked a random file but then still loaded
            # the directory path, which torch.load cannot read; load the
            # chosen file instead.
            control_path = np.random.choice(files)
        # NOTE(review): `events` is only bound on this branch; sampling
        # below uses it as the prime, so a control path appears mandatory
        # in practice — confirm against the flag definitions.
        events, compressed_controls = torch.load(control_path)
        controls = ControlSeq.recover_compressed_array(compressed_controls)
        if FLAGS.max_length == 0:
            max_len = controls.shape[0]

        control = np.expand_dims(controls, 1).repeat(1, 1)
        # Replace the array with a human-readable description for logging.
        control = 'control sequence from "{control}"'.format(control=control)

    assert max_len > 0, 'either max length or control sequence length should be given'

    #FLAGS.start_string = FLAGS.start_string.decode('utf-8')

    # A directory checkpoint path is resolved to its newest checkpoint.
    if os.path.isdir(FLAGS.checkpoint_path):
        FLAGS.checkpoint_path =\
            tf.train.latest_checkpoint(FLAGS.checkpoint_path)

    model = CharRNN(EventSeq.dim(),
                    ControlSeq.dim(),
                    sampling=True,
                    lstm_size=FLAGS.lstm_size,
                    num_layers=FLAGS.num_layers,
                    use_embedding=FLAGS.use_embedding,
                    embedding_size=FLAGS.embedding_size)
    model.sess.run(tf.global_variables_initializer())
    model.load(FLAGS.checkpoint_path)

    # Prime the sampler with the first 100 loaded events.
    outputs = model.sample(1000,
                           prime=events[0:100],
                           vocab_size=EventSeq.dim())

    outputs = outputs.reshape([-1, 1])
    print(outputs)
    name = 'output-{i:03d}.mid'.format(i=0)
    path = os.path.join("output/", name)
    n_notes = utils.event_indeces_to_midi_file(outputs[:, 0], path)
    print('===> {path} ({n_notes} notes)'.format(path=path, n_notes=n_notes))
Example no. 3
0
# Pull parsed command-line options (defined elsewhere) into module-level names.
sess_path = options.sess_path
data_path = options.data_path
saving_interval = options.saving_interval

# Training-loop hyperparameters taken from the command line.
learning_rate = options.learning_rate
batch_size = options.batch_size
window_size = options.window_size
stride_size = options.stride_size
use_transposition = options.use_transposition
control_ratio = options.control_ratio
teacher_forcing_ratio = options.teacher_forcing_ratio
reset_optimizer = options.reset_optimizer
enable_logging = options.enable_logging

# Model dimensions derived from the event/control vocabulary encodings.
event_dim = EventSeq.dim()
control_dim = ControlSeq.dim()
model_config = config.model
# Command-line "key=value" overrides take precedence over config defaults.
model_params = utils.params2dict(options.model_params)
model_config.update(model_params)
device = config.device

print('-' * 70)

print('Session path:', sess_path)
print('Dataset path:', data_path)
print('Saving interval:', saving_interval)
print('-' * 70)

print('Hyperparameters:', utils.dict2params(model_config))
print('Learning rate:', learning_rate)
print('Batch size:', batch_size)
Example no. 4
0
import torch
from sequence import EventSeq, ControlSeq

#pylint: disable=E1101

# Run on the GPU when CUDA is available, otherwise fall back to the CPU.
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')

# Network hyperparameters; the vocabulary sizes are taken from the
# event/control sequence encodings.
model = dict(
    init_dim=32,
    event_dim=EventSeq.dim(),
    control_dim=ControlSeq.dim(),
    hidden_dim=512,
    gru_layers=3,
    gru_dropout=0.3,
)

# Default training-loop settings; command-line parsing elsewhere may
# override individual entries.
train = dict(
    learning_rate=0.001,
    batch_size=64,
    window_size=200,
    stride_size=10,
    use_transposition=False,
    control_ratio=1.0,
    teacher_forcing_ratio=1.0,
)