import numpy as np
import note_seq

# Write the example melody/bass pair out as MIDI files
note_seq.sequence_proto_to_midi_file(x1, 'input_mel.mid')
note_seq.sequence_proto_to_midi_file(y1, 'target_bass.mid')
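
# Optional quick check, not part of the original snippet: note_seq can also
# render the sequences as piano rolls. plot_sequence is part of note_seq and
# assumes a Bokeh-capable environment (e.g. a notebook), so this is only a sketch.
note_seq.plot_sequence(x1)
note_seq.plot_sequence(y1)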

#%%
# Set seed
np.random.seed(42)

path_to_midi_dir = folder_name

#instruments_to_extract = (54, 34) # voice and bass
instruments_to_extract = (0, 1)  # voice and bass

# Hyper-parameters
num_epochs = 200
training_set, validation_set, test_set, tokenizer \
    = create_dataset_from_midi(path_to_midi_dir, instruments_to_extract, print_info=True)

#%%
encoder_decoder = tokenizer.encoder_decoder
num_sequences = tokenizer.song_count
vocab_size = tokenizer.vocab_size

#training_set, validation_set, test_set, word_to_idx, idx_to_word, num_sequences, vocab_size = load_dummy_dataset(True)

# Initialize a new LSTM network
net = MyRecurrentNet(vocab_size)
training_loss, validation_loss = train_lstm(net, num_epochs, training_set,
                                            validation_set, vocab_size,
                                            encoder_decoder)

# Plot training and validation loss
Example n. 2
    # Time steps per training sequence: steps per quarter note * 4 quarters per
    # bar (assuming 4/4) * bars per chunk
    length_per_seq = (tokenizer_kwargs['steps_per_quarter'] * 4
                      * tokenizer_kwargs['split_in_bar_chunks'])
    print('length_per_seq: ', length_per_seq)

    # Dataset creation, splitting and batching parameters
    dataset_split_kwargs = {
        'p_train': 0.6,
        'p_val': 0.3,
        'p_test': 0.0,
        'batch_size': 1,
        'eval_batch_size': 1
    }

    train, val, test, t = create_dataset_from_midi(sequences,
                                                   lead_instrument,
                                                   accomp_instrument,
                                                   print_info=False,
                                                   **tokenizer_kwargs,
                                                   **dataset_split_kwargs)
    encoder_decoder = t.encoder_decoder
    num_sequences = t.song_count

    # LSTM hyperparameters
    vocab_size = t.vocab_size
    num_epochs = 30  # 100
    learning_rate = 1e-4

    mel_notes, bass_notes = get_histograms_from_dataloader(test, vocab_size=vocab_size,
                                                           plot=False)
    #print('Melody histogram: ', Counter(mel_notes))
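    # A minimal sketch of how the returned histograms could be inspected, based on
    # the commented-out line above; treating them as iterables of note tokens is an
    # assumption, not part of the original snippet.
    from collections import Counter
    print('Melody histogram: ', Counter(mel_notes))
    print('Bass histogram: ', Counter(bass_notes))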
Example n. 3
import numpy as np
import matplotlib.pyplot as plt
import torch
import torch.optim as optim
import utils.paths as paths

# Set seed such that we always get the same dataset
np.random.seed(42)

instruments = [0, 1]
lead_instrument = ('melody', instruments[0])
accomp_instrument = ('bass', instruments[1])

split_in_bar_chunks = 8

# Hyper-parameters
num_epochs = 100
training_set, validation_set, test_set, tokenizer \
    = create_dataset_from_midi(paths.midi_dir, lead_instrument, accomp_instrument, split_in_bar_chunks, print_info=True)
encoder_decoder = tokenizer.encoder_decoder
num_sequences = tokenizer.song_count
vocab_size = tokenizer.vocab_size

# Initialize a new LSTM network
net = MusicLSTMNet(vocab_size)
training_loss, validation_loss = train_lstm(net, num_epochs, training_set,
                                            validation_set, vocab_size,
                                            encoder_decoder)

torch.save(net, paths.model_serialized_dir + 'music_lstm.pt')

# Plot training and validation loss
epoch = np.arange(len(training_loss))
plt.figure()
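# A minimal sketch of the rest of the plot (the original snippet is cut off after
# plt.figure()); colours, labels, and axis names are assumptions.
plt.plot(epoch, training_loss, 'r', label='Training loss')
plt.plot(epoch, validation_loss, 'b', label='Validation loss')
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.legend()
plt.show()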
Example n. 4
import numpy as np
import utils.paths as paths

# Set seed such that we always get the same dataset
np.random.seed(42)

instruments = [0, 1]
lead_instrument = ('melody', instruments[0])
accomp_instrument = ('bass', instruments[1])

split_in_bar_chunks = 8

# Hyper-parameters
num_epochs = 100
train, val, test, t = create_dataset_from_midi(paths.midi_dir_small, 
                                               lead_instrument, 
                                               accomp_instrument, 
                                               split_in_bar_chunks, 
                                               print_info=True)
encoder_decoder = t.encoder_decoder
num_sequences = t.song_count


# Hyperparameters
batch_size = 16

vocab_size = t.vocab_size

#%%
Example n. 5
length_per_seq = (tokenizer_kwargs['steps_per_quarter'] * 4
                  * tokenizer_kwargs['split_in_bar_chunks'])
print('length_per_seq: ', length_per_seq)
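# Worked example (values are assumptions, not from this snippet): with
# steps_per_quarter = 4, the note_seq default, and split_in_bar_chunks = 8 as in
# the earlier examples, length_per_seq = 4 * 4 * 8 = 128 time steps per chunk.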

# Dataset creation, splitting and batching parameters
dataset_split_kwargs = {
    'p_train': 0.7,
    'p_val': 0.3,
    'p_test': 0.0,
    'batch_size': 16,
    'eval_batch_size': 16
}

train, val, test, t = create_dataset_from_midi(dataset_path,
                                               lead_instrument,
                                               accomp_instrument,
                                               print_info=True,
                                               **tokenizer_kwargs,
                                               **dataset_split_kwargs)

encoder_decoder = t.encoder_decoder
num_sequences = t.song_count

# LSTM hyperparameters
vocab_size = t.vocab_size
num_epochs = 20
learning_rate = 0.7e-4
dropout_prob = 0.5
num_layers = 2
hidden_size = 256
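
# A minimal sketch of an LSTM that would consume the hyperparameters above; the
# class name and layer layout are assumptions and are not the MusicLSTMNet /
# MyRecurrentNet classes used in the earlier examples.
import torch.nn as nn

class SketchMusicLSTM(nn.Module):
    def __init__(self, vocab_size, hidden_size=256, num_layers=2, dropout_prob=0.5):
        super().__init__()
        self.embedding = nn.Embedding(vocab_size, hidden_size)
        self.lstm = nn.LSTM(hidden_size, hidden_size, num_layers=num_layers,
                            dropout=dropout_prob, batch_first=True)
        self.fc = nn.Linear(hidden_size, vocab_size)

    def forward(self, tokens, state=None):
        # tokens: (batch, seq_len) integer-encoded events -> per-step vocab logits
        out, state = self.lstm(self.embedding(tokens), state)
        return self.fc(out), state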

print('Train histogram')