Example #1
                       cfg['data']['data_augmentation'])
dataloader = DataLoader(dataset,
                        batch_size=cfg['model']['batch_size'],
                        shuffle=False)

print('-> START TRAINING')
if cfg['hyperparams']['optimiser'] == 'adam':
    optimiser = torch.optim.Adam(lstm_model.parameters(),
                                 lr=cfg['hyperparams']['learning_rate'])

for batch_idx, batch_data in enumerate(dataloader):
    # zero grad model
    optimiser.zero_grad()

    # re-init hidden states
    lstm_model.hidden = lstm_model.init_hidden()

    # sort batch based on sequence length
    sort_batch(batch_data)

    # put batch on GPU
    batch_data = to_cuda(batch_data)

    # feed batch through model
    Y_output = lstm_model(batch_data[0], batch_data[2],
                          cfg['hyperparams']['sequence_length'])
    Y_target = batch_data[1]
    Y_lengths = batch_data[2]

    # calculate loss and update the model parameters
    loss = ce_loss(Y_output, Y_target, Y_lengths)
    loss.backward()
    optimiser.step()
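
# The snippet above relies on helpers that are not shown (sort_batch, the
# model's init_hidden and the ce_loss function). Below is a minimal sketch of
# a length-masked cross-entropy; the shapes assumed here (Y_output as
# (batch, seq_len, n_classes) logits, Y_target as (batch, seq_len) class
# indices, Y_lengths as a 1-D tensor of true lengths) are assumptions, not
# taken from the original code.
import torch
import torch.nn.functional as F

def masked_cross_entropy(Y_output, Y_target, Y_lengths):
    batch_size, seq_len, n_classes = Y_output.shape
    # mask[i, t] == 1 while timestep t lies inside sequence i
    mask = (torch.arange(seq_len, device=Y_output.device)[None, :]
            < Y_lengths.to(Y_output.device)[:, None]).float()
    loss = F.cross_entropy(Y_output.reshape(-1, n_classes),
                           Y_target.reshape(-1),
                           reduction='none').reshape(batch_size, seq_len)
    # average only over the real (unpadded) timesteps
    return (loss * mask).sum() / mask.sum()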
Example #2
with open(args.seed_file, 'rb') as f:
    id_to_sheet = pickle.load(f)
    data = pickle.load(f)

### BOOTSTRAPPING

# get seed sequence
numpy_seed_sequence = data[args.seed_index][:, 130:]
# convert to tensor + add batch dimension
seed_sequence = torch.FloatTensor(numpy_seed_sequence).unsqueeze(0)

print('-> INFERENCE')
### SAMPLING LOOP
for n in range(args.n_samples):
    # reset RNN hidden states
    model.hidden = model.init_hidden()

    # feed sequence through RNN and get last output
    o = torch.exp(
        model.forward(seed_sequence, None, None,
                      temperature=args.temperature)[0, -1, :])

    # sample rhythm and chord
    rhythm = torch.multinomial(o[:13], 1)[0]
    chord = torch.multinomial(o[13:], 1)[0]

    if chord == 48 or rhythm == 12:  # enforce consistent barlines
        rhythm = 12
        chord = 48

    # generate one-hot vector
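    # (sketch, not taken from the original: rebuild a one-hot frame with the
    # same layout as o, i.e. 13 rhythm slots followed by the chord slots, and
    # append it to the running sequence so the next iteration conditions on
    # it; this assumes the model's input and output share that feature layout)
    frame = torch.zeros(1, 1, o.shape[0])   # (batch, time, features)
    frame[0, 0, rhythm] = 1.0               # rhythm one-hot in slots 0..12
    frame[0, 0, 13 + chord] = 1.0           # chord one-hot in the remaining slots
    seed_sequence = torch.cat([seed_sequence, frame], dim=1)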
Example #3
VLoader = DataLoader(vset,
                     batch_size=batch_size,
                     shuffle=False,
                     drop_last=True,
                     num_workers=num_workers)

model = LSTM(n_mels, batch_size, num_layers=n_layers)
loss_function = nn.NLLLoss()
optimizer = optim.Adam(model.parameters(), lr=l_rate)
#scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, patience=10, verbose=True)
""" stateD = torch.load("lstm_399.nn")
model.load_state_dict(stateD['state_dict']) """
val_loss_list, val_accuracy_list, epoch_list = [], [], []
loss_function.to(device)
model.to(device)
model.hidden = model.init_hidden(device)
#optimizer.load_state_dict(stateD['optim'])

for epoch in tqdm(range(n_epochs), desc='Epoch'):
    train_running_loss, train_acc = 0.0, 0.0
    model.train()
    for idx, (X, y) in enumerate(tqdm(TLoader, desc="Training")):
        X, y = X.to(device), y.to(device)
        model.zero_grad()
        out = model(X)
        loss = loss_function(out, y)
        loss.backward()
        optimizer.step()
        train_running_loss += loss.detach().item()
        train_acc += model.get_accuracy(out, y)
        if LOG and idx != 0 and idx % log_intervall == 0:
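            # (sketch, not taken from the original, which ends at the line
            # above: a typical periodic log of the running training loss)
            tqdm.write('epoch {} batch {}: loss {:.4f}'.format(
                epoch, idx, train_running_loss / (idx + 1)))

    # (sketch, not taken from the original: a validation pass over VLoader at
    # the end of each epoch, matching the val_loss_list / val_accuracy_list /
    # epoch_list bookkeeping set up above)
    model.eval()
    val_running_loss, val_acc = 0.0, 0.0
    with torch.no_grad():
        for X, y in tqdm(VLoader, desc='Validation'):
            X, y = X.to(device), y.to(device)
            out = model(X)
            val_running_loss += loss_function(out, y).item()
            val_acc += model.get_accuracy(out, y)
    val_loss_list.append(val_running_loss / len(VLoader))
    val_accuracy_list.append(val_acc / len(VLoader))
    epoch_list.append(epoch)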