# loss_function = nn.L1Loss()
import random

# Length of the training window used by the (currently disabled) sample().
sequence = 10


def sample(data, targets, s):
    """Draw a random start index for an (s+1)-long contiguous window.

    NOTE(review): the return statement is commented out (as is the only
    call site below), so this helper currently computes an index and
    returns None. Kept as-is for parity with the original.
    """
    n = random.randint(0, len(data) - (s + 5))
    # return data[n: n + s + 1], targets[n: n + s + 1]


# --- Training loop --------------------------------------------------------
# Relies on `epochs`, `l` (the RNN model), `loss_function`, `optimizer`,
# `data`, and `targets` being defined elsewhere in this file.
for epoch in range(1, epochs + 1):
    epoch_loss = 0
    l.reset_hidden()
    # sq, t = sample(data, targets, sequence)
    outputs = l.forward(data[:-1])
    loss = loss_function(outputs.unsqueeze(-1), targets)
    # FIX: gradients must be cleared before backward(); the original never
    # called zero_grad(), so gradients accumulated across epochs and every
    # step after the first applied a corrupted update.
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    # FIX: accumulate a plain float via .item() — the original added the
    # loss tensor itself, which retains the autograd graph and leaks memory.
    epoch_loss += loss.item()
    if epoch % 5 == 0:
        print(f"Epoch Number {epoch} Loss ---------------> {epoch_loss}")

# --- Per-step evaluation ---------------------------------------------------
# NOTE(review): this fragment appears truncated at the chunk boundary — the
# per-step `loss` is computed but never stored, and `epoch_outputs` is never
# appended to. Preserved as found; confirm against the full file.
epoch_outputs = []
l.reset_hidden()
for xi in range(len(data) - 1):
    # if xi % sequence == 0:
    #     l.reset_hidden()
    # l.reset_hidden()
    outputs = l.forward(data[xi].unsqueeze(0))
    loss = loss_function(outputs, targets[xi])
### BOOTSTRAPPING
# Get the seed sequence from the dataset; columns 130+ are the features fed
# back into the model (presumably the rhythm/chord one-hots — TODO confirm
# against the data layout).
numpy_seed_sequence = data[args.seed_index][:, 130:]
# Convert to tensor and add a batch dimension: (1, seq_len, features).
seed_sequence = torch.FloatTensor(numpy_seed_sequence).unsqueeze(0)

print('-> INFERENCE')

### SAMPLING LOOP
# Autoregressive generation: each iteration feeds the whole sequence through
# the model, samples one rhythm token and one chord token from the last
# output, and appends the corresponding one-hot frame to the sequence.
for n in range(args.n_samples):
    # Reset RNN hidden states so each step conditions only on the sequence.
    model.hidden = model.init_hidden()
    # FIX: run inference under no_grad() — the original built and retained
    # an autograd graph over the ever-growing sequence on every iteration,
    # wasting memory for no benefit during pure sampling.
    with torch.no_grad():
        # Model emits log-probabilities; exp() recovers probabilities for
        # multinomial sampling. Take the last timestep of the batch.
        o = torch.exp(
            model.forward(seed_sequence, None, None,
                          temperature=args.temperature)[0, -1, :])
    # First 13 entries are rhythm classes, the rest are chord classes.
    rhythm = torch.multinomial(o[:13], 1)[0]
    chord = torch.multinomial(o[13:], 1)[0]
    if chord == 48 or rhythm == 12:
        # Enforce consistent barlines: rhythm 12 and chord 48 are the
        # barline tokens and must always occur together.
        rhythm = 12
        chord = 48
    # Build the one-hot frame for the sampled (rhythm, chord) pair.
    one_hot = torch.zeros([1, 1, len(o)], dtype=torch.float32)
    one_hot[0, 0, rhythm] = 1.0
    one_hot[0, 0, 13 + chord] = 1.0
    # Append along the time axis so the next step conditions on it.
    seed_sequence = torch.cat((seed_sequence, one_hot), 1)