def train(encoder, decoder, epochs, learning_rate=0.01):
    """Train the encoder/decoder pair on the 'entchen' piece.

    Parameters
    ----------
    encoder, decoder : nn.Module
        The two halves of the seq2seq model; each gets its own SGD optimizer.
    epochs : int
        Number of training passes over the single example.
    learning_rate : float, optional
        SGD learning rate for both optimizers (default 0.01).
    """
    encoder_optimizer = optim.SGD(encoder.parameters(), lr=learning_rate)
    decoder_optimizer = optim.SGD(decoder.parameters(), lr=learning_rate)

    # Only the note sequence is used below; the piece object itself is not.
    _, notes = entchen.get()
    # decoderInput is never consumed by trainSingleExample, so discard it.
    encoderInput, _, decoderTarget = generateInput(notes, delta=1)

    criterion = nn.CrossEntropyLoss()

    input_tensor = torch.Tensor(encoderInput)
    # NOTE(review): torch.Tensor(...) produces float32, but
    # nn.CrossEntropyLoss normally expects integer class indices as the
    # target — confirm trainSingleExample converts/handles this.
    target_tensor = torch.Tensor(decoderTarget)

    for i in range(epochs):  # range(0, n) is just range(n)
        loss = trainSingleExample(input_tensor, target_tensor,
                                  encoder, decoder,
                                  encoder_optimizer, decoder_optimizer,
                                  criterion)
        print("Epoch", i+1, "finished. Loss:", loss)
def train(encoder, decoder, epochs, learning_rate=0.01):
    """Run `epochs` training iterations of the encoder/decoder pair.

    Both halves are optimized with Adam at `learning_rate`; the loss is
    BCELoss between the decoder output and the note targets produced by
    generateInput (tied representation, 50/50 split).
    """
    # Earlier experiment: optim.SGD for both optimizers.
    encoder_optimizer = optim.Adam(encoder.parameters(), lr=learning_rate)
    decoder_optimizer = optim.Adam(decoder.parameters(), lr=learning_rate)

    piece, notes = entchen.get()
    encoderInput, decoderInput, decoderTarget = generateInput(
        notes, delta=1, useTied=True, split=0.5)

    # Alternatives tried: nn.BCEWithLogitsLoss, nn.MultiLabelSoftMarginLoss.
    criterion = nn.BCELoss()

    input_tensor = torch.Tensor(encoderInput)
    target_tensor = torch.Tensor(decoderTarget)

    for epoch in range(0, epochs):
        loss = trainSingleExample(
            input_tensor, target_tensor,
            encoder, decoder,
            encoder_optimizer, decoder_optimizer,
            criterion)
        print("Epoch", epoch + 1, "finished. Loss:", loss)
from dataset import entchen
from deprecated.encodeNotes import generateInput
from keras.models import Model
from keras.layers import Input, LSTM, Dense

# Load the training piece and build the seq2seq input/target arrays.
piece, notes = entchen.get()
encoderInput, decoderInput, decoderTarget = generateInput(notes, delta=1)
print(encoderInput.shape, decoderInput.shape, decoderTarget.shape)

# Keras wants a leading batch axis; the batch here is a single example.
encoderInput = encoderInput.reshape(1, encoderInput.shape[0], encoderInput.shape[1])
decoderInput = decoderInput.reshape(1, decoderInput.shape[0], decoderInput.shape[1])
decoderTarget = decoderTarget.reshape(1, decoderTarget.shape[0], decoderTarget.shape[1])
print(encoderInput.shape, decoderInput.shape, decoderTarget.shape)

### Hyperparameters ###
num_encoder_tokens = 132
num_decoder_tokens = num_encoder_tokens
epochs = 100
batch_size = 1
hidden_state_size = 150

### Training data aliases ###
encoder_input_data = encoderInput
decoder_input_data = decoderInput
decoder_target_data = decoderTarget

### Model ###
encoder_inputs = Input(shape=(None, num_encoder_tokens))