        output = self.i2o(concatenated)
        output = self.softmax(output)
        return output, hidden

    def init_hidden(self):
        return torch.zeros(1, self.hidden_size)


category_lines, all_categories = load_data()
n_categories = len(all_categories)

n_hidden = 128
rnn = RNN(N_LETTERS, n_hidden, n_categories)

# One step: a single character through the RNN
input_tensor = letter_to_tensor("A")
hidden_tensor = rnn.init_hidden()

output, next_hidden = rnn(input_tensor, hidden_tensor)
print(output.size())
print(next_hidden.size())

# Whole sequence/name: here only the first character of the name tensor
input_tensor = line_to_tensor("Albert")
hidden_tensor = rnn.init_hidden()

output, next_hidden = rnn(input_tensor[0], hidden_tensor)
print(output.size())
print(next_hidden.size())
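
# A minimal sketch (not part of the snippet above) of how the same RNN can be
# applied to every character of a name in turn, carrying the hidden state
# forward, and how the final output can be mapped back to a category label.
# It reuses rnn, line_to_tensor, and all_categories defined above;
# category_from_output is a hypothetical helper name introduced here.
def category_from_output(output):
    # output is a 1 x n_categories tensor of log-probabilities
    category_idx = torch.argmax(output).item()
    return all_categories[category_idx]

input_tensor = line_to_tensor("Albert")
hidden_tensor = rnn.init_hidden()

# Feed the characters one by one, reusing the returned hidden state each step
for i in range(input_tensor.size()[0]):
    output, hidden_tensor = rnn(input_tensor[i], hidden_tensor)

# Prediction after seeing the whole (untrained, so essentially random) name
print(category_from_output(output))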