Esempio n. 1
0
class Scorer(object):
    """Character-level RNN language-model scorer.

    Wraps a pretrained RNN and scores strings by their total (negative)
    cross-entropy under the model.  ``get_score_fast`` caches per-string
    hidden states so that incremental one-character extensions (e.g. in a
    beam search) only pay for a single extra RNN step.
    """

    def __init__(self, char_list, model_path, rnn_type, ninp, nhid, nlayers,
                 device):
        # Extend the vocabulary with the two silence markers used by callers.
        char_list = list(char_list) + ['sil_start', 'sil_end']
        # index -> char and char -> index lookup tables.
        self.inv_vocab_map = {i: c for i, c in enumerate(char_list)}
        self.vocab_map = {c: i for i, c in enumerate(char_list)}
        self.criterion = nn.CrossEntropyLoss()
        self.device = device
        self.rnn = RNN(rnn_type, len(char_list), ninp, nhid,
                       nlayers).to(self.device)
        self.rnn.load_state_dict(torch.load(model_path))
        self.rnn.eval()
        # string -> (score, hidden state) cache, replaced wholesale on each
        # get_score_fast() call so it only holds the latest batch.
        self.history = defaultdict(tuple)

    def get_score(self, string):
        """Return ``(score, hidden)`` for *string*.

        ``score`` is the negative total cross-entropy of the string's
        character transitions; ``hidden`` is the RNN state after consuming
        the input.  Strings shorter than 2 characters have no transition to
        score and get a score of 0 with a fresh hidden state.
        """
        if len(string) < 2:
            return 0, self.rnn.init_hidden(1)
        # BUG FIX: map() returns a lazy iterator in Python 3 and cannot be
        # sliced or len()-ed; materialise the indices as a list first.
        string_idx = [self.vocab_map[ch] for ch in string]
        inputs = torch.LongTensor(string_idx[:-1]).to(self.device)
        targets = torch.LongTensor(string_idx[1:]).to(self.device)
        inputs = inputs.view(1, inputs.size(0))
        init_hidden = self.rnn.init_hidden(1)
        pred, hidden = self.rnn(inputs, init_hidden)
        pred = pred.view(-1, pred.size(-1))
        # CrossEntropyLoss averages over the len(string)-1 transitions;
        # multiply back to get the total (negative) log-likelihood.
        loss = self.criterion(pred, targets)
        return -(len(string_idx) - 1) * loss.item(), hidden

    def get_score_fast(self, strings):
        """Score a batch of strings, reusing cached prefix hidden states.

        Each element of *strings* may be any iterable of characters; it is
        joined into a plain string first.  Strings of length <= 2 are scored
        from scratch; otherwise the cached state of the string itself or of
        its one-character-shorter prefix is extended by a single RNN step.

        Raises:
            ValueError: if neither the string nor its prefix is cached.
        """
        strings = [''.join(x) for x in strings]
        history_to_update = defaultdict(tuple)
        scores = []
        for string in strings:
            if len(string) <= 2:
                score, hidden_state = self.get_score(string)
                scores.append(score)
                history_to_update[string] = (score, hidden_state)
            elif string in self.history:
                # Exact cache hit: reuse score and hidden state verbatim.
                history_to_update[string] = self.history[string]
                scores.append(self.history[string][0])
            elif string[:-1] in self.history:
                # Extend the cached prefix by one RNN step: feed the
                # second-to-last char, score the last char as target.
                score, hidden = self.history[string[:-1]]
                step_input = torch.LongTensor(
                    [self.vocab_map[string[-2]]]).view(1, 1).to(self.device)
                target = torch.LongTensor(
                    [self.vocab_map[string[-1]]]).to(self.device)
                pred, hidden = self.rnn(step_input, hidden)
                loss = self.criterion(pred.view(-1, pred.size(-1)),
                                      target).item()
                history_to_update[string] = (score - loss, hidden)
                scores.append(score - loss)
            else:
                raise ValueError("%s not stored" % (string[:-1]))
        # Replace (not merge) the cache so it tracks only the latest batch.
        self.history = history_to_update
        return scores
Esempio n. 2
0
def rnn_train_single(rnn: RNN, x, y, learning_rate, criterion=None):
    """Run one manual-SGD training step of *rnn* on a single sequence.

    Feeds the time steps of ``x`` through the network one at a time
    (carrying the hidden state), computes the loss of the final output
    against ``y``, backpropagates, and applies a plain gradient-descent
    update to every parameter.

    Args:
        rnn: recurrent model exposing ``init_hidden()`` and per-step call.
        x: input sequence tensor, iterated along dimension 0.
        y: target for the final output.
        learning_rate: SGD step size.
        criterion: loss module; defaults to a fresh ``nn.MSELoss()``.
            (A module instance as a default argument would be shared
            across every call of this function.)

    Returns:
        ``(output, loss_value)`` — the final network output and the
        scalar loss.

    Raises:
        ValueError: if ``x`` has no time steps (there would be no output
            to score and ``output`` would be unbound).
    """
    if x.size(0) == 0:
        raise ValueError("x must contain at least one time step")
    if criterion is None:
        criterion = nn.MSELoss()

    hidden = rnn.init_hidden()
    rnn.zero_grad()

    output = None
    for step in range(x.size(0)):
        output, hidden = rnn(x[step], hidden)

    loss = criterion(output, y)
    loss.backward()

    # Update parameters with plain SGD (no optimizer object).
    for p in rnn.parameters():
        p.data.add_(p.grad.data, alpha=-learning_rate)

    return output, loss.item()
Esempio n. 3
0
class CRNN(nn.Module):
    """C-RNN module: a CNN feature extractor feeding a recurrent head."""

    def __init__(self):
        super(CRNN, self).__init__()
        self.cnn = CNN()
        self.rnn = RNN(input_size=2048, hidden_size=256, output_size=4)

    def forward(self, x, hidden):
        """Extract CNN features from *x*, then run one RNN step.

        Returns the RNN output and the updated hidden state.
        """
        # Call the submodules (not .forward() directly) so that
        # nn.Module.__call__ machinery and any registered hooks run.
        x_feats = self.cnn(x)
        output, hidden = self.rnn(x_feats, hidden)
        return output, hidden

    def init_hidden(self, batch_size):
        """Initialize hidden units to zero (delegates to the RNN)."""
        return self.rnn.init_hidden(batch_size)
Esempio n. 4
0
                n_layers=config['rnn_layers'],
                dropout=config['dropout'])
    optimizer = optim.Adam(model.parameters(), config['learning_rate'])
    criterion = nn.CrossEntropyLoss()

    # print_every = 10
    all_losses = []
    total_loss = 0  # running sum of per-example losses
    saves = 0

    start = time.time()

    # One pass over every training name per epoch; each name is scored
    # character-by-character with teacher forcing (input = all chars but
    # the last, target = all chars but the first).
    for i in range(config['epochs']):
        for training_example in names:
            model.zero_grad()
            model.hidden = model.init_hidden()
            training_example_tensor = to_tensor(training_example, char_to_ix)
            input = training_example_tensor[:-1]
            target = training_example_tensor[1:]
            loss = 0

            # Accumulate the loss over every timestep of the sequence.
            for j in range(input.size(0)):
                output = model(input[j])
                loss += criterion(output, target[j].view(-1))

            total_loss += loss.data.item()
            loss.backward()
            # Clip gradients to keep RNN training stable.
            torch.nn.utils.clip_grad_norm_(model.parameters(), 5.0)
            optimizer.step()

        # NOTE(review): total_loss is never reset inside the epoch loop in
        # this view, so this average accumulates across epochs — confirm
        # that is intended.
        avg_loss = total_loss / len(names)