Example no. 1
import unittest
import numpy as np

class TestRNN(unittest.TestCase):
    def setUp(self):
        # Shapes follow N=7 (batch), D=3 (input dim), H=3 (hidden dim):
        # Wx is (D, H), Wh is (H, H), b is (H,).
        Wx = np.random.randn(3, 3)
        Wh = np.random.randn(3, 3)
        b = np.random.randn(3,)
        self.rnn = RNN(Wx, Wh, b)
        self.x = np.random.randn(7, 3)       # (N, D)
        self.h_prev = np.random.randn(7, 3)  # (N, H)

    def test_forward(self):
        h_next = self.rnn.forward(self.x, self.h_prev)
        self.assertEqual((7, 3), h_next.shape)

    def test_backward(self):
        h_next = self.rnn.forward(self.x, self.h_prev)
        # Use h_next as a stand-in upstream gradient; only shapes are checked.
        dx, dh_prev = self.rnn.backward(h_next)
        self.assertEqual((7, 3), dx.shape)
        self.assertEqual((7, 3), dh_prev.shape)
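
The RNN class exercised by these tests is not shown. A minimal single-step tanh cell consistent with the shape checks above might look like the following sketch (not the tested implementation; note that the word-level examples further down use a different RNN with its own constructor):

class RNN:
    """Minimal single-step tanh cell; Wx is (D, H), Wh is (H, H), b is (H,)."""
    def __init__(self, Wx, Wh, b):
        self.params = [Wx, Wh, b]
        self.grads = [np.zeros_like(Wx), np.zeros_like(Wh), np.zeros_like(b)]
        self.cache = None

    def forward(self, x, h_prev):
        Wx, Wh, b = self.params
        h_next = np.tanh(np.dot(h_prev, Wh) + np.dot(x, Wx) + b)
        self.cache = (x, h_prev, h_next)
        return h_next

    def backward(self, dh_next):
        Wx, Wh, b = self.params
        x, h_prev, h_next = self.cache
        dt = dh_next * (1 - h_next ** 2)           # back through tanh
        self.grads[2][...] = dt.sum(axis=0)        # db
        self.grads[1][...] = np.dot(h_prev.T, dt)  # dWh
        self.grads[0][...] = np.dot(x.T, dt)       # dWx
        dx = np.dot(dt, Wx.T)
        dh_prev = np.dot(dt, Wh.T)
        return dx, dh_prev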
Example no. 2

def sim_test():
    samples = generate_dataset(100)

    word2id, id2word = build_vocab(samples)
    vocab_size = len(word2id)

    rnn = RNN(input_dim=vocab_size, output_dim=vocab_size, hidden_dim=256)

    # Next-token task: input is the sample minus its last token,
    # target is the same sample shifted left by one.
    test_input = text2token(samples[0])[:-1]
    test_target = text2token(samples[0])[1:]
    print("Test Input:", test_input)
    print("Test Target:", test_target)
    inputs = one_hot_seq(token2id(test_input, word2id), vocab_size)
    outputs, hidden_states = rnn.forward(inputs)
    test_output = [id2word[np.argmax(out)] for out in outputs]
    print("Test Output:", test_output)

    for epoch in range(150):
        losses = []
        for sample in samples:
            ids = token2id(text2token(sample), word2id)
            inputs = one_hot_seq(ids[:-1], vocab_size)
            targets = one_hot_seq(ids[1:], vocab_size)

            outputs, hidden_states = rnn.forward(inputs)
            rnn.zero_grad()
            loss = rnn.backward(outputs, targets)
            rnn.update_params(lr=2e-4)
            losses.append(loss)
        print("Epoch", epoch, "Loss:", np.array(losses).mean())

    print("Test Input:", test_input)
    print("Test Target:", test_target)
    inputs = one_hot_seq(token2id(test_input, word2id), vocab_size)
    outputs, hidden_states = rnn.forward(inputs)
    test_output = [id2word[np.argmax(out)] for out in outputs]
    print("Test Output:", test_output)
Example no. 3

class TimeRNN:
    def __init__(self, Wx, Wh, b, stateful=False):
        # Minimal constructor consistent with the attributes forward() uses
        self.params = [Wx, Wh, b]
        self.h = None
        self.stateful = stateful
        self.layers = None

    def forward(self, xs):
        Wx, Wh, b = self.params
        N, T, D = xs.shape
        D, H = Wx.shape
        self.layers = []
        hs = np.empty((N, T, H), dtype='f')
        if not self.stateful or self.h is None:
            self.h = np.zeros((N, H), dtype='f')
        for t in range(T):
            # One cell per time step; all steps share the same parameters
            layer = RNN(*self.params)
            self.h = layer.forward(xs[:, t, :], self.h)
            hs[:, t, :] = self.h
            self.layers.append(layer)
        return hs
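
Given the single-step cell sketched under Example no. 1, the time layer can be smoke-tested like this (the concrete N, T, D, H values are arbitrary):

N, T, D, H = 2, 5, 3, 4
layer = TimeRNN(np.random.randn(D, H), np.random.randn(H, H),
                np.random.randn(H), stateful=False)
hs = layer.forward(np.random.randn(N, T, D))
print(hs.shape)  # (2, 5, 4)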
Example no. 4
import torch.nn as nn

class CRNN(nn.Module):
    """C-RNN module: a CNN feature extractor followed by an RNN head."""
    def __init__(self):
        super().__init__()
        self.cnn = CNN()
        self.rnn = RNN(input_size=2048, hidden_size=256, output_size=4)

    def forward(self, x, hidden):
        # Call the submodules directly so nn.Module hooks are honored
        x_feats = self.cnn(x)
        output, hidden = self.rnn(x_feats, hidden)
        return output, hidden

    def init_hidden(self, batch_size):
        """Initialize the hidden state to zeros."""
        return self.rnn.init_hidden(batch_size)
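
A hypothetical forward pass, assuming the CNN accepts 224x224 RGB images and emits 2048-dim feature vectors (those shapes are assumptions, not taken from the example):

import torch

model = CRNN()
hidden = model.init_hidden(batch_size=8)
x = torch.randn(8, 3, 224, 224)  # assumed input shape for the CNN
output, hidden = model(x, hidden)
print(output.shape)              # expected: (8, 4) class scores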
Example no. 5

import os
import numpy as np
import pandas as pd

def real_test():
    if not os.path.exists('train.csv'):
        prepare_data()

    data = pd.read_csv('train.csv')
    texts = data['text'].tolist()
    labels = data['target'].tolist()

    word2id, id2word = build_vocab(texts)
    vocab_size = len(word2id)

    ids_list = []
    for text in texts:
        ids_list.append(token2id(text2token(text), word2id))

    num_class = len(set(labels))
    rnn = RNN(input_dim=vocab_size, output_dim=num_class, hidden_dim=256)

    # Train on a small subset to keep the demo fast
    ids_list = ids_list[:100]
    labels = labels[:100]
    for epoch in range(10):
        losses = []
        accs = []  # reset per epoch so the reported accuracy is per-epoch
        for inputs, label in zip(ids_list, labels):
            inputs = one_hot_seq(inputs, vocab_size)
            label = one_hot_seq([label], num_class)[0]
            outputs, hidden_states = rnn.forward(inputs)
            # Classify from the output at the last time step
            pred = outputs[-1]
            rnn.zero_grad()
            loss = rnn.backward(pred, label)
            accs.append(np.argmax(label) == np.argmax(pred))
            rnn.update_params(lr=2e-4)
            losses.append(loss)
        print("Epoch", epoch, "Loss:",
              np.array(losses).mean(), "Acc:",
              np.array(accs).mean())
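
build_vocab, text2token, and token2id are shared by sim_test and real_test but never shown; minimal versions consistent with how they are called could be (whitespace tokenization is an assumption):

def text2token(text):
    # Assumed: lowercase + whitespace tokenization
    return text.lower().split()

def build_vocab(texts):
    words = sorted({w for t in texts for w in text2token(t)})
    word2id = {w: i for i, w in enumerate(words)}
    id2word = {i: w for w, i in word2id.items()}
    return word2id, id2word

def token2id(tokens, word2id):
    return [word2id[t] for t in tokens]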
Example no. 6
# Fragment: tail of a processData(data, backprop=True) training helper.
        if backprop:
            # dL/dy for softmax + cross-entropy: probs with 1 subtracted
            # at the target index
            d_L_d_y = probs
            d_L_d_y[target] -= 1

            # Backward
            rnn.backprop(d_L_d_y)

    return loss / len(data), num_correct / len(data)


def train():
    # Training loop
    for epoch in range(1000):
        train_loss, train_acc = processData(train_data)

        if epoch % 100 == 99:
            print('--- Epoch %d' % (epoch + 1))
            print('Train:\tLoss %.3f | Accuracy: %.3f' %
                  (train_loss, train_acc))

            test_loss, test_acc = processData(test_data, backprop=False)
            print('Test:\tLoss %.3f | Accuracy: %.3f' % (test_loss, test_acc))


train()

new_text = 'i am very not happy'
out, _ = rnn.forward(createInputs(new_text))
probs = softmax(out)
print('prediction: %.2f%%' % (np.max(probs) * 100))
print('true' if np.argmax(probs) == 1 else 'false')
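
The fragment calls softmax without defining it; the usual numerically stable version is:

def softmax(xs):
    # Shift by the max for numerical stability; the result sums to 1
    e = np.exp(xs - np.max(xs))
    return e / np.sum(e)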
Example no. 7

# Fragment: tail of the same processData(data, backprop=True) helper.
        if backprop:
            # Build dL/dy
            d_L_d_y = probs
            d_L_d_y[target] -= 1

            # Backward
            rnn.backprop(d_L_d_y)

    return loss / len(data), num_correct / len(data)


# Training loop
for epoch in range(1000):
    train_loss, train_acc = processData(train_data)

    if epoch % 100 == 99:
        print('--- Epoch %d' % (epoch + 1))
        print('Train:\tLoss %.3f | Accuracy: %.3f' % (train_loss, train_acc))

        test_loss, test_acc = processData(test_data, backprop=False)
        print('Test:\tLoss %.3f | Accuracy: %.3f' % (test_loss, test_acc))

# Testing starts from here
inputs = createInputs('i am very good earlier but very sad now')
out, h = rnn.forward(inputs)
probs = softmax(out)
# print(probs) # [[0.50000095], [0.49999905]]
if probs[0] > probs[1]:
    print("Negative Comment")
else:
    print("Positive Comment")