Example No. 1
 def __init__(self, n_inputs, n_outputs):
     super().__init__()
     W = np.random.randn(n_inputs, n_outputs) * np.sqrt(2.0 / n_inputs)
     self.weight = Tensor(W, autograd=True)
     self.bias = Tensor(np.zeros(n_outputs), autograd=True)
     self.parameters.append(self.weight)
     self.parameters.append(self.bias)
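
The np.sqrt(2.0 / n_inputs) factor is He-style initialization: it scales the Gaussian weights so the variance of the pre-activations stays roughly constant as the layer width changes. A standalone NumPy sketch, independent of the Tensor class, that checks this property:

import numpy as np

np.random.seed(0)
n_inputs, n_outputs = 512, 256
x = np.random.randn(1000, n_inputs)   # unit-variance inputs
W = np.random.randn(n_inputs, n_outputs) * np.sqrt(2.0 / n_inputs)
z = x @ W
print(z.var())   # stays near 2.0 for any choice of n_inputs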
Example No. 2
 def test_mm_001(self):
     a1 = Tensor(np.array([[1.0], [2.0], [3.0]]), autograd=True)
     w_2 = Tensor(np.array([[11.0, 21.0, 31.0], [12.0, 22.0, 32.0],
                            [13.0, 23.0, 33.0], [14.0, 24.0, 34.0]]),
                  autograd=True)
     z2 = w_2.mm(a1)
     print('z2: {0};\r\n{1}'.format(z2.data.shape, z2))
     z2.backward(Tensor(np.ones_like(z2.data)))
     print('a1.grad: {0};'.format(a1.grad))
     print('w_2.grad: {0};'.format(w_2.grad))
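
For z2 = w_2.mm(a1), with w_2 of shape (4, 3) and a1 of shape (3, 1), the gradients printed above follow the standard matrix-multiply rule: the weight gradient is grad_z2 @ a1.T and the input gradient is w_2.T @ grad_z2. A NumPy-only check of that rule, using the same values:

import numpy as np

a1 = np.array([[1.0], [2.0], [3.0]])
w2 = np.array([[11.0, 21.0, 31.0], [12.0, 22.0, 32.0],
               [13.0, 23.0, 33.0], [14.0, 24.0, 34.0]])
grad_z2 = np.ones((4, 1))   # seed gradient, as in backward() above
grad_w2 = grad_z2 @ a1.T    # (4, 1) @ (1, 3) -> (4, 3)
grad_a1 = w2.T @ grad_z2    # (3, 4) @ (4, 1) -> (3, 1)
print(grad_w2)
print(grad_a1)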
Example No. 3
    def __init__(self, vocab_size, dim):
        super().__init__()

        self.vocab_size = vocab_size
        self.dim = dim

        # this random initialization style is just a convention from word2vec
        self.weight = Tensor((np.random.rand(vocab_size, dim) - 0.5) / dim,
                             autograd=True)

        self.parameters.append(self.weight)
Example No. 4
 def test_mul_backward_001(self):
     a = Tensor(np.array([1, 2, 3, 4, 5]), autograd=True)
     b = Tensor(np.array([10, 10, 10, 10, 10]), autograd=True)
     c = Tensor(np.array([5, 4, 3, 2, 1]), autograd=True)
     d = a + b
     e = b - c
     f = d * e
     f.backward(Tensor(np.array([1, 1, 1, 1, 1])))
     print('f: {0};'.format(f.to_string()))
     print('d: {0};'.format(d.to_string()))
     print('e: {0};'.format(e.to_string()))
     print('a: {0};'.format(a.to_string()))
     print('b: {0};'.format(b.to_string()))
     print('c: {0};'.format(c.to_string()))
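
With f = d * e, the product rule sends e back along the d branch and d back along the e branch; a and c each sit on one path, while b feeds both d and e, so its gradients accumulate. The same arithmetic in plain NumPy, independent of the Tensor class:

import numpy as np

a = np.array([1, 2, 3, 4, 5])
b = np.array([10, 10, 10, 10, 10])
c = np.array([5, 4, 3, 2, 1])
d, e = a + b, b - c
grad_f = np.ones(5)
grad_d = grad_f * e       # product rule
grad_e = grad_f * d
grad_a = grad_d           # d = a + b passes gradients through unchanged
grad_b = grad_d + grad_e  # b contributes to both d and e, so grads add
grad_c = -grad_e          # e = b - c negates the c path
print(grad_a, grad_b, grad_c)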
Example No. 5
 def test_train_nn_003(self):
     np.random.seed(0)
     data = Tensor(np.array([[0, 0], [0, 1], [1, 0], [1, 1]]),
                   autograd=True)
     target = Tensor(np.array([[0], [1], [0], [1]]), autograd=True)
     model = Sequential([Linear(2, 3), Linear(3, 1)])
     optim = OptimSgd(parameters=model.get_parameters(), alpha=0.05)
     for i in range(10):
         pred = model.forward(data)
         loss = ((pred - target) * (pred - target)).sum(0)
         loss.backward(Tensor(np.ones_like(loss.data)))
         optim.step()
         print('epoch_{0}: loss={1};'.format(i, loss))
     print(loss)
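
The loss here is a summed squared error, so seeding backward with ones means the gradient arriving at pred is 2 * (pred - target). A quick NumPy check of that local derivative (the prediction values are made up for illustration):

import numpy as np

pred = np.array([[0.5], [0.2], [0.9], [0.4]])   # hypothetical predictions
target = np.array([[0.0], [1.0], [0.0], [1.0]])
loss = ((pred - target) ** 2).sum(0)            # shape (1,)
grad_pred = 2 * (pred - target)                 # backward of square, then sum
print(loss, grad_pred.ravel())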
Example No. 6
 def test_train_nn_006(self):
     np.random.seed(0)
     data = Tensor(np.array([[0, 0], [0, 1], [1, 0], [1, 1]]),
                   autograd=True)
     target = Tensor(np.array([[0], [1], [0], [1]]), autograd=True)
     model = Sequential([Linear(2, 3), AfRelu(), Linear(3, 1), AfSigmoid()])
     criterion = LossMse()
     optim = OptimSgd(parameters=model.get_parameters(), alpha=0.8)
     for i in range(10):
         pred = model.forward(data)
         loss = criterion.forward(pred, target)
         loss.backward(Tensor(np.ones_like(loss.data)))
         optim.step()
         print('epoch_{0}: loss={1};'.format(i, loss))
     print(loss)
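
AfSigmoid squashes the single output into (0, 1), matching the 0/1 targets, and its derivative has the convenient closed form s * (1 - s), which is presumably what its backward uses. A small NumPy sketch of the activation and its local gradient:

import numpy as np

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

x = np.array([-2.0, 0.0, 2.0])
s = sigmoid(x)
print(s)                # activation values in (0, 1)
print(s * (1.0 - s))    # local gradient, largest at x = 0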
Example No. 7
 def test_init_001(self):
     a = Tensor(np.array([1, 2, 3, 4, 5]), autograd=True)
     b = Tensor(np.array([10, 10, 10, 10, 10]), autograd=True)
     c = Tensor(np.array([5, 4, 3, 2, 1]), autograd=True)
     d = a + b
     e = b + c
     f = d + e
     print('f: {0};'.format(f.to_string()))
     print('d: {0};'.format(d.to_string()))
     print('e: {0};'.format(e.to_string()))
     print('a: {0};'.format(a.to_string()))
     print('b: {0};'.format(b.to_string()))
     print('c: {0};'.format(c.to_string()))
Example No. 8
 def test_embedding_001(self):
     np.random.seed(0)
     data = Tensor(np.array([1, 2, 1, 2]), autograd=True)
     target = Tensor(np.array([[0], [1], [0], [1]]), autograd=True)
     embed = Embedding(5, 3)
     model = Sequential([embed, AfTanh(), Linear(3, 1), AfSigmoid()])
     criterion = LossMse()
     optim = OptimSgd(parameters=model.get_parameters(), alpha=0.5)
     for i in range(10):
         # Predict
         pred = model.forward(data)
         # Compare
         loss = criterion.forward(pred, target)
         # Learn
         loss.backward(Tensor(np.ones_like(loss.data)))
         optim.step()
         print('epoch_{0}: loss={1};'.format(i, loss))
Example No. 9
 def test_cross_entropy_001(self):
     np.random.seed(0)
     # data indices
     data = Tensor(np.array([1, 2, 1, 2]), autograd=True)
     # target indices
     target = Tensor(np.array([0, 1, 0, 1]), autograd=True)
     model = Sequential([Embedding(3, 3), AfTanh(), Linear(3, 4)])
     criterion = LossCrossEntropy()
     optim = OptimSgd(parameters=model.get_parameters(), alpha=0.1)
     for i in range(10):
         # Predict
         pred = model.forward(data)
         # Compare
         loss = criterion.forward(pred, target)
         # Learn
         loss.backward(Tensor(np.ones_like(loss.data)))
         optim.step()
         print('epoch_{0}: loss={1};'.format(i, loss))
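
LossCrossEntropy is not shown here; a conventional implementation over raw scores is softmax followed by negative log-likelihood, whose backward is the tidy probs - one_hot(target). A NumPy sketch under that assumption:

import numpy as np

def cross_entropy(logits, target):
    shifted = logits - logits.max(axis=1, keepdims=True)   # numerical stability
    probs = np.exp(shifted) / np.exp(shifted).sum(axis=1, keepdims=True)
    n = len(target)
    loss = -np.log(probs[np.arange(n), target]).mean()
    grad = probs.copy()
    grad[np.arange(n), target] -= 1.0                      # softmax minus one-hot
    return loss, grad / n

logits = np.array([[2.0, 0.5, 0.1, 0.1], [0.2, 1.5, 0.3, 0.4]])
loss, grad = cross_entropy(logits, np.array([0, 1]))
print(loss, grad)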
Example No. 10
class Linear(Layer):
    def __init__(self, n_inputs, n_outputs):
        super().__init__()
        W = np.random.randn(n_inputs, n_outputs) * np.sqrt(2.0 / n_inputs)
        self.weight = Tensor(W, autograd=True)
        self.bias = Tensor(np.zeros(n_outputs), autograd=True)
        self.parameters.append(self.weight)
        self.parameters.append(self.bias)

    def forward(self, input):
        return input.mm(self.weight) + self.bias.expand(0, len(input.data))
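
The bias.expand(0, len(input.data)) call tiles the bias vector into one row per example so the addition is elementwise; plain NumPy gets the same result through broadcasting. A standalone sketch (names hypothetical):

import numpy as np

np.random.seed(0)
x = np.random.randn(4, 2)                     # batch of 4 inputs
W = np.random.randn(2, 3) * np.sqrt(2.0 / 2)
b = np.zeros(3)
out = x @ W + b                               # broadcasting replaces expand
print(out.shape)                              # (4, 3)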
Example No. 11
class Embedding(Layer):
    def __init__(self, vocab_size, dim):
        super().__init__()

        self.vocab_size = vocab_size
        self.dim = dim

        # this random initialization style is just a convention from word2vec
        self.weight = Tensor((np.random.rand(vocab_size, dim) - 0.5) / dim,
                             autograd=True)

        self.parameters.append(self.weight)

    def forward(self, input):
        return self.weight.index_select(input)
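
index_select is a row lookup: each integer index pulls out one row of the weight matrix, and the matching backward scatter-adds the incoming gradients into those rows, so repeated indices accumulate. A NumPy sketch of both directions, independent of the Tensor class:

import numpy as np

np.random.seed(0)
weight = (np.random.rand(5, 3) - 0.5) / 3   # Embedding(5, 3)-style init
idx = np.array([1, 2, 1, 2])
out = weight[idx]                           # forward: shape (4, 3)
grad_out = np.ones_like(out)
grad_weight = np.zeros_like(weight)
np.add.at(grad_weight, idx, grad_out)       # backward: scatter-add into rows
print(grad_weight)                          # rows 1 and 2 accumulate twice each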
Example No. 12
 def test_train_nn_002(self):
     np.random.seed(0)
     data = Tensor(np.array([[0, 0], [0, 1], [1, 0], [1, 1]]),
                   autograd=True)
     target = Tensor(np.array([[0], [1], [0], [1]]), autograd=True)
     w = list()
     w.append(Tensor(np.random.rand(2, 3), autograd=True))
     w.append(Tensor(np.random.rand(3, 1), autograd=True))
     optim = OptimSgd(parameters=w, alpha=0.1)
     for i in range(10):
         pred = data.mm(w[0]).mm(w[1])
         loss = ((pred - target) * (pred - target)).sum(0)
         loss.backward(Tensor(np.ones_like(loss.data)))
         optim.step()
         print('epoch_{0}: loss={1};'.format(i, loss))
     print(loss)
Example No. 13
 def test_train_nn_001(self):
     np.random.seed(0)
     data = Tensor(np.array([[0, 0], [0, 1], [1, 0], [1, 1]]),
                   autograd=True)
     target = Tensor(np.array([[0], [1], [0], [1]]), autograd=True)
     w = list()
     w.append(Tensor(np.random.rand(2, 3), autograd=True))
     w.append(Tensor(np.random.rand(3, 1), autograd=True))
     for i in range(10):
         pred = data.mm(w[0]).mm(w[1])
         loss = ((pred - target) * (pred - target)).sum(0)
         loss.backward(Tensor(np.ones_like(loss.data)))
         for w_ in w:
             w_.data -= w_.grad.data * 0.1
             w_.grad.data *= 0
         print('epoch_{0}: loss={1};'.format(i, loss))
     print(loss)
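
The two lines inside the parameter loop are the whole SGD update written by hand: step against the gradient, then zero it so gradients do not accumulate across epochs. OptimSgd in the previous test presumably wraps the same update; a minimal sketch of one step:

import numpy as np

np.random.seed(0)
w = np.random.rand(3, 1)
grad = np.ones((3, 1))   # stand-in for a gradient produced by backward()
alpha = 0.1
w -= alpha * grad        # descend along the gradient
grad *= 0                # reset so the next backward pass starts clean
print(w.ravel())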
Example No. 14
 def test_sum_001(self):
     v = Tensor(np.array([[1, 2, 3], [4, 5, 6]]))
     print(v.sum(0))
     print(v.sum(1))
Example No. 15
 def test_sum_grad_001(self):
     v = Tensor(np.array([[1, 2, 3], [4, 5, 6]]), autograd=True)
     u = v.sum(0)
     u.backward(Tensor(np.array([1, 1, 1])))
     print('grad: {0};'.format(v.to_string()))
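
Summation and expansion are each other's backward: every element of v contributed exactly once to the column totals, so the seed gradient of ones is broadcast back to v's full shape. The same check in plain NumPy:

import numpy as np

v = np.array([[1, 2, 3], [4, 5, 6]])
u = v.sum(0)                               # shape (3,)
grad_u = np.ones(3)
grad_v = np.broadcast_to(grad_u, v.shape)  # expand back to (2, 3)
print(grad_v)                              # all ones, matching v.grad above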
Example No. 16
 def init_hidden(self, batch_size=1):
     return Tensor(np.zeros((batch_size, self.n_hidden)), autograd=True)
Example No. 17
 def test_transpose_001(self):
     v = Tensor(np.array([[1, 2, 3], [4, 5, 6]]), autograd=True)
     v_t = v.transpose()
     print('v_t: \r\n{0};'.format(v_t))
     v_t.backward(Tensor(np.array([[1, 1], [1, 1], [1, 1]])))
     print('grad v: \r\n{0};'.format(v.grad))
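
Transpose is its own inverse, so its backward simply transposes the incoming gradient: the (3, 2) seed becomes a (2, 3) gradient for v. In NumPy terms:

import numpy as np

v = np.array([[1, 2, 3], [4, 5, 6]])
grad_vt = np.ones((3, 2))   # seed gradient, shaped like v.T
grad_v = grad_vt.T          # backward of transpose is transpose
print(grad_v.shape)         # (2, 3), matching v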
Example No. 18
 def test_rnn_cell_001(self):
     f = open('apps/drl/chpZ01/qa1_single-supporting-fact_train.txt', 'r')
     raw = f.readlines()
     f.close()
     tokens = list()
     for line in raw[0:1000]:
         tokens.append(line.lower().replace("\n", "").split(" ")[1:])
     new_tokens = list()
     for line in tokens:
         new_tokens.append(['-'] * (6 - len(line)) + line)
     tokens = new_tokens
     vocab = set()
     for sent in tokens:
         for word in sent:
             vocab.add(word)
     vocab = list(vocab)
     word2index = {}
     for i, word in enumerate(vocab):
         word2index[word] = i
     indices = list()
     for line in tokens:
         idx = list()
         for w in line:
             idx.append(word2index[w])
         indices.append(idx)
     data = np.array(indices)
     # train process
     embed = Embedding(vocab_size=len(vocab), dim=16)
     model = RnnCell(n_inputs=16, n_hidden=16, n_output=len(vocab))
     criterion = LossCrossEntropy()
     optim = OptimSgd(parameters=model.get_parameters() +
                      embed.get_parameters(),
                      alpha=0.05)
     for iter in range(1000):
         batch_size = 100
         total_loss = 0
         hidden = model.init_hidden(batch_size=batch_size)
         for t in range(5):
             input = Tensor(data[0:batch_size, t], autograd=True)
             rnn_input = embed.forward(input=input)
             output, hidden = model.forward(input=rnn_input, hidden=hidden)
         target = Tensor(data[0:batch_size, t + 1], autograd=True)
         loss = criterion.forward(output, target)
         loss.backward()
         optim.step()
         total_loss += loss.data
          if iter % 200 == 0:
             p_correct = (target.data == np.argmax(output.data,
                                                   axis=1)).mean()
             print("Loss:", total_loss / (len(data) / batch_size),
                   "% Correct:", p_correct)
     # test process
     batch_size = 1
     hidden = model.init_hidden(batch_size=batch_size)
     for t in range(5):
         input = Tensor(data[0:batch_size, t], autograd=True)
         rnn_input = embed.forward(input=input)
         output, hidden = model.forward(input=rnn_input, hidden=hidden)
     target = Tensor(data[0:batch_size, t + 1], autograd=True)
     loss = criterion.forward(output, target)
     ctx = ""
     for idx in data[0:batch_size][0][0:-1]:
         ctx += vocab[idx] + " "
     print("Context:", ctx)
     print("True:", vocab[target.data[0]])
     print("Pred:", vocab[output.data.argmax()])
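
RnnCell itself is not shown; a vanilla recurrent cell conventionally computes hidden = tanh(input @ W_ih + hidden @ W_hh) and output = hidden @ W_ho, which matches how the test threads hidden through five time steps before predicting the sixth token. A NumPy sketch of one unrolled pass under that assumption (all weight names hypothetical):

import numpy as np

np.random.seed(0)
n_inputs, n_hidden, n_output = 16, 16, 80
W_ih = np.random.randn(n_inputs, n_hidden) * 0.1
W_hh = np.random.randn(n_hidden, n_hidden) * 0.1
W_ho = np.random.randn(n_hidden, n_output) * 0.1

def rnn_step(x, h):
    h_new = np.tanh(x @ W_ih + h @ W_hh)   # mix input with prior state
    return h_new @ W_ho, h_new             # (output, next hidden)

h = np.zeros((100, n_hidden))              # init_hidden(batch_size=100)
for t in range(5):
    x = np.random.randn(100, n_inputs)     # stands in for embed.forward(...)
    output, h = rnn_step(x, h)
print(output.shape, h.shape)               # (100, 80) (100, 16)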
Example No. 19
 def test_expand_002(self):
     v = Tensor(np.array([[1, 2, 3], [4, 5, 6]]))
     print('v.expand: {0}'.format(v.expand(1, 4)))
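
Assuming expand(dim, copies) inserts a new axis at position dim holding copies repeats (consistent with its use as the backward of sum), v.expand(1, 4) turns the (2, 3) matrix into a (2, 4, 3) block in which each row appears four times. The NumPy equivalent:

import numpy as np

v = np.array([[1, 2, 3], [4, 5, 6]])
expanded = np.repeat(v[:, None, :], 4, axis=1)   # copy each row 4 times
print(expanded.shape)                            # (2, 4, 3)
print(expanded[0])                               # four copies of [1, 2, 3]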