Beispiel #1
0
    def _classify(self, words, glove_vocab, glove_embed):
        """Run one tokenized sentence through the LSTM node; return sigmoid output.

        Resets the cell state before the sentence. Out-of-vocabulary words are
        mapped to a zero embedding of the same (1, 25) shape.
        """
        self.node.c = autoTensor(torch.zeros(1, 25))
        h = autoTensor(torch.zeros(1, 25))
        for step, word in enumerate(words):
            try:
                index = glove_vocab.index(word)
                sub_x = glove_embed[index].view(1, 25)
            except ValueError:
                # Word not in vocabulary: substitute a zero vector.
                sub_x = glove_embed[0].view(1, 25) * 0
            x = autoTensor(sub_x)
            # `step` tags the gate tensors with a per-timestep name suffix
            # (the original passed an undefined name `j` here).
            h = self.node.forward(h, x, step)
        return F.sigmoid(self.out(h))

    def test(self, glove_vocab, glove_embed):
        """Smoke-test the model on two hand-written example sentences."""
        test1 = ["its", "a", "good", "product"]
        test2 = ["This", "is", "bad", "indeed"]

        # test 1
        print("\n\nTest 1: Its a good product", end=": ")
        print(self._classify(test1, glove_vocab, glove_embed))

        # test 2
        print("\n\nTest 2: [This,is,bad,indeed]", end=": ")
        print(self._classify(test2, glove_vocab, glove_embed))
Beispiel #2
0
    def forward(self, h, x, it):
        """One LSTM timestep.

        Computes the four gates from (h, x), updates the cell state self.c,
        and returns the new hidden state. Side effect: requires_grad is
        switched off on the incoming hidden state before it is replaced.
        `it` identifies the timestep and is folded into each gate's name.
        """
        step = str(it)

        # Gate activations from the previous hidden state and current input.
        f = F.sigmoid(self.f(h, x))
        i = F.sigmoid(self.i(h, x))
        c_ = F.tanh(self.c_(h, x))
        o = F.sigmoid(self.o(h, x))

        # Tag each gate tensor with its timestep for later inspection.
        f.name = "g_f" + step
        i.name = "g_i" + step
        c_.name = "g_c_" + step
        o.name = "g_o" + step

        # New cell state, then the hidden state derived from it.
        self.c = f * self.c + i * c_
        h.requires_grad = False
        return o * F.tanh(self.c)
Beispiel #3
0
    def test_der(self):
        """Sigmoid gradient check: d/dx sigmoid(x) == sigmoid(x) * (1 - sigmoid(x))."""
        source = make_autoTensor([[-3, 3], [4, 5]])
        source.requires_grad = True
        out = sigmoid(source)

        out.backprop(make_autoTensor([[1, 1], [1, 1]]))

        seed = make_autoTensor([[1, 1], [1, 1]])
        expected = seed.value * out.value * (1 - out.value)
        assert torch.sum(source.grad.value - expected) == 0
Beispiel #4
0
    def run(self, X, y, glove_vocab, glove_embed):
        """Train the LSTM node over (X, y) for a fixed number of epochs.

        X: list of batches; each batch is indexed [sequence][timestep].
        y: per-batch target values wrapped into autoTensor.
        glove_vocab / glove_embed: token list and matching embedding rows.
        """
        for epoch in range(115):
            print("epoch: ", epoch)
            epoch_loss = 0
            acc = 0
            n_batches = 0  # batches actually processed this epoch
            n_samples = 0  # rows seen, for the accuracy average
            for i in range(len(X)):
                if i == 6:
                    # Debug cap: only the first 6 batches are used.
                    break
                # Fresh cell state; hidden state starts at zero with grads on.
                self.node.c = autoTensor(torch.zeros(1, 25))
                y_t = autoTensor(y[i])
                h = autoTensor(torch.zeros(1, 25), requires_grad=True)

                batch = X[i]
                print("\t", i, len(batch[0]), end=" ")

                t = time()
                # Walk timestep j, stacking one embedding row per sequence k
                # into a single (len(batch), 25) input matrix.
                for j in range(len(batch[0])):
                    rows = []
                    for k in range(len(batch)):
                        try:
                            word = str(batch[k][j])
                            index = glove_vocab.index(word)
                            sub_x = glove_embed[index].view(1, 25)
                        except (ValueError, IndexError):
                            # Unknown word, or a sequence shorter than j:
                            # substitute a zero embedding (best-effort, as
                            # the original bare except did).
                            sub_x = glove_embed[0].view(1, 25) * 0
                        rows.append(sub_x)
                    x = autoTensor(torch.cat(rows))
                    h = self.node.forward(h, x, j)
                print("t:", round(time() - t, 4), end="\t")

                s = time()
                z = F.sigmoid(self.out(h))
                loss = Loss.BinaryCrossEntropy(z, y_t)
                print(loss, (get_accuracy_value(z, y_t), loss.value.size()[0]),
                      end=" ")

                loss.backward()
                op = Optimizer("sgd", loss, 0.00005)
                op.step()
                gc.collect()
                print(round(time() - s, 4))
                epoch_loss += loss.single()
                acc += get_accuracy_value(z, y_t)
                n_batches += 1
                n_samples += loss.value.size()[0]
            # Average over what was actually processed instead of the stale
            # hard-coded denominators (5 and 316) of the original.
            print("\nepoch summary: loss: ", epoch_loss / max(n_batches, 1),
                  " ACC: ", acc / max(n_samples, 1))
Beispiel #5
0
    def forward(self, X, y, glove_vocab, glove_embed):
        """Run one online-training pass over the sentences in X against targets y.

        Each sentence gets a fresh cell/hidden state; after the sequence is
        consumed, BCE loss is computed, backpropagated, and one SGD step taken.
        """
        for i, sentence in enumerate(X):
            self.node.c = autoTensor(torch.zeros(1, 25))
            y_t = autoTensor(y[i])
            h = autoTensor(torch.zeros(1, 25), requires_grad=True)
            print(i, end=" ")
            for word in sentence:
                try:
                    # Use a distinct name: the original rebound `i` here,
                    # shadowing the enumerate index.
                    idx = glove_vocab.index(word)
                    x = autoTensor(glove_embed[idx].view(1, 25))
                except ValueError:
                    # Out-of-vocabulary word: zero embedding of the same shape.
                    x = autoTensor(glove_embed[0].view(1, 25) * 0)
                h = self.node.forward(h, x)
            s = time()
            z = F.sigmoid(self.out(h))
            loss = Loss.BinaryCrossEntropy(z, y_t)
            print(loss, z.value.item(), y_t.value.item())
            loss.backward()
            op = Optimizer("sgd", loss, 0.0001)
            op.step()
            gc.collect()
            print(time() - s, end=" ")
Beispiel #6
0
        # y[i] - 1: presumably 1-based class labels mapped to 0-based columns
        # of a one-hot target row — confirm against the data loader.
        Y[i, y[i] - 1] = 1
    y = Y.astype(np.float64)

    # Wrap inputs and one-hot targets in autograd tensors.
    X = autoTensor(X)
    y = autoTensor(y)

    initer = Initializer("xavier")

    # Three-layer network: 400 -> 25 -> 25 -> 10, all Xavier-initialized.
    # Note the first layer's output is used without an activation.
    layer1 = Linear(400,25,initializer=initer)
    layer2 = Linear(25,25,initializer=initer)
    layer3 = Linear(25,10,initializer=initer)

    lr = 0.0001

    for x in range(1000):

        # Forward pass: linear -> relu(linear) -> sigmoid(linear).
        l1 = layer1(X)
        l2 = F.relu(layer2(l1))
        l3 = F.sigmoid(layer3(l2))
        loss = Loss.SquareError(l3,y)
        loss.backward()
        # NOTE(review): a fresh Optimizer is built every iteration, so any
        # momentum state must live outside it (e.g. in the graph) — verify
        # before refactoring this out of the loop.
        SGD = Optimizer("gdmomentum",loss,lr,beta=0.2)
        SGD.step()


        if x%50 == 0:
            print(x,loss,get_accuracy_value(l3,y))
        # grad_sweep: presumably clears accumulated gradients for the next
        # iteration — TODO confirm in the framework.
        loss.grad_sweep()

Beispiel #7
0
    def test_init(self):
        """A sigmoid output must keep a channel back to its input tensor."""
        source = make_autoTensor([[-3, 3], [4, 5]])
        source.requires_grad = True
        result = sigmoid(source)

        # The first channel of the result records the originating autoVariable.
        assert result.channels[0].autoVariable == source