def test_init(self):
    obj1 = make_autoTensor([[-3, 3, 7], [4, 5, 9]])
    obj2 = make_autoTensor([[-3, 3], [4, 5], [0, 0]])
    obj1.requires_grad = True
    obj = MatMul(obj1, obj2)
    assert obj.channels[0].autoVariable == obj1
def test_init(self):
    obj1 = make_autoTensor([[-3, 3], [4, 5]])
    obj2 = make_autoTensor([[-3, 3], [4, 5]])
    obj1.requires_grad = True
    obj = Divide(obj1, obj2)
    assert obj.channels[0].autoVariable == obj1
def test_init(self):
    y = make_autoTensor(torch.ones(5, 1))
    y_pred = make_autoTensor(torch.rand(5, 1))
    loss = SquareError(y_pred=y_pred, y_target=y)
    assert loss.channels[0].autoVariable == y_pred
def test_der(self):
    obj1 = make_autoTensor([[-3, 3], [4, 5]])
    obj2 = make_autoTensor([[-3, 3], [4, 5]])
    obj2.requires_grad = True
    obj = Substract(obj1, obj2)
    obj.backprop(make_autoTensor([[1, 1], [1, 1]]))
    # d(obj1 - obj2)/d(obj2) = -1, so the gradient is the negated upstream signal.
    assert torch.sum(obj2.grad.value + make_autoTensor([[1, 1], [1, 1]]).value) == 0
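# A minimal cross-check sketch of the subtraction rule tested above, assuming
# only plain torch.autograd semantics (the method name is new here):
# d(a - b)/db = -1 times the upstream gradient.
def test_der_torch_subtract_sketch(self):
    a = torch.tensor([[-3., 3.], [4., 5.]])
    b = torch.tensor([[-3., 3.], [4., 5.]], requires_grad=True)
    (a - b).backward(torch.ones(2, 2))
    assert torch.equal(b.grad, -torch.ones(2, 2))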
def test_der(self):
    obj1 = make_autoTensor([[-3, 3], [4, 5]])
    obj2 = make_autoTensor([[-3, 3], [4, 5]])
    obj1.requires_grad = True
    obj = Multiply(obj1, obj2)
    obj.backprop(make_autoTensor([[1, 1], [1, 1]]))
    # d(obj1 * obj2)/d(obj1) = obj2, scaled elementwise by the upstream ones.
    assert torch.sum(obj1.grad.value - obj2.value) == 0
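# Cross-check sketch for the product rule above, assuming plain torch.autograd:
# with an all-ones upstream gradient, d(a * b)/da reduces to b.
def test_der_torch_multiply_sketch(self):
    a = torch.tensor([[-3., 3.], [4., 5.]], requires_grad=True)
    b = torch.tensor([[-3., 3.], [4., 5.]])
    (a * b).backward(torch.ones(2, 2))
    assert torch.equal(a.grad, b)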
def test_der(self):
    obj1 = make_autoTensor([[-3, 3], [4, 5]])
    obj1.requires_grad = True
    obj = relu(obj1)
    obj.backprop(make_autoTensor([[1, 1], [1, 1]]))
    # relu passes the gradient through where the input is positive, zero elsewhere.
    assert torch.sum(obj1.grad.value - make_autoTensor([[0, 1], [1, 1]]).value) == 0
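# Cross-check sketch, assuming plain torch semantics: relu's gradient is the
# upstream signal masked to the positive entries of the input, matching the
# [[0, 1], [1, 1]] pattern asserted above.
def test_der_torch_relu_sketch(self):
    x = torch.tensor([[-3., 3.], [4., 5.]], requires_grad=True)
    torch.relu(x).backward(torch.ones(2, 2))
    assert torch.equal(x.grad, torch.tensor([[0., 1.], [1., 1.]]))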
def test_der(self):
    obj1 = make_autoTensor([[-3, 3, 7], [4, 5, 9]])
    obj2 = make_autoTensor([[-3, 3], [4, 5], [0, 0]])
    obj1.requires_grad = True
    obj = MatMul(obj1, obj2)
    obj.backprop(make_autoTensor([[1, 1], [1, 1]]))
    # For C = A @ B with upstream gradient G, dL/dA = G @ B^T.
    assert torch.sum(obj1.grad.value - torch.mm(make_autoTensor([[1, 1], [1, 1]]).value, obj2.value.transpose(1, 0))) == 0
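# Cross-check sketch for the matmul rule above, assuming plain torch.autograd:
# for C = A @ B with upstream gradient G, dL/dA = G @ B^T (and dL/dB = A^T @ G).
def test_der_torch_matmul_sketch(self):
    A = torch.tensor([[-3., 3., 7.], [4., 5., 9.]], requires_grad=True)
    B = torch.tensor([[-3., 3.], [4., 5.], [0., 0.]])
    torch.mm(A, B).backward(torch.ones(2, 2))
    assert torch.allclose(A.grad, torch.mm(torch.ones(2, 2), B.t()))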
def test_der(self):
    obj1 = make_autoTensor([[-3, 3], [4, 5]])
    obj1.requires_grad = True
    obj = Power(obj1, 3)
    obj.backprop(make_autoTensor([[1, 1], [1, 1]]))
    # d(x^3)/dx = 3 * x^2, scaled by the upstream gradient.
    assert torch.sum(obj1.grad.value - 3 * make_autoTensor([[1, 1], [1, 1]]).value * obj1.value ** 2) == 0
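# Cross-check sketch for the power rule above, assuming plain torch.autograd:
# d(x^n)/dx = n * x^(n-1), here with n = 3.
def test_der_torch_power_sketch(self):
    x = torch.tensor([[-3., 3.], [4., 5.]], requires_grad=True)
    (x ** 3).backward(torch.ones(2, 2))
    assert torch.allclose(x.grad, 3 * x.detach() ** 2)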
def test_der(self):
    obj1 = make_autoTensor([[-3, 3], [4, 5]])
    obj1.requires_grad = True
    obj = Sum(obj1)
    obj.backprop(make_autoTensor([1, 1]))
    # The gradient of a sum broadcasts the upstream signal across the input's shape.
    assert torch.sum(obj1.grad.value - make_autoTensor([1, 1]).value * torch.ones(2, 2)) == 0
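# Cross-check sketch, assuming Sum behaves like a full reduction: the gradient
# of a scalar sum is a tensor of ones in the input's shape.
def test_der_torch_sum_sketch(self):
    x = torch.tensor([[-3., 3.], [4., 5.]], requires_grad=True)
    x.sum().backward()
    assert torch.equal(x.grad, torch.ones(2, 2))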
def test_der(self):
    y = make_autoTensor(torch.ones(5, 1))
    y_pred = make_autoTensor(torch.rand(5, 1))
    y_pred.requires_grad = True
    loss = SquareError(y_pred, y)
    loss.backward()
    # The squared-error gradient w.r.t. the predictions is y_pred - y_target.
    assert torch.equal(y_pred.grad.value, y_pred.value - y.value)
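# Cross-check sketch: the assertion above implies SquareError is scaled like
# 0.5 * sum((y_pred - y)^2); that scaling is an assumption, reproduced here in
# plain torch so the gradient comes out as exactly y_pred - y.
def test_der_torch_square_error_sketch(self):
    y = torch.ones(5, 1)
    y_pred = torch.rand(5, 1, requires_grad=True)
    loss = 0.5 * torch.sum((y_pred - y) ** 2)
    loss.backward()
    assert torch.allclose(y_pred.grad, y_pred.detach() - y)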
def test_der(self):
    obj1 = make_autoTensor([[-3, 3], [4, 5]])
    obj2 = make_autoTensor([[-3, 3], [4, 5]])
    obj1.requires_grad = True
    obj2.requires_grad = True
    obj = Divide(obj1, obj2)
    obj.backprop(make_autoTensor([[1, 1], [1, 1]]))
    # d(obj1/obj2)/d(obj1) = 1/obj2.
    assert torch.sum(obj1.grad.value - 1 / obj2.value) == 0
    # d(obj1/obj2)/d(obj2) = -obj1/obj2^2.
    assert torch.sum(obj2.grad.value + obj1.value / obj2.value ** 2) == 0
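# Cross-check sketch for the quotient rule above, assuming plain torch.autograd:
# d(a/b)/da = 1/b and d(a/b)/db = -a/b^2.
def test_der_torch_divide_sketch(self):
    a = torch.tensor([[-3., 3.], [4., 5.]], requires_grad=True)
    b = torch.tensor([[-3., 3.], [4., 5.]], requires_grad=True)
    (a / b).backward(torch.ones(2, 2))
    assert torch.allclose(a.grad, 1 / b.detach())
    assert torch.allclose(b.grad, -a.detach() / b.detach() ** 2)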
def test_make_autoTensor(self):
    obj = make_autoTensor(0)
    assert isinstance(obj, autoTensor)
    obj = make_autoTensor(True)
    assert isinstance(obj, autoTensor)
    obj = make_autoTensor(torch.rand(1))
    assert isinstance(obj, autoTensor)
    obj = make_autoTensor(obj)
    assert isinstance(obj, autoTensor)
def test_grad_sweep(self):
    obj1 = make_autoTensor([[-3, 3], [4, 5]])
    obj2 = make_autoTensor([[-3, 3], [4, 5]])
    obj3 = make_autoTensor([[-3, 3], [4, 5]])
    obj4 = make_autoTensor([[1, 1], [1, 1]])
    obj1.requires_grad = True
    obj2.requires_grad = False
    obj3.requires_grad = True
    obj4.requires_grad = True
    obj5 = (obj1 + obj2) + (obj3 * obj4)
    obj5.backprop(make_autoTensor([[-3, 3], [4, 5]]))
    obj5.grad_sweep()
    assert torch.sum(obj1.grad.value) == 0
    assert obj2.grad is None
    assert torch.sum(obj3.grad.value) == 0
    assert torch.sum(obj4.grad.value) == 0
def test_grad_zeros(self):
    obj = make_autoTensor([[-3, 3], [4, 5]])
    obj.grad = make_autoTensor([[-3, 3], [4, 5]])
    obj.grad_zeros()
    assert torch.sum(obj.grad.value) == 0
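# Analogue sketch in plain torch: zeroing an accumulated gradient in place,
# which appears to be the behavior grad_zeros emulates.
def test_torch_grad_zero_sketch(self):
    t = torch.tensor([[-3., 3.], [4., 5.]], requires_grad=True)
    t.sum().backward()
    t.grad.zero_()
    assert torch.sum(t.grad) == 0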