def test_Exponent():
    """exp's gradient w.r.t. its input equals the output itself (d/dx e^x = e^x)."""
    inp = Tensor.randn(8, 10, requires_grad=True)
    out = MLlib.exp(inp)
    out.backward()
    # For the exponential, the analytic gradient is exactly the forward output.
    mismatch = not_close(inp.grad.data, out.data)
    if mismatch:
        raise AssertionError
def test_ExponentWithMathModule():
    """Raising math.e to a tensor power must give the same exp gradient (d/dx e^x = e^x)."""
    inp = Tensor.randn(8, 10, requires_grad=True)
    out = math.e**inp
    out.backward()
    # Gradient of e**x w.r.t. x is e**x itself, i.e. the forward output.
    mismatch = not_close(inp.grad.data, out.data)
    if mismatch:
        raise AssertionError
def test_Cos():
    """Gradient of cos is -sin (d/dx cos(x) = -sin(x))."""
    inp = Tensor.randn(6, 8, requires_grad=True)
    out = MLlib.cos(inp)
    out.backward()
    expected = -np.sin(inp.data)
    if not_close(inp.grad.data, expected):
        raise AssertionError
def test_Tan():
    """Gradient of tan is sec^2 (d/dx tan(x) = 1 / cos^2(x))."""
    inp = Tensor.randn(6, 8, requires_grad=True)
    out = MLlib.tan(inp)
    out.backward()
    expected = 1 / (np.cos(inp.data))**2
    if not_close(inp.grad.data, expected):
        raise AssertionError
Example #5
0
 def __init__(self, in_features, out_features):
     """Create the layer's trainable parameters.

     The weight matrix has shape (out_features, in_features); the bias is a
     scalar tensor. Both participate in autograd (requires_grad is set).
     """
     weight = Tensor.randn(out_features, in_features)
     weight.requires_grad = True
     self.weight = weight
     self.bias = Tensor(0., requires_grad=True)
Example #6
0
from MLlib import Tensor
from MLlib.regularizer import LinearRegWith_Regularization
from MLlib.regularizer import L1_Regularizer
from MLlib.optim import SGDWithMomentum
from MLlib.utils.misc_utils import printmat
import numpy as np

# Fixed seed so the randomly generated data — and therefore the printed
# loss — is reproducible across runs.
np.random.seed(5322)

x = Tensor.randn(10, 8)  # (batch_size, features)

y = Tensor.randn(10, 1)  # one target value per sample

# Linear regression over 8 input features with an L1 penalty, trained
# via SGD with momentum. NOTE(review): Lambda=7 appears to be the
# regularization strength — confirm against LinearRegWith_Regularization.
reg = LinearRegWith_Regularization(8,
                                   L1_Regularizer,
                                   optimizer=SGDWithMomentum,
                                   Lambda=7)

# Regularizer,optimizer and Lambda as per user's choice

# Fit for 800 iterations and print the final total loss.
printmat("Total Loss", reg.fit(x, y, 800))