Example #1
    @staticmethod  # presumed decorator: forward pass of an autograd Function
    def forward(ctx, prediction, target):
        if not (type(prediction).__name__ == 'Tensor'
                and type(target).__name__ == 'Tensor'):
            raise RuntimeError("Expected Tensors, got {} and {}. Please use "
                               ".loss() method for non-Tensor data".format(
                                   type(prediction).__name__,
                                   type(target).__name__))

        requires_grad = prediction.requires_grad

        batch_size = target.data.shape[0]

        # residual (prediction - target), cached for the backward pass
        out = prediction.data - target.data

        if requires_grad:
            ctx.derivative_core = out

        # mean squared error: sum((prediction - target)^2) / (2 * batch_size)
        out = np.sum(np.power(out, 2)) / (2 * batch_size)

        output = Tensor(out,
                        requires_grad=requires_grad,
                        is_leaf=not requires_grad)

        return output
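
A quick NumPy check of what this forward pass computes (plain arrays stand in for MLlib Tensors; values are illustrative):

import numpy as np

pred = np.array([[1.0, 2.0], [3.0, 4.0]])
target = np.array([[1.5, 2.0], [2.0, 4.0]])

batch_size = target.shape[0]
loss = np.sum((pred - target) ** 2) / (2 * batch_size)
print(loss)  # (0.25 + 0.0 + 1.0 + 0.0) / 4 = 0.3125
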
    def get_loss(self):
        """
        Calculates and returns the L2 regularization loss
        (Lambda times the sum of squared parameters).
        """
        reg_loss = Tensor(0., requires_grad=True)
        for param in self.params:
            reg_loss += (param**2).sum()
        return reg_loss * self.Lambda
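
A minimal sketch of how this penalty accumulates, using only calls that appear in this document (params and Lambda stand in for self.params and self.Lambda):

from MLlib import Tensor

params = [Tensor.randn(3, 3), Tensor.randn(3, 1)]  # stand-ins for model parameters
Lambda = 0.5  # regularization strength

reg_loss = Tensor(0., requires_grad=True)
for param in params:
    reg_loss += (param**2).sum()  # same accumulation as get_loss above

penalty = reg_loss * Lambda
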
def test_Exponent():
    a = Tensor.randn(8, 10, requires_grad=True)

    o = MLlib.exp(a)

    o.backward()

    if not_close(a.grad.data, o.data):
        raise AssertionError


def test_Cos():
    a = Tensor.randn(6, 8, requires_grad=True)

    o = MLlib.cos(a)

    o.backward()

    if not_close(a.grad.data, -np.sin(a.data)):
        raise AssertionError


def test_ExponentWithMathModule():
    a = Tensor.randn(8, 10, requires_grad=True)

    o = math.e**a

    o.backward()

    if not_close(a.grad.data, o.data):
        raise AssertionError


def test_Tan():
    a = Tensor.randn(6, 8, requires_grad=True)

    o = MLlib.tan(a)

    o.backward()

    if not_close(a.grad.data, 1 / (np.cos(a.data))**2):
        raise AssertionError


def test_LogWithNegativeValue():
    na = -np.abs(np.random.randn(6, 8))
    a = Tensor(na, requires_grad=True)

    b = MLlib.log(a)

    b.backward()

    if not_close(a.grad.data, 1 / a.data):
        raise AssertionError


def test_TanAtPiOverTwoWithMathModule():
    na = np.ones((8, 12)) * (math.pi) / 2

    a = Tensor(na, requires_grad=True)

    o = MLlib.tan(a)

    o.backward()

    if not_close(a.grad.data, 1 / (np.cos(a.data))**2):
        raise AssertionError
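
These tests assume MLlib, math, and numpy (as np) are imported at module level, and rely on a not_close helper that isn't shown; a plausible definition of the helper, assuming NumPy-style tolerance checking:

import numpy as np

def not_close(a, b, rtol=1e-05, atol=1e-08):
    # True when the two arrays differ beyond the given tolerances
    return not np.allclose(a, b, rtol=rtol, atol=atol)
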
Example #9
    @staticmethod  # presumed decorator: backward pass paired with the MSE forward above
    def backward(ctx, grad_output):
        derivative = ctx.derivative_core

        # d/d(prediction) of sum((prediction - target)^2) / (2 * batch_size)
        # is (prediction - target) / batch_size, chained with grad_output
        grad_prediction = (derivative / derivative.shape[0]) * grad_output.data

        return Tensor(unbroadcast(grad_prediction, derivative.shape))
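
unbroadcast is not shown in this snippet; in small autograd libraries it typically reduces a gradient back to the shape of the original operand by summing over broadcast axes. A hedged sketch of that idea:

import numpy as np

def unbroadcast(grad, shape):
    # collapse leading axes that broadcasting added
    while grad.ndim > len(shape):
        grad = grad.sum(axis=0)
    # sum along axes that were expanded from size 1
    for axis, size in enumerate(shape):
        if size == 1:
            grad = grad.sum(axis=axis, keepdims=True)
    return grad
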
def gen_mT(*args):
    # generates Tensor from np.arrays with requires_grad=True
    tnsrs = list()
    for na in args:
        tnsrs.append(Tensor(na, requires_grad=True))
    return tuple(tnsrs)
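
Usage is straightforward (the arrays are illustrative):

import numpy as np

na1, na2 = np.ones((2, 3)), np.zeros((3, 2))
t1, t2 = gen_mT(na1, na2)  # two Tensors, both with requires_grad=True
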
from MLlib import Tensor
from MLlib.functional import Pad2d, im2col, Conv2d

a = Tensor([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]],
           requires_grad=True)

kernel = Tensor([[1, 2], [3, 4]], requires_grad=True)

# pad the 4x4 input (pad value 0, one element per side) -> 6x6
b = Pad2d.apply(a, 0, pad=(1, 1))

# unroll the padded input into columns of kernel-sized patches (stride 2)
c = im2col.apply(b, (kernel.shape[-2], kernel.shape[-1]), 2)

# convolve: dot each column with the flattened 2x2 kernel
d = Conv2d.apply(c, kernel, b, (kernel.shape[-2], kernel.shape[-1]), 2)

print(d)

d.backward()

print(a.grad)
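
For intuition, a NumPy-only sketch of the same pad -> im2col -> convolve pipeline (an illustration of the idea, not MLlib's implementation; it assumes zero padding of one element per side and stride 2, as above):

import numpy as np

a = np.arange(1, 17).reshape(4, 4)
kernel = np.array([[1, 2], [3, 4]])
kh, kw, stride = 2, 2, 2

padded = np.pad(a, 1)  # zero-pad by one element on each side -> 6x6

# im2col: gather every kh x kw patch at the given stride into a column
cols = np.array([
    padded[i:i + kh, j:j + kw].ravel()
    for i in range(0, padded.shape[0] - kh + 1, stride)
    for j in range(0, padded.shape[1] - kw + 1, stride)
]).T  # shape (kh*kw, n_patches)

out = (kernel.ravel() @ cols).reshape(3, 3)  # forward convolution result
print(out)
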
from MLlib import Tensor
import MLlib.optim as optim
import MLlib.nn as nn
from MLlib.models import Sequential
from MLlib.activations import Relu
from MLlib.loss_func import MSELoss
import numpy as np  # for features and target generation

np.random.seed(5322)

model = Sequential(nn.Linear(4, 16, activation_fn=Relu),
                   nn.Linear(16, 8, activation_fn=Relu), nn.Linear(8, 2))

X = Tensor(np.random.randn(10, 4))  # (batch_size, features)
Y = Tensor(np.random.randn(10, 2))  # (batch_size, output)

nb_epochs = 800  # number of epochs

alpha = 0.001  # learning rate

# SGD-with-momentum optimizer
optimizer = optim.SGDWithMomentum(model.parameters(), alpha, momentum=0.9)

loss_fn = MSELoss()  # Mean Squared Error loss

for i in range(nb_epochs):

    pred = model(X)

    loss = loss_fn(pred, Y)

    # the snippet is truncated here; the usual autograd training step would
    # presumably continue as below (zero_grad is an assumption about the
    # optimizer's API):
    loss.backward()
    optimizer.step()
    optimizer.zero_grad()
Example #13
    def __init__(self, in_features, out_features):
        # trainable scalar bias and (out_features, in_features) weight matrix
        self.bias = Tensor(0., requires_grad=True)
        self.weight = Tensor.randn(out_features, in_features)
        self.weight.requires_grad = True
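
A shape sketch of what such a layer computes; the affine map below is an assumption based on the weight shape above, written in plain NumPy rather than MLlib Tensors:

import numpy as np

in_features, out_features = 4, 2
weight = np.random.randn(out_features, in_features)  # as in __init__ above
bias = 0.0

x = np.random.randn(10, in_features)  # a batch of 10 samples
y = x @ weight.T + bias               # -> shape (10, out_features)
print(y.shape)                        # (10, 2)
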
Example #14
from MLlib import Tensor
from MLlib.regularizer import LinearRegWith_Regularization
from MLlib.regularizer import L1_Regularizer
from MLlib.optim import SGDWithMomentum
from MLlib.utils.misc_utils import printmat
import numpy as np

np.random.seed(5322)

x = Tensor.randn(10, 8)  # (batch_size, features)

y = Tensor.randn(10, 1)

reg = LinearRegWith_Regularization(8,
                                   L1_Regularizer,
                                   optimizer=SGDWithMomentum,
                                   Lambda=7)

# Regularizer, optimizer and Lambda as per the user's choice

printmat("Total Loss", reg.fit(x, y, 800))
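
For comparison with the L2 penalty shown earlier, the L1 penalty selected here is Lambda times the sum of absolute parameter values; a small NumPy illustration with made-up values:

import numpy as np

w = np.array([0.5, -1.5, 2.0])
Lambda = 7

l1_penalty = Lambda * np.sum(np.abs(w))  # 7 * 4.0 = 28.0
l2_penalty = Lambda * np.sum(w ** 2)     # 7 * 6.5 = 45.5
print(l1_penalty, l2_penalty)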