import numpy as np
from numpy.testing import assert_array_equal

import synapse as sn
from synapse import Tensor
from synapse.nn.loss import MSE


class TestAutograd:
    def testMulGrad(self):
        d1 = np.random.uniform(0, 10, size=(5, 5))
        d2 = np.random.uniform(0, 10, size=(5, 5))

        t1 = sn.Tensor(d1, requires_grad=True)
        t2 = sn.Tensor(d2, requires_grad=True)
        t3 = t1 * t2  # 5x5

        initialGrad = sn.Tensor(np.ones_like(t1.data))
        t3.backward(initialGrad)

        assert_array_equal(t1.grad.data, t2.data)
        assert_array_equal(t2.grad.data, t1.data)
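
    # A hedged cross-check sketch (not in the original suite): a central
    # difference on sum(d1 * d2) should reproduce the analytic gradient
    # asserted by testMulGrad above.
    def testMulGradNumerical(self):
        d1 = np.random.uniform(0, 10, size=(5, 5))
        d2 = np.random.uniform(0, 10, size=(5, 5))

        t1 = sn.Tensor(d1, requires_grad=True)
        t2 = sn.Tensor(d2, requires_grad=True)
        (t1 * t2).backward(sn.Tensor(np.ones_like(d1)))

        eps = 1e-6
        numGrad = np.zeros_like(d1)
        for idx in np.ndindex(d1.shape):
            dp, dm = d1.copy(), d1.copy()
            dp[idx] += eps
            dm[idx] -= eps
            numGrad[idx] = ((dp * d2).sum() - (dm * d2).sum()) / (2 * eps)

        assert np.allclose(t1.grad.data, numGrad, atol=1e-4)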

    def testSumGrad(self):
        d1 = np.random.uniform(0, 10, size=(5, 5))
        t1 = sn.Tensor(d1, requires_grad=True)

        t2 = t1.sum()
        t2.backward()

        # sum() distributes an upstream grad of 1 to every input element,
        # so it is t1 (the input), not t2, whose grad is all ones
        assert_array_equal(t1.grad.data, np.ones_like(t1.data))
        assert t2.requires_grad

    def testNoGrad(self):
        data1 = np.random.uniform(-10, 10, size=(5, 5))
        data2 = np.random.uniform(-10, 10, size=(5, 5))
        with sn.no_grad():
            a = sn.Tensor(data1, requires_grad=True)
            b = sn.Tensor(data2, requires_grad=True)
            c = a.sum()
            d = a + b
            e = sn.matmul(a, b)
            f = a * b

            assert not a.requires_grad
            assert not b.requires_grad
            assert not c.requires_grad
            assert not d.requires_grad
            assert not e.requires_grad
            assert not f.requires_grad
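
    # A hedged sketch (not in the original suite): testNoGrad already calls
    # sn.matmul, so a gradient test for it could look like this, assuming the
    # standard rules dL/dA = G @ B.T and dL/dB = A.T @ G for upstream grad G.
    def testMatmulGrad(self):
        d1 = np.random.uniform(0, 10, size=(5, 5))
        d2 = np.random.uniform(0, 10, size=(5, 5))

        t1 = sn.Tensor(d1, requires_grad=True)
        t2 = sn.Tensor(d2, requires_grad=True)
        t3 = sn.matmul(t1, t2)  # 5x5

        G = np.ones_like(t3.data)
        t3.backward(sn.Tensor(G))

        # allclose rather than exact equality: the library's internal matmul
        # may round differently in the last floating-point bits
        assert np.allclose(t1.grad.data, G @ d2.T)
        assert np.allclose(t2.grad.data, d1.T @ G)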


# Demo: fit y = 2 * x by gradient descent on an elementwise weight W.

data = np.arange(1, 10, dtype=np.float64)
data = np.expand_dims(data, 0)

x = Tensor(data)
print(x.shape)

W = Tensor(np.zeros_like(data), requires_grad=True)
y = Tensor(2 * data)

maxEpoch = 1000
lr = 0.1

for epoch in range(maxEpoch):
    predicted = W * x

    loss = MSE(predicted, y)
    loss.backward()
    # Gradient descent step, then reset the accumulated gradient
    W.data = W.data - lr * W.grad.data
    W.zeroGrad()
    print(loss.data)

print(f"Prediction: {W.data}")