Example #1
0
 def test_tinygrad():
     """Run a tiny dot→relu→logsoftmax graph forward and backward.

     Returns the scalar loss plus the gradients of the input and weight
     tensors, each pulled back to the host as CPU data.
     """
     # NOTE(review): `self` is referenced inside a free function — this snippet
     # appears lifted from a test-class method; confirm where `self.device`
     # (and x_init / W_init / m_init) are meant to come from.
     inp = Tensor(x_init, device=self.device)
     weight = Tensor(W_init, device=self.device)
     mask = Tensor(m_init, device=self.device)
     hidden = inp.dot(weight).relu()
     normed = hidden.logsoftmax()
     loss = normed.mul(mask).add(mask).sum()
     loss.backward()
     return loss.cpu().data, inp.grad.cpu().data, weight.grad.cpu().data
Example #2
0
 def __call__(self, x: Tensor) -> Tensor:
     """Apply the layer's affine transform: ``x @ weight + bias``.

     >>> x = Tensor.zeros(2, 10)
     >>> layer = Dense(10, 5)
     >>> y = layer(x)
     >>> y.shape
     (2, 5)
     """
     projected = x.dot(self.weight)
     return projected + self.bias
Example #3
0
 def test_tinygrad():
     """Forward and backward a small dot→relu→logsoftmax graph.

     Returns the scalar loss data and the raw gradients of the input and
     weight tensors (no device transfer in this variant).
     """
     # Assumes x_init / W_init / m_init and Tensor are defined by the
     # surrounding test module.
     inputs = Tensor(x_init)
     weights = Tensor(W_init)
     mask = Tensor(m_init)
     activated = inputs.dot(weights).relu()
     normed = activated.logsoftmax()
     loss = normed.mul(mask).add(mask).sum()
     loss.backward()
     return loss.data, inputs.grad, weights.grad
Example #4
0
 def test_tinygrad():
     """Backprop a tiny graph where the mask tensor takes no gradient.

     `m` is created with requires_grad=False, so only the input and weight
     tensors accumulate gradients; results are returned as CPU data.
     """
     # Assumes x_init / W_init / m_init and Tensor come from the test module.
     inputs = Tensor(x_init)
     weights = Tensor(W_init)
     mask = Tensor(m_init, requires_grad=False)
     activated = inputs.dot(weights).relu()
     normed = activated.logsoftmax()
     loss = normed.mul(mask).add(mask).sum()
     loss.backward()
     return loss.cpu().data, inputs.grad.cpu().data, weights.grad.cpu().data
Example #5
0
class TinyNet():
  """Minimal fixed-weight network over module-level init arrays.

  Holds input, weight, and mask tensors; `forward` reduces them to a
  single scalar loss tensor.
  """

  def __init__(self):
    # Copy each init array so the module-level globals are never
    # mutated through the tensors.
    self.x = Tensor(x_init.copy())
    self.W = Tensor(W_init.copy())
    self.m = Tensor(m_init.copy())

  def forward(self):
    """Return sum(logsoftmax(relu(x @ W)) * m + m) as a scalar tensor."""
    hidden = self.x.dot(self.W).relu()
    normed = hidden.logsoftmax()
    return normed.mul(self.m).add(self.m).sum()