Example #1
 def mini_nn_forward():
     # MiniNN: a single linear layer followed by a ReLU
     relu = ReLU()
     input = Tensor(x_init)
     weights = Tensor(weight_init)
     bias = Tensor(bias_init)
     out = relu(input.dot(weights) + bias)
     return out.value
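For reference, the same forward pass can be reproduced in plain NumPy. The fixtures x_init, weight_init and bias_init are not shown on this page, so the sketch below substitutes random stand-ins; it assumes nothing about the Tensor class beyond the .dot, + and ReLU calls used above.

    import numpy as np

    # Hypothetical stand-ins for the fixtures used in Example #1
    x_init = np.random.randn(4, 8).astype(np.float32)
    weight_init = np.random.randn(8, 3).astype(np.float32)
    bias_init = np.random.randn(1, 3).astype(np.float32)

    # relu(x @ W + b): this is what mini_nn_forward() should return as out.value
    expected = np.maximum(x_init.dot(weight_init) + bias_init, 0)
    print(expected.shape)  # (4, 3)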
Example #2
    def __init__(self, in_feats, out_feats, bias=True):
        self.in_feats = in_feats
        self.out_feats = out_feats
        self.weights = Tensor.random_uniform(self.in_feats, self.out_feats)
        self.bias_flag = bias

        if self.bias_flag:
            self.bias = Tensor.random_uniform(1, self.out_feats)
Example #3
    def __call__(self, input):

        val = np.maximum(input.value, 0)
        output = Tensor(val, parents=(input, ), fun='ReLUBackward')

        def _backward():
            input.grad += output.grad * ((val > 0).astype(np.float32))

        output._backward = _backward

        return output
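The mask (val > 0) is the ReLU derivative evaluated on the saved forward output. A quick finite-difference check of that derivative, in plain NumPy and independent of the Tensor class:

    import numpy as np

    x = np.random.randn(5)
    eps = 1e-6

    analytic = (np.maximum(x, 0) > 0).astype(np.float64)
    numeric = (np.maximum(x + eps, 0) - np.maximum(x - eps, 0)) / (2 * eps)

    # Agrees everywhere except (with probability zero) exactly at x == 0
    print(np.allclose(analytic, numeric))  # True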
Example #4
    def __call__(self, input):
        with warnings.catch_warnings():
            warnings.filterwarnings('ignore')

            val = np.tanh(input.value)
        output = Tensor(val, parents=(input, ), fun='TanhBackward')

        def _backward():
            input.grad += output.grad * (1 - (val**2))

        output._backward = _backward

        return output
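The backward pass reuses the cached forward output, since d/dx tanh(x) = 1 - tanh(x)^2 = sech(x)^2; a one-line NumPy check of that identity:

    import numpy as np

    x = np.linspace(-3, 3, 101)
    print(np.allclose(1 - np.tanh(x)**2, 1 / np.cosh(x)**2))  # True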
Example #5
    def __call__(self, input, alpha=0.01):

        val = np.maximum(input.value, alpha * input.value)
        output = Tensor(val, parents=(input, ), fun='LeakyReLUBackward')

        grad = np.zeros_like(val)
        grad[(val > 0)] = 1
        grad[(val <= 0)] = alpha

        def _backward():
            input.grad += output.grad * (grad)

        output._backward = _backward

        return output
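Because the mask is built from val with val <= 0 mapped to alpha, the subgradient chosen at exactly zero is alpha rather than 1. The same mask construction, standalone:

    import numpy as np

    alpha = 0.01
    x = np.array([-2.0, 0.0, 3.0])
    val = np.maximum(x, alpha * x)

    grad = np.zeros_like(val)
    grad[val > 0] = 1
    grad[val <= 0] = alpha
    print(grad)  # [0.01 0.01 1.]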
Example #6
    def __call__(self, input, dim):
        # Softmax along axis `dim`
        exp = np.exp(input.value)
        denom = np.sum(exp, axis=dim, keepdims=True)
        val = exp / denom
        output = Tensor(val, parents=(input, ), fun='SoftmaxBackward')

        def _backward():
            # Vector-Jacobian product: s * (g - sum(g * s)) along `dim`
            R_bar = -np.sum(output.grad * exp, axis=dim, keepdims=True) / (denom**2)
            input.grad += (output.grad) * (exp / denom) + R_bar * exp

        output._backward = _backward

        return output
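The _backward above is the softmax vector-Jacobian product in disguise: with s the softmax output and g the upstream gradient, it reduces algebraically to s * (g - sum(g * s)). A NumPy check against the explicit Jacobian diag(s) - s s^T for a single row:

    import numpy as np

    x = np.random.randn(1, 4)
    g = np.random.randn(1, 4)                      # upstream gradient (output.grad)

    s = np.exp(x) / np.exp(x).sum(axis=1, keepdims=True)

    # Closed form used in _backward
    vjp = s * (g - np.sum(g * s, axis=1, keepdims=True))

    # Explicit Jacobian of softmax for one row
    J = np.diag(s[0]) - np.outer(s[0], s[0])
    print(np.allclose(vjp[0], J.T @ g[0]))         # True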
Example #7
 def __next__(self):
     if self.iter < self.num_batches:
         self.iter += 1
         # Sampling with replacement, so a batch may contain duplicate samples;
         # drawing indices without replacement would avoid that
         samp = np.random.randint(0,
                                  self.data.shape[0],
                                  size=(self.batch_size))
         X = self.data[samp].reshape((-1, 28 * 28)).astype(np.float32)
         Y = self.labels[samp]
         y = np.zeros((len(samp), 10), np.float32)
         y[range(y.shape[0]), Y] = 1
         self.raw_labels = Y
         # return Tensor(X, requires_grad=False), Tensor(y, requires_grad=False)
         return Tensor(X), Tensor(y)
     else:
         raise StopIteration
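The two lines that build y are a fancy-indexing one-hot encoding of the integer labels; np.eye gives an equivalent, arguably clearer formulation. A standalone comparison with made-up labels:

    import numpy as np

    Y = np.array([3, 0, 7])                        # integer class labels

    # Fancy-indexing version used in __next__
    y = np.zeros((len(Y), 10), np.float32)
    y[range(y.shape[0]), Y] = 1

    # Equivalent one-liner
    print(np.array_equal(y, np.eye(10, dtype=np.float32)[Y]))  # True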
Example #8
    def __call__(self, input):
        # Disable overflow warnings
        with warnings.catch_warnings():
            warnings.filterwarnings('ignore')

            # Split into two cases so the selected branch never overflows
            val = np.where(input.value >= 0, 1 / (1 + np.exp(-input.value)),
                           np.exp(input.value) / (1 + np.exp(input.value)))
        output = Tensor(val, parents=(input, ), fun='SigmoidBackward')

        def _backward():
            input.grad += output.grad * (val * (1 - val))

        output._backward = _backward

        return output
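Each np.where branch only exponentiates a non-positive number for the elements it actually supplies, so the selected values never overflow; np.where still evaluates both branches on every element, though, which is why the overflow warning has to be silenced. A standalone sketch using the standard warnings module:

    import warnings
    import numpy as np

    x = np.array([-1000.0, 0.0, 1000.0])

    with warnings.catch_warnings():
        warnings.filterwarnings('ignore')
        # The discarded branch may overflow, but its result is never used
        val = np.where(x >= 0,
                       1 / (1 + np.exp(-x)),
                       np.exp(x) / (1 + np.exp(x)))

    print(val)  # [0.  0.5 1. ]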
Example #9
    def __call__(self, input, dim):

        # log(softmax(x)) along axis `dim`; not numerically stabilised
        exp = np.exp(input.value)
        denom = np.sum(exp, axis=dim, keepdims=True)
        val = np.log(exp / denom)

        output = Tensor(val, parents=(input, ), fun='LogSoftmaxBackward')

        # Complete credit to TinyGrad for this gradient...
        def _backward():
            input.grad += (output.grad) - (
                np.exp(val) * np.sum(output.grad, axis=dim, keepdims=True))

        output._backward = _backward

        return output
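Computing log(exp(x) / sum(exp(x))) directly overflows once the logits are large; the numerically stable route is x - max(x) - log(sum(exp(x - max(x)))), which agrees wherever both are finite. A short sketch of the difference (the class above keeps the original, unstabilised formulation):

    import numpy as np

    x = np.array([[1000.0, 1001.0, 1002.0]])

    with np.errstate(over='ignore', invalid='ignore'):
        exp = np.exp(x)                                        # overflows to inf
        naive = np.log(exp / exp.sum(axis=1, keepdims=True))   # inf/inf -> nan

    shifted = x - x.max(axis=1, keepdims=True)
    stable = shifted - np.log(np.exp(shifted).sum(axis=1, keepdims=True))

    print(naive)   # [[nan nan nan]]
    print(stable)  # roughly [[-2.408 -1.408 -0.408]]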
Example #10
    def __call__(self, input):
        if self.p > 0:
            dropout_vals = np.random.binomial([np.ones(input.shape)],
                                              1 - self.p)[0]

            val = input.value * dropout_vals * (1 / (1 - self.p))

            output = Tensor(val, parents=(input, ), fun='DropoutBackward')

            def _backward():
                # As with ReLU, the mask is recovered from the saved output,
                # so inputs that were exactly zero are treated as dropped
                input.grad += output.grad * ((val != 0) * (1 / (1 - self.p)))

            output._backward = _backward

            return output

        else:
            return input
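The 1 / (1 - p) factor is inverted-dropout scaling: it keeps the expected value of every activation unchanged during training, so nothing needs to be rescaled at inference time. A quick empirical check:

    import numpy as np

    p = 0.3
    x = np.random.rand(100000).astype(np.float32)

    mask = np.random.binomial(1, 1 - p, size=x.shape)
    dropped = x * mask * (1 / (1 - p))

    # E[dropped] == E[x]; the empirical means agree up to sampling noise
    print(x.mean(), dropped.mean())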
Example #11
        def mini_nn_backward():
            relu = ReLU()
            criterion = MSELoss()

            input = Tensor(x_init)
            weights = Tensor(weight_regression_init)
            bias = Tensor(bias_regression_init)

            y = Tensor(y_init)

            out = relu((input.dot(weights)) + bias)

            loss = criterion(out, y)

            loss.backward()

            return out.value, weights.grad, bias.grad
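A natural sanity check for weights.grad and bias.grad is PyTorch's autograd on the same computation. The fixtures (x_init, weight_regression_init, bias_regression_init, y_init) are not shown here, so the sketch below uses random stand-ins and assumes torch is installed; whether the values match exactly also depends on the reduction convention of the library's MSELoss.

    import numpy as np
    import torch

    x_init = np.random.randn(4, 8).astype(np.float32)
    weight_regression_init = np.random.randn(8, 1).astype(np.float32)
    bias_regression_init = np.random.randn(1, 1).astype(np.float32)
    y_init = np.random.randn(4, 1).astype(np.float32)

    x = torch.tensor(x_init)
    w = torch.tensor(weight_regression_init, requires_grad=True)
    b = torch.tensor(bias_regression_init, requires_grad=True)
    y = torch.tensor(y_init)

    out = torch.relu(x.matmul(w) + b)
    loss = torch.nn.functional.mse_loss(out, y)
    loss.backward()

    # w.grad and b.grad are what weights.grad and bias.grad should match
    print(w.grad.shape, b.grad.shape)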
Example #12
        def mini_nn_backward():
            relu = ReLU()
            logsoftmax = LogSoftmax()
            criterion = NLLLoss()

            input = Tensor(x_init)
            weights = Tensor(weight_init)
            bias = Tensor(bias_init)

            # Build the one-hot target tensor - this construction should arguably live inside NLLLoss
            targets = np.zeros((len(targets_init), 500), np.float32)
            targets[range(targets.shape[0]), targets_init] = 1
            targets = Tensor(targets)

            out = relu((input.dot(weights)) + bias)
            out = logsoftmax(out, 1)

            loss = criterion(out, targets)

            loss.backward()

            return out.value, weights.grad, bias.grad
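The same cross-check works for the classification variant. PyTorch's nll_loss takes integer class indices directly, which is essentially the construction the comment above says should live inside NLLLoss. Random stand-ins again replace the unseen fixtures: the 500 output classes follow the target construction in Example #12, while the batch size and the input width of 784 are assumptions, and torch is assumed to be installed.

    import numpy as np
    import torch
    import torch.nn.functional as F

    x_init = np.random.randn(16, 784).astype(np.float32)
    weight_init = np.random.randn(784, 500).astype(np.float32)
    bias_init = np.random.randn(1, 500).astype(np.float32)
    targets_init = np.random.randint(0, 500, size=16)

    x = torch.tensor(x_init)
    w = torch.tensor(weight_init, requires_grad=True)
    b = torch.tensor(bias_init, requires_grad=True)
    t = torch.tensor(targets_init)                 # class indices, not one-hot

    out = F.log_softmax(torch.relu(x.matmul(w) + b), dim=1)
    loss = F.nll_loss(out, t)
    loss.backward()

    print(w.grad.shape, b.grad.shape)              # torch.Size([784, 500]) torch.Size([1, 500])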