Example #1
    def backward(ctx, grad_output):
        a, b = ctx.saved_tensors

        # d(a / b)/da = 1 / b
        grad_a = grad_output.data / b.data
        # d(a / b)/db = -a / b**2
        grad_b = (-1) * a.data * grad_output.data / (b.data**2)

        # collapse any broadcast axes so each gradient matches its input's shape
        grad_a = MLlib.Tensor(unbroadcast(grad_a, a.shape))
        grad_b = MLlib.Tensor(unbroadcast(grad_b, b.shape))

        return grad_a, grad_b
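Every one of these backward methods funnels its raw NumPy gradient through unbroadcast before wrapping it in a tensor. MLlib's actual implementation is not shown on this page; the sketch below is an assumption about what such a helper typically does: sum out the leading axes that broadcasting prepended, then sum (keeping dims) over axes that were stretched from size 1.

import numpy as np

def unbroadcast(grad, shape):
    # sum out leading axes that broadcasting prepended
    while grad.ndim > len(shape):
        grad = grad.sum(axis=0)
    # sum over axes that were stretched from size 1, keeping them as size 1
    for axis, dim in enumerate(shape):
        if dim == 1:
            grad = grad.sum(axis=axis, keepdims=True)
    return grad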
Example #2
    def backward(ctx, grad_output):
        shape_a, shape_b = ctx.shape_a, ctx.shape_b

        # d(a - b)/da = +1 and d(a - b)/db = -1, scaled by the upstream gradient
        grad_a = np.ones(shape_a) * grad_output.data
        grad_b = np.ones(shape_b) * grad_output.data * (-1)

        grad_a = MLlib.Tensor(unbroadcast(grad_a, shape_a))
        grad_b = MLlib.Tensor(unbroadcast(grad_b, shape_b))

        return grad_a, grad_b
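A quick sanity check of those ±1 constants and the unbroadcast step, reusing the hypothetical helper sketched above:

a = np.array([[1.0, 2.0]])    # shape (1, 2)
b = np.array([[3.0], [4.0]])  # shape (2, 1)
g = np.ones((2, 2))           # upstream gradient of the broadcasted a - b
# the broadcast rows collapse for a, the columns (negated) for b
assert unbroadcast(g, a.shape).tolist() == [[2.0, 2.0]]
assert unbroadcast(-g, b.shape).tolist() == [[-2.0], [-2.0]]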
Example #3
    def backward(ctx, grad_output):
        grad_output = grad_output.data
        a, b = ctx.saved_tensors

        # for C = A @ B: dL/dA = G @ B.T and dL/dB = A.T @ G
        grad_a = grad_output @ b.data.T
        grad_b = a.data.T @ grad_output

        grad_a = MLlib.Tensor(unbroadcast(grad_a, a.shape))
        grad_b = MLlib.Tensor(unbroadcast(grad_b, b.shape))

        return grad_a, grad_b
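The transposes come straight from the chain rule: with C = A @ B and upstream gradient G of C's shape, dL/dA = G @ B.T and dL/dB = A.T @ G. A hypothetical shape check makes the pattern easy to remember:

A = np.random.randn(3, 4)
B = np.random.randn(4, 5)
G = np.ones((3, 5))                  # same shape as A @ B
assert (G @ B.T).shape == A.shape    # (3, 4)
assert (A.T @ G).shape == B.shape    # (4, 5)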
Example #4
    def backward(ctx, grad_output):
        a, b, output = ctx.saved_tensors

        grad_a = grad_b = None

        if ctx.a_req_grad:
            # d(a**b)/da = b * a**(b - 1)
            grad_a = b.data * np.power(a.data, b.data - 1) * grad_output.data
            grad_a = MLlib.Tensor(unbroadcast(grad_a, a.shape))

        if ctx.b_req_grad:
            # d(a**b)/db = a**b * ln(a); reuses the saved forward output
            grad_b = output.data * np.log(a.data) * grad_output.data
            grad_b = MLlib.Tensor(unbroadcast(grad_b, b.shape))

        return grad_a, grad_b
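Both branches are the standard partials of a**b: b * a**(b - 1) with respect to the base and a**b * ln(a) with respect to the exponent (only real for a > 0), which is why the forward output is saved and reused. A hypothetical finite-difference check:

a, b, eps = 2.0, 3.0, 1e-6
# d(a**b)/da = b * a**(b - 1) = 12
assert abs(((a + eps)**b - a**b) / eps - b * a**(b - 1)) < 1e-4
# d(a**b)/db = a**b * ln(a)
assert abs((a**(b + eps) - a**b) / eps - a**b * np.log(a)) < 1e-4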
Example #5
    def backward(ctx, grad_output):
        o = ctx.saved_tensors[0]

        # sigmoid'(x) = sigmoid(x) * (1 - sigmoid(x)), written via the saved output
        grad_o = o.data * (1 - o.data) * grad_output.data
        grad_o = MLlib.Tensor(unbroadcast(grad_o, o.shape))

        return grad_o
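This uses the identity sigmoid'(x) = sigmoid(x) * (1 - sigmoid(x)), so only the forward output has to be saved, never the input. A quick hypothetical check in plain NumPy:

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

x, eps = 0.5, 1e-6
o = sigmoid(x)
assert abs((sigmoid(x + eps) - o) / eps - o * (1 - o)) < 1e-4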
Example #6
    def backward(ctx, grad_output):
        o = ctx.saved_tensors[0]

        # ReLU'(x) is 1 where x > 0 and 0 elsewhere; the boolean mask is cast to ints
        grad_o = np.greater(o.data, 0).astype(int) * grad_output.data
        grad_o = MLlib.Tensor(unbroadcast(grad_o, o.shape))

        return grad_o
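np.greater produces a boolean mask of the positive entries; casting it to int yields ReLU's 0/1 derivative (with the subgradient at exactly 0 taken as 0). For example:

o = np.array([-2.0, 0.0, 3.0])
g = np.array([10.0, 10.0, 10.0])
# the upstream gradient passes through only where the saved value is positive
assert (np.greater(o, 0).astype(int) * g).tolist() == [0.0, 0.0, 10.0]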
Example #7
    def backward(ctx, grad_output):
        grad_output = grad_output.data
        a, b = ctx.saved_tensors

        if len(grad_output.shape) > 0:
            # matrix case: dL/dA = G @ B.T and dL/dB = A.T @ G
            grad_a = grad_output.dot(b.data.T)
            grad_b = a.data.T.dot(grad_output)
        else:
            # 0-d grad_output: the chain rule reduces to scalar multiplication
            grad_a = grad_output * b.data.T
            grad_b = a.data.T * grad_output

        grad_a = MLlib.Tensor(unbroadcast(grad_a, a.shape))
        grad_b = MLlib.Tensor(unbroadcast(grad_b, b.shape))

        return grad_a, grad_b
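The else branch exists because grad_output can be 0-d, e.g. after the dot result is reduced to a scalar; multiplying by the scalar is the correct chain-rule step there. A hypothetical illustration:

b = np.array([[1.0, 2.0], [3.0, 4.0]])
g = np.array(2.0)              # 0-d upstream gradient, len(g.shape) == 0
# scalar branch: elementwise multiplication instead of a matrix product
assert (g * b.T).tolist() == [[2.0, 6.0], [4.0, 8.0]]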
Example #8
def test_DivSum():
    a = np.random.randn(4, 6, 8)
    b = np.random.randn(6, 8)

    ma, mb = gen_mT(a, b)

    # broadcasted division followed by a reduction to a scalar
    mo = (ma / mb).sum()

    mo.backward()

    # d(sum(a / b))/db = -a / b**2, summed back down to b's shape
    if not_close(mb.grad.data, unbroadcast(-a / (b**2), b.shape)):
        raise AssertionError

    # d(sum(a / b))/da = 1 / b, broadcast across a's leading axis
    if not_close(ma.grad.data, 1 / (mb.data)):
        raise AssertionError
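The test relies on two helpers that are not shown here; their exact definitions in the repository are an assumption on my part. A minimal sketch that would make it self-contained: gen_mT wraps NumPy arrays in gradient-tracking MLlib tensors, and not_close negates np.allclose.

def gen_mT(*arrays):
    # hypothetical helper: wrap each array in a gradient-tracking tensor
    return tuple(MLlib.Tensor(arr, requires_grad=True) for arr in arrays)

def not_close(x, y):
    # hypothetical helper: True when x and y differ beyond tolerance
    return not np.allclose(x, y)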
Example #9
    def backward(ctx, grad_output):
        derivative = ctx.derivative_core

        # average the cached derivative over the batch (first) dimension
        grad_prediction = (derivative / derivative.shape[0]) * grad_output.data

        return Tensor(unbroadcast(grad_prediction, derivative.shape))
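Here ctx.derivative_core is presumably the raw loss derivative cached during the forward pass, and dividing by derivative.shape[0] averages it over the batch. A hypothetical MSE-style forward that would pair with this backward (the real forward is not shown on this page):

def forward(ctx, prediction, target):
    diff = prediction.data - target.data
    # cache d/d(prediction) of the per-batch summed squared error
    ctx.derivative_core = 2 * diff
    # mean over the batch axis, summed over the rest, matching the backward
    return Tensor(np.power(diff, 2).mean(axis=0).sum())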