Example #1
def test_gradients():
    a, b = Tensor.from_builtin([1, 2, 3]), Tensor.from_builtin([[1, 2, 3], [4, 5, 6]])
    x, y = coalesce(a, b)

    x_gradients = Gradients.trace((x * x).sum())
    assert np.all(x_gradients[a] == [4, 8, 12])
    with pytest.raises(KeyError):
        x_gradients[b]

    y_gradients = Gradients.trace((y * y).sum())
    assert np.all(y_gradients[b] == [[2, 4, 6], [8, 10, 12]])
    with pytest.raises(KeyError):
        y_gradients[a]
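
A note on the expected values, assuming `coalesce` broadcasts its arguments to a common shape (which the shapes here suggest): `a` is repeated across the two rows of `b`, so the gradient of `(x * x).sum()` with respect to `a` is `2 * a` accumulated over both broadcast rows. A standalone NumPy sketch of that arithmetic, independent of the library under test:

import numpy as np

a = np.array([1.0, 2.0, 3.0])
x = np.broadcast_to(a, (2, 3))  # the shape coalesce presumably gives `a` here
print((2 * x).sum(axis=0))      # [4, 8, 12]: 2*a, summed over both rows

The KeyError assertions also show that each trace only covers tensors on its own computational path: `b` never feeds `x`, so it has no entry in `x_gradients`, and vice versa.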
Example #2
def test_gradients():
    logits = Tensor.from_builtin([1, 2, 3])
    probabilities = logits.sigmoid()
    gradients = Gradients._trace(
        Gradient(tensor=probabilities, gradient=np.array([1, 0, -1]))
    )
    assert np.allclose(gradients[logits], [0.1966, 0, -0.0452], atol=1e-4)
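
The expected values follow from the sigmoid derivative s'(x) = s(x) * (1 - s(x)), scaled elementwise by the seeded upstream gradient [1, 0, -1]. A standalone NumPy check, not part of the library under test:

import numpy as np

s = 1 / (1 + np.exp(-np.array([1.0, 2.0, 3.0])))
print(np.array([1, 0, -1]) * s * (1 - s))  # ~[0.1966, 0.0, -0.0452]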
Example #3
def test_gradients():
    left = Tensor.from_builtin([2, 3, 4])
    right = Tensor.from_builtin([3, 2, 1])
    result = left - right
    gradients = Gradients._trace(Gradient(tensor=result, gradient=np.array([1, 2, 3])))
    assert np.all(gradients[left] == [1, 2, 3])
    assert np.all(gradients[right] == [-1, -2, -3])
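
Subtraction is the simplest case: the upstream gradient [1, 2, 3] passes through unchanged to the left operand and negated to the right one, hence the mirrored assertions.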
Example #4
def test_gradients_full():
    logits = Tensor.from_builtin([1, 2, 3])
    probabilities = logits.softmax()
    gradients = Gradients._trace(
        Gradient(tensor=probabilities, gradient=np.array([1, 0, 0]))
    )
    assert np.allclose(gradients[logits], [0.0819, -0.0220, -0.0599], atol=1e-4)
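
The asserted numbers are the softmax Jacobian-vector product p * (g - g.p), with p = softmax([1, 2, 3]) and upstream gradient g = [1, 0, 0]. A quick NumPy reproduction, independent of the library's API:

import numpy as np

x = np.array([1.0, 2.0, 3.0])
p = np.exp(x) / np.exp(x).sum()
g = np.array([1.0, 0.0, 0.0])
print(p * (g - g @ p))  # ~[0.0819, -0.0220, -0.0599]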
Example #5
def test_gradients():
    left = Tensor.from_builtin([2, 3, 4])
    right = Tensor.from_builtin([3, 2, 1])
    result = left / right
    gradients = Gradients._trace(Gradient(tensor=result, gradient=np.array([1, 2, 3])))
    assert np.allclose(gradients[left], [1 / 3, 2 / 2, 3 / 1])
    assert np.allclose(gradients[right], [-2 / 9, -6 / 4, -12 / 1])
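
Division follows the quotient rule: d(l/r)/dl = 1/r and d(l/r)/dr = -l/r**2, each scaled by the upstream gradient. The same arithmetic in plain NumPy:

import numpy as np

left, right = np.array([2.0, 3.0, 4.0]), np.array([3.0, 2.0, 1.0])
g = np.array([1.0, 2.0, 3.0])
print(g / right)               # [1/3, 2/2, 3/1]
print(g * -left / right ** 2)  # [-2/9, -6/4, -12/1]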
Example #6
def test_chain():
    tensor = Tensor.from_builtin([1, 2, 3])
    output = -ops.sum(tensor)
    gradients = Gradients.trace(output)

    assert np.allclose(gradients[tensor], [-1, -1, -1])
    assert np.allclose(gradients[output], 1)
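
Gradients.trace seeds the traced output with gradient 1, which is what the final assertion checks; negation flips that seed to -1, and sum then fans it out to every element of the input.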
Example #7
def test_accumulate():
    start = Tensor.from_builtin([1, 2, 3])
    intermediate = start * start
    end = ops.sum(start - intermediate)
    gradients = Gradients.trace(end)

    assert np.allclose(gradients[start], [-1, -3, -5])
    assert np.allclose(gradients[intermediate], [-1, -1, -1])
    assert np.allclose(gradients[end], 1)
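
`start` feeds the graph twice, once directly through the subtraction and once through `intermediate = start * start`, so its gradient accumulates both paths: 1 - 2 * start = 1 - 2 * [1, 2, 3] = [-1, -3, -5]. `intermediate` itself only sees the -1 contributed by the subtraction.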
Example #8
def test_gradients():
    tensor = Tensor.from_builtin([2, 3, 4])
    low = Tensor.from_builtin([1, 2, 1])
    high = Tensor.from_builtin([3, 4, 2])
    result = tensor.clip(low, high)
    gradients = Gradients._trace(Gradient(tensor=result, gradient=np.array([1, 2, 3])))
    assert np.allclose(gradients[tensor], [1, 2, 0])
    with pytest.raises(KeyError):
        gradients[low]
    with pytest.raises(KeyError):
        gradients[high]
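
Clip acts as an identity inside the [low, high] band and blocks the gradient outside it: 2 is within [1, 3] and 3 is within [2, 4], so their upstream gradients pass through, while 4 exceeds its bound of 2 and the third entry is zeroed. (The convention exactly at a boundary is a library detail this test does not pin down.) A mask-based NumPy sketch of the same rule:

import numpy as np

t, lo, hi = np.array([2, 3, 4]), np.array([1, 2, 1]), np.array([3, 4, 2])
print(np.array([1, 2, 3]) * ((t >= lo) & (t <= hi)))  # [1, 2, 0]

The KeyError checks confirm that the bounds themselves receive no gradient.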
Example #9
def test_gradients_one_axis():
    logits = Tensor.from_builtin([[1, 2, 3], [4, 5, 6]])
    probabilities = logits.softmax(1)
    gradients = Gradients._trace(
        Gradient(tensor=probabilities, gradient=np.array([[1, 0, 0], [0, 1, 0]]))
    )
    assert np.allclose(
        gradients[logits],
        [[0.0819, -0.0220, -0.0599], [-0.0220, 0.1848, -0.1628]],
        atol=1e-4,
    )
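
This is the same Jacobian-vector product as in Example #4, applied row-wise along axis 1. Both rows of `logits` have the same softmax (softmax is shift-invariant), so the two gradient rows differ only because the upstream one-hot picks a different class. A NumPy reproduction:

import numpy as np

x = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
p = np.exp(x) / np.exp(x).sum(axis=1, keepdims=True)
g = np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0]])
print(p * (g - (g * p).sum(axis=1, keepdims=True)))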
Example #10
def test_gradients():
    tensor = Tensor.from_builtin([[1, 2], [3, 4]])
    batch_norm = BatchNormalization(
        mean=np.array([4, -1]),
        variance=np.array([1, 0.25]),
        persistence=0.9,
        shift=Tensor.from_builtin([3, 2]),
        scale=Tensor.from_builtin([1, 1]),
    )
    with BatchNormalization.mode(BatchNormalization.Mode.test):
        loss = batch_norm(tensor).sum()
    gradients = Gradients.trace(loss)
    assert np.allclose(gradients[tensor], [[1, 2], [1, 2]])
    assert np.allclose(gradients[batch_norm.shift], [2, 2])
    assert np.allclose(gradients[batch_norm.scale], [-4, 16])
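
In test mode the layer applies its stored running statistics, assuming the usual formula y = scale * (x - mean) / sqrt(variance) + shift (any epsilon term is omitted here; `persistence` only matters when updating the running statistics in training mode). With scale = 1, that makes the input gradient 1 / sqrt(variance) = [1, 2] in every row, the shift gradient the per-column count of rows, [2, 2], and the scale gradient the column sums of the normalized input. Checking the scale gradient in NumPy:

import numpy as np

x = np.array([[1.0, 2.0], [3.0, 4.0]])
x_hat = (x - np.array([4.0, -1.0])) / np.sqrt(np.array([1.0, 0.25]))
print(x_hat.sum(axis=0))  # [-4, 16]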
Example #11
def test_train_step():
    data = Tensor.from_builtin([[1, 2, 3], [4, 5, 0]])
    targets = Tensor.from_builtin([[1, 0], [0, 1]])
    weights = Tensor.from_builtin([[2, 0], [1, 1], [0, 3]])
    biases = Tensor.from_builtin([-2, -7])

    def compute_loss():
        logits = matrix_multiply(data, weights) + biases
        probabilities = logits.softmax(-1)
        return -(probabilities.log() * targets).sum()

    loss = compute_loss()
    gradients = Gradients.trace(loss)
    weights -= Tensor.from_numpy(gradients[weights])
    biases -= Tensor.from_numpy(gradients[biases])

    assert compute_loss().data < loss.data
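
The update is one step of plain gradient descent with an implicit learning rate of 1, so the recomputed loss must drop. For softmax followed by this cross-entropy, the gradient with respect to the logits reduces to the standard identity probabilities - targets, which makes the weight gradient data.T @ (probabilities - targets). The same step sketched in raw NumPy rather than the library's API:

import numpy as np

data = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 0.0]])
targets = np.array([[1.0, 0.0], [0.0, 1.0]])
weights = np.array([[2.0, 0.0], [1.0, 1.0], [0.0, 3.0]])
biases = np.array([-2.0, -7.0])

logits = data @ weights + biases
p = np.exp(logits - logits.max(axis=1, keepdims=True))  # stable softmax
p /= p.sum(axis=1, keepdims=True)
weights -= data.T @ (p - targets)       # the same step the test performs
biases -= (p - targets).sum(axis=0)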