def test_train_step():
    # Tensor, Gradients and matrix_multiply come from the library under test;
    # the original snippet omits the imports.
    data = Tensor.from_builtin([[1, 2, 3], [4, 5, 0]])
    targets = Tensor.from_builtin([[1, 0], [0, 1]])
    weights = Tensor.from_builtin([[2, 0], [1, 1], [0, 3]])
    biases = Tensor.from_builtin([-2, -7])

    def compute_loss():
        # Forward pass: linear layer followed by softmax cross-entropy
        # against one-hot targets.
        logits = matrix_multiply(data, weights) + biases
        probabilities = logits.softmax(-1)
        return -(probabilities.log() * targets).sum()

    loss = compute_loss()
    # Backward pass: trace gradients of the loss, then take one plain
    # gradient-descent step on the parameters.
    gradients = Gradients.trace(loss)
    weights -= Tensor.from_numpy(gradients[weights])
    biases -= Tensor.from_numpy(gradients[biases])

    # A single update step should reduce the loss.
    assert compute_loss().data < loss.data
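
Updating the parameters by the raw traced gradients, as above, is one step of plain gradient descent; the same cycle can be repeated to keep driving the loss down. A minimal sketch of such a loop, assuming the same Tensor / Gradients / matrix_multiply API used in this test and a hypothetical learning_rate scale factor:

def train(data, targets, weights, biases, steps=100, learning_rate=0.1):
    # Repeat the forward/backward/update cycle from test_train_step.
    for _ in range(steps):
        logits = matrix_multiply(data, weights) + biases
        loss = -(logits.softmax(-1).log() * targets).sum()
        gradients = Gradients.trace(loss)
        # gradients[...] is assumed to be a numpy array, as in the test above.
        weights -= Tensor.from_numpy(learning_rate * gradients[weights])
        biases -= Tensor.from_numpy(learning_rate * gradients[biases])
    return weights, biases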
Example #2
import numpy as np

def make_normalizer(train_features: np.ndarray):
    # Capture training-set statistics in a closure for reuse on new data.
    mean = np.mean(train_features)
    std = np.std(train_features)
    return lambda features: Tensor.from_numpy((features - mean) / std)
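
The returned closure freezes the training-set mean and standard deviation, so validation or test features are normalized with exactly the same statistics. A short usage sketch with hypothetical train_features / test_features arrays:

normalize = make_normalizer(train_features)   # statistics come from the training data only
train_tensor = normalize(train_features)      # approximately zero mean, unit variance
test_tensor = normalize(test_features)        # same shift and scale, no test-set leakage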
Example #3
import numpy as np

def test_unequal_array():
    # Tensors built from arrays that differ in one element compare unequal.
    assert Tensor.from_numpy(np.array([1, 2, 4.5])) != Tensor.from_numpy(
        np.array([1, 2, 5.5]))
Example #4
import numpy as np

def test_convert():
    # Tensor.convert should accept builtin lists and produce an equal Tensor.
    tensor = Tensor.convert([1, 2])
    assert tensor == Tensor.from_numpy(np.array([1, 2]))
    assert tensor == Tensor.from_builtin([1, 2])
    # Converting an existing Tensor is a no-op that returns the same object.
    assert tensor is Tensor.convert(tensor)
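
Because Tensor.convert returns an existing Tensor unchanged, it is convenient at API boundaries that accept either builtin lists or Tensors. A hedged sketch of a hypothetical helper, reusing matrix_multiply from the first example:

def project(values, weights: Tensor) -> Tensor:
    # Accept a builtin nested list or an existing Tensor interchangeably.
    return matrix_multiply(Tensor.convert(values), weights)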
Example #5
import numpy as np

def test_equal_array():
    # Tensors built from identical arrays compare equal element-wise.
    assert Tensor.from_numpy(np.array([1, 2, 4.5])) == Tensor.from_numpy(
        np.array([1, 2, 4.5]))