Example 1
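All six snippets come from the simple_learning test suite and share a common preamble. A minimal sketch of the imports and constants they rely on (the module paths and tolerance values are assumptions inferred from the code, not given by the source):

import numpy as np
import numpy.testing as np_test
import torch
import torch.nn as torch_nn
from itertools import repeat   # used by evaluate_function_with_pytorch below

import simple_learning as sl
import simple_learning.nn as sl_nn   # assumed module path for the sl_nn alias

R_TOLERANCE = 1e-5   # assumed relative tolerance for assert_allclose
A_TOLERANCE = 1e-5   # assumed absolute tolerance for assert_allclose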
def test_detach():
    tensor_leaf = sl.Tensor([1, 2, 3, 4])
    tensor_interim = tensor_leaf + 100
    interim_detached = tensor_interim.detach()

    # the detached tensor keeps the same data but is cut off from the autograd graph
    assert (interim_detached.data == tensor_interim.data).all()
    assert interim_detached.context is None
Example 2
def test_linear():
    # exercise the layer with bias=True
    in_features = 10
    out_features = 20
    batch = 30
    input_array = np.random.rand(batch, in_features).astype('float32')

    pt_linear = torch_nn.Linear(in_features, out_features, bias=True)
    pt_input = torch.tensor(input_array.copy(), requires_grad=True)
    pt_result = pt_linear(pt_input)
    pt_result.backward(torch.ones_like(pt_result))

    sl_linear = sl_nn.Linear(in_features, out_features, bias=True)
    # copy PyTorch's initialized parameters so both layers compute the same function
    sl_linear.weight.data = pt_linear.weight.data.numpy()
    sl_linear.bias.data = pt_linear.bias.data.numpy()
    sl_input = sl.Tensor(input_array.copy(), requires_grad=True)
    sl_result = sl_linear(sl_input)
    sl_result.backward()

    # check that the forward pass is correct
    obtained_result = sl_result.data
    expected_result = pt_result.detach().numpy()
    np_test.assert_allclose(obtained_result, expected_result, R_TOLERANCE, A_TOLERANCE)

    sl_leaves = [sl_input, sl_linear.weight, sl_linear.bias]
    pt_leaves = [pt_input, pt_linear.weight, pt_linear.bias]

    # check that the backward pass is correct (the arguments' gradients)
    for sl_a, pt_a in zip(sl_leaves, pt_leaves):
        obtained_grad = sl_a.grad
        expected_grad = pt_a.grad.numpy()
        np_test.assert_allclose(obtained_grad, expected_grad, R_TOLERANCE, A_TOLERANCE)
Example 3
def compose_operations(*functions_and_args):
    """Chain (function, args) pairs, feeding each result into the next call, then compare
       simple_learning's forward values and leaf gradients against PyTorch's."""
    sl_result = None
    pt_result = None
    sl_leaves = []
    pt_leaves = []

    for i, (function, args) in enumerate(functions_and_args):
        if not isinstance(args, (tuple, list)):
            args = (args, )

        # convert arguments to Tensors
        sl_args = [sl.Tensor(arg.copy()) for arg in args]
        pt_args = [
            torch.tensor(arg.copy().astype('float32'), requires_grad=True)
            for arg in args
        ]

        # add the arguments to the list of leaf Tensors
        sl_leaves.extend(sl_args)
        pt_leaves.extend(pt_args)

        # if it's the first operation use the provided arguments only
        if i == 0:
            sl_result = function(*sl_args)
            pt_result = function(*pt_args)
        # otherwise, use the output of the last operation as the first argument
        else:
            sl_result = function(sl_result, *sl_args)
            pt_result = function(pt_result, *pt_args)

    # compute the backward pass
    sl_result.backward(np.ones_like(sl_result.data))
    pt_result.backward(torch.ones_like(pt_result))

    # check if the forward pass is correct
    obtained_result = sl_result.data
    expected_result = pt_result.detach().numpy()
    np_test.assert_allclose(obtained_result, expected_result, R_TOLERANCE,
                            A_TOLERANCE)

    # check that the backward pass is correct (the arguments' gradients)
    for sl_a, pt_a in zip(sl_leaves, pt_leaves):
        obtained_grad = sl_a.grad
        expected_grad = pt_a.grad.numpy()
        np_test.assert_allclose(obtained_grad, expected_grad, R_TOLERANCE,
                                A_TOLERANCE)
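Because the same callable is applied to both simple_learning and PyTorch tensors, the (function, args) pairs are naturally expressed with the operator module or lambdas. A hypothetical call, assuming sl.Tensor overloads the arithmetic operators (as the detach example's tensor_leaf + 100 suggests):

import operator

a = np.random.rand(3, 3)
b = np.random.rand(3, 3)

# evaluate (a + b) * a in both frameworks and compare values and gradients
compose_operations(
    (operator.add, (a, b)),  # first pair: only the provided arguments are used
    (operator.mul, a),       # later pairs: the previous result becomes the first argument
)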
Example 4
def test_numel():
    array = np.random.randn(10, 10)
    tensor = sl.Tensor(array)
    # numel mirrors numpy's .size: the total number of elements
    assert tensor.numel == array.size
Example 5
def test_function():
    tensor_leaf = sl.Tensor([1, 2, 3, 4])
    tensor_interim = tensor_leaf + 100

    # the interim tensor records the autograd function that produced it
    assert tensor_interim.function == sl.grad.functional.Add
Example 6
def evaluate_function_with_pytorch(simple_learning_function, torch_function,
                                   constructor):
    """Apply a simple_learning_function and a torch_function to the same numpy array based Tensors
       built with constructor, compare the results of both and the gradients of the leaf tensors.

    Args:
        simple_learning_function: Callable to evaluate the from the simple_learning library.
        torch_function: Callable to evaluate from the pytorch library.
        constructor: Pair of iterables ((func1, func2), (args_to_func1, args_to_func2)) to build
                     the numpy arrays used as parameters to both functions.

                     The arrays are initiated as func1(*args_to_func1), func2(*args_to_func2). The
                     result to each func will be a parameter to be used in both simple_learning and pytorch
                     functions.

                     In the case that only one constructor function is provided, but multiple
                     argument iterables, the function will be broadcasted to all other argument iterables:

                     Given: ((func1,), (args_to_func1, more_args_to_func1))
                     It's equivalent to: func1(*args_to_func1), func1(*more_args_to_func1).

    Raises:
        AssertionError if the simple_learning and pytorch functions results differ by more than the
        set absolute or relative tolerances.
    """
    constructor = Constructor(*constructor)
    if not isinstance(constructor.functions, (list, tuple)):
        constructor.functions = (constructor.functions, )

    # if the number of functions doesn't match the number of given argument iterables,
    # broadcast the first given function across all of them
    n_functions = len(constructor.functions)
    n_arguments = len(constructor.arguments)
    if n_functions < n_arguments:
        constructor.functions = repeat(constructor.functions[0], n_arguments)

    args_arrays = [
        func(*args)
        for (func, args) in zip(constructor.functions, constructor.arguments)
    ]

    # apply simple_learning function to Tensors
    sl_args = [sl.Tensor(arg.copy()) for arg in args_arrays]
    sl_result = simple_learning_function(*sl_args)
    sl_result.backward(np.ones_like(sl_result.data))

    # apply the same function to pytorch's Tensors
    pt_args = [
        torch.tensor(arg.copy().astype('float32'), requires_grad=True)
        for arg in args_arrays
    ]
    pt_result = torch_function(*pt_args)
    pt_result.backward(torch.ones_like(pt_result))

    # check if the forward pass is correct
    obtained_result = sl_result.data
    expected_result = pt_result.detach().numpy()
    np_test.assert_allclose(obtained_result, expected_result, R_TOLERANCE,
                            A_TOLERANCE)

    # check that the backward pass is correct (the arguments' gradients)
    for sl_a, pt_a in zip(sl_args, pt_args):
        if sl_a.grad is None and pt_a.grad is None:
            continue  # if neither of the Tensors required grad, skip them

        obtained_grad = sl_a.grad
        expected_grad = pt_a.grad.numpy()
        np_test.assert_allclose(obtained_grad, expected_grad, R_TOLERANCE,
                                A_TOLERANCE)
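evaluate_function_with_pytorch reassigns constructor.functions, so Constructor is presumably a simple mutable two-field record rather than an immutable namedtuple. A sketch of the assumed type plus a hypothetical call (operator.add stands in for a concrete function pair, since it dispatches to the overloaded __add__ on both tensor types):

import operator
from types import SimpleNamespace

def Constructor(functions, arguments):
    # assumed shape: a mutable record with .functions and .arguments
    return SimpleNamespace(functions=functions, arguments=arguments)

# build two random 3x3 arrays from one broadcast constructor, then compare a + b
evaluate_function_with_pytorch(
    operator.add,                            # evaluated on simple_learning Tensors
    operator.add,                            # evaluated on torch Tensors
    ((np.random.rand,), ((3, 3), (3, 3))),   # one func broadcast to both argument tuples
)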