Example #1
def test_power_method_opnorm_exceptions():
    """Test the exceptions"""
    space = odl.rn(2)
    op = odl.IdentityOperator(space)

    with pytest.raises(ValueError):
        # Too small number of iterates
        power_method_opnorm(op, maxiter=0)

    with pytest.raises(ValueError):
        # Negative number of iterates
        power_method_opnorm(op, maxiter=-5)

    with pytest.raises(ValueError):
        # Input vector is zero
        power_method_opnorm(op, maxiter=2, xstart=space.zero())

    with pytest.raises(ValueError):
        # Input vector in the nullspace
        op = odl.MatrixOperator([[0., 1.], [0., 0.]])

        power_method_opnorm(op, maxiter=2, xstart=op.domain.one())

    with pytest.raises(ValueError):
        # Odd number of iterates for a non-square operator
        op = odl.MatrixOperator([[1., 2., 3.], [4., 5., 6.]])

        power_method_opnorm(op, maxiter=1, xstart=op.domain.one())
Example #2
File: layer.py  Project: yochju/odl
def test_backward(dtype):
    """Test gradient evaluation with pytorch-wrapped operators/functionals."""
    # Define ODL operator and cost functional
    matrix = np.random.rand(2, 3).astype(dtype)
    odl_op = odl.MatrixOperator(matrix)
    odl_cost = odl.solvers.L2NormSquared(odl_op.range)
    odl_functional = odl_cost * odl_op

    # Wrap operator and cost with pytorch
    torch_op = TorchOperator(odl_op)
    torch_cost = TorchOperator(odl_cost)

    # Define evaluation point and wrap into a variable. Mark it with
    # `requires_grad=True`, otherwise `backward()` doesn't do anything.
    # This is supported by the ODL wrapper.
    x = torch.from_numpy(np.ones(3, dtype=dtype))
    x_var = torch.autograd.Variable(x, requires_grad=True)

    # Compute forward pass
    y_var = torch_op(x_var)
    res_var = torch_cost(y_var)

    # Populate gradients by backwards pass
    res_var.backward()
    torch_grad = x_var.grad

    # ODL result
    odl_grad = odl_functional.gradient(x.numpy())

    assert torch_grad.data.numpy().dtype == dtype
    assert all_almost_equal(torch_grad.data.numpy(), odl_grad)
Example #3
def functional(request):
    """functional with optimum 0 at 0."""
    name = request.param

    if name == 'l2_squared':
        space = odl.rn(3)
        return odl.solvers.L2NormSquared(space)
    elif name == 'l2_squared_scaled':
        space = odl.uniform_discr(0, 1, 3)
        scaling = odl.MultiplyOperator(space.element([1, 2, 3]), domain=space)
        return odl.solvers.L2NormSquared(space) * scaling
    elif name == 'quadratic_form':
        space = odl.rn(3)
        # Symmetric and diagonally dominant matrix
        matrix = odl.MatrixOperator([[7.0, 1, 2], [1, 5, -3], [2, -3, 8]])
        vector = space.element([1, 2, 3])

        # Calibrate so that functional is zero in optimal point
        constant = 1 / 4 * vector.inner(matrix.inverse(vector))

        return odl.solvers.QuadraticForm(operator=matrix,
                                         vector=vector,
                                         constant=constant)
    elif name == 'rosenbrock':
        # Moderately ill-behaved Rosenbrock functional.
        rosenbrock = odl.solvers.RosenbrockFunctional(odl.rn(2), scale=2)

        # Center at zero
        return rosenbrock.translated([-1, -1])
    else:
        assert False
Example #4
def solve_game(payoff_matrix,
               num_iters,
               tau,
               sigma,
               restart,
               fixed_restart_frequency=None):
    linear_operator = odl.MatrixOperator(payoff_matrix)
    primal_space = linear_operator.domain
    dual_space = linear_operator.range
    indicator_primal_simplex = odl.solvers.IndicatorSimplex(primal_space)
    conjugate_of_indicator_dual_simplex = IndicatorSimplexConjugate(dual_space)
    x = primal_space.zero()
    y = dual_space.zero()
    callback = CallbackStore(payoff_matrix)
    restarted_pdhg.restarted_pdhg(
        x,
        f=indicator_primal_simplex,
        g=conjugate_of_indicator_dual_simplex,
        L=linear_operator,
        niter=num_iters,
        tau=tau,
        sigma=sigma,
        y=y,
        callback=callback,
        restart=restart,
        fixed_restart_frequency=fixed_restart_frequency)
    return callback.residuals_at_current, callback.residuals_at_avg
Example #5
def test_as_tensorflow_layer():
    # Define ODL operator
    matrix = np.random.rand(3, 2)
    odl_op = odl.MatrixOperator(matrix)

    # Define evaluation points
    x = np.random.rand(2)
    z = np.random.rand(3)

    # Add empty axes for batch and channel
    x_tf = tf.constant(x)[None, ..., None]
    z_tf = tf.constant(z)[None, ..., None]

    # Create tensorflow layer from odl operator
    odl_op_layer = odl.contrib.tensorflow.as_tensorflow_layer(
        odl_op, 'MatrixOperator')
    y_tf = odl_op_layer(x_tf)

    # Evaluate using tensorflow
    result = y_tf.eval().ravel()
    expected = odl_op(x)

    assert all_almost_equal(result, expected)

    # Evaluate the adjoint of the derivative, called gradient in tensorflow
    result = tf.gradients(y_tf, [x_tf], z_tf)[0].eval().ravel()
    expected = odl_op.derivative(x).adjoint(z)

    assert all_almost_equal(result, expected)
Example #6
def test_module_forward_diff_shapes(device):
    """Test operator module with different shapes of input and output."""
    # Define ODL operator and wrap as module
    matrix = np.random.rand(2, 3).astype('float32')
    odl_op = odl.MatrixOperator(matrix)
    op_mod = odl_torch.OperatorModule(odl_op)

    # Input data
    x_arr = np.ones(3, dtype='float32')

    # Test with 1 extra dim (minimum)
    x = torch.from_numpy(x_arr).to(device)[None, ...]
    x.requires_grad_(True)
    res = op_mod(x)
    res_arr = res.detach().cpu().numpy()
    assert res_arr.shape == (1, ) + odl_op.range.shape
    assert all_almost_equal(res_arr, np.asarray(odl_op(x_arr))[None, ...])
    assert x.device.type == res.device.type == device

    # Test with 2 extra dims
    x = torch.from_numpy(x_arr).to(device)[None, None, ...]
    x.requires_grad_(True)
    res = op_mod(x)
    res_arr = res.detach().cpu().numpy()
    assert res_arr.shape == (1, 1) + odl_op.range.shape
    assert all_almost_equal(res_arr,
                            np.asarray(odl_op(x_arr))[None, None, ...])
    assert x.device.type == res.device.type == device
Example #7
def test_autograd_function_backward(dtype, device):
    """Test backprop with operators/functionals as autograd functions."""
    # Define ODL operator and cost functional
    matrix = np.random.rand(2, 3).astype(dtype)
    odl_op = odl.MatrixOperator(matrix)
    odl_cost = odl.solvers.L2NormSquared(odl_op.range)
    odl_functional = odl_cost * odl_op

    # Define evaluation point and mark as `requires_grad` to enable
    # backpropagation
    x_arr = np.ones(3, dtype=dtype)
    x = torch.from_numpy(x_arr).to(device)
    x.requires_grad_(True)

    # Compute forward pass
    y = odl_torch.OperatorFunction.apply(odl_op, x)
    res = odl_torch.OperatorFunction.apply(odl_cost, y)

    # Populate gradients by backwards pass
    res.backward()
    grad = x.grad
    grad_arr = grad.detach().cpu().numpy()

    # Compute gradient with ODL
    odl_grad = odl_functional.gradient(x_arr)

    assert grad_arr.dtype == dtype
    assert all_almost_equal(grad_arr, odl_grad)
    assert x.device.type == grad.device.type == device
Example #8
File: theano_test.py  Project: yochju/odl
def test_theano_operator():
    """Test the ODL->Theano operator wrapper."""
    # Define ODL operator
    matrix = np.random.rand(3, 2)
    odl_op = odl.MatrixOperator(matrix)

    # Define evaluation points
    x = [1., 2.]
    dy = [1., 2., 3.]

    # Create Theano placeholders
    x_theano = T.dvector()
    dy_theano = T.dvector()

    # Create Theano layer from odl operator
    odl_op_layer = odl.contrib.theano.TheanoOperator(odl_op)

    # Build computation graphs
    y_theano = odl_op_layer(x_theano)
    y_theano_func = theano.function([x_theano], y_theano)
    dy_theano_func = theano.function([x_theano, dy_theano],
                                     T.Rop(y_theano, x_theano, dy_theano))

    # Evaluate using Theano
    result = y_theano_func(x)
    expected = odl_op(x)

    assert all_almost_equal(result, expected)

    # Evaluate the adjoint of the derivative, called gradient in Theano
    result = dy_theano_func(x, dy)
    expected = odl_op.derivative(x).adjoint(dy)

    assert all_almost_equal(result, expected)
Example #9
def optimization_problem(request):
    problem_name = request.param

    if problem_name == 'MatVec':
        # Define problem
        op_arr = np.eye(5) * 5 + np.ones([5, 5])
        op = odl.MatrixOperator(op_arr)

        # Simple right hand side
        rhs = op.range.one()

        # Initial guess
        x = op.domain.element([0.6, 0.8, 1.0, 1.2, 1.4])

        return op, x, rhs
    elif problem_name == 'Identity':
        # Define problem
        space = odl.uniform_discr(0, 1, 5)
        op = odl.IdentityOperator(space)

        # Simple right hand side
        rhs = op.range.element([0, 0, 1, 0, 0])

        # Initial guess
        x = op.domain.element([0.6, 0.8, 1.0, 1.2, 1.4])

        return op, x, rhs
    else:
        raise ValueError('problem not valid')
Example #10
def test_autograd_function_forward(dtype, use_cuda):
    """Test forward evaluation with operators as autograd functions."""
    # Define ODL operator
    matrix = np.random.rand(2, 3).astype(dtype)
    odl_op = odl.MatrixOperator(matrix)

    # Wrap as torch autograd function
    torch_op = odl_torch.OperatorAsAutogradFunction(odl_op)

    # Define evaluation point and wrap into a variable
    x = torch.from_numpy(np.ones(3, dtype=dtype))
    if use_cuda:
        x = x.cuda()
    x_var = autograd.Variable(x)

    # Evaluate torch operator
    res_var = torch_op(x_var)

    # ODL result
    odl_res = odl_op(x.cpu().numpy())

    assert res_var.data.cpu().numpy().dtype == dtype
    assert all_almost_equal(res_var.data.cpu().numpy(), odl_res)

    # Make sure data stays on the GPU
    if use_cuda:
        assert res_var.is_cuda
Example #11
def step_sizes(lp, tau_sigma_ratio):
    estimated_norm = odl.MatrixOperator(-lp.constraint_matrix).norm(
        estimate=True)
    # Choose the step sizes such that tau * sigma = 0.9 / estimated_norm**2
    # and tau / sigma = tau_sigma_ratio
    sigma = np.sqrt(0.9 / tau_sigma_ratio) / estimated_norm
    tau = sigma * tau_sigma_ratio
    return tau, sigma
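
The two comment lines above pin down the step sizes uniquely. Below is a minimal stand-alone check of that closed form; it is only a sketch using NumPy, and the values of the norm estimate and the ratio are arbitrary placeholders, not taken from the example.

import numpy as np

estimated_norm = 3.7       # placeholder operator-norm estimate (assumed value)
tau_sigma_ratio = 4.0      # placeholder ratio tau / sigma (assumed value)

sigma = np.sqrt(0.9 / tau_sigma_ratio) / estimated_norm
tau = sigma * tau_sigma_ratio

# The two relations stated in the comments hold exactly
assert np.isclose(tau * sigma, 0.9 / estimated_norm ** 2)
assert np.isclose(tau / sigma, tau_sigma_ratio)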
Example #12
def test_matrix_representation():
    """Verify that the matrix repr returns the correct matrix"""
    n = 3
    A = np.random.rand(n, n)

    Aop = odl.MatrixOperator(A)
    matrix_repr = matrix_representation(Aop)

    assert all_almost_equal(A, matrix_repr)
Example #13
def test_matrix_representation():
    # Verify that the matrix representation function returns the correct matrix

    n = 3
    A = np.random.rand(n, n)

    Aop = odl.MatrixOperator(A)
    matrix_repr = matrix_representation(Aop)

    assert almost_equal(np.sum(np.abs(A - matrix_repr)), 1e-6)
Example #14
def test_matrix_representation_product_to_product():
    """Verify that the matrix repr works for product spaces.

    Here, since the domain and range have shape ``(2, 3)``, the shape of the
    matrix representation will be ``(2, 3, 2, 3)``.
    """
    n = 3
    A = np.random.rand(n, n)
    Aop = odl.MatrixOperator(A)

    B = np.random.rand(n, n)
    Bop = odl.MatrixOperator(B)

    ABop = ProductSpaceOperator([[Aop, 0], [0, Bop]])
    matrix_repr = matrix_representation(ABop)

    assert matrix_repr.shape == (2, n, 2, n)
    assert np.linalg.norm(A - matrix_repr[0, :, 0, :]) == pytest.approx(0)
    assert np.linalg.norm(B - matrix_repr[1, :, 1, :]) == pytest.approx(0)
Example #15
def test_matrix_representation_product_to_product_two():
    # Verify that the matrix representation function returns the correct matrix

    n = 3
    rn = odl.rn(n)
    A = np.random.rand(n, n)
    Aop = odl.MatrixOperator(A)

    B = np.random.rand(n, n)
    Bop = odl.MatrixOperator(B)

    ran_and_dom = ProductSpace(rn, 2)

    AB_matrix = np.vstack(
        [np.hstack([A, np.zeros((n, n))]),
         np.hstack([np.zeros((n, n)), B])])
    ABop = ProductSpaceOperator([[Aop, 0], [0, Bop]], ran_and_dom, ran_and_dom)
    the_matrix = matrix_representation(ABop)

    assert almost_equal(np.sum(np.abs(AB_matrix - the_matrix)), 1e-6)
Example #16
def test_matrix_representation_lin_space_to_product():
    # Verify that the matrix representation function returns the correct matrix

    n = 3
    rn = odl.rn(n)
    A = np.random.rand(n, n)
    Aop = odl.MatrixOperator(A)

    m = 2
    rm = odl.rn(m)
    B = np.random.rand(m, n)
    Bop = odl.MatrixOperator(B)

    dom = ProductSpace(rn, 1)
    ran = ProductSpace(rn, rm)

    AB_matrix = np.vstack([A, B])
    ABop = ProductSpaceOperator([[Aop], [Bop]], dom, ran)

    the_matrix = matrix_representation(ABop)

    assert almost_equal(np.sum(np.abs(AB_matrix - the_matrix)), 1e-6)
Example #17
def test_power_method_opnorm_symm():
    """Test the power method on a symmetrix matrix operator"""
    # Test matrix with singular values 1.2 and 1.0
    mat = np.array([[0.9509044, -0.64566614], [-0.44583952, -0.95923051]])

    op = odl.MatrixOperator(mat)
    true_opnorm = 1.2
    opnorm_est = power_method_opnorm(op, maxiter=100)
    assert opnorm_est == pytest.approx(true_opnorm, rel=1e-2)

    # Start at a different point
    xstart = odl.rn(2).element([0.8, 0.5])
    opnorm_est = power_method_opnorm(op, xstart=xstart, maxiter=100)
    assert opnorm_est == pytest.approx(true_opnorm, rel=1e-2)
Example #18
def test_module_backward(use_cuda):
    """Test backpropagation with operators as modules."""
    matrix = np.random.rand(2, 3).astype('float32')
    odl_op = odl.MatrixOperator(matrix)
    op_mod = odl_torch.OperatorAsModule(odl_op)
    loss_fun = nn.MSELoss()

    # Test with linear layers (1 extra dim)
    layer_before = nn.Linear(3, 3)
    layer_after = nn.Linear(2, 2)
    model = nn.Sequential(layer_before, op_mod, layer_after)
    x = torch.from_numpy(np.ones(3, dtype='float32'))
    target = torch.from_numpy(np.zeros(2, dtype='float32'))

    if use_cuda:
        x = x.cuda()
        target = target.cuda()
        model = model.cuda()

    x_var = autograd.Variable(x, requires_grad=True)[None, ...]
    target_var = autograd.Variable(target)[None, ...]

    loss = loss_fun(model(x_var), target_var)
    loss.backward()
    assert all(p is not None for p in model.parameters())

    # Test with conv layers (2 extra dims)
    layer_before = nn.Conv1d(1, 2, 2)  # 1->2 channels
    layer_after = nn.Conv1d(2, 1, 2)  # 2->1 channels
    model = nn.Sequential(layer_before, op_mod, layer_after)
    # Input size 4 since initial convolution reduces by 1
    x = torch.from_numpy(np.ones(4, dtype='float32'))
    # Output size 1 since final convolution reduces by 1
    target = torch.from_numpy(np.zeros(1, dtype='float32'))

    if use_cuda:
        x = x.cuda()
        target = target.cuda()
        model = model.cuda()

    x_var = autograd.Variable(x, requires_grad=True)[None, None, ...]
    target_var = autograd.Variable(target)[None, None, ...]

    loss = loss_fun(model(x_var), target_var)
    loss.backward()
    assert all(p is not None for p in model.parameters())

    # Make sure data stays on the GPU
    if use_cuda:
        assert x_var.is_cuda
Example #19
def test_power_method_opnorm_symm():
    """Test the power method on a symmetrix matrix operator"""
    # Test matrix with eigenvalues 1 and -2
    # Rather nasty case since the eigenvectors are almost parallel
    mat = np.array([[10, -18], [6, -11]], dtype=float)

    op = odl.MatrixOperator(mat)
    true_opnorm = 2
    opnorm_est = power_method_opnorm(op)
    assert opnorm_est == pytest.approx(true_opnorm, rel=1e-2)

    # Start at a different point
    xstart = odl.rn(2).element([0.8, 0.5])
    opnorm_est = power_method_opnorm(op, xstart=xstart)
    assert opnorm_est == pytest.approx(true_opnorm, rel=1e-2)
Example #20
def test_autograd_function_forward(dtype, device):
    """Test forward evaluation with operators as autograd functions."""
    # Define ODL operator
    matrix = np.random.rand(2, 3).astype(dtype)
    odl_op = odl.MatrixOperator(matrix)

    # Compute forward pass with both ODL and PyTorch
    x_arr = np.ones(3, dtype=dtype)
    x = torch.from_numpy(x_arr).to(device)
    res = odl_torch.OperatorFunction.apply(odl_op, x)
    res_arr = res.detach().cpu().numpy()
    odl_res = odl_op(x_arr)

    assert res_arr.dtype == dtype
    assert all_almost_equal(res_arr, odl_res)
    assert x.device.type == res.device.type == device
Example #21
def test_power_method_opnorm_nonsymm():
    # Test the power method on a matrix operator

    # Singular values 5.5 and 6
    mat = np.array([[-1.52441557, 5.04276365], [1.90246927, 2.54424763],
                    [5.32935411, 0.04573162]])

    op = odl.MatrixOperator(mat)
    true_opnorm = 6

    # Start vector (1, 1) is close to the wrong eigenvector
    opnorm_est = power_method_opnorm(op, maxiter=50)
    assert almost_equal(opnorm_est, true_opnorm, places=2)

    # Start close to the correct eigenvector, converges very fast
    xstart = odl.rn(2).element([-0.8, 0.5])
    opnorm_est = power_method_opnorm(op, maxiter=6, xstart=xstart)
    assert almost_equal(opnorm_est, true_opnorm, places=2)
Example #22
def test_power_method_opnorm_nonsymm():
    """Test the power method on a nonsymmetrix matrix operator"""
    # Singular values 5.5 and 6
    mat = np.array([[-1.52441557, 5.04276365], [1.90246927, 2.54424763],
                    [5.32935411, 0.04573162]])

    op = odl.MatrixOperator(mat)
    true_opnorm = 6

    # Start vector (1, 1) is close to the wrong eigenvector
    xstart = odl.rn(2).element([1, 1])
    opnorm_est = power_method_opnorm(op, xstart=xstart, maxiter=50)
    assert opnorm_est == pytest.approx(true_opnorm, rel=1e-2)

    # Start close to the correct eigenvector, converges very fast
    xstart = odl.rn(2).element([-0.8, 0.5])
    opnorm_est = power_method_opnorm(op, xstart=xstart, maxiter=6)
    assert opnorm_est == pytest.approx(true_opnorm, rel=1e-2)
Example #23
def solve_lp(lp, num_iters, tau, sigma, restart, fixed_restart_frequency=None):
    # Using the notation of ODL's primal_dual_hybrid_gradient.py, the LP is
    # formulated as
    # min_x max_y f(x) + y'Lx - g^*(y)
    # where:
    #  f(x) = objective_vector'x +
    #    Indicator([variable_lower_bound, variable_upper_bound])
    #  L = -constraint_matrix
    #  g^*(y) = -right_hand_side'y +
    #    Indicator(R^{num_equalities} x R_+^{num_constraints - num_equalities})
    # The objective constant is ignored in the formulation.

    linear_operator = odl.MatrixOperator(-lp.constraint_matrix)

    primal_space = linear_operator.domain
    dual_space = linear_operator.range

    f = LinearOnBox(primal_space, lp.objective_vector, lp.variable_lower_bound,
                    lp.variable_upper_bound)
    num_constraints = lp.constraint_matrix.shape[0]
    g = LinearOnBoxConjugate(
        dual_space, -lp.right_hand_side,
        np.concatenate(
            (np.full(lp.num_equalities,
                     -np.inf), np.zeros(num_constraints - lp.num_equalities))),
        np.full(num_constraints, np.inf))

    x = primal_space.zero()
    y = dual_space.zero()
    callback = CallbackStore(lp)
    restarted_pdhg.restarted_pdhg(
        x,
        f=f,
        g=g,
        L=linear_operator,
        niter=num_iters,
        y=y,
        tau=tau,
        sigma=sigma,
        callback=callback,
        restart=restart,
        fixed_restart_frequency=fixed_restart_frequency)
    return callback.dataframe()
Example #24
File: layer.py  Project: yochju/odl
def test_forward(dtype):
    """Test forward evaluation with pytorch-wrapped operators."""
    # Define ODL operator
    matrix = np.random.rand(2, 3).astype(dtype)
    odl_op = odl.MatrixOperator(matrix)

    # Wrap as torch operator
    torch_op = TorchOperator(odl_op)

    # Define evaluation point and wrap into a variable
    x = torch.from_numpy(np.ones(3, dtype=dtype))
    x_var = torch.autograd.Variable(x)

    # Evaluate torch operator
    res_var = torch_op(x_var)

    # ODL result
    odl_res = odl_op(x.numpy())

    assert res_var.data.numpy().dtype == dtype
    assert all_almost_equal(res_var.data.numpy(), odl_res)
Example #25
def test_module_backward(device):
    """Test backpropagation with operators as modules."""
    # Define ODL operator and wrap as module
    matrix = np.random.rand(2, 3).astype('float32')
    odl_op = odl.MatrixOperator(matrix)
    op_mod = odl_torch.OperatorModule(odl_op)
    loss_fn = nn.MSELoss()

    # Test with linear layers (1 extra dim)
    layer_before = nn.Linear(3, 3)
    layer_after = nn.Linear(2, 2)
    model = nn.Sequential(layer_before, op_mod, layer_after).to(device)
    x = torch.from_numpy(np.ones(3, dtype='float32'))[None, ...].to(device)
    x.requires_grad_(True)
    target = torch.from_numpy(np.zeros(2, dtype='float32'))[None,
                                                            ...].to(device)
    loss = loss_fn(model(x), target)
    loss.backward()
    assert all(p is not None for p in model.parameters())
    assert x.grad.detach().cpu().abs().sum() != 0
    assert x.device.type == loss.device.type == device

    # Test with conv layers (2 extra dims)
    layer_before = nn.Conv1d(1, 2, 2)  # 1->2 channels
    layer_after = nn.Conv1d(2, 1, 2)  # 2->1 channels
    model = nn.Sequential(layer_before, op_mod, layer_after).to(device)
    # Input size 4 since initial convolution reduces by 1
    x = torch.from_numpy(np.ones(4, dtype='float32'))[None, None,
                                                      ...].to(device)
    x.requires_grad_(True)
    # Output size 1 since final convolution reduces by 1
    target = torch.from_numpy(np.zeros(1, dtype='float32'))[None, None,
                                                            ...].to(device)

    loss = loss_fn(model(x), target)
    loss.backward()
    assert all(p is not None for p in model.parameters())
    assert x.grad.detach().cpu().abs().sum() != 0
    assert x.device.type == loss.device.type == device
Example #26
def test_module_forward_diff_shapes(use_cuda):
    """Test operator module with different shapes of input and output."""
    matrix = np.random.rand(2, 3)
    odl_op = odl.MatrixOperator(matrix)
    op_mod = odl_torch.OperatorAsModule(odl_op)

    x = torch.from_numpy(np.ones(3))
    if use_cuda:
        x = x.cuda()

    # Test with 1 extra dim (minimum)
    x_var = autograd.Variable(x, requires_grad=True)[None, ...]
    y_var = op_mod(x_var)
    assert y_var.data.shape == (1, ) + odl_op.range.shape
    assert all_almost_equal(y_var.data.cpu().numpy(),
                            odl_op(np.ones(3)).asarray().reshape((1, 2)))

    # Test with 2 extra dims
    x_var = autograd.Variable(x, requires_grad=True)[None, None, ...]
    y_var = op_mod(x_var)
    assert y_var.data.shape == (1, 1) + odl_op.range.shape
    assert all_almost_equal(y_var.data.cpu().numpy(),
                            odl_op(np.ones(3)).asarray().reshape((1, 1, 2)))
Example #27
def test_autograd_function_backward(dtype, use_cuda):
    """Test backprop with operators/functionals as autograd functions."""
    # Define ODL operator and cost functional
    matrix = np.random.rand(2, 3).astype(dtype)
    odl_op = odl.MatrixOperator(matrix)
    odl_cost = odl.solvers.L2NormSquared(odl_op.range)
    odl_functional = odl_cost * odl_op

    # Wrap operator and cost with pytorch
    torch_op = odl_torch.OperatorAsAutogradFunction(odl_op)
    torch_cost = odl_torch.OperatorAsAutogradFunction(odl_cost)

    # Define evaluation point and wrap into a variable. Mark it with
    # `requires_grad=True`, otherwise `backward()` doesn't do anything.
    # This is supported by the ODL wrapper.
    x = torch.from_numpy(np.ones(3, dtype=dtype))
    if use_cuda:
        x = x.cuda()
    x_var = autograd.Variable(x, requires_grad=True)

    # Compute forward pass
    y_var = torch_op(x_var)
    res_var = torch_cost(y_var)

    # Populate gradients by backwards pass
    res_var.backward()
    torch_grad = x_var.grad

    # ODL result
    odl_grad = odl_functional.gradient(x.cpu().numpy())

    assert torch_grad.data.cpu().numpy().dtype == dtype
    assert all_almost_equal(torch_grad.data.cpu().numpy(), odl_grad)

    # Make sure data stays on the GPU
    if use_cuda:
        assert torch_grad.is_cuda
Example #28
File: theano_test.py  Project: yochju/odl
def test_theano_gradient():
    """Test the gradient of ODL functionals wrapped as Theano Ops."""
    # Define ODL operator
    matrix = np.random.rand(3, 2)
    odl_op = odl.MatrixOperator(matrix)

    # Define evaluation point
    x = [1., 2.]

    # Define ODL cost and the composed functional
    odl_cost = odl.solvers.L2NormSquared(odl_op.range)
    odl_functional = odl_cost * odl_op

    # Create Theano placeholder
    x_theano = T.dvector()

    # Create Theano layers from odl operators
    odl_op_layer = odl.contrib.theano.TheanoOperator(odl_op)
    odl_cost_layer = odl.contrib.theano.TheanoOperator(odl_cost)

    # Build computation graph
    y_theano = odl_op_layer(x_theano)
    cost_theano = odl_cost_layer(y_theano)
    cost_theano_func = theano.function([x_theano], cost_theano)
    cost_grad_theano = T.grad(cost_theano, x_theano)
    cost_grad_theano_func = theano.function([x_theano], cost_grad_theano)

    # Evaluate using Theano
    result = cost_theano_func(x)
    expected = odl_functional(x)
    assert result == pytest.approx(expected)

    # Evaluate the gradient of the cost; it should be 2 * matrix.T.dot(matrix).dot(x)
    result = cost_grad_theano_func(x)
    expected = odl_functional.gradient(x)
    assert all_almost_equal(result, expected)
Example #29
We also demonstrate that we can compute the gradient of the scalar-valued
squared L2-norm function properly using either Theano or ODL.
"""

import theano
import theano.tensor as T
import numpy as np
import odl
import odl.contrib.theano

# --- Wrap ODL operator as Theano operator --- #

# Define ODL operator
matrix = np.array([[1., 2.], [0., 0.], [0., 1.]])
odl_op = odl.MatrixOperator(matrix)

# Define evaluation point
x = [1., 2.]

# Create Theano placeholder (float64 to match the operator's domain dtype)
x_theano = T.dvector('x')

# Create Theano layer from ODL operator
odl_op_layer = odl.contrib.theano.TheanoOperator(odl_op)

# Build computation graph
y_theano = odl_op_layer(x_theano)
y_theano_func = theano.function([x_theano], y_theano)

# Evaluate using Theano and compare to odl_op(x)
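
# NOTE: the listing is truncated at this point. The lines below are a hedged
# sketch of how the evaluation and the gradient demonstration announced in
# the docstring plausibly continue; they mirror the patterns used in
# test_theano_operator and test_theano_gradient above and are not part of
# the scraped example itself.
print('Theano eval:', y_theano_func(x))
print('ODL eval   :', odl_op(x))

# --- Compute the gradient of the squared L2-norm composed with odl_op --- #

# Define the ODL cost functional and wrap it as a Theano operator
odl_cost = odl.solvers.L2NormSquared(odl_op.range)
odl_cost_layer = odl.contrib.theano.TheanoOperator(odl_cost)

# Build the composed graph and its gradient
cost_theano = odl_cost_layer(y_theano)
cost_grad_theano = T.grad(cost_theano, x_theano)
cost_grad_theano_func = theano.function([x_theano], cost_grad_theano)

# Compare with the ODL gradient, i.e. 2 * matrix.T.dot(matrix).dot(x)
print('Theano gradient:', cost_grad_theano_func(x))
print('ODL gradient   :', (odl_cost * odl_op).gradient(x))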
Example #30
    def derivative(self, x):
        return 2 * odl.MatrixOperator(self.matrix)