Example 1
import torch

from falkon.options import FalkonOptions


def potrf_wrapper(A: torch.Tensor, clean: bool, upper: bool, use_cuda: bool,
                  opt: FalkonOptions) -> torch.Tensor:
    # Dispatch between the out-of-core GPU Cholesky and the in-core CPU one.
    if use_cuda:
        from falkon.ooc_ops.ooc_potrf import gpu_cholesky
        return gpu_cholesky(A,
                            upper=upper,
                            clean=clean,
                            overwrite=True,
                            opt=opt)
    else:
        # `potrf` is the in-core Cholesky routine imported elsewhere in the
        # defining module.
        return potrf(A, upper=upper, clean=clean, overwrite=True, cuda=False)
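A minimal usage sketch for the wrapper above, assuming the CPU path and default options (the positive-definite input here is built by hand, not with the library's helpers):

import torch
from falkon.options import FalkonOptions

# Small symmetric positive-definite input: B @ B.T + n * I.
n = 100
B = torch.randn(n, n, dtype=torch.float64)
A = B @ B.T + n * torch.eye(n, dtype=torch.float64)

# CPU path; the wrapper factors with overwrite=True, so pass a clone if
# the original matrix must be preserved.
L = potrf_wrapper(A.clone(), clean=True, upper=False, use_cuda=False,
                  opt=FalkonOptions())
# L is lower triangular and L @ L.T reconstructs the original A.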
Example 2
import time

import numpy as np

from falkon.tests.gen_random import gen_random_pd  # assumed helper location


def test_potrf_speed():
    t = 5000
    # Random positive-definite matrix in C (row-major) order.
    mat = gen_random_pd(t, np.float32, F=False, seed=12345)
    t_s = time.time()
    our_chol = potrf(mat, upper=False, clean=True, overwrite=False, cuda=False)
    our_time = time.time() - t_s

    t_s = time.time()
    np_chol = np.linalg.cholesky(mat)
    np_time = time.time() - t_s

    # Both factors are lower triangular with a zeroed upper part, so they
    # can be compared directly.
    np.testing.assert_allclose(np_chol, our_chol, rtol=1e-5)
    print("Time for cholesky(%d): Numpy %.2fs - Our %.2fs" %
          (t, np_time, our_time))
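The helper gen_random_pd comes from Falkon's test utilities; a rough stand-in, assuming only that it returns an n-by-n positive-definite array with the requested dtype, memory order (F=True for Fortran order), and seed, might look like:

import numpy as np

def gen_random_pd_sketch(n, dtype, F=False, seed=None):
    # Hypothetical stand-in for the gen_random_pd test helper: symmetrize
    # a random matrix and shift its spectrum to make it positive-definite.
    rng = np.random.default_rng(seed)
    A = rng.standard_normal((n, n)).astype(dtype)
    A = A @ A.T + n * np.eye(n, dtype=dtype)
    return np.asfortranarray(A) if F else np.ascontiguousarray(A)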
Example 3
    def test_lower(self, mat, exp_lower, clean, overwrite, order, dtype):
        # Cast the input to the requested memory order / dtype, then factor
        # a copy so the original can be checked afterwards.
        mat = fix_mat(mat, order=order, dtype=dtype, copy=False, numpy=True)
        inpt = mat.copy(order="K")

        our_chol = potrf(inpt,
                         upper=False,
                         clean=clean,
                         overwrite=overwrite,
                         cuda=False)
        if overwrite:
            # The result must share memory with the input buffer.
            assert inpt.ctypes.data == our_chol.ctypes.data, "Overwriting failed"

        if clean:
            # With clean=True the strict upper triangle must be zeroed out.
            np.testing.assert_allclose(exp_lower,
                                       our_chol,
                                       rtol=self.rtol[dtype])
            assert np.triu(our_chol, 1).sum() == 0
        else:
            # Without cleaning, only the lower triangle holds the factor;
            # the strict upper triangle must be left untouched.
            np.testing.assert_allclose(exp_lower,
                                       np.tril(our_chol),
                                       rtol=self.rtol[dtype])
            np.testing.assert_allclose(np.triu(mat, 1), np.triu(our_chol, 1))
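The test_lower method above relies on pytest injecting mat, exp_lower and the four flags; a minimal sketch of a surrounding class and parametrization that could drive it (the fixture definitions, scopes, and parameter values here are assumptions, not Falkon's actual test configuration):

import numpy as np
import pytest

@pytest.fixture(scope="module")
def mat():
    # Symmetric positive-definite test matrix.
    rng = np.random.default_rng(0)
    A = rng.standard_normal((50, 50))
    return A @ A.T + 50 * np.eye(50)

@pytest.fixture(scope="module")
def exp_lower(mat):
    # Reference lower-triangular factor from NumPy.
    return np.linalg.cholesky(mat)

@pytest.mark.parametrize("clean", [True, False])
@pytest.mark.parametrize("overwrite", [True, False])
@pytest.mark.parametrize("order", ["C", "F"])
@pytest.mark.parametrize("dtype", [np.float32, np.float64])
class TestPotrf:
    rtol = {np.float32: 1e-5, np.float64: 1e-12}

    # test_lower (as shown above) would live here as a method.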