Example #1
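The snippets in this collection are test functions lifted from a numpywren-style test suite, so their module-level imports are not shown. A preamble along the following lines is assumed throughout; the aliases (fs, np, npw, lp) are inferred from how the snippets use them, and the exact numpywren module paths may differ between library versions.

# Inferred preamble for the ProcessPoolExecutor/pywren-based snippets below.
# This is a sketch, not copied from the original test files.
import concurrent.futures as fs
import os
import time

import numpy as np
import pywren

import numpywren as npw
from numpywren import job_runner, frontend, binops, uops, lambdapack as lp
from numpywren.matrix import BigMatrix, BigSymmetricMatrix
from numpywren.matrix_init import shard_matrix
# The algorithm factories (cholesky, gemm, qr, tsqr, bdfac, matmul, BDFAC) and
# helpers such as run_program_in_pywren, bdfac_python, f1_if, and cpu_count are
# defined elsewhere in the original test modules and are not reproduced here.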
def test_gemm():
    size = 32
    A = np.random.randn(size, size)
    B = np.random.randn(size, size)
    C = np.dot(A, B)
    shard_sizes = (8, 8)
    A_sharded = BigMatrix("Gemm_test_A",
                          shape=A.shape,
                          shard_sizes=shard_sizes,
                          write_header=True)
    B_sharded = BigMatrix("Gemm_test_B",
                          shape=B.shape,
                          shard_sizes=shard_sizes,
                          write_header=True)
    shard_matrix(A_sharded, A)
    shard_matrix(B_sharded, B)
    program, meta = gemm(A_sharded, B_sharded)
    executor = fs.ProcessPoolExecutor(1)
    program.start()
    future = executor.submit(job_runner.lambdapack_run,
                             program,
                             timeout=60,
                             idle_timeout=6,
                             pipeline_width=1)
    program.wait()
    program.free()
    C_sharded = meta["outputs"][0]
    C_npw = C_sharded.numpy()
    assert (np.allclose(C_npw, C))
    return
def test_cholesky():
    X = np.random.randn(64, 64)
    A = X.dot(X.T) + np.eye(X.shape[0])
    shard_size = 8
    shard_sizes = (shard_size, shard_size)
    A_sharded = BigMatrix("cholesky_test_A",
                          shape=A.shape,
                          shard_sizes=shard_sizes,
                          write_header=True)
    A_sharded.free()
    shard_matrix(A_sharded, A)
    program, meta = cholesky(A_sharded)
    executor = fs.ProcessPoolExecutor(1)
    print("starting program")
    program.start()
    future = executor.submit(job_runner.lambdapack_run,
                             program,
                             timeout=30,
                             idle_timeout=6)
    program.wait()
    program.free()
    L_sharded = meta["outputs"][0]
    L_npw = L_sharded.numpy()
    L = np.linalg.cholesky(A)
    assert (np.allclose(L_npw, L))
    print("great success!")
Example #3
    def test_multiple_shard_matrix_multiply(self):
        fexec = lithops.FunctionExecutor(runtime='jsampe/numpy-lithops:04',
                                         log_level='DEBUG')

        X = np.random.randn(16, 16)
        X_shard_sizes = tuple(map(int, np.array(X.shape) / 2))
        X_sharded = BigMatrix("gemm_test_1",
                              shape=X.shape,
                              shard_sizes=X_shard_sizes,
                              storage=fexec.storage)

        Y = np.random.randn(16, 16)
        Y_shard_sizes = tuple(map(int, np.array(Y.shape) / 2))
        Y_sharded = BigMatrix("gemm_test_2",
                              shape=Y.shape,
                              shard_sizes=Y_shard_sizes,
                              storage=fexec.storage)

        shard_matrix(X_sharded, X)
        shard_matrix(Y_sharded, Y)

        XY_sharded = binops.gemm(fexec, X_sharded, Y_sharded, X_sharded.bucket,
                                 1)

        XY_sharded_local = XY_sharded.numpy()
        XY = X.dot(Y)
        X_sharded.free()
        Y_sharded.free()
        XY_sharded.free()
        assert (np.all(np.isclose(XY, XY_sharded_local)))
        os.system("rm -rf /dev/shm/*")
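Example #3 above drives the same BigMatrix and binops API through lithops instead of a local process pool or pywren. A minimal preamble for it, again inferred rather than taken from the source, is sketched below; the runtime argument passed to FunctionExecutor simply names a container image that bundles numpy for the workers.

# Inferred preamble for the lithops-based example above.
import os
import numpy as np
import lithops

from numpywren import binops            # module path may differ by version
from numpywren.matrix import BigMatrix
from numpywren.matrix_init import shard_matrix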
Example #4
 def test_if_run(self):
     X = np.random.randn(64)
     shard_sizes = (int(X.shape[0]/8),)
     X_sharded = BigMatrix("if_test", shape=X.shape,
                           shard_sizes=shard_sizes, write_header=True)
     O_sharded = BigMatrix("if_test_output", shape=X.shape,
                           shard_sizes=shard_sizes, write_header=True)
     X_sharded.free()
     shard_matrix(X_sharded, X)
     f = frontend.lpcompile(f1_if)
     p = f(X_sharded, O_sharded, X_sharded.num_blocks(0))
     num_cores = 1
     executor = fs.ProcessPoolExecutor(num_cores)
     config = npw.config.default()
     p_ex = lp.LambdaPackProgram(p, config=config)
     p_ex.start()
     all_futures = []
     for i in range(num_cores):
         all_futures.append(executor.submit(
             job_runner.lambdapack_run, p_ex, pipeline_width=1, idle_timeout=5, timeout=60))
     p_ex.wait()
     time.sleep(5)
     p_ex.free()
     for i in range(X_sharded.num_blocks(0)):
         Ob = O_sharded.get_block(i)
         Xb = X_sharded.get_block(i)
         if ((i % 2) == 0):
             assert(np.allclose(Ob, 1*Xb))
         else:
             assert(np.allclose(Ob, 2*Xb))
Example #5
def test_cholesky_multiprocess():
    X = np.random.randn(128, 128)
    A = X.dot(X.T) + 1e9 * np.eye(X.shape[0])
    shard_size = 8
    shard_sizes = (shard_size, shard_size)
    A_sharded = BigMatrix("job_runner_test",
                          shape=A.shape,
                          shard_sizes=shard_sizes,
                          write_header=True)
    A_sharded.free()
    shard_matrix(A_sharded, A)
    program, meta = cholesky(A_sharded)
    executor = fs.ProcessPoolExecutor(8)
    print("starting program")
    program.start()
    futures = []
    for i in range(8):
        future = executor.submit(job_runner.lambdapack_run,
                                 program,
                                 timeout=25)
        futures.append(future)
    print("Waiting for futures")
    fs.wait(futures)
    [f.result() for f in futures]
    futures = []
    for i in range(8):
        future = executor.submit(job_runner.lambdapack_run,
                                 program,
                                 timeout=25)
        futures.append(future)
    print("Waiting for futures..again")
    fs.wait(futures)
    [f.result() for f in futures]
    print("great success!")
    return 0
def test_bdfac_truncated():
    N = 16
    shard_size = 4
    shard_sizes = (shard_size, shard_size)
    np.random.seed(0)
    X = np.random.randn(N, N)
    U, S, V = bdfac_python(X, block_size=shard_size)
    svd_bdfac = np.linalg.svd(S, compute_uv=False)
    svd_local = np.linalg.svd(X, compute_uv=False)
    print(svd_bdfac)
    print(svd_local)
    assert (np.allclose(svd_bdfac, svd_local))
    X_sharded = BigMatrix("BDFAC_input_X",
                          shape=X.shape,
                          shard_sizes=shard_sizes,
                          write_header=True)
    N_blocks = X_sharded.num_blocks(0)
    shard_matrix(X_sharded, X)
    program, meta = bdfac(X_sharded, truncate=2)
    executor = fs.ProcessPoolExecutor(1)
    program.start()
    executor.submit(job_runner.lambdapack_run,
                    program,
                    timeout=200,
                    idle_timeout=200,
                    pipeline_width=1)
    program.wait()
    print("returned..")
def test_gemm():
    size = 64
    # np.random.seed(0)
    A = np.random.randn(size, size)
    B = np.random.randn(size, size)
    C = np.dot(A, B)
    shard_sizes = (16, 16)
    A_sharded = BigMatrix("Gemm_test_A",
                          shape=A.shape,
                          shard_sizes=shard_sizes,
                          write_header=True)
    B_sharded = BigMatrix("Gemm_test_B",
                          shape=B.shape,
                          shard_sizes=shard_sizes,
                          write_header=True)
    shard_matrix(A_sharded, A)
    shard_matrix(B_sharded, B)
    program, meta = gemm(A_sharded, B_sharded)
    program.start()
    job_runner.lambdapack_run(program,
                              timeout=60,
                              idle_timeout=6,
                              pipeline_width=3)
    program.wait()
    program.free()
    C_sharded = meta["outputs"][0]
    C_npw = C_sharded.numpy()
    assert (np.allclose(C_npw, C))
    return
def test_gemm_lambda():
    size = 32
    A = np.random.randn(size, size)
    B = np.random.randn(size, size)
    C = np.dot(A, B)
    shard_sizes = (8, 8)
    A_sharded = BigMatrix("Gemm_test_A",
                          shape=A.shape,
                          shard_sizes=shard_sizes,
                          write_header=True)
    B_sharded = BigMatrix("Gemm_test_B",
                          shape=B.shape,
                          shard_sizes=shard_sizes,
                          write_header=True)
    shard_matrix(A_sharded, A)
    shard_matrix(B_sharded, B)
    program, meta = gemm(A_sharded, B_sharded)
    executor = fs.ProcessPoolExecutor(1)
    program.start()
    run_program_in_pywren(program)
    program.wait()
    program.free()
    C_sharded = meta["outputs"][0]
    C_npw = C_sharded.numpy()
    assert (np.allclose(C_npw, C))
    return
def test_qr_lambda():
    N = 16
    shard_size = 8
    shard_sizes = (shard_size, shard_size)
    X = np.random.randn(N, N)
    X_sharded = BigMatrix("QR_input_X", shape=X.shape,
                          shard_sizes=shard_sizes, write_header=True)
    N_blocks = X_sharded.num_blocks(0)
    shard_matrix(X_sharded, X)
    program, meta = qr(X_sharded)
    print(program.hash)
    program.start()
    print("starting program...")
    futures = run_program_in_pywren(program, num_workers=1)
    # futures[0].result()
    program.wait()
    program.free()
    Rs = meta["outputs"][0]
    R_remote = Rs.get_block(N_blocks - 1, N_blocks - 1, 0)
    R_local = np.linalg.qr(X)[1][-shard_size:, -shard_size:]
    sign_matrix_local = np.eye(R_local.shape[0])
    sign_matrix_remote = np.eye(R_local.shape[0])
    sign_matrix_local[np.where(np.diag(R_local) <= 0)] *= -1
    sign_matrix_remote[np.where(np.diag(R_remote) <= 0)] *= -1
    # make the signs match
    R_remote *= np.diag(sign_matrix_remote)[:, np.newaxis]
    R_local *= np.diag(sign_matrix_local)[:, np.newaxis]
    assert(np.allclose(R_local, R_remote))
Example #10
    def test_bdfac(self):
        N = 8
        shard_size = 2
        shard_sizes = (shard_size, shard_size)
        X = np.random.randn(8, 8)
        X_sharded = BigMatrix("BDFAC_input_X", shape=X.shape,
                              shard_sizes=shard_sizes, write_header=True)
        shard_matrix(X_sharded, X)
        N_blocks = X_sharded.num_blocks(0)
        b_fac = 2
        num_tree_levels = max(
            int(np.ceil(np.log2(X_sharded.num_blocks(0))/np.log2(b_fac))), 1)

        async def parent_fn(self, loop, *block_idxs):
            if (block_idxs[-1] == 0 and block_idxs[-2] == 0):
                return await X_sharded.get_block_async(None, *block_idxs[:-2])
        VLs = BigMatrix("VL", shape=(num_tree_levels, N, N), shard_sizes=(
            1, shard_size, shard_size), write_header=True, safe=False)
        TLs = BigMatrix("TL", shape=(num_tree_levels, N, N), shard_sizes=(
            1, shard_size, shard_size), write_header=True, safe=False)
        VRs = BigMatrix("VR", shape=(num_tree_levels, N, N), shard_sizes=(
            1, shard_size, shard_size), write_header=True, safe=False)
        TRs = BigMatrix("TR", shape=(num_tree_levels, N, N), shard_sizes=(
            1, shard_size, shard_size), write_header=True, safe=False)
        Rs = BigMatrix("R", shape=(num_tree_levels, N, N), shard_sizes=(
            1, shard_size, shard_size), write_header=True, safe=False)
        S0 = BigMatrix("S0", shape=(N, N, N, num_tree_levels*shard_size),
                       shard_sizes=(shard_size, shard_size, shard_size, shard_size),
                       write_header=True, parent_fn=parent_fn, safe=False)
        S1 = BigMatrix("S1", shape=(N, N, N, num_tree_levels*shard_size),
                       shard_sizes=(shard_size, shard_size, shard_size, shard_size),
                       write_header=True, parent_fn=parent_fn, safe=False)
        Sigma = BigMatrix("Sigma", shape=(num_tree_levels, N, N), shard_sizes=(
            1, shard_size, shard_size), write_header=True, safe=False, parent_fn=parent_fn)
        pc = frontend.lpcompile(BDFAC)(
            VLs, TLs, Rs, Sigma, VRs, TRs, S0, S1, N_blocks, 0)
def test_tsqr():
    np.random.seed(1)
    size = 256
    shard_size = 32
    X = np.random.randn(size, shard_size)
    Q, R = np.linalg.qr(X)
    q0, r0 = np.linalg.qr(X[:2, :2])
    q1, r1 = np.linalg.qr(X[2:, :2])
    r2 = np.linalg.qr(np.vstack((r0, r1)))[1]
    shard_sizes = (shard_size, X.shape[1])
    X_sharded = BigMatrix("tsqr_test_X", shape=X.shape,
                          shard_sizes=shard_sizes, write_header=True)
    shard_matrix(X_sharded, X)
    program, meta = tsqr(X_sharded)
    executor = fs.ProcessPoolExecutor(1)
    print("starting program")
    program.start()
    future = executor.submit(job_runner.lambdapack_run,
                             program, timeout=10, idle_timeout=6)
    program.wait()
    program.free()
    R_sharded = meta["outputs"][0]
    num_tree_levels = int(np.log(np.ceil(size/shard_size))/np.log(2))
    print("num_tree_levels", num_tree_levels)
    R_npw = R_sharded.get_block(max(num_tree_levels, 0), 0)
    sign_matrix_local = np.eye(R.shape[0])
    sign_matrix_remote = np.eye(R.shape[0])
    sign_matrix_local[np.where(np.diag(R) <= 0)] *= -1
    sign_matrix_remote[np.where(np.diag(R_npw) <= 0)] *= -1
    # make the signs match
    R_npw *= np.diag(sign_matrix_remote)[:, np.newaxis]
    R *= np.diag(sign_matrix_local)[:, np.newaxis]
    assert(np.allclose(R_npw, R))
def test_qr():
    N = 28
    shard_size = 7
    shard_sizes = (shard_size, shard_size)
    X = np.random.randn(N, N)
    X_sharded = BigMatrix("QR_input_X", shape=X.shape,
                          shard_sizes=shard_sizes, write_header=True)
    N_blocks = X_sharded.num_blocks(0)
    shard_matrix(X_sharded, X)
    program, meta = qr(X_sharded)
    executor = fs.ProcessPoolExecutor(2)
    program.start()
    print("starting program...")
    future = executor.submit(job_runner.lambdapack_run,
                             program, timeout=60, idle_timeout=6, pipeline_width=1)
    future = executor.submit(job_runner.lambdapack_run,
                             program, timeout=60, idle_timeout=6, pipeline_width=1)
    future = executor.submit(job_runner.lambdapack_run,
                             program, timeout=60, idle_timeout=6, pipeline_width=1)
    program.wait()
    program.free()
    Rs = meta["outputs"][0]
    R_remote = Rs.get_block(N_blocks - 1, N_blocks - 1, 0)
    R_local = np.linalg.qr(X)[1][-shard_size:, -shard_size:]
    sign_matrix_local = np.eye(R_local.shape[0])
    sign_matrix_remote = np.eye(R_local.shape[0])
    sign_matrix_local[np.where(np.diag(R_local) <= 0)] *= -1
    sign_matrix_remote[np.where(np.diag(R_remote) <= 0)] *= -1
    # make the signs match
    R_remote *= np.diag(sign_matrix_remote)[:, np.newaxis]
    R_local *= np.diag(sign_matrix_local)[:, np.newaxis]
    assert(np.allclose(R_local, R_remote))
Example #13
def test_cholesky_lambda():
    X = np.random.randn(128, 128)
    A = X.dot(X.T) + np.eye(X.shape[0])
    shard_size = 128
    shard_sizes = (shard_size, shard_size)
    A_sharded = BigMatrix("job_runner_test",
                          shape=A.shape,
                          shard_sizes=shard_sizes,
                          write_header=True)
    A_sharded.free()
    shard_matrix(A_sharded, A)
    program, meta = cholesky(A_sharded)
    executor = fs.ProcessPoolExecutor(1)
    print("starting program")
    program.start()
    pwex = pywren.default_executor()
    futures = pwex.map(
        lambda x: job_runner.lambdapack_run(
            program, timeout=60, idle_timeout=6), range(16))
    pywren.wait(futures)
    print("RESULTSSS")
    print([f.result() for f in futures])
    futures = pwex.map(
        lambda x: job_runner.lambdapack_run(
            program, timeout=60, idle_timeout=6), range(16))
    program.wait()
    #program.free()
    L_sharded = meta["outputs"][0]
    L_npw = L_sharded.numpy()
    L = np.linalg.cholesky(A)
    assert (np.allclose(L_npw, L))
    print("great success!")
Example #14
    def test_matmul(self):
        size = 4
        shard_size = 2
        np.random.seed(0)
        A = np.random.randn(size, size)
        B = np.random.randn(size, size)
        C = np.dot(A, B)

        shard_sizes = (shard_size, shard_size)
        A_sharded = BigMatrix("matmul_test_A",
                              shape=A.shape,
                              shard_sizes=shard_sizes,
                              write_header=True)
        A_sharded.free()
        shard_matrix(A_sharded, A)
        B_sharded = BigMatrix("matmul_test_B",
                              shape=B.shape,
                              shard_sizes=shard_sizes,
                              write_header=True)
        B_sharded.free()
        shard_matrix(B_sharded, B)
        Temp = BigMatrix("matmul_test_Temp",
                         shape=[A.shape[0], B.shape[1], B.shape[0], 100],
                         shard_sizes=[
                             A_sharded.shard_sizes[0],
                             B_sharded.shard_sizes[1], 1, 1
                         ],
                         write_header=True)
        C_sharded = BigMatrix("matmul_test_C",
                              shape=C.shape,
                              shard_sizes=shard_sizes,
                              write_header=True)

        b_fac = 2
        config = npw.config.default()
        compiled_matmul = frontend.lpcompile(matmul)
        program = compiled_matmul(A_sharded, B_sharded,
                                  A_sharded.num_blocks(0),
                                  A_sharded.num_blocks(1),
                                  B_sharded.num_blocks(1), b_fac, Temp,
                                  C_sharded)
        program_executable = lp.LambdaPackProgram(program, config=config)
        program_executable.start()
        job_runner.lambdapack_run(program_executable,
                                  pipeline_width=1,
                                  idle_timeout=5,
                                  timeout=60)
        executor = fs.ThreadPoolExecutor(1)
        all_futures = [
            executor.submit(job_runner.lambdapack_run,
                            program_executable,
                            pipeline_width=1,
                            idle_timeout=5,
                            timeout=60)
        ]
        program_executable.wait()
        program_executable.free()
        C_remote = C_sharded.numpy()
        assert (np.allclose(C, C_remote))
Example #15
 def test_single_shard_transpose_matrix(self):
     X = np.random.randn(128, 128)
     X_sharded = BigMatrix("test_0", shape=X.shape, shard_sizes=X.shape)
     shard_matrix(X_sharded, X)
     X_sharded_local = X_sharded.T.numpy()
     print(X_sharded_local)
     print(X.T)
     assert(np.all(X_sharded_local == X.T))
Example #16
 def test_complex_slices(self):
     X = np.random.randn(21, 67, 53)
     shard_sizes = [21, 16, 11]
     X_sharded = BigMatrix("test_5", shape=X.shape, shard_sizes=shard_sizes)
     shard_matrix(X_sharded, X)
     assert (np.all(X[:, :16, :11] == X_sharded.submatrix(0, 0, 0).numpy()))
     assert (np.all(X[:, 64:67,
                      44:53] == X_sharded.submatrix(0, 4, 4).numpy()))
Example #17
 def test_multiple_shard_transpose_matrix(self):
     X = np.random.randn(128, 128)
     shard_sizes = tuple(map(int, np.array(X.shape)/2))
     X_sharded = BigMatrix("test_1", shape=X.shape, shard_sizes=shard_sizes)
     shard_matrix(X_sharded, X)
     X_sharded_local = X_sharded.T.numpy()
     X_sharded.free()
     assert(np.all(X.T == X_sharded_local))
Example #18
 def test_single_multiaxis(self):
     X = np.random.randn(8, 8, 8, 8)
     X_sharded = BigMatrix("multiaxis", shape=X.shape, shard_sizes=X.shape)
     print("BLOCK_IDXS", X_sharded.block_idxs)
     shard_matrix(X_sharded, X)
     print("BLOCK_IDXS_EXIST", X_sharded.block_idxs_exist)
     X_sharded_local = X_sharded.numpy()
     X_sharded.free()
     assert (np.all(X_sharded_local == X))
Example #19
 def test_simple_slices(self):
     X = np.random.randn(128, 128)
     shard_sizes = [32, 32]
     X_sharded = BigMatrix("test_3", shape=X.shape, shard_sizes=shard_sizes)
     shard_matrix(X_sharded, X)
     assert(np.all(X[0:64] == X_sharded.submatrix([2]).numpy()))
     assert(np.all(X[64:128] == X_sharded.submatrix([2, None]).numpy()))
     assert(np.all(X[:, 0:96] == X_sharded.submatrix(None, [0, 3]).numpy()))
     assert(np.all(X[:, 96:128] == X_sharded.submatrix(
         None, [3, None]).numpy()))
Example #20
 def test_sharded_multiaxis(self):
     X = np.random.randn(8, 8, 8, 8)
     shard_sizes = tuple(map(int, np.array(X.shape)/2))
     X_sharded = BigMatrix("multiaxis_2", shape=X.shape,
                           shard_sizes=shard_sizes)
     shard_matrix(X_sharded, X)
     print("BLOCK_IDXS", X_sharded.block_idxs)
     X_sharded_local = X_sharded.numpy()
     print(X_sharded.free())
     assert(np.all(X_sharded_local == X))
Example #21
 def test_elemwise_uop(self, f, f_numpy):
     X = np.random.randn(16, 16)
     pwex = pywren.default_executor()
     X_sharded = BigMatrix("{0}_uop_test".format(f),
                           shape=X.shape,
                           shard_sizes=X.shape)
     shard_matrix(X_sharded, X)
     res_sharded = f(pwex, X_sharded)
     res = res_sharded.numpy()
     res_sharded.free()
     assert (np.allclose(f_numpy(X), res))
def test_bdfac():
    N = 16
    shard_size = 4
    shard_sizes = (shard_size, shard_size)
    np.random.seed(0)
    X = np.random.randn(N, N)
    U, S, V = bdfac_python(X, block_size=shard_size)
    svd_bdfac = np.linalg.svd(S, compute_uv=False)
    svd_local = np.linalg.svd(X, compute_uv=False)
    print(svd_bdfac)
    print(svd_local)
    assert(np.allclose(svd_bdfac, svd_local))
    X_sharded = BigMatrix("BDFAC_input_X", shape=X.shape,
                          shard_sizes=shard_sizes, write_header=True)
    N_blocks = X_sharded.num_blocks(0)
    shard_matrix(X_sharded, X)
    program, meta = bdfac(X_sharded)
    executor = fs.ProcessPoolExecutor(1)
    program.start()
    job_runner.lambdapack_run(program, timeout=200,
                              idle_timeout=200, pipeline_width=1)
    program.wait()
    print("returned..")
    program.free()
    R = meta["outputs"][1]
    L = meta["outputs"][0]
    print("====="*10)
    R_remote = R.get_block(N_blocks - 1, 0, N_blocks - 1)
    R_local = S[-shard_size:, -shard_size:]
    print("original", R_local)
    print("remote", R_remote)
    print('==='*10)
    sign_matrix_local = np.eye(R_local.shape[0])
    sign_matrix_remote = np.eye(R_local.shape[0])
    sign_matrix_local[np.where(np.diag(R_local) <= 0)] *= -1
    sign_matrix_remote[np.where(np.diag(R_remote) <= 0)] *= -1
    # make the signs match
    R_remote *= np.diag(sign_matrix_remote)[:, np.newaxis]
    R_local *= np.diag(sign_matrix_local)[:, np.newaxis]
    print(R_local)
    print(R_remote)
    assert(np.allclose(np.abs(R_local), np.abs(R_remote)))
    fac = np.block([
        [R.get_block(0, 2, 0), L.get_block(0, 2, 1), np.zeros(shard_sizes), np.zeros(shard_sizes)],
        [np.zeros(shard_sizes), R.get_block(1, 2, 1), L.get_block(1, 1, 2), np.zeros(shard_sizes)],
        [np.zeros(shard_sizes), np.zeros(shard_sizes), R.get_block(2, 1, 2), L.get_block(2, 0, 3)],
        [np.zeros(shard_sizes), np.zeros(shard_sizes), np.zeros(shard_sizes), R.get_block(3, 0, 3)],
    ])

    svd_remote = np.linalg.svd(fac, compute_uv=False)
    svd_local = np.linalg.svd(X, compute_uv=False)
    assert(np.allclose(svd_remote, svd_local))
    return 0
Example #23
 def test_multiple_shard_index_get(self):
     X = np.random.randn(128, 128)
     shard_sizes = [64, 64]
     X_sharded = BigMatrix("test_2", shape=X.shape, shard_sizes=shard_sizes)
     shard_matrix(X_sharded, X)
     assert (np.all(X[0:64, 0:64] == X_sharded.submatrix(0).get_block(0)))
     assert (np.all(X[64:128,
                      64:128] == X_sharded.submatrix(1, 1).get_block()))
     assert (np.all(X[0:64,
                      64:128] == X_sharded.submatrix(0, 1).get_block()))
     assert (np.all(X[64:128,
                      0:64] == X_sharded.submatrix(None, 0).get_block(1)))
Example #24
 def test_lambdav(self):
     X = np.random.randn(18, 18)
     X = X.dot(X.T)
     X_sharded = BigSymmetricMatrix("lambdav",
                                    shape=X.shape,
                                    shard_sizes=[4, 4],
                                    lambdav=7.0)
     shard_matrix(X_sharded, X)
     X += 7.0 * np.eye(X.shape[0])
     X_local = X_sharded.numpy()
     X_sharded.free()
     assert (np.all(np.isclose(X, X_local)))
     os.system("rm -rf /dev/shm/*")
Example #25
 def test_step_slices(self):
     X = np.random.randn(128, 128)
     shard_sizes = [16, 16]
     X_sharded = BigMatrix("test_4", shape=X.shape, shard_sizes=shard_sizes)
     shard_matrix(X_sharded, X)
     assert (np.all(
         X[::32] == X_sharded.submatrix([None, None, 2]).numpy()[::16]))
     assert (np.all(
         X[16::32] == X_sharded.submatrix([1, None, 2]).numpy()[::16]))
     assert (np.all(X[:, 0:96:64] == X_sharded.submatrix(
         None, [0, 6, 4]).numpy()[:, ::16]))
     assert (np.all(X[:, 96:128:64] == X_sharded.submatrix(
         None, [6, 8, 4]).numpy()[:, ::16]))
Example #26
 def test_single_shard_matrix_multiply(self):
     X = np.random.randn(16, 16)
     X_sharded = BigMatrix("gemm_test_0",
                           shape=X.shape,
                           shard_sizes=X.shape)
     shard_matrix(X_sharded, X)
     pwex = pywren.lambda_executor()
     XXT_sharded = binops.gemm(pwex, X_sharded, X_sharded.T,
                               X_sharded.bucket, 1)
     XXT_sharded_local = XXT_sharded.numpy()
     XXT = X.dot(X.T)
     X_sharded.free()
     XXT_sharded.free()
     assert (np.all(np.isclose(XXT, XXT_sharded_local)))
     os.system("rm -rf /dev/shm/*")
Example #27
 def test_if_static(self):
     X = np.random.randn(64, 64)
     shard_sizes = (int(X.shape[0]/8), X.shape[1])
     X_sharded = BigMatrix("if_test", shape=X.shape,
                           shard_sizes=shard_sizes, write_header=True)
     O_sharded = BigMatrix("if_test_output", shape=X.shape,
                           shard_sizes=shard_sizes, write_header=True)
     X_sharded.free()
     shard_matrix(X_sharded, X)
     f = frontend.lpcompile(f1_if)
     p = f(X_sharded, O_sharded, X_sharded.num_blocks(0))
     assert(p.starters == p.find_terminators())
     for s, var_values in p.starters:
         if(var_values['i'] % 2 == 0):
             assert s == 0
         else:
             assert s == 1
Example #28
 def test_multiple_shard_cholesky(self):
     np.random.seed(1)
     size = 128
     shard_size = 64
     np.random.seed(1)
     print("Generating X")
     executor = fs.ProcessPoolExecutor(cpu_count)
     X = np.random.randn(size, 128)
     print("Generating A")
     A = X.dot(X.T) + np.eye(X.shape[0])
     y = np.random.randn(size)
     pwex = pywren.default_executor()
     print("sharding A")
     shard_sizes = (shard_size, shard_size)
     A_sharded = BigSymmetricMatrix("cholesky_test_A",
                                    shape=A.shape,
                                    shard_sizes=shard_sizes)
     y_sharded = BigMatrix("cholesky_test_y",
                           shape=y.shape,
                           shard_sizes=shard_sizes[:1])
     A_sharded.free()
     y_sharded.free()
     A_sharded = BigSymmetricMatrix("cholesky_test_A",
                                    shape=A.shape,
                                    shard_sizes=shard_sizes)
     y_sharded = BigMatrix("cholesky_test_y",
                           shape=y.shape,
                           shard_sizes=shard_sizes[:1])
     t = time.time()
     shard_matrix(A_sharded, A, executor=executor)
     e = time.time()
     print("A_sharded", e - t)
     t = time.time()
     shard_matrix(y_sharded, y, executor=executor)
     e = time.time()
     print("y_sharded time", e - t)
     print("Computing LL^{T}")
     L = cholesky(A)
     print(L)
     L_sharded = uops.chol(pwex, A_sharded)
     L_sharded_local = L_sharded.numpy()
     print(L_sharded_local)
     print(L)
     print("L_{infty} difference ", np.max(np.abs(L_sharded_local - L)))
     assert (np.allclose(L, L_sharded_local))
     os.system("rm -rf /dev/shm/*")
Example #29
 def test_single_shard_cholesky(self):
     X = np.random.randn(4, 4)
     A = X.dot(X.T) + np.eye(X.shape[0])
     y = np.random.randn(16)
     pwex = pywren.default_executor()
     A_sharded = BigMatrix("cholesky_test_A",
                           shape=A.shape,
                           shard_sizes=A.shape)
     y_sharded = BigMatrix("cholesky_test_y",
                           shape=y.shape,
                           shard_sizes=y.shape)
     shard_matrix(A_sharded, A)
     shard_matrix(y_sharded, y)
     L_sharded = uops.chol(pwex, A_sharded)
     L_sharded_local = L_sharded.numpy()
     L = cholesky(A)
     assert (np.allclose(L, L_sharded_local))
     os.system("rm -rf /dev/shm/*")
def test_cholesky_lambda():
    X = np.random.randn(64, 64)
    A = X.dot(X.T) + np.eye(X.shape[0])
    shard_size = 16
    shard_sizes = (shard_size, shard_size)
    A_sharded = BigMatrix("cholesky_test_A", shape=A.shape,
                          shard_sizes=shard_sizes, write_header=True)
    A_sharded.free()
    shard_matrix(A_sharded, A)
    program, meta = cholesky(A_sharded)
    program.start()
    futures = run_program_in_pywren(program)
    program.wait()
    program.free()
    L_sharded = meta["outputs"][0]
    L_npw = L_sharded.numpy()
    L = np.linalg.cholesky(A)
    assert(np.allclose(L_npw, L))
    print("great success!")