def test_if_run(self):
    """Run the compiled f1_if program end-to-end and verify per-block outputs.

    f1_if scales even-indexed input blocks by 1 and odd-indexed blocks by 2.
    """
    data = np.random.randn(64)
    shard_sizes = (int(data.shape[0] / 8),)
    X_sharded = BigMatrix("if_test", shape=data.shape,
                          shard_sizes=shard_sizes, write_header=True)
    O_sharded = BigMatrix("if_test_output", shape=data.shape,
                          shard_sizes=shard_sizes, write_header=True)
    X_sharded.free()
    shard_matrix(X_sharded, data)
    compiled = frontend.lpcompile(f1_if)
    program = compiled(X_sharded, O_sharded, X_sharded.num_blocks(0))
    num_cores = 1
    executor = fs.ProcessPoolExecutor(num_cores)
    config = npw.config.default()
    program_executable = lp.LambdaPackProgram(program, config=config)
    program_executable.start()
    all_futures = [
        executor.submit(job_runner.lambdapack_run, program_executable,
                        pipeline_width=1, idle_timeout=5, timeout=60)
        for _ in range(num_cores)
    ]
    program_executable.wait()
    time.sleep(5)
    program_executable.free()
    for block_idx in range(X_sharded.num_blocks(0)):
        out_block = O_sharded.get_block(block_idx)
        in_block = X_sharded.get_block(block_idx)
        if block_idx % 2 == 0:
            assert np.allclose(out_block, 1 * in_block)
        else:
            assert np.allclose(out_block, 2 * in_block)
def test_bdfac(self):
    """Compile the blocked BDFAC program over an 8x8 sharded input.

    Builds the input matrix plus every intermediate/output BigMatrix the
    BDFAC kernel needs, then runs lpcompile. Only program construction is
    exercised here — the compiled program `pc` is never executed.
    """
    N = 8
    shard_size = 2
    shard_sizes = (shard_size, shard_size)
    X = np.random.randn(8, 8)
    X_sharded = BigMatrix("BDFAC_input_X", shape=X.shape,
                          shard_sizes=shard_sizes, write_header=True)
    shard_matrix(X_sharded, X)
    N_blocks = X_sharded.num_blocks(0)
    b_fac = 2
    # Depth of the reduction tree: ceil(log_{b_fac}(block count)), at least 1.
    num_tree_levels = max(
        int(np.ceil(np.log2(X_sharded.num_blocks(0))/np.log2(b_fac))), 1)

    async def parent_fn(self, loop, *block_idxs):
        # Seed blocks whose last two indices are (0, 0) from the input
        # matrix; all other blocks have no parent (implicit None).
        if (block_idxs[-1] == 0 and block_idxs[-2] == 0):
            return await X_sharded.get_block_async(None, *block_idxs[:-2])

    # Per-level factorization intermediates (one tree level per leading axis).
    # safe=False — presumably skips existence/consistency checks; confirm
    # against BigMatrix's constructor.
    VLs = BigMatrix("VL", shape=(num_tree_levels, N, N), shard_sizes=(
        1, shard_size, shard_size), write_header=True, safe=False)
    TLs = BigMatrix("TL", shape=(num_tree_levels, N, N), shard_sizes=(
        1, shard_size, shard_size), write_header=True, safe=False)
    VRs = BigMatrix("VR", shape=(num_tree_levels, N, N), shard_sizes=(
        1, shard_size, shard_size), write_header=True, safe=False)
    TRs = BigMatrix("TR", shape=(num_tree_levels, N, N), shard_sizes=(
        1, shard_size, shard_size), write_header=True, safe=False)
    Rs = BigMatrix("R", shape=(num_tree_levels, N, N), shard_sizes=(
        1, shard_size, shard_size), write_header=True, safe=False)
    # 4-D scratch matrices, lazily populated from X via parent_fn.
    S0 = BigMatrix("S0", shape=(N, N, N, num_tree_levels*shard_size),
                   shard_sizes=(shard_size, shard_size, shard_size,
                                shard_size),
                   write_header=True, parent_fn=parent_fn, safe=False)
    S1 = BigMatrix("S1", shape=(N, N, N, num_tree_levels*shard_size),
                   shard_sizes=(shard_size, shard_size, shard_size,
                                shard_size),
                   write_header=True, parent_fn=parent_fn, safe=False)
    Sigma = BigMatrix("Sigma", shape=(num_tree_levels, N, N), shard_sizes=(
        1, shard_size, shard_size), write_header=True, safe=False,
        parent_fn=parent_fn)
    # Compile BDFAC against the sharded operands; the trailing 0 is
    # presumably a starting level/offset — confirm against BDFAC's signature.
    pc = frontend.lpcompile(BDFAC)(
        VLs, TLs, Rs, Sigma, VRs, TRs, S0, S1, N_blocks, 0)
def test_matmul(self):
    """Run the compiled blocked matmul program and compare against np.dot."""
    size = 4
    shard_size = 2
    np.random.seed(0)
    A = np.random.randn(size, size)
    B = np.random.randn(size, size)
    C = np.dot(A, B)  # local ground truth
    shard_sizes = (shard_size, shard_size)
    A_sharded = BigMatrix("matmul_test_A", shape=A.shape,
                          shard_sizes=shard_sizes, write_header=True)
    A_sharded.free()
    shard_matrix(A_sharded, A)
    B_sharded = BigMatrix("matmul_test_B", shape=B.shape,
                          shard_sizes=shard_sizes, write_header=True)
    B_sharded.free()
    shard_matrix(B_sharded, B)
    # Scratch space for partial products; the trailing axes (B.shape[0], 100)
    # presumably index reduction steps — confirm against the matmul kernel.
    Temp = BigMatrix("matmul_test_Temp",
                     shape=[A.shape[0], B.shape[1], B.shape[0], 100],
                     shard_sizes=[A_sharded.shard_sizes[0],
                                  B_sharded.shard_sizes[1], 1, 1],
                     write_header=True)
    C_sharded = BigMatrix("matmul_test_C", shape=C.shape,
                          shard_sizes=shard_sizes, write_header=True)
    b_fac = 2
    config = npw.config.default()
    compiled_matmul = frontend.lpcompile(matmul)
    program = compiled_matmul(A_sharded, B_sharded,
                              A_sharded.num_blocks(0),
                              A_sharded.num_blocks(1),
                              B_sharded.num_blocks(1), b_fac, Temp, C_sharded)
    program_executable = lp.LambdaPackProgram(program, config=config)
    program_executable.start()
    # NOTE(review): this synchronous run likely executes the whole program
    # before the executor submission below, making that submission a no-op
    # worker — confirm whether one of the two invocations should be removed.
    job_runner.lambdapack_run(program_executable,
                              pipeline_width=1, idle_timeout=5, timeout=60)
    executor = fs.ThreadPoolExecutor(1)
    # NOTE(review): all_futures is never awaited or inspected.
    all_futures = [
        executor.submit(job_runner.lambdapack_run, program_executable,
                        pipeline_width=1, idle_timeout=5, timeout=60)
    ]
    program_executable.wait()
    program_executable.free()
    C_remote = C_sharded.numpy()
    assert (np.allclose(C, C_remote))
def test_if_static(self):
    """Statically inspect the compiled f1_if program's starter set.

    Checks that starters equal terminators and that each starter's
    expression index matches the parity of its loop variable `i`.
    """
    data = np.random.randn(64, 64)
    shard_sizes = (int(data.shape[0] / 8), data.shape[1])
    X_sharded = BigMatrix("if_test", shape=data.shape,
                          shard_sizes=shard_sizes, write_header=True)
    O_sharded = BigMatrix("if_test_output", shape=data.shape,
                          shard_sizes=shard_sizes, write_header=True)
    X_sharded.free()
    shard_matrix(X_sharded, data)
    compiled = frontend.lpcompile(f1_if)
    program = compiled(X_sharded, O_sharded, X_sharded.num_blocks(0))
    assert program.starters == program.find_terminators()
    for expr_idx, var_values in program.starters:
        expected = 0 if var_values['i'] % 2 == 0 else 1
        assert expr_idx == expected
def test_types_simple_2(self):
    """F3's declared variables should type-check to the expected const types."""
    parser, type_checker, f3_ast = frontend.lpcompile(F3)
    astor.dump_tree(f3_ast)  # smoke-check that the AST is dumpable
    expected_types = {
        'c': frontend.ConstFloatType,
        'd': frontend.ConstFloatType,
        'e': frontend.ConstIntType,
    }
    for name, expected in expected_types.items():
        assert type_checker.decl_types[name] == expected
def test_types_simple(self):
    """F2's declared variables should type-check to the expected const types."""
    parser, type_checker, f2_ast = frontend.lpcompile(F2)
    astor.dump_tree(f2_ast)  # smoke-check that the AST is dumpable
    expected_types = {
        'a': frontend.ConstFloatType,
        'b': frontend.ConstIntType,
    }
    for name, expected in expected_types.items():
        assert type_checker.decl_types[name] == expected
def test_types_for_loop_nested_if_statment(self):
    """Variables in F9's nested for/if structure stay LinearIntType."""
    parser, type_checker, f_ast = frontend.lpcompile(F9)
    for var_name in ('z', 'i'):
        assert type_checker.decl_types[var_name] == frontend.LinearIntType
def test_types_if_statement_no_err(self):
    """F7_no_err compiles cleanly and its declarations are const floats."""
    parser, type_checker, f_ast = frontend.lpcompile(F7_no_err)
    for var_name in ('f', 'd'):
        assert type_checker.decl_types[var_name] == frontend.ConstFloatType
def test_types_if_statement_err(self):
    """lpcompile(F7_err) must raise LambdaPackParsingException.

    Bug fix: the previous version only caught the exception and passed
    silently when NO exception was raised, so the test could never fail.
    Now we record whether the exception occurred and assert it.
    """
    raised = False
    try:
        parser, type_checker, f_ast = frontend.lpcompile(F7_err)
    except exceptions.LambdaPackParsingException:
        raised = True
    assert raised, "lpcompile(F7_err) should raise LambdaPackParsingException"
def test_types_compound_expr_3(self):
    """The compound return expression in F6 type-checks to a const float."""
    _parser, type_checker, _ast = frontend.lpcompile(F6)
    assert type_checker.return_node_type == frontend.ConstFloatType