# Module-level imports used by the tests below. ``utils`` and
# ``parallel_utils`` are the Cython extension modules under test (assumed
# here to come from the cykdtree package; adjust to the actual layout), and
# ``assert_equal``/``assert_less_equal`` are assumed to be numpy-aware
# assertion helpers provided elsewhere in this test suite.
import numpy as np
from mpi4py import MPI
from cykdtree import utils, parallel_utils


def test_parallel_split(ndim=2, npts=50):
    total_npts = npts
    comm = MPI.COMM_WORLD
    rank = comm.Get_rank()
    size = comm.Get_size()
    # Rank 0 generates the full point set; the other ranks receive their
    # share from py_parallel_distribute.
    if rank == 0:
        total_pts = np.random.rand(total_npts, ndim).astype('float64')
    else:
        total_pts = None
    pts, orig_idx = parallel_utils.py_parallel_distribute(total_pts)
    npts = pts.shape[0]
    # Split at the global median position (floor division so p stays an int).
    p = total_npts // 2 + total_npts % 2
    q, pivot_dim, piv, idx = parallel_utils.py_parallel_split(pts, p)
    assert_equal(idx.size, npts)
    total_pts = comm.bcast(total_pts, root=0)
    if npts == 0:
        assert_equal(q, -1)
    else:
        # The pivot should match the global median for odd counts and lie
        # below it for even counts.
        med = np.median(total_pts[:, pivot_dim])
        if (total_npts % 2):
            np.testing.assert_approx_equal(piv, med)
        else:
            np.testing.assert_array_less(piv, med)
        # Local points up to index q are <= pivot; the rest are strictly greater.
        if q >= 0:
            assert_less_equal(pts[idx[:(q + 1)], pivot_dim], piv)
            np.testing.assert_array_less(piv, pts[idx[(q + 1):], pivot_dim])
    # The parallel split should agree with the serial split on rank 0.
    if rank == 0:
        sq, sd, sidx = utils.py_split(total_pts)
        assert_equal(pivot_dim, sd)
        assert_equal(piv, total_pts[sidx[sq], sd])
def test_pivot(N=10, ndim=2):
    d = ndim - 1
    np.random.seed(10)
    pts = np.random.rand(N, ndim).astype('float64')
    q, idx = utils.py_pivot(pts, d)
    if (N == 0):
        np.testing.assert_equal(q, -1)
    else:
        piv = pts[idx[q], d]
        # Median-of-medians guarantee: at most 7N/10 + 6 points fall strictly
        # on either side of the selected pivot (floor division keeps the
        # original integer bound).
        nmax = (7 * N // 10 + 6)
        assert_less_equal(np.sum(pts[:, d] < piv), nmax)
        assert_less_equal(np.sum(pts[:, d] > piv), nmax)
def test_partition_given_pivot(N=10, ndim=2):
    d = ndim - 1
    np.random.seed(10)
    pts = np.random.rand(N, ndim).astype('float64')
    # Test both an arbitrary pivot value and the exact median along d.
    if N == 0:
        piv_list = [0.5]
    else:
        piv_list = [0.5, np.median(pts[:, d])]
    for piv in piv_list:
        q, idx = utils.py_partition_given_pivot(pts, d, piv)
        if (N == 0):
            assert_equal(q, -1)
        else:
            # q indexes the last point with coordinate <= piv; everything
            # before it is strictly less, everything after strictly greater.
            assert_less_equal(pts[idx[q], d], piv)
            np.testing.assert_array_less(pts[idx[:q], d], piv)
            np.testing.assert_array_less(piv, pts[idx[(q + 1):], d])
def test_kdtree_parallel_distribute(ndim=2, npts=50):
    total_npts = npts
    comm = MPI.COMM_WORLD
    rank = comm.Get_rank()
    size = comm.Get_size()
    if rank == 0:
        total_pts = np.random.rand(total_npts, ndim).astype('float64')
    else:
        total_pts = None
    pts, idx, le, re, ple, pre = parallel_utils.py_kdtree_parallel_distribute(
        total_pts)
    total_pts = comm.bcast(total_pts, root=0)
    # Each rank's points must map back to the original array and fall inside
    # that rank's left/right edges in every dimension.
    assert_equal(pts.shape[0], idx.size)
    np.testing.assert_array_equal(pts, total_pts[idx, :])
    for d in range(ndim):
        assert_less_equal(pts[:, d], re[d])
        assert_less_equal(le[d], pts[:, d])
def test_redistribute_split(ndim=2, npts=50, split_left=None):
    total_npts = npts
    comm = MPI.COMM_WORLD
    rank = comm.Get_rank()
    size = comm.Get_size()
    # split_rank must be an integer rank, so use floor division.
    if split_left is None:
        split_rank = -1
    else:
        split_rank = size // 2
        if split_left:
            split_rank += size % 2
    if rank == 0:
        total_pts = np.random.rand(total_npts, ndim).astype('float64')
    else:
        total_pts = None
    pts, orig_idx = parallel_utils.py_parallel_distribute(total_pts)
    npts = pts.shape[0]
    total_pts = comm.bcast(total_pts, root=0)
    new_pts, new_idx, sidx, sdim, sval = parallel_utils.py_redistribute_split(
        pts, orig_idx, split_rank=split_rank)
    # Assume the split defaults to the left when split_rank == -1.
    if split_rank < 0:
        split_rank = size // 2 + size % 2
    assert_equal(new_pts.shape[0], new_idx.size)
    assert_equal(new_pts.shape[1], ndim)
    np.testing.assert_array_equal(new_pts, total_pts[new_idx, :])
    # Ranks below the split hold points <= sval; the rest hold points > sval.
    if rank < split_rank:
        assert_less_equal(new_pts[:, sdim], sval)
    else:
        np.testing.assert_array_less(sval, new_pts[:, sdim])
    # The split value should match the global median for odd counts and lie
    # below it for even counts.
    med = np.median(total_pts[:, sdim])
    if (total_npts % 2):
        np.testing.assert_approx_equal(sval, med)
    else:
        np.testing.assert_array_less(sval, med)
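
# A minimal sketch of a manual runner, assuming this module is executed
# directly under MPI (e.g. ``mpiexec -n 4 python test_parallel_utils.py``;
# the file name is an assumption). The serial tests run independently on
# every rank, while the parallel ones coordinate through MPI.COMM_WORLD.
if __name__ == '__main__':
    test_pivot()
    test_partition_given_pivot()
    test_parallel_split()
    test_kdtree_parallel_distribute()
    test_redistribute_split()
    test_redistribute_split(split_left=True)
    test_redistribute_split(split_left=False)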