def _pyske_bcast(input_list: PList):
    """Broadcast the data held at processor 0 to every processor.

    :param input_list: a parallel list whose useful content is the
        partition at processor 0.
    :return: a parallel list where every processor holds a copy of
        processor 0's partition.
    """
    nprocs = len(par.procs())
    size = input_list.distribution[0]
    # Processor 0 replicates its whole partition once per processor;
    # every other processor contributes nothing.
    replicated = input_list.get_partition().mapi(
        lambda pid, chunk: [chunk for _ in par.procs()] if pid == 0 else [])
    # All the copies currently live at processor 0.
    copies = replicated.flatten(
        Distribution([nprocs if pid == 0 else 0 for pid in par.procs()]))
    # Deliver exactly one copy to each processor, then unfold it locally.
    scattered = copies.distribute(Distribution([1 for _ in par.procs()]))
    return scattered.flatten(Distribution([size for _ in par.procs()]))
def fft(input_list: PList[float]) -> PList[complex]:  # pylint: disable=unsubscriptable-object
    """
    Return the Discrete Fourier Transform.

    Examples::

        >>> from pyske.core import PList
        >>> fft(PList.init(lambda _: 1.0, 128)).to_seq()[0]
        (128+0j)

    :param input_list: a PySke list of floating point numbers.
        Pre-condition: both its size and the number of processors
        are powers of 2.
    :return: a parallel list of complex numbers.
    """
    size = len(input_list)
    log2_size = int(math.log2(size))
    nprocs = len(par.procs())
    log2_nprocs = int(math.log2(nprocs))
    assert size == 2**log2_size
    assert nprocs == 2**log2_nprocs
    result = input_list.map(complex)
    # The first log2(nprocs) butterfly stages pair elements that live on
    # different processors: permute whole partitions across processors.
    for index_j in range(0, log2_nprocs):
        permutation = result.get_partition() \
            .permute(partial(_bit_complement, log2_nprocs - index_j - 1)) \
            .flatten()
        result = permutation.map2i(partial(_combine, size, log2_size, index_j), result)
    # The remaining stages pair elements within the same processor:
    # permute inside each local list.  The stage-dependent permutation is
    # bound eagerly with ``partial`` so the closure passed to ``map`` does
    # not late-bind the loop variable ``index_j`` (which would make every
    # stage use the final value if ``map`` were evaluated lazily).
    for index_j in range(log2_nprocs, log2_size):
        local_permute = partial(_bit_complement, log2_size - index_j - 1)
        permutation = result.get_partition() \
            .map(lambda local, fun=local_permute: local.permute(fun)) \
            .flatten()
        result = permutation.map2i(partial(_combine, size, log2_size, index_j), result)
    return result
def test_gather_distr():  # pylint: disable=missing-docstring
    # After a gather, the whole list must sit at the destination pid
    # and every other processor must hold nothing.
    data = generate_str_plist()
    total = data.length()
    target = par.randpid()
    actual = get_distribution(data.gather(target))
    expected = [total if pid == target else 0 for pid in par.procs()]
    assert actual == expected
def test_distribute_distr():  # pylint: disable=missing-docstring
    # distribute() must realize exactly the requested distribution:
    # here, everything on one randomly chosen processor.
    data = generate_str_plist()
    total = data.length()
    target = par.randpid()
    expected = Distribution([0] * len(par.procs()))
    expected[target] = total
    actual = get_distribution(data.distribute(expected))
    assert actual == expected
def test_balance_distr():  # pylint: disable=missing-docstring
    # Start from a maximally skewed distribution (everything on one pid)
    # and check that balance() restores the canonical balanced one.
    data = generate_str_plist()
    total = data.length()
    target = par.randpid()
    skewed = Distribution([0] * len(par.procs()))
    skewed[target] = total
    actual = get_distribution(data.distribute(skewed).balance())
    assert actual == Distribution.balanced(total)
def test_distribute_data():  # pylint: disable=missing-docstring
    # Redistribution must move data without altering its content or order.
    target = par.randpid()
    data = generate_int_plist()
    total = data.length()
    skewed = Distribution([0] * len(par.procs()))
    skewed[target] = total
    actual = data.distribute(skewed).to_seq()
    assert actual == SList(range(0, total))
def bcast(input_list: PList, src_pid: int) -> PList:
    """
    Broadcast the data at source processor to all processors.

    Example::

        >>> from pyske.core import PList, par
        >>> bcast(PList.from_seq([42]), 0).to_seq() == \
                list(map(lambda _: 42, par.procs()))
        True

    :param input_list: a parallel list.
    :param src_pid: the source processor identifier.
        Pre-condition: ``src_pid in par.procs()``
    :return: a parallel list.
    """
    assert src_pid in par.procs()
    nprocs = len(par.procs())
    size = input_list.distribution[src_pid]
    # The source processor replicates its partition once per processor;
    # all other processors contribute nothing.
    replicated = input_list.get_partition().mapi(
        lambda pid, chunk: [chunk for _ in par.procs()] if pid == src_pid else [])
    # At this point every copy still lives at the source processor.
    copies = replicated.flatten(
        Distribution([nprocs if pid == src_pid else 0 for pid in par.procs()]))
    # Route one copy to each processor, then unfold it locally.
    scattered = copies.distribute(Distribution([1 for _ in par.procs()]))
    return scattered.flatten(Distribution([size for _ in par.procs()]))
def _main():
    """Benchmark the parallel FFT and report one timing per iteration."""
    size, num_iter, _ = util.standard_parse_command_line(data_arg=False)
    assert _is_power_of_2(size), "The size should be a power of 2."
    assert _is_power_of_2(len(par.procs())), \
        "The number of processors should be a power of 2."
    data = PList.init(lambda _: 1.0, size)
    timer = Timing()
    # Disable automatic collection so GC pauses never land inside a timed
    # region; collection happens explicitly between iterations instead.
    gc.disable()
    for iteration in range(1, num_iter + 1):
        timer.start()
        transformed = fft(data)
        timer.stop()
        gc.collect()
        first = transformed.to_seq()[0]
        util.print_experiment(first, timer.get(), par.at_root, iteration)
def pssr(input_list: PList) -> PList:
    """
    Sort the input list.

    Sorts using ``<`` only (parallel sort by regular sampling).

    Example::

        >>> from pyske.core import PList
        >>> pssr(PList.init(lambda i: 10-i, 10)).to_seq()
        [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]

    :param input_list: a parallel list.
        Pre-condition: the size of the local lists should be at least
        equal to the number of processors.
    :return: a sorted list that is a permutation of ``input_list``.
    """
    nprocs = len(par.procs())
    if nprocs == 1:
        # Degenerate case: nothing to exchange, just sort locally.
        return input_list.get_partition().map(sorted).flatten()
    for local_size in input_list.distribution:
        assert local_size >= nprocs

    def permutation(index: int):
        # All-to-all transposition of the slice indices.  Integer floor
        # division is used instead of int(index / nprocs): float division
        # silently loses precision for indices above 2**53.
        return index // nprocs + nprocs * (index % nprocs)

    def _sample(list_to_sample):
        # Pick regularly spaced pivot candidates (about nprocs - 1 of
        # them); an empty list contributes no samples.
        if list_to_sample:
            size = len(list_to_sample)
            step = size // nprocs
            return list_to_sample[step:size:step]
        return []

    locally_sorted = input_list.get_partition().map(sorted)
    # Gather each processor's samples at pid 0, merge and re-sample them
    # to obtain the global pivots, then broadcast the pivots everywhere.
    first_samples = locally_sorted.map(_sample).gather(0).get_partition()
    second_samples = bcast(first_samples.map(_merge).map(_sample), 0)
    # Split every local list along the pivots, route slice i of each
    # processor to processor i, and merge the received sorted runs.
    slices = locally_sorted.map2(_slice, second_samples).flatten()
    result = slices.permute(permutation).get_partition().map(_merge).flatten()
    return result
# Command-line options specific to this benchmark.
PARSER.add_argument("--test", help="choice of the test", type=int, default=2)
PARSER.add_argument("-v", help="verbose mode", action='store_true')
ARGS = PARSER.parse_args()
ITERATIONS = ARGS.iter
SIZE = ARGS.size
SEQ = ARGS.seq
TST = ARGS.test
VRB = ARGS.v
if VRB:
    # Only the root processor echoes the experiment parameters.
    par.at_root(lambda: print("Iterations:\t", ITERATIONS,
                              "\nSize:\t", SIZE,
                              "\nSeq: \t", SEQ,
                              "\nTest:\t", TST,
                              "\nNprocs:\t", len(par.procs())))


def _test_mmr_direct(lst):
    """Apply two successive map skeletons, then reduce the result."""
    return lst.map(_f_map).map(_f_map).reduce(_f_reduce, 0)


def _test_mr_direct(lst):
    """Apply a single map skeleton with the composed function, then reduce."""
    def composed(num):
        # Composition of the two map steps of _test_mmr_direct.
        return _f_map(_f_map(num))
    return lst.map(composed).reduce(_f_reduce, 0)