def test_multiple_executors_restart(s, a, b):
    # Two executors connected to the same scheduler
    e1 = Executor((s.ip, s.port), start=False)
    yield e1._start()
    e2 = Executor((s.ip, s.port), start=False)
    yield e2._start()

    x = e1.submit(inc, 1)
    y = e2.submit(inc, 2)
    xx = yield x._result()
    yy = yield y._result()
    assert xx == 2
    assert yy == 3

    # Restarting through one executor cancels outstanding futures on both
    yield e1._restart()

    assert x.cancelled()
    assert y.cancelled()

    yield e1._shutdown(fast=True)
    yield e2._shutdown(fast=True)
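# For comparison, a minimal sketch of the same restart semantics through the
# blocking API; the underscore methods in the test above are the coroutine
# variants of these calls. The scheduler address is an assumption.
from distributed import Executor

def inc(x):
    return x + 1

e = Executor('127.0.0.1:8786')
future = e.submit(inc, 1)
assert future.result() == 2
e.restart()                  # resets the cluster; outstanding futures are cancelled
assert future.cancelled()
e.shutdown()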
def test_framework_runs(self):
    with MesosCluster() as cluster:
        time.sleep(2)
        driver = DistributedDriver().create_driver(DistributedScheduler)
        driver.start()
        time.sleep(5)

        expect(cluster).to(have_activated_slaves(1))
        expect(cluster).to(have_framework_name('distributed-framework'))

        # distributed test - this probably doesn't belong here
        executor = Executor('127.0.0.1:8787')
        A = executor.map(lambda x: x**2, range(10))
        B = executor.map(lambda x: -x, A)
        total = executor.submit(sum, B)
        expect(total.result()).to(equal(-285))

        driver.stop()
def test__futures_to_dask_array(s, a, b):
    import dask.array as da

    e = Executor((s.ip, s.port), start=False)
    yield e._start()

    # 4 x 2 x 2 grid of futures, each resolving to a (2, 3, 4) numpy array
    remote_arrays = [[[e.submit(np.full, (2, 3, 4), i + j + k)
                       for i in range(2)]
                      for j in range(2)]
                     for k in range(4)]

    x = yield _futures_to_dask_array(remote_arrays, executor=e)
    assert x.chunks == ((2, 2, 2, 2), (3, 3), (4, 4))
    assert x.dtype == np.full((), 0).dtype
    assert isinstance(x, da.Array)

    expr = x.sum()
    result = yield e._get(expr.dask, expr._keys())
    assert isinstance(result[0], np.number)

    yield e._shutdown()
def f(c, a, b):
    # Same scenario as test__futures_to_dask_array above, but driven by an
    # explicitly supplied IOLoop
    e = Executor((c.ip, c.port), start=False, loop=loop)
    yield e._start()

    remote_arrays = [[[e.submit(np.full, (2, 3, 4), i + j + k)
                       for i in range(2)]
                      for j in range(2)]
                     for k in range(4)]

    x = yield _futures_to_dask_array(remote_arrays, executor=e)
    assert x.chunks == ((2, 2, 2, 2), (3, 3), (4, 4))
    assert x.dtype == np.full((), 0).dtype
    assert isinstance(x, da.Array)

    expr = x.sum()
    result = yield e._get(expr.dask, expr._keys())
    assert isinstance(result[0], np.number)

    yield e._shutdown()
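# Outside a test coroutine the same conversion can be sketched with the
# blocking wrapper. Minimal sketch: assumes a running scheduler at the given
# address and the public futures_to_dask_array helper from
# distributed.collections.
import numpy as np
from distributed import Executor
from distributed.collections import futures_to_dask_array

e = Executor('127.0.0.1:8786')
futures = [e.submit(np.full, 10, i) for i in range(4)]  # four length-10 chunks
x = futures_to_dask_array(futures, executor=e)
assert x.chunks == ((10, 10, 10, 10),)
print(x.sum().compute(get=e.get))  # (0 + 1 + 2 + 3) * 10 = 60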
from conf_analysis.meg import artifacts, preprocessing
from conf_analysis.behavior import empirical, metadata, keymap
import mne, locale
import numpy as np
import pickle
from distributed import Executor, diagnostics

locale.setlocale(locale.LC_ALL, "en_US")


def do_one(filename):
    # Map each block with at least 75 trials to a sequential block number
    result = {}
    raw = mne.io.read_raw_ctf(filename, system_clock='ignore')
    trials = preprocessing.blocks(raw, full_file_cache=True)
    trl, bl = trials['trial'], trials['block']
    bcnt = 0
    for b in np.unique(bl):
        if len(trl[bl == b]) >= 75:
            result[b] = bcnt
            bcnt += 1
        print((b, bcnt))
    return result


# Connect to a running scheduler (address assumed) and submit one task per
# (subject, session) recording
executor = Executor('127.0.0.1:8786')

block_map = {}
for snum in range(1, 16):
    filenames = [metadata.get_raw_filename(snum, b) for b in range(4)]
    block_map[snum] = {}
    for session, filename in enumerate(filenames):
        block_map[snum][session] = executor.submit(do_one, filename)

diagnostics.progress(block_map)
block_map = executor.gather(block_map)
pickle.dump(block_map, open('blockmap.pickle', 'wb'))  # pickle requires binary mode
#!/home/vagrant/miniconda3/bin/python
import os

from distributed import Executor


def count_words(filename):
    # Crude word count: number of space-separated tokens in the file
    with open(filename, "r", encoding="latin-1") as f:
        data = f.read()
    return len(data.split(" "))


def remote_word_count():
    # Walk the mailbox directory and total the word counts of every file
    root_dir = "/enron/lay-k"
    word_count = 0
    for directory, subdir, filenames in os.walk(root_dir):
        for filename in filenames:
            word_count += count_words(os.path.join(directory, filename))
    return word_count


executor = Executor("127.0.0.1:8786")
words = executor.submit(remote_word_count)
print("\n\n Word Count on remote machine is: ", words.result())
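# The walk above runs as a single task on one worker. A minimal sketch of a
# finer-grained variant that parallelizes per file with executor.map and
# reduces with a single submit; the helper below and the paths are
# illustrative assumptions:
def list_files(root_dir="/enron/lay-k"):
    return [os.path.join(directory, filename)
            for directory, subdir, filenames in os.walk(root_dir)
            for filename in filenames]

filenames = executor.submit(list_files).result()  # enumerate files on the cluster
counts = executor.map(count_words, filenames)     # one task per file
total = executor.submit(sum, counts)              # futures passed as args resolve first
print("Word count:", total.result())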