Example #1
async def put(self):
    # TODO: extract json request data stuff into mixin?
    request_data = tornado.escape.json_decode(self.request.body)
    connection = request_data['connection']
    if connection["type"].lower() == "tcp":
        dask_client = await AioClient(address=connection['address'])
        executor = DaskJobExecutor(client=dask_client,
                                   is_local=connection['isLocal'])
    elif connection["type"].lower() == "local":
        # NOTE: we can't use DaskJobExecutor.make_local as it doesn't use AioClient
        # which then conflicts with LocalCluster(asynchronous=True)
        # error message: "RuntimeError: Non-thread-safe operation invoked on an event loop
        # other than the current one"
        # related: debugging via env var PYTHONASYNCIODEBUG=1
        cluster_kwargs = {
            "threads_per_worker": 1,
            "asynchronous": True,
        }
        if "numWorkers" in connection:
            cluster_kwargs.update({"n_workers": connection["numWorkers"]})
        cluster = dd.LocalCluster(**cluster_kwargs)
        dask_client = await AioClient(address=cluster)
        executor = DaskJobExecutor(client=dask_client, is_local=True)
    self.data.set_executor(executor, request_data)
    msg = Message(self.data).initial_state(
        jobs=self.data.serialize_jobs(),
        datasets=self.data.serialize_datasets(),
    )
    log_message(msg)
    self.event_registry.broadcast_event(msg)
    self.write({
        "status": "ok",
        "connection": connection,
    })
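The NOTE above points at the clash between a synchronous dd.Client and LocalCluster(asynchronous=True). A minimal sketch of the fully asynchronous pairing in current dask.distributed, where Client(..., asynchronous=True) plays the role of the older AioClient (the submitted task is illustrative only):

import asyncio
import dask.distributed as dd

async def run():
    # Both the cluster and the client are created on the running event loop.
    async with dd.LocalCluster(asynchronous=True, threads_per_worker=1) as cluster:
        async with dd.Client(cluster, asynchronous=True) as client:
            future = client.submit(sum, [1, 2, 3])
            print(await future)  # -> 6

asyncio.run(run())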
Example #2
def initialize_dask(n, factor = 5, slurm = False):

    if not slurm:
        cores = len(os.sched_getaffinity(0))
        cluster = distributed.LocalCluster(processes = False,
                                           n_workers = 1,
                                           threads_per_worker = 1)

    else:
        n = min(100, n)
        py = './enter_conda.sh python3'
        params = {
            'python' : py,
            'cores' : 1,
            'memory' : '512MB',
            'walltime' : '180',
            'processes' : 1,
            'job_extra' : [
                '--qos use-everything',
                '--array 0-{0:d}'.format(n - 1),
                '--requeue',
                '--output "/dev/null"'
            ],
            'env_extra' : [
                'JOB_ID=${SLURM_ARRAY_JOB_ID%;*}_${SLURM_ARRAY_TASK_ID%;*}',
                'source /etc/profile.d/modules.sh',
                'cd {0!s}'.format(CONFIG['PATHS', 'root']),
            ]
        }
        cluster = SLURMCluster(**params)
        print(cluster.job_script())
        cluster.scale(1)

    print(cluster.dashboard_link)
    return distributed.Client(cluster)
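A hedged usage sketch for the client returned by initialize_dask; the workload below is illustrative and not part of the source:

client = initialize_dask(n=10, slurm=False)

futures = client.map(lambda x: x ** 2, range(10))
print(client.gather(futures))  # [0, 1, 4, ..., 81]

client.close()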
Example #3
def main(ds_kwargs, address=None):

    if address is None:
        # Start local cluster
        cores = psutil.cpu_count(logical=False)

        if cores is None:
            cores = 2
        cluster_kwargs = {
            "threads_per_worker": 1,
            "n_workers": cores
        }
        print('starting local cluster...')
        cluster = dd.LocalCluster(**cluster_kwargs)
        print('local cluster started')
        address = cluster.scheduler_address
        print('address: ', address)
    else:
        cluster = None

    loop = asyncio.get_event_loop()
    try:
        loop.run_until_complete(async_main(ds_kwargs, address))
    finally:
        # We CAN'T close the loop here because the interpreter
        # has to continue running in DM
        # Do NOT call loop.close()!

        # Required for local cluster
        if cluster is not None:
            cluster.close()

        print("Exit processing loop")
Example #4
def get_or_create_cluster():
    global _GLOBAL_LOCAL_CLUSTER
    if _GLOBAL_LOCAL_CLUSTER is None:
        _GLOBAL_LOCAL_CLUSTER = dd.LocalCluster()
        atexit.register(_atexit_close_cluster)
        LOG.info('local cluster running at {}',
                 _GLOBAL_LOCAL_CLUSTER.dashboard_link)
    return _GLOBAL_LOCAL_CLUSTER
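_atexit_close_cluster is referenced but not shown; a plausible definition, assuming it only tears down the module-level cluster, would be:

def _atexit_close_cluster():
    global _GLOBAL_LOCAL_CLUSTER
    if _GLOBAL_LOCAL_CLUSTER is not None:
        _GLOBAL_LOCAL_CLUSTER.close()
        _GLOBAL_LOCAL_CLUSTER = None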
Example #5
def make_local(cls, cluster_kwargs=None, client_kwargs=None):
    """
    interesting cluster_kwargs:
        threads_per_worker
        n_workers
    """
    cluster = dd.LocalCluster(**(cluster_kwargs or {}))
    client = dd.Client(cluster, **(client_kwargs or {}))
    return cls(client=client, is_local=True)
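Hypothetical usage of make_local, assuming the defining class is DaskJobExecutor as in Examples #1 and #15 (the import path is an assumption):

# from libertem.executor.dask import DaskJobExecutor  # assumed location
executor = DaskJobExecutor.make_local(
    cluster_kwargs={"n_workers": 2, "threads_per_worker": 1},
)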
Example #6
def client_in_background():
    # A running Dask client can introduce a timing issue
    # between automatic closing of a numpy.memmap object and renaming
    # the underlying file
    with dd.LocalCluster() as cluster:
        client = dd.Client(cluster, set_as_default=False)
        yield
        # to fix "distributed.client - ERROR - Failed to reconnect to scheduler after 10.00 seconds, closing client"  # NOQA
        client.close()
Example #7
def client(tmpdir_factory, request):
    with tmpdir_factory.mktemp("dask_cluster").as_cwd():
        lc = distributed.LocalCluster(n_workers=request.param, processes=True)
        client = distributed.Client(lc)

        yield client

        client.close()
        lc.close()
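Because the fixture above reads request.param, it is presumably parametrized indirectly; a hedged sketch of a consuming test (the worker counts are illustrative):

import pytest

@pytest.mark.parametrize("client", [1, 2], indirect=True)
def test_worker_count(client):
    assert len(client.scheduler_info()["workers"]) in (1, 2)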
Example #8
def test_pickling():

    def pxfn(fp):
        """Pixel function for recipe. `ds` lives in the closure"""
        return np.ones(fp.shape) * id(ds)

    def slave():
        """Slave process routine. `ds` and `oldid` live in the closure and are pickled by cloudpickle in Client.submit"""
        print('slave', dd.get_worker())
        assert id(ds) != oldid, 'this test makes sense if `ds` was pickled'
        assert 'v1' in ds
        assert 'v2' not in ds
        assert 'r1' in ds
        assert 'r2' not in ds
        assert 'r3' in ds
        assert (ds._queued_count, ds._locked_count, ds.v1.activated, ds.r1.activated, ds.r3.activated) == (0, 0, False, False, True)
        assert ds.v1.get_data(0)[1] == str(oldid)
        assert (ds.r1.get_data() == oldid).all()
        assert (ds.r3.get_data() == id(ds)).all(), '`slave` and `pxfn` should share the same `ds` obj'

        ds.v1.insert_data((0, 1), ['42'])
        ds.r1.fill(42)
        assert ds.v1.get_data(1)[1] == '42'
        assert (ds.r1.get_data() == 42).all()

        ds.deactivate_all()

    ds = buzz.DataSource(max_activated=2)
    oldid = id(ds)
    fp = buzz.Footprint(
        tl=(1, 1),
        size=(10, 10),
        rsize=(10, 10),
    )
    clust = dd.LocalCluster(n_workers=1, threads_per_worker=1, scheduler_port=0)
    print()
    print(clust)
    cl = dd.Client(clust)
    print(cl)

    with ds.create_vector('v1', '/tmp/v1.shp', **V_META).delete:
        with ds.create_raster('r1', '/tmp/t1.shp', fp, float, 1).delete:
            ds.create_raster('r2', '', fp, float, 1, driver='MEM')
            ds.create_recipe_raster('r3', pxfn, fp, float)
            ds.create_vector('v2',**MEMV_META)

            ds.v1.insert_data((0, 1), [str(oldid)])
            ds.v2.insert_data((0, 1), [str(oldid)])
            ds.r1.fill(oldid)
            ds.r2.fill(oldid)

            ds.deactivate_all()
            cl.submit(slave).result()
            assert ds.v1.get_data(1)[1] == '42'
            assert (ds.r1.get_data() == 42).all()
Example #9
    def start(self):

        self._cluster = distributed.LocalCluster(n_workers=self._number_of_workers,
                                                 threads_per_worker=1,
                                                 processes=False)

        if self._resources_per_worker.number_of_gpus > 0:

            for index, worker in self._cluster.workers.items():
                self._gpu_device_indices_by_worker[worker.id] = str(index)

        super(DaskLocalCluster, self).start()
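The loop above records one GPU index per worker id; how those indices are consumed is not shown, but a hedged sketch would be to pin CUDA_VISIBLE_DEVICES inside work sent to that worker (purely an assumption):

import os

def _run_on_assigned_gpu(gpu_device_index):
    # Restrict this worker process to the GPU index assigned to it above.
    os.environ["CUDA_VISIBLE_DEVICES"] = gpu_device_index
    ...  # run the actual GPU computation here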
Example #10
def create_client():
    """ Initializes a `dask.distributed` client for local computing.
    """
    cores = len(os.sched_getaffinity(0))
    nworkers = int(np.ceil(cores / 8))
    cluster = distributed.LocalCluster(n_workers=nworkers,
                                       threads_per_worker=min(cores, 8),
                                       resources={'foo': nworkers})
    print(cluster)
    print(cluster.dashboard_link)
    client = distributed.Client(cluster)
    return client
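The resources={'foo': nworkers} declaration above can throttle concurrency; a hedged usage sketch where each task reserves one 'foo' unit on whichever worker runs it (the task itself is illustrative):

client = create_client()
futures = client.map(pow, range(10), range(10), resources={'foo': 1})
print(client.gather(futures))
client.close()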
Example #11
    def connect(cls, *args, **kwargs):
        """
        Setup local cluster
        """

        kwargs["n_workers"] = kwargs.pop("n_workers", cls.processes)
        kwargs["threads_per_worker"] = kwargs.pop("threads_per_worker", 1)
        kwargs["processes"] = kwargs.pop("processes", True)
        kwargs["local_dir"] = kwargs.pop("local_dir", cls.local_dir)

        cls.local_cluster = distributed.LocalCluster(*args, **kwargs)
        cls.client = distributed.Client(cls.local_cluster)
        return True
Example #12
    def test_dask_chunked_input(self):
        tk = da.from_array(np.asarray(t_def) + 273.15, chunks="auto")
        rh = da.from_array(rh_def, chunks="auto")

        # Start dask cluster
        cluster = dd.LocalCluster(n_workers=3, threads_per_worker=2)
        print(cluster.dashboard_link)
        client = dd.Client(cluster)

        out = map_blocks(dewtemp, tk, rh).compute()

        assert np.allclose(out - 273.15, dt_2, atol=0.1)

        client.close()
        cluster.close()
Example #13
    def test_dask_chunked_input(self):
        p = da.from_array(self.p_def, chunks="auto")
        t = da.from_array(self.t_def, chunks="auto")
        q = da.from_array(self.q_def, chunks="auto")

        # Start dask cluster
        cluster = dd.LocalCluster(n_workers=3, threads_per_worker=2)
        print(cluster.dashboard_link)
        client = dd.Client(cluster)

        out = client.submit(relhum, t, q, p).result()

        assert np.allclose(out, self.rh_gt_2, atol=0.1)

        client.shutdown()
Example #14
    def test_dask_unchunked_input(self):
        p = da.from_array(self.p_def)
        t = da.from_array(self.t_def)
        q = da.from_array(self.q_def)

        # Start dask cluster
        cluster = dd.LocalCluster(n_workers=3, threads_per_worker=2)
        print(cluster.dashboard_link)
        client = dd.Client(cluster)

        out = map_blocks(relhum, t, q, p).compute()

        assert np.allclose(out, self.rh_gt_2, atol=0.1)

        client.shutdown()
Example #15
File: dask.py  Project: ozej8y/LiberTEM
    def make_local(cls, cluster_kwargs=None, client_kwargs=None):
        """
        Spin up a local dask cluster

        interesting cluster_kwargs:
            threads_per_worker
            n_workers

        Returns
        -------
        DaskJobExecutor
            the connected JobExecutor
        """
        cluster = dd.LocalCluster(**(cluster_kwargs or {}))
        client = dd.Client(cluster, **(client_kwargs or {}))
        return cls(client=client, is_local=True)
Example #16
    def connect(cls, *args, **kwargs):
        """
        Setup local cluster
        """

        kwargs["n_workers"] = kwargs.pop("n_workers", cls.processes)
        kwargs["threads_per_worker"] = kwargs.pop("threads_per_worker", 1)
        kwargs["processes"] = kwargs.pop("processes", True)

        # Ugly hack because dask-jobqueue changed this keyword arg
        local_directory = kwargs.pop("local_dir", None)
        local_directory = kwargs.pop("local_directory", None) if local_directory is None else local_directory
        kwargs["local_directory"] = local_directory if local_directory is not None else cls.local_dir

        cls.local_cluster = distributed.LocalCluster(*args, **kwargs)
        cls.client = distributed.Client(cls.local_cluster)
        return True
Example #17
        def fake_cluster(*args, **kwargs):
            replace_args = dict()
            replace_args["n_workers"] = kwargs.pop("n_workers", 0)
            replace_args["threads_per_worker"] = kwargs.pop("threads_per_worker", 1)
            replace_args["processes"] = kwargs.pop("processes", True)
            replace_args["local_dir"] = kwargs.pop("local_directory", None)

            clust = distributed.LocalCluster(**replace_args)
            clust._active_worker_n = 0

            def _count_active_workers(self):
                val = self._active_worker_n
                self._active_worker_n += 1
                return val

            clust._count_active_workers = types.MethodType(_count_active_workers, clust)
            return clust
Example #18
def test_run_and_score_submission():
    # This test will fail if run after another transaction=True test
    # See workaround in tests/test_views.py:test_run_submission

    transaction.commit()
    call_command("migrate", "core", "zero", interactive=False)
    call_command("migrate", "core", interactive=False)
    call_command("sample_data")
    transaction.commit()

    submission = models.Submission.objects.first()
    cluster = dd.LocalCluster(n_workers=4, preload=("daskworkerinit_tst.py", ))
    dask_client = dd.Client(cluster)

    print(submission.id, submission)
    future = tasks.run_and_score_submission(dask_client, submission)
    result = future.result()
    assert result
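daskworkerinit_tst.py is a worker preload script executed when each worker starts; its contents are not shown, but a plausible minimal version for a Django test run (purely an assumption, including the settings module name) would be:

# daskworkerinit_tst.py (hypothetical contents)
import os

import django

os.environ.setdefault("DJANGO_SETTINGS_MODULE", "project.settings")
django.setup()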
Example #19
    def make_local(cls, cluster_kwargs=None, client_kwargs=None):
        """
        Spin up a local dask cluster

        interesting cluster_kwargs:
            threads_per_worker
            n_workers

        Returns
        -------
        DaskJobExecutor
            the connected JobExecutor
        """
        cluster = dd.LocalCluster(**(cluster_kwargs or {}))
        client = dd.Client(cluster, **(client_kwargs or {}))

        # Disable handling Ctrl-C on the workers for a local cluster
        # since the nanny restarts workers in that case and that gets mixed
        # with Ctrl-C handling of the main process, at least on Windows
        client.run(functools.partial(signal.signal, signal.SIGINT, signal.SIG_IGN))

        return cls(client=client, is_local=True)
Example #20
def main():
    cores = psutil.cpu_count(logical=False)

    if cores is None:
        cores = 2
    cluster_kwargs = {
        "threads_per_worker": 1,
        # "asynchronous": True,
        "n_workers": cores
    }

    cluster = dd.LocalCluster(**cluster_kwargs)
    loop = asyncio.get_event_loop()

    try:
        # print(cluster.scheduler_address)
        # (can be replaced with asyncio.run(coro) in Python 3.7)
        loop.run_until_complete(async_main(cluster.scheduler_address))

    finally:
        # loop.close()
        print("Close cluster")
        cluster.close()
Example #21
def main():
    parser = argparse.ArgumentParser(
        description='Renders the towers in a given directory')
    parser.add_argument('--src',
                        type=str,
                        default='data/towers',
                        help='Path to tower jsons')

    args = parser.parse_args()

    # src = os.path.join(CONFIG['data'], args.src)
    src = args.src
    out = args.src + '_render'
    # if not os.path.isdir(out):
    #     os.mkdir(out)

    cluster = distributed.LocalCluster(n_workers=4, threads_per_worker=2)
    print(cluster.dashboard_link)
    client = distributed.Client(cluster)

    tower_jsons = glob.glob(os.path.join(src, '*.json'))
    futures = client.map(simulate_tower, tower_jsons)
    results = client.gather(futures)
    print(results[0])
Example #22
    # Initialize representation, regularizer, and model
    representation = BasisFeaturizer(chemical_system,
                                     bspline_config)
    knots_file = knots_file or "knots.json"
    if write_knots:
        if os.path.isfile(knots_file) and verbose >= 1:
            print("Overwriting...")
        json_io.dump_interaction_map(representation.knots_map,
                                     filename=knots_file,
                                     write=True)
        if verbose >= 1:
            print("Wrote knots:", knots_file)
    #    client = ProcessPoolExecutor(max_workers=n_jobs)

    cluster = distributed.LocalCluster(n_workers=n_jobs)
    client = distributed.Client(cluster)

    # Compute representations
    if n_jobs == 1:
        n_batches = 1
    else:
        n_batches = 8  # added granularity for more progress bar updates.

    df_features = representation.evaluate_parallel(df_data,
                                                   client,
                                                   n_jobs=n_jobs * n_batches,
                                                   progress_bar=progress_bars)
    if cache_features:
        if os.path.isfile(features_filename):
            print("Overwriting...")
Example #23
def generate4D_frms6(data_dir, bin_factor=2, workers=0):
    current_dir = os.getcwd()
    os.chdir(data_dir)
    data_class = st.util.Frms6Reader()
    tot_files = 0

    for file in glob.glob("*.frms6"):
        tot_files += 1
    filesizes = np.zeros((tot_files, 4), dtype=int)
    filenames = np.zeros(tot_files, dtype=object)

    ii = 0
    for file in glob.glob("*.frms6"):
        fname = data_dir + file
        dshape = np.asarray(data_class.getDataShape(fname), dtype=int)
        filesizes[ii, 0:3] = dshape
        filesizes[ii, -1] = fname[-7]
        filenames[ii] = fname
        ii += 1
    os.chdir(current_dir)

    if workers == 0:
        workers = int(1 + tot_files)

    cluster = dd.LocalCluster(n_workers=workers)
    client = dd.Client(cluster)

    draw_shape = (np.mean(filesizes[filesizes[:, -1] != 0, 0:3], axis=0)).astype(int)
    dref_shape = filesizes[filesizes[:, -1] == 0, 0:3][0]
    data_shape = np.copy(dref_shape)
    data_shape[-1] = (np.sum(filesizes[:, -2]) - np.amin(filesizes[:, -2])).astype(int)
    individual_shape = np.zeros(4, dtype=int)
    individual_shape[0:3] = draw_shape
    individual_shape[-1] = int(tot_files - 1)
    data3d_before = []

    ii = np.arange(tot_files)[filesizes[:, -1] == 0][0]
    dark_read = dask.delayed(data_class.readData)(
        filenames[ii],
        image_range=(0, dref_shape[-1]),
        pixels_x=dref_shape[0],
        pixels_y=dref_shape[1],
    )
    dark_data = da.from_delayed(dark_read, filesizes[ii, 0:3], np.float32)
    del ii
    mean_dark_ref = da.mean(dark_data, axis=-1)

    for jj in np.arange(1, tot_files):
        ii = np.arange(tot_files)[filesizes[:, -1] == jj][0]
        test_read = dask.delayed(data_class.readData)(
            filenames[ii],
            image_range=(0, draw_shape[-1]),
            pixels_x=draw_shape[0],
            pixels_y=draw_shape[1],
        )
        test_data = da.from_delayed(test_read, filesizes[ii, 0:3], np.float32)
        test_data = test_data.rechunk(-1, -1, 256)
        data3d_before.append(test_data)

    data3d_dask = da.concatenate(data3d_before, axis=-1)

    data_shape = data3d_dask.shape
    con_shape = tuple((np.asarray(data_shape[0:2]) * np.asarray((0.5, 2))).astype(int))
    xvals = int(data_shape[-1] ** 0.5)

    d3r = da.transpose(data3d_dask, (2, 0, 1))
    d3s = d3r - mean_dark_ref
    d3D_dref = da.transpose(d3s, (1, 2, 0))
    top_part = d3D_dref[0 : con_shape[0], :, :]
    bot_part = d3D_dref[con_shape[0] : data_shape[0], :, :]
    top_part_rs = top_part[::-1, ::-1, :]
    data3d_arranged = da.concatenate([bot_part, top_part_rs], axis=1)
    shape4d = (con_shape[0], con_shape[1], xvals, xvals)
    data4d_dask = da.reshape(data3d_arranged, shape4d)

    bin_nums = int((xvals / bin_factor) ** 2)
    xvals_bin = int(xvals / bin_factor)
    if np.logical_not((np.mod(xvals, bin_factor)).astype(bool)):
        yyb = np.arange(data4d_dask.shape[2])[::bin_factor]
        xxb = np.arange(data4d_dask.shape[3])[::bin_factor]
        data3d_binY = da.reshape(
            data4d_dask[:, :, yyb, :],
            (con_shape[0], con_shape[1], int(xvals * xvals_bin)),
        )
        for ybf in np.arange(1, bin_factor):
            data3d_binY = data3d_binY + da.reshape(
                data4d_dask[:, :, yyb + ybf, :],
                (con_shape[0], con_shape[1], int(xvals * xvals_bin)),
            )
        data4d_binY = da.reshape(
            data3d_binY, (con_shape[0], con_shape[1], xvals_bin, xvals)
        )

        data3d_binYX = da.reshape(
            data4d_binY[:, :, :, xxb], (con_shape[0], con_shape[1], bin_nums)
        )
        for xbf in np.arange(1, bin_factor):
            data3d_binYX = data3d_binYX + da.reshape(
                data4d_binY[:, :, :, xxb + xbf], (con_shape[0], con_shape[1], bin_nums)
            )
        data4d_bin = da.reshape(
            data3d_binYX, (con_shape[0], con_shape[1], xvals_bin, xvals_bin)
        )
        data4D = data4d_bin.compute()
    else:
        data4D = data4d_dask.compute()
    cluster.close()
    return data4D
Example #24
import syncopy.tests.test_packagesetup as setupTestModule

# If dask is available, launch a SLURM cluster when running on a cluster node, or
# create a `LocalCluster` object when tests are run on a single machine. If dask
# is not installed, fall back to a dummy None-valued cluster object (the tests will
# be skipped anyway)
if __dask__:
    import dask.distributed as dd
    from syncopy.shared import esi_cluster_setup
    from syncopy.tests.misc import is_slurm_node
    if is_slurm_node():
        cluster = esi_cluster_setup(partition="DEV", n_jobs=10, mem_per_job="8GB",
                                    timeout=600, interactive=False,
                                    start_client=False)
    else:
        cluster = dd.LocalCluster(n_workers=2)
else:
    cluster = None

# Set up a pytest fixture `testcluster` that uses the constructed cluster object
@pytest.fixture
def testcluster():
    return cluster

# Re-order tests to first run stuff in test_packagesetup.py, then everything else
def pytest_collection_modifyitems(items):

    # Collect tests to be run in this session and registered setup-related tests    
    allTests = [testFunc.name if hasattr(testFunc, "name") else "" for testFunc in items]
    setupTests = [name for name in dir(setupTestModule) 
                  if not name.startswith("__") and not name.startswith("@")]    
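A hedged sketch of a test consuming the testcluster fixture defined above, reusing the dd import from this conftest (the test body is illustrative only):

def test_with_testcluster(testcluster):
    if testcluster is None:
        pytest.skip("dask not installed")
    client = dd.Client(testcluster)
    try:
        assert client.submit(sum, [1, 2, 3]).result() == 6
    finally:
        client.close()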
Example #25
def dask_client():
    # We need at least 4 workers
    with dd.LocalCluster(n_workers=4,
                         preload=("daskworkerinit_tst.py", )) as cluster:
        yield dd.Client(cluster)
Example #26
def test_start_client():
    with dd.LocalCluster() as cluster:
        client = dd.Client(cluster, set_as_default=False)
        client.close()
Example #27
def main(argv):
    global DEBUG, DD_FORCE_LOAD, DASK_CLIENT

    parser = argparse.ArgumentParser(
        epilog=__doc__, formatter_class=argparse.RawTextHelpFormatter)
    parser.add_argument('filepath')
    parser.add_argument('dftype')
    parser.add_argument('base')
    parser.add_argument('x')
    parser.add_argument('y')
    parser.add_argument('categories', nargs='+')
    parser.add_argument('--debug',
                        action='store_true',
                        help='Enable increased verbosity and DEBUG messages')
    parser.add_argument(
        '--cache',
        choices=('persist', 'cachey'),
        default=None,
        help=
        'Enable caching: "persist" causes Dask dataframes to force loading into memory; "cachey" uses dask.cache.Cache with a cachesize of {}. Caching is disabled by default'
        .format(int(p.cachesize)))
    parser.add_argument(
        '--distributed',
        action='store_true',
        help=
        'Enable the distributed scheduler instead of the threaded, which is the default.'
    )
    parser.add_argument(
        '--recalc-ranges',
        action='store_true',
        help=
        'Tell datashader to recalculate the ranges on each aggregation, instead of caching them (by default).'
    )
    args = parser.parse_args(argv[1:])

    if args.cache is None:
        if args.debug:
            print("DEBUG: Cache disabled", flush=True)
    else:
        if args.cache == 'cachey':
            from dask.cache import Cache
            cache = Cache(p.cachesize)
            cache.register()
        elif args.cache == 'persist':
            DD_FORCE_LOAD = True

        if args.debug:
            print('DEBUG: Cache "{}" mode enabled'.format(args.cache),
                  flush=True)

    if args.dftype == 'dask' and args.distributed:
        local_cluster = distributed.LocalCluster(n_workers=p.n_workers,
                                                 threads_per_worker=1)
        DASK_CLIENT = distributed.Client(local_cluster)
        if args.debug:
            print('DEBUG: "distributed" scheduler is enabled')
    else:
        if args.dftype != 'dask' and args.distributed:
            raise ValueError(
                '--distributed argument is only available with the dask dataframe type (not pandas)'
            )
        if args.debug:
            print('DEBUG: "threaded" scheduler is enabled')

    filepath = args.filepath
    basename, extension = os.path.splitext(filepath)
    p.dftype = args.dftype
    p.base = args.base
    p.x = args.x
    p.y = args.y
    p.categories = args.categories
    DEBUG = args.debug

    if DEBUG:
        print('DEBUG: Memory usage (before read):\t{} MB'.format(
            get_proc_mem()), flush=True)
    df, loadtime = timed_read(filepath, p.dftype)

    if df is None:
        if loadtime == -1:
            print("{:28} {:6}  Operation not supported".format(
                filepath, p.dftype),
                  flush=True)
        return 1

    if DEBUG:
        print('DEBUG: Memory usage (after read):\t{} MB'.format(get_proc_mem()),
              flush=True)

    img, aggtime1 = timed_agg(df,
                              filepath,
                              5,
                              5,
                              cache_ranges=(not args.recalc_ranges))
    if DEBUG:
        mem_usage = df.memory_usage(deep=True)
        if p.dftype == 'dask':
            mem_usage = mem_usage.compute()
        print('DEBUG:', mem_usage, flush=True)
        mem_usage_total = mem_usage.sum()
        print('DEBUG: DataFrame size:\t\t\t{} MB'.format(mem_usage_total / 1e6),
              flush=True)
        for colname in df.columns:
            print('DEBUG: column "{}" dtype: {}'.format(
                colname, df[colname].dtype))
        print('DEBUG: Memory usage (after agg1):\t{} MB'.format(get_proc_mem()),
              flush=True)

    img, aggtime2 = timed_agg(df,
                              filepath,
                              cache_ranges=(not args.recalc_ranges))
    if DEBUG:
        print('DEBUG: Memory usage (after agg2):\t{} MB'.format(get_proc_mem()),
              flush=True)

    in_size = get_size(filepath)
    out_size = get_size(filepath + ".png")

    global_end = time.time()
    print("{:28} {:6}  Aggregate1:{:06.2f} ({:06.2f}+{:06.2f})  Aggregate2:{:06.2f}  In:{:011d}  Out:{:011d}  Total:{:06.2f}"\
          .format(filepath, p.dftype, loadtime+aggtime1, loadtime, aggtime1, aggtime2, in_size, out_size, global_end-global_start), flush=True)

    return 0
Example #28
def test_run_submission(client):

    # Because the dask worker runs in a separate thread, we need to commit our transaction.
    # But the transaction test case will wipe out data from django's ContentTypes,
    # so rerun our migrations to re-add our content types.
    # Generally we don't run with processes=False since it's a less thorough test,
    # but for debugging with pdb it's more convenient.

    processes = True
    if processes:
        transaction.commit()
        call_command("migrate", "core", "zero", interactive=False)
        call_command("migrate", "core", interactive=False)
        call_command("sample_data")
        transaction.commit()
    else:
        call_command("sample_data")
    submission = Submission.objects.first()
    client.force_login(submission.user)

    future = None

    def save_future(l_future):
        nonlocal future
        future = l_future

    if processes:
        cluster = dd.LocalCluster(n_workers=4,
                                  preload=("daskworkerinit_tst.py", ))
    else:
        cluster = dd.LocalCluster(n_workers=1,
                                  processes=False,
                                  threads_per_worker=1)

    dask_client = dd.Client(cluster)

    mock_get_client = Mock(return_value=dask_client)
    with patch("referee.get_client", mock_get_client):
        with patch("core.views.submission.ignore_future", save_future):
            response = client.post(f"/submission/{submission.pk}/submit/", {})

        assert response.status_code == 302
        assert response.url == reverse("submission-detail",
                                       kwargs={"pk": submission.pk})
        detail_url = response.url
        response = client.get(detail_url)

        # public_run = response.context["public_run"]
        # if public_run is not None:
        #   assert public_run.status in ("PENDING", "SUCCESS")
        result = future.result()
        response = client.get(detail_url)
        public_run = response.context["public_run"]
        assert public_run.status == "SUCCESS"
        assert result

    cluster.close()

    evaluation = public_run.evaluation_set.first()

    evaluation_url = reverse("evaluation-detail", kwargs={"pk": evaluation.pk})

    response = client.get(evaluation_url)
    assert response.context["evaluation"].pk == evaluation.pk

    log_url = reverse("evaluation-log-out", kwargs={"pk": evaluation.pk})
    response = client.get(log_url)
    assert response.context["log"] == evaluation.log_stdout

    errlog_url = reverse("evaluation-log-err", kwargs={"pk": evaluation.pk})
    response = client.get(errlog_url)
    assert response.context["log"] == evaluation.log_stderr
Example #29
def main(
    learning_rate,
    batch_size,
    pop_size,
    l2_coeff,
    noise_std,
    noise_decay,
    n_iterations,
    n_rollouts,
    env_deterministic,
    returns_normalization,
    evals_per_step,
    eval_batch_size,
    single_threaded,
    gpu_mem_frac,
    action_noise,
    progress,
    seed,
    niche_path,
    pop_path,
    po_path,
    device,
    log_fnm,
    env_name,
):
    cluster = distributed.LocalCluster(n_workers=4,
                                       processes=True,
                                       threads_per_worker=1)
    client = distributed.Client(cluster)

    def make_env():
        from distributed_evolution.envs import CheetahEnv, AntEnv

        if env_name == "cheetah":
            Env = CheetahEnv
        elif env_name == "ant":
            Env = AntEnv
        else:
            raise Exception("Invalid env_name")

        if env_deterministic:
            return Env(seed=seed)
        return Env()

    env = make_env()
    print(env.action_space)

    model = CheetahPolicy(
        env.observation_space.shape,
        env.action_space.shape[0],
        env.action_space.low,
        env.action_space.high,
        ac_noise_std=action_noise,
        seed=seed,
        gpu_mem_frac=gpu_mem_frac,
        single_threaded=single_threaded,
    )

    niche = TorchGymNiche(make_env,
                          model,
                          ts_limit=1000,
                          n_train_rollouts=n_rollouts,
                          n_eval_rollouts=1)

    population = populations.Normal(model.get_theta(),
                                    noise_std,
                                    sigma_decay=noise_decay,
                                    device=device)
    optim = torch.optim.Adam(population.parameters(),
                             lr=learning_rate,
                             weight_decay=l2_coeff)

    if returns_normalization == "centered_ranks":
        returns_normalizer = centered_rank_normalizer
    elif returns_normalization == "normal":
        returns_normalizer = normal_normalizer
    else:
        raise ValueError(
            "Invalid returns normalizer {}".format(returns_normalization))

    optimizer = GenVarESOptimizer(
        client,
        population,
        optim,
        niche,
        batch_size=batch_size,
        pop_size=pop_size,
        eval_batch_size=eval_batch_size,
        evals_per_step=evals_per_step,
        returns_normalizer=returns_normalizer,
        seed=seed,
        device=device,
        log_fnm=log_fnm,
    )
    optimizer.optimize(
        n_iterations,
        show_progress=progress,
        pop_path=pop_path,
        niche_path=niche_path,
        po_path=po_path,
    )
Example #30
    }
    #'''
    #'''
    fi = xr.DataArray(
        fi,
        coords={
            'lat': yi,
            'lon': xi,
        },
        dims=['time', 'alt', 'lat', 'lon'],
    ).chunk(chunks)
    #'''

    fo = linint2pts(
        fi,
        xo,
        yo,
    )


if __name__ == '__main__':
    cluster = dd.LocalCluster(n_workers=8, threads_per_worker=1)
    print(cluster.dashboard_link)
    client = dd.Client(cluster)
    t0 = time.time()

    fo = linint2_test()
    t1 = time.time()
    print(t1 - t0)
    client.close()