Example #1
def run(wf):
    with NCDisplay() as display:
        result = run_parallel_opt(wf,
                                  2,
                                  registry,
                                  "cache.json",
                                  display=display)

    return result
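
For context, a minimal sketch of how a workflow could be built and handed to the wrapper above; `noodles.schedule` and `noodles.gather` are part of the noodles API, while `registry` and the import locations of `run_parallel_opt`/`NCDisplay` are assumed to be defined elsewhere in the module.

import noodles

@noodles.schedule
def add(a, b):
    # A scheduled call returns a promise; nothing executes until the workflow is run.
    return a + b

# Hypothetical usage of the wrapper defined above:
# answer = run(noodles.gather(add(1, 2), add(3, 4)))   # -> [3, 7]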
Example #2
def test_broker_logging():
    A = log_add(1, 1)
    B = sub(3, A)

    multiples = [mul(log_add(i, B), A) for i in range(6)]
    C = accumulate(noodles.gather(*multiples))

    with NCDisplay(title="Running the test") as display:
        assert run_parallel_with_display(C, 4, display) == 42
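
The helpers used in this test come from the noodles test suite; the sketch below gives definitions that make the arithmetic work out and is offered only as an assumption (the real `log_add` additionally logs through the broker).

import noodles

@noodles.schedule
def log_add(a, b):
    return a + b        # assumed: like add, plus a log message

@noodles.schedule
def sub(a, b):
    return a - b

@noodles.schedule
def mul(a, b):
    return a * b

@noodles.schedule
def accumulate(xs):
    return sum(xs)

# With these definitions A == 2, B == 1, the multiples are [2, 4, 6, 8, 10, 12],
# and their sum is 42, matching the assertion above.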
Example #3
def call_xenon(job,
               n_processes=1,
               cache="cache.json",
               user_name="x2sun",
               adapter="slurm",
               queue_name=None,
               host_name="cartesius.surfsara.nl",
               workdir=None,
               timeout=60000,
               **kwargs):
    """
    See :
        https://github.com/NLeSC/Xenon-examples/raw/master/doc/tutorial/xenon-tutorial.pdf
    """
    dict_properties = {
        "slurm": {
            "xenon.adaptors.slurm.ignore.version": "true"
        },
        "pbs": {
            "xenon.adaptors.pbs.ignore.version": "true"
        },
    }
    with XenonKeeper(log_level="DEBUG") as Xe:
        certificate = Xe.credentials.newCertificateCredential(
            "ssh", os.environ["HOME"] + "/.ssh/id_rsa", user_name, "", None)

        xenon_config = XenonConfig(
            jobs_scheme=adapter,
            location=host_name,
            credential=certificate,
            jobs_properties=dict_properties[adapter],
        )
        print(xenon_config.__dict__)

        if workdir is None:
            workdir = "/home/" + user_name

        job_config = RemoteJobConfig(
            registry=registry,
            init=plams.init,
            finish=plams.finish,
            queue=queue_name,
            time_out=timeout,
            working_dir=workdir,
        )

        with NCDisplay() as display:
            result = run_xenon_prov(job,
                                    Xe,
                                    cache,
                                    n_processes,
                                    xenon_config,
                                    job_config,
                                    display=display)

    return result
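
A hypothetical invocation of `call_xenon`, shown only to illustrate which knobs the signature exposes; every concrete value below is an assumption about the caller's environment.

# result = call_xenon(my_workflow,
#                     n_processes=4,
#                     user_name="alice",            # hypothetical cluster account
#                     queue_name="short",           # hypothetical SLURM partition
#                     workdir="/scratch/alice")     # hypothetical remote working directory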
Example #4
def call_default(job, n_processes=1):
    """
    Run locally using several threads.
    """
    with NCDisplay() as display:
        return run_parallel_opt(job,
                                n_threads=n_processes,
                                registry=registry,
                                jobdb_file='cache.json',
                                display=display)
Example #5
def call_xenon(job,
               n_processes=1,
               cache='cache.json',
               user_name='x2sun',
               adapter='slurm',
               queue_name=None,
               host_name='cartesius.surfsara.nl',
               workdir=None,
               timeout=60000,
               **kwargs):
    """
    See :
        https://github.com/NLeSC/Xenon-examples/raw/master/doc/tutorial/xenon-tutorial.pdf
    """
    dict_properties = {
        'slurm': {
            'xenon.adaptors.slurm.ignore.version': 'true'
        },
        'pbs': {
            'xenon.adaptors.pbs.ignore.version': 'true'
        }
    }
    with XenonKeeper(log_level='DEBUG') as Xe:
        certificate = Xe.credentials.newCertificateCredential(
            'ssh', os.environ["HOME"] + '/.ssh/id_rsa', user_name, '', None)

        xenon_config = XenonConfig(jobs_scheme=adapter,
                                   location=host_name,
                                   credential=certificate,
                                   jobs_properties=dict_properties[adapter])
        print(xenon_config.__dict__)

        if workdir is None:
            workdir = '/home/' + user_name

        job_config = RemoteJobConfig(registry=registry,
                                     init=plams.init,
                                     finish=plams.finish,
                                     queue=queue_name,
                                     time_out=timeout,
                                     working_dir=workdir)

        with NCDisplay() as display:
            result = run_xenon_prov(job,
                                    Xe,
                                    cache,
                                    n_processes,
                                    xenon_config,
                                    job_config,
                                    display=display)

    return result
Example #6
def call_default(job, n_processes=1, cache='cache.json'):
    """
    Run locally using several threads.
    Caching can be turned off by specifying cache=None
    """
    with NCDisplay() as display:
        if cache is None:
            return run_parallel_with_display(
                job, n_threads=n_processes,
                display=display)
        else:
            return run_parallel_opt(
                job, n_threads=n_processes,
                registry=registry, jobdb_file=cache,
                display=display)
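
A hedged usage note for the variant above: caching is controlled entirely by the `cache` argument (the workflow name below is hypothetical).

# cached run; results are stored in and replayed from 'cache.json'
# result = call_default(my_workflow, n_processes=4)

# uncached run; every task is re-executed
# result = call_default(my_workflow, n_processes=4, cache=None)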
Example #7
def runNoodles(jsonFile, logFolder, numThreads):
    global logFolderAbsPath
    logFolderAbsPath = os.path.abspath(logFolder)
    os.makedirs(logFolderAbsPath)
    # Read the job descriptions without shadowing the built-in `input`.
    with open(jsonFile, 'r') as fp:
        job_descriptions = json.load(fp)
    if job_descriptions[0].get('task') is None:
        jobs = [
            make_job(td['command'], td['id'], td['exclude'])
            for td in job_descriptions
        ]
    else:
        jobs = [
            make_job(td['command'], td['task'], td['exclude'])
            for td in job_descriptions
        ]
    wf = noodles.gather(*jobs)
    with NCDisplay(error_filter) as display:
        run(wf, display=display, n_threads=numThreads)
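
The shape of the JSON file this function expects can be read off the keys it accesses; a minimal example, with the field semantics being assumptions:

# Equivalent Python literal for the contents of jsonFile:
# [
#     {"command": "echo hello", "id": "job-1", "exclude": []},
#     {"command": "echo world", "id": "job-2", "exclude": ["job-1"]}
# ]
# If the entries carry a 'task' key instead of 'id', the second branch above is taken.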
Example #8
def test_find_first():
    global counter

    wfs = [counted_sqr(x) for x in range(10)]
    w = find_first(is_sixteen, wfs)
    result = run_single(w)
    assert result == 16
    assert counter == 5

    wfs = [counted_sqr(x) for x in range(10)]
    w = find_first(is_sixteen, wfs)
    result = run_process(w, n_processes=1, registry=base)
    assert result == 16

    wfs = [display_sqr(x) for x in range(10)]
    w = find_first(is_sixteen, wfs)
    with NCDisplay() as display:
        result = run_logging(w, n_threads=2, display=display)
    assert result == 16
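
For the first pair of assertions to hold, the helpers must look roughly like the sketch below (names and decorators are assumptions); the point of `counter == 5` is that `find_first` evaluates candidates lazily and stops at the fifth square, 4**2 == 16.

import noodles

counter = 0

@noodles.schedule
def counted_sqr(x):
    global counter
    counter += 1        # records how many squares were actually computed
    return x * x

def is_sixteen(n):
    return n == 16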
Example #9
def test_xenon_42():
    A = log_add(1, 1)
    B = sub(3, A)

    multiples = [mul(log_add(i, B), A) for i in range(6)]
    C = accumulate(noodles.gather(*multiples))

    xenon_config = XenonConfig(jobs_scheme='local')

    job_config = RemoteJobConfig(registry=serial.base, time_out=1000)

    with XenonKeeper() as Xe, NCDisplay() as display:
        result = run_xenon(C,
                           Xe,
                           "cache.json",
                           2,
                           xenon_config,
                           job_config,
                           display=display)

    assert result == 42
Example #10
    def run(self, parameter_space, kernel_options, tuning_options):
        """ Tune all instances in parameter_space using a multiple threads

        :param parameter_space: The parameter space as an iterable.
        :type parameter_space: iterable

        :param kernel_options: A dictionary with all options for the kernel.
        :type kernel_options: kernel_tuner.interface.Options

        :param tuning_options: A dictionary with all options regarding the tuning
            process.
        :type tuning_options: kernel_tuner.interface.Options

        :returns: A list of dictionaries with the executed kernel configurations and
            their execution times, and a dictionary with information about the
            hardware/software environment on which the tuning took place.
        :rtype: list(dict()), dict()

        """
        workflow = self._parameter_sweep(parameter_space, kernel_options,
                                         self.device_options, tuning_options)
        if tuning_options.verbose:
            with NCDisplay(_error_filter) as display:
                answer = run_parallel_with_display(workflow, self.max_threads,
                                                   display)
        else:
            answer = run_parallel(workflow, self.max_threads)

        if answer is None:
            print("Tuning did not return any results, did an error occur?")
            return None

        # Keep only configurations that produced a (non-zero) measured time
        result = []
        for chunk in answer:
            result += [d for d in chunk if d['time']]

        return result, {}
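
A hedged illustration of how a caller might consume the `(results, env)` pair documented above; only the 'time' key is taken from the code, everything else is an assumption.

# results, env = runner.run(parameter_space, kernel_options, tuning_options)
# best = min(results, key=lambda d: d['time'])    # each dict describes one executed configuration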
Example #11
def run(
    wf: object,
    runner: str = 'parallel',
    n_processes: int = 1,
    cache: str = 'cache.db',
) -> Any:
    """Run a workflow `wf` using `runner` and `n_processes` number of threads/process."""
    runner = runner.lower()

    if isinstance(wf, Results):
        wf = gather_dict(**wf.state)

    if runner == 'display':
        with NCDisplay() as display:
            return run_logging(wf, n_processes, display)
    elif runner == 'serial':
        return run_single(wf)
    else:
        return run_provenance(wf,
                              n_threads=n_processes,
                              db_file=cache,
                              registry=registry,
                              echo_log=False,
                              always_cache=False)
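
Hypothetical calls into the dispatcher above, one per branch (the workflow and argument values are assumptions):

# result = run(wf, runner='display', n_processes=4)           # live NCDisplay progress
# result = run(wf, runner='serial')                           # single-threaded, no cache
# result = run(wf, n_processes=8, cache='project_cache.db')   # default: provenance/cached run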
Example #12
    s = read_input(args.material_file)

    print(pprint_settings(cstool_model, s))
    print()
    print("Phonon loss: {:~P}".format(s.phonon.energy_loss))
    print("Total molar weight: {:~P}".format(s.M_tot))
    print("Number density: {:~P}".format(s.rho_n))
    print("Brillouin zone energy: {:~P}".format(s.phonon.E_BZ))
    print("Barrier energy: {:~P}".format(s.band_structure.barrier))
    print()
    print("# Computing Mott cross-sections using ELSEPA.")

    e_mcs = np.logspace(1, 5, 145) * units.eV
    f_mcs = s_mott_cs(s, e_mcs, split=12, mabs=False)

    with NCDisplay() as display:
        mcs = run_parallel_opt(f_mcs,
                               n_threads=4,
                               registry=registry,
                               jobdb_file='cache.json',
                               display=display)

    print("# Merging elastic scattering processes.")

    def elastic_cs_fn(a, E):
        return log_interpolate(
            lambda E: phonon_cs_fn(s)(a, E).to('cm^2').magnitude,
            lambda E: mcs.unsafe(a, E.to('eV').magnitude.flat),
            lambda x: x,
            100 * units.eV, 200 * units.eV)(E) * units('cm^2/rad')