    def calibrate_coarse(self, monte_carlo_iterations=50):
        """
        Run SLEUTH coarse calibration.

        Parameters:

        - `monte_carlo_iterations`: number of Monte Carlo iterations for the coarse calibration step.

        """
        coarse_dir = join(self.output_path, 'coarse')
        create_dir(coarse_dir)
        coarse_params = {
            'diff': 50,
            'diff_start': 0,
            'diff_step': 25,
            'diff_end': 100,
            'brd': 50,
            'brd_start': 0,
            'brd_step': 25,
            'brd_end': 100,
            'sprd': 50,
            'sprd_start': 0,
            'sprd_step': 25,
            'sprd_end': 100,
            'slp': 50,
            'slp_start': 0,
            'slp_step': 25,
            'slp_end': 100,
            'rg': 50,
            'rg_start': 0,
            'rg_step': 25,
            'rg_end': 100,
            'output_dir': coarse_dir + '/'
        }
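        # Coarse grid sketch: each coefficient is swept from *_start to *_end in
        # increments of *_step, i.e. {0, 25, 50, 75, 100} -> 5 values each, so the
        # grow binary evaluates up to 5**5 = 3125 coefficient combinations, each
        # repeated for `monte_carlo_iterations` Monte Carlo runs.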

        with open(join(self.output_path, 'scenario.%s.coarse' % self.location),
                  'w') as f:
            scenario_file_path = f.name
            f.write(
                self.create_scenario_file(coarse_params,
                                          monte_carlo_iterations))

        self.status['calibration']['coarse']['start'] = datetime.datetime.now()
        self.status['calibration']['coarse']['params'] = coarse_params
        self.save_status()
        if config['use_mpi']:
            mpirun('-np',
                   config['mpi_cores'],
                   config['grow_binary'],
                   'calibrate',
                   scenario_file_path,
                   _out=join(coarse_dir, 'mpi_out.log'),
                   _err=join(coarse_dir, 'mpi_err.log'))
        else:
            bash(
                '-c', "%s calibrate %s" %
                (config['grow_binary'], scenario_file_path))
        self.status['calibration']['coarse']['end'] = datetime.datetime.now()
        self.save_status()

    def calibrate_final(self, monte_carlo_iterations=50):
        """
        Run SLEUTH final calibration.

        Parameters:

        - `monte_carlo_iterations`: number of Monte Carlo iterations for the final calibration step.

        """

        final_dir = join(self.output_path, 'final')
        create_dir(final_dir)
        default_step = 1

        cs = ControlStats(
            join(self.output_path, 'fine', 'control_stats.log'),
            default_step)
        cs.params['output_dir'] = final_dir + '/'
        cs.params['monte_carlo_iterations'] = monte_carlo_iterations
        with open(join(self.output_path, 'scenario.%s.final' % self.location),
                  'w') as f:
            scenario_file_path = f.name
            f.write(
                self.create_scenario_file(cs.params, monte_carlo_iterations))

        self.status['calibration']['final']['start'] = datetime.datetime.now()
        self.status['calibration']['final']['params'] = cs.params
        self.save_status()
        if config['use_mpi']:
            mpirun('-np',
                   config['mpi_cores'],
                   config['grow_binary'],
                   'calibrate',
                   scenario_file_path,
                   _out=join(final_dir, 'mpi_out.log'),
                   _err=join(final_dir, 'mpi_err.log'))
        else:
            bash(
                '-c', "%s calibrate %s" %
                (config['grow_binary'], scenario_file_path))
        self.status['calibration']['final']['end'] = datetime.datetime.now()
        self.save_status()
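# Usage sketch for the calibration phases above (hedged: the wrapper class is not
# shown in this excerpt, so `model` and `calibrate_fine` are hypothetical names; a
# fine calibration pass is implied because calibrate_final() reads
# control_stats.log from the 'fine' output directory):
#
#     model.calibrate_coarse(monte_carlo_iterations=50)
#     model.calibrate_fine(monte_carlo_iterations=50)   # hypothetical intermediate pass
#     model.calibrate_final(monte_carlo_iterations=50)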
import os
import sys
import tempfile

import sh
from pytest import raises

from lyncs_mpi import Client


def test_not_launch():
    with raises(RuntimeError):
        Client(launch=False)

    pwd = os.getcwd()
    sh.cd(tempfile.mkdtemp())
    test = sh.mpirun(
        "-n",
        3,
        sys.executable,
        "-c",
        "from lyncs_mpi import Client; Client(1, launch=False)",
    )
    sh.cd(pwd)
    assert test.exit_code == 0
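# The test above exercises both sides of `launch`: outside MPI, Client(launch=False)
# raises because it expects num_workers + 2 ranks; under `mpirun -n 3`, the one-liner
# Client(1, launch=False) sees exactly 1 + 2 ranks and exits cleanly. A minimal
# sketch of the same non-launching pattern in a standalone script, assuming it is
# submitted as `mpirun -n 4 python script.py`:
#
#     from lyncs_mpi import Client
#
#     client = Client(2, launch=False)   # 2 workers + scheduler + client = 4 ranks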
    def sleuth_predict(self,
                       end,
                       diff=None,
                       brd=None,
                       sprd=None,
                       slp=None,
                       rg=None,
                       monte_carlo_iterations=50):
        """
        Run SLEUTH prediction step for range enclosed in start, end.

        Parameters:

        - `end`: end of temporal range for prediction, beggining is infered from last input
        - `diff`: TODO: see model documentation
        - `brd`: TODO: see model documentation
        - `sprd`: spread
        - `slp`: slope
        - `rg`: TODO: see model documentation
        - `monte_carlo_iterations`: iterations for the prediction step
        """

        self.predict_end = end

        predict_dir = join(self.output_path, 'predict')
        create_dir(predict_dir)

        default_step = 0  # ignored for predict
        cs = ControlStats(
            join(self.output_path, 'final', 'control_stats.log'),
            default_step)
        cs.params['output_dir'] = predict_dir + '/'

        # Explicit coefficients override the values read from control_stats.log;
        # compare against None so a legitimate value of 0 is not silently ignored.
        if diff is not None:
            cs.params['diff'] = diff

        if brd is not None:
            cs.params['brd'] = brd

        if sprd is not None:
            cs.params['sprd'] = sprd

        if slp is not None:
            cs.params['slp'] = slp

        if rg is not None:
            cs.params['rg'] = rg

        cs.params['monte_carlo_iterations'] = monte_carlo_iterations
        with open(
                join(self.output_path, 'scenario.%s.predict' % self.location),
                'w') as f:
            scenario_file_path = f.name
            f.write(
                self.create_scenario_file(cs.params, monte_carlo_iterations))

        self.status['prediction']['start'] = datetime.datetime.now()
        self.status['prediction']['params'] = cs.params
        self.save_status()
        if config['use_mpi']:
            mpirun('-np',
                   1,
                   config['grow_binary'],
                   'predict',
                   scenario_file_path,
                   _out=join(predict_dir, 'mpi_out.log'),
                   _err=join(predict_dir, 'mpi_err.log'))
        else:
            bash('-c',
                 "%s predict %s" % (config['grow_binary'], scenario_file_path))
        self.status['prediction']['end'] = datetime.datetime.now()
        self.save_status()
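# Usage sketch for the prediction step (hedged: `model` is a hypothetical instance of
# the wrapper class, which is not shown in this excerpt, and the values are purely
# illustrative). Coefficients left as None are taken from the final calibration's
# control_stats.log; any coefficient passed explicitly overrides it:
#
#     model.sleuth_predict(end=2050, diff=40, rg=75, monte_carlo_iterations=100)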
    def __init__(
        self, num_workers=None, threads_per_worker=1, launch=None, out=None, err=None
    ):
        """
        Initializes a Client connected to a cluster of `num_workers` Dask workers.
        """
        self._server = None

        if launch is None:
            launch = default_comm().size == 1

        # pylint: disable=import-outside-toplevel
        if not launch:
            # Then the script has been submitted in parallel with mpirun
            num_workers = num_workers or default_comm().size - 2
            if num_workers < 0 or default_comm().size != num_workers + 2:
                raise RuntimeError(
                    f"""
                Error: (num_workers + 2) processes required.
                The script has not been submitted on enough processes.
                Got {default_comm().size} processes instead of {num_workers + 2}.
                """
                )

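            # dask-mpi rank layout: rank 0 hosts the scheduler, rank 1 keeps running
            # this client script, and the remaining ranks become workers, which is
            # why num_workers + 2 processes are required above.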
            initialize(
                nthreads=threads_per_worker,
                local_directory=tempfile.mkdtemp(),
                nanny=False,
            )

            super().__init__()

        else:
            num_workers = num_workers or (multiprocessing.cpu_count() + 1)

            # Since dask-mpi produces several files, we create a temporary directory
            self._dir = tempfile.mkdtemp()

            # The command runs in the background (_bg=True); its stdout and stderr
            # are redirected to `out` and `err` (defaulting to sys.stdout/sys.stderr)
            pwd = os.getcwd()
            sh.cd(self._dir)
            self._server = sh.mpirun(
                "-n",
                num_workers + 1,
                "dask-mpi",
                "--no-nanny",
                "--nthreads",
                threads_per_worker,
                "--scheduler-file",
                "scheduler.json",
                _bg=True,
                _out=out or sys.stdout,
                _err=err or sys.stderr,
            )
            sh.cd(pwd)

            atexit.register(self.close_server)

            super().__init__(scheduler_file=self._dir + "/scheduler.json")

        # Waiting for all the workers to connect
        def handler(signum, frame):
            if self.server is not None:
                self.close_server()
            raise RuntimeError(
                "Couldn't connect to %d processes. Got %d workers."
                % (num_workers, len(self.workers))
            )

        signal.signal(signal.SIGALRM, handler)
        signal.alarm(5)

        while len(self.workers) != num_workers:
            time.sleep(0.001)

        signal.alarm(0)

        self.ranks = {key: val["name"] for key, val in self.workers.items()}
        self._comm = self.create_comm()
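# Usage sketch (assuming lyncs_mpi and its dask-mpi dependency are installed): by
# default the Client launches `mpirun ... dask-mpi` in the background from a temporary
# directory, waits for the workers to connect, and registers close_server() with
# atexit so the cluster is torn down when the interpreter exits:
#
#     client = Client(num_workers=2)
#     print(client.ranks)      # worker address -> worker name, used as the MPI rank
#     client.close_server()    # optional; also runs automatically at exit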