def __init__(
        self,
        name: str = "",
        path_prefix: str = "",
        unique_tag: Optional[str] = None,
        prior_passer: Optional[PriorPasser] = None,
        iterations_per_update: Optional[int] = None,
        number_of_cores: Optional[int] = None,
        session: Optional[Session] = None,
        **kwargs
):
    """
    An UltraNest non-linear search.

    UltraNest is an optional requirement and must be installed manually via the command
    `pip install ultranest`. It is optional as it has certain dependencies which are generally straight
    forward to install (e.g. Cython).

    For a full description of UltraNest and its Python wrapper PyUltraNest, checkout its Github and
    documentation webpages:

    https://github.com/JohannesBuchner/UltraNest
    https://johannesbuchner.github.io/UltraNest/readme.html

    Parameters
    ----------
    name
        The name of the search, controlling the last folder results are output.
    path_prefix
        The path of folders prefixing the name folder where results are output.
    unique_tag
        The name of a unique tag for this model-fit, which will be given a unique entry in the sqlite
        database and also acts as the folder after the path prefix and before the search name.
    prior_passer
        Controls how priors are passed from the results of this `NonLinearSearch` to a subsequent
        non-linear search.
    iterations_per_update
        The number of iterations performed between every UltraNest back-up (via dumping the UltraNest
        instance as a pickle).
    number_of_cores
        The number of cores UltraNest sampling is performed using a Python multiprocessing Pool instance.
        If 1, a pool instance is not created and the job runs in serial.
    session
        An SQLalchemy session instance so the results of the model-fit are written to an SQLite database.
    """
    super().__init__(
        name=name,
        path_prefix=path_prefix,
        unique_tag=unique_tag,
        prior_passer=prior_passer,
        iterations_per_update=iterations_per_update,
        session=session,
        **kwargs
    )

    # Fall back to the configuration file when the caller does not give a core count.
    self.number_of_cores = (
        self._config("parallel", "number_of_cores")
        if number_of_cores is None
        else number_of_cores
    )

    # Copy every step-sampler configuration entry onto the search as an attribute.
    for key, value in self.config_dict_stepsampler.items():
        setattr(self, key, value)

    # Without a step-sampler class there is no step count to configure.
    if self.config_dict_stepsampler["stepsampler_cls"] is None:
        self.nsteps = None

    logger.debug("Creating UltraNest Search")
def __init__(
        self,
        name=None,
        path_prefix=None,
        unique_tag: Optional[str] = None,
        prior_passer=None,
        initializer=None,
        iterations_per_update: Optional[int] = None,
        number_of_cores: Optional[int] = None,
        session: Optional[Session] = None,
        **kwargs
):
    """
    A PySwarms Particle Swarm Optimizer global non-linear search.

    For a full description of PySwarms, checkout its Github and readthedocs webpages:

    https://github.com/ljvmiranda921/pyswarms
    https://pyswarms.readthedocs.io/en/latest/index.html

    Parameters
    ----------
    name
        The name of the search, controlling the last folder results are output.
    path_prefix
        The path of folders prefixing the name folder where results are output.
    unique_tag
        The name of a unique tag for this model-fit, which will be given a unique entry in the sqlite
        database and also acts as the folder after the path prefix and before the search name.
    prior_passer
        Controls how priors are passed from the results of this `NonLinearSearch` to a subsequent
        non-linear search.
    initializer
        Generates the initialize samples of non-linear parameter space (see autofit.non_linear.initializer).
    number_of_cores : int
        The number of cores PySwarms sampling is performed using a Python multiprocessing Pool instance.
        If 1, a pool instance is not created and the job runs in serial.
    session
        An SQLalchemy session instance so the results of the model-fit are written to an SQLite database.
    """
    super().__init__(
        name=name,
        path_prefix=path_prefix,
        unique_tag=unique_tag,
        prior_passer=prior_passer,
        initializer=initializer,
        iterations_per_update=iterations_per_update,
        session=session,
        **kwargs
    )

    # Fall back to the configuration file when the caller does not give a core count.
    self.number_of_cores = (
        self._config("parallel", "number_of_cores")
        if number_of_cores is None
        else number_of_cores
    )

    logger.debug("Creating PySwarms Search")
def __init__(
        self,
        name: str = "",
        path_prefix: str = "",
        unique_tag: Optional[str] = None,
        prior_passer: Optional[PriorPasser] = None,
        iterations_per_update: Optional[int] = None,
        number_of_cores: Optional[int] = None,
        session: Optional[Session] = None,
        **kwargs
):
    """
    A Dynesty non-linear search.

    For a full description of Dynesty, checkout its GitHub and readthedocs webpages:

    https://github.com/joshspeagle/dynesty
    https://dynesty.readthedocs.io/en/latest/index.html

    Parameters
    ----------
    name
        The name of the search, controlling the last folder results are output.
    path_prefix
        The path of folders prefixing the name folder where results are output.
    unique_tag
        The name of a unique tag for this model-fit, which will be given a unique entry in the sqlite
        database and also acts as the folder after the path prefix and before the search name.
    prior_passer
        Controls how priors are passed from the results of this `NonLinearSearch` to a subsequent
        non-linear search.
    iterations_per_update
        The number of iterations performed between every Dynesty back-up (via dumping the Dynesty
        instance as a pickle).
    number_of_cores
        The number of cores Dynesty sampling is performed using a Python multiprocessing Pool instance.
        If 1, a pool instance is not created and the job runs in serial.
    session
        An SQLalchemy session instance so the results of the model-fit are written to an SQLite database.
    """
    super().__init__(
        name=name,
        path_prefix=path_prefix,
        unique_tag=unique_tag,
        prior_passer=prior_passer,
        iterations_per_update=iterations_per_update,
        session=session,
        **kwargs
    )

    # Fall back to the configuration file when the caller does not give a core count.
    self.number_of_cores = (
        self._config("parallel", "number_of_cores")
        if number_of_cores is None
        else number_of_cores
    )

    logger.debug("Creating DynestyStatic Search")
def __init__(
        self,
        name=None,
        path_prefix=None,
        unique_tag: Optional[str] = None,
        prior_passer=None,
        session : Optional[Session] = None,
        **kwargs
):
    """
    A MultiNest non-linear search.

    For a full description of MultiNest and its Python wrapper PyMultiNest, checkout its Github and
    documentation webpages:

    https://github.com/JohannesBuchner/MultiNest
    https://github.com/JohannesBuchner/PyMultiNest
    http://johannesbuchner.github.io/PyMultiNest/index.html#

    Parameters
    ----------
    name
        The name of the search, controlling the last folder results are output.
    path_prefix
        The path of folders prefixing the name folder where results are output.
    unique_tag
        The name of a unique tag for this model-fit, which will be given a unique entry in the sqlite
        database and also acts as the folder after the path prefix and before the search name.
    prior_passer
        Controls how priors are passed from the results of this `NonLinearSearch` to a subsequent
        non-linear search.
    session
        An SQLalchemy session instance so the results of the model-fit are written to an SQLite database.
    """
    # Gather the search settings into one mapping before forwarding them to the base class.
    search_settings = dict(
        name=name,
        path_prefix=path_prefix,
        unique_tag=unique_tag,
        prior_passer=prior_passer,
        session=session,
    )

    super().__init__(**search_settings, **kwargs)

    logger.debug("Creating MultiNest Search")
def __init__(
        self,
        name=None,
        path_prefix=None,
        unique_tag: Optional[str] = None,
        prior_passer=None,
        initializer=None,
        auto_correlations_settings=None,
        iterations_per_update: Optional[int] = None,
        number_of_cores: Optional[int] = None,
        session: Optional[Session] = None,
        **kwargs
):
    """
    A Zeus non-linear search.

    For a full description of Zeus, checkout its Github and readthedocs webpages:

    https://github.com/minaskar/zeus
    https://zeus-mcmc.readthedocs.io/en/latest/

    If you use `Zeus` as part of a published work, please cite the package following the instructions
    under the *Attribution* section of the GitHub page.

    Parameters
    ----------
    name
        The name of the search, controlling the last folder results are output.
    path_prefix
        The path of folders prefixing the name folder where results are output.
    unique_tag
        The name of a unique tag for this model-fit, which will be given a unique entry in the sqlite
        database and also acts as the folder after the path prefix and before the search name.
    prior_passer
        Controls how priors are passed from the results of this `NonLinearSearch` to a subsequent
        non-linear search.
    nwalkers : int
        The number of walkers in the ensemble used to sample parameter space.
    nsteps : int
        The number of steps that must be taken by every walker. The `NonLinearSearch` will thus run for
        nwalkers * nsteps iterations.
    initializer
        Generates the initialize samples of non-linear parameter space (see autofit.non_linear.initializer).
    auto_correlations_settings : AutoCorrelationsSettings
        Customizes and performs auto correlation calculations performed during and after the search.
        Defaults to a fresh `AutoCorrelationsSettings()` (a mutable default argument would be shared
        across every search instance).
    number_of_cores : int
        The number of cores Zeus sampling is performed using a Python multiprocessing Pool instance.
        If 1, a pool instance is not created and the job runs in serial.
    session
        An SQLalchemy session instance so the results of the model-fit are written to an SQLite database.
    """
    # Build the default per-instance: a default of AutoCorrelationsSettings() in the signature
    # would be a single shared mutable instance across all searches.
    if auto_correlations_settings is None:
        auto_correlations_settings = AutoCorrelationsSettings()

    super().__init__(
        name=name,
        path_prefix=path_prefix,
        unique_tag=unique_tag,
        prior_passer=prior_passer,
        initializer=initializer,
        auto_correlations_settings=auto_correlations_settings,
        iterations_per_update=iterations_per_update,
        session=session,
        **kwargs
    )

    # Explicit `is None` check (rather than `or`) so an explicit value of 0 is not silently
    # replaced by the config default, matching the sibling search classes.
    self.number_of_cores = (
        self._config("parallel", "number_of_cores")
        if number_of_cores is None
        else number_of_cores
    )

    logger.debug("Creating Zeus Search")
def __init__(
        self,
        paths=None,
        prior_passer=None,
        nwalkers=None,
        nsteps=None,
        initializer=None,
        auto_correlation_check_for_convergence=None,
        auto_correlation_check_size=None,
        auto_correlation_required_length=None,
        auto_correlation_change_threshold=None,
        iterations_per_update=None,
        number_of_cores=None,
):
    """
    An Emcee non-linear search.

    For a full description of Emcee, checkout its Github and readthedocs webpages:

    https://github.com/dfm/emcee
    https://emcee.readthedocs.io/en/stable/

    Extensions:

    - Provides the option to check the auto-correlation length of the samples during the run and
      terminating sampling early if these meet a specified threshold. See this page
      (https://emcee.readthedocs.io/en/stable/tutorials/autocorr/#autocorr) for a description.

    - Provides different options for walker initialization, with the default 'ball' method starting all
      walkers close to one another in parameter space, as recommended in the Emcee documentation
      (https://emcee.readthedocs.io/en/stable/user/faq/).

    If you use *Emcee* as part of a published work, please cite the package following the instructions
    under the *Attribution* section of the GitHub page.

    Parameters
    ----------
    paths : af.Paths
        Manages all paths, e.g. where the search outputs are stored, the samples, etc.
    prior_passer : af.PriorPasser
        Controls how priors are passed from the results of this `NonLinearSearch` to a subsequent
        non-linear search.
    nwalkers : int
        The number of walkers in the ensemble used to sample parameter space.
    nsteps : int
        The number of steps that must be taken by every walker. The `NonLinearSearch` will thus run for
        nwalkers * nsteps iterations.
    initializer : non_linear.initializer.Initializer
        Generates the initialize samples of non-linear parameter space (see autofit.non_linear.initializer).
    auto_correlation_check_for_convergence : bool
        Whether the auto-correlation lengths of the Emcee samples are checked to determine the stopping
        criteria. If `True`, this option may terminate the Emcee run before the input number of steps,
        nsteps, has been performed. If `False` nstep samples will be taken.
    auto_correlation_check_size : int
        The length of the samples used to check the auto-correlation lengths (from the latest sample
        backwards). For convergence, the auto-correlations must not change over a certain range of
        samples. A longer check-size thus requires more samples meet the auto-correlation threshold,
        taking longer to terminate sampling. However, shorter chains risk stopping sampling early due
        to noise.
    auto_correlation_required_length : int
        The length an auto_correlation chain must be for it to be used to evaluate whether its change
        threshold is sufficiently small to terminate sampling early.
    auto_correlation_change_threshold : float
        The threshold value by which if the change in auto_correlations is below sampling will be
        terminated early.
    number_of_cores : int
        The number of cores Emcee sampling is performed using a Python multiprocessing Pool instance.
        If 1, a pool instance is not created and the job runs in serial.

    All remaining attributes are emcee parameters and described at the emcee API webpage:

    https://emcee.readthedocs.io/en/stable/
    """

    def config_or(section, attribute, value):
        # Use the explicitly supplied value; otherwise read the config fallback.
        return self._config(section, attribute) if value is None else value

    self.nwalkers = config_or("search", "nwalkers", nwalkers)
    self.nsteps = config_or("search", "nsteps", nsteps)
    self.auto_correlation_check_for_convergence = config_or(
        "auto_correlation", "check_for_convergence", auto_correlation_check_for_convergence
    )
    self.auto_correlation_check_size = config_or(
        "auto_correlation", "check_size", auto_correlation_check_size
    )
    self.auto_correlation_required_length = config_or(
        "auto_correlation", "required_length", auto_correlation_required_length
    )
    self.auto_correlation_change_threshold = config_or(
        "auto_correlation", "change_threshold", auto_correlation_change_threshold
    )

    super().__init__(
        paths=paths,
        prior_passer=prior_passer,
        initializer=initializer,
        iterations_per_update=iterations_per_update,
    )

    self.number_of_cores = config_or("parallel", "number_of_cores", number_of_cores)

    logger.debug("Creating Emcee NLO")