Example #1
    def _fit_impl(self, objective, parameters):
        ndim = len(parameters.infos())
        minimums = ndim * [-np.inf]
        maximums = ndim * [+np.inf]
        initials = np.empty((ndim, self._size))
        for i, name in enumerate(parameters.names()):
            pinfo = parameters.infos()[name]
            minimum = pinfo.minimum()
            maximum = pinfo.maximum()
            value = pinfo.initial_value()
            scale = pinfo.initial_scale()
            has_init = pinfo.has_initial()
            init_min = value - 0.5 * scale if has_init else minimum
            init_max = value + 0.5 * scale if has_init else maximum
            init_min = max(init_min, minimum)
            init_max = min(init_max, maximum)
            initials[i, :] = random.uniform(init_min, init_max, self._size)
            minimums[i] = minimum
            maximums[i] = maximum

        prb = pg.problem(Problem(objective, parameters, minimums, maximums))
        alg = pg.algorithm(self._setup_algorithm(parameters))
        alg.set_verbosity(self._verbosity)
        pop = pg.population(prb, size=self._size, seed=self._seed)
        for i in range(self._size):
            pop.set_x(i, initials[:, i])
        pop = alg.evolve(pop)
        solution = dict(mode=pop.champion_x)
        result = make_fitter_result(objective, parameters, solutions=solution)
        return result
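
The snippet above follows pygmo's standard problem/algorithm/population cycle. Below is a minimal standalone sketch of that cycle; the sphere objective, the bounds, and the choice of pg.de are illustrative assumptions, not parts of the fitter above.

import pygmo as pg

class SphereProblem:
    """Toy user-defined problem: minimise sum(x**2)."""

    def fitness(self, x):
        return [sum(xi * xi for xi in x)]

    def get_bounds(self):
        # (minimums, maximums), analogous to the per-parameter bounds above
        return ([-5.0] * 3, [5.0] * 3)

prb = pg.problem(SphereProblem())
alg = pg.algorithm(pg.de(gen=100))          # differential evolution as an example
pop = pg.population(prb, size=20, seed=42)  # random initial decision vectors
pop = alg.evolve(pop)
print(pop.champion_x, pop.champion_f)       # best decision vector and fitness
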
Example #2
    def _fit_impl(self, objective, parameters):

        # Setup options
        algorithm_options_init, algorithm_options_setup = self._setup_options(
            copy.deepcopy(self._algorithm_options_init),
            copy.deepcopy(self._algorithm_options_setup))

        # Initialize algorithm
        algorithm = self._algorithm_type(**algorithm_options_init)

        # Setup problem
        problem = PymooProblem(objective, parameters)

        # Run optimization
        res = pymoo.optimize.minimize(
            problem, algorithm, **algorithm_options_setup)

        # Extract the best-fit solution and objective value(s)
        solution = dict(mode=list(res.X))
        extra = dict(f=res.F)

        result = make_fitter_result(
            objective, parameters, extra=extra, solutions=solution)

        return result
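
The same pattern in plain pymoo, outside the fitter class. The Sphere problem and the GA algorithm below are illustrative stand-ins for PymooProblem and self._algorithm_type.

import numpy as np
from pymoo.algorithms.soo.nonconvex.ga import GA
from pymoo.core.problem import Problem
from pymoo.optimize import minimize

class Sphere(Problem):
    """Toy single-objective problem: minimise sum(x**2)."""

    def __init__(self):
        super().__init__(n_var=3, n_obj=1, xl=-5.0, xu=5.0)

    def _evaluate(self, x, out, *args, **kwargs):
        out['F'] = np.sum(x ** 2, axis=1)

res = minimize(Sphere(), GA(pop_size=50), ('n_gen', 100), seed=1, verbose=False)
print(res.X, res.F)  # the fields read into solution and extra above
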
Example #3
    def _fit_impl(self, objective, parameters):
        ndim = len(parameters.infos())
        # Setup boundary iterables
        boundary_periodic = []
        boundary_reflective = []
        for i, param in enumerate(parameters.infos().values()):
            match param.boundary():
                case 'periodic':
                    boundary_periodic.append(i)
                case 'reflective':
                    boundary_reflective.append(i)
        if not boundary_periodic:
            boundary_periodic = None
        if not boundary_reflective:
            boundary_reflective = None
        # Create sampler
        sampler = dynesty.NestedSampler(
            log_likelihood, prior_transform, ndim,
            logl_args=(parameters, objective), ptform_args=(parameters,),
            periodic=boundary_periodic, reflective=boundary_reflective,
            **self._options_init)
        # Run sampling
        sampler.run_nested(**self._options_run_nested)
        res = sampler.results
        res.summary()  # Results.summary() prints directly and returns None
        # Generate equally-weighted samples
        samples_weights = np.exp(res.logwt - res.logz[-1])
        samples_weighted = res.samples
        samples_unweighted = dynesty.utils.resample_equal(
            samples_weighted, samples_weights)
        loglikes = fitutils.reorder_log_likelihood(
            res.logl, samples_weighted, samples_unweighted)
        # Assemble the equally-weighted posterior
        posterior = dict(samples=samples_unweighted, loglikes=loglikes)
        # Extract additional information
        extra = iterutils.nativify(dict(
            nlive=res.nlive,
            niter=res.niter,
            efficiency=res.eff,
            log_evidence=res.logz[-1],
            log_evidence_err=res.logzerr[-1],
            information_gain=res.information[-1]))

        result = make_fitter_result(
            objective, parameters, posterior, solutions=(), extra=extra)

        return result
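
For reference, a self-contained dynesty run with the same equal-weight resampling step. The Gaussian log-likelihood, uniform prior transform, and stopping criterion are toy assumptions.

import numpy as np
import dynesty

ndim = 2

def log_likelihood(theta):
    # Standard-normal toy likelihood
    return -0.5 * np.sum(theta ** 2)

def prior_transform(u):
    # Map the unit cube to a uniform prior on [-5, 5]
    return 10.0 * u - 5.0

sampler = dynesty.NestedSampler(log_likelihood, prior_transform, ndim, nlive=500)
sampler.run_nested(dlogz=0.01)
res = sampler.results
weights = np.exp(res.logwt - res.logz[-1])  # same weighting as above
samples = dynesty.utils.resample_equal(res.samples, weights)
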
Example #4
    def _fit_impl(self, objective, parameters):

        ndim = len(parameters.infos())
        # Build moves list
        moves = [(m.obj(), w) for m, w in self._moves] if self._moves else None
        # Create sampler
        sampler = emcee.EnsembleSampler(
            self._nwalkers,
            ndim,
            log_prob_fn=log_probability,
            moves=moves,
            args=[parameters, objective],
            parameter_names=parameters.enames(False, False, True),
            blobs_dtype=[('log_like', float), ('log_prior', float)],
            backend=emcee.backends.HDFBackend('foo.h5'))
        # Calculate the starting positions of all walkers
        initial_values = np.empty((self._nwalkers, ndim))
        for i, (pname, pinfo) in enumerate(parameters.infos().items()):
            initial_values[:, i] = pinfo.initial_value() + random.uniform(
                pinfo.initial_value_minimum(), pinfo.initial_value_maximum(),
                self._nwalkers)
        # Run mcmc sampling
        sampler.run_mcmc(initial_values,
                         nsteps=self._nsteps,
                         tune=self._tune,
                         thin_by=self._thin_by,
                         progress=True)

        # Report sampling diagnostics
        print(sampler.acceptance_fraction)
        print(sampler.get_autocorr_time())

        samples = sampler.get_chain(discard=0, thin=1, flat=True)
        log_like = sampler.get_blobs(flat=True)['log_like']

        posterior = dict(
            samples=samples,
            loglikes=log_like,
        )

        result = make_fitter_result(objective,
                                    parameters,
                                    posterior,
                                    solutions=())

        return result
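
A minimal emcee run showing the same sampler/run_mcmc/get_chain cycle without the fitter plumbing; the Gaussian log-probability and the walker initialisation are assumptions for illustration.

import numpy as np
import emcee

ndim, nwalkers, nsteps = 3, 32, 1000

def log_probability(theta):
    # Standard-normal toy posterior
    return -0.5 * np.sum(theta ** 2)

sampler = emcee.EnsembleSampler(nwalkers, ndim, log_probability)
initial_values = np.random.uniform(-1.0, 1.0, size=(nwalkers, ndim))
sampler.run_mcmc(initial_values, nsteps, progress=True)
# Discard burn-in and thin before flattening, unlike the raw read-out above
samples = sampler.get_chain(discard=100, thin=10, flat=True)
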
Example #5
    def _fit_impl(self, objective, parameters):
        # Create lmfit parameters for all free parameters.
        # Replace brackets in parameters names
        # because lmfit does not support them.
        lmfit_params = lmfit.Parameters()
        for pname, pinfo in parameters.infos().items():
            lmfit_params.add(
                pname.replace('[', '__obr__').replace(']', '__cbr__'),
                pinfo.initial_value(), True, pinfo.minimum(), pinfo.maximum())
        # Setup minimiser-specific options
        global_options, method_options = self._setup_options(
            parameters, copy.deepcopy(self._global_options),
            copy.deepcopy(self._method_options))
        # Run minimisation
        minimizer = lmfit.Minimizer(self._residual_func,
                                    params=lmfit_params,
                                    fcn_args=(parameters, objective),
                                    **global_options,
                                    **method_options)
        lmfit_result = minimizer.minimize(method=self._method)
        # Extract the best-fit solution
        solution = dict(mode=list(lmfit_result.params.valuesdict().values()))
        # Extract covariance and std error
        if hasattr(lmfit_result, 'covar'):
            covar = lmfit_result.covar
            solution.update(covar=covar, std=list(np.sqrt(np.diag(covar))))
        # Extract additional information (if available)
        extra = dict()
        attrs = [
            'success', 'status', 'message', 'nfev', 'chisqr', 'redchi', 'aic',
            'bic'
        ]
        for attr in attrs:
            if hasattr(lmfit_result, attr):
                extra[attr] = getattr(lmfit_result, attr)
        # Build the final result, including the extra information
        result = make_fitter_result(
            objective, parameters, extra=extra, solutions=solution)

        return result
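
Finally, a standalone lmfit minimisation with the same Parameters/Minimizer/minimize sequence; the straight-line model and the synthetic data are illustrative assumptions, not self._residual_func.

import numpy as np
import lmfit

def residual(params, x, data):
    # Residuals of a straight-line model
    v = params.valuesdict()
    return data - (v['a'] * x + v['b'])

params = lmfit.Parameters()
params.add('a', value=1.0)
params.add('b', value=0.0)

x = np.linspace(0.0, 10.0, 50)
rng = np.random.default_rng(0)
data = 2.5 * x + 1.0 + rng.normal(0.0, 0.1, x.size)

minimizer = lmfit.Minimizer(residual, params, fcn_args=(x, data))
lmfit_result = minimizer.minimize(method='leastsq')
print(lmfit.fit_report(lmfit_result))  # includes covar-derived uncertainties
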