def _calc_mean(self, data=None): if data is None: data = self._data if self._min_cov_det: lg.details("Use MCD for expectation value estimation") mcd = MCD(support_fraction=self._mcd_supp_frac).fit( self._data.transpose()) ydata = mcd.location_ else: ydata = np.mean(data, axis=1) return ydata
def _calc_cov(self, data=None):
    """Estimate the covariance matrix of the data.

    Parameters
    ----------
    data : ndarray, optional
        2D sample array; defaults to ``self._data``.

    Returns
    -------
    ndarray
        Covariance matrix, robust (MCD) if ``self._min_cov_det`` is set,
        otherwise from ``calc_cov``. When ``self._sample_data`` is False the
        matrix is divided by ``self._nconfs`` so it scales like the error of
        the mean for fitting.
    """
    if data is None:
        data = self._data
    if self._min_cov_det:
        lg.details("Use MCD for covariance estimation")
        # Fit on `data` (not self._data) so an explicitly passed array is
        # honored -- the original ignored the `data` argument on this branch.
        mcd = MCD(support_fraction=self._mcd_supp_frac).fit(
            data.transpose())
        cov = mcd.covariance_
    else:
        cov = calc_cov(data)
    if not self._sample_data:
        cov /= self._nconfs  # For fit we have to normalize like an error
    return cov
def _calc_cov_and_mean(self, data=None):
    """Compute mean, error, and covariance matrix of the data in one pass.

    Parameters
    ----------
    data : ndarray, optional
        2D sample array; defaults to ``self._data``.

    Returns
    -------
    tuple of ndarray
        ``(ydata, edata, cov)`` -- mean, per-point error (sqrt of the
        covariance diagonal), and covariance matrix.
    """
    if data is None:
        data = self._data
    if self._min_cov_det:
        # MCD yields location and covariance from the same robust fit.
        # Fit on `data` so an explicitly passed array is honored; the
        # original ignored `data` here and also computed an unused mask
        # (`self._xdata < self.xmax()`), which has been removed.
        mcd = MCD(support_fraction=self._mcd_supp_frac).fit(
            data.transpose())
        ydata = mcd.location_
        lg.details("Use MCD for covariance estimation")
        cov = mcd.covariance_
    else:
        # Use `data` for both mean and covariance -- the original mixed
        # self._data (mean) with data (covariance).
        ydata = std_mean(data, axis=1)
        cov = calc_cov(data)
    if not self._sample_data:
        cov /= self._nconfs  # For fit we have to normalize like an error
    edata = np.sqrt(np.diag(cov))
    return ydata, edata, cov
def corr_fit(self, xmin=-np.inf, xmax=np.inf, start_params=None, nstates=None,
             nstates_osc=None, correlated=None, priorsigma=None, priorval=None):
    """Perform a (possibly correlated) multi-state correlator fit.

    Unless usable start parameters are provided, an uncorrelated fit is run
    first and its result seeds the correlated fit.

    Parameters
    ----------
    xmin, xmax : float
        Fit-range limits forwarded to ``simple_corr_fit``.
    start_params : array-like, optional
        Initial fit parameters; estimated via ``init_start_params`` when None.
    nstates, nstates_osc : int, optional
        Number of non-oscillating / oscillating states; default to the values
        stored on the instance.
    correlated : bool, optional
        Whether to run the correlated fit; defaults to ``self._cov_avail``.
    priorsigma, priorval : array-like, optional
        Priors forwarded to the fit routines.

    Returns
    -------
    tuple
        ``(res, res_err, chi_dof, aicc, pcov)`` of the last fit performed.

    Raises
    ------
    ValueError
        If ``nstates == 0``.
    NotAvailableError
        If a correlated fit is requested but no covariance matrix is available.
    """
    if nstates == 0:
        raise ValueError("Require at least one non-oscillating state")

    # Fall back to the instance-level fit configuration.
    if nstates is None:
        nstates = self._nstates
    if nstates_osc is None:
        nstates_osc = self._nstates_osc
    if correlated is None:
        correlated = self._cov_avail

    # To estimate parameters we usually perform a non-correlated fit before
    # the actual correlated fit. This is not necessary if we already get
    # start parameters. However, for an oscillating fit it is reasonable to
    # perform a non-correlated fit in any case. This is because start
    # parameter estimation is usually performed outside for oscillating fits.
    if correlated and start_params is not None:
        if nstates_osc > 0:
            skip_uncorr = False
        else:
            skip_uncorr = True
    else:
        skip_uncorr = False

    try:
        start_params, priorval, priorsigma = self.init_start_params(
            xmin, xmax, start_params, priorval, priorsigma, nstates,
            nstates_osc)
    except Exception as e:
        # Start-parameter estimation is best effort: fall back to a direct
        # fit with start_params = None instead of aborting.
        lg.info("Failed to estimate start parameters. Try direct fit")
        lg.details("Error was", e)
        if lg.isLevel("DEBUG"):
            traceback.print_exc()
        start_params = None
    finally:
        # Save the states of the last fit. This must be ensured, even if the
        # parameter estimation fails.
        self._nstates = nstates
        self._nstates_osc = nstates_osc

    print_res("Start parameters for %d + %d fit" % (nstates, nstates_osc),
              start_params, level="INFO")

    if not skip_uncorr:
        # Uncorrelated pre-fit; its result seeds the correlated fit below.
        res, res_err, chi_dof, aicc, pcov = self.simple_corr_fit(
            xmin, xmax, start_params, correlated=False, priorval=priorval,
            priorsigma=priorsigma, nstates=nstates, nstates_osc=nstates_osc)
        start_params = np.copy(res)

        # NOTE(review): _change_order appears to reorder res/res_err in
        # place -- confirm; the return values are not reassigned here.
        self._change_order(res, res_err, nstates, nstates_osc)
        res, res_err = self.remove_mult_const(res, res_err)
        pcov = self.remove_mult_const_pcov(pcov)

        lg.info()
        print_res("Fit result for uncorrelated %d + %d fit"
                  % (nstates, nstates_osc), res, res_err, chi_dof,
                  level="INFO")
        print_scl("AICc", aicc, level='INFO')

    if correlated:
        if not self._cov_avail:
            raise NotAvailableError("Covariance matrix is not available")
        res, res_err, chi_dof, aicc, pcov = self.simple_corr_fit(
            xmin, xmax, start_params=start_params, correlated=True,
            priorval=priorval, priorsigma=priorsigma, nstates=nstates,
            nstates_osc=nstates_osc)

        self._change_order(res, res_err, nstates, nstates_osc)
        res, res_err = self.remove_mult_const(res, res_err)
        pcov = self.remove_mult_const_pcov(pcov)

        lg.info()
        print_res("Fit result for correlated %d + %d fit"
                  % (nstates, nstates_osc), res, res_err, chi_dof,
                  level="INFO")
        print_scl("AICc", aicc)

    return res, res_err, chi_dof, aicc, pcov
def minimize(func, jack=None, hess=None, start_params=None, tol=1e-12,
             maxiter=10000, use_alg=False, algorithm=None):
    """Minimize ``func`` with the requested algorithm.

    Dispatches either to the project's ``levenberg`` implementation or to
    ``opt.minimize`` (run through ``timeout`` because at least COBYLA can
    get stuck in an endless loop).

    Parameters
    ----------
    func : callable
        Objective function.
    jack, hess : callable, optional
        Jacobian and Hessian, forwarded where the algorithm supports them.
    start_params : array-like, optional
        Initial parameter guess.
    tol : float
        Convergence tolerance (also used for algorithm-specific options).
    maxiter : int
        Iteration budget.
    use_alg : bool
        Forwarded to ``levenberg`` only.
    algorithm : str, optional
        One of "levenberg", the scipy method names handled below, or any
        other scipy method name (generic fallback).

    Returns
    -------
    tuple
        ``(params, nfev)`` -- parameters as a (possibly length-1) sequence
        and the number of function evaluations.

    Raises
    ------
    ValueError
        If the scipy minimizer reports non-convergence.
    """
    if algorithm == "levenberg":
        args = (start_params, func, jack, hess)
        kwargs = {'eps': tol, 'use_alg': use_alg, 'max_itt': maxiter}
        params, nfev = levenberg(*args, **kwargs)
    else:
        # Per-algorithm keyword arguments for opt.minimize. Gradient-free
        # methods (COBYLA, Powell, Nelder-Mead) must not receive a Jacobian.
        extra_kwargs = {
            "BFGS": {'jac': jack, 'options': {'gtol': tol,
                                              'maxiter': maxiter}},
            "TNC": {'jac': jack, 'options': {'maxiter': maxiter}},
            "COBYLA": {'options': {'maxiter': maxiter}},
            "SLSQP": {'jac': jack, 'options': {'maxiter': maxiter}},
            "L-BFGS-B": {'jac': jack, 'options': {'maxiter': maxiter}},
            "Powell": {'options': {'xtol': tol, 'ftol': tol,
                                   'maxfev': maxiter}},
            "Nelder-Mead": {'options': {'maxiter': maxiter}},
        }
        args = (func, start_params)
        kwargs = {'method': algorithm, 'tol': tol}
        kwargs.update(extra_kwargs.get(
            algorithm,
            {'jac': jack, 'hess': hess, 'options': {'maxiter': maxiter}}))
        # At least COBYLA sometimes gets stuck in an endless loop. Use
        # timeout to make sure we finish.
        res = timeout(opt.minimize, args=args, kwargs=kwargs,
                      timeout_duration=100)
        params = res.x
        nfev = res.nfev
        if not res.success:
            lg.details(algorithm, res.message)
            raise ValueError(algorithm + ": Minimization did not converge!")
    # Normalize scalar results to a length-1 sequence so callers can index.
    try:
        params[0]
    except Exception:
        if isinstance(params, (np.ndarray, np.generic)):
            # 0-d numpy arrays/scalars need explicit conversion; np.asscalar
            # was removed in NumPy 1.23 -- .item() is the supported API.
            params = [params.item()]
        else:
            params = [params]
    return params, nfev