def __call__(self, model, in_coords, ref_coords, sigma=5.0, maxsig=4.0,
             **kwargs):
    model_copy = _validate_model(model, ['bounds', 'fixed'])

    # Starting simplex step size is set to be 5% of parameter values.
    # Need to ensure this is larger than the convergence tolerance,
    # so move the initial values away from zero if necessary.
    try:
        xtol = kwargs['options']['xtol']
    except KeyError:
        pass
    else:
        for p in model_copy.param_names:
            pval = getattr(model_copy, p).value
            if abs(pval) < 20 * xtol and 'offset' in p:
                getattr(model_copy, p).value = (20 * xtol if pval == 0
                                                else np.sign(pval) * 20 * xtol)

    tree = spatial.cKDTree(list(zip(*ref_coords)))
    # avoid _convert_input since tree can't be coerced to a float
    x, y = in_coords
    farg = (model_copy, x, y, sigma, maxsig, tree)
    p0, _ = _model_to_fit_params(model_copy)

    result = self._opt_method(self.objective_function, p0, farg, **kwargs)
    fitted_params = result['x']
    _fitter_to_model_params(model_copy, fitted_params)
    return model_copy
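# A minimal, self-contained sketch of the pattern the method above wraps:
# build a cKDTree on the reference coordinates, define an objective that
# scores how well the transformed input points land near reference points,
# and hand it to a scipy minimizer. The pure-shift model, the Gaussian
# scoring, and all names here are illustrative assumptions, not the actual
# objective_function used by the fitter.
import numpy as np
from scipy import spatial, optimize

rng = np.random.default_rng(0)
ref = rng.uniform(0, 100, size=(50, 2))     # reference catalogue
inp = ref + np.array([3.0, -2.0])           # shifted input catalogue
tree = spatial.cKDTree(ref)

def objective(params, coords, tree, sigma=5.0, maxsig=4.0):
    # Apply the trial shift, then sum a Gaussian merit over nearest
    # neighbours within maxsig*sigma; minimize the negative.
    shifted = coords - params
    dist, _ = tree.query(shifted, distance_upper_bound=maxsig * sigma)
    good = np.isfinite(dist)    # query() returns inf for "no neighbour"
    return -np.sum(np.exp(-0.5 * (dist[good] / sigma) ** 2))

result = optimize.minimize(objective, x0=[0.0, 0.0], args=(inp, tree),
                           method='Nelder-Mead', options={'xatol': 0.01})
print(result.x)   # ~ [3, -2]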
def __call__(self, model, x, measured_raw_cts, measured_bkg_cts,
             t_raw, t_bkg, x_err=None, **kwargs):
    if x_err is not None:
        model = IntModel(model.__class__)(x_err, *model.parameters)
    model_copy = _validate_model(model, self.supported_constraints)
    farg = _convert_input(x, measured_raw_cts)
    farg = (model_copy, measured_bkg_cts, t_raw, t_bkg) + farg
    p0, _ = _model_to_fit_params(model_copy)
    # TODO: Honor estimate_jacobian in kwargs, and/or determine if
    # model supports jacobian, and/or if fitter supports the jac argument.
    fitparams, self.fit_info = self._opt_method(
        self.objective_function, p0, farg,
        jac=self.objective_derivative, **kwargs)
    _fitter_to_model_params(model_copy, fitparams)
    return model_copy
def __call__(self, model, x, y, weights=None, maxiter=DEFAULT_MAXITER,
             epsilon=DEFAULT_EPS):
    if model.linear:
        raise ModelLinearityError(
            'Model is linear in parameters; '
            'non-linear fitting methods should not be used.')
    model_copy = model.copy()
    init_values, fit_param_indices = _model_to_fit_params(model_copy)
    bounds = np.array(list(model.bounds.values()))[fit_param_indices]
    minimizer_kwargs = {"method": "BFGS"}
    opt_res = optimize.basinhopping(
        lambda fps: self.objective_function(fps, model_copy, x, y, weights),
        init_values,
        minimizer_kwargs=minimizer_kwargs,
        accept_test=lambda *args, **kwargs: self._bounds_check(
            *args, bounds=bounds, **kwargs),
        take_step=lambda *args, **kwargs: self._take_step(
            *args, bounds=bounds, **kwargs),
        callback=lambda x, f, acc: self._dynamic_step(x, f, acc))
    _fitter_to_model_params(model_copy, opt_res.x)
    return model_copy
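# A minimal sketch of scipy.optimize.basinhopping with a bounds-respecting
# accept_test, mirroring the structure above. The double-well objective and
# the MyBounds helper are illustrative assumptions; basinhopping's
# accept_test protocol (a callable receiving x_new and returning bool) is
# the real scipy API.
import numpy as np
from scipy import optimize

def objective(p):
    # A simple double-well in 1D.
    return (p[0] ** 2 - 1) ** 2 + 0.3 * (p[0] - 2) ** 2

class MyBounds:
    def __init__(self, lo=-5.0, hi=5.0):
        self.lo, self.hi = lo, hi

    def __call__(self, **kwargs):
        # Reject any proposed step that leaves the box.
        x = kwargs["x_new"]
        return bool(np.all(x >= self.lo) and np.all(x <= self.hi))

res = optimize.basinhopping(objective, x0=[0.0],
                            minimizer_kwargs={"method": "BFGS"},
                            accept_test=MyBounds(), niter=50, seed=42)
print(res.x, res.fun)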
def __call__(self, model, in_coords, ref_coords, sigma=5.0, maxsig=4.0,
             landscape=None, **kwargs):
    model_copy = _validate_model(model, ['bounds', 'fixed'])

    # Turn 1D arrays into tuples to allow iteration over axes
    try:
        iter(in_coords[0])
    except TypeError:
        in_coords = (in_coords, )
    try:
        iter(ref_coords[0])
    except TypeError:
        ref_coords = (ref_coords, )

    # Remember, coords are x-first (reversed python order)
    if landscape is None:
        landshape = tuple(int(max(np.max(inco), np.max(refco)) + 10)
                          for inco, refco in zip(in_coords,
                                                 ref_coords))[::-1]
        landscape = self.mklandscape(ref_coords, sigma, maxsig, landshape)

    farg = (model_copy, np.asanyarray(in_coords, dtype=float), landscape)
    p0, _ = _model_to_fit_params(model_copy)

    # TODO: Use the name of the parameter to infer the step size
    ranges = []
    for p in model_copy.param_names:
        bounds = model_copy.bounds[p]
        try:
            diff = np.diff(bounds)[0]
        except TypeError:
            pass
        else:
            # We don't check that the value of a fixed param is within bounds
            if diff > 0 and not model_copy.fixed[p]:
                ranges.append(
                    slice(*(bounds + (min(0.5 * sigma, 0.1 * diff), ))))
                continue
        ranges.append((getattr(model_copy, p).value, ) * 2)

    # Ns=1 limits the fitting along an axis where the range is not a slice
    # object: these are the axes where the bounds are equal (i.e., fixed
    # parameters).
    fitted_params = self._opt_method(self.objective_function, ranges, farg,
                                     Ns=1, finish=None, **kwargs)
    _fitter_to_model_params(model_copy, fitted_params)
    return model_copy
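# A minimal sketch of the scipy.optimize.brute pattern used above: slice
# objects define the grid searched along free axes, while a plain
# (value, value) 2-tuple with Ns=1 pins an axis to a single value, which
# is how fixed parameters are held constant. The quadratic objective is
# an illustrative assumption.
import numpy as np
from scipy import optimize

def objective(params, target):
    a, b = params
    return (a - target[0]) ** 2 + (b - target[1]) ** 2

ranges = [
    slice(-5.0, 5.0, 0.25),   # free parameter: searched on a grid
    (1.5, 1.5),               # "fixed" parameter: Ns=1 keeps it at 1.5
]
best = optimize.brute(objective, ranges, args=((2.0, 1.5),),
                      Ns=1, finish=None)
print(best)   # ~ [2.0, 1.5]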
def lnprior(theta, model):
    # Convert the array of parameter values back into model parameters
    _, fit_params_indices = _model_to_fit_params(model)
    bounds = np.array(list(model.bounds.values()))[fit_params_indices]
    # Test for None explicitly: using `bounds[i][0] or -np.inf` would also
    # treat a legitimate bound of exactly 0 as "no bound".
    if all((-np.inf if lo is None else lo) <= t <=
           (np.inf if hi is None else hi)
           for t, (lo, hi) in zip(theta, bounds)):
        return 0.0
    return -np.inf
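# A self-contained illustration of the flat (top-hat) log-prior evaluated
# above: 0.0 inside the box, -inf outside, so adding it to a log-likelihood
# rejects out-of-bounds walkers in an MCMC. The parameter names and bounds
# are illustrative assumptions.
import numpy as np

def flat_lnprior(theta, bounds):
    """bounds: sequence of (lo, hi) pairs; None means unbounded."""
    for t, (lo, hi) in zip(theta, bounds):
        if not ((-np.inf if lo is None else lo) <= t
                <= (np.inf if hi is None else hi)):
            return -np.inf
    return 0.0

bounds = [(0.0, 10.0), (None, None)]       # amplitude in [0, 10], mean free
print(flat_lnprior([5.0, -3.2], bounds))   # 0.0 (inside)
print(flat_lnprior([-1.0, 0.0], bounds))   # -inf (amplitude below bound)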
def _leastsq(self, model, x, y, *args, **kwargs):
    model_copy = super().__call__(model, x, y, *args, **kwargs)
    init_values, _ = _model_to_fit_params(model)
    pfit, finds = _model_to_fit_params(model_copy)
    _output_errors = np.zeros(model.parameters.shape)
    pcov = self.fit_info['param_cov']
    error = []
    for i in range(len(pfit)):
        try:
            error.append(np.absolute(pcov[i][i]) ** 0.5)
        except (TypeError, IndexError):
            # pcov may be None (singular fit) or too small
            error.append(0.00)
    _output_errors[finds] = np.array(error)
    self.fit_info['param_names'] = model_copy.param_names
    self.fit_info['param_err'] = _output_errors
    self.fit_info['param_fit'] = model_copy.parameters
    return model_copy
def __call__(self, model, *args, weights=None, maxiter=100, acc=1e-7,
             epsilon=1.4901161193847656e-08, estimate_jacobian=False):
    from scipy import optimize
    model_copy = _validate_model(model, self.supported_constraints)
    farg = (model_copy, weights, ) + args
    if model_copy.fit_deriv is None or estimate_jacobian:
        dfunc = None
    else:
        dfunc = self._wrap_deriv
    init_values, _ = _model_to_fit_params(model_copy)
    fitparams, cov_x, dinfo, mess, ierr = optimize.leastsq(
        self.objective_function, init_values, args=farg, Dfun=dfunc,
        col_deriv=model_copy.col_fit_deriv, maxfev=maxiter,
        epsfcn=epsilon, xtol=acc, full_output=True)
    _fitter_to_model_params(model_copy, fitparams)
    self.fit_info.update(dinfo)
    self.fit_info['cov_x'] = cov_x
    self.fit_info['message'] = mess
    self.fit_info['ierr'] = ierr
    if ierr not in [1, 2, 3, 4]:
        warnings.warn("The fit may be unsuccessful; check "
                      "fit_info['message'] for more information.",
                      AstropyUserWarning)

    # now try to compute the true covariance matrix
    if (len(args[-1]) > len(init_values)) and cov_x is not None:
        sum_sqrs = np.sum(self.objective_function(fitparams, *farg) ** 2)
        dof = len(args[-1]) - len(init_values)
        self.fit_info['param_cov'] = cov_x * sum_sqrs / dof
    else:
        self.fit_info['param_cov'] = None

    return model_copy
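# A self-contained sketch of the covariance-scaling step above:
# optimize.leastsq returns cov_x as the *unscaled* inverse Hessian, so
# multiplying by the reduced chi-square (residual sum of squares over the
# degrees of freedom) gives the parameter covariance. The line model and
# data here are illustrative assumptions.
import numpy as np
from scipy import optimize

rng = np.random.default_rng(1)
x = np.linspace(0, 10, 50)
y = 2.0 * x + 1.0 + rng.normal(0, 0.5, x.size)

def residuals(p, x, y):
    return y - (p[0] * x + p[1])

p0 = [1.0, 0.0]
pfit, cov_x, info, mesg, ierr = optimize.leastsq(
    residuals, p0, args=(x, y), full_output=True)

dof = len(y) - len(p0)
sum_sqrs = np.sum(residuals(pfit, x, y) ** 2)
param_cov = cov_x * sum_sqrs / dof          # scaled covariance
param_err = np.sqrt(np.diag(param_cov))     # 1-sigma uncertainties
print(pfit, param_err)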
def __call__(self, model, x, y, weights=None, **kwargs):
    """
    Fit data to this model.

    Parameters
    ----------
    model : `~astropy.modeling.FittableModel`
        model to fit to x, y
    x : array
        input coordinates
    y : array
        input coordinates
    weights : array, optional
        Weights for fitting. For data with Gaussian uncertainties,
        the weights should be 1/sigma.
    kwargs : dict
        optional keyword arguments to be passed to the optimizer
        or the statistic

    Returns
    -------
    model_copy : `~astropy.modeling.FittableModel`
        a copy of the input model with parameters set by the fitter
    """
    model_copy = _validate_model(model,
                                 self._opt_method.supported_constraints)
    farg = _convert_input(x, y)
    farg = (model_copy, weights) + farg
    p0, _ = _model_to_fit_params(model_copy)
    fitparams, self.fit_info = self._opt_method(
        self.log_probability, p0, farg, self.nsteps,
        save_samples=self.save_samples, **kwargs)

    # set the output model parameters to the "best fit" parameters
    _fitter_to_model_params(model_copy, fitparams)

    # get and set the symmetric and asymmetric uncertainties on each
    # parameter
    model_copy = self._set_uncs_and_posterior(model_copy)

    return model_copy
def __call__(self, model, in_coords, ref_coords, sigma=5.0, maxsig=4.0,
             **kwargs):
    model_copy = _validate_model(model, ['bounds'])
    x, y = in_coords
    xref, yref = ref_coords
    xmax = max(np.max(x), np.max(xref))
    ymax = max(np.max(y), np.max(yref))
    landscape = self.mklandscape(ref_coords, sigma, maxsig,
                                 (int(ymax), int(xmax)))
    farg = (model_copy, ) + _convert_input(x, y, landscape)
    p0, _ = _model_to_fit_params(model_copy)

    # TODO: Use the name of the parameter to infer the step size
    ranges = []
    for p in model_copy.param_names:
        bounds = model_copy.bounds[p]
        try:
            diff = np.diff(bounds)[0]
        except TypeError:
            pass
        else:
            if diff > 0:
                ranges.append(
                    slice(*(bounds + (min(0.5 * sigma, 0.1 * diff), ))))
                continue
        ranges.append((getattr(model_copy, p).value, ) * 2)

    # Ns=1 limits the fitting along an axis where the range is not a slice
    # object: these are the axes where the bounds are equal (i.e., fixed
    # parameters).
    fitted_params = self._opt_method(self.objective_function, ranges, farg,
                                     Ns=1, finish=None, **kwargs)
    _fitter_to_model_params(model_copy, fitted_params)
    return model_copy
def _params(model):
    return _model_to_fit_params(model)[0]
def fit(self, lpost, t0, neg=True, scipy_optimize_options=None):
    """
    Do either a Maximum A Posteriori or Maximum Likelihood fit to the
    data.

    Parameters
    ----------
    lpost : Posterior (or subclass) instance
        An instance of class Posterior or one of its subclasses that
        defines the function to be minimized (either in loglikelihood
        or logposterior)

    t0 : {list | numpy.ndarray}
        List/array with set of initial parameters

    neg : bool, optional, default True
        Boolean to be passed to `lpost`, setting whether to use the
        *negative* posterior or the *negative* log-likelihood. Since
        `Posterior` and `LogLikelihood` objects are generally defined in
        terms of quantities to be *maximized*, the negative is needed
        for use with a minimizer.

    scipy_optimize_options : dict, optional, default None
        A dictionary with options for `scipy.optimize.minimize`,
        directly passed on as keyword arguments.

    Returns
    -------
    fitparams : dict
        A dictionary with the fit results
        TODO: Add description of keywords in the class!
    """
    if not isinstance(lpost, Posterior) and not isinstance(
            lpost, LogLikelihood):
        raise TypeError("lpost must be a subclass of "
                        "Posterior or LogLikelihood.")

    newmod = lpost.model.copy()
    newmod.parameters = t0
    p0, _ = _model_to_fit_params(newmod)
    # p0 will be shorter than t0 if there are any frozen/tied parameters;
    # its length has to match the npar attribute.
    if not len(p0) == lpost.npar:
        raise ValueError("Parameter set t0 must be of right "
                         "length for model in lpost.")

    if scipy.__version__ < "0.10.0":
        args = [neg]
    else:
        args = (neg, )

    if not scipy_optimize_options:
        scipy_optimize_options = {}

    # different commands for different fitting methods,
    # at least until scipy 0.11 is out
    funcval = 100.0
    i = 0
    while funcval == 100 or funcval == 200 or \
            funcval == 0.0 or not np.isfinite(funcval):
        if i > 20:
            raise Exception("Fitting unsuccessful!")
        # perturb parameters slightly
        t0_p = np.random.multivariate_normal(p0, np.diag(np.abs(p0) / 100.))

        params = [getattr(newmod, name) for name in newmod.param_names]
        bounds = [p.bounds for p in params
                  if not np.any([p.tied, p.fixed])]

        # if max_post is True, do the Maximum-A-Posteriori Fit
        if self.max_post:
            opt = scipy.optimize.minimize(lpost, t0_p,
                                          method=self.fitmethod,
                                          args=args, tol=1.e-10,
                                          bounds=bounds,
                                          **scipy_optimize_options)
        # if max_post is False, then do a Maximum Likelihood Fit
        else:
            if isinstance(lpost, Posterior):
                # This could be a `Posterior` object
                opt = scipy.optimize.minimize(lpost.loglikelihood, t0_p,
                                              method=self.fitmethod,
                                              args=args, tol=1.e-10,
                                              bounds=bounds,
                                              **scipy_optimize_options)
            elif isinstance(lpost, LogLikelihood):
                # ... or a `LogLikelihood` object, in which case use the
                # evaluate function (the isinstance check at the top
                # guarantees it is one of the two).
                opt = scipy.optimize.minimize(lpost.evaluate, t0_p,
                                              method=self.fitmethod,
                                              args=args, tol=1.e-10,
                                              bounds=bounds,
                                              **scipy_optimize_options)

        funcval = opt.fun
        i += 1

    res = OptimizationResults(lpost, opt, neg=neg)

    return res
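# A self-contained sketch of the retry loop above: re-run the minimizer
# from slightly perturbed starting points until it returns a finite,
# successful objective value. The Rosenbrock stand-in objective, the cap
# of 20 attempts, and the success test are illustrative assumptions.
import numpy as np
from scipy import optimize

def neg_loglike(p):
    # Rosenbrock stands in for a negative log-likelihood here.
    return optimize.rosen(p)

p0 = np.array([1.5, 1.5])
rng = np.random.default_rng(3)
for attempt in range(20):
    # perturb the start by ~1% of each parameter value
    start = rng.multivariate_normal(p0, np.diag(np.abs(p0) / 100.0))
    opt = optimize.minimize(neg_loglike, start, method="L-BFGS-B")
    if np.isfinite(opt.fun) and opt.success:
        break
else:
    raise RuntimeError("Fitting unsuccessful!")
print(opt.x, opt.fun)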
def __call__(self, model, in_coords, ref_coords, in_weights=None,
             ref_weights=None, matches=None, **kwargs):
    """
    Perform a minimization using the KDTreeFitter

    Parameters
    ----------
    model : FittableModel
        initial guess at model defining transformation
    in_coords : array-like (n x N)
        array of input coordinates
    ref_coords : array-like (n x M)
        array of reference coordinates
    in_weights : array-like (N,)
        weights for input coordinates
    ref_weights : array-like (M,)
        weights for reference coordinates
    kwargs : dict
        additional arguments to control fit

    Returns
    -------
    Model : best-fitting model
        also assigns attributes:
        x : array-like
            best-fitting parameters
        fun : float
            final value of fitting function
        nit : int
            number of iterations performed
    """
    model_copy = _validate_model(model, ['bounds', 'fixed'])

    # Turn 1D arrays into tuples to allow iteration over axes
    try:
        iter(in_coords[0])
    except TypeError:
        in_coords = (in_coords, )
    try:
        iter(ref_coords[0])
    except TypeError:
        ref_coords = (ref_coords, )

    # Starting simplex step size is set to be 5% of parameter values.
    # Need to ensure this is larger than the convergence tolerance,
    # so move the initial values away from zero if necessary.
    try:
        xatol = kwargs['options']['xatol']
    except KeyError:
        pass
    else:
        for p in model_copy.param_names:
            pval = getattr(model_copy, p).value
            ### EDITED THIS LINE SO TAKE A LOOK IF 2D MATCHING GOES WRONG!!
            if abs(pval) < 20 * xatol and not model_copy.fixed[p]:  # and 'offset' in p
                getattr(model_copy, p).value = (20 * xatol if pval == 0
                                                else np.sign(pval) * 20 * xatol)

    if in_weights is None:
        in_weights = np.ones((len(in_coords[0]), ))
    if ref_weights is None:
        ref_weights = np.ones((len(ref_coords[0]), ))
    # cKDTree.query() returns a value of n for no neighbour, so make coding
    # easier by allowing this to match a zero-weighted reference
    ref_weights = np.append(ref_weights, (0, ))

    ref_coords = np.array(list(zip(*ref_coords)))
    tree = spatial.cKDTree(ref_coords)
    # avoid _convert_input since tree can't be coerced to a float
    farg = (model_copy, in_coords, ref_coords, in_weights, ref_weights,
            matches, tree)
    p0, _ = _model_to_fit_params(model_copy)

    arg_names = inspect.getfullargspec(self._opt_method).args
    args = [self.objective_function]
    if arg_names[1] == 'x0':
        args.append(p0)
    elif arg_names[1] == 'bounds':
        args.append(tuple(model_copy.bounds[p]
                          for p in model_copy.param_names))
    else:
        raise ValueError("Don't understand argument {}".format(
            arg_names[1]))

    if 'args' in arg_names:
        kwargs['args'] = farg
    if 'method' in arg_names:
        kwargs['method'] = self._method
    if 'minimizer_kwargs' in arg_names:
        kwargs['minimizer_kwargs'] = {'args': farg,
                                      'method': 'Nelder-Mead'}

    result = self._opt_method(*args, **kwargs)
    fitted_params = result['x']
    _fitter_to_model_params(model_copy, fitted_params)
    self.statistic = result['fun']
    self.niter = result['nit']
    return model_copy
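# A self-contained sketch of the introspection-based dispatch above:
# inspect the optimizer's signature to decide whether it wants a starting
# point (x0, e.g. scipy.optimize.minimize) or a bounds sequence (e.g.
# scipy.optimize.differential_evolution), then build the call accordingly.
# The quadratic objective is an illustrative assumption.
import inspect
import numpy as np
from scipy import optimize

def objective(p, target):
    return float(np.sum((np.asarray(p) - target) ** 2))

def run_optimizer(opt_method, objective, p0, bounds, farg):
    arg_names = inspect.getfullargspec(opt_method).args
    args = [objective]
    if arg_names[1] == 'x0':
        args.append(p0)
    elif arg_names[1] == 'bounds':
        args.append(bounds)
    else:
        raise ValueError("Don't understand argument {}".format(arg_names[1]))
    kwargs = {'args': farg} if 'args' in arg_names else {}
    return opt_method(*args, **kwargs)

target = np.array([1.0, -2.0])
res1 = run_optimizer(optimize.minimize, objective, [0.0, 0.0],
                     [(-5, 5), (-5, 5)], (target,))
res2 = run_optimizer(optimize.differential_evolution, objective, [0.0, 0.0],
                     [(-5, 5), (-5, 5)], (target,))
print(res1.x, res2.x)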
def mcmc_err(self, model, x, measured_raw_cts, measured_bkg_cts,
             t_raw, t_bkg, cl=68.27, nruns=500, nwalkers=100, nburn=100,
             with_corner=True, corner_filename='triangle.pdf',
             corner_dpi=144, clobber_corner=True, save_chain=False,
             chain_filename='chain.dat', clobber_chain=True,
             floatfmt=".3e", tablefmt='orgtbl', **kwargs):
    """Run Markov Chain Monte Carlo for parameter error estimation.

    `model` should be a fitted model as returned by `__call__`.
    Return the Markov Chain as a 3-dimensional array
    (walker, step, parameter).
    """
    model_copy = _validate_model(model, self.supported_constraints)
    params, _ = _model_to_fit_params(model_copy)

    # Get bounds
    bounds_model = model.copy()
    bounds_model.parameters = [bounds_model.bounds[name][0]
                               for name in bounds_model.param_names]
    min_bounds, _ = _model_to_fit_params(bounds_model)
    bounds_model.parameters = [bounds_model.bounds[name][1]
                               for name in bounds_model.param_names]
    max_bounds, _ = _model_to_fit_params(bounds_model)

    ndim = len(params)
    pos = [params + 1e-4 * np.random.randn(ndim) * params
           for i in range(nwalkers)]

    if not os.path.isfile(chain_filename) or clobber_chain:
        sampler = emcee.EnsembleSampler(
            nwalkers, ndim, lnprob, threads=8,
            args=(model_copy, (min_bounds, max_bounds), measured_raw_cts,
                  measured_bkg_cts, t_raw, t_bkg, x))
        sampler.run_mcmc(pos, nruns)
        samples = sampler.chain[:, nburn:, :].reshape((-1, ndim))
        #samples = np.array(result).reshape((-1, ndim))

        if save_chain:
            with open(chain_filename, 'wb') as f:
                pickle.dump(samples, f)
    elif os.path.isfile(chain_filename) and not clobber_chain:
        with open(chain_filename, 'rb') as f:
            samples = pickle.load(f)

    par_names = model_copy.param_names
    free_par_names = []
    for par_name in par_names:
        is_fixed = getattr(model_copy, par_name).fixed
        if not is_fixed:
            free_par_names.append(par_name)

    if with_corner:
        if os.path.isfile(corner_filename) and not clobber_corner:
            raise Exception(
                "Corner plot already exists and clobber_corner=False.")
        else:
            fig = corner.corner(samples, labels=free_par_names,
                                bins=[20] * ndim)
            fig.savefig(corner_filename, dpi=corner_dpi)

    lim_lower = 50. - cl / 2.
    lim_upper = 50. + cl / 2.
    val_with_errs = [(v[1], v[1] - v[0], v[2] - v[1]) for v in zip(
        *np.percentile(samples, [lim_lower, 50., lim_upper], axis=0))]
    fit_data = [[free_par_names[i], val_with_errs[i][0],
                 -val_with_errs[i][1], val_with_errs[i][2]]
                for i in range(len(val_with_errs))]

    print('\n' * 2)
    print('FIT SUMMARY:')
    print('')
    tab_headers = ['Parameter', 'Value',
                   'Lower Uncertainty', 'Upper Uncertainty']
    print(tabulate(fit_data, headers=tab_headers, tablefmt=tablefmt,
                   floatfmt=floatfmt))
    print('\n' * 2)

    return fit_data
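# A self-contained sketch of the percentile summary computed above: given
# MCMC samples, the median and an asymmetric confidence interval for each
# parameter come from three percentiles. The fake two-parameter chain and
# parameter names are illustrative assumptions.
import numpy as np

rng = np.random.default_rng(7)
samples = rng.normal(loc=[2.0, -1.0], scale=[0.1, 0.3], size=(5000, 2))

cl = 68.27
lo, hi = 50. - cl / 2., 50. + cl / 2.
p_lo, p_med, p_hi = np.percentile(samples, [lo, 50., hi], axis=0)
for name, m, l, h in zip(['norm', 'index'], p_med, p_lo, p_hi):
    print(f"{name}: {m:.3f} -{m - l:.3f} / +{h - m:.3f}")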
def _bootstrap(self, model, x, y, z=None, yerr=0.0, weights=None, **kwargs):
    model_copy = super().__call__(model, x, y, **kwargs)
    init_values, _ = _model_to_fit_params(model)
    pfit, finds = _model_to_fit_params(model_copy)
    farg = (model_copy, weights, ) + _convert_input(x, y, z)
    self._output_errors = np.zeros(model.parameters.shape)

    # Get the stdev of the residuals
    residuals = self.objective_function(pfit, *farg)
    sigma_res = np.std(residuals)
    sigma_err_total = np.sqrt(sigma_res ** 2 + yerr ** 2)

    # 10 random data sets are generated and fitted
    ps = []
    for i in range(10):
        rand_delta = np.random.normal(0., sigma_err_total, len(y))
        rand_y = y + rand_delta
        farg = (model_copy, weights, ) + _convert_input(x, rand_y, z)
        # With full_output=False, leastsq returns (x, ier); the second
        # value is the integer status flag, not a covariance matrix.
        rand_fit, rand_ier = opt.leastsq(self.objective_function,
                                         init_values, args=farg,
                                         full_output=False)
        ps.append(rand_fit)

    ps = np.array(ps)
    mean_pfit = np.mean(ps, 0)

    # You can choose the confidence interval that you want for your
    # parameter estimates:
    n_sigma = 1.  # 1sigma gets approximately the same as methods above
    # 1sigma corresponds to 68.3% confidence interval
    # 2sigma corresponds to 95.44% confidence interval
    err_pfit = n_sigma * np.std(ps, 0)

    _fitter_to_model_params(model_copy, mean_pfit)
    self._output_errors[finds] = np.array(err_pfit)

    self.fit_info['param_names'] = model_copy.param_names
    self.fit_info['param_err'] = self._output_errors
    self.fit_info['param_fit'] = model_copy.parameters
    self.fit_info['param_units'] = [getattr(model_copy, p).unit
                                    for p in model_copy.param_names]
    return model_copy
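# A self-contained sketch of the parametric bootstrap above: estimate the
# residual scatter from a first fit, then repeatedly resample the data
# with that scatter, refit, and take the spread of the refit parameters
# as the uncertainty. The line model and 200 resamples (rather than 10)
# are illustrative assumptions.
import numpy as np
from scipy import optimize

rng = np.random.default_rng(2)
x = np.linspace(0, 10, 40)
y = 0.7 * x + 3.0 + rng.normal(0, 0.4, x.size)

def residuals(p, x, y):
    return y - (p[0] * x + p[1])

pfit, _ = optimize.leastsq(residuals, [1.0, 0.0], args=(x, y))
sigma_res = np.std(residuals(pfit, x, y))

ps = []
for _ in range(200):
    rand_y = y + rng.normal(0., sigma_res, y.size)
    rand_fit, _ = optimize.leastsq(residuals, pfit, args=(x, rand_y))
    ps.append(rand_fit)
ps = np.array(ps)
print("mean:", ps.mean(axis=0), "1-sigma:", ps.std(axis=0))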
def __call__(self, model, x, y, yerr=None, nwalkers=500, steps=200,
             nprocs=1):
    model = model.copy()

    # If no errors are provided, assume all errors are normalized
    if yerr is None:
        yerr = np.zeros(shape=x.shape)

    # Retrieve the parameters that are not considered fixed or tied
    fit_params, fit_params_indices = _model_to_fit_params(model)
    # fit_params = np.append(fit_params, np.log(1))

    # TODO: The following two seem to be unnecessary, however, the fits
    # don't work well without *both* of them. Need to rethink.
    fitter = LevMarLSQFitter()
    fit_model = fitter(model, x, y)
    fit_params = fit_model.parameters[fit_params_indices]

    # Perform a quick optimization of the parameters
    nll = lambda *args: -lnlike(*args)
    result = op.minimize(nll, fit_params, args=(x, y, yerr, model))
    fit_params = result["x"]
    _fitter_to_model_params(model, fit_params)

    # Cache the number of dimensions of the problem, and walker count
    ndim = len(fit_params)

    # Initialize starting positions of walkers in a Gaussian ball
    pos = [fit_params * (1 + 1e-8 * np.random.randn(ndim))
           for _ in range(nwalkers)]

    with Pool(nprocs) as pool:
        sampler = emcee.EnsembleSampler(nwalkers, ndim, lnprob,
                                        args=(x, y, yerr, model),
                                        pool=pool)
        sampler.run_mcmc(pos, steps)

    # for result in sampler.sample(pos, iterations=steps, storechain=False):
    #     position = result[0]
    #     with open("chain.dat", "a") as f:
    #         for k in range(position.shape[0]):
    #             f.write("{0:4d} {1:s}\n".format(k, " ".join(position[k])))

    burnin = int(steps * 0.1)
    samples = sampler.chain[:, burnin:, :].reshape((-1, ndim))

    # Compute the quantiles.
    res = list(map(lambda v: (v[1], v[2] - v[1], v[1] - v[0]),
                   zip(*np.percentile(samples, [16, 50, 84], axis=0))))
    theta = [x[0] for x in res]
    _fitter_to_model_params(model, theta)

    self._uncertainties['param_names'] = model.param_names
    self._uncertainties['param_fit'] = model.parameters

    errs = np.array([(0.0, 0.0) for _ in range(len(model.parameters))])
    errs[fit_params_indices] = np.array([(res[i][1], res[i][2])
                                         for i in range(len(res))])
    self._uncertainties['param_err'] = errs
    self._uncertainties['param_units'] = [getattr(model, p).unit
                                          for p in model.param_names]

    return model
def __call__(self, model, in_coords, ref_coords, sigma=5.0, maxsig=4.0,
             landscape=None, scale=None, **kwargs):
    model_copy = _validate_model(model, self.supported_constraints)

    # Turn 1D arrays into tuples to allow iteration over axes
    try:
        iter(in_coords[0])
    except TypeError:
        in_coords = (in_coords, )
    try:
        iter(ref_coords[0])
    except TypeError:
        ref_coords = (ref_coords, )

    # Remember, coords are x-first (reversed python order)
    self.grid_model = models.Identity(len(in_coords))
    if landscape is None:
        mins = [min(refco) for refco in ref_coords]
        maxs = [max(refco) for refco in ref_coords]
        if scale:
            self.grid_model = reduce(Model.__and__,
                                     [models.Shift(-_min) | models.Scale(scale)
                                      for _min in mins])
            landshape = tuple(int((_max - _min) * scale)
                              for _min, _max in zip(mins, maxs))[::-1]
        else:
            scale = 1
            landshape = tuple(int(_max) for _max in maxs)[::-1]
        landscape = self.mklandscape(ref_coords, sigma * scale, maxsig,
                                     landshape)

    farg = (model_copy, np.asanyarray(in_coords, dtype=float), landscape)
    p0, _ = _model_to_fit_params(model_copy)

    ranges = []
    for p in model_copy.param_names:
        bounds = model_copy.bounds[p]
        try:
            diff = np.diff(bounds)[0]
        except TypeError:
            pass
        else:
            # We don't check that the value of a fixed param is within bounds
            if diff > 0 and not model_copy.fixed[p]:
                if 'offset' in p:
                    stepsize = min(sigma, 0.1 * diff)
                elif 'angle' in p:
                    stepsize = max(0.5, 0.1 * diff)
                elif 'factor' in p:
                    stepsize = max(0.01, 0.1 * diff)
                else:
                    # Fall back to a generic step so stepsize is always
                    # defined, even for unrecognized parameter names
                    stepsize = 0.1 * diff
                ranges.append(slice(*(bounds + (stepsize, ))))
                continue
        ranges.append((getattr(model_copy, p).value, ) * 2)

    # Ns=1 limits the fitting along an axis where the range is not a slice
    # object: these are the axes where the bounds are equal (i.e., fixed
    # parameters).
    fitted_params = self._opt_method(self.objective_function, ranges, farg,
                                     Ns=1, finish=None, **kwargs)
    _fitter_to_model_params(model_copy, fitted_params)
    return model_copy
def _curve_fit(self, model, x, y, z=None, weights=None, yerr=None,
               maxiter=DEFAULT_MAXITER, acc=DEFAULT_ACC,
               epsilon=DEFAULT_EPS, estimate_jacobian=False, **kwargs):
    """
    Fit data to this model.

    Parameters
    ----------
    model : `~astropy.modeling.FittableModel`
        model to fit to x, y, z
    x : array
        input coordinates
    y : array
        input coordinates
    z : array, optional
        input coordinates
    weights : array, optional
        Weights for fitting. For data with Gaussian uncertainties,
        the weights should be 1/sigma.
    maxiter : int
        maximum number of iterations
    acc : float
        Relative error desired in the approximate solution
    epsilon : float
        A suitable step length for the forward-difference approximation
        of the Jacobian (if model.fjac=None). If epsfcn is less than the
        machine precision, it is assumed that the relative errors in the
        functions are of the order of the machine precision.
    estimate_jacobian : bool
        If False (default) and if the model has a fit_deriv method,
        it will be used. Otherwise the Jacobian will be estimated.
        If True, the Jacobian will be estimated in any case.
    equivalencies : list or None, optional and keyword-only argument
        List of *additional* equivalencies that should be applied in
        case x, y and/or z have units. Default is None.

    Returns
    -------
    model_copy : `~astropy.modeling.FittableModel`
        a copy of the input model with parameters set by the fitter
    """
    model_copy = _validate_model(model, self.supported_constraints)
    farg = (model_copy, weights, ) + _convert_input(x, y, z)

    if model_copy.fit_deriv is None or estimate_jacobian:
        dfunc = None
    else:
        dfunc = self._wrap_deriv
    init_values, finds = _model_to_fit_params(model_copy)

    def f(x, *p0, mod=model_copy):
        _fitter_to_model_params(mod, p0)
        return mod(x)

    fitparams, cov_x = opt.curve_fit(f, x, y, p0=init_values, sigma=yerr,
                                     epsfcn=epsilon, jac=dfunc,
                                     col_deriv=model_copy.col_fit_deriv,
                                     maxfev=maxiter, xtol=acc,
                                     absolute_sigma=False, **kwargs)

    error = []
    for i in range(len(fitparams)):
        try:
            error.append(np.absolute(cov_x[i][i]) ** 0.5)
        except (TypeError, IndexError):
            # cov_x may be unusable for a singular fit
            error.append(0.00)

    _fitter_to_model_params(model_copy, fitparams)
    _output_errors = np.zeros(model.parameters.shape)
    _output_errors[finds] = np.array(error)

    self.fit_info['cov_x'] = cov_x
    self.fit_info['param_names'] = model_copy.param_names
    self.fit_info['param_err'] = _output_errors
    self.fit_info['param_fit'] = model_copy.parameters

    # now try to compute the true covariance matrix
    if (len(y) > len(init_values)) and cov_x is not None:
        sum_sqrs = np.sum(self.objective_function(fitparams, *farg) ** 2)
        dof = len(y) - len(init_values)
        self.fit_info['param_cov'] = cov_x * sum_sqrs / dof
    else:
        self.fit_info['param_cov'] = None

    self.fit_info['param_units'] = [getattr(model_copy, p).unit
                                    for p in model_copy.param_names]

    return model_copy
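# A self-contained sketch of the curve_fit pattern above: wrap the model
# in a plain function of (x, *params), fit, and take parameter errors
# from the square root of the covariance diagonal. The Gaussian model and
# data are illustrative assumptions.
import numpy as np
from scipy import optimize as opt

rng = np.random.default_rng(4)
x = np.linspace(-5, 5, 100)
y = 3.0 * np.exp(-0.5 * ((x - 0.5) / 1.2) ** 2) + rng.normal(0, 0.1, x.size)

def f(x, amplitude, mean, stddev):
    return amplitude * np.exp(-0.5 * ((x - mean) / stddev) ** 2)

fitparams, cov_x = opt.curve_fit(f, x, y, p0=[1.0, 0.0, 1.0])
param_err = np.sqrt(np.diag(cov_x))   # 1-sigma uncertainties
print(fitparams, param_err)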