def parse_component_d2d_trait_args(rptraits, sptraits, wptraits):
    """Validate and normalise the trait arguments of a 2D density component.

    At least one rptrait is mandatory; sptraits and wptraits are optional
    and default to empty tuples. All traits are tuplified.

    Returns a dict with keys: rptraits, sptraits, wptraits.
    """
    if not rptraits:
        raise RuntimeError("at least one rptrait is required")
    result = dict(rptraits=iterutils.tuplify(rptraits))
    # Optional trait groups: None becomes an empty tuple
    for key, value in (('sptraits', sptraits), ('wptraits', wptraits)):
        result[key] = () if value is None else iterutils.tuplify(value)
    return result
def __init__(self, drivers, dmodels, gmodels):
    """Initialise from parallel sequences of drivers, dmodels and gmodels.

    All three arguments are tuplified; they must have equal lengths
    (one driver+dmodel+gmodel triplet per item).

    Raises
    ------
    RuntimeError
        If the three sequences have different lengths.
    """
    drivers = iterutils.tuplify(drivers)
    dmodels = iterutils.tuplify(dmodels)
    gmodels = iterutils.tuplify(gmodels)
    ndrivers = len(drivers)
    ndmodels = len(dmodels)
    ngmodels = len(gmodels)
    if not (ndrivers == ndmodels == ngmodels):
        raise RuntimeError(
            f"the number of "
            f"drivers ({ndrivers}), "
            f"dmodels ({ndmodels}), and "
            f"gmodels ({ngmodels}) must be equal")
    self._drivers = drivers
    self._dmodels = dmodels
    self._gmodels = gmodels
    # One (driver, dmodel, gmodel) triplet per item
    self._items = tuple([(drivers[i], dmodels[i], gmodels[i])
                         for i in range(self.nitems())])
    # Preallocate some data structures
    # It will make our life easier later on
    # For each dmodel, one dict per output name holding data (d),
    # model (m) and weight (w) slots; 'h' = host copy, 'd' = device copy
    self._h_model_data = [{
        key: dict(d=None, m=None, w=None)
        for key in dmodel.onames()} for dmodel in self.dmodels()]
    self._d_model_data = copy.deepcopy(self._h_model_data)
    # Merge the pdescs of all models into a dict and ensure they
    # have unique keys
    self._pdescs, self._mappings = miscutils.merge_dicts_and_make_mappings(
        [gmodel.params() for gmodel in self.gmodels()], 'model')
    # Timing accumulators for profiling model evaluation and
    # device-to-host transfers
    self._times_mdl_eval = []
    self._times_mdl_d2h = []
    self._time_samples = collections.defaultdict(list)
def parse_component_s3d_trait_args(
        rptraits, rhtraits,
        vptraits, vhtraits,
        dptraits, dhtraits,
        zptraits, sptraits, wptraits):
    """Validate and normalise the trait arguments of a 3D spectral component.

    rptraits, rhtraits, vptraits and dptraits are mandatory. Missing
    vhtraits/dhtraits entries default to trivial unity traits; zptraits,
    sptraits and wptraits default to empty tuples. Paired trait groups
    (rp/rh, vp/vh, dp/dh) must have equal lengths.

    Returns a dict with all nine (tuplified) trait groups.

    Raises
    ------
    RuntimeError
        If a mandatory group is missing or paired lengths differ.
    """
    if not rptraits:
        raise RuntimeError("at least one rptrait is required")
    if not rhtraits:
        raise RuntimeError("at least one rhtrait is required")
    if not vptraits:
        raise RuntimeError("at least one vptrait is required")
    if not dptraits:
        raise RuntimeError("at least one dptrait is required")
    rptraits = iterutils.tuplify(rptraits)
    rhtraits = iterutils.tuplify(rhtraits)
    vptraits = iterutils.tuplify(vptraits)
    # Missing height traits are placeholders (None) matched one-to-one
    # with their polar counterparts
    vhtraits = iterutils.tuplify(vhtraits) \
        if vhtraits else tuple([None] * len(vptraits))
    dptraits = iterutils.tuplify(dptraits)
    dhtraits = iterutils.tuplify(dhtraits) \
        if dhtraits else tuple([None] * len(dptraits))
    zptraits = iterutils.tuplify(zptraits) if zptraits else tuple()
    sptraits = iterutils.tuplify(sptraits) if sptraits else tuple()
    wptraits = iterutils.tuplify(wptraits) if wptraits else tuple()
    # Replace placeholder height traits with trivial unity traits
    if None in vhtraits:
        vhtraits = iterutils.replace_items_and_copy(
            vhtraits, None, traits.VHTraitOne())
    if None in dhtraits:
        dhtraits = iterutils.replace_items_and_copy(
            dhtraits, None, traits.DHTraitOne())
    rptraits_len = len(rptraits)
    rhtraits_len = len(rhtraits)
    vptraits_len = len(vptraits)
    vhtraits_len = len(vhtraits)
    dptraits_len = len(dptraits)
    dhtraits_len = len(dhtraits)
    if rptraits_len != rhtraits_len:
        raise RuntimeError(
            f"the number of rhtraits must be equal to "
            f"the number of rptraits ({rhtraits_len} != {rptraits_len})")
    if vptraits_len != vhtraits_len:
        raise RuntimeError(
            f"the number of vhtraits must be equal to "
            f"the number of vptraits ({vhtraits_len} != {vptraits_len})")
    # Consistency fix: use the cached lengths like the two checks above
    # (was: len(dptraits) != len(dhtraits) — same behaviour, uniform style)
    if dptraits_len != dhtraits_len:
        raise RuntimeError(
            f"the number of dhtraits must be equal to "
            f"the number of dptraits ({dhtraits_len} != {dptraits_len})")
    return dict(
        rptraits=rptraits, rhtraits=rhtraits,
        vptraits=vptraits, vhtraits=vhtraits,
        dptraits=dptraits, dhtraits=dhtraits,
        zptraits=zptraits, sptraits=sptraits, wptraits=wptraits)
def __init__(self, dmodels, gmodels, drivers):
    """Initialise from parallel sequences of dmodels, gmodels and drivers.

    All three arguments are tuplified and must have equal lengths.

    Raises
    ------
    RuntimeError
        If the three sequences have different lengths.
    """
    dmodels = iterutils.tuplify(dmodels)
    gmodels = iterutils.tuplify(gmodels)
    drivers = iterutils.tuplify(drivers)
    ndmodels = len(dmodels)
    ngmodels = len(gmodels)
    ndrivers = len(drivers)
    if not (ndmodels == ngmodels == ndrivers):
        raise RuntimeError(f"could not create model; the number of "
                           f"gmodels ({ngmodels}), "
                           f"dmodels ({ndmodels}), and "
                           f"drivers ({ndrivers}) must be equal")
    # NOTE(review): 'models' is built but never stored on self, returned,
    # or otherwise used — compare make_model_group_from_cmp(), which
    # returns make_model_group_from_seq(models). This looks like a missing
    # assignment (e.g. self._models = models) — confirm intended behaviour.
    models = []
    for dmodel, gmodel, driver in zip(dmodels, gmodels, drivers):
        models.append(Model(dmodel, gmodel, driver))
def make_model_group_from_cmp(dmodels, gmodels, drivers):
    """Build a model group from parallel dmodel/gmodel/driver sequences.

    Each triplet is wrapped into a Model; the resulting sequence is then
    turned into a group via make_model_group_from_seq().

    Raises
    ------
    RuntimeError
        If the three sequences have different lengths.
    """
    dmodels = iterutils.tuplify(dmodels)
    gmodels = iterutils.tuplify(gmodels)
    drivers = iterutils.tuplify(drivers)
    n_dmodels = len(dmodels)
    n_gmodels = len(gmodels)
    n_drivers = len(drivers)
    if len({n_dmodels, n_gmodels, n_drivers}) > 1:
        raise RuntimeError(f"could not create model group; the number of "
                           f"gmodels ({n_gmodels}), "
                           f"dmodels ({n_dmodels}), and "
                           f"drivers ({n_drivers}) must be equal")
    models = [Model(dmodel, gmodel, driver)
              for dmodel, gmodel, driver in zip(dmodels, gmodels, drivers)]
    return make_model_group_from_seq(models)
def make_param_symbol(name, indices):
    """Build the symbol string for a parameter.

    With no indices the bare name is returned; a single index yields a
    basic-index subscript, multiple indices an advanced-index subscript.
    """
    indices = iterutils.tuplify(indices, False)
    if not indices:
        return name
    if len(indices) == 1:
        subscript = make_param_symbol_subscript_bindx(indices[0])
    else:
        subscript = make_param_symbol_subscript_aindx(indices)
    return f'{name}{subscript}'
def __init__(self, components):
    """Initialise a 2D gmodel from one or more components.

    Size/wcube/dtype/driver/backend are placeholders populated later;
    parameter descriptors and mappings are derived from the components.
    """
    # Lazily-populated state
    self._size = [None, None]
    self._wcube = None
    self._dtype = None
    self._driver = None
    self._backend = None
    # Components and their merged parameter descriptors/mappings
    self._cmps = iterutils.tuplify(components)
    (self._params,
     self._mappings) = _detail.make_gmodel_2d_params(self._cmps)
def __init__(self, components, size_z=None, step_z=None, tauto=False,
             tcomponents=None):
    """Initialise a 3D gmodel from components and optional tcomponents.

    Only the z size/step can be supplied here; the x/y entries and the
    remaining cubes/dtype/driver/backend are populated later. Parameter
    descriptors and mappings are derived from the (t)components.
    """
    self._cmps = iterutils.tuplify(components)
    self._tcmps = iterutils.tuplify(
        () if tcomponents is None else tcomponents)
    self._tauto = tauto
    self._size = [None, None, size_z]
    self._step = [None, None, step_z]
    self._zero = [None, None, None]
    # Lazily-populated state
    self._tcube = None
    self._wcube = None
    self._dtype = None
    self._driver = None
    self._backend = None
    self._params, self._mappings, self._tmappings = (
        _detail.make_gmodel_3d_params(
            self._cmps, self._tcmps, self._tauto))
def parse_component_d3d_trait_args(
        rptraits, rhtraits, zptraits, sptraits, wptraits):
    """Validate and normalise the trait arguments of a 3D density component.

    rptraits and rhtraits are mandatory and must have equal lengths;
    zptraits, sptraits and wptraits default to empty tuples.

    Returns a dict with all five (tuplified) trait groups.

    Raises
    ------
    RuntimeError
        If a mandatory group is missing or rp/rh lengths differ.
    """
    for value, label in ((rptraits, 'rptrait'), (rhtraits, 'rhtrait')):
        if not value:
            raise RuntimeError(f"at least one {label} is required")
    rptraits = iterutils.tuplify(rptraits)
    rhtraits = iterutils.tuplify(rhtraits)
    zptraits = () if zptraits is None else iterutils.tuplify(zptraits)
    sptraits = () if sptraits is None else iterutils.tuplify(sptraits)
    wptraits = () if wptraits is None else iterutils.tuplify(wptraits)
    rptraits_len = len(rptraits)
    rhtraits_len = len(rhtraits)
    if rptraits_len != rhtraits_len:
        raise RuntimeError(
            f"the number of rhtraits must be equal to "
            f"the number of rptraits ({rhtraits_len} != {rptraits_len})")
    return dict(
        rptraits=rptraits, rhtraits=rhtraits,
        zptraits=zptraits, sptraits=sptraits, wptraits=wptraits)
def __init__(
        self, datasets, drivers, dmodels, gmodels,
        wd=False, wp=0.0, wu=1.0):
    """Initialise the objective from datasets and model triplets.

    Parameters
    ----------
    datasets : dataset or sequence of datasets
        One dataset per dmodel; each dataset must match its dmodel in
        output names, dtype, size, step and zero.
    drivers, dmodels, gmodels :
        Forwarded to the base class (one triplet per item).
    wd : bool
        If True, data weights are 1/(max-min) of each observable;
        otherwise 1.0.
    wp, wu : scalar, None, dict, or sequence thereof
        Per-dataset p/u weights; a scalar is broadcast to all items.

    Raises
    ------
    RuntimeError
        On any dataset/dmodel mismatch or wp/wu length mismatch.
    """
    super().__init__(drivers, dmodels, gmodels)
    self._datasets = datasets = iterutils.tuplify(datasets)
    # Robustness fix: tuplify the local dmodels before len()/indexing —
    # the base class tuplifies its own copy, so a scalar dmodel argument
    # would otherwise break the checks below.
    dmodels = iterutils.tuplify(dmodels)
    if len(datasets) != len(dmodels):
        raise RuntimeError(
            f"the number of datasets and dmodels must be equal "
            f"({len(datasets)} != {len(dmodels)})")
    n = self.nitems()
    # Per-item device/host buffers, populated during preparation
    self._d_dataset_d_vector = iterutils.make_list(n, None)
    self._d_dataset_m_vector = iterutils.make_list(n, None)
    self._d_dataset_e_vector = iterutils.make_list(n, None)
    self._d_residual_vector = iterutils.make_list(n, None)
    self._h_residual_vector = iterutils.make_list(n, None)
    self._d_residual_nddata = iterutils.make_list(n, dict())
    self._h_residual_nddata = iterutils.make_list(n, dict())
    self._s_counts = iterutils.make_list(n, (None, None))
    # Broadcast scalar wp/wu to one entry per item
    if not iterutils.is_sequence(wp):
        wp = iterutils.make_tuple(n, wp)
    if not iterutils.is_sequence(wu):
        wu = iterutils.make_tuple(n, wu)
    self._wd = wd
    self._wp = wp
    self._wu = wu
    # NOTE(review): assumes iterutils.make_tuple(n, dict()) produces n
    # distinct dicts (they are mutated per item below) — confirm against
    # iterutils' implementation.
    self._weights_d = iterutils.make_tuple(n, dict())
    self._weights_p = iterutils.make_tuple(n, dict())
    self._weights_u = iterutils.make_tuple(n, dict())
    if len(wp) != n:
        raise RuntimeError(
            f"the length of wp and the number of datasets are not equal "
            f"({len(wp)} != {n})")
    if len(wu) != n:
        raise RuntimeError(
            f"the length of wu and the number of datasets are not equal "
            f"({len(wu)} != {n})")
    for i in range(n):
        dataset = datasets[i]
        dmodel = dmodels[i]
        names_dat = tuple(dataset.keys())
        names_mdl = tuple(dmodel.onames())
        # The dataset must provide exactly the dmodel's observables
        if set(names_dat) != set(names_mdl):
            raise RuntimeError(
                f"dataset and dmodel are incompatible "
                f"for item #{i} "
                f"({names_dat} != {names_mdl})")
        if dataset.dtype != dmodel.dtype():
            raise RuntimeError(
                f"dataset and dmodel have incompatible dtypes "
                f"for item #{i} "
                f"({dataset.dtype} != {dmodel.dtype()})")
        if dataset.size() != dmodel.size():
            raise RuntimeError(
                f"dataset and dmodel have incompatible sizes "
                f"for item #{i} "
                f"({dataset.size()} != {dmodel.size()})")
        if dataset.step() != dmodel.step():
            raise RuntimeError(
                f"dataset and dmodel have incompatible steps "
                f"for item #{i} "
                f"({dataset.step()} != {dmodel.step()})")
        if dataset.zero() != dmodel.zero():
            raise RuntimeError(
                f"dataset and dmodel have incompatible zeros "
                f"for item #{i} "
                f"({dataset.zero()} != {dmodel.zero()})")
        for name in names_mdl:
            min_ = np.nanmin(dataset[name].data())
            max_ = np.nanmax(dataset[name].data())
            # NOTE(review): divides by zero when the data is constant
            # (max_ == min_) — confirm whether that can occur here.
            self._weights_d[i][name] = 1 / (max_ - min_) if wd else 1.0
            # Idiom fix: 'x is None' instead of isinstance(x, type(None))
            if wp[i] is None:
                self._weights_p[i][name] = 0.0
            elif isinstance(wp[i], numbers.Real):
                self._weights_p[i][name] = wp[i]
            elif isinstance(wp[i], dict):
                self._weights_p[i][name] = wp[i].get(name, 0.0)
            if wu[i] is None:
                self._weights_u[i][name] = 1.0
            elif isinstance(wu[i], numbers.Real):
                self._weights_u[i][name] = wu[i]
            elif isinstance(wu[i], dict):
                self._weights_u[i][name] = wu[i].get(name, 1.0)
    self._backends = iterutils.make_list(n, None)
    self._prepared = False
def __init__(self, data, step):
    """Store a copy of the data together with a normalised 2-tuple step.

    A single step value is duplicated for both axes.
    """
    step = iterutils.tuplify(step)
    if len(step) == 1:
        step = (step[0], step[0])
    self._data = data.copy()
    self._step = step
def __init__(self, models):
    """Initialise a group from one or more models.

    Parameter descriptors of all models are merged into a single dict
    with unique keys, along with the corresponding name mappings.
    """
    models = iterutils.tuplify(models)
    self._models = models
    self._pdescs, self._mappings = miscutils.merge_dicts_and_make_mappings(
        [model.pdescs() for model in models], 'model')
def make_model_group_from_seq(models):
    """Wrap a model or sequence of models into a ModelGroup."""
    models = iterutils.tuplify(models)
    return ModelGroup(models)
def load_moves_with_weights(info):
    """Load move specifications and pair each with its weight.

    Each move dict may carry a 'weight' key (default 1), which is
    extracted before parsing. NOTE: pop() mutates the caller's dicts
    in place.
    """
    moves = iterutils.tuplify(info, False)
    weights = tuple(move.pop('weight', 1) for move in moves)
    parsed = move_parser.load(moves)
    return tuple(zip(parsed, weights))
def make_fitter_result(
        objective, parameters, posterior=None, extra=None, solutions=None):
    """Assemble a FitterResult from an objective, parameters and solutions.

    Parameters
    ----------
    objective :
        Objective providing model/residual evaluation and datasets.
    parameters :
        Parameter descriptor providing enames() and evaluate().
    posterior : optional
        Global posterior; if no solutions are given it is converted into
        a single solution instead.
    extra : optional
        Extra payload forwarded to the FitterResult.
    solutions : dict or sequence of dicts, optional
        Each may provide 'mode', 'posterior', 'mean', 'std', 'covar'.

    Raises
    ------
    RuntimeError
        If neither solutions nor a posterior is supplied, or a solution's
        mode cannot be recovered.
    """
    # At least one solution or a global posterior is required
    if not solutions:
        if not posterior:
            raise RuntimeError(
                "at least one solution or a global posterior "
                "is required in order to create a Fitter Result")
        # If we have a global posterior but not solutions,
        # transform the global posterior to a new solution
        solutions = dict(posterior=posterior)
        posterior = None
    # Ensure solutions are iterable for convenience
    solutions = iterutils.tuplify(solutions, False)
    # Process the global posterior
    if posterior:
        posterior = make_fitter_result_posterior(posterior, parameters)
    # Make some arrays with exploded param names for later use
    enames_all = parameters.enames(True, True, True)
    enames_free = parameters.enames(False, False, True)
    enames_tied = parameters.enames(False, True, False)
    enames_fixed = parameters.enames(True, False, False)
    enames_varying = parameters.enames(False, True, True)
    sols = []
    # For each solution, create a FitterResultSolution
    # and try to populate as many of its fields as possible.
    for i, s in enumerate(solutions):
        sol = FitterResultSolution()
        # Derive the varying-parameter mode from the free-parameter mode
        if 'mode' in s:
            eparams_free = dict(zip(enames_free, s['mode']))
            eparams_varying = {p: None for p in enames_varying}
            parameters.evaluate(eparams_free, eparams_varying, True)
            sol.mode = np.array(list(eparams_varying.values()))
        # Calculate statistical quantities from posterior
        if 'posterior' in s:
            # BUG FIX: store the per-solution posterior in a local name;
            # previously it overwrote the function-level 'posterior' and
            # the last solution's posterior leaked into the FitterResult
            # as the global posterior.
            sol_posterior = make_fitter_result_posterior(
                s['posterior'], parameters)
            sol.covar = np.cov(sol_posterior.samples, rowvar=False)
            sol.mean = np.mean(sol_posterior.samples, axis=0)
            sol.std = np.std(sol_posterior.samples, axis=0)
            # If no mode was provided, use the maximum-logprob sample
            if sol.mode is None:
                sol.mode = sol_posterior.samples[
                    np.argmax(sol_posterior.logprobs)]
            sol.posterior = sol_posterior
        # Otherwise, salvage whatever is available
        else:
            if 'mean' in s:
                sol.mean = np.full(len(enames_varying), np.nan)
                for j, param in enumerate(enames_free):
                    sol.mean[enames_varying.index(param)] = s['mean'][j]
            if 'std' in s:
                sol.std = np.full(len(enames_varying), np.nan)
                for j, param in enumerate(enames_free):
                    sol.std[enames_varying.index(param)] = s['std'][j]
            if 'covar' in s:
                # TODO(review): 'covar' is accepted but never used here
                pass
        # Mode must be provided or recovered somehow
        if sol.mode is None:
            raise RuntimeError(
                f"mode was not provided or could not be recovered "
                f"for solution {i}")
        # Calculate quantities using the mode
        # (removed leftover debug print of eparams_free)
        eparams_varying = dict(zip(enames_varying, sol.mode))
        eparams_free = {n: eparams_varying[n] for n in enames_free}
        params = parameters.evaluate(eparams_free)
        # TODO(review): dof is a hard-coded placeholder — compute the
        # real degrees of freedom from the data size.
        dof = 100
        sol.model = objective.model_h(params)
        sol.residual = objective.residual_nddata_h(params)
        # NOTE(review): wresidual uses the same (unweighted) call as
        # residual — presumably a weighted variant is intended; confirm.
        sol.wresidual = objective.residual_nddata_h(params)
        # TODO(review): chisqr/wchisqr are placeholders (1.0) — compute
        # them from the residuals.
        sol.chisqr = 1.0
        sol.rchisqr = sol.chisqr / (dof - len(enames_free))
        sol.wchisqr = 1.0
        sol.rwchisqr = sol.wchisqr / (dof - len(enames_free))
        sols.append(sol)
    # Sort solutions by chi-squared (best first)
    sols = sorted(sols, key=lambda x: x.chisqr)
    param_names = dict(
        all=enames_all,
        free=enames_free,
        tied=enames_tied,
        fixed=enames_fixed,
        varying=enames_varying)
    return FitterResult(
        objective.datasets(), parameters, param_names,
        sols, posterior, extra)