def init(self):
    """Prepare a parameter scan: pick the minimizer and split parameters
    into scanned and non-scanned sets, keyed by qualified name.
    """
    nvars = len(self.opts.variables)
    # self.ndim constrains how many variables may be scanned (None = unconstrained).
    # NOTE(review): the message formats self.ndim with %i, which raises TypeError
    # when self.ndim is None — confirm ndim is always set where this can fail.
    assert nvars and (self.ndim is None or nvars == self.ndim), 'Can scan only %i variable(s)' % (self.ndim)
    self.minimizer = self.opts.minimizer[0]
    # All non-fixed parameters (free and constrained kept), keyed by qualified name.
    self.allpars = OrderedDict([
        (par.qualifiedName(), par)
        for par in get_parameters(self.opts.pars, drop_fixed=True, drop_free=False, drop_constrained=False)
    ])
    # Parameters actually scanned over, resolved from the variable configs.
    self.scanpars = OrderedDict([
        (par.qualifiedName(), par)
        for par in get_parameters([par.parname for par in self.opts.variables])
    ])
    # Everything in allpars that is not being scanned.
    self.otherpars = [
        par for (name, par) in self.allpars.items()
        if not name in self.scanpars.keys()
    ]
    # scanpars = []
    # NOTE(review): `spar` is assigned but never used below — this loop looks
    # truncated or leftover; verify against the full file before removing.
    for varcfg in self.opts.variables:
        spar = self.allpars.get(varcfg.parname)
def init(self):
    """Create a minimizer over the configured statistic and register it
    under self.opts.name in the environment.
    """
    # Wrap the last output of the statistic's transformation chain as the
    # scalar value to be minimized.
    statistic = ROOT.StatisticOutput(
        self.opts.statistic.transformations.back().outputs.back())
    minimizer = minimizers[self.opts.type](statistic)
    loaded_parameters = get_parameters(
        self.opts.par,
        drop_fixed=True,
        drop_free=False,
        drop_constrained=self.opts.drop_constrained)
    statistic_parameters = []
    for par in loaded_parameters:
        # Keep only parameters the statistic actually depends on.
        if par.influences(self.opts.statistic.transformations.back()):
            statistic_parameters.append(par)
        elif cfg.debug_par_fetching:
            # Report silently-dropped parameters when debugging is enabled.
            warnings.warn(
                "parameter {} doesn't influence the statistic and is being dropped"
                .format(par.name()))
        else:
            continue
    minimizer.addpars(statistic_parameters)
    if self.opts.spec is not None:
        # Optional specification string refining the minimizer configuration.
        minimizer.spec = spec.parse(self.env, minimizer, self.opts.spec)
    self.env.parts.minimizer[self.opts.name] = minimizer
def __extract_obs(self, obses):
    """Yield resolved objects for the given observable specifications.

    A name containing '/' is looked up directly in the environment;
    any other name is expanded through get_parameters() (constrained
    parameters only: fixed and free are dropped).
    """
    for spec_name in obses:
        if '/' not in spec_name:
            yield from get_parameters([spec_name], drop_fixed=True, drop_free=True)
            continue
        yield self.env.get(spec_name)
def test_fixed():
    """Check that a parameter created with fixed=True is fixed and that
    get_parameters() drops fixed parameters by default but keeps them
    with drop_fixed=False.
    """
    env.defparameter('probe1', central=0., sigma=1.)
    env.defparameter('probe2', central=0., sigma=1.)
    env.defparameter('probe3', central=0., sigma=1.)
    env.defparameter('probe_fixed', central=0., sigma=1., fixed=True)
    print()
    msg = 'Testing that par {0} is created fixed'
    print(msg.format(env.parameters['probe_fixed'].name()))
    # isFixed() returns a bool; assert it directly instead of '== True'.
    assert env.parameters['probe_fixed'].isFixed()
    print('It is!\n')
    print("Checks whether get_parameters() discards fixed params by default:")
    no_fixed = [_.name() for _ in get_parameters(['probe1', 'probe_fixed'])]
    assert 'probe1' in no_fixed
    # BUG FIX: the original never verified the fixed parameter was actually
    # dropped, which is the very behavior this test claims to check.
    assert 'probe_fixed' not in no_fixed
    print('True!\n')
    print("Checks that get_parameters(drop_fixed=False) keeps parameters:")
    with_fixed = [
        par.name()
        for par in get_parameters(['probe1', 'probe_fixed'], drop_fixed=False)
    ]
    assert 'probe1' in with_fixed and 'probe_fixed' in with_fixed
    print('True!\n')
def run(self):
    """Assemble the analysis: build dataset blocks over the requested
    observables/covariance parameters and optionally replace the data
    with a ToyMC sample.
    """
    dataset = Dataset(bases=self.opts.datasets)
    # Constrained parameters contributing to the covariance matrix
    # (fixed and free parameters dropped).
    cov_parameters = get_parameters(self.opts.cov_parameters,
                                    drop_fixed=True,
                                    drop_free=True)
    if self.opts.observables:
        observables = list(self.__extract_obs(self.opts.observables))
    else:
        observables = None
    if self.opts.cov_parameters:
        print(
            'Compute covariance matrix for {} parameters:'.format(
                len(cov_parameters)), *self.opts.cov_parameters)
    blocks = dataset.makeblocks(observables, cov_parameters)
    if self.opts.toymc:
        # Select the ToyMC flavor; `add` adapts each flavor's signature to a
        # uniform (theory, cov) call.
        # NOTE(review): an unrecognized value would leave `toymc` unbound
        # (NameError below) — presumably the CLI restricts the choices.
        if self.opts.toymc == 'covariance':
            toymc = ROOT.CovarianceToyMC()
            add = toymc.add
        elif self.opts.toymc == 'poisson':
            toymc = ROOT.PoissonToyMC()
            add = lambda t, c: toymc.add(t)  # Poisson ignores the covariance
        elif self.opts.toymc == 'normal':
            toymc = C.NormalToyMC()
            add = toymc.add
        elif self.opts.toymc == 'normalStats':
            toymc = C.NormalStatsToyMC()
            add = toymc.add
        elif self.opts.toymc == 'asimov':
            toymc = C.Snapshot()
            add = lambda t, c: toymc.add_input(t)  # Asimov: data = theory snapshot
        for block in blocks:
            add(block.theory, block.cov)
        # Substitute each block's data with the matching ToyMC output.
        blocks = [
            block._replace(data=toymc_out) for (block, toymc_out) in zip(
                blocks, iter(toymc.transformations.front().outputs.values()))
        ]
        self.env.parts.toymc[self.opts.name] = toymc
        # NOTE: the loop variable shadows `toymc`; the label string uses
        # self.opts.toymc (a str), so this is harmless here.
        for toymc in toymc.transformations.values():
            toymc.setLabel(self.opts.toymc + ' ToyMC ' + self.opts.name)
    self.env.parts.analysis[self.opts.name] = blocks
    self.env.parts.analysis_errors[self.opts.name] = dataset
def load_pulls(self, dataset):
    """Load nuisance (pull) parameters, build arrays of their values,
    centrals and uncertainties, and assign them to the dataset.
    """
    # Get list of UncertainParameter objects; drop free and fixed ones.
    pull_pars = get_parameters(self.opts.pull,
                               drop_fixed=True,
                               drop_free=True)
    variables = [par.getVariable() for par in pull_pars]
    sigmas, centrals, covariance = get_uncertainties(pull_pars)
    npars = len(pull_pars)
    print(' nuisance: {} parameters'.format(npars))
    from gna.constructors import VarArray, Points
    # Array tracking the current pull parameter values.
    self.pull_vararray = VarArray(variables, labels='Nuisance: values')
    # Array of the pull parameter central values.
    self.pull_centrals = Points(centrals, labels='Nuisance: central')
    # `if covariance:` works for None/scalars; a multi-element numpy array
    # raises ValueError ("truth value is ambiguous"), handled below.
    try:
        if covariance:
            cov = self.pull_covariance = Points(
                covariance, labels='Nuisance: covariance matrix')
        else:
            # If there are no correlations, store only the uncertainties.
            cov = self.pull_sigmas2 = Points(sigmas**2,
                                             labels='Nuisance: sigma')
    except ValueError:
        # Covariance is an array: use it only if it has non-zero entries.
        if covariance.any():
            cov = self.pull_covariance = Points(
                covariance, labels='Nuisance: covariance matrix')
        else:
            # If there are no correlations, store only the uncertainties.
            cov = self.pull_sigmas2 = Points(sigmas**2,
                                             labels='Nuisance: sigma')
    dataset.assign(self.pull_vararray.single(), self.pull_centrals.single(),
                   cov.single())
    # Expose the pull values as an observable and in the future storage.
    ns = self.env.globalns('pull')
    ns.addobservable(self.opts.name, self.pull_vararray.single())
    self.env.future['pull', self.opts.name] = self.pull_vararray.single()
def run(self):
    """Compute statistical + systematic covariance matrices for groups of
    parameters, validate them, and optionally store to HDF5 and/or plot.
    """
    groups = self.opts.groups
    if self.opts.gnames:
        gnames = self.opts.gnames
        if len(gnames) != len(groups):
            raise ValueError("Length of groups and name of groups don't equal")
    else:
        # Default group names: the first parameter name of each group.
        gnames = [group[0] for group in groups]
    self.DataSaver(gnames)
    if self.opts.ns:
        ns = self.env.ns(self.opts.ns)
    else:
        ns = self.env.globalns
    observables = []
    prediction = C.Concat()
    grid = []  # bins per observable, used to draw block separators in plots
    if self.opts.observables:
        ns = self.env.globalns
        for obs in self.opts.observables:
            observable = ns.getobservable(obs)  # hoisted: was looked up 3 times
            observables.append((obs, observable))
            prediction.append(observable)
            grid.append(observable.data().shape[0])
            print(obs+' added')
            splited = obs.split('/')
    else:
        for obs in ns.walkobservables():
            splited = obs[0].split('/')
            if len(splited) == 2:
                short = splited[1]
                # Skip derived/partial observables (names with '.' or '_').
                if '.' not in short and '_' not in short:
                    observables.append(obs)
                    prediction.append(obs[1])
                    grid.append(obs[1].data().shape[0])
                    print(obs[0]+' added')
    # Namespace prefix of the last processed observable.
    # NOTE(review): if no observable matched, `splited` is unbound (NameError).
    name = splited[0]
    covmat = C.Covmat()
    covmat.cov.stat.connect(prediction)
    covmat.cov.setLabel('Covmat')
    self.data['prediction'] = prediction.data().copy()
    for group, gname in zip(groups, gnames):
        cov_pars = get_parameters([name+'.'+g for g in group],
                                  drop_fixed=True, drop_free=True)
        jac, par_covs = self.make_jac(prediction, cov_pars, gname)
        # Systematic covariance: J V J^T.
        product = np.matmul(jac.data().copy(), par_covs.data().copy())
        product = np.matmul(product.copy(), jac.data().copy().T)
        # Jacobian normalized by the prediction (relative derivatives).
        jac_norm = jac.data().T / prediction.data()
        self.data[gname]['jac'] = jac_norm.T
        self.data[gname]['syst'] = syst = product.copy()
        self.data[gname]['full'] = cov_full = covmat.cov.data().copy() + syst
        self.data[gname]['diag'] = np.diagonal(cov_full.copy())
        self.data[gname]['Chol'] = np.linalg.cholesky(cov_full)
        sdiag = np.diagonal(syst)**.5
        self.data[gname]['syst_corl'] = syst / sdiag / sdiag[:, None]
        sdiag = np.diagonal(cov_full)**.5
        self.data[gname]['full_corl'] = cov_full / sdiag / sdiag[:, None]
        self.data[gname]['params'] = group
    # Sanity check: every expected entry must have been filled.
    # BUG FIX: the original `if any(self.data[gname]) is None` could never be
    # true (any() returns a bool), referenced an undefined `key`, and raised a
    # plain string (a TypeError in Python 3).
    for gname in gnames:
        for key in self.data_names.keys():
            if self.data[gname].get(key) is None:
                raise ValueError("None {} for {}".format(self.data_names[key], gname))
    if self.opts.out_hdf5:
        path = self.opts.out_hdf5
        with h5py.File(path, 'w') as f:
            f.create_dataset('prediction', data=self.data['prediction'])
            for gname in self.data.keys():
                if gname != 'prediction':
                    for key in self.data[gname].keys():
                        f.create_dataset(gname+'/'+key, data=self.data[gname][key])
        print('Save output file: '+path)
    if any([self.opts.out_fig, self.opts.show]):
        num = 0
        path = str(self.opts.out_fig).split('.')
        # First figure: statistical-only diagonal.
        fig, ax = plt.subplots(figsize=(12, 9), dpi=300)
        data = self.data['prediction']
        self.plot_diag(fig, ax, data, '', 'stat.')
        fig.tight_layout()
        if self.opts.out_fig:
            tmp_path = path[0]+'_{:02}_stat.{}'.format(num, path[1])
            plt.savefig(tmp_path)
            num += 1
            print('Save output file: '+tmp_path)
        if self.opts.show:
            plt.show()
        fig.clf()
        fig.clear()
        plt.close()
        # One figure per (group, data kind).
        for gname in gnames:
            for key in self.data_names.keys():
                if key == 'jac' and self.opts.params:
                    fig, ax = plt.subplots(figsize=(15, 9), dpi=300)
                else:
                    fig, ax = plt.subplots(figsize=(12, 9), dpi=300)
                data = self.data[gname][key]
                if key == 'diag':
                    self.plot_diag(fig, ax, data, ' for '+gname, key)
                else:
                    self.plot_matrix(fig, ax, data, gname, key)
                # White separators between observable blocks.
                if key not in {'jac', 'diag'}:
                    for i in range(1, len(grid)):
                        lvl = i * grid[i] - .5
                        ax.plot([0, data.shape[0]], [lvl, lvl],
                                color='white', alpha=0.5)
                        ax.plot([lvl, lvl], [0, data.shape[1]],
                                color='white', alpha=0.5)
                elif key == 'jac':  # BUG FIX: was `key is 'jac'` (identity compare)
                    for i in range(1, len(grid)):
                        lvl = i * grid[i]
                        ax.plot([0, data.shape[0]], [lvl, lvl],
                                color='white', alpha=0.5)
                fig.tight_layout()
                if self.opts.params and key == 'jac':
                    lines = '\n'.join(self.data[gname]['params'])
                    plot_lines(lines, loc='upper right', outside=[-.42, 1.02])
                if self.opts.out_fig:
                    tmp_path = path[0]+'_{:02}_{}_{}.{}'.format(num, gname, key, path[1])
                    plt.savefig(tmp_path)
                    num += 1
                    print('Save output file: '+tmp_path)
                if self.opts.show:
                    plt.show()
                fig.clf()
                fig.clear()
                plt.close()
def test_par_loader():
    """Exercise get_parameters(): single names, qualified names, whole
    namespaces, mixed queries, wildcards, and missing-key errors.
    """
    probe1 = env.defparameter('probe1', central=0., sigma=1.)
    probe2 = env.defparameter('probe2', central=0., sigma=1.)
    probe3 = env.defparameter('probe3', central=0., sigma=1.)
    test_ns = env.ns('test_ns')
    test1 = test_ns.defparameter('test1', central=1., sigma=0.1)
    test2 = test_ns.defparameter('test2', central=1., sigma=0.1)
    test3 = test_ns.defparameter('test3', central=1., sigma=0.1)
    test4 = test_ns.defparameter('test4', central=1., sigma=0.1)
    extra_test_ns = env.ns('extra_test_ns')
    extra1 = extra_test_ns.defparameter('extra1', central=1., sigma=0.1)
    for name, par in test_ns.walknames():
        print("Par in namespace", name)
    print()
    print("Quering pars with get_parameters() ")
    # Single global parameter by name.
    par1 = get_parameters(['probe1'])
    assert (par1[0] == probe1)
    print('Got global parameter {}'.format(par1[0].name()))
    print()
    # Qualified name: namespace.parameter.
    par2 = get_parameters(['test_ns.test1'])
    assert (par2[0] == test1)
    print('Got parameter {0} from namespace {1}'.format(
        par2[0].name(), test_ns.name))
    print()
    # A bare namespace name expands to all parameters it contains.
    par_in_namespace = get_parameters(['test_ns'])
    assert (par_in_namespace == [test1, test2, test3, test4])
    print()
    print('Got parameters {0} from ns {ns}'.format(
        [_.name() for _ in par_in_namespace], ns=test_ns.name))
    print()
    par_two_namespaces = get_parameters(['test_ns', 'extra_test_ns'])
    assert (par_two_namespaces == [test1, test2, test3, test4, extra1])
    print()
    print('Got parameters {0} from nses {ns}'.format(
        [_.name() for _ in par_two_namespaces],
        ns=[test_ns.name, extra_test_ns.name]))
    print()
    par_mixed_ns_and_global = get_parameters(['test_ns', 'probe1'])
    print('Got parameters {0} from nses {ns} and global'.format(
        [_.name() for _ in par_mixed_ns_and_global], ns=test_ns.name))
    assert (par_mixed_ns_and_global == [test1, test2, test3, test4, probe1])
    print()
    wildcard = get_parameters(['test_ns*'])
    assert (wildcard == [test1, test2, test3, test4])
    print('Got parameters {0} from by wildcard '.format(
        [_.name() for _ in wildcard]))
    print()
    # Asking for missing parameters must raise KeyError.
    try:
        get_parameters(['missing'])
    except KeyError:
        print("KeyError for missing parameter is raised correctly")
    else:
        # BUG FIX: the original passed silently when no exception was raised,
        # so a regression in the error path would go unnoticed.
        raise AssertionError(
            'get_parameters() did not raise KeyError for a missing parameter')
def _check_covariated(pars, covmat):
    """Assert pairwise covariances of *pars* match *covmat*; diagonal is
    checked through sigma() == sqrt(variance). Prints the offending pair
    before re-raising on mismatch."""
    for first, second in itertools.combinations_with_replacement(
            range(len(pars)), 2):
        try:
            if first != second:
                assert pars[first].getCovariance(
                    pars[second]) == covmat[first, second]
            else:
                assert pars[first].sigma() == np.sqrt(covmat[first, second])
        except AssertionError:
            print((first, second),
                  pars[first].getCovariance(pars[second])**2,
                  covmat[first, second])
            raise


def test_par_cov():
    """Check parameter covariances: direct pairwise setCovariance(),
    bulk covariate_pars(), and namespace-level covariate_ns().
    """
    probe_1 = env.defparameter('probe1', central=0., sigma=1.)
    probe_2 = env.defparameter('probe2', central=0., sigma=1.)
    probe_3 = env.defparameter('probe3', central=0., sigma=1.)
    test_ns = env.ns('test_ns')
    test_ns.defparameter('test0', central=1., sigma=0.1)
    test_ns.defparameter('test1', central=1., sigma=0.1)
    test_ns.defparameter('test2', central=1., sigma=0.1)
    test_ns.defparameter('test3', central=1., sigma=0.1)
    extra_test_ns = env.ns('extra_test_ns')
    extra1 = extra_test_ns.defparameter('extra1', central=1., sigma=0.1)
    extra2 = extra_test_ns.defparameter('extra2', central=1., sigma=0.1)
    extra3 = extra_test_ns.defparameter('extra3', central=1., sigma=0.1)
    extra4 = extra_test_ns.defparameter('extra4', central=1., sigma=0.1)
    cov1 = 0.1
    print("Setting covariance of probe_1 with probe_2 to {0}".format(cov1))
    probe_1.setCovariance(probe_2, cov1)
    print("Check that they are mutually correlated now.")
    assert probe_1.isCorrelated(probe_2) and probe_2.isCorrelated(probe_1)
    print("Success")
    print("Get covariance from both -- {0} and {1}\n".format(
        probe_1.getCovariance(probe_2), probe_2.getCovariance(probe_1)))
    print("Checks that change of one propagates to another")
    cov2 = 0.2
    probe_1.setCovariance(probe_2, cov2)
    assert (probe_1.getCovariance(probe_2) == cov2
            and probe_2.getCovariance(probe_1) == cov2)
    print("Success\n")
    test_pars = get_parameters(
        ['test_ns.test0', 'test_ns.test1', 'test_ns.test2', 'test_ns.test3'])
    print("Test pars sequence is {}".format([_.name() for _ in test_pars]))
    cov_matrix1 = make_fake_covmat(4)
    print("Test covariance matrix is \n", cov_matrix1)
    ch.covariate_pars(test_pars, cov_matrix1)
    _check_covariated(test_pars, cov_matrix1)
    extra_pars = [extra1, extra2, extra3]
    cov_mat_extra = make_fake_covmat(3)
    cov_storage = ch.CovarianceStorage("extra_store", extra_pars, cov_mat_extra)
    ch.covariate_ns('extra_test_ns', cov_storage)
    # BUG FIX: the original loop re-checked test_pars against cov_matrix1
    # (copy-paste), so covariate_ns() was never actually verified.
    _check_covariated(extra_pars, cov_mat_extra)