def build(self):
    """Instantiate one Snapshot per configured instance and minor index.

    For every ``(name, label)`` pair in the bundle configuration a
    ``C.Snapshot`` is created for each minor index iteration; the snapshot's
    source is registered as the bundle input and its result as the output.
    """
    self.objects = []
    for name, label in self.cfg.instances.items():
        # Fall back to an auto-indexed label when none is configured.
        effective_label = 'Snapshot {autoindex}' if label is None else label
        for minor_it in self.nidx_minor.iterate():
            snap = C.Snapshot(labels=minor_it.current_format(effective_label))
            self.objects.append(snap)
            self.set_input(name, minor_it, snap.snapshot.source, argument_number=0)
            self.set_output(name, minor_it, snap.snapshot.result)
def run(self):
    """Assemble a Dataset from (theory, data) and (theory, data, variance) pairs.

    Statistical errors are taken from the data (neyman) or the theory
    (pearson).  Unfrozen error outputs are frozen through a ``C.Snapshot`` so
    the error does not follow later parameter changes.  The resulting dataset
    is stored under ``self.env.parts.dataset[self.opts.name]``.
    """
    dataset = Dataset(desc=self.opts.name)
    verbose = self.opts.verbose
    if verbose:
        print("Dataset '{}' with:".format(self.opts.name))
    if self.opts.pull:
        self.load_pulls(dataset)
    # Keep snapshots referenced from Python so they are not garbage collected.
    self.snapshots = dict()
    for theory_path, data_path in self.opts.theory_data:
        theory = env.future['spectra', theory_path]
        data = env.future['spectra', data_path]
        data.data()  # force evaluation of the data output
        if verbose:
            print(' theory: ', str(theory))
            print(' data: ', str(data))
        if self.opts.error_type == 'neyman':
            error = data.single()
        elif self.opts.error_type == 'pearson':
            error = theory.single()
        else:
            # Previously an unexpected error type fell through to a NameError.
            raise Exception('Unknown error type: {}'.format(self.opts.error_type))
        if not error.getTaintflag().frozen():
            # Freeze the statistical error via a snapshot of its current state.
            snapshot = self.snapshots[error] = C.Snapshot(
                error, labels='Snapshot: stat errors')
            snapshot.single().touch()
            error = snapshot
        dataset.assign(obs=theory, value=data, error=error.single())
    for theory_path, data_path, variance_path in self.opts.theory_data_variance:
        theory = env.future['spectra', theory_path]
        data = env.future['spectra', data_path]
        variance = env.future['spectra', variance_path]
        data.data()
        variance.data()
        if verbose:
            print(' theory: ', str(theory))
            print(' data: ', str(data))
            print(' variance:', str(variance))
        dataset.assign(obs=theory, value=data, error=variance.single())
    self.env.parts.dataset[self.opts.name] = dataset
def run(self):
    """Create an analysis: blocks of (theory, data, covariance).

    Optionally replaces the data in each block with a ToyMC output
    (covariance/poisson/normal/normalStats sampling or an asimov snapshot).
    Results are stored in ``self.env.parts.analysis`` and
    ``self.env.parts.analysis_errors``.
    """
    dataset = Dataset(bases=self.opts.datasets)
    cov_parameters = get_parameters(self.opts.cov_parameters,
                                    drop_fixed=True, drop_free=True)
    if self.opts.observables:
        observables = list(self.__extract_obs(self.opts.observables))
    else:
        observables = None
    if self.opts.cov_parameters:
        print('Compute covariance matrix for {} parameters:'.format(
            len(cov_parameters)), *self.opts.cov_parameters)
    blocks = dataset.makeblocks(observables, cov_parameters)
    if self.opts.toymc:
        if self.opts.toymc == 'covariance':
            toymc = ROOT.CovarianceToyMC()
            add = toymc.add
        elif self.opts.toymc == 'poisson':
            toymc = ROOT.PoissonToyMC()
            # PoissonToyMC does not use the covariance argument.
            add = lambda t, c: toymc.add(t)
        elif self.opts.toymc == 'normal':
            toymc = C.NormalToyMC()
            add = toymc.add
        elif self.opts.toymc == 'normalStats':
            toymc = C.NormalStatsToyMC()
            add = toymc.add
        elif self.opts.toymc == 'asimov':
            toymc = C.Snapshot()
            add = lambda t, c: toymc.add_input(t)
        else:
            # Previously an unknown option fell through to a NameError on 'toymc'.
            raise Exception('Unknown ToyMC type: {}'.format(self.opts.toymc))
        for block in blocks:
            add(block.theory, block.cov)
        # Replace the data of each block with the corresponding ToyMC output.
        blocks = [
            block._replace(data=toymc_out)
            for block, toymc_out in zip(
                blocks, toymc.transformations.front().outputs.values())
        ]
        self.env.parts.toymc[self.opts.name] = toymc
        # Renamed loop variable: the original reused 'toymc', shadowing the object.
        for trans in toymc.transformations.values():
            trans.setLabel(self.opts.toymc + ' ToyMC ' + self.opts.name)
    self.env.parts.analysis[self.opts.name] = blocks
    self.env.parts.analysis_errors[self.opts.name] = dataset
def run(self):
    """Build a dataset from Asimov (theory, data) pairs.

    Looks up outputs first in the observables namespace and falls back to
    ``env.future['spectra', ...]``.  Statistical errors follow the configured
    error type (neyman: from data, pearson: from theory); unfrozen errors are
    frozen through a ``C.Snapshot``.
    """
    # 'if self.opts.random_seed:' would silently skip a seed of 0.
    if self.opts.random_seed is not None:
        np.random.seed(self.opts.random_seed)
    dataset = Dataset(desc=None)
    verbose = self.opts.verbose
    if verbose:
        print('Adding pull parameters to dataset', self.opts.name)
    if self.opts.pull:
        self.load_pulls(dataset)
    # Keep snapshots referenced from Python so they are not garbage collected.
    self.snapshots = dict()
    if self.opts.asimov_data:
        for theory_path, data_path in self.opts.asimov_data:
            try:
                theory, data = env.get(theory_path), env.get(data_path)
            except KeyError:
                theory = env.future['spectra', theory_path]
                data = env.future['spectra', data_path]
            if self.opts.error_type == 'neyman':
                error = data.single()
            elif self.opts.error_type == 'pearson':
                error = theory.single()
            else:
                # Previously an unexpected error type fell through to a NameError.
                raise Exception('Unknown error type: {}'.format(self.opts.error_type))
            if not error.getTaintflag().frozen():
                snapshot = self.snapshots[error] = C.Snapshot(
                    error, labels='Snapshot: stat errors')
                snapshot.single().touch()
                error = snapshot
            dataset.assign(obs=theory, value=data, error=error.single())
    # TODO: asimov_poisson support was never finished; kept for reference.
    # if self.opts.asimov_poisson:
    #     for theory_path, data_path in self.opts.asimov_poisson:
    #         data_poisson = np.random.poisson(env.get(data_path).data())
    #         if self.opts.error_type == 'neyman':
    #             dataset.assign(env.get(theory_path), data_poisson, env.get(data_path))
    #         elif self.opts.error_type == 'pearson':
    #             dataset.assign(env.get(theory_path), data_poisson, env.get(theory_path))
    self.env.parts.dataset[self.opts.name] = dataset
def test_snapshot_01(function_name):
    """Test C.Snapshot on a VarArray: frozen result, taint flag and unfreeze.

    The snapshot must keep its original values (and stay untainted) while the
    underlying parameters change; after ``unfreeze`` it must become tainted
    and follow the current parameter values again.

    Note: the original docstring ("Test ViewRear on Points") described a
    different class and was corrected.
    """
    size = 4
    ns = env.env.globalns(function_name)
    names = []
    for i in range(size):
        name = 'val_%02i' % i
        names.append(name)
        ns.defparameter(name, central=i, fixed=True, label='Value %i' % i)
    with ns:
        vararray = C.VarArray(names)
    snapshot = C.Snapshot(vararray)
    # First iteration (ichange == -1) changes nothing; each later one bumps
    # a single parameter by 1 so the live values diverge from the snapshot.
    for ichange, iname in enumerate([''] + names, -1):
        print(' Change', ichange)
        if iname:
            par = ns[iname]
            par.set(par.value() + 1.0)
        res = snapshot.snapshot.result.data()
        # 'values' instead of 'vars' to avoid shadowing the builtin.
        values = vararray.single().data()
        print(' Result', res)
        print(' Vars', values)
        tainted = snapshot.snapshot.tainted()
        print(' Taintflag', tainted)
        assert not tainted
        if iname:
            assert (res != values).any()
        else:
            assert (res == values).all()
        print()
    # After unfreezing the snapshot must be tainted and track the live values.
    snapshot.snapshot.unfreeze()
    assert snapshot.snapshot.tainted()
    res = snapshot.snapshot.result.data()
    print('Result', res)
    print('Vars', values)
    assert (res == values).all()
def init(self):
    """Snapshot an existing observable/spectrum and register it under a new name."""
    self.ns = self.env.globalns(self.opts.ns)
    # Prefer the observable from the namespace; fall back to future spectra.
    try:
        source = self.ns.getobservable(self.opts.name_in)
    except KeyError:
        source = self.env.future['spectra', self.opts.name_in]
    if not source:
        raise Exception('Invalid or missing output: {}'.format(self.opts.name_in))
    self.snapshot = C.Snapshot(source)
    transformation = self.snapshot.snapshot
    if self.opts.label:
        transformation.setLabel(self.opts.label)
    transformation.touch()
    result = self.snapshot.single()
    self.ns.addobservable(self.opts.name_out, result,
                          export=not self.opts.hidden)
    self.env.future['spectra', self.opts.name_out] = result
    self.env.parts.snapshot[self.opts.name_out] = self.snapshot
def build(self):
    """Build the SNF (spent nuclear fuel) correction chain for each reactor index.

    For each index: interpolate the raw SNF correction onto the requested
    energies, snapshot the nominal spectrum, multiply the two and scale the
    product with a per-reactor 'snf_scale' parameter via a WeightedSum.
    """
    for idx in self.nidx:
        reac, = idx.current_values()
        name = "snf_correction" + idx.current_format()
        try:
            _snf_energy, _snf_spectra = list(
                map(C.Points, self.snf_raw_data[reac]))
        except KeyError:
            # No per-reactor data: fall back to the 'average' SNF spectrum.
            # NOTE(review): the original comment here mentioned U238/offeq —
            # apparently copy-pasted from the offequilibrium bundle; confirm.
            _snf_energy, _snf_spectra = list(
                map(C.Points, self.snf_raw_data['average']))
        _snf_energy.points.setLabel(
            "Original energies for SNF spectrum of {}".format(reac))
        snf_spectra = C.InterpLinear(
            labels='Correction for spectra in {}'.format(reac))
        # Clamp the interpolation outside the tabulated range.
        snf_spectra.set_overflow_strategy(
            R.GNA.Interpolation.Strategy.Constant)
        snf_spectra.set_underflow_strategy(
            R.GNA.Interpolation.Strategy.Constant)
        insegment = snf_spectra.transformations.front()
        insegment.setLabel("Segments")
        interpolator_trans = snf_spectra.transformations.back()
        interpolator_trans.setLabel(
            "Interpolated SNF correction for {}".format(reac))
        passthrough = C.Identity(
            labels="Nominal spectra for {}".format(reac))
        # Wire raw SNF points into the interpolator.
        _snf_energy >> (insegment.edges, interpolator_trans.x)
        _snf_spectra >> interpolator_trans.y
        # Input 0: target energies; input 1: nominal spectrum.
        self.set_input('snf_correction', idx,
                       (insegment.points, interpolator_trans.newx),
                       argument_number=0)
        self.set_input('snf_correction', idx,
                       (passthrough.single_input()),
                       argument_number=1)
        # Freeze the nominal spectrum so the product uses a fixed reference.
        snap = C.Snapshot(
            passthrough.single(),
            labels='Snapshot of nominal spectra for SNF in {}'.format(
                reac))
        product = C.Product(
            outputs=[snap.single(), interpolator_trans.single()],
            labels='Product of nominal spectrum to SNF correction in {}'.
            format(reac))
        par_name = "snf_scale"
        self.reqparameter(par_name, idx, central=1., relsigma=1,
                          labels="SNF norm for reactor {0}".format(reac))
        outputs = [product.single()]
        weights = ['.'.join((par_name, idx.current_format()))]
        with self.namespace:
            final_sum = C.WeightedSum(
                weights, outputs,
                labels='SNF spectrum from {0} reactor'.format(reac))
        self.context.objects[name] = final_sum
        self.set_output("snf_correction", idx, final_sum.single())
def run(self):
    """Create a named analysis from datasets, with optional covariance and ToyMC.

    Covariance parameters are read from a parameter group; parameters that do
    not influence the dataset are skipped (or raise under --cov-strict).  The
    (theory, data, covariance) blocks are stored in the environment; data may
    be replaced with a ToyMC output.
    """
    dataset = Dataset(bases=self.opts.datasets, desc=self.opts.name)
    if self.opts.cov_parameters:
        try:
            cov_parameters = self.env.future['parameter_groups',
                                             self.opts.cov_parameters]
        except KeyError:
            raise Exception('Unable to get pargroup {}'.format(
                self.opts.cov_parameters))
        cov_parameters_all = list(cov_parameters.values())
        # Keep only parameters that actually influence the model.
        cov_parameters, skip_parameters = partition(
            lambda par: par_influences(par, dataset.data.keys()),
            cov_parameters_all)
        if skip_parameters:
            print('Skip {} cov parameters as they do not affect the model'.
                  format(len(skip_parameters)))
            if self.opts.verbose > 1:
                print(' ', [p.qualifiedName() for p in skip_parameters])
            if self.opts.cov_strict:
                raise self._exception(
                    'Some parameters do not affect the model.')
    else:
        cov_parameters = []
    if self.opts.observables:
        observables = list(self.__extract_obs(self.opts.observables))
    else:
        observables = None
    if self.opts.verbose:
        names = ', '.join((d.desc for d in self.opts.datasets))
        print("Analysis '{}' with: {}".format(self.opts.name, names), end='')
        if self.opts.cov_parameters:
            print(' and {} parameters from {}'.format(
                len(cov_parameters), self.opts.cov_parameters))
        else:
            print()
    blocks = dataset.makeblocks(observables, cov_parameters)
    if self.opts.toymc:
        if self.opts.toymc == 'covariance':
            toymc = ROOT.CovarianceToyMC()
            add = toymc.add
        elif self.opts.toymc == 'poisson':
            toymc = ROOT.PoissonToyMC()
            # PoissonToyMC does not use the covariance argument.
            add = lambda t, c: toymc.add(t)
        elif self.opts.toymc == 'normal':
            toymc = C.NormalToyMC()
            add = toymc.add
        elif self.opts.toymc == 'normalStats':
            toymc = C.NormalStatsToyMC()
            add = toymc.add
        elif self.opts.toymc == 'asimov':
            toymc = C.Snapshot()
            add = lambda t, c: toymc.add_input(t)
        else:
            # Previously an unknown option fell through to a NameError on 'toymc'.
            raise self._exception(
                'Unknown ToyMC type: {}'.format(self.opts.toymc))
        for block in blocks:
            add(block.theory, block.cov)
        # Replace each block's data with the corresponding ToyMC output.
        blocks = [
            block._replace(data=toymc_out)
            for block, toymc_out in zip(
                blocks, toymc.transformations.front().outputs.values())
        ]
        self.env.parts.toymc[self.opts.name] = toymc
        # Renamed loop variable: the original reused 'toymc', shadowing the object.
        for trans in toymc.transformations.values():
            trans.setLabel(self.opts.toymc + ' ToyMC ' + self.opts.name)
    self.env.parts.analysis[self.opts.name] = blocks
    self.env.parts.analysis_errors[self.opts.name] = dataset
    storage = self.env.future.child(('analysis', self.opts.name))
    for i, block in enumerate(blocks):
        storage[str(i)] = dict(theory=block.theory, data=block.data,
                               L=block.cov.single())
def build(self):
    """Build the offequilibrium correction chain per (isotope, reactor) index.

    For each index: interpolate the raw offeq correction onto the requested
    energies, multiply it with a snapshot of the nominal spectrum and combine
    nominal and corrected spectra in a WeightedSum using the 'offeq_scale'
    (and a fixed 'dummy_scale') weights.  U238 has no correction and is
    passed through unchanged.
    """
    for idx in self.nidx.iterate():
        # Index order is not fixed: detect which position holds the isotope.
        if 'isotope' in idx.names()[0]:
            iso, reac = idx.current_values()
        else:
            reac, iso = idx.current_values()
        name = "offeq_correction." + idx.current_format()
        try:
            _offeq_energy, _offeq_spectra = list(
                map(C.Points, self.offeq_raw_spectra[iso]))
            _offeq_energy.points.setLabel(
                "Original energies for offeq spectrum of {}".format(iso))
        except KeyError:
            # U238 doesn't have offequilibrium correction so just pass 1.
            if iso != 'U238':
                raise
            passthrough = C.Identity(
                labels='Nominal {0} spectrum in {1} reactor'.format(iso, reac))
            self.context.objects[name] = passthrough
            # Dummy identity only serves the (unused) energy input so the
            # bundle exposes the same two inputs as the corrected branch.
            dummy = C.Identity()
            self.set_input('offeq_correction', idx, dummy.single_input(),
                           argument_number=0)
            self.set_input('offeq_correction', idx,
                           passthrough.single_input(), argument_number=1)
            self.set_output("offeq_correction", idx, passthrough.single())
            continue
        offeq_spectra = C.InterpLinear(
            labels='Correction for {} spectra'.format(iso))
        # Clamp the interpolation outside the tabulated range.
        offeq_spectra.set_overflow_strategy(
            R.GNA.Interpolation.Strategy.Constant)
        offeq_spectra.set_underflow_strategy(
            R.GNA.Interpolation.Strategy.Constant)
        insegment = offeq_spectra.transformations.front()
        insegment.setLabel("Offequilibrium segments")
        interpolator_trans = offeq_spectra.transformations.back()
        interpolator_trans.setLabel(
            "Interpolated spectral correction for {}".format(iso))
        passthrough = C.Identity(
            labels="Nominal {0} spectrum in {1} reactor".format(iso, reac))
        # Wire raw offeq points into the interpolator.
        _offeq_energy >> (insegment.edges, interpolator_trans.x)
        _offeq_spectra >> interpolator_trans.y
        # Enu
        self.set_input('offeq_correction', idx,
                       (insegment.points, interpolator_trans.newx),
                       argument_number=0)
        # Anue spectra
        self.set_input('offeq_correction', idx,
                       (passthrough.single_input()), argument_number=1)
        par_name = "offeq_scale"
        self.reqparameter(par_name, idx, central=1., relsigma=0.3,
                          labels="Offequilibrium norm for reactor {1} and iso "
                          "{0}".format(iso, reac))
        # Fixed unit weight for the nominal-spectrum term of the sum.
        self.reqparameter("dummy_scale", idx, central=1, fixed=True,
                          labels="Dummy weight for reactor {1} and iso "
                          "{0} for offeq correction".format(iso, reac))
        # Freeze the nominal spectrum so the product uses a fixed reference.
        snap = C.Snapshot(
            passthrough.single(),
            labels='Snapshot of {} spectra in reac {}'.format(iso, reac))
        prod = C.Product(labels='Product of initial {} spectra and '
                         'offequilibrium corr in {} reactor'.format(iso, reac))
        prod.multiply(interpolator_trans.single())
        prod.multiply(snap.single())
        # WeightedSum: dummy_scale * nominal + offeq_scale * (nominal * corr).
        outputs = [passthrough.single(), prod.single()]
        weights = ['.'.join(("dummy_scale", idx.current_format())),
                   '.'.join((par_name, idx.current_format()))]
        with self.namespace:
            final_sum = C.WeightedSum(
                weights, outputs,
                labels='Corrected to offequilibrium '
                '{0} spectrum in {1} reactor'.format(iso, reac))
        self.context.objects[name] = final_sum
        self.set_output("offeq_correction", idx, final_sum.single())