def test_MatrixProductDVDt(n1, n2, sym, tmp_path):
    '''Matrix product of D V Dt'''
    # Build a (n1, n2) left matrix D and an (n2, n2) square matrix V from
    # small integer-valued doubles, so the matrix products are exact.
    size1, size2 = n1 * n2, n2 * n2
    left_n = np.arange(size1, dtype='d').reshape(n1, n2) + 1.0
    square_n = 40.0 + size2 - np.arange(size2, dtype='d').reshape(n2, n2)
    if sym:
        # Symmetrize V so that D V D^T is symmetric as well
        square_n = 0.5 * (square_n + square_n.T)

    left = C.Points(left_n)
    square = C.Points(square_n)

    dvdt = R.MatrixProductDVDt(left, square)

    result = dvdt.product.product()
    check = np.matmul(np.matmul(left_n, square_n), left_n.transpose())
    # Inputs are integer-valued, so the transformation must match numpy exactly
    assert (result == check).all()

    if sym:
        # A symmetric V must yield an exactly symmetric product
        checksym = result - result.T
        assert not checksym.any()

    # Save the computational graph image and attach it to the allure report
    path = os.path.join(str(tmp_path), 'graph_{:d}_{:d}.png'.format(n1, n2))
    savegraph(dvdt.product, path, verbose=False)
    allure_attach_file(path)
def build(self):
    """Build daily thermal-power and per-isotope fission-fraction outputs.

    For every reactor index the per-period core data is expanded to a daily
    grid (np.repeat over ``days``); per-isotope fission fractions are
    expanded the same way. Outputs are registered via ``set_output`` and the
    created C.Points objects are stored in ``self.context.objects`` to keep
    them alive.
    """
    for rit in self.nidx_reactor:
        core_name, = rit.current_values()
        core = self.core_info_daily[core_name]
        days_in_period = np.array(core['days']).astype(int)
        # Expand per-period power values to one value per day
        thermal_power_daily = np.repeat(core['power'], days_in_period)
        if self.cfg.nominal_power:
            # Override with unit (nominal) power when requested by the config
            thermal_power_daily = np.ones(len(thermal_power_daily))

        thermal_power_per_core = C.Points(thermal_power_daily, labels=rit.current_format('Thermal power\n{autoindex}'))
        self.context.objects[(core_name, 'thermal_power')] = thermal_power_per_core
        self.set_output('thermal_power', rit, thermal_power_per_core.single())

        for oit in self.nidx_isotope:
            it = rit+oit
            iso_name, = oit.current_values()
            # map fission fractions and thermal powers to days instead of weeks
            fission_fractions_daily = np.repeat(core['fission_fractions'], days_in_period)
            label=it.current_format('Fission fractions\n{autoindex}')
            fission_per_iso = C.Points(fission_fractions_daily[iso_name], labels=label)
            self.context.objects[it.current_values(name='fission_fractions')] = fission_per_iso
            self.set_output('fission_fractions', it, fission_per_iso.single())

            if self.cfg.add_ff:
                # Optional independent copy used for the normalization calculation
                label_add=it.current_format('Additional set of fission fractions for norm calc\n{autoindex}')
                fission_per_iso_add = C.Points(fission_fractions_daily[iso_name], labels=label_add)
                self.context.objects[it.current_values(name='fission_fractions_add')] = fission_per_iso_add
                self.set_output('fission_fractions_add', it, fission_per_iso_add.single())
def test_chi2_v01():
    """Chi2 with per-bin statistical errors: compare against the numpy value."""
    npoints = 10
    first = 10
    shift = 1.0

    dataa = N.arange(first, first + npoints, dtype='d')
    theorya = dataa + shift
    stata = dataa ** 0.5

    data = C.Points(dataa, labels='Data')
    theory = C.Points(theorya, labels='Theory')
    stat = C.Points(stata, labels='Stat errors')

    chi = C.Chi2(labels='Chi2')
    chi.add(theory, data, stat)

    if graphviz:
        chi.print()
        from gna.graphviz import savegraph
        savegraph(data.single(), 'output/unit_chi2_v01.dot')

    res = chi.chi2.chi2.data()[0]

    # Two equivalent expectations: explicit residuals and the known offset
    expected_residuals = (((dataa - theorya) / stata) ** 2).sum()
    expected_offset = ((shift / stata) ** 2).sum()

    assert (res == expected_residuals).all()
    assert (res == expected_offset).all()
def test_chi2_v02():
    """Chi2 with a diagonal covariance passed as its Cholesky factor."""
    npoints, first, shift = 10, 10, 1.0

    dataa = N.arange(first, first + npoints, dtype='d')
    theorya = dataa + shift
    covmat = N.diag(dataa)
    La = N.linalg.cholesky(covmat)

    data = C.Points(dataa, labels='Data')
    theory = C.Points(theorya, labels='Theory')
    L = C.Points(La, labels='Stat errors')

    chi = C.Chi2(labels='Chi2')
    chi.add(theory, data, L)

    if graphviz:
        chi.print()
        from gna.graphviz import savegraph
        savegraph(data.single(), 'output/unit_chi2_v02.dot')

    res = chi.chi2.chi2.data()[0]
    # Diagonal covariance: chi2 reduces to sum(offset^2 / variance)
    expected = (shift ** 2 / dataa).sum()
    assert (res == expected).all()
def build(self):
    """Build linear interpolators of geo-neutrino spectra, one per data key.

    Each (x, y) pair from ``self.data`` is wrapped into C.Points and wired
    into a C.InterpLinear with constant under/overflow strategies and fill
    value 0. Inputs and outputs are then registered for every index.
    """
    self.inputs = {}
    self.outputs = {}
    self.points = {}
    self.interp = {}
    for k, v in self.data.items():
        # v[0] — tabulated energies, v[1] — tabulated spectrum values
        x = C.Points(v[0], labels='{} geo-nu X0'.format(k))
        y = C.Points(v[1], labels='{} geo-nu Y0'.format(k))
        interp = C.InterpLinear(
            labels=('{} X insegment'.format(k),
                    '{} geo-neutrino spectra'.format(k)))
        # Outside the tabulated range the spectrum is held constant at 0
        interp.set_underflow_strategy(
            R.GNA.Interpolation.Strategy.Constant)
        interp.set_overflow_strategy(R.GNA.Interpolation.Strategy.Constant)
        interp.set_fill_value(0.0)
        interp.setXY(x, y)

        # Keep the sources alive alongside the interpolator
        self.points[k] = (x, y)
        self.interp[k] = interp

    for idx in self.nidx.iterate():
        for k in self.data.keys():
            interp = self.interp[k]
            self.set_input('geonu_spectrum_{}'.format(k), idx,
                           (interp.insegment.points, interp.interp.newx),
                           argument_number=0)
            self.set_output('geonu_spectrum_{}'.format(k), idx,
                            interp.interp.interp)
def normalization(self, reactor, detector, normtype):
    """Returns normalization object for given reactor/detector pair and norm type.

    ``normtype == 'calc'`` builds a ROOT.ReactorNorm bound to per-isotope
    EnergyPerFission parameters and fed with livetime, power rate and fission
    fractions; ``normtype == 'manual'`` builds a ROOT.ReactorNormAbsolute with
    fractions passed directly. Any other value raises ValueError (previously
    this fell through to an UnboundLocalError on the return statement).
    """
    def vec(lst):
        # Convert an iterable of Python strings to std::vector<std::string>
        v = ROOT.vector("std::string")()
        for s in lst:
            v.push_back(s)
        return v
    if normtype == 'calc':
        # Bind per-isotope energy-per-fission parameters from the namespace
        bindings = {}
        for isoname in reactor.fission_fractions:
            bindings["EnergyPerFission_{0}".format(isoname)] = self.ns(
                "isotopes")(isoname)["EnergyPerFission"]
        norm = ROOT.ReactorNorm(vec(reactor.fission_fractions.keys()),
                                bindings=bindings)
        lt = C.Points(detector.livetime)
        lt.points.setLabel('livetime: | ' + detector.name)
        norm.isotopes.livetime(lt)
        pr = C.Points(reactor.power_rate)
        pr.points.setLabel('power: | ' + reactor.name)
        norm.isotopes.power_rate(pr)
        norm.isotopes.setLabel('norm: | {} to {}'.format(
            reactor.name, detector.name))
        for isoname, frac in reactor.fission_fractions.items():
            ff = C.Points(frac)
            ff.points.setLabel('fission frac: | {} at {}'.format(
                isoname, reactor.name))
            norm.isotopes['fission_fraction_{0}'.format(isoname)](ff)
    elif normtype == 'manual':
        # Absolute normalization: fractions are passed in without wrapping
        norm = ROOT.ReactorNormAbsolute(
            vec(reactor.fission_fractions.keys()))
        for isoname, frac in reactor.fission_fractions.items():
            norm.isotopes['fission_fraction_{0}'.format(isoname)](frac)
    else:
        # Fail fast with a clear message instead of UnboundLocalError
        raise ValueError('Unknown normalization type: {!r}'.format(normtype))
    return norm
def test_chi2_v03():
    """Chi2 with a non-diagonal covariance: compare against two numpy forms."""
    npoints, first, shift = 10, 10, 1.0

    dataa = N.arange(first, first + npoints, dtype='d')
    theorya = dataa + shift
    covmat = N.diag(dataa) + 2.0
    La = N.linalg.cholesky(covmat)

    data = C.Points(dataa, labels='Data')
    theory = C.Points(theorya, labels='Theory')
    L = C.Points(La, labels='Stat errors')

    chi = C.Chi2(labels='Chi2')
    chi.add(theory, data, L)

    if graphviz:
        chi.print()
        from gna.graphviz import savegraph
        savegraph(data.single(), 'output/unit_chi2_v03.dot')

    res = chi.chi2.chi2.data()[0]

    # Expectation 1: d^T V^{-1} d with the full covariance matrix
    diff = N.array(dataa - theorya).T
    expected_direct = N.matmul(diff.T, N.matmul(N.linalg.inv(covmat), diff))
    # Expectation 2: via normalized residuals L^{-1} d
    ndiff = N.matmul(N.linalg.inv(La), diff)
    expected_cholesky = N.matmul(ndiff.T, ndiff)

    assert N.allclose(res, expected_direct, rtol=0, atol=1.e-15)
    assert N.allclose(res, expected_cholesky, rtol=0, atol=1.e-15)
def test_typeclass_kind_v02():
    """CheckKind: points inputs pass the points check and fail the hist check."""
    sources = [C.Points(np.arange(6)), C.Points(np.arange(6).reshape(3, 2))]
    outs = [src.single() for src in sources]

    obj = C.DummyType()
    for out in outs:
        obj.add_input(out)

    # Kind 1 corresponds to points, kind 2 to histograms
    dt_points = R.TypeClasses.CheckKindT(context.current_precision())(1)
    R.SetOwnership(dt_points, False)
    dt_points.dump()
    print()

    dt_hist = R.TypeClasses.CheckKindT(context.current_precision())(2)
    R.SetOwnership(dt_hist, False)
    dt_hist.dump()
    print()

    # All inputs are points: the points check succeeds
    obj.add_typeclass(dt_points)
    assert obj.process_types()

    # Adding the histogram check makes type processing fail
    obj.add_typeclass(dt_hist)
    print('Exception expected: ', end='')
    assert not obj.process_types()
def test_viewrear_points_04_auto_fromhist_edges():
    """Test ViewRear on Histogram (determine start and length)"""
    edges = N.arange(13, dtype='d')
    arr = N.zeros(edges.size, dtype=context.current_precision_short())
    points_main = C.Points(arr)
    # (start, length) sub-ranges to overlay on the main array
    ranges = [(0, 3), (0, 12), (1, 3), (6, 6), (6, 1)]
    for rng in ranges:
        print('Range', rng)
        hist_main = C.Histogram(edges, arr[:-1])
        # Renamed from `len` to avoid shadowing the builtin
        start, length = rng
        pedges = edges[start:start + length + 1]
        arr_sub = N.arange(start, start + length + 1, dtype=arr.dtype)
        hist_sub = C.Histogram(pedges, arr_sub[:-1], True)
        points_sub = C.Points(arr_sub, True)

        view = C.ViewRear()
        # Let the view infer the offset from the sub-histogram edges
        view.determineOffset(hist_main, hist_sub, True)
        points_main >> view.view.original
        points_sub >> view.view.rear

        res = view.view.result.data()
        # Expected: main array with the sub-range overwritten
        expect = arr.copy()
        expect[start:start + length + 1] = arr_sub

        print('Result', res)
        print('Expect', expect)
        print()
        assert (res == expect).all()
def test_typeclass_ndim_v03():
    """CheckNdim over positive and negative input index ranges."""
    sources = [
        C.Histogram(np.arange(6), np.arange(5)),
        C.Points(np.arange(5)),
        C.Histogram2d(np.arange(6), np.arange(7)),
        C.Points(np.arange(12).reshape(3, 4)),
    ]
    outs = [src.single() for src in sources]

    obj = C.DummyType()
    for out in outs:
        obj.add_input(out)

    # First two inputs (indices 0..1) are 1d
    checker_1d = R.TypeClasses.CheckNdimT(context.current_precision())(1, (0, 1))
    R.SetOwnership(checker_1d, False)
    checker_1d.dump()
    print()
    obj.add_typeclass(checker_1d)
    assert obj.process_types()

    # Last two inputs (indices -2..-1) are 2d
    checker_2d = R.TypeClasses.CheckNdimT(context.current_precision())(2, (-2, -1))
    R.SetOwnership(checker_2d, False)
    checker_2d.dump()
    print()
    obj.add_typeclass(checker_2d)
    assert obj.process_types()
def build(self):
    """Build daily thermal power and per-isotope fission fractions per core.

    Older variant of the daily-data builder: splits the index into the
    reactor part and the rest, expands per-period core data to a daily grid
    and registers outputs with the (output, name, index) call convention.
    """
    reac, other = self.idx.split(('r'))
    for rit in reac.iterate():
        core_name, = rit.current_values()
        core = self.core_info_daily[core_name]
        days_in_period = np.array(core['days'])
        # Expand per-period power values to one value per day
        thermal_power_daily = np.repeat(core['power'], days_in_period)
        thermal_power_per_core = C.Points(thermal_power_daily)
        thermal_power_per_core.points.setLabel(
            rit.current_format('Thermal power\n{autoindex}'))
        self.objects[(core_name, 'thermal_power')] = thermal_power_per_core
        self.set_output(thermal_power_per_core.single(), 'thermal_power', rit)

        for oit in other.iterate():
            it = rit + oit
            iso_name = it.indices['i'].current
            # map fission fractions and thermal powers to days instead of weeks
            fission_fractions_daily = np.repeat(core['fission_fractions'],
                                               days_in_period)
            fission_per_iso = C.Points(fission_fractions_daily[iso_name])
            fission_per_iso.points.setLabel(
                it.current_format('Fission fractions\n{autoindex}'))
            self.objects[(core_name, 'fission_fractions', iso_name)] = fission_per_iso
            self.set_output(fission_per_iso.single(), 'fission_fractions', it)
def build(self):
    """Build exponential interpolation of reactor antineutrino spectra.

    Creates the common interpolation-edge points, optionally a set of free
    spectral weight parameters (linear or log mode), and one InterpExpo
    transformation per major (isotope) index. The first transformation owns
    the shared segment-sampler input; subsequent ones reuse its segments.
    """
    model_edges_t = C.Points( self.model_edges, ns=self.namespace )
    model_edges_t.points.setLabel('Spectra interpolation edges')
    self.context.objects['edges'] = model_edges_t
    self.shared.reactor_anu_edges = model_edges_t.single()

    if self.cfg.free_params:
        with self.reac_ns:
            tmp = C.VarArray(self.variables, ns=self.reac_ns, labels='Spec pars:\nlog(n_i)')
        if self.cfg.varmode == 'log':
            # Parameters are log(n_i): exponentiate to obtain the weights
            self.context.objects['npar_log'] = tmp
            self.free_weights = R.Exp(ns=self.reac_ns)
            self.free_weights.exp.points( tmp )
            self.free_weights.exp.setLabel('n_i')
        else:
            # Linear mode: the parameters are the weights themselves
            tmp.vararray.setLabel('n_i')
            self.free_weights = tmp

    self.interp_expo = interp_expo = R.InterpExpo(ns=self.reac_ns)
    sampler = interp_expo.transformations.front()
    model_edges_t >> sampler.inputs.edges
    sampler_input = sampler.inputs.points
    interp_expo_t = interp_expo.transformations.back()

    for i, it in enumerate(self.nidx_major):
        isotope, = it.current_values()

        spectrum_raw_t = C.Points( self.spectra[isotope], ns=self.reac_ns )
        spectrum_raw_t.points.setLabel('%s spectrum, original'%isotope)
        self.context.objects[('spectrum_raw', isotope)] = spectrum_raw_t

        if self.cfg.free_params:
            # Scale the raw spectrum by the free weights
            spectrum_t = C.Product(ns=self.reac_ns)
            spectrum_t.multiply( spectrum_raw_t )
            spectrum_t.multiply( self.free_weights.single() )
            spectrum_t.product.setLabel('%s spectrum, corrected'%isotope)
        else:
            spectrum_t = spectrum_raw_t

        if i>0:
            # Every isotope after the first gets its own interpolator transformation
            interp_expo_t = interp_expo.add_transformation()

        model_edges_t >> interp_expo_t.inputs.x
        interp_output = interp_expo.add_input(spectrum_t)
        interp_input = interp_expo_t.inputs.newx

        if i>0:
            self.set_input(self.cfg.name, it, interp_input, argument_number=0)
        else:
            # The first input also feeds the shared segment sampler
            self.set_input(self.cfg.name, it, (sampler_input, interp_input), argument_number=0)

        interp_expo_t.setLabel('%s spectrum, interpolated'%isotope)

        """Store data"""
        self.set_output(self.cfg.name, it, interp_output)
        self.context.objects[('spectrum', isotope)] = spectrum_t
def build(self):
    """Build exponentially-interpolated reactor spectra with optional corrections.

    Loads the tabulated spectra, creates the shared interpolation edges and
    one InterpExpo transformation per isotope index. When correction bundles
    are configured, each raw spectrum is multiplied by the per-isotope
    correction outputs before interpolation.
    """
    self.load_data()

    model_edges_t = C.Points(self.model_edges, ns=self.namespace)
    model_edges_t.points.setLabel('Spectra interpolation edges')
    self.context.objects['edges'] = model_edges_t
    self.shared.reactor_anu_edges = model_edges_t.single()

    self.corrections = None
    if self.cfg.get('corrections', None):
        # Execute the configured correction bundles up front
        self.corrections, = execute_bundles(cfg=self.cfg.corrections,
                                            shared=self.shared)

    self.interp_expo = interp_expo = R.InterpExpo(ns=self.namespace)
    sampler = interp_expo.transformations.front()
    model_edges_t >> sampler.inputs.edges
    sampler_input = sampler.inputs.points
    interp_expo_t = interp_expo.transformations.back()

    for i, it in enumerate(self.nidx_major):
        isotope, = it.current_values()

        spectrum_raw_t = C.Points(self.spectra[isotope], ns=self.namespace)
        spectrum_raw_t.points.setLabel('%s spectrum, original' % isotope)
        self.context.objects[('spectrum_raw', isotope)] = spectrum_raw_t

        if self.corrections:
            # Multiply the raw spectrum by every correction bundle output
            spectrum_t = R.Product(ns=self.namespace)
            spectrum_t.multiply(spectrum_raw_t)
            for corr in self.corrections.bundles.values():
                spectrum_t.multiply(corr.outputs[isotope])
            spectrum_t.product.setLabel('%s spectrum, corrected' % isotope)
        else:
            spectrum_t = spectrum_raw_t

        if i > 0:
            # Every isotope after the first gets its own interpolator transformation
            interp_expo_t = interp_expo.add_transformation()

        model_edges_t >> interp_expo_t.inputs.x
        interp_output = interp_expo.add_input(spectrum_t)
        interp_input = interp_expo_t.inputs.newx

        if i > 0:
            self.set_input(self.cfg.name, it, interp_input, argument_number=0)
        else:
            # The first input also feeds the shared segment sampler
            self.set_input(self.cfg.name, it, (sampler_input, interp_input),
                           argument_number=0)

        interp_expo_t.setLabel('%s spectrum, interpolated' % isotope)

        """Store data"""
        self.set_output(self.cfg.name, it, interp_output)
        self.context.objects[('spectrum', isotope)] = spectrum_t
def test_view_01():
    """Test ViewHistBased: a [threshold, ceiling) window over a histogram
    defines views of hist/points inputs; views are compared to numpy slices.
    """
    start = 2.0
    size = 12
    edges = N.arange(start, start + 12, dtype='d')
    # Bin-center-sized and bin-edge-sized arrays to be viewed
    arr_centers = N.arange(-2, -2 + size - 1, dtype='d')
    arr_edges = N.arange(-1, -1 + size, dtype='d')
    print('Input edges', edges)
    print('Input arr (centers)', arr_centers)
    print('Input arr (edges)', arr_edges)

    hist = C.Histogram(edges)
    hist_in = C.Histogram(edges, arr_centers)
    points_in = C.Points(arr_centers)
    points_e_in = C.Points(arr_edges)

    # (threshold, ceiling) pairs relative to `start`, including fractional ones
    lims = [(-1.0, 2.0), (0, 3), (0, 12), (1, 3), (6, 6.1), (1, 6), (10, 11),
            (0.1, 1.5), (1.1, 1.8), (5.5, 10.9), (10.1, 10.9), (10.1, 13.0)]
    for (threshold, ceiling) in lims:
        print('Limits0 ', threshold, ceiling)
        # Shift the limits into the absolute edge coordinates
        threshold += start
        ceiling += start
        print(' Limits', threshold, ceiling)

        view = C.ViewHistBased(hist, threshold, ceiling)
        hist_out = view.add_input(hist_in)
        points_out = view.add_input(points_in)
        points_e_out = view.add_input(points_e_in)

        # Expected bin index range: floor/ceil of the limits, clipped at 0
        idx_thresh = N.max((int(N.floor(threshold - start)), 0))
        idx_ceiling = int(N.ceil(ceiling - start))
        res_expected = edges[idx_thresh:idx_ceiling + 1]
        print(' Indices', idx_thresh, idx_ceiling)

        hist_out_res = hist_out.data()
        hist_out_exp = arr_centers[idx_thresh:idx_ceiling]
        points_out_res = points_out.data()
        points_out_exp = arr_centers[idx_thresh:idx_ceiling]
        points_e_out_res = points_e_out.data()
        # Edge-sized arrays keep one extra element
        points_e_out_exp = arr_edges[idx_thresh:idx_ceiling + 1]

        res = N.array(view.view.view.datatype().edges)
        print(' Edges result', res)
        print(' Edges expect', res_expected)
        print(' Hist result', hist_out_res)
        print(' Hist expect', hist_out_exp)
        print(' Poits result', points_out_res)
        print(' Poits expect', points_out_exp)
        print(' Poits (edges) result', points_e_out_res)
        print(' Poits (edges) expect', points_e_out_exp)
        print()

        assert (res == res_expected).all()
        assert (hist_out_res == hist_out_exp).all()
        assert (points_out_res == points_out_exp).all()
        assert (points_e_out_res == points_e_out_exp).all()
def define_variables(self):
    """Register DAQ-period parameters and build daily livetime/efficiency outputs.

    Creates fixed parameters describing the DAQ period, then for every
    detector index wraps daily livetime and efficiency into Points, forms
    the effective-livetime product and registers the three daily outputs.
    """
    daq = self.common_namespace('daq')
    daq.reqparameter('first_day', central=self.info['first_day'],
                     fixed=True, sigma=0.01,
                     label='First DAQ day start: %s' % self.info['str_first_day'])
    daq.reqparameter('last_day', central=self.info['last_day'],
                     fixed=True, sigma=0.01,
                     label='Last DAQ day end: %s' % self.info['str_last_day'])
    daq.reqparameter('ndays', central=self.info['days'],
                     fixed=True, sigma=0.01,
                     label='Total number of days')

    data_lt = 0.0
    for it in self.idx.iterate():
        ad, = it.current_values()

        data = self.data.get(ad, None)
        if data is None:
            raise self.exception('Failed to retrieve data for %s from %s' %
                                 (ad, self.cfg.file))

        # Accumulate total livetime (array sum) to count days with DAQ activity
        data_lt = data['livetime'] + data_lt

        livetime = C.Points(data['livetime'])
        livetime.points.setLabel(
            it.current_format('Livetime\n{autoindex}'))
        eff = C.Points(data['eff'])
        eff.points.setLabel(
            it.current_format('Efficiency (mu*mult)\n{autoindex}'))
        # Effective livetime = livetime * efficiency (per day)
        efflivetime = R.Product(livetime, eff)
        efflivetime.product.setLabel(
            it.current_format('Livetime (eff)\n{autoindex}'))

        self.objects[('livetime', ad)] = livetime
        self.objects[('eff', ad)] = eff
        self.objects[('efflivetime', ad)] = efflivetime

        self.set_output(livetime.single(), 'livetime_daily', it)
        self.set_output(eff.single(), 'eff_daily', it)
        self.set_output(efflivetime.single(), 'efflivetime_daily', it)

    # Count the days where the summed livetime is positive
    ndays_daq = (data_lt > 0.0).sum()
    daq.reqparameter('ndays_daq', central=ndays_daq, fixed=True, sigma=0.01,
                     label='Total number of DAQ days (exclude no DAQ)')
def make_prediction(self, from_neutrino, to_neutrino, Enu_arr, L):
    """Compute and plot oscillation probabilities for the standard (PMNS)
    and decoherence models over the energy array Enu_arr at baseline L.

    Plots the plane-wave probability plus one decoherence curve per sigma
    value; the figure is written into a per-flavour PDF via open/close_pdf.
    """
    with self.ns:
        Enu = C.Points(Enu_arr)
        oscprob_classes = {
            'standard': ROOT.OscProbPMNS,
            'decoh': ROOT.OscProbPMNSDecoh,
        }
        oscprobs = {}
        data = {}
        for name, cls in oscprob_classes.items():
            oscprob = cls(from_neutrino, to_neutrino)
            # Wire each probability component to the transformation providing it
            for compname in list(oscprob.probsum.inputs.keys()):
                if compname != 'comp0':
                    for tname, tf in oscprob.transformations.items():
                        if compname in tf.outputs:
                            oscprob.probsum[compname](tf[compname])
                            break
                else:
                    # comp0 is the constant term: feed a unit array
                    oscprob.probsum[compname](C.Points(
                        np.ones_like(Enu_arr)))
            for tf in oscprob.transformations.values():
                if 'Enu' in tf.inputs:
                    tf.inputs.Enu(Enu)
            self.ns.addobservable('probability_{0}'.format(name),
                                  oscprob.probsum)
            oscprobs[name] = oscprob
            data[name] = oscprob.probsum

    data_stand, data_decoh = data['standard'], data['decoh']

    self.ns["L"].set(L)
    self.ns["Delta"].set(0.0)

    # Decoherence widths to scan (first value ~ plane-wave limit)
    sigma_arr = [1.e-17, 1e-1, 2e-1, 5e-1]

    nu_names = ['nu_e', 'nu_mu', 'nu_tau']
    filename = 'oscprob_' + nu_names[
        from_neutrino.flavor] + '_' + nu_names[
            to_neutrino.flavor] + '.pdf'
    self.open_pdf(filename, oscprobs['standard'].__class__.__name__)

    plt.plot(Enu_arr, data_stand.data(), label=r"$P_{PW}$", linewidth=3)
    for sigma in sigma_arr:
        self.ns["sigma"].set(sigma)
        plt.plot(Enu_arr, data_decoh.data(),
                 label=r"$\sigma={0}$".format(sigma))
    nu_tex = [r"$\nu_e$", r"$\nu_\mu$", r"$\nu_\tau$"]
    self.close_pdf(
        'E, [MeV]', r'$P($' + nu_tex[from_neutrino.flavor] + r'$\to$' +
        nu_tex[to_neutrino.flavor] + r'$)$')
def build(self):
    """Older variant of the reactor-spectra builder (explicit bind calls).

    Same structure as the newer build: shared interpolation edges, optional
    correction bundles and one InterpExpo transformation per isotope, but
    using the explicit ``bind_transformations(False)`` / ``clone=`` API.
    """
    model_edges_t = C.Points( self.model_edges, ns=self.common_namespace )
    model_edges_t.points.setLabel('E0 (bin edges)')
    self.objects['edges'] = model_edges_t
    self.shared.reactor_anu_edges = model_edges_t.single()

    self.corrections=None
    if self.cfg.get('corrections', None):
        # Execute the configured correction bundles up front
        self.corrections, = execute_bundles(cfg=self.cfg.corrections, shared=self.shared)

    self.interp_expo = interp_expo = R.InterpExpo(ns=self.common_namespace)
    sampler = interp_expo.transformations.front()
    model_edges_t >> sampler.inputs.edges
    sampler_input = sampler.inputs.points
    # Defer binding: transformations are wired manually below
    interp_expo.bind_transformations(False)

    interp_expo_t = interp_expo.transformations.back()

    for i, it in enumerate(self.idx):
        isotope = it.current_values()[0]

        spectrum_raw_t = C.Points( self.spectra[isotope], ns=self.common_namespace )
        spectrum_raw_t.points.setLabel('S0(E0):\n'+isotope)
        self.objects[('spectrum_raw', isotope)] = spectrum_raw_t

        if self.corrections:
            # Multiply the raw spectrum by every correction bundle output
            spectrum_t = R.Product(ns=self.common_namespace)
            spectrum_t.multiply( spectrum_raw_t )
            for corr in self.corrections.bundles.values():
                spectrum_t.multiply( corr.outputs[isotope] )
            spectrum_t.product.setLabel('S(E0):\n'+isotope)
        else:
            spectrum_t = spectrum_raw_t

        if i>0:
            # Every isotope after the first gets its own interpolator transformation
            interp_expo_t = interp_expo.add_transformation(False)
            interp_expo.bind_transformations(False)

        model_edges_t >> interp_expo_t.inputs.x
        interp_output = interp_expo.add_input(spectrum_t)
        interp_input = interp_expo_t.inputs.newx

        if i>0:
            self.set_input(interp_input, self.cfg.name, it, clone=0)
        else:
            # The first input also feeds the shared segment sampler
            self.set_input((sampler_input, interp_input), self.cfg.name, it, clone=0)

        interp_expo_t.setLabel('S(E):\n'+isotope)

        """Store data"""
        self.set_output(interp_output, self.cfg.name, it)
        self.objects[('spectrum', isotope)] = spectrum_t
def make(nsources, ntargets):
    """Create `nsources` single-point sources followed by `ntargets`
    debug transformations, returned as one flat list.

    NOTE(review): `constant` is taken from the enclosing scope — confirm
    it is defined wherever this helper is used.
    """
    sources = []
    for _ in range(nsources):
        sources.append(C.Points([constant]))
    targets = [R.DebugTransformation('debug_%02d' % idx)
               for idx in range(ntargets)]
    return sources + targets
def test_points_v01():
    """Wrap a numpy matrix into Points, pass it through Identity and compare."""
    mat = N.arange(12, dtype='d').reshape(3, 4)

    print('Input matrix (numpy)')
    print(mat)
    print()

    # Build the chain: Points source feeding an Identity node
    pts = C.Points(mat)
    ident = C.Identity()
    ident.identity.source(pts.points.points)

    res = ident.identity.target.data()
    dtype_info = ident.identity.target.datatype()

    # Dump the internals for inspection
    print('Eigen dump (C++)')
    ident.dump()
    print()

    print('Points output')
    print(pts.points.points.data())

    print('Result (C++ Data to numpy)')
    print(res, res.dtype)
    print()

    print('Datatype:', str(dtype_info))

    assert N.allclose(mat, res), "C++ and Python results doesn't match"
def test_typeclass_same_v02():
    """Last input has another shape"""
    arrays = [np.arange(12, dtype='d').reshape(3, 4) for _ in range(5)]
    arrays[-1] = arrays[-1].reshape(4, 3)
    points = [C.Points(arr) for arr in arrays]
    outputs = [pt.points.points for pt in points]
    print(outputs)

    obj = C.DummyType()
    for out in outputs:
        obj.add_input(out)

    # Inputs 1..-2 share one shape: this check must pass
    same_inner = R.TypeClasses.CheckSameTypesT(context.current_precision())((1, -2))
    R.SetOwnership(same_inner, False)
    same_inner.dump()
    print()
    obj.add_typeclass(same_inner)
    assert obj.process_types()

    # Inputs -2..-1 differ in shape: this check must fail
    same_tail = R.TypeClasses.CheckSameTypesT(context.current_precision())((-2, -1))
    R.SetOwnership(same_tail, False)
    same_tail.dump()
    print()
    obj.add_typeclass(same_tail)
    print('Exception expected: ', end='')
    assert not obj.process_types()
def test_typeclass_passeach_02():
    """Pass with step 2"""
    objects = [
        C.Histogram2d(np.arange(4), np.arange(5)),
        C.Histogram(np.arange(4)),
        C.Points(np.arange(20).reshape(4, 5))
    ]
    outputs = [p.single() for p in objects]
    obj = C.DummyType()
    # NOTE(review): `i` is immediately reused by the loop below and `i1` is
    # unused — both exist only to force add_input over all outputs (twice).
    i = list(map(obj.add_input, outputs))
    i1 = list(map(obj.add_input, outputs))
    for i in range(3):
        obj.add_output()
    # Pass the types of inputs 0..-1 with step 2 (i.e. 0, 2, 4) to outputs 0..-1
    dt1 = R.TypeClasses.PassEachTypeT(context.current_precision())((0, -1, 2), (0, -1))
    R.SetOwnership(dt1, False)
    dt1.dump(); print()
    obj.add_typeclass(dt1)
    res = obj.process_types(); assert res
    obj.print()

    dta = outputs[0].datatype()
    dtb = outputs[1].datatype()
    dtc = outputs[2].datatype()

    # Inputs 0, 2, 4 correspond to outputs[0], outputs[2], outputs[1]
    doutputs = obj.transformations.back().outputs
    assert doutputs[0].datatype() == dta
    assert doutputs[1].datatype() == dtc
    assert doutputs[2].datatype() == dtb
def test_typeclass_passtype():
    """Last input has another edges"""
    objects = [
        C.Histogram2d(np.arange(4), np.arange(5)),
        C.Histogram(np.arange(4)),
        C.Points(np.arange(12).reshape(3, 4))
    ]
    outputs = [p.single() for p in objects]
    obj = C.DummyType()
    k = list(map(obj.add_input, outputs))
    for i in range(5):
        obj.add_output()
    # Copy the type of input 0 to outputs 0..1 and of input 1 to outputs 2..-1
    dt1 = R.TypeClasses.PassTypeT(context.current_precision())((0,), (0, 1))
    dt2 = R.TypeClasses.PassTypeT(context.current_precision())((1,), (2, -1))
    R.SetOwnership(dt1, False)
    R.SetOwnership(dt2, False)
    dt1.dump(); print()
    dt2.dump(); print()
    obj.add_typeclass(dt1)
    obj.add_typeclass(dt2)
    res = obj.process_types(); assert res
    obj.print()

    dta = outputs[0].datatype()
    dtb = outputs[1].datatype()

    doutputs = obj.transformations.back().outputs
    assert doutputs[0].datatype() == dta
    assert doutputs[1].datatype() == dta
    assert doutputs[2].datatype() == dtb
    assert doutputs[3].datatype() == dtb
    assert doutputs[4].datatype() == dtb
def test_anue_free_spectra(tmp_path):
    """
    Test implementation of a model of antineutrino spectra with free
    parameters in exponential parametrization.
    """
    # Antineutrino energy grid for evaluating the spectra
    _enu = np.linspace(1.8, 8.0, 500, dtype='d')
    Enu = C.Points(_enu, labels='anue energy')

    indices = [('i', 'isotope', ['U235', 'U238', 'Pu239', 'Pu241'])]

    # Expression: per-isotope spectra evaluated on the provided energy points
    expr = ['anuspec[i,r](enu())']
    a = Expression_v01(expr, indices = NIndex.fromlist(indices))
    a.parse()
    lib = dict()
    a.guessname(lib, save=True)

    ns_anuexpr = env.globalns('anue_expr')
    cfg = NestedDict(
        anuspec = NestedDict(
            bundle = dict(name='reactor_anu_spectra', version='v04'),
            name = 'anuspec',
            filename = ['data/reactor_anu_spectra/Huber/Huber_smooth_extrap_{isotope}_13MeV0.01MeVbin.dat',
                        'data/reactor_anu_spectra/Mueller/Mueller_smooth_extrap_{isotope}_13MeV0.01MeVbin.dat'],
            # Free log-weights, one per interpolation edge
            varmode='log',
            varname='anu_weight_{index}',
            free_params=True,
            ns_name='spectral_weights',
            edges = np.concatenate( ( np.arange( 1.8, 8.7, 0.5 ), [ 12.3 ] ) ),
        ),
        enu = NestedDict(
            bundle = NestedDict(name='predefined', version='v01', major=''),
            name = 'enu',
            inputs = None,
            outputs = Enu.single(),
        ),
    )
    context = ExpressionContext_v01(cfg, ns=ns_anuexpr)
    a.build(context)
    ns_anuexpr.printparameters(labels=True)

    # Plot the U235 spectrum with default and with shifted weights
    u235_spec = context.outputs.anuspec.U235
    u235_spec.plot_vs(Enu.single(), label='default pars')
    ns_anuexpr['spectral_weights.anu_weight_5'].set(0.3)
    ns_anuexpr['spectral_weights.anu_weight_7'].set(-0.3)

    plt.rcParams.update({'font.size': 14})
    plt.rcParams.update({'text.usetex': True})
    u235_spec.plot_vs(Enu.single(), label='update pars')
    plt.yscale('log')
    plt.xlabel(r'$E_{\nu}$, MeV')
    plt.ylabel('Anue per MeV')
    plt.legend()
    plt.title('Antineutrino spectrum')
    path = os.path.join(str(tmp_path), 'anuspec.png')
    savefig(path, dpi=300)
    allure_attach_file(path)
def test_filllike_v02(function_name):
    """FillLike keeps emitting its constant and stays untainted on parameter change."""
    size = 5
    source_array = N.arange(0, size)

    # Environment with a single weight parameter feeding a WeightedSum
    ns = env.globalns(function_name)
    weight = ns.defparameter('w1', central=1.0, sigma=0.1)
    source_points = C.Points(source_array)
    with ns:
        ws = C.WeightedSum(['w1'], [source_points.points.points])

    ws.print()
    print()

    flvalue = 2.0
    fl = C.FillLike(flvalue)
    ws >> fl.fill.inputs[0]
    out = fl.fill.outputs[0]

    data = out.data()
    print('data:', data)
    print()
    compare_filllike(data, [flvalue] * size, 'Data output failed')

    # Changing the upstream parameter must not taint or change the output
    print('Change parameter')
    weight.set(-1.0)
    taintflag = fl.fill.tainted()
    print('data:', data)
    print('taintflag:', taintflag)
    compare_filllike(data, [flvalue] * size, 'Data output failed')
    compare_filllike(taintflag, False, 'Taintflag should be false')
def gpuargs_make(nsname, mat1, mat2):
    """Build a namespace with three fixed dummy parameters and two Points inputs.

    Returns (dummy, points1, points2, ns) where the Dummy object is created
    inside the namespace.
    """
    from gna.env import env

    ns = env.globalns(nsname)
    ns.reqparameter('par1', central=1.0, fixed=True, label='Dummy parameter 1')
    ns.reqparameter('par2', central=1.5, fixed=True, label='Dummy parameter 2')
    ns.reqparameter('par3', central=1.01e5, fixed=True, label='Dummy parameter 3')
    ns.printparameters(labels=True)

    first_points = C.Points(mat1)
    second_points = C.Points(mat2)
    with ns:
        dummy = C.Dummy(4, "dummy", ['par1', 'par2', 'par3'])

    return dummy, first_points, second_points, ns
def test_sumaxis_01(kind, axis):
    """Test SumAxis: reduce a 2d input (points or 2d histogram) along `axis`.

    Compares the transformation result with numpy's sum and, for the
    histogram case, checks that the remaining axis keeps its edges.
    (Renamed local `sum` to avoid shadowing the builtin; dropped the unused
    `size` local.)
    """
    inp = np.arange(12.0).reshape(3, 4)
    shouldbe = inp.sum(axis=axis)

    xedges = np.arange(inp.shape[0] + 1)
    yedges = np.arange(inp.shape[1] + 1)
    edges = (xedges, yedges)

    if kind == 'points':
        Inp = C.Points(inp)
    else:
        Inp = C.Histogram2d(xedges, yedges, inp)

    sumaxis = C.SumAxis(axis, Inp)
    sumaxis.printtransformations()

    res = sumaxis.sumaxis.result.data()

    print('Input', inp)
    print('Result ({})'.format(axis), res)
    print('Should be', shouldbe)

    assert np.allclose(res, shouldbe, atol=0, rtol=0)

    if kind == 'hist':
        # Summing over one axis keeps the other axis' edges
        newedges = sumaxis.sumaxis.result.datatype().edges
        select = 1 if axis == 0 else 0
        print('Original edges', edges)
        print('New edges', newedges)
        assert np.allclose(edges[select], newedges)
def _pointize(self, obj):
    """Given object checks whether it is C++ type (perhaps better to
    refactor it) and if not make a Points out of it.
    Use case -- turning numpy array into points.
    """
    try:
        # Modern PyROOT: typeid raises KeyError for non-C++ objects
        cppyy.typeid(obj)
    except KeyError:
        # Not a C++ object: wrap it into Points
        return C.Points(obj)
    except AttributeError:
        # Legacy PyROOT has no cppyy.typeid: fall back to an isinstance check
        if not isinstance(type(obj), ROOT.PyRootType):
            return C.Points(obj)
    return obj
def init(self):
    """Build a Gauss-Legendre integrated spectrum observable: a Poisson-weighted
    sum of Gaussian peaks (multiplicities 1..PoissonOrder) over a background.
    """
    ns = env.ns(self.opts.name)
    ns.reqparameter('BackgroundRate', central=0, sigma=0.1)
    ns.reqparameter('Mu', central=1, sigma=1)
    ns.reqparameter('E0', central=2, sigma=0.05)
    ns.reqparameter('Width', central=0.2, sigma=0.005)

    # Integration grid: nbins bins, the same Gauss-Legendre order in each
    edges = np.linspace(self.opts.Emin, self.opts.Emax, self.opts.nbins + 1)
    orders = np.array([self.opts.order] * (len(edges) - 1), dtype=int)
    integrator = ROOT.GaussLegendre(edges, orders, len(orders))
    hist = ROOT.GaussLegendreHist(integrator)
    signal = ROOT.Sum()

    n = self.opts.PoissonOrder
    model = {}
    with ns:
        for i in range(1, n + 1):
            print(i, n)
            # i-peak model evaluated on the integration points
            model[i] = ROOT.GaussianPeakWithBackground(i)
            model[i].rate.E(integrator.points.x)
            prod = ROOT.Product()
            prod.multiply(model[i].rate.rate)
            # Weight the i-peak term with the Poisson probability of i events
            poisson_factor = poisson.pmf(i, self.opts.PoissonMean)
            poisson_factor_prod = C.Points([poisson_factor])
            print(type(model[i].rate), poisson_factor, poisson_factor_prod)
            prod.multiply(poisson_factor_prod)
            signal.add(prod)
    hist.hist.f(signal)
    ns.addobservable('spectrum', hist.hist)
def check_condproduct(function_name, arrays):
    """ConditionalProduct check: full product when condition=1, product of
    all but the last array when condition=0."""
    print('Test ', function_name, len(arrays), ':', sep='')
    for array in arrays:
        print(array)
    print()

    nprod = len(arrays) - 1
    # Expected values: elementwise product of all arrays / of the first nprod
    truth_full = 1.0
    truth_partial = 1.0
    for pos, arr in enumerate(arrays):
        truth_full = truth_full * arr
        if pos < nprod:
            truth_partial = truth_partial * arr

    ns = env.globalns(function_name)
    condition = ns.defparameter('condition', central=1.0, fixed=True)
    points = [C.Points(array) for array in arrays]
    with ns:
        prod = C.ConditionalProduct(nprod, 'condition',
                                    outputs=[p.points.points for p in points])

    calc_on = prod.single().data().copy()
    print('Result (1)', condition.value(), calc_on, end='\n\n')

    condition.set(0.0)
    calc_off = prod.single().data().copy()
    print('Result (0)', condition.value(), calc_off, end='\n\n')

    assert (calc_on == truth_full).all()
    assert (calc_off == truth_partial).all()
def prepare_inputs(self):
    """Assemble the toy-MC input tuple according to self.mctype.

    NormalToyMC additionally needs the statistical errors (wrapped here and
    cached as self.input_err); CovarianceToyMC needs the Cholesky factor
    self.input_L prepared elsewhere; any other type uses the histogram only.
    """
    if self.mctype == 'NormalToyMC':
        self.input_err = C.Points(self.err_stat)
        extra = (self.input_err,)
    elif self.mctype == 'CovarianceToyMC':
        extra = (self.input_L,)
    else:
        extra = ()
    self.inputs = (self.hist,) + extra