def test_typeclass_passtype():
    """Last input has different edges"""
    objects = [
            C.Histogram2d(np.arange(4), np.arange(5)),
            C.Histogram(np.arange(4)),
            C.Points(np.arange(12).reshape(3, 4))
            ]
    outputs = [p.single() for p in objects]
    obj = C.DummyType()
    list(map(obj.add_input, outputs))
    for i in range(5):
        obj.add_output()

    dt1 = R.TypeClasses.PassTypeT(context.current_precision())((0,), (0, 1))
    dt2 = R.TypeClasses.PassTypeT(context.current_precision())((1,), (2, -1))
    R.SetOwnership(dt1, False)
    R.SetOwnership(dt2, False)
    dt1.dump(); print()
    dt2.dump(); print()
    obj.add_typeclass(dt1)
    obj.add_typeclass(dt2)
    res = obj.process_types()
    assert res
    obj.print()

    dta = outputs[0].datatype()
    dtb = outputs[1].datatype()

    doutputs = obj.transformations.back().outputs
    assert doutputs[0].datatype() == dta
    assert doutputs[1].datatype() == dta
    assert doutputs[2].datatype() == dtb
    assert doutputs[3].datatype() == dtb
    assert doutputs[4].datatype() == dtb
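# Interpretation of the PassTypeT arguments, inferred from the asserts above
# (a reading of this test, not an authoritative API description): the first
# tuple selects the source inputs and the second one the destination outputs,
# with negative indices counted from the end. So (0,), (0, 1) propagates the
# type of input 0 to outputs 0-1, while (1,), (2, -1) propagates the type of
# input 1 to outputs 2 through the last one.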
def build(self):
    if self.xorders.size > 1:
        if self.xorders.size + 1 != self.edges.size:
            raise Exception(
                'Incompatible edges and xorders definition:\n {!r}\n {!r}'
                .format(self.edges, self.xorders))
        self.integrator = R.GaussLegendre2d(self.edges, self.xorders, self.edges.size - 1,
                                            -1.0, 1.0, self.cfg.yorder)
    else:
        self.integrator = R.GaussLegendre2d(self.edges, int(self.xorders[0]), self.edges.size - 1,
                                            -1.0, 1.0, self.cfg.yorder)
    self.integrator.points.setLabel('Gauss-Legendre 2d')
    self.integrator.points.x.setLabel(self.cfg.variables[0])
    self.integrator.points.xedges.setLabel('%s edges' % self.cfg.variables[0])
    self.integrator.points.y.setLabel(self.cfg.variables[1])

    self.set_output(self.integrator.points.x, self.cfg.variables[0])
    self.set_output(self.integrator.points.xedges, '%s_edges' % self.cfg.variables[0])
    self.set_output(self.integrator.points.xhist, '%s_hist' % self.cfg.variables[0])
    self.set_output(self.integrator.points.y, self.cfg.variables[1])

    for i, it in enumerate(self.idx.iterate()):
        hist = R.GaussLegendre2dHist(self.integrator)
        hist.hist.setLabel(it.current_format(name='hist'))
        self.set_input(hist.hist.f, self.cfg.name, it, clone=0)
        self.set_output(hist.hist.hist, self.cfg.name, it)
def test_typeclass_ndim_v03():
    objects = [
            C.Histogram(np.arange(6), np.arange(5)),
            C.Points(np.arange(5)),
            C.Histogram2d(np.arange(6), np.arange(7)),
            C.Points(np.arange(12).reshape(3, 4))
            ]
    outputs = [p.single() for p in objects]
    obj = C.DummyType()
    list(map(obj.add_input, outputs))

    dt = R.TypeClasses.CheckNdimT(context.current_precision())(1, (0, 1))
    R.SetOwnership(dt, False)
    dt.dump()
    print()
    obj.add_typeclass(dt)
    res = obj.process_types()
    assert res

    dt1 = R.TypeClasses.CheckNdimT(context.current_precision())(2, (-2, -1))
    R.SetOwnership(dt1, False)
    dt1.dump()
    print()
    obj.add_typeclass(dt1)
    res = obj.process_types()
    assert res
def test_typeclass_same_v02():
    """Last input has a different shape"""
    arrays = [np.arange(12, dtype='d').reshape(3, 4) for i in range(5)]
    arrays[-1] = arrays[-1].reshape(4, 3)
    points = [C.Points(a) for a in arrays]
    outputs = [p.points.points for p in points]
    print(outputs)
    obj = C.DummyType()
    list(map(obj.add_input, outputs))

    dt = R.TypeClasses.CheckSameTypesT(context.current_precision())((1, -2))
    R.SetOwnership(dt, False)
    dt.dump(); print()
    obj.add_typeclass(dt)
    res = obj.process_types()
    assert res

    dt1 = R.TypeClasses.CheckSameTypesT(context.current_precision())((-2, -1))
    R.SetOwnership(dt1, False)
    dt1.dump(); print()
    obj.add_typeclass(dt1)
    print('Exception expected: ', end='')
    res = obj.process_types()
    assert not res
def test_par_02(floatprecision='double'):
    """Test getters (vec)"""
    assert floatprecision in ['double', 'float']
    const = N.array([1.5, 2.6, 3.7], dtype=floatprecision[0])
    var = R.parameter(floatprecision)('testpar', const.size)
    taintflag = R.taintflag('tflag')
    var.subscribe(taintflag)

    print('Set', const)
    var.set(const)
    print('Taintflag', bool(taintflag))

    check('ret scalar', None, var.value(), const[0], taintflag, True)
    for i, val in enumerate(const):
        check('ret index[%i]' % i, None, var.value(i), val, taintflag, False)
    check('ret vector', None, list(var.values()), const, taintflag, False)

    ret = N.zeros(const.size, dtype=floatprecision[0])
    before = ret.copy()
    var.values(ret)
    check('arg C array', before, ret, const, taintflag, False)

    ret = R.vector(floatprecision)(const.size)
    before = list(ret)
    var.values(ret)
    check('arg std vector', before, list(ret), const, taintflag, False)
def test_par_03(floatprecision='double'):
    """Test setters"""
    assert floatprecision in ['double', 'float']
    var = R.parameter(floatprecision)('testpar')
    taintflag = R.taintflag('tflag')
    var.subscribe(taintflag)
    taintflag.set(False)

    const = 1.5
    var.set(const)
    check('scalar', None, var.value(), const, taintflag)

    const += 1.0
    var.set(0, const)
    check('index [0]', None, var.value(), const, taintflag)

    const += 1.0
    arr = N.array([const], dtype=floatprecision[0])
    var.set(arr)
    check('C array', None, var.value(), const, taintflag)

    const += 1.0
    arr = R.vector(floatprecision)(1, const)
    var.set(arr)
    check('std vector', None, var.value(), const, taintflag)
def test_gauss_par_repl():
    p1 = R.GaussianParameter('double')('test1')
    p1.set(-1.0)
    p2 = R.GaussianParameter('double')('test2')
    p2.set(-2.0)

    def prt(title):
        if title:
            print(title)
        print(' p1', p1.name(), p1.value())
        print(' p2', p2.name(), p2.value())
        print()

    prt('Before replacement')

    p2.getVariable().replace(p1.getVariable())
    # p2.getParameter().replace(p1.getVariable())

    prt('After replacement')
    assert p1.value() == p2.value()

    p1.set(1)
    prt('Change parameter 1')
    assert p1.value() == p2.value()

    p2.set(2)
    prt('Change parameter 2')
    assert p1.value() == p2.value()
def test_par_01(floatprecision='double'):
    """Test getters"""
    assert floatprecision in ['double', 'float']
    var = R.parameter(floatprecision)('testpar')
    taintflag = R.taintflag('tflag')
    var.subscribe(taintflag)

    const = 1.5
    print('Set', const)
    var.set(const)
    print('Taintflag', bool(taintflag))

    check('ret scalar', None, var.value(), const, taintflag, True)
    check('ret index[0]', None, var.value(0), const, taintflag, False)
    check('ret vector', None, list(var.values()), [const], taintflag, False)

    ret = N.zeros(1, dtype=floatprecision[0])
    before = ret.copy()
    var.values(ret)
    check('arg C array', before, ret, [const], taintflag, False)

    ret = R.vector(floatprecision)(1)
    before = list(ret)
    var.values(ret)
    check('arg std vector', before, list(ret), [const], taintflag, False)
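# Minimal usage sketch of the scalar parameter API exercised by the parameter
# tests above; it only uses calls that appear in those tests (R.parameter,
# R.taintflag, subscribe, set, value) and assumes the same bindings are loaded.
def _demo_parameter():
    par = R.parameter('double')('demopar')
    flag = R.taintflag('demoflag')
    par.subscribe(flag)

    par.set(3.0)
    print('tainted after set:', bool(flag))  # the tests expect set() to taint subscribers
    print('value:', par.value())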
def make_sample_file(filename):
    file = R.TFile(filename, 'recreate')
    assert not file.IsZombie()

    it = 1
    name = 'hist'
    h = R.TH1D(name, name, 10, 0, 10)
    h.SetBinContent(it, 1)
    it += 1
    file.WriteTObject(h)

    for gr, dets in cfg.groups.items():
        name = 'hist_{group}'.format(group=gr)
        h = R.TH1D(name, name, 10, 0, 10)
        h.SetBinContent(it, 1)
        it += 1
        file.WriteTObject(h)

        for det in dets:
            name = 'hist_{group}_{det}'.format(group=gr, det=det)
            h = R.TH1D(name, name, 10, 0, 10)
            h.SetBinContent(it, 1)
            it += 1
            file.WriteTObject(h)

    print('Generated file contents')
    file.ls()
    file.Close()
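# Usage sketch for make_sample_file() above (assumes the module-level `cfg`
# with a `groups` mapping, as used inside the function, and a working ROOT
# installation; the file name is arbitrary): generate the file and read one
# of the histograms back.
def _demo_sample_file(filename='/tmp/sample_hists.root'):
    make_sample_file(filename)
    f = R.TFile(filename)          # reopen in read mode
    h = f.Get('hist')              # the plain 'hist' histogram written first
    print('bin 1 content:', h.GetBinContent(1))
    f.Close()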
def build(self):
    self.comp0 = R.FillLike(1.0, labels='OP comp0')

    for it_source in self.idx_source:
        for it_detector in self.idx_detector:
            it_dist = it_source + it_detector
            dist = it_dist.current_format(name='baseline')
            oscprobkey = it_dist.current_format('{autoindex}')[1:]

            with self.namespace:
                with self.namespace('pmns'):
                    oscprob = self.context.objects[oscprobkey] = R.OscProbPMNS(R.Neutrino.ae(), R.Neutrino.ae(), dist)

            for it_component in self.idx_component:
                component, = it_component.current_values()
                it = it_source + it_detector + it_component
                if component == 'comp0':
                    output = self.comp0.fill.outputs['a']
                    input = self.comp0.fill.inputs['a']
                else:
                    if component not in oscprob.transformations:
                        raise Exception('No component %s in oscprob transformation' % component)

                    trans = oscprob.transformations[component]
                    trans.setLabel(it.current_format('OP {component}: {reactor}-\\>{detector}'))
                    output = trans[component]
                    input = trans['Enu']

                self.set_input('oscprob', it, input, argument_number=0)
                self.set_output('oscprob', it, output)
def test_typeclass_kind_v01():
    objects = [
            C.Histogram(np.arange(6), np.arange(5)),
            C.Histogram2d(np.arange(6), np.arange(7))
            ]
    outputs = [p.single() for p in objects]
    obj = C.DummyType()
    list(map(obj.add_input, outputs))

    dt_points = R.TypeClasses.CheckKindT(context.current_precision())(1)
    R.SetOwnership(dt_points, False)
    dt_points.dump()
    print()

    dt_hist = R.TypeClasses.CheckKindT(context.current_precision())(2)
    R.SetOwnership(dt_hist, False)
    dt_hist.dump()
    print()

    obj.add_typeclass(dt_hist)
    res = obj.process_types()
    assert res

    obj.add_typeclass(dt_points)
    print('Exception expected: ', end='')
    res = obj.process_types()
    assert not res
def unpack_hist1(output, dtype, kwargs={}):
    dtype = dtype or output.datatype()
    data = output.data()
    edges = np.array(dtype.edgesNd[0], dtype='d')
    widths = edges[1:] - edges[:-1]
    rel_offsets = np.fabs(widths - widths[0]) / widths.max()

    name = kwargs.pop('name', '')
    title = kwargs.pop('label', kwargs.pop('title', ''))
    if (rel_offsets < 1.e-9).all():
        # Constant width histogram
        hist = R.TH1D(name, title, edges.size - 1, edges[0], edges[-1])
    else:
        hist = R.TH1D(name, title, edges.size - 1, edges)

    buffer = root2numpy.get_buffer_hist1(hist)
    buffer[:] = data
    hist.SetEntries(data.sum())

    set_axes(hist, kwargs)

    if kwargs:
        raise Exception('Unparsed options in extra arguments for TH1D')

    return hist
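# Usage sketch for unpack_hist1() above, assuming the same environment as the
# rest of this module (np, R, C and the root2numpy/set_axes helpers): build a
# small GNA histogram and convert it back to a ROOT TH1D.
def _demo_unpack_hist1():
    edges = np.arange(6, dtype='d')
    data = np.arange(5, dtype='d')
    ghist = C.Histogram(edges, data)                # GNA histogram with 5 bins
    th1 = unpack_hist1(ghist.single(), None, dict(name='h_demo', label='demo'))
    print('entries:', th1.GetEntries())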
def build(self):
    model_edges_t = C.Points(self.model_edges, ns=self.namespace)
    model_edges_t.points.setLabel('Spectra interpolation edges')
    self.context.objects['edges'] = model_edges_t
    self.shared.reactor_anu_edges = model_edges_t.single()

    if self.cfg.free_params:
        with self.reac_ns:
            tmp = C.VarArray(self.variables, ns=self.reac_ns, labels='Spec pars:\nlog(n_i)')
        if self.cfg.varmode == 'log':
            self.context.objects['npar_log'] = tmp
            self.free_weights = R.Exp(ns=self.reac_ns)
            self.free_weights.exp.points(tmp)
            self.free_weights.exp.setLabel('n_i')
        else:
            tmp.vararray.setLabel('n_i')
            self.free_weights = tmp

    self.interp_expo = interp_expo = R.InterpExpo(ns=self.reac_ns)
    sampler = interp_expo.transformations.front()
    model_edges_t >> sampler.inputs.edges
    sampler_input = sampler.inputs.points

    interp_expo_t = interp_expo.transformations.back()

    for i, it in enumerate(self.nidx_major):
        isotope, = it.current_values()

        spectrum_raw_t = C.Points(self.spectra[isotope], ns=self.reac_ns)
        spectrum_raw_t.points.setLabel('%s spectrum, original' % isotope)
        self.context.objects[('spectrum_raw', isotope)] = spectrum_raw_t

        if self.cfg.free_params:
            spectrum_t = C.Product(ns=self.reac_ns)
            spectrum_t.multiply(spectrum_raw_t)
            spectrum_t.multiply(self.free_weights.single())
            spectrum_t.product.setLabel('%s spectrum, corrected' % isotope)
        else:
            spectrum_t = spectrum_raw_t

        if i > 0:
            interp_expo_t = interp_expo.add_transformation()

        model_edges_t >> interp_expo_t.inputs.x
        interp_output = interp_expo.add_input(spectrum_t)
        interp_input = interp_expo_t.inputs.newx

        if i > 0:
            self.set_input(self.cfg.name, it, interp_input, argument_number=0)
        else:
            self.set_input(self.cfg.name, it, (sampler_input, interp_input), argument_number=0)

        interp_expo_t.setLabel('%s spectrum, interpolated' % isotope)

        # Store data
        self.set_output(self.cfg.name, it, interp_output)
        self.context.objects[('spectrum', isotope)] = spectrum_t
def __init__(self, title):
    self._title = title
    self._data = []
    self._readers = {
            R.GaussianParameter('double'): self._read_gaussian,
            R.UniformAngleParameter('double'): self._read_angle,
            DiscreteParameter: self._read_discrete,
            }
def build(self):
    self.load_data()

    model_edges_t = C.Points(self.model_edges, ns=self.namespace)
    model_edges_t.points.setLabel('Spectra interpolation edges')
    self.context.objects['edges'] = model_edges_t
    self.shared.reactor_anu_edges = model_edges_t.single()

    self.corrections = None
    if self.cfg.get('corrections', None):
        self.corrections, = execute_bundles(cfg=self.cfg.corrections, shared=self.shared)

    self.interp_expo = interp_expo = R.InterpExpo(ns=self.namespace)
    sampler = interp_expo.transformations.front()
    model_edges_t >> sampler.inputs.edges
    sampler_input = sampler.inputs.points

    interp_expo_t = interp_expo.transformations.back()

    for i, it in enumerate(self.nidx_major):
        isotope, = it.current_values()

        spectrum_raw_t = C.Points(self.spectra[isotope], ns=self.namespace)
        spectrum_raw_t.points.setLabel('%s spectrum, original' % isotope)
        self.context.objects[('spectrum_raw', isotope)] = spectrum_raw_t

        if self.corrections:
            spectrum_t = R.Product(ns=self.namespace)
            spectrum_t.multiply(spectrum_raw_t)
            for corr in self.corrections.bundles.values():
                spectrum_t.multiply(corr.outputs[isotope])
            spectrum_t.product.setLabel('%s spectrum, corrected' % isotope)
        else:
            spectrum_t = spectrum_raw_t

        if i > 0:
            interp_expo_t = interp_expo.add_transformation()

        model_edges_t >> interp_expo_t.inputs.x
        interp_output = interp_expo.add_input(spectrum_t)
        interp_input = interp_expo_t.inputs.newx

        if i > 0:
            self.set_input(self.cfg.name, it, interp_input, argument_number=0)
        else:
            self.set_input(self.cfg.name, it, (sampler_input, interp_input), argument_number=0)

        interp_expo_t.setLabel('%s spectrum, interpolated' % isotope)

        # Store data
        self.set_output(self.cfg.name, it, interp_output)
        self.context.objects[('spectrum', isotope)] = spectrum_t
def test_datatype_preallocated_v02():
    dt = R.DataType()
    buf = N.arange(10, dtype='d')
    dt.points().shape(buf.size).preallocated(buf)

    dt1 = R.DataType()
    dt1.__assign__(dt)
    assert dt1 == dt
    assert dt.requiresReallocation(dt1)
def test_histogram_v02_TH2D(tmp_path):
    rhist = R.TH2D('testhist', 'testhist', 20, 0, 10, 24, 0, 12)
    xyg = R.TF2("xyg", "exp([0]*x)*exp([1]*y)", 0, 10, 0, 12)
    xyg.SetParameter(0, -1 / 2.)
    xyg.SetParameter(1, -1 / 8.)
    R.gDirectory.Add(xyg)
    rhist.FillRandom('xyg', 10000)

    hist = C.Histogram2d(rhist)

    buf = rhist.get_buffer().T
    res = hist.hist.hist()

    # Plot
    fig = plt.figure()
    ax = plt.subplot(111)
    ax.minorticks_on()
    ax.grid()
    ax.set_xlabel('X label')
    ax.set_ylabel('Y label')
    ax.set_title('ROOT histogram')
    rhist.pcolorfast(colorbar=True)

    suffix = 'histogram2d'
    path = os.path.join(str(tmp_path), suffix + '.png')
    savefig(path, dpi=300)
    allure_attach_file(path)
    plt.close()

    fig = plt.figure()
    ax = plt.subplot(111)
    ax.minorticks_on()
    ax.grid()
    ax.set_xlabel('X label')
    ax.set_ylabel('Y label')
    ax.set_title('GNA histogram')
    hist.hist.hist.plot_pcolorfast(colorbar=True)

    suffix = 'histogram2d'
    path = os.path.join(str(tmp_path), suffix + '.png')
    savefig(path, dpi=300)
    allure_attach_file(path)
    plt.close()

    path = os.path.join(str(tmp_path), suffix + '_graph.png')
    savegraph(hist.hist, path)
    allure_attach_file(path)
    plt.close()

    # Test consistency
    assert np.all(buf == res)
def build(self):
    with entryContext(subgraph="IBD"):
        # initialize Evis to Ee converter
        with self.namespace("ibd"):
            self.econv = R.EvisToEe()

        # register its input and output
        self.set_input('ee', None, self.econv.Ee.Evis, argument_number=0)
        self.set_output('ee', None, self.econv.Ee.Ee)

        if self.cfg.order == 0:
            # in 0th order and 1d case
            # create 1d cross section
            with self.namespace("ibd"):
                self.ibd = R.IbdZeroOrder()

            # register Enu input and output
            self.set_input('enu', None, self.ibd.Enu.Ee, argument_number=0)
            self.set_output('enu', None, self.ibd.Enu.Enu)

            # register cross section input and output
            self.set_input('ibd_xsec', None, self.ibd.xsec.Ee, argument_number=0)
            self.set_output('ibd_xsec', None, self.ibd.xsec.xsec)

            # label cross section for the graph
            self.ibd.xsec.setLabel('IBD xsec (0)')
        elif self.cfg.order == 1:
            # in 1st order and 2d case
            # create 2d cross section
            with self.namespace("ibd"):
                self.ibd = R.IbdFirstOrder()

            # register Enu inputs and output
            self.set_input('enu', None, self.ibd.Enu.Ee, argument_number=0)
            self.set_input('enu', None, self.ibd.Enu.ctheta, argument_number=1)
            self.set_output('enu', None, self.ibd.Enu.Enu)

            # register cross section inputs and output
            self.set_input('ibd_xsec', None, self.ibd.xsec.Enu, argument_number=0)
            self.set_input('ibd_xsec', None, self.ibd.xsec.ctheta, argument_number=1)
            self.set_output('ibd_xsec', None, self.ibd.xsec.xsec)

            # label cross section
            self.ibd.xsec.setLabel('IBD xsec (1)')

            # register jacobian inputs and output
            self.set_input('jacobian', None, self.ibd.jacobian.Enu, argument_number=0)
            self.set_input('jacobian', None, self.ibd.jacobian.Ee, argument_number=1)
            self.set_input('jacobian', None, self.ibd.jacobian.ctheta, argument_number=2)
            self.set_output('jacobian', None, self.ibd.jacobian.jacobian)

            # label jacobian
            self.ibd.jacobian.setLabel('Ee-\\>Enu jacobian')

        # label neutrino energy calculator
        self.ibd.Enu.setLabel('Enu')
def build(self):
    # initialize Evis to Ee converter
    with self.common_namespace("ibd"):
        self.econv = R.EvisToEe()

    # register its input and output
    self.set_input(self.econv.Ee.Evis, 'ee', clone=0)
    self.set_output(self.econv.Ee.Ee, 'ee')

    if self.cfg.order == 0:
        # in 0th order and 1d case
        # create 1d cross section
        with self.common_namespace("ibd"):
            self.ibd = R.IbdZeroOrder()

        # register Enu input and output
        self.set_input(self.ibd.Enu.Ee, 'enu', clone=0)
        self.set_output(self.ibd.Enu.Enu, 'enu')

        # register cross section input and output
        self.set_input(self.ibd.xsec.Ee, 'ibd_xsec', clone=0)
        self.set_output(self.ibd.xsec.xsec, 'ibd_xsec')

        # label cross section for the graph
        self.ibd.xsec.setLabel('IBD xsec (0)')
    elif self.cfg.order == 1:
        # in 1st order and 2d case
        # create 2d cross section
        with self.common_namespace("ibd"):
            self.ibd = R.IbdFirstOrder()

        # register Enu inputs and output
        self.set_input(self.ibd.Enu.Ee, 'enu', clone=0)
        self.set_input(self.ibd.Enu.ctheta, 'enu', clone=1)
        self.set_output(self.ibd.Enu.Enu, 'enu')

        # register cross section inputs and output
        self.set_input(self.ibd.xsec.Enu, 'ibd_xsec', clone=0)
        self.set_input(self.ibd.xsec.ctheta, 'ibd_xsec', clone=1)
        self.set_output(self.ibd.xsec.xsec, 'ibd_xsec')

        # label cross section
        self.ibd.xsec.setLabel('IBD xsec (1)')

        # register jacobian inputs and output
        self.set_input(self.ibd.jacobian.Enu, 'jacobian', clone=0)
        self.set_input(self.ibd.jacobian.Ee, 'jacobian', clone=1)
        self.set_input(self.ibd.jacobian.ctheta, 'jacobian', clone=2)
        self.set_output(self.ibd.jacobian.jacobian, 'jacobian')

        # label jacobian
        self.ibd.jacobian.setLabel('Ee->Enu jacobian')

    # label neutrino energy calculator
    self.ibd.Enu.setLabel('Enu')
def build_mat(self):
    """Assembles a chain for the eleak detector effect using the input matrix"""
    haspar = bool(self.cfg.get('parname'))

    ndiag = self.cfg.get('ndiag', 1)

    norm = self.eleak_matrix.sum(axis=0)
    norm[norm == 0.0] = 1.0
    self.eleak_matrix /= norm

    points = C.Points(self.eleak_matrix, ns=self.namespace, labels='Eleak matrix\n raw')
    self.context.objects['matrix'] = points
    if haspar:
        self.set_output('eleak_matrix_raw', None, points.single())
    else:
        self.set_output('eleak_matrix', None, points.single())

    for itdet in self.nidx_major:
        if haspar:
            parname = itdet.current_format(name=self.cfg.parname)
            # Target=OffDiagonal, Mode=Upper
            renormdiag = R.RenormalizeDiag(ndiag, 1, 1, parname, ns=self.namespace,
                                           labels=itdet.current_format('Eleak matrix\n {autoindex}'))
            points.points >> renormdiag.renorm.inmat
            self.set_output('eleak_matrix', itdet, renormdiag.single())
            self.context.objects[itdet.current_values(name='renormdiag')] = renormdiag
            matinput = renormdiag.renorm
        else:
            matinput = points.points

        for itother in self.nidx_minor:
            it = itdet + itother
            esmear = R.HistSmear(True, labels=it.current_format('{{Eleak effect|{autoindex}}}'))  # True for 'upper'
            matinput >> esmear.smear.inputs.SmearMatrix
            self.set_input('eleak', it, esmear.smear.Ntrue, argument_number=0)
            self.set_output('eleak', it, esmear.single())

            self.context.objects[it.current_values(name='esmear')] = esmear
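# Small numeric illustration of the column normalisation done at the top of
# build_mat() (pure numpy, independent of the bundle machinery): every column
# of the leakage matrix is rescaled to sum to one, while empty columns are
# left untouched by substituting a unit norm.
def _demo_eleak_normalisation():
    m = np.array([[0.8, 0.1, 0.0],
                  [0.2, 0.3, 0.0]])
    norm = m.sum(axis=0)
    norm[norm == 0.0] = 1.0        # avoid division by zero for empty columns
    m = m / norm
    print(m.sum(axis=0))           # [1. 1. 0.]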
def build(self):
    model_edges_t = C.Points(self.model_edges, ns=self.common_namespace)
    model_edges_t.points.setLabel('E0 (bin edges)')
    self.objects['edges'] = model_edges_t
    self.shared.reactor_anu_edges = model_edges_t.single()

    self.corrections = None
    if self.cfg.get('corrections', None):
        self.corrections, = execute_bundles(cfg=self.cfg.corrections, shared=self.shared)

    self.interp_expo = interp_expo = R.InterpExpo(ns=self.common_namespace)
    sampler = interp_expo.transformations.front()
    model_edges_t >> sampler.inputs.edges
    sampler_input = sampler.inputs.points
    interp_expo.bind_transformations(False)

    interp_expo_t = interp_expo.transformations.back()

    for i, it in enumerate(self.idx):
        isotope = it.current_values()[0]

        spectrum_raw_t = C.Points(self.spectra[isotope], ns=self.common_namespace)
        spectrum_raw_t.points.setLabel('S0(E0):\n' + isotope)
        self.objects[('spectrum_raw', isotope)] = spectrum_raw_t

        if self.corrections:
            spectrum_t = R.Product(ns=self.common_namespace)
            spectrum_t.multiply(spectrum_raw_t)
            for corr in self.corrections.bundles.values():
                spectrum_t.multiply(corr.outputs[isotope])
            spectrum_t.product.setLabel('S(E0):\n' + isotope)
        else:
            spectrum_t = spectrum_raw_t

        if i > 0:
            interp_expo_t = interp_expo.add_transformation(False)
            interp_expo.bind_transformations(False)

        model_edges_t >> interp_expo_t.inputs.x
        interp_output = interp_expo.add_input(spectrum_t)
        interp_input = interp_expo_t.inputs.newx

        if i > 0:
            self.set_input(interp_input, self.cfg.name, it, clone=0)
        else:
            self.set_input((sampler_input, interp_input), self.cfg.name, it, clone=0)

        interp_expo_t.setLabel('S(E):\n' + isotope)

        # Store data
        self.set_output(interp_output, self.cfg.name, it)
        self.objects[('spectrum', isotope)] = spectrum_t
def test_par_05(floatprecision='double'):
    """Test nested vector"""
    assert floatprecision in ['double', 'float']
    const = N.array([1.5, 2.6, 3.7], dtype=floatprecision[0])
    var = R.parameter('vector<%s>' % floatprecision)('testpar')
    var.value().resize(3)
    taintflag = R.taintflag('tflag')
    var.subscribe(taintflag)
    taintflag.set(False)

    vec = C.stdvector(const)
    var.set(vec)
    check('vec', None, list(var.value()), const, taintflag)
def build(self):
    if self.xorders.size > 1:
        if self.xorders.size + 1 != self.xedges.size:
            raise self.exception('Incompatible edges and xorders definition:\n {!r}\n {!r}'.format(self.xedges, self.xorders))
        self.integrator = R.Integrator21GL(self.xedges.size - 1, self.xorders, self.xedges, self.cfg.yorder, -1.0, 1.0)
    else:
        self.integrator = R.Integrator21GL(self.xedges.size - 1, int(self.xorders[0]), self.xedges, self.cfg.yorder, -1.0, 1.0)
    self.integrator.points.setLabel('GL sampler (2d)')
    self.integrator.points.x.setLabel(self.cfg.variables[0])
    self.integrator.points.xedges.setLabel('%s edges' % self.cfg.variables[0])
    self.integrator.points.xcenters.setLabel('{} bin centers'.format(self.cfg.variables[0]))
    self.integrator.points.y.setLabel(self.cfg.variables[1])

    self.set_output(self.cfg.variables[0], None, self.integrator.points.x)
    self.set_output('{}_edges'.format(self.cfg.variables[0]), None, self.integrator.points.xedges)
    self.set_output('{}_centers'.format(self.cfg.variables[0]), None, self.integrator.points.xcenters)
    self.set_output('{}_hist'.format(self.cfg.variables[0]), None, self.integrator.points.xhist)
    self.set_output('{}_mesh'.format(self.cfg.variables[0]), None, self.integrator.points.xmesh)
    self.set_output('{}_mesh'.format(self.cfg.variables[1]), None, self.integrator.points.ymesh)
    self.set_output(self.cfg.variables[1], None, self.integrator.points.y)

    hist = self.integrator.hist

    instances = self.cfg['instances']
    needadd = False
    for name, label in instances.items():
        noindex = False
        if isinstance(label, (dict, NestedDict)):
            noindex = label.get('noindex')
            label = label.get('label')

        if label is None:
            label = '{name} {autoindex} (GL)'

        for it in self.nidx:
            if needadd:
                hist = self.integrator.add_transformation()
            needadd = True

            if noindex:
                hist.setLabel(label)
                self.set_input(name, None, self.integrator.add_input(), argument_number=0)
                self.set_output(name, None, hist.outputs.back())
                break

            hist.setLabel(it.current_format(label, name='Integral'))

            self.set_input(name, it, self.integrator.add_input(), argument_number=0)
            self.set_output(name, it, hist.outputs.back())
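# Hypothetical configuration sketch for the 2d Gauss-Legendre bundle above.
# The field names are taken from the attribute accesses in build() (variables,
# yorder, instances); the values and the machinery that supplies
# self.xedges/self.xorders are illustrative only.
integral_cfg_example = dict(
        variables=('evis', 'ctheta'),   # names of the x and y integration variables
        yorder=5,                       # Gauss-Legendre order along y
        instances={
            'integral': 'Integral {autoindex} (GL)',   # transformation name -> label
            },
        )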
def build(self):
    '''Bring uncertainties together with the spectrum.

    Spectrum values are grouped by bins in antineutrino energy (the binning is
    read together with the uncertainties). Each bin is corrected accordingly
    and the corrected spectrum is provided as output.
    '''
    order = ','.join((self.cfg.iso_idx, self.cfg.reac_idx))
    reversed_order = ','.join((self.cfg.reac_idx, self.cfg.iso_idx))
    order_from_idx = self.nidx_major.comma_list()
    if order_from_idx == order:
        normal_order = True
    elif order_from_idx == reversed_order:
        normal_order = False
    else:
        raise ValueError("Indices from config and real ones don't match")

    for idx in self.nidx_major:
        if normal_order:
            iso, reac = idx.current_values()
        else:
            reac, iso = idx.current_values()

        unc = self.total_unc[reac][iso]
        nominal = R.FillLike(1., labels="Nominal weight for spectrum")
        unc.single() >> nominal.single_input()
        total = C.Sum(outputs=[unc, nominal],
                      labels=f"Nominal weights + correction for {iso} in {reac}")

        correction = R.ReactorSpectrumUncertainty(self.binning.single(), total.single())
        correction.insegment.setLabel(f"Segments in HM-bins for {iso} in {reac}")
        correction.transformations.in_bin_product.setLabel(
            f"Correction to antineutrino spectrum in HM model for {iso} in {reac}")

        self.set_input("corrected_spectrum", idx, correction.insegment.points, argument_number=0)
        self.set_input("corrected_spectrum", idx, correction.transformations.in_bin_product.spectrum, argument_number=1)
        self.set_output("corrected_spectrum", idx, correction.transformations.in_bin_product.corrected_spectrum)
def test_arrayview_allocation():
    nitems, ndata = 6, 15
    allocator = R.arrayviewAllocatorSimple(context.current_precision())(ndata)

    arrays = []
    for i in range(1, nitems):
        array = R.arrayview(context.current_precision())(i, allocator)
        for j in range(i):
            array[j] = j
        print(i, array.view())
        arrays.append(array)

    print('Arrays:', [array.view() for array in arrays])
    print('Data (filled):', allocator.view())
    print('Data (all):', allocator.viewall())
def build(self):
    if self.xorders.size > 1:
        if self.xorders.size + 1 != self.edges.size:
            raise Exception(
                'Incompatible edges and xorders definition:\n {!r}\n {!r}'
                .format(self.edges, self.xorders))
        self.integrator = R.Integrator21GL(self.edges.size - 1, self.xorders, self.edges,
                                           self.cfg.yorder, -1.0, 1.0)
    else:
        self.integrator = R.Integrator21GL(self.edges.size - 1, int(self.xorders[0]), self.edges,
                                           self.cfg.yorder, -1.0, 1.0)
    self.integrator.points.setLabel('GL sampler (2d)')
    self.integrator.points.x.setLabel(self.cfg.variables[0])
    self.integrator.points.xedges.setLabel('%s edges' % self.cfg.variables[0])
    self.integrator.points.xcenters.setLabel('{} bin centers'.format(self.cfg.variables[0]))
    self.integrator.points.y.setLabel(self.cfg.variables[1])

    self.set_output(self.cfg.variables[0], None, self.integrator.points.x)
    self.set_output('{}_edges'.format(self.cfg.variables[0]), None, self.integrator.points.xedges)
    self.set_output('{}_centers'.format(self.cfg.variables[0]), None, self.integrator.points.xcenters)
    self.set_output('{}_hist'.format(self.cfg.variables[0]), None, self.integrator.points.xhist)
    self.set_output('{}_mesh'.format(self.cfg.variables[0]), None, self.integrator.points.xmesh)
    self.set_output('{}_mesh'.format(self.cfg.variables[1]), None, self.integrator.points.ymesh)
    self.set_output(self.cfg.variables[1], None, self.integrator.points.y)

    hist = self.integrator.hist
    for i, it in enumerate(self.nidx):
        if i:
            hist = self.integrator.add_transformation()
        hist.setLabel(it.current_format('{name} {autoindex} (GL)', name='Integral'))

        self.set_input('integral', it, self.integrator.add_input(), argument_number=0)
        self.set_output('integral', it, hist.outputs.back())
def test_arrayview_complex():
    a1 = R.arrayview(context.current_precision())(2)
    a1[0] = 2
    a1[1] = 3
    c1 = a1.complex()
    assert c1.real == 2.0
    assert c1.imag == 3.0
def define_variables(self):
    names_all = set(self.cfg.names)
    names_unc = self.cfg.fractions.keys()
    names_eval = names_all - set(names_unc)
    if len(names_eval) != 1:
        raise self.exception(
            'User should provide N-1 fractions, the last one is not independent\n'
            'all: {!s}\nfractions: {!s}'.format(self.cfg.names, names_unc))
    name_eval = names_eval.pop()

    subst = []
    names = ()
    for name, val in self.cfg.fractions.items():
        cname = self.cfg.format.format(component=name)
        names += cname,

        par = self.common_namespace.reqparameter(cname, cfg=val)
        par.setLabel('{} fraction'.format(name))
        subst.append(self.common_namespace.pathto(cname))

    label = '{} fraction: '.format(name_eval)
    label += '-'.join(('1',) + names)

    name_eval = self.cfg.format.format(component=name_eval)
    with self.common_namespace:
        self.vd = R.VarDiff(stdvector(subst), name_eval, 1.0, ns=self.common_namespace)
        par = self.common_namespace[name_eval].get()
        par.setLabel(label)
def saveInfo(self, name, info, title='', print_threshold=3, **kwargs):
    auto_title = kwargs.pop('auto_title', False)
    assert not kwargs, 'Unparsed arguments: ' + str(kwargs)

    data = ordered_yaml.ordered_dump(info)

    if auto_title:
        title = str(data)
        if title[-1] == '\n':
            title = title[:-1]
        title = title.replace('\n', '; ')

    if title:
        title = title + ' (yaml)'
    else:
        title = 'YAML data'

    odata = R.TObjString(data)

    self.outputfile.WriteTObject(odata, name, 'overwrite')
    key = self.outputfile.GetKey(name)
    key.SetTitle(title)

    self.print(print_threshold, 'Save info as {name}: {title}'.format(name=name, title=title))
    self.print(print_threshold, data, end='\n')
def Histogram2d(arg1, arg2=None, arg3=None, *args, **kwargs):
    if arg2 is None and arg3 is None:
        if isinstance(arg1, R.TH2):
            xedges = arg1.GetXaxis().get_bin_edges().astype('d')
            yedges = arg1.GetYaxis().get_bin_edges().astype('d')
            # histogram buffer is transposed (y, x)
            data = np.ascontiguousarray(arg1.get_buffer().T, dtype='d').ravel(order='F')
        else:
            raise Exception('Should provide (xedges, yedges[, data]) or (TH2) to construct Histogram2d')
    else:
        xedges = np.ascontiguousarray(arg1, dtype='d')
        yedges = np.ascontiguousarray(arg2, dtype='d')

        reqsize = (xedges.size - 1) * (yedges.size - 1)
        if arg3 is None:
            data = np.zeros(reqsize, dtype='d')
        else:
            data = arg3

        if reqsize != data.size:
            raise Exception('Bin edges and data are not consistent (%i,%i and %i)' % (xedges.size, yedges.size, data.size))

        data = np.ascontiguousarray(data, dtype='d').ravel(order='F')

    return R.Histogram2d(xedges.size - 1, xedges, yedges.size - 1, yedges, data, *args, **kwargs)
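# Usage sketch for the Histogram2d() constructor helper above (assumes np and
# the R bindings used throughout this module): either pass bin edges plus an
# optional flat data array, or wrap an existing ROOT TH2 as in the test above.
def _demo_histogram2d():
    xedges = np.arange(4, dtype='d')       # 3 bins along x
    yedges = np.arange(6, dtype='d')       # 5 bins along y
    data = np.ones((xedges.size - 1) * (yedges.size - 1), dtype='d')
    hist = Histogram2d(xedges, yedges, data)
    return hist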