def test_filllike_v02(function_name):
    """Check FillLike connected to a WeightedSum via the `>>` operator.

    The FillLike output must hold the constant ``flvalue`` regardless of
    the input values, and changing the upstream weight parameter must not
    taint the FillLike transformation.
    """
    # Initialize inputs: small integer ramp to feed into the graph
    size = 5
    arr1 = N.arange(0, size)
    """Initialize environment"""
    ns = env.globalns(function_name)
    p1 = ns.defparameter('w1', central=1.0, sigma=0.1)
    points1 = C.Points(arr1)
    with ns:
        # WeightedSum binds the 'w1' parameter from the active namespace
        ws = C.WeightedSum(['w1'], [points1.points.points])

    ws.print()
    print()

    # FillLike fills its output with a constant, shaped like the input
    flvalue = 2.0
    fl = C.FillLike(flvalue)
    ws >> fl.fill.inputs[0]
    out = fl.fill.outputs[0]

    data = out.data()
    print('data:', data)
    print()
    compare_filllike(data, [flvalue] * size, 'Data output failed')

    print('Change parameter')
    p1.set(-1.0)
    # The parameter change must not propagate taint through FillLike,
    # since FillLike's result does not depend on the input values.
    taintflag = fl.fill.tainted()
    print('data:', data)
    print('taintflag:', taintflag)
    compare_filllike(data, [flvalue] * size, 'Data output failed')
    compare_filllike(taintflag, False, 'Taintflag should be false')
def test_weightedsum_02(function_name):
    """Weighted sum over a single input: only the first weight (w1)
    scales the data; the second parameter must have no effect."""
    (namespace, weight_names, base_array, par1, pts1,
     _arr2, par2, _pts2, _zeros) = weightedsum_make(function_name)
    single_output = [pts1.single()]
    with namespace:
        # Only the first weight is used, paired with the single output.
        wsum = C.WeightedSum(weight_names[:1], single_output)

    wsum.print()
    print('Mode1: a1*w1+a2*w2')

    def report_and_check(expected):
        # Print the current parameter values and output, then verify it.
        print(' ', par1.value(), par2.value(), wsum.sum.sum.data())
        assert (wsum.sum.sum.data() == expected).all()

    report_and_check(base_array)
    par1.set(2)
    report_and_check(2.0 * base_array)
    par2.set(2)  # w2 is not part of the sum: no effect expected
    report_and_check(2.0 * base_array)
    par1.set(1)
    report_and_check(base_array)
    par2.set(1)
    report_and_check(base_array)
    print()
def define_variables(self):
    """Define variables for correlated and uncorrelated uncertainties.

    Correlated uncertainties are represented as WeightedSums of fixed
    uncertainties from HM model multiplied by single weight for each
    reactor and shared for isotopes within same reactor.

    Uncorrelated uncertainties are represented as VarArrays of uncertain
    parameters. Each parameter uncertainty corresponds to uncorrelated
    uncertainty from HM model multiplied for given isotope in given bin.
    All parameters are uncorrelated between reactors and isotopes.
    """
    num_of_bins = len(self.bins) - 1
    with self.unc_ns:
        for reac_idx in self.nidx_major.get_subset(self.cfg.reac_idx):
            reac, = reac_idx.current_values()
            # One correlated-uncertainty weight per reactor, shared by
            # all isotopes of that reactor.
            corr_name = "corr_unc." + reac_idx.current_format()
            self.unc_ns.reqparameter(
                corr_name,
                central=0.,
                sigma=1.,
                label=f"Correlated uncertainty in {reac}")
            for iso_idx in self.nidx_major.get_subset(self.cfg.iso_idx):
                iso, = iso_idx.current_values()
                idx = iso_idx + reac_idx
                # Template for per-bin uncorrelated parameter names.
                uncorr_temp = "uncorr_unc." + idx.current_format() + ".{bin}"
                vars = []
                # One uncorrelated parameter per bin, sigma taken from
                # the HM model table for this isotope.
                for bin, iso_unc in zip(range(num_of_bins),
                                        self.uncertainties['uncorr'][iso]):
                    name = uncorr_temp.format(bin=bin)
                    vars.append(name)
                    self.unc_ns.reqparameter(
                        name,
                        central=0.,
                        sigma=iso_unc,
                        label=f'''Uncorrelated uncertainty for {iso} in {reac}, bin {bin}''')
                uncorr_unc = C.VarArray(
                    vars,
                    ns=self.unc_ns,
                    labels=f'Uncorrelated uncertainties for {iso} in {reac}')
                self.uncorrelated_vars[reac][iso] = uncorr_unc
                # Fixed correlated-uncertainty shape, scaled by the
                # single per-reactor weight via WeightedSum.
                tmp = C.Points(
                    self.uncertainties['corr'][iso],
                    labels=f'Fixed array of correlated uncertainties for {iso} in {reac}')
                corr_unc = C.WeightedSum(
                    [corr_name], [tmp],
                    ns=self.unc_ns,
                    labels=f"Correlated uncertainties for {iso} in {reac}")
                self.correlated_vars[reac][iso] = corr_unc
                # Total uncertainty is the sum of both contributions.
                self.total_unc[reac][iso] = C.Sum([uncorr_unc, corr_unc])
def test_filllike_v01(function_name='test_filllike_v01'):
    """Check FillLike wired through DebugTransformation objects.

    A WeightedSum feeds a FillLike via a debug transformation; a second
    debug transformation reads the FillLike output. The output must hold
    the constant ``flvalue`` and must not become tainted when the
    upstream weight parameter changes.
    """
    # Initialize inputs: small integer ramp to feed into the graph
    size = 5
    arr1 = N.arange(0, size)
    """Initialize environment"""
    ns = env.globalns(function_name)
    p1 = ns.defparameter('w1', central=1.0, sigma=0.1)
    points1 = C.Points(arr1)
    with ns:
        # WeightedSum binds the 'w1' parameter from the active namespace
        ws = C.WeightedSum(['w1'], [points1.points.points])

    ws.print()
    print()

    # Debug transformation between the sum and the FillLike, to trace
    # the execution order.
    dbg1 = R.DebugTransformation('wsum')
    dbg1.debug.source(ws.sum.sum)

    flvalue = 2.0
    fl = C.FillLike(flvalue)
    fl.fill.inputs[0](dbg1.debug.target)

    # Second debug transformation downstream of the FillLike output.
    dbg2 = R.DebugTransformation('fill')
    dbg2.debug.source(fl.fill.outputs[0])

    data = dbg2.debug.target.data()
    print('data:', data)
    print()
    compare_filllike(data, [flvalue] * size, 'Data output failed')

    print('Change parameter')
    p1.set(-1.0)
    # The parameter change must not propagate taint past the FillLike.
    taintflag = dbg2.debug.tainted()
    data = dbg2.debug.target.data()
    print('data:', data)
    print('taintflag:', taintflag)
    compare_filllike(data, [flvalue] * size, 'Data output failed')
    compare_filllike(taintflag, False, 'Taintflag should be false')
# Bin edges over one full period; widths are kept for reference.
x_edges = np.linspace(-np.pi, np.pi, nbins+1, dtype='d')
x_widths = x_edges[1:]-x_edges[:-1]
x_width_ratio = x_widths/x_widths.min()
orders = 4  # Gauss-Legendre integration order per bin

# Initialize histogram
hist = C.Histogram(x_edges)

# Initialize integrator
integrator = R.IntegratorGL(nbins, orders)
integrator.points.edges(hist.hist.hist)
int_points = integrator.points.x

# Initialize sine and two cosines; the cosine arguments are k*x,
# implemented as weighted sums with weights fcn1.k / fcn2.k.
sin_t = R.Sin(int_points)
cos1_arg = C.WeightedSum( ['fcn1.k'], [int_points] )
cos2_arg = C.WeightedSum( ['fcn2.k'], [int_points] )
cos1_t = R.Cos(cos1_arg.sum.sum)
cos2_t = R.Cos(cos2_arg.sum.sum)

# Initializes both weighted sums: fcn_i = a_i*sin(x) + b_i*cos(k_i*x)
fcn1 = C.WeightedSum(['fcn1.a', 'fcn1.b'], [sin_t.sin.result, cos1_t.cos.result])
fcn2 = C.WeightedSum(['fcn2.a', 'fcn2.b'], [sin_t.sin.result, cos2_t.cos.result])

# Create two debug transformations to check the execution
dbg1a = R.DebugTransformation('before int1', 1.0)
dbg2a = R.DebugTransformation('before int2', 1.0)
dbg1b = R.DebugTransformation('after int1', 1.0)
dbg2b = R.DebugTransformation('after int2', 1.0)
def build(self):
    """Build the offequilibrium correction graph for each (isotope, reactor).

    For isotopes with raw offeq spectra: interpolate the correction onto
    the requested energies and combine it with a unit baseline via a
    WeightedSum weighted by the uncertain `offeq_scale` parameter.
    Isotopes without data (U238) get a constant correction of 1.
    """
    for idx in self.nidx.iterate():
        # Index order is not fixed; detect which field is the isotope.
        if 'isotope' in idx.names()[0]:
            iso, reac = idx.current_values()
        else:
            reac, iso = idx.current_values()
        name = "offeq_correction." + idx.current_format()
        try:
            _offeq_energy, _offeq_spectra = list(
                map(C.Points, self.offeq_raw_spectra[iso]))
            _offeq_energy.points.setLabel(
                "Original energies for offeq spectrum of {}".format(iso))
        except KeyError:
            # U238 doesn't have offequilibrium correction so just pass 1.
            if iso != 'U238':
                raise
            ones = C.FillLike(
                1.,
                labels='Offeq correction to {0} spectrum in {1} reactor'.
                format(iso, reac))
            self.context.objects[name] = ones
            self.set_input('offeq_correction', idx, ones.single_input(),
                           argument_number=0)
            self.set_output("offeq_correction", idx, ones.single())
            continue
        # Piecewise-linear interpolation of the correction, clamped
        # (constant) outside the tabulated energy range.
        offeq_spectra = C.InterpLinear(
            labels='Correction for {} spectra'.format(iso))
        offeq_spectra.set_overflow_strategy(
            R.GNA.Interpolation.Strategy.Constant)
        offeq_spectra.set_underflow_strategy(
            R.GNA.Interpolation.Strategy.Constant)

        insegment = offeq_spectra.transformations.front()
        insegment.setLabel("Segments")
        interpolator_trans = offeq_spectra.transformations.back()
        interpolator_trans.setLabel(
            "Interpolated spectral correction for {}".format(iso))

        # Baseline of ones, shaped like the input spectrum.
        ones = C.FillLike(1., labels="Nominal spectra for {}".format(iso))

        _offeq_energy >> (insegment.edges, interpolator_trans.x)
        _offeq_spectra >> interpolator_trans.y
        self.set_input('offeq_correction', idx,
                       (insegment.points, interpolator_trans.newx,
                        ones.single_input()),
                       argument_number=0)

        par_name = "offeq_scale"
        self.reqparameter(par_name, idx, central=1., relsigma=0.3,
                          labels="Offequilibrium norm for reactor {1} and iso "
                          "{0}".format(iso, reac))
        # Fixed unit weight pairing the baseline in the WeightedSum below.
        self.reqparameter("dummy_scale", idx, central=1, fixed=True,
                          labels="Dummy weight for reactor {1} and iso "
                          "{0} for offeq correction".format(iso, reac))

        # correction = 1*ones + offeq_scale*interpolated_correction
        outputs = [ones.single(), interpolator_trans.single()]
        weights = ['.'.join(("dummy_scale", idx.current_format())),
                   '.'.join((par_name, idx.current_format()))]
        with self.namespace:
            final_sum = C.WeightedSum(
                weights, outputs,
                labels='Offeq correction to '
                '{0} spectrum in {1} reactor'.format(iso, reac))

        self.context.objects[name] = final_sum
        self.set_output("offeq_correction", idx, final_sum.single())
def main(opts):
    """Build the Birks-Cherenkov energy model, compute the antineutrino
    survival probability and produce diagnostic plots.

    opts: parsed command-line options; uses opts.output, opts.individual,
    opts.graph and opts.show (parser defined elsewhere in the file).
    """
    global savefig  # rebound below when a multipage PDF is requested
    cfg = NestedDict(
        bundle=dict(
            name='energy_nonlinearity_birks_cherenkov',
            version='v01',
            nidx=[('r', 'reference', ['R1', 'R2'])],
            major=[],
        ),
        stopping_power='stoppingpower.txt',
        annihilation_electrons=dict(
            file='input/hgamma2e.root',
            histogram='hgamma2e_1KeV',
            scale=1.0 / 50000  # event simulated
        ),
        pars=uncertaindict(
            [
                ('birks.Kb0', (1.0, 'fixed')),
                ('birks.Kb1', (15.2e-3, 0.1776)),
                # ('birks.Kb2', (0.0, 'fixed')),
                ("cherenkov.E_0", (0.165, 'fixed')),
                ("cherenkov.p0", (-7.26624e+00, 'fixed')),
                ("cherenkov.p1", (1.72463e+01, 'fixed')),
                ("cherenkov.p2", (-2.18044e+01, 'fixed')),
                ("cherenkov.p3", (1.44731e+01, 'fixed')),
                ("cherenkov.p4", (3.22121e-02, 'fixed')),
                ("Npescint", (1341.38, 0.0059)),
                ("kC", (0.5, 0.4737)),
                ("normalizationEnergy", (12.0, 'fixed'))
            ],
            mode='relative'),
        integration_order=2,
        correlations_pars=['birks.Kb1', 'Npescint', 'kC'],
        correlations=[1.0, 0.94, -0.97,
                      0.94, 1.0, -0.985,
                      -0.97, -0.985, 1.0],
        fill_matrix=True,
        labels=dict(normalizationEnergy='Pessimistic'),
    )
    ns = env.globalns('energy')
    quench = execute_bundle(cfg, namespace=ns)
    ns.printparameters(labels=True)
    print()
    normE = ns['normalizationEnergy'].value()

    #
    # Input bins
    #
    evis_edges_full_input = N.arange(0.0, 15.0 + 1.e-6, 0.025)
    evis_edges_full_hist = C.Histogram(evis_edges_full_input,
                                       labels='Evis bin edges')
    evis_edges_full_hist >> quench.context.inputs.evis_edges_hist['00']

    #
    # Python energy model interpolation function
    #
    from scipy.interpolate import interp1d
    lsnl_x = quench.histoffset.histedges.points_truncated.data()
    lsnl_y = quench.positron_model_relative.single().data()
    lsnl_fcn = interp1d(lsnl_x, lsnl_y, kind='quadratic')

    #
    # Energy resolution
    #
    def eres_sigma_rel(edep):
        # relative resolution: 3%/sqrt(E)
        return 0.03 / edep**0.5

    def eres_sigma_abs(edep):
        # absolute resolution: 3%*sqrt(E)
        return 0.03 * edep**0.5

    #
    # Energy offset
    #
    from physlib import pc
    edep_offset = pc.DeltaNP - pc.ElectronMass

    #
    # Oscprob
    #
    baselinename = 'L'
    ns = env.ns("oscprob")
    import gna.parameters.oscillation
    gna.parameters.oscillation.reqparameters(ns)
    ns.defparameter(baselinename, central=52.0, fixed=True,
                    label='Baseline, km')

    #
    # Define energy range
    #
    enu_input = N.arange(1.8, 15.0, 0.001)
    edep_input = enu_input - edep_offset
    edep_lsnl = edep_input * lsnl_fcn(edep_input)

    # Initialize oscillation variables
    enu = C.Points(enu_input, labels='Neutrino energy, MeV')
    component_names = C.stdvector(['comp0', 'comp12', 'comp13', 'comp23'])
    with ns:
        R.OscProbPMNSExpressions(R.Neutrino.ae(), R.Neutrino.ae(),
                                 component_names, ns=ns)
        labels = [
            'Oscillation probability|%s' % s
            for s in ('component 12', 'component 13', 'component 23',
                      'full', 'probsum')
        ]
        oscprob = R.OscProbPMNS(R.Neutrino.ae(), R.Neutrino.ae(),
                                baselinename, labels=labels)

    enu >> oscprob.full_osc_prob.Enu
    enu >> (oscprob.comp12.Enu, oscprob.comp13.Enu, oscprob.comp23.Enu)

    # Constant 'comp0' component, shaped like enu.
    unity = C.FillLike(1, labels='Unity')
    enu >> unity.fill.inputs[0]
    with ns:
        op_sum = C.WeightedSum(component_names, [
            unity.fill.outputs[0], oscprob.comp12.comp12,
            oscprob.comp13.comp13, oscprob.comp23.comp23
        ], labels='Oscillation probability sum')

    psur = op_sum.single().data()

    # Locate the oscillation pattern extrema.
    from scipy.signal import argrelmin, argrelmax
    psur_minima, = argrelmin(psur)
    psur_maxima, = argrelmax(psur)

    def build_extrema(x):
        # Midpoints and spacings of consecutive extrema, interleaved
        # (max-based first) into a single pair of arrays.
        data_min_x = (x[psur_minima][:-1] + x[psur_minima][1:]) * 0.5
        data_min_y = (x[psur_minima][1:] - x[psur_minima][:-1])

        data_max_x = (x[psur_maxima][:-1] + x[psur_maxima][1:]) * 0.5
        data_max_y = (x[psur_maxima][1:] - x[psur_maxima][:-1])

        data_ext_x = N.vstack([data_max_x, data_min_x]).T.ravel()
        data_ext_y = N.vstack([data_max_y, data_min_y]).T.ravel()

        return data_ext_x, data_ext_y

    psur_ext_x_enu, psur_ext_y_enu = build_extrema(enu_input)
    psur_ext_x_edep, psur_ext_y_edep = build_extrema(edep_input)
    psur_ext_x_edep_lsnl, psur_ext_y_edep_lsnl = build_extrema(edep_lsnl)

    #
    # Plots and tests
    #
    if opts.output and opts.output.endswith('.pdf'):
        # Collect all figures into a single multipage PDF; the global
        # savefig is shadowed by a wrapper that also writes to the PDF.
        pdfpages = PdfPages(opts.output)
        pdfpagesfilename = opts.output
        savefig_old = savefig
        pdf = pdfpages.__enter__()

        def savefig(*args, **kwargs):
            if opts.individual and args and args[0]:
                savefig_old(*args, **kwargs)
            pdf.savefig()
    else:
        pdf = None
        pdfpagesfilename = ''
        pdfpages = None

    fig = P.figure()
    ax = P.subplot(111)
    ax.minorticks_on()
    ax.grid()
    ax.set_xlabel('Edep, MeV')
    ax.set_ylabel('Evis/Edep')
    ax.set_title('Positron energy nonlineairty')
    quench.positron_model_relative.single().plot_vs(
        quench.histoffset.histedges.points_truncated,
        label='definition range')
    quench.positron_model_relative_full.plot_vs(
        quench.histoffset.histedges.points, '--', linewidth=1.,
        label='full range', zorder=0.5)
    ax.vlines(normE, 0.0, 1.0, linestyle=':')
    ax.legend(loc='lower right')
    ax.set_ylim(0.8, 1.05)
    savefig(opts.output, suffix='_total_relative')

    fig = P.figure()
    ax = P.subplot(111)
    ax.minorticks_on()
    ax.grid()
    ax.set_xlabel('Edep, MeV')
    ax.set_ylabel(r'$\sigma/E$')
    ax.set_title('Energy resolution')
    ax.plot(edep_input, eres_sigma_rel(edep_input), '-')
    savefig(opts.output, suffix='_eres_rel')

    fig = P.figure()
    ax = P.subplot(111)
    ax.minorticks_on()
    ax.grid()
    ax.set_xlabel('Edep, MeV')
    ax.set_ylabel(r'$\sigma$')
    ax.set_title('Energy resolution')
    ax.plot(edep_input, eres_sigma_abs(edep_input), '-')
    savefig(opts.output, suffix='_eres_abs')

    fig = P.figure()
    ax = P.subplot(111)
    ax.minorticks_on()
    ax.grid()
    ax.set_xlabel('Enu, MeV')
    ax.set_ylabel('Psur')
    ax.set_title('Survival probability')
    op_sum.single().plot_vs(enu.single(), label='full')
    ax.plot(enu_input[psur_minima], psur[psur_minima], 'o',
            markerfacecolor='none', label='minima')
    ax.plot(enu_input[psur_maxima], psur[psur_maxima], 'o',
            markerfacecolor='none', label='maxima')
    savefig(opts.output, suffix='_psur_enu')

    fig = P.figure()
    ax = P.subplot(111)
    ax.minorticks_on()
    ax.grid()
    ax.set_xlabel('Edep, MeV')
    ax.set_ylabel('Psur')
    ax.set_title('Survival probability')
    op_sum.single().plot_vs(edep_input, label='true')
    op_sum.single().plot_vs(edep_lsnl, label='with LSNL')
    ax.legend()
    savefig(opts.output, suffix='_psur_edep')

    fig = P.figure()
    ax = P.subplot(111)
    ax.minorticks_on()
    ax.grid()
    ax.set_xlabel('Enu, MeV')
    ax.set_ylabel('Dist, MeV')
    ax.set_title('Nearest peaks distance')
    ax.plot(psur_ext_x_enu, psur_ext_y_enu, 'o-', markerfacecolor='none')
    savefig(opts.output, suffix='_dist_enu')

    fig = P.figure()
    ax = P.subplot(111)
    ax.minorticks_on()
    ax.grid()
    ax.set_xlabel('Edep, MeV')
    ax.set_ylabel('Dist, MeV')
    ax.set_title('Nearest peaks distance')
    ax.plot(psur_ext_x_edep, psur_ext_y_edep, '-',
            markerfacecolor='none', label='true')
    ax.plot(psur_ext_x_edep_lsnl, psur_ext_y_edep_lsnl, '-',
            markerfacecolor='none', label='with LSNL')
    ax.plot(edep_input, eres_sigma_abs(edep_input), '-',
            markerfacecolor='none', label=r'$\sigma$')
    ax.legend(loc='upper left')
    savefig(opts.output, suffix='_dist')

    fig = P.figure()
    ax = P.subplot(111)
    ax.minorticks_on()
    ax.grid()
    ax.set_xlabel('Edep, MeV')
    ax.set_ylabel(r'Dist/$\sigma$')
    ax.set_title('Resolution ability')
    # Peak distance in units of the energy resolution sigma.
    x1, y1 = psur_ext_x_edep, psur_ext_y_edep / eres_sigma_abs(psur_ext_x_edep)
    x2, y2 = psur_ext_x_edep_lsnl, psur_ext_y_edep_lsnl / eres_sigma_abs(
        psur_ext_x_edep_lsnl)
    ax.plot(x1, y1, '-', markerfacecolor='none', label='true')
    ax.plot(x2, y2, '-', markerfacecolor='none', label='with LSNL')
    ax.legend(loc='upper left')
    savefig(opts.output, suffix='_ability')

    ax.set_xlim(3, 4)
    ax.set_ylim(5, 8)
    savefig(opts.output, suffix='_ability_zoom')

    fig = P.figure()
    ax = P.subplot(111)
    ax.minorticks_on()
    ax.grid()
    ax.set_xlabel('Edep, MeV')
    ax.set_ylabel(r'Dist/$\sigma$')
    ax.set_title('Resolution ability difference (quenching-true)')
    # Smooth the difference of the two resolution-ability curves.
    y2fcn = interp1d(x2, y2)
    y2_on_x1 = y2fcn(x1)
    diff = y2_on_x1 - y1
    from scipy.signal import savgol_filter
    diff = savgol_filter(diff, 21, 3)
    ax.plot(x1, diff)
    savefig(opts.output, suffix='_ability_diff')

    if pdfpages:
        pdfpages.__exit__(None, None, None)
        print('Write output figure to', pdfpagesfilename)

    savegraph(quench.histoffset.histedges.points_truncated, opts.graph,
              namespace=ns)

    if opts.show:
        P.show()
def build(self):
    """Build the offequilibrium correction graph for each (isotope, reactor).

    For isotopes with raw offeq spectra the correction is interpolated,
    multiplied by a snapshot of the input spectrum and combined with the
    uncorrected spectrum via a WeightedSum weighted by the uncertain
    `offeq_scale` parameter. Isotopes without data (U238) pass the
    spectrum through unchanged.
    """
    for idx in self.nidx.iterate():
        # Index order is not fixed; detect which field is the isotope.
        if 'isotope' in idx.names()[0]:
            iso, reac = idx.current_values()
        else:
            reac, iso = idx.current_values()
        name = "offeq_correction." + idx.current_format()
        try:
            _offeq_energy, _offeq_spectra = list(
                map(C.Points, self.offeq_raw_spectra[iso]))
            _offeq_energy.points.setLabel(
                "Original energies for offeq spectrum of {}".format(iso))
        except KeyError:
            # U238 doesn't have offequilibrium correction so just pass 1.
            if iso != 'U238':
                raise
            passthrough = C.Identity(
                labels='Nominal {0} spectrum in {1} reactor'.format(iso, reac))
            self.context.objects[name] = passthrough
            dummy = C.Identity()  # just to serve 1 input
            self.set_input('offeq_correction', idx, dummy.single_input(),
                           argument_number=0)
            self.set_input('offeq_correction', idx,
                           passthrough.single_input(), argument_number=1)
            self.set_output("offeq_correction", idx, passthrough.single())
            continue
        # Piecewise-linear interpolation of the correction, clamped
        # (constant) outside the tabulated energy range.
        offeq_spectra = C.InterpLinear(
            labels='Correction for {} spectra'.format(iso))
        offeq_spectra.set_overflow_strategy(
            R.GNA.Interpolation.Strategy.Constant)
        offeq_spectra.set_underflow_strategy(
            R.GNA.Interpolation.Strategy.Constant)

        insegment = offeq_spectra.transformations.front()
        insegment.setLabel("Offequilibrium segments")
        interpolator_trans = offeq_spectra.transformations.back()
        interpolator_trans.setLabel(
            "Interpolated spectral correction for {}".format(iso))

        passthrough = C.Identity(
            labels="Nominal {0} spectrum in {1} reactor".format(iso, reac))

        _offeq_energy >> (insegment.edges, interpolator_trans.x)
        _offeq_spectra >> interpolator_trans.y
        # Enu
        self.set_input('offeq_correction', idx,
                       (insegment.points, interpolator_trans.newx),
                       argument_number=0)
        # Anue spectra
        self.set_input('offeq_correction', idx,
                       (passthrough.single_input()), argument_number=1)

        par_name = "offeq_scale"
        self.reqparameter(par_name, idx, central=1., relsigma=0.3,
                          labels="Offequilibrium norm for reactor {1} and iso "
                          "{0}".format(iso, reac))
        # Fixed unit weight pairing the nominal spectrum in the sum below.
        self.reqparameter("dummy_scale", idx, central=1, fixed=True,
                          labels="Dummy weight for reactor {1} and iso "
                          "{0} for offeq correction".format(iso, reac))

        # Snapshot freezes the input spectrum so the correction term is
        # built from a fixed copy rather than a live dependency.
        snap = C.Snapshot(passthrough.single(),
                          labels='Snapshot of {} spectra in reac {}'.format(
                              iso, reac))
        prod = C.Product(labels='Product of initial {} spectra and '
                         'offequilibrium corr in {} reactor'.format(iso, reac))
        prod.multiply(interpolator_trans.single())
        prod.multiply(snap.single())

        # corrected = 1*nominal + offeq_scale*(correction*nominal_snapshot)
        outputs = [passthrough.single(), prod.single()]
        weights = ['.'.join(("dummy_scale", idx.current_format())),
                   '.'.join((par_name, idx.current_format()))]
        with self.namespace:
            final_sum = C.WeightedSum(
                weights, outputs,
                labels='Corrected to offequilibrium '
                '{0} spectrum in {1} reactor'.format(iso, reac))

        self.context.objects[name] = final_sum
        self.set_output("offeq_correction", idx, final_sum.single())
# Bin edges along y over one full period; widths kept for reference.
y_nbins = 30
y_edges = np.linspace(0, 2.0 * np.pi, y_nbins + 1, dtype='d')
y_widths = y_edges[1:] - y_edges[:-1]
y_orders = 3  # Gauss-Legendre integration order per bin along y

# Initialize histogram
hist = C.Histogram2d(x_edges, y_edges)

# Initialize integrator
integrator = R.Integrator2GL(x_nbins, x_orders, y_nbins, y_orders)
integrator.points.edges(hist.hist.hist)
int_points = integrator.points

# Create integrable: sin(a*x + b*y), with the argument built as a
# weighted sum of the x/y sample meshes (weights 'a' and 'b').
arg_t = C.WeightedSum(['a', 'b'], [int_points.xmesh, int_points.ymesh])
sin_t = R.Sin(arg_t.sum.sum)

# integrator.add_input(sint_t.sin.result)
integrator.hist.f(sin_t.sin.result)

X, Y = integrator.points.xmesh.data(), integrator.points.ymesh.data()

integrator.print()
print()

# Label transformations
hist.hist.setLabel('Input histogram\n(bins definition)')
integrator.points.setLabel('Sampler\n(Gauss-Legendre)')
integrator.hist.setLabel('Integrator\n(convolution)')
sin_t.sin.setLabel('sin(ax+by)')
arg_t.sum.setLabel('ax+by')
def main(opts):
    """Build the Birks-Cherenkov energy model, scan the survival
    probability over the mass-splitting grid (via Data) and produce
    diagnostic plots for NO/IO peak-distance comparisons.

    opts: parsed command-line options; uses opts.output, opts.individual,
    opts.show and opts.show_all (parser defined elsewhere in the file).
    """
    global savefig  # rebound below when a multipage PDF is requested
    if opts.output and opts.output.endswith('.pdf'):
        # Collect all figures into a single multipage PDF; the global
        # savefig is shadowed by a wrapper that also writes to the PDF.
        pdfpages = PdfPages(opts.output)
        pdfpagesfilename = opts.output
        savefig_old = savefig
        pdf = pdfpages.__enter__()

        def savefig(*args, **kwargs):
            close = kwargs.pop('close', False)
            if opts.individual and args and args[0]:
                savefig_old(*args, **kwargs)
            pdf.savefig()
            if close:
                P.close()
    else:
        pdf = None
        pdfpagesfilename = ''
        pdfpages = None

    cfg = NestedDict(
        bundle = dict(
            name='energy_nonlinearity_birks_cherenkov',
            version='v01',
            nidx=[ ('r', 'reference', ['R1', 'R2']) ],
            major=[],
            ),
        stopping_power='data/data_juno/energy_model/2019_birks_cherenkov_v01/stoppingpower.txt',
        annihilation_electrons=dict(
            file='data/data_juno/energy_model/2019_birks_cherenkov_v01/hgamma2e.root',
            histogram='hgamma2e_1KeV',
            scale=1.0/50000  # events simulated
            ),
        pars = uncertaindict(
            [
                ('birks.Kb0', (1.0, 'fixed')),
                ('birks.Kb1', (15.2e-3, 0.1776)),
                # ('birks.Kb2', (0.0, 'fixed')),
                ("cherenkov.E_0", (0.165, 'fixed')),
                ("cherenkov.p0", ( -7.26624e+00, 'fixed')),
                ("cherenkov.p1", ( 1.72463e+01, 'fixed')),
                ("cherenkov.p2", ( -2.18044e+01, 'fixed')),
                ("cherenkov.p3", ( 1.44731e+01, 'fixed')),
                ("cherenkov.p4", ( 3.22121e-02, 'fixed')),
                ("Npescint", (1341.38, 0.0059)),
                ("kC", (0.5, 0.4737)),
                ("normalizationEnergy", (11.9999999, 'fixed'))
            ],
            mode='relative'
            ),
        integration_order = 2,
        correlations_pars = [ 'birks.Kb1', 'Npescint', 'kC' ],
        correlations = [ 1.0, 0.94, -0.97,
                         0.94, 1.0, -0.985,
                         -0.97, -0.985, 1.0 ],
        fill_matrix=True,
        labels = dict(
            normalizationEnergy = 'Pessimistic'
            ),
        )
    ns = env.globalns('energy')
    quench = execute_bundle(cfg, namespace=ns)
    print()
    normE = ns['normalizationEnergy'].value()

    #
    # Input bins
    #
    evis_edges_full_input = N.arange(0.0, 15.0+1.e-6, 0.001)
    evis_edges_full_hist = C.Histogram(evis_edges_full_input, labels='Evis bin edges')
    evis_edges_full_hist >> quench.context.inputs.evis_edges_hist['00']

    #
    # Python energy model interpolation function
    #
    lsnl_x = quench.histoffset.histedges.points_truncated.data()
    lsnl_y = quench.positron_model_relative.single().data()
    lsnl_fcn = interp1d(lsnl_x, lsnl_y, kind='quadratic', bounds_error=False, fill_value='extrapolate')

    #
    # Energy resolution
    #
    def eres_sigma_rel(edep):
        # relative resolution: 3%/sqrt(E)
        return 0.03/edep**0.5

    def eres_sigma_abs(edep):
        # absolute resolution: 3%*sqrt(E)
        return 0.03*edep**0.5

    #
    # Oscprob
    #
    baselinename='L'
    ns = env.ns("oscprob")
    import gna.parameters.oscillation
    gna.parameters.oscillation.reqparameters(ns)
    ns.defparameter(baselinename, central=52.0, fixed=True, label='Baseline, km')

    #
    # Define energy range
    #
    data = Data(N.arange(1.8, 15.0, 0.001), lsnl_fcn=lsnl_fcn, eres_fcn=eres_sigma_abs)

    # Initialize oscillation variables
    enu = C.Points(data.enu, labels='Neutrino energy, MeV')
    component_names = C.stdvector(['comp0', 'comp12', 'comp13', 'comp23'])
    with ns:
        R.OscProbPMNSExpressions(R.Neutrino.ae(), R.Neutrino.ae(), component_names, ns=ns)
        labels=['Oscillation probability|%s'%s for s in ('component 12', 'component 13', 'component 23', 'full', 'probsum')]
        oscprob = R.OscProbPMNS(R.Neutrino.ae(), R.Neutrino.ae(), baselinename, labels=labels)

    enu >> oscprob.full_osc_prob.Enu
    enu >> (oscprob.comp12.Enu, oscprob.comp13.Enu, oscprob.comp23.Enu)

    # Constant 'comp0' component, shaped like enu.
    unity = C.FillLike(1, labels='Unity')
    enu >> unity.fill.inputs[0]
    with ns:
        op_sum = C.WeightedSum(component_names, [unity.fill.outputs[0], oscprob.comp12.comp12, oscprob.comp13.comp13, oscprob.comp23.comp23], labels='Oscillation probability sum')

    oscprob.printtransformations()
    env.globalns.printparameters(labels=True)

    # Hand the oscillation parameters and the Psur function to the
    # Data scanner, then run the scan.
    ns = env.globalns('oscprob')
    data.set_dm_par(ns['DeltaMSqEE'])
    data.set_nmo_par(ns['Alpha'])
    data.set_psur_fcn(op_sum.single().data)
    data.build()

    #
    # Plotting
    #
    xmax = 12.0

    #
    # Positron non-linearity
    #
    fig = P.figure()
    ax = P.subplot(111, xlabel='Edep, MeV', ylabel='Evis/Edep', title='Positron energy nonlineairty')
    ax.minorticks_on(); ax.grid()
    quench.positron_model_relative.single().plot_vs(quench.histoffset.histedges.points_truncated, label='definition range')
    quench.positron_model_relative_full.plot_vs(quench.histoffset.histedges.points, '--', linewidth=1., label='full range', zorder=0.5)
    ax.vlines(normE, 0.0, 1.0, linestyle=':')
    ax.legend(loc='lower right')
    ax.set_ylim(0.8, 1.05)
    ax.set_xlim(0.0, xmax)
    savefig(opts.output, suffix='_total_relative', close=not opts.show_all)

    #
    # Positron non-linearity derivative
    #
    fig = P.figure()
    ax = P.subplot(111, xlabel='Edep, MeV', ylabel='dEvis/dEdep', title='Positron energy nonlineairty derivative')
    ax.minorticks_on(); ax.grid()
    # Finite-difference derivative of the visible energy curve.
    e = quench.histoffset.histedges.points_truncated.single().data()
    f = quench.positron_model_relative.single().data()*e
    ec = (e[1:] + e[:-1])*0.5
    df = (f[1:] - f[:-1])
    dedf = (e[1:] - e[:-1])/df
    ax.plot(ec, dedf)
    ax.legend(loc='lower right')
    ax.set_ylim(0.975, 1.01)
    ax.set_xlim(0.0, xmax)
    savefig(opts.output, suffix='_total_derivative', close=not opts.show_all)

    #
    # Positron non-linearity effect
    #
    fig = P.figure()
    ax = P.subplot(111, xlabel='Edep, MeV', ylabel='Evis/Edep', title='Positron energy nonlineairty')
    ax.minorticks_on(); ax.grid()
    es = N.arange(1.0, 3.1, 0.5)
    esmod = es*lsnl_fcn(es)
    esmod_shifted = esmod*(es[-1]/esmod[-1])
    ax.vlines(es, 0.0, 1.0, linestyle='--', linewidth=2, alpha=0.5, color='green', label='Edep')
    ax.vlines(esmod, 0.0, 1.0, linestyle='-', color='red', label='Edep quenched')
    ax.legend()
    savefig(opts.output, suffix='_quenching_effect_0')
    ax.vlines(esmod_shifted, 0.0, 1.0, linestyle=':', color='blue', label='Edep quenched, scaled')
    ax.legend()
    savefig(opts.output, suffix='_quenching_effect_1', close=not opts.show_all)

    #
    # Energy resolution
    #
    fig = P.figure()
    ax = P.subplot(111, xlabel='Edep, MeV', ylabel=r'$\sigma/E$', title='Energy resolution')
    ax.minorticks_on(); ax.grid()
    ax.plot(data.edep, eres_sigma_rel(data.edep), '-')
    ax.set_xlim(0.0, xmax)
    savefig(opts.output, suffix='_eres_rel', close=not opts.show_all)

    #
    # Energy resolution
    #
    fig = P.figure()
    ax = P.subplot(111, xlabel= 'Edep, MeV', ylabel= r'$\sigma$', title='Energy resolution')
    ax.minorticks_on(); ax.grid()
    ax.plot(data.edep, eres_sigma_abs(data.edep), '-')
    ax.set_xlim(0.0, xmax)
    savefig(opts.output, suffix='_eres_abs', close=not opts.show_all)

    #
    # Survival probability vs Enu
    #
    fig = P.figure()
    ax = P.subplot(111, xlabel='Enu, MeV', ylabel='Psur', title='Survival probability')
    ax.minorticks_on(); ax.grid()
    ax.plot(data.enu, data.data_no.psur[data.dmmid_idx], label=r'full NO')
    ax.plot(data.enu, data.data_io.psur[data.dmmid_idx], label=r'full IO')
    ax.plot(data.data_no.data_enu.psur_e[data.dmmid_idx], data.data_no.data_enu.psur[data.dmmid_idx], '^', markerfacecolor='none')
    ax.legend()
    ax.set_xlim(0.0, xmax)
    savefig(opts.output, suffix='_psur_enu')
    ax.set_xlim(2.0, 4.5)
    savefig(opts.output, suffix='_psur_enu_zoom', close=not opts.show_all)

    #
    # Survival probability vs Edep
    #
    fig = P.figure()
    ax = P.subplot(111, xlabel='Edep, MeV', ylabel='Psur', title='Survival probability')
    ax.minorticks_on(); ax.grid()
    ax.plot(data.edep, data.data_no.psur[data.dmmid_idx], label=r'full NO')
    ax.plot(data.edep, data.data_io.psur[data.dmmid_idx], label=r'full IO')
    ax.plot(data.data_no.data_edep.psur_e[data.dmmid_idx], data.data_no.data_edep.psur[data.dmmid_idx], '^', markerfacecolor='none')
    ax.legend()
    ax.set_xlim(0.0, xmax)
    savefig(opts.output, suffix='_psur_edep')
    ax.set_xlim(1.2, 3.7)
    savefig(opts.output, suffix='_psur_edep_zoom', close=not opts.show_all)

    #
    # Survival probability vs Edep_lsnl
    #
    fig = P.figure()
    ax = P.subplot(111, xlabel='Edep quenched, MeV', ylabel='Psur', title='Survival probability')
    ax.minorticks_on(); ax.grid()
    ax.plot(data.edep_lsnl, data.data_no.psur[data.dmmid_idx], label=r'full NO')
    ax.plot(data.edep_lsnl, data.data_io.psur[data.dmmid_idx], label=r'full IO')
    ax.plot(data.data_no.data_edep_lsnl.psur_e[data.dmmid_idx], data.data_no.data_edep_lsnl.psur[data.dmmid_idx], '^', markerfacecolor='none')
    ax.legend()
    ax.set_xlim(0.0, xmax)
    savefig(opts.output, suffix='_psur_edep_lsnl')
    ax.set_xlim(1.2, 3.7)
    savefig(opts.output, suffix='_psur_edep_lsnl_zoom', close=not opts.show_all)

    #
    # Distance between nearest peaks vs Enu, single
    #
    fig = P.figure()
    ax = P.subplot(111, xlabel='Enu, MeV', ylabel='Dist, MeV', title='Nearest peaks distance')
    ax.minorticks_on(); ax.grid()
    ax.plot(data.data_no.data_enu.diff_x[data.dmmid_idx], data.data_no.data_enu.diff[data.dmmid_idx], label=r'NO')
    ax.plot(data.data_io.data_enu.diff_x[data.dmmid_idx], data.data_io.data_enu.diff[data.dmmid_idx], label=r'IO')
    ax.legend()
    ax.set_xlim(0.0, xmax)
    ax.set_ylim(bottom=0.0)
    savefig(opts.output, suffix='_dist_enu')
    ax.set_xlim(2.0, 5.0)
    ax.set_ylim(top=0.5)
    savefig(opts.output, suffix='_dist_enu_zoom', close=not opts.show_all)

    #
    # Distance between nearest peaks vs Edep, single
    #
    fig = P.figure()
    ax = P.subplot(111, xlabel='Edep, MeV', ylabel='Dist, MeV', title='Nearest peaks distance')
    ax.minorticks_on(); ax.grid()
    ax.plot(data.data_no.data_edep.diff_x[data.dmmid_idx], data.data_no.data_edep.diff[data.dmmid_idx], label=r'NO')
    ax.plot(data.data_io.data_edep.diff_x[data.dmmid_idx], data.data_io.data_edep.diff[data.dmmid_idx], label=r'IO')
    ax.legend()
    ax.set_xlim(0.0, xmax)
    ax.set_ylim(bottom=0.0)
    savefig(opts.output, suffix='_dist_edep')
    ax.set_xlim(1.2, 4.2)
    ax.set_ylim(top=0.5)
    savefig(opts.output, suffix='_dist_edep_zoom', close=not opts.show_all)

    #
    # Distance between nearest peaks vs Edep, single
    #
    fig = P.figure()
    ax = P.subplot(111, xlabel='Edep quenched, MeV', ylabel='Dist, MeV', title='Nearest peaks distance')
    ax.minorticks_on(); ax.grid()
    ax.plot(data.data_no.data_edep_lsnl.diff_x[data.dmmid_idx], data.data_no.data_edep_lsnl.diff[data.dmmid_idx], label=r'NO')
    ax.plot(data.data_io.data_edep_lsnl.diff_x[data.dmmid_idx], data.data_io.data_edep_lsnl.diff[data.dmmid_idx], label=r'IO')
    ax.legend()
    ax.set_xlim(0.0, xmax)
    ax.set_ylim(bottom=0.0)
    savefig(opts.output, suffix='_dist_edep_lsnl')
    ax.set_xlim(1.2, 4.2)
    ax.set_ylim(top=0.5)
    savefig(opts.output, suffix='_dist_edep_lsnl_zoom')

    # Quadratic fit of the NO peak-distance curve.
    poly = N.polynomial.polynomial.Polynomial([0, 1, 0])
    x = data.data_no.data_edep_lsnl.diff_x[data.dmmid_idx]
    pf = poly.fit(x, data.data_no.data_edep_lsnl.diff[data.dmmid_idx], 2)
    print(pf)
    ax.plot(x, pf(x), label=r'NO fit')
    ax.legend()
    savefig(opts.output, suffix='_dist_edep_lsnl_fit', close=not opts.show_all)

    #
    # Distance between nearest peaks vs Edep, multiple
    #
    fig = P.figure()
    ax = P.subplot(111, xlabel='Edep quenched, MeV', ylabel='Dist, MeV', title='Nearest peaks distance')
    ax.minorticks_on(); ax.grid()
    ax.plot(data.data_no.data_edep_lsnl.diff_x[data.dmmid_idx], data.data_no.data_edep_lsnl.diff[data.dmmid_idx], label=r'NO')
    ax.plot(data.data_io.data_edep_lsnl.diff_x[data.dmmid_idx], data.data_io.data_edep_lsnl.diff[data.dmmid_idx], '--', label=r'IO')
    # A few off-center mass-splitting slices for comparison.
    for idx in (0, 5, 15, 20):
        ax.plot(data.data_io.data_edep_lsnl.diff_x[idx], data.data_io.data_edep_lsnl.diff[idx], '--')
    ax.legend()
    ax.set_xlim(0.0, xmax)
    savefig(opts.output, suffix='_dist_edep_lsnl_multi', close=not opts.show_all)

    #
    # Distance between nearest peaks difference
    #
    fig = P.figure()
    ax = P.subplot(111, xlabel='Edep, MeV', ylabel='Dist(IO) - Dist(NO), MeV', title='Nearest peaks distance diff: IO-NO')
    ax.minorticks_on(); ax.grid()
    ax.plot(data.e, data.diffs.edep[data.dmmid_idx], '-', markerfacecolor='none', label='Edep')
    ax.plot(data.e, data.diffs.edep_lsnl[data.dmmid_idx], '-', markerfacecolor='none', label='Edep quenched')
    ax.plot(data.e, data.diffs.enu[data.dmmid_idx], '-', markerfacecolor='none', label='Enu')
    ax.legend()
    savefig(opts.output, suffix='_dist_diff')
    ax.plot(data.e, data.eres, '-', markerfacecolor='none', label='Resolution $\\sigma$')
    ax.legend()
    savefig(opts.output, suffix='_dist_diff_1')

    #
    # Distance between nearest peaks difference relative to sigma
    #
    fig = P.figure()
    ax = P.subplot(111, xlabel='Edep, MeV', ylabel='(Dist(IO) - Dist(NO))/$\\sigma$', title='Nearest peaks distance diff: IO-NO')
    ax.minorticks_on(); ax.grid()
    ax.plot(data.e, data.diffs_rel.edep[data.dmmid_idx], '-', markerfacecolor='none', label='Edep')
    ax.plot(data.e, data.diffs_rel.edep_lsnl[data.dmmid_idx], '-', markerfacecolor='none', label='Edep quenched')
    # Mark the maxima of both curves and the shift between them.
    i_edep=N.argmax(data.diffs_rel.edep[data.dmmid_idx])
    i_edep_lsnl=N.argmax(data.diffs_rel.edep_lsnl[data.dmmid_idx])
    ediff = data.e[i_edep] - data.e[i_edep_lsnl]
    ax.axvline(data.e[i_edep], linestyle='dashed', label='Max location: %.3f MeV'%(data.e[i_edep]))
    ax.axvline(data.e[i_edep_lsnl], linestyle='dashed', label='Max location: %.3f MeV'%(data.e[i_edep_lsnl]))
    ax.axvspan(data.e[i_edep], data.e[i_edep_lsnl], alpha=0.2, label='Max location diff: %.3f MeV'%(ediff))
    ax.legend()
    savefig(opts.output, suffix='_dist_diff_rel')

    #
    # Distance between nearest peaks difference relative to sigma
    #
    fig = P.figure()
    ax = P.subplot(111, xlabel='Edep, MeV', ylabel='(Dist(IO) - Dist(NO))/$\\sigma$', title='Nearest peaks distance diff: IO-NO')
    ax.minorticks_on(); ax.grid()
    ledep = ax.plot(data.e, data.diffs_rel.edep[data.dmmid_idx], '--', markerfacecolor='none', label='Edep')[0]
    lquench = ax.plot(data.e, data.diffs_rel.edep_lsnl[data.dmmid_idx], '-', color=ledep.get_color(), markerfacecolor='none', label='Edep quenched')[0]
    kwargs=dict(alpha=0.8, linewidth=1.5, markerfacecolor='none')
    # Same color pairs dashed/solid for the off-center slices.
    for idx in (0, 5, 15, 20):
        l = ax.plot(data.e, data.diffs_rel.edep[idx], '--', **kwargs)[0]
        ax.plot(data.e, data.diffs_rel.edep_lsnl[idx], '-', color=l.get_color(), **kwargs)
    ax.legend()
    savefig(opts.output, suffix='_dist_diff_rel_multi', close=not opts.show_all)

    #
    # Distance between nearest peaks difference relative to sigma
    #
    fig = P.figure()
    ax = P.subplot(111, ylabel=r'$\Delta m^2_\mathrm{ee}$', xlabel='Edep quenched, MeV', title='Nearest peaks distance diff: IO-NO/$\\sigma$')
    ax.minorticks_on(); ax.grid()
    formatter = ax.yaxis.get_major_formatter()
    formatter.set_useOffset(False)
    formatter.set_powerlimits((-2,2))
    formatter.useMathText=True
    c = ax.pcolormesh(data.mesh_e.T, data.mesh_dm.T, data.diffs_rel.edep_lsnl.T)
    from mpl_tools.helpers import add_colorbar
    add_colorbar(c, rasterized=True)
    c.set_rasterized(True)
    savefig(opts.output, suffix='_dist_diff_rel_heatmap')

    if pdfpages:
        pdfpages.__exit__(None,None,None)
        print('Write output figure to', pdfpagesfilename)

    # savegraph(quench.histoffset.histedges.points_truncated, opts.graph, namespace=ns)

    if opts.show or opts.show_all:
        P.show()
def build(self) -> None:
    """Assemble the LSNL (liquid-scintillator nonlinearity) energy model graph.

    Construction order (each step feeds the next through `>>` bindings):
      1. offset/threshold bin edges (Evis and Te+ views);
      2. Birks quenching: stopping-power ratio, GL integration, running sum;
      3. Cherenkov contribution;
      4. combined electron response (weighted sum of scintillation + Cherenkov);
      5. annihilation-gamma (2 x 511 keV) positron offset via a low-E view;
      6. total positron model, absolute (scaled) and relative forms,
         both extended to the full histogram range via ViewRear;
      7. histogram nonlinearity smearing with one input/output per index.

    Registers open inputs/outputs: 'evis_edges_hist', 'lsnl'.
    """
    with entryContext(subgraph='LSNL'):
        self.init_data()

        #
        # Initialize bin edges: full range, truncated, threshold and Te+ (offset) views
        #
        self.histoffset = C.HistEdgesOffset(
            self.doubleme,
            labels='Offset/threshold bin edges (Evis, Te+)')
        histedges = self.histoffset.histedges

        # Declare open input for the visible-energy edges histogram
        self.set_input('evis_edges_hist', None, histedges.hist_in, argument_number=0)

        histedges.points.setLabel('Evis (full range)')
        histedges.points_truncated.setLabel('Evis (truncated)')
        histedges.points_threshold.setLabel('Evis (threshold)')
        histedges.points_offset.setLabel('Te+')
        histedges.hist_truncated.setLabel('Hist Evis (truncated)')
        histedges.hist_threshold.setLabel('Hist Evis (threshold)')
        histedges.hist_offset.setLabel('Hist Te+')

        #
        # Birks model integration: dE/dx ratio interpolated on GL sample points,
        # integrated over Te and accumulated into a running (partial) sum
        #
        birks_e_input, birks_quenching_input = self.stopping_power['e'], self.stopping_power['dedx']
        self.birks_e_p, self.birks_quenching_p = C.Points(
            birks_e_input, labels='Te (input)'), C.Points(
            birks_quenching_input, labels='Stopping power (dE/dx)')

        birksns = self.namespace('birks')
        with birksns:
            # PolyRatio built from all parameters stored in the 'birks' namespace
            self.birks_integrand_raw = C.PolyRatio(
                [], list(sorted(birksns.storage.keys())),
                labels="Birk's integrand")
        self.birks_quenching_p >> self.birks_integrand_raw.polyratio.points

        # -2*m_e offset point (Te+ = Evis - 2*m_e)
        self.doubleemass_point = C.Points([-self.doubleme], labels='2me offset')

        self.integrator_ekin = C.IntegratorGL(
            self.histoffset.histedges.hist_offset,
            self.cfg.integration_order,
            labels=('Te sampler (GL)', "Birk's integrator (GL)"))

        # Log-x interpolation of the integrand onto the GL sample points
        self.birks_integrand_interpolator = C.InterpLogx(
            self.birks_e_p,
            self.integrator_ekin.points.x,
            labels=("Birk's InSegment", "Birk's interpolator"))
        self.birks_integrand_interpolated = self.birks_integrand_interpolator.add_input(
            self.birks_integrand_raw.polyratio.ratio)

        self.birks_integral = self.integrator_ekin.add_input(
            self.birks_integrand_interpolated)

        # Cumulative sum of per-bin integrals -> visible energy
        self.birks_accumulator = C.PartialSum(0., labels="Birk's Evis|[MeV]")
        self.birks_integral >> self.birks_accumulator.reduction

        #
        # Cherenkov model
        #
        with self.namespace('cherenkov'):
            self.cherenkov = C.Cherenkov_Borexino(labels='Npe Cherenkov')
        self.histoffset.histedges.points_offset >> self.cherenkov.cherenkov

        #
        # Electron energy model: Npe = kC*Cherenkov + Npescint*Birks
        # NOTE(review): 'responce' is a typo, kept verbatim — labels may be
        # matched by text elsewhere.
        #
        with self.namespace:
            self.electron_model = C.WeightedSum(
                ['kC', 'Npescint'],
                [self.cherenkov.cherenkov.ch_npe,
                 self.birks_accumulator.reduction.out],
                labels='Npe: electron responce')

        #
        # 2 x 511 keV gamma model: electron response restricted to a low-E view,
        # interpolated onto annihilation-electron energy centers and convolved
        # with the annihilation-electron weights
        #
        self.annihilation_electrons_centers = C.Points(
            self.annihilation_electrons_centers_input,
            labels='Annihilation gamma E centers')
        self.annihilation_electrons_p = C.Points(
            self.annihilation_electrons_p_input,
            labels='Annihilation gamma weights')

        self.view_lowe = C.ViewHistBased(
            self.histoffset.histedges.hist_offset,
            0.0,
            self.annihilation_electrons_edges_input[-1],
            labels=('Low E indices', 'Low E view'))
        self.ekin_edges_lowe = self.view_lowe.add_input(
            self.histoffset.histedges.points_offset)
        self.electron_model_lowe = self.view_lowe.add_input(
            self.electron_model.single())
        self.ekin_edges_lowe.setLabel('Te+ edges (low Te view)')
        self.electron_model_lowe.setLabel('Npe: electron responce (low Te view)')

        self.electron_model_lowe_interpolator = C.InterpLinear(
            self.ekin_edges_lowe,
            self.annihilation_electrons_centers,
            labels=('Annihilation E InSegment', 'Annihilation gamma interpolator'))
        self.electron_model_lowe_interpolated = self.electron_model_lowe_interpolator.add_input(
            self.electron_model_lowe)

        with self.namespace:
            self.npe_positron_offset = C.Convolution(
                'ngamma', labels='e+e- annihilation Evis [MeV]')
        self.electron_model_lowe_interpolated >> self.npe_positron_offset.normconvolution.fcn
        self.annihilation_electrons_p >> self.npe_positron_offset.normconvolution.weights

        #
        # Total positron model: electron response + annihilation offset,
        # scaled so it passes through the normalization energy fixed point
        #
        self.positron_model = C.SumBroadcast(
            outputs=[self.electron_model.sum.sum,
                     self.npe_positron_offset.normconvolution.result],
            labels='Npe: positron responce')

        self.positron_model_scaled = C.FixedPointScale(
            self.histoffset.histedges.points_truncated,
            self.namespace['normalizationEnergy'],
            labels=('Fixed point index', 'Positron energy model|Evis, MeV'))
        # Rebind: from here on positron_model_scaled refers to the output
        self.positron_model_scaled = self.positron_model_scaled.add_input(
            self.positron_model.sum.outputs[0])

        # Extend the truncated model back to the full range (filled with -1 below threshold)
        self.positron_model_scaled_full_view = C.ViewRear(
            -1.0, labels='Positron Energy nonlinearity|full range')
        self.positron_model_scaled_full_view.determineOffset(
            self.histoffset.histedges.hist,
            self.histoffset.histedges.hist_truncated, True)
        self.histoffset.histedges.points >> self.positron_model_scaled_full_view.view.original
        self.positron_model_scaled >> self.positron_model_scaled_full_view.view.rear
        self.positron_model_scaled_full = self.positron_model_scaled_full_view.view.result

        #
        # Relative positron model: Evis(model)/Evis — the nonlinearity curve itself
        #
        self.positron_model_relative = C.Ratio(
            self.positron_model_scaled,
            self.histoffset.histedges.points_truncated,
            labels='Positron energy nonlinearity')
        self.positron_model_relative_full_view = C.ViewRear(
            0.0, labels='Positron Energy nonlinearity|full range')
        self.positron_model_relative_full_view.determineOffset(
            self.histoffset.histedges.hist,
            self.histoffset.histedges.hist_truncated, True)
        self.histoffset.histedges.points >> self.positron_model_relative_full_view.view.original
        self.positron_model_relative >> self.positron_model_relative_full_view.view.rear
        self.positron_model_relative_full = self.positron_model_relative_full_view.view.result

        #
        # Hist smearing: build the nonlinearity matrix and register one
        # input/output pair per major index
        #
        self.pm_histsmear = C.HistNonlinearity(
            self.cfg.get('fill_matrix', False),
            labels=('Nonlinearity matrix', 'Nonlinearity smearing'))
        self.pm_histsmear.set_range(-0.5, 20.0)
        self.positron_model_scaled_full >> self.pm_histsmear.matrix.EdgesModified
        self.histoffset.histedges.hist >> self.pm_histsmear.matrix.Edges

        # NOTE(review): a single smearing transformation is reused for all
        # indices (see the commented-out add_transformation call); each
        # add_input() appends a new input/output pair to the same transformation,
        # and the label set in the loop is overwritten on every iteration.
        trans = self.pm_histsmear.transformations.back()
        for i, it in enumerate(self.nidx.iterate()):
            # if i:
            #     trans = self.pm_histsmear.add_transformation()
            inp = self.pm_histsmear.add_input()
            trans.setLabel(it.current_format('Nonlinearity smearing {autoindex}'))
            self.set_input('lsnl', it, inp, argument_number=0)
            self.set_output('lsnl', it, trans.outputs.back())
w3 = ns.defparameter('c', central=0.05, free=True, label='weight 3') # Print the list of parameters ns.printparameters(labels=True) print() # Create x and several functions to be added with weights x = np.linspace(-1.0*np.pi, 1.0*np.pi, 500) a1 = C.Points(np.sin(x)) a2 = C.Points(np.sin(16.0*x)) a3 = C.Points(np.cos(16.0*x)) outputs = [a.points.points for a in (a1, a2, a3)] # Initialize the WeightedSum with list of variables and list of outputs weights = ['a', 'b', 'c'] wsum = C.WeightedSum(weights, outputs) wsum.print() # Do some plotting fig = plt.figure() ax = plt.subplot( 111 ) ax.minorticks_on() ax.grid() ax.set_xlabel( 'x' ) ax.set_ylabel( 'f(x)' ) ax.set_title(r'$a\,\sin(x)+b\,\sin(16x)+c\,\cos(16x)$') label = 'a={}, b={}, c={}'.format(w1.value(), w2.value(), w3.value()) wsum.sum.sum.plot_vs(x, label=label) w2.push(0.0)
ns.defparameter('group.c', central=0.05, free=True, label='weight 3 (local)') # Print the list of parameters ns.printparameters(labels=True) print() # Create x and several functions to be added with weights x = np.linspace(-1.0 * np.pi, 1.0 * np.pi, 500) a1 = C.Points(np.sin(x)) a2 = C.Points(np.sin(16.0 * x)) a3 = C.Points(np.cos(16.0 * x)) outputs = [a.points.points for a in (a1, a2, a3)] # Initialize the WeightedSum with list of variables and list of outputs weights1 = ['a', 'b', 'group.c'] wsum1 = C.WeightedSum(weights1, outputs) weights2 = ['a', 'b', 'c'] with ns('group'): wsum2 = C.WeightedSum(weights2, outputs) wsum1.print() print() wsum2.print() print() # Do some plotting fig = plt.figure() ax = plt.subplot(111) ax.minorticks_on() ax.grid()
# Print the list of parameters ns.printparameters(labels=True) print() # Define binning and integration orders nbins = 30 x_edges = np.linspace(-1.0 * np.pi, 1.0 * np.pi, nbins + 1, dtype='d') orders = 3 # Initialize integrator integrator = R.IntegratorGL(nbins, orders, x_edges) int_points = integrator.points.x # Create integrable: a*sin(x) + b*cos(k*x) cos_arg = C.WeightedSum(['k'], [int_points]) sin_t = R.Sin(int_points) cos_t = R.Cos(cos_arg.sum.sum) fcn = C.WeightedSum(['a', 'b'], [sin_t.sin.result, cos_t.cos.result]) integrator.hist.f(fcn.sum.sum) # Print objects integrator.print() print() fcn.print() print() cos_t.print() print()
def test_oscprob():
    """Check that the full PMNS oscillation probability equals its
    component-wise weighted-sum decomposition, and plot/save both curves
    when executed outside pytest."""
    baseline_name = 'L'
    ns = env.ns("testoscprob")
    gna.parameters.oscillation.reqparameters(ns)
    ns.defparameter(baseline_name, central=2.0, fixed=True, label='Baseline, km')

    # Energy grid the probability is evaluated on
    energies = np.arange(1.0, 10.0, 0.01)
    enu = C.Points(energies, labels='Neutrino energy, MeV')

    # Oscillation expression variables; comp0 is the constant term
    component_names = C.stdvector(['comp0', 'comp12', 'comp13', 'comp23'])
    with ns:
        R.OscProbPMNSExpressions(R.Neutrino.ae(), R.Neutrino.ae(), component_names, ns=ns)

    # Oscillation probability object (transformation labels prepared up front)
    transformation_labels = [
        'Oscillation probability|%s' % s
        for s in ('component 12', 'component 13', 'component 23', 'full', 'probsum')
    ]
    with ns:
        oscprob = C.OscProbPMNS(R.Neutrino.ae(), R.Neutrino.ae(), baseline_name,
                                labels=transformation_labels)

    # Feed the energy points to every component
    enu >> oscprob.full_osc_prob.Enu
    enu >> (oscprob.comp12.Enu, oscprob.comp13.Enu, oscprob.comp23.Enu)

    # Full probability computed by a single transformation
    full_prob = oscprob.full_osc_prob.oscprob

    # The same probability assembled as a weighted sum of the components;
    # a FillLike of ones serves as the constant comp0 term
    ones = C.FillLike(1, labels='Unity')
    enu >> ones.fill.inputs[0]
    with ns:
        sum_prob = C.WeightedSum(
            component_names,
            [ones.fill.outputs[0], oscprob.comp12.comp12,
             oscprob.comp13.comp13, oscprob.comp23.comp23],
            labels='Oscillation probability sum')

    # Diagnostics
    oscprob.print()
    print()
    ns.printparameters(labels=True)
    oscprob.print(data=True, slice=slice(None, 5))

    # The two formulations must agree numerically
    assert np.allclose(full_prob.data(), sum_prob.data())

    if "pytest" in sys.modules:
        return

    # Standalone mode: draw both curves and dump the computational graph
    fig = plt.figure()
    ax = plt.subplot(111)
    ax.minorticks_on()
    ax.grid()
    ax.set_xlabel('E nu, MeV')
    ax.set_ylabel('P')
    ax.set_title('Oscillation probability')
    full_prob.plot_vs(enu.single(), '-', label='full oscprob')
    sum_prob.plot_vs(enu.single(), '--', label='oscprob (sum)')
    ax.legend(loc='lower right')
    savefig('output/test_oscprob.pdf')
    savegraph(enu, 'output/test_oscprob_graph.dot', namespace=ns)
    savegraph(enu, 'output/test_oscprob_graph.pdf', namespace=ns)
    plt.show()
def build(self) -> None:
    """Build the SNF (spent nuclear fuel) correction chain for every reactor index.

    For each index: interpolates the raw per-reactor SNF spectrum onto the
    energies supplied at runtime, multiplies the correction by a snapshot of
    the nominal spectrum, and scales the product with a per-reactor
    'snf_scale' nuisance parameter. Registers inputs/outputs under
    'snf_correction'.
    """
    for idx in self.nidx:
        reac, = idx.current_values()
        name = "snf_correction" + idx.current_format()
        # Look up the reactor-specific raw data; fall back to the 'average'
        # spectrum when the reactor has no dedicated entry.
        # NOTE(review): the original comment mentioned the off-equilibrium
        # correction and U238 — likely copied from a sibling bundle; here the
        # fallback substitutes the average SNF data, not unity.
        try:
            _snf_energy, _snf_spectra = list(
                map(C.Points, self.snf_raw_data[reac]))
        except KeyError:
            # U238 doesn't have offequilibrium correction so just pass 1.
            _snf_energy, _snf_spectra = list(
                map(C.Points, self.snf_raw_data['average']))

        _snf_energy.points.setLabel(
            "Original energies for SNF spectrum of {}".format(reac))

        # Linear interpolation with constant extrapolation on both sides
        snf_spectra = C.InterpLinear(
            labels='Correction for spectra in {}'.format(reac))
        snf_spectra.set_overflow_strategy(
            R.GNA.Interpolation.Strategy.Constant)
        snf_spectra.set_underflow_strategy(
            R.GNA.Interpolation.Strategy.Constant)

        # First transformation: segment search; last: the interpolator itself
        insegment = snf_spectra.transformations.front()
        insegment.setLabel("Segments")
        interpolator_trans = snf_spectra.transformations.back()
        interpolator_trans.setLabel(
            "Interpolated SNF correction for {}".format(reac))

        # Identity pass-through for the nominal spectrum input
        passthrough = C.Identity(
            labels="Nominal spectra for {}".format(reac))

        _snf_energy >> (insegment.edges, interpolator_trans.x)
        _snf_spectra >> interpolator_trans.y

        # Open inputs: 0 — target energies, 1 — nominal spectrum
        self.set_input('snf_correction', idx,
                       (insegment.points, interpolator_trans.newx),
                       argument_number=0)
        self.set_input('snf_correction', idx,
                       (passthrough.single_input()),
                       argument_number=1)

        # Snapshot freezes the nominal spectrum before multiplying by the correction
        snap = C.Snapshot(
            passthrough.single(),
            labels='Snapshot of nominal spectra for SNF in {}'.format(reac))
        product = C.Product(
            outputs=[snap.single(), interpolator_trans.single()],
            labels='Product of nominal spectrum to SNF correction in {}'.
            format(reac))

        # Per-reactor scale parameter with 100% relative uncertainty (relsigma=1)
        # NOTE(review): 'labels=' keyword here — sibling code uses 'label=';
        # verify reqparameter accepts both.
        par_name = "snf_scale"
        self.reqparameter(par_name, idx, central=1., relsigma=1,
                          labels="SNF norm for reactor {0}".format(reac))

        outputs = [product.single()]
        weights = ['.'.join((par_name, idx.current_format()))]
        with self.namespace:
            final_sum = C.WeightedSum(
                weights, outputs,
                labels='SNF spectrum from {0} reactor'.format(reac))

        self.context.objects[name] = final_sum
        self.set_output("snf_correction", idx, final_sum.single())
with context.set_context(manager=ndata, precision=args.precision) as manager: ns.defparameter("L", central=52,sigma=0) #kilometre gna.parameters.oscillation.reqparameters_reactor(ns, dm='23') pmnsexpr = C.OscProbPMNSExpressions(from_nu, to_nu, modecos, ns=ns) ns.materializeexpressions() ns.printparameters(labels=True) E = C.Points(E_arr, labels='Energy') with ns: oscprob = C.OscProb3(from_nu, to_nu, 'L', modecos, labels=clabels) unity = C.FillLike(1, labels='Unity') E >> (unity.fill, oscprob.comp12, oscprob.comp13, oscprob.comp23) ws = C.WeightedSum(weights, labels, labels='OscProb') unity >> ws.sum.comp0 oscprob.comp12 >> ws.sum.item12 oscprob.comp13 >> ws.sum.item13 oscprob.comp23 >> ws.sum.item23 ns.materializeexpressions() pars = tuple(par.getVariable() for (name,par) in ns.walknames()) manager.setVariables(C.stdvector(pars)) if args.graph: from gna.graphviz import savegraph savegraph(ws.sum, args.graph) name, ext = args.graph.rsplit('.', 1) savegraph(ws.sum, name+'_vars.'+ext, namespace=ns)