def test_view_03(function_name):
    """Check C.View against a VarArray over several (start, length) ranges.

    For each range a View of the VarArray is created; every parameter in
    the viewed window is bumped in turn and the view output is compared
    with the corresponding slice of the full array.
    """
    ns = env.env.globalns(function_name)
    names = []
    for i in range(12):
        name = 'val_%02i' % i
        names.append(name)
        ns.defparameter(name, central=i, fixed=True, label='Value %i' % i)

    with ns:
        vararray = C.VarArray(names)

    ranges = [(0, 3), (0, 12), (1, 3), (6, 6), (6, 1)]
    for rng in ranges:
        print('Range', rng)
        # 'length' instead of 'len' to avoid shadowing the builtin
        start, length = rng
        cnames = names[start:start + length]
        view = C.View(vararray, start, length)

        # First iteration (ichange == -1, iname == '') checks the initial
        # state; each later iteration bumps one parameter of the window.
        for ichange, iname in enumerate([''] + cnames, -1):
            if iname:
                print(' Change', ichange)
                par = ns[iname]
                par.set(par.value() + 1.0)

            expect = vararray.single().data()
            res = view.view.view.data()
            print(' Result', res)
            print(' Expect 0', expect)
            expect = expect[start:start + length]
            print(' Expect', expect)

            assert (res == expect).all()
        print()
def preinit_variables(self):
    """Create per-bin spectrum-shape normalization parameters and replace
    cfg.shape_uncertainty with a 'predefined' bundle configuration
    wrapping them in a VarArray. No-op when spectrum_unc is disabled.
    """
    if not self.opts.spectrum_unc:
        return

    spec_ns = self.namespace('spectrum')
    shape_cfg = self.cfg.shape_uncertainty
    shape_unc = shape_cfg.unc
    bin_edges = self.cfg.rebin.edges
    nbins = bin_edges.size - 1

    # bin-to-bin should take into account the number of bins it is applied to
    shape_unc.uncertainty *= ((bin_edges.size - 1.0) / shape_cfg.nbins) ** 0.5

    par_names = ['norm_bin_%04i' % ib for ib in range(nbins)]
    for ib, pname in enumerate(par_names):
        spec_ns.reqparameter(
            pname,
            cfg=shape_unc,
            label='Spectrum shape unc. final bin %i (%.03f, %.03f) MeV' % (
                ib, bin_edges[ib], bin_edges[ib + 1]))

    with spec_ns:
        shape_array = C.VarArray(par_names, labels='Spectrum shape norm')

    self.cfg.shape_uncertainty = OrderedDict(
        bundle=dict(name='predefined', version='v01'),
        name='shape_norm',
        inputs=None,
        outputs=shape_array.single(),
        unc=shape_cfg.unc,
        object=shape_array,
    )
def test_jacobian_v01():
    """Check the Jacobian transformation over a VarArray.

    Five of six parameters are appended to the Jacobian ('five' is left
    out), so the expected Jacobian is eye(6, 5). Also verifies the
    taintflag freeze/unfreeze propagation.
    """
    ns = env.globalns("test_jacobian_v01")
    names = ['zero', 'one', 'two', 'three', 'four', 'five']
    values = N.arange(len(names), dtype=context.current_precision_short())
    jac = C.Jacobian()
    for name, value in zip(names, values):
        if value:
            par = ns.defparameter(name, central=value, relsigma=0.1)
        else:
            # central == 0: relative sigma is undefined, use absolute sigma
            par = ns.defparameter(name, central=value, sigma=0.1)
        if name == 'five':
            break
        jac.append(par)
    five = par

    with ns:
        va = C.VarArray(names)
    va.vararray.points >> jac.jacobian.func

    # 'arr' rather than 'vars' to avoid shadowing the builtin
    arr = va.vararray.points.data()
    print('Python array:', values.shape, values)
    print('Array:', arr.shape, arr)

    res = jac.jacobian.jacobian.data()
    print('Jacobian:', res.shape, res)
    req = N.eye(len(names), len(names) - 1)
    assert N.allclose(res, req, atol=1.e-12)

    t = jac.jacobian
    tf1 = t.getTaintflag()
    tf0 = va.vararray.getTaintflag()
    assert not tf0.tainted()
    assert not tf1.tainted()
    assert tf1.frozen()

    # Changing a parameter taints the source but the frozen Jacobian
    # must not follow
    five.set(12.0)
    assert tf0.tainted()
    assert not tf1.tainted()
    assert tf1.frozen()

    t.unfreeze()
    assert tf0.tainted()
    assert tf1.tainted()
    assert not tf1.frozen()

    res = jac.jacobian.jacobian.data()
    assert N.allclose(res, req, atol=1.e-12)
    assert not tf0.tainted()
    assert not tf1.tainted()
    assert tf1.frozen()
def build(self):
    """Build the interpolated isotope spectra.

    Creates the coarse interpolation edges, optionally applies free
    per-bin correction weights, and wires one InterpExpo transformation
    per major index (isotope).
    """
    # Coarse edges on which the isotope spectra are defined; shared with
    # other bundles via self.shared
    model_edges_t = C.Points( self.model_edges, ns=self.namespace )
    model_edges_t.points.setLabel('Spectra interpolation edges')
    self.context.objects['edges'] = model_edges_t
    self.shared.reactor_anu_edges = model_edges_t.single()

    if self.cfg.free_params:
        with self.reac_ns:
            tmp = C.VarArray(self.variables, ns=self.reac_ns, labels='Spec pars:\nlog(n_i)')
        if self.cfg.varmode == 'log':
            # Parameters are log(n_i): exponentiate to obtain the weights n_i
            self.context.objects['npar_log'] = tmp
            self.free_weights = R.Exp(ns=self.reac_ns)
            self.free_weights.exp.points( tmp )
            self.free_weights.exp.setLabel('n_i')
        else:
            # Parameters are the weights n_i directly
            tmp.vararray.setLabel('n_i')
            self.free_weights = tmp

    self.interp_expo = interp_expo = R.InterpExpo(ns=self.reac_ns)
    # The first transformation samples the segment indices; it is shared
    # by all subsequent interpolations
    sampler = interp_expo.transformations.front()
    model_edges_t >> sampler.inputs.edges
    sampler_input = sampler.inputs.points
    interp_expo_t = interp_expo.transformations.back()

    for i, it in enumerate(self.nidx_major):
        isotope, = it.current_values()

        spectrum_raw_t = C.Points( self.spectra[isotope], ns=self.reac_ns )
        spectrum_raw_t.points.setLabel('%s spectrum, original'%isotope)
        self.context.objects[('spectrum_raw', isotope)] = spectrum_raw_t

        if self.cfg.free_params:
            # Multiply the raw spectrum by the free correction weights
            spectrum_t = C.Product(ns=self.reac_ns)
            spectrum_t.multiply( spectrum_raw_t )
            spectrum_t.multiply( self.free_weights.single() )
            spectrum_t.product.setLabel('%s spectrum, corrected'%isotope)
        else:
            spectrum_t = spectrum_raw_t

        # First isotope reuses the initial transformation; later ones get
        # a fresh interpolation transformation each
        if i>0:
            interp_expo_t = interp_expo.add_transformation()
        model_edges_t >> interp_expo_t.inputs.x
        interp_output = interp_expo.add_input(spectrum_t)
        interp_input = interp_expo_t.inputs.newx

        # Only the first isotope also receives the sampler input
        if i>0:
            self.set_input(self.cfg.name, it, interp_input, argument_number=0)
        else:
            self.set_input(self.cfg.name, it, (sampler_input, interp_input), argument_number=0)

        interp_expo_t.setLabel('%s spectrum, interpolated'%isotope)

        """Store data"""
        self.set_output(self.cfg.name, it, interp_output)
        self.context.objects[('spectrum', isotope)] = spectrum_t
def define_variables(self):
    """Define variables for correlated and uncorrelated uncertainties.

    Correlated uncertainties are represented as WeightedSums of fixed
    uncertainties from HM model multiplied by single weight for each
    reactor and shared for isotopes within same reactor.

    Uncorrelated uncertainties are represented as VarArrays of uncertain
    parameters. Each parameter uncertainty corresponds to uncorrelated
    uncertainty from HM model multiplied for given isotope in given bin.
    All parameters are uncorrelated between reactors and isotopes.
    """
    num_of_bins = len(self.bins) - 1
    with self.unc_ns:
        for reac_idx in self.nidx_major.get_subset(self.cfg.reac_idx):
            reac, = reac_idx.current_values()
            # Single correlated-uncertainty weight per reactor, shared by
            # all its isotopes
            corr_name = "corr_unc." + reac_idx.current_format()
            self.unc_ns.reqparameter(
                corr_name, central=0., sigma=1.,
                label=f"Correlated uncertainty in {reac}")

            for iso_idx in self.nidx_major.get_subset(self.cfg.iso_idx):
                iso, = iso_idx.current_values()
                idx = iso_idx + reac_idx
                uncorr_temp = "uncorr_unc." + idx.current_format() + ".{bin}"

                # One independent parameter per (reactor, isotope, bin);
                # 'par_names'/'ibin' avoid shadowing the builtins vars/bin
                par_names = []
                for ibin, iso_unc in zip(range(num_of_bins),
                                         self.uncertainties['uncorr'][iso]):
                    name = uncorr_temp.format(bin=ibin)
                    par_names.append(name)
                    self.unc_ns.reqparameter(
                        name, central=0., sigma=iso_unc,
                        label=
                        f'''Uncorrelated uncertainty for {iso} in {reac}, bin {ibin}'''
                    )

                uncorr_unc = C.VarArray(
                    par_names, ns=self.unc_ns,
                    labels=f'Uncorrelated uncertainties for {iso} in {reac}')
                self.uncorrelated_vars[reac][iso] = uncorr_unc

                tmp = C.Points(
                    self.uncertainties['corr'][iso],
                    labels=
                    f'Fixed array of correlated uncertainties for {iso} in {reac}'
                )
                corr_unc = C.WeightedSum(
                    [corr_name], [tmp], ns=self.unc_ns,
                    labels=f"Correlated uncertainties for {iso} in {reac}")
                self.correlated_vars[reac][iso] = corr_unc

                # Total uncertainty: uncorrelated plus correlated part
                self.total_unc[reac][iso] = C.Sum([uncorr_unc, corr_unc])
def build(self):
    """Create a VarArray transformation for each group of uncorrelated
    correction parameters and expose its transformation and output.
    """
    uncpars = OrderedDict()
    # 'varnames' instead of 'vars' to avoid shadowing the builtin
    for name, varnames in self.uncorr_vars.items():
        with self.common_namespace:
            uncpar_t = C.VarArray(varnames, ns=self.common_namespace)
        uncpar_t.vararray.setLabel('Uncorr correction:\n' + name)
        uncpars[name] = uncpar_t
        self.objects[('uncorrelated_correction', name)] = uncpar_t
        self.transformations_out[name] = uncpar_t.transformations[0]
        self.outputs[name] = uncpar_t.single()
def test_viewrear_points_01_start_len(function_name):
    """Test ViewRear on Points (start, len).

    A VarArray feeds the rear window of a ViewRear over a Points
    instance; changing each parameter must update only the window while
    the original data stays intact.
    """
    arr = N.zeros(12, dtype=context.current_precision_short())
    ns = env.env.globalns(function_name)
    names = []
    for i in range(arr.size):
        name = 'val_%02i' % i
        names.append(name)
        ns.defparameter(name, central=i, fixed=True, label='Value %i' % i)

    ranges = [(0, 3), (0, 12), (1, 3), (6, 6), (6, 1)]
    for rng in ranges:
        # 'length' instead of 'len' to avoid shadowing the builtin;
        # the unused 'pview' slice of the original was dropped
        start, length = rng
        points = C.Points(arr)
        view = C.ViewRear(points, start, length)
        cnames = names[start:start + length]
        with ns:
            vararray = C.VarArray(cnames)
        vararray >> view.view.rear

        print('Range', rng)
        for ichange, iname in enumerate([''] + cnames, -1):
            if iname:
                print(' Change', ichange)
                par = ns[iname]
                par.set(par.value() + 1.0)

            res0 = vararray.single().data()
            res = view.view.result.data()

            # Expected full result: original array with the window
            # replaced by the current parameter values
            expect = arr.copy()
            for i in range(start, start + length):
                expect[i] = ns[names[i]].value()
            expect0 = []
            for i in range(start, start + length):
                expect0.append(ns[names[i]].value())

            print(' Result 0', res0)
            print(' Expect 0', expect0)
            print(' Result', res)
            print(' Expect', expect)
            print(' Original data', points.single().data())
            print(' Original data (expect)', arr)

            assert (res == expect).all()
            assert (res0 == expect0).all()
            assert (res[start:start + length] == res0).all()
            assert (points.single().data() == arr).all()
        print()
def preinit_variables(self):
    """Create spectrum-shape uncertainty parameters (one per bin) and wrap
    them in a VarArray, replacing cfg.shape_uncertainty with a
    'predefined' bundle configuration.

    Supported modes (self.opts.spectrum_unc): 'initial', 'final', 'none';
    anything else raises.
    """
    mode_yb = self.opts.mode.startswith('yb')
    if self.opts.spectrum_unc in ['final', 'initial']:
        spec = self.namespace('spectrum')
        cfg = self.cfg.shape_uncertainty
        unc = cfg.unc

        # Choose the binning the uncertainty is applied to
        if self.opts.spectrum_unc == 'initial':
            if mode_yb:
                edges = self.cfg.kinint2_enu.edges
            else:
                edges = self.cfg.kinint2.edges
        elif self.opts.spectrum_unc == 'final':
            if mode_yb:
                edges = self.cfg.rebin_yb.edges
            else:
                edges = self.cfg.rebin.edges

        # bin-to-bin should take into account the number of bins it is applied to
        unccorrection = ((edges.size - 1.0) / cfg.nbins)**0.5
        unc.uncertainty *= unccorrection

        names = []
        for bini in range(edges.size - 1):
            name = 'norm_bin_%04i' % bini
            names.append(name)
            label = 'Spectrum shape unc. final bin %i (%.03f, %.03f) MeV' % (
                bini, edges[bini], edges[bini + 1])
            spec.reqparameter(name, cfg=unc, label=label)

        with spec:
            vararray = C.VarArray(names, labels='Spectrum shape norm')

        # Replace the configuration with a ready-made ('predefined') bundle
        self.cfg.shape_uncertainty = NestedDict(bundle=dict(
            name='predefined', version='v01'),
            name='shape_norm',
            inputs=None,
            outputs=vararray.single(),
            unc=cfg.unc,
            object=vararray)
    elif self.opts.spectrum_unc == 'none':
        pass
    else:
        raise Exception('Unknown spectrum shape uncertainty type: ' +
                        self.opts.spectrum_unc)
def test_snapshot_01(function_name):
    """Test Snapshot of a VarArray.

    The snapshot stays frozen while the source parameters change and only
    picks up the new values after unfreeze(). (The previous docstring,
    mentioning ViewRear, was a copy-paste leftover.)
    """
    size = 4
    ns = env.env.globalns(function_name)
    names = []
    for i in range(size):
        name = 'val_%02i' % i
        names.append(name)
        ns.defparameter(name, central=i, fixed=True, label='Value %i' % i)

    with ns:
        vararray = C.VarArray(names)
    snapshot = C.Snapshot(vararray)

    # 'current' instead of 'vars' to avoid shadowing the builtin
    for ichange, iname in enumerate([''] + names, -1):
        print(' Change', ichange)
        if iname:
            par = ns[iname]
            par.set(par.value() + 1.0)

        res = snapshot.snapshot.result.data()
        current = vararray.single().data()
        print(' Result', res)
        print(' Vars', current)

        tainted = snapshot.snapshot.tainted()
        print(' Taintflag', tainted)
        assert not tainted

        if iname:
            # Snapshot is frozen: it must NOT follow the change
            assert (res != current).any()
        else:
            assert (res == current).all()
        print()

    # After unfreezing the snapshot follows the source again
    snapshot.snapshot.unfreeze()
    assert snapshot.snapshot.tainted()
    res = snapshot.snapshot.result.data()
    print('Result', res)
    print('Vars', current)
    assert (res == current).all()
def build(self):
    """Create correlated correction transformations.

    For each group a VarArray of sigmas feeds the 'offset' input of a
    WeightedSum weighted by the common uncertainty parameter
    (self.cfg.uncname); both objects and the resulting output are stored.
    """
    corrpars = OrderedDict()
    # 'varnames' instead of 'vars' to avoid shadowing the builtin
    for name, varnames in self.corr_vars.items():
        with self.common_namespace:
            corr_sigma_t = C.VarArray(varnames, ns=self.common_namespace)
            corrpar_t = R.WeightedSum(1.0, C.stdvector([self.cfg.uncname]),
                                      C.stdvector(['offset']))
            corrpar_i = corrpar_t.sum.inputs
            corrpar_i['offset'](corr_sigma_t)

        corr_sigma_t.vararray.setLabel('Corr unc:\n' + name)
        corrpar_t.sum.setLabel('Corr correction:\n' + name)
        corrpars[name] = corrpar_t
        self.objects[('correlated_sigma', name)] = corr_sigma_t
        self.objects[('correlated_correction', name)] = corrpar_t
        self.transformations_out[name] = corrpar_t.transformations[0]
        self.outputs[name] = corrpar_t.single()
def define_variables(self):
    """Create the parameters for each major/minor index combination,
    optionally splitting out a separate uncertainty (normalization)
    parameter and registering VarArray outputs when 'objectize' is set.
    """
    separate_uncertainty = self.cfg.get('separate_uncertainty', False)
    parname = self.cfg.parameter
    pars = self.cfg.pars
    labelfmt = self.cfg.get('label', '')

    for it_major in self.nidx_major:
        major_values = it_major.current_values()
        # Empty major index: a single shared parameter configuration
        if major_values:
            parcfg = pars[major_values]
        else:
            parcfg = pars

        for it_minor in self.nidx_minor:
            it = it_major + it_minor
            label = it.current_format(labelfmt) if labelfmt else ''

            if separate_uncertainty:
                # Split the parameter into a fixed central value plus a
                # separate normalization parameter carrying the uncertainty
                if parcfg.mode == 'fixed':
                    raise self.exception(
                        'Can not separate uncertainty for fixed parameters'
                    )
                unccfg = parcfg.get_unc()
                uncpar = self.reqparameter(separate_uncertainty,
                                           it,
                                           cfg=unccfg,
                                           label=label + ' (norm)')
                # NOTE(review): parcfg is mutated here, so later minor
                # iterations see mode == 'fixed' — confirm this is intended
                parcfg.mode = 'fixed'

            par = self.reqparameter(parname, it, cfg=parcfg, label=label)

            if self.cfg.get("objectize"):
                # Wrap the single parameter into a VarArray so it is
                # available as a transformation output
                import gna.constructors as C
                with self.namespace:
                    var_array = C.VarArray(
                        [par.qualifiedName()],
                        labels=par.qualifiedName().split('.', 1)[1])
                output = var_array.vararray.points
                self.set_output(parname, it, output)

            self._par_container.append(par)
def build(self):
    """Build the spectral-correction parameter array.

    In 'log' varmode the raw parameters are log(n_i) and get
    exponentiated; otherwise they are used directly as the weights n_i.
    The resulting transformation is exposed for every namespace.
    """
    with self.common_namespace:
        raw_pars = C.VarArray(self.variables, ns=self.common_namespace)
    nsname = self.common_namespace.name

    if self.cfg.varmode == 'log':
        raw_pars.vararray.setLabel('Spec pars:\nlog(n_i)')
        corrections = R.Exp(ns=self.common_namespace)
        corrections.exp.points(raw_pars)
        corrections.exp.setLabel('n_i')
        self.objects['npar_log'] = raw_pars
    else:
        raw_pars.vararray.setLabel('n_i')
        corrections = raw_pars

    # Store data for every namespace
    for namespace in self.namespaces:
        self.transformations_out[namespace.name] = corrections.transformations[0]
        self.outputs[namespace.name] = corrections.single()

    self.objects['corrections'] = corrections
def test_vararray_v01(function_name):
    """A VarArray must track its parameters: both the initial values and
    every subsequent parameter update.
    """
    ns = env.globalns(function_name)
    names = ['zero', 'one', 'two', 'three', 'four', 'five']
    values = N.arange(len(names), dtype=context.current_precision_short())
    for name, value in zip(names, values):
        ns.defparameter(name, central=value, relsigma=0.1)

    with ns:
        va = C.VarArray(names)

    res = va.vararray.points.data()
    print('Python array:', values)
    print('Array:', res)
    assert N.allclose(values, res)

    # Shift every parameter to index + 2 and check the array follows
    for idx, parname in enumerate(names):
        newval = idx + 2
        ns[parname].set(newval)
        values[idx] = newval
        res = va.vararray.points.data()
        assert N.allclose(values, res)
def test_object_variables(function_name):
    """Each variable exposed by a VarArray must carry the matching
    parameter name and value, and stay in sync after the parameters are
    set again.
    """
    ns = env.globalns(function_name)
    names = ['zero', 'one', 'two', 'three', 'four', 'five']
    values = N.arange(len(names), dtype=context.current_precision_short())
    for name, value in zip(names, values):
        ns.defparameter(name, central=value, relsigma=0.1)

    with ns:
        va = C.VarArray(names)

    # Exposed variables match their parameters by name and value
    for idx, (parname, expected) in enumerate(zip(names, values)):
        variable = va.variables[idx].getVariable()
        assert parname == variable.name()
        assert variable.cast().value() == expected

    # Re-setting the parameters keeps the exposed variables in sync
    for idx, (parname, expected) in enumerate(zip(names, values)):
        ns[parname].set(expected)
        variable = va.variables[idx].getVariable()
        assert variable.cast().value() == expected
def test_arrsum(function_name):
    """Check ArraySum: the evaluable 'out' holds the sum of a VarArray
    and follows parameter changes."""
    varname = 'out'
    ns = env.globalns(function_name)
    names = ["var1", "var2", "var3", "var4"]
    variables = [
        ns.reqparameter(name, central=float(i), relsigma=0.1)
        for i, name in enumerate(names)
    ]
    with ns:
        var_arr = C.VarArray(names)
    print("Input var array ", var_arr.vararray.points.data())
    sum_arr = C.ArraySum(varname, var_arr, ns=ns)

    # materialize variable
    ns[varname].get()

    output = var_arr.vararray.points
    print('Data:', output.data(), output.data().sum())
    print("Value of %s evaluable immediately after initialization " % varname,
          ns[varname].value(), sum_arr.arrsum.sum.data())
    print()
    assert (output.data().sum() == ns[varname].value()).all()

    #  sum_arr.arrsum.arr(var_arr.vararray)
    #  sum_arr.exposeEvaluable(var_arr.vararray)
    #  print(sum_arr.arrsum.accumulated.data())

    # The evaluable must follow a parameter change
    print("Change value of var1 variable to 10")
    ns['var1'].set(10)
    print('Data:', output.data(), output.data().sum())
    ns[varname].dump()
    # NOTE(review): uses 'np' here while sibling tests use 'N' — confirm
    # both numpy aliases are imported in this module
    print("Sum should now be ", np.sum(var_arr.vararray.points.data()))
    print("Check the value %s of evaluable now: " % varname,
          ns['out'].value(), sum_arr.arrsum.sum.data())
    assert (output.data().sum() == ns[varname].value()).all()
    print()
    ns.printparameters()
y0 = np.exp(1/edges.single().data()**0.5) # Make it saw-like by scaling all odd and even points to different sides y0[::2]*=1.4 y0[1::2]*=0.7 y0 = C.Points(y0, labels='Coarse y\n(not scaled)') # Define two sets of scales to correct each value of y0 pars1 = env.globalns('pars1') pars2 = env.globalns('pars2') for ns in (pars1, pars2): for i in range(nsegments+1): ns.defparameter('par_{:02d}'.format(i), central=1, free=True, label='Scale for x_{}={}'.format(i, edges_data[i])) # Initialize transformations for scales with pars1: varray1=C.VarArray(list(pars1.keys()), labels='Scales 1\n(p1)') with pars2: varray2=C.VarArray(list(pars2.keys()), labels='Scales 2\n(p2)') # Make two products: y0 scaled by varray1 and varray2 y1 = C.Product([varray1.single(), y0.single()], labels='p1*y') y2 = C.Product([varray2.single(), y0.single()], labels='p2*y') # Initialize interpolator manual = False labels=('Segment index\n(fine x in coarse x)', 'Interpolator') if manual: # Bind transformations manually interpolator = R.InterpExpo(labels=labels)