def _set_age_values(self, f, include_decay_error=False):
    """Compute and cache the age of this analysis under three J treatments.

    Three ages are derived via ``age_equation`` from the same F value but
    with different handling of the J uncertainty:

    - ``uage_w_position_err``: J's std dev replaced by ``position_jerr``
    - ``uage_w_j_err``: J used as-is (full J error)
    - ``uage``: J's std dev zeroed (age error excludes the J error)

    :param f: F value (e.g. radiogenic 40Ar/39ArK), typically a ufloat
    :param include_decay_error: passed through to ``age_equation``
    """
    arc = self.arar_constants

    # --- age with the position error substituted for the J error ---
    # copy so the cached self.j is not mutated below
    j = copy(self.j)
    if j is None:
        # fall back to a nominal J when none has been set
        j = ufloat(1e-4, 1e-7)
    j.tag = 'Position'
    j.std_dev = self.position_jerr or 0
    age = age_equation(j, f, include_decay_error=include_decay_error,
                       arar_constants=arc)
    self.uage_w_position_err = age

    # --- age with the full J error ---
    j = self.j
    if j is None:
        j = ufloat(1e-4, 1e-7, tag='J')
    age = age_equation(j, f, include_decay_error=include_decay_error,
                       arar_constants=arc)
    self.uage_w_j_err = age

    # --- age with the J error zeroed ---
    j = copy(self.j)
    if j is None:
        j = ufloat(1e-4, 1e-7, tag='J')
    j.std_dev = 0
    age = age_equation(j, f, include_decay_error=include_decay_error,
                       arar_constants=arc)
    self.uage = age
    self.age = nominal_value(age)
    # NOTE(review): age_err is taken from the zero-J-error age, making it
    # identical to age_err_wo_j -- confirm this is intentional
    self.age_err = std_dev(age)
    self.age_err_wo_j = std_dev(age)

    # propagate the per-isotope contributions to the age error
    for iso in self.itervalues():
        iso.age_error_component = self.get_error_component(iso.name)
def _save_currents(self, dban):
    """Persist the "current" values for every isotope of an analysis to the
    dvc database, when enabled.

    For each isotope the intercept, blank, baseline-corrected,
    IC-corrected and non-detector-corrected values are saved, plus the
    baseline signal itself.

    :param dban: database analysis record the currents attach to
    """
    dvc = self.dvc
    if dvc.update_currents_enabled:
        ps = self.per_spec
        db = dvc.db
        for key, iso in ps.isotope_group.isotopes.items():
            # intercept value/error
            param = db.add_parameter('{}_intercept'.format(key))
            db.add_current(dban, iso.value, iso.error, param, iso.units)

            # blank value/error
            param = db.add_parameter('{}_blank'.format(key), iso.blank.units)
            db.add_current(dban, iso.blank.value, iso.blank.error, param,
                           iso.blank.units)

            # baseline-corrected intensity
            param = db.add_parameter('{}_bs_corrected'.format(key))
            v = iso.get_baseline_corrected_value()
            db.add_current(dban, nominal_value(v), std_dev(v), param, iso.units)

            # IC-corrected intensity
            param = db.add_parameter('{}_ic_corrected'.format(key))
            v = iso.get_ic_corrected_value()
            db.add_current(dban, nominal_value(v), std_dev(v), param, iso.units)

            # non-detector-corrected intensity, stored under the bare key
            param = db.add_parameter(key)
            v = iso.get_non_detector_corrected_value()
            db.add_current(dban, nominal_value(v), std_dev(v), param, iso.units)

            # the baseline signal itself
            param = db.add_parameter(iso.baseline.name, iso.baseline.units)
            db.add_current(dban, iso.baseline.value, iso.baseline.error, param,
                           iso.baseline.units)
def test_counter():
    """Exercise the Arduino counter against every available PWM frequency.

    Python 2 style test (print statements, nose ``SkipTest``); skipped on
    interpreters older than 2.6.
    """
    if (sys.version_info < (2, 6, 0)):
        from nose.plugins.skip import SkipTest
        raise SkipTest
    mcu = Arduino()
    # NOTE(review): ``reg`` is unused -- kept only if proxy access has a
    # side effect; confirm
    reg = mcu.registers.proxy
    mcu.pins.reset()
    p = mcu.pin(5)
    p.write_mode(OUTPUT)
    # 50% duty cycle
    p.pwm.write_value(128)
    print 'frequencies_available:', p.pwm.frequencies_available
    for fset in p.pwm.frequencies_available:
        p.pwm.frequency = fset
        # the device must report (approximately) the frequency just set
        assert abs(p.pwm.frequency - fset) <= 1
        print '---------------------------'
        print 'fset=', fset
        print '---------------------------'
        for ms in [10, 20, 50, 100, 200, 500, 1000]:
            for _ in range(1):
                t = ms / 1000.0
                with mcu.counter:
                    mcu.counter.run(t)
                f = mcu.counter.frequency
                t = mcu.counter.gate_time
                err = f - fset
                print 't=%s f=%s ' % (t, f)
                # measured frequency must agree with the set point within
                # 0.1 Hz plus the measurement uncertainty
                ok_(abs(nominal_value(err)) <= 0.1 + std_dev(err),
                    (abs(nominal_value(err)), std_dev(err)))
def _add_baseline(self, spec, dbiso, dbdet, odet):
    """Save the baseline for one isotope/detector to the database.

    Adds the raw baseline time series, then a baseline changeable item
    carrying the filtered baseline statistics, linking the two records
    via a shared BslnID.

    :param spec: export spec providing the baseline data
    :param dbiso: database isotope record (its ``Label`` names the isotope)
    :param dbdet: database detector record
    :param odet: original detector name used to fetch the data
    """
    iso = dbiso.Label
    self.debug('add baseline dbdet= {}. original det= {}'.format(
        iso, odet))
    det = dbdet.detector_type.Label
    tb, vb = spec.get_baseline_data(iso, odet)
    pos = spec.get_baseline_position(iso)
    blob = self._build_timeblob(tb, vb)
    db = self.db
    label = '{} Baseline'.format(det.upper())
    ncnts = len(tb)
    db_baseline = db.add_baseline(blob, label, ncnts, dbiso)
    db.flush()
    # if spec.is_peak_hop:
    #     det = spec.peak_hop_detector
    # bs = spec.get_baseline_uvalue(iso)
    bs, fncnts = spec.get_filtered_baseline_uvalue(iso)
    # sem = bs.std_dev / (fncnts) ** 0.5 if fncnts else 0
    bfit = spec.get_baseline_fit(iso)
    self.debug('baseline {}. v={}, e={}'.format(iso, nominal_value(bs),
                                                std_dev(bs)))
    infoblob = self._make_infoblob(nominal_value(bs), std_dev(bs),
                                   fncnts, pos)
    db_changeable = db.add_baseline_changeable_item(
        self.data_reduction_session_id, bfit, infoblob)
    # baseline and baseline changeable items need matching BslnID
    db_changeable.BslnID = db_baseline.BslnID
    db.flush()
def _add_baseline(self, spec, dbiso, dbdet, odet):
    """Persist one isotope's baseline: the raw time blob plus a changeable
    item with the filtered baseline statistics, tied together by BslnID.

    :param spec: export spec supplying baseline data/fit/position
    :param dbiso: database isotope record (its ``Label`` names the isotope)
    :param dbdet: database detector record
    :param odet: original detector name used to fetch the data
    """
    iso = dbiso.Label
    self.debug('add baseline dbdet= {}. original det= {}'.format(iso, odet))
    det = dbdet.detector_type.Label
    tb, vb = spec.get_baseline_data(iso, odet)
    pos = spec.get_baseline_position(iso)
    blob = self._build_timeblob(tb, vb)
    db = self.db
    label = '{} Baseline'.format(det.upper())
    ncnts = len(tb)
    db_baseline = db.add_baseline(blob, label, ncnts, dbiso)
    db.flush()
    # if spec.is_peak_hop:
    #     det = spec.peak_hop_detector
    # bs = spec.get_baseline_uvalue(iso)
    bs, fncnts = spec.get_filtered_baseline_uvalue(iso)
    # sem = bs.std_dev / (fncnts) ** 0.5 if fncnts else 0
    bfit = spec.get_baseline_fit(iso)
    self.debug('baseline {}. v={}, e={}'.format(iso, nominal_value(bs),
                                                std_dev(bs)))
    infoblob = self._make_infoblob(nominal_value(bs), std_dev(bs), fncnts, pos)
    db_changeable = db.add_baseline_changeable_item(
        self.data_reduction_session_id, bfit, infoblob)
    # baseline and baseline changeable items need matching BslnID
    db_changeable.BslnID = db_baseline.BslnID
    db.flush()
def _load_unknown_computed(self, an, new_list):
    """Populate ``self.computed_values`` for an unknown analysis.

    When ``new_list`` is true, builds a fresh list of ``ComputedValue``
    items from ``attrs``; otherwise refreshes the existing items in place
    from the analysis ``an``.
    """
    # (display name, attr[, placeholder, value_tag, error_tag])
    attrs = (('Age', 'uage_w_j_err'),
             # ('Age', 'age', None, None, 'age_err'),
             ('w/o J', 'wo_j', '', 'uage', 'age_err_wo_j'),
             ('K/Ca', 'kca'),
             ('K/Cl', 'kcl'),
             ('40Ar*', 'rad40_percent'),
             ('F', 'uF'),
             ('w/o Irrad', 'wo_irrad', '', 'uF', 'F_err_wo_irrad'))
    if new_list:
        def comp_factory(n, a, value=None, value_tag=None, error_tag=None):
            # value defaults to the analysis attribute named by ``a``
            if value is None:
                value = getattr(an, a)
            display_value = True
            if value_tag:
                # display the tagged attribute instead of ``value``
                value = getattr(an, value_tag)
                display_value = False
            if error_tag:
                e = getattr(an, error_tag)
            else:
                e = std_dev(value)
            return ComputedValue(name=n,
                                 tag=a,
                                 value=nominal_value(value) or 0,
                                 value_tag=value_tag or '',
                                 display_value=display_value,
                                 error=e or 0)

        cv = [comp_factory(*args) for args in attrs]
        self.computed_values = cv
    else:
        age = an.uage
        nage, sage = nominal_value(age), std_dev(age)
        try:
            self.summary_str = u'Age={} {}{}({}%)'.format(
                floatfmt(nage), PLUSMINUS, floatfmt(sage),
                format_percent_error(nage, sage))
        except:
            # the summary string is cosmetic; ignore formatting failures
            pass
        for ci in self.computed_values:
            attr = ci.tag
            if attr == 'wo_j':
                # age error excluding the J error
                ci.error = an.age_err_wo_j or 0
                ci.value = nominal_value(getattr(an, ci.value_tag))
            elif attr == 'wo_irrad':
                # F error excluding irradiation errors
                ci.error = an.F_err_wo_irrad or 0
                ci.value = nominal_value(getattr(an, ci.value_tag))
            else:
                v = getattr(an, attr)
                if v is not None:
                    ci.value = nominal_value(v)
                    ci.error = std_dev(v)
def _load_unknown_computed(self, an, new_list):
    """Populate ``self.computed_values`` for an unknown analysis.

    When ``new_list`` is true, builds fresh computed-value items from
    ``attrs`` via ``_computed_value_factory``; otherwise refreshes the
    existing items in place from the analysis ``an``.
    """
    # (display name, attr[, placeholder, value_tag, error_tag])
    attrs = (('Age', 'uage_w_j_err'),
             ('w/o J', 'wo_j', '', 'uage', 'age_err_wo_j'),
             ('K/Ca', 'kca'),
             ('K/Cl', 'kcl'),
             ('40Ar*', 'radiogenic_yield'),
             ('F', 'uF'),
             ('w/o Irrad', 'wo_irrad', '', 'uF', 'F_err_wo_irrad'))
    if new_list:
        def comp_factory(n, a, value=None, value_tag=None, error_tag=None):
            # value defaults to the analysis attribute named by ``a``
            if value is None:
                value = getattr(an, a)
            display_value = True
            if value_tag:
                # display the tagged attribute instead of ``value``
                value = getattr(an, value_tag)
                display_value = False
            if error_tag:
                e = getattr(an, error_tag)
            else:
                e = std_dev(value)
            return self._computed_value_factory(
                name=n,
                tag=a,
                value=nominal_value(value) or 0,
                value_tag=value_tag or '',
                display_value=display_value,
                error=e or 0)

        cv = [comp_factory(*args) for args in attrs]
        self.computed_values = cv
    else:
        age = an.uage
        nage, sage = nominal_value(age), std_dev(age)
        try:
            self.summary_str = u'Age={} {}{}({}%)'.format(
                floatfmt(nage), PLUSMINUS, floatfmt(sage),
                format_percent_error(nage, sage))
        except:
            # the summary string is cosmetic; ignore formatting failures
            pass
        for ci in self.computed_values:
            ci.sig_figs = self.sig_figs
            attr = ci.tag
            if attr == 'wo_j':
                # age error excluding the J error
                ci.error = an.age_err_wo_j or 0
                ci.value = nominal_value(getattr(an, ci.value_tag))
            elif attr == 'wo_irrad':
                # F error excluding irradiation errors
                ci.error = an.F_err_wo_irrad or 0
                ci.value = nominal_value(getattr(an, ci.value_tag))
            else:
                v = getattr(an, attr)
                if v is not None:
                    ci.value = nominal_value(v)
                    ci.error = std_dev(v)
def _make_intermediate_summary(self, sh, ag, cols, label):
    """Write an intermediate (per-aliquot) summary row to worksheet ``sh``.

    :param sh: xlsxwriter worksheet
    :param ag: analysis group providing ages, K/Ca and MSWD
    :param cols: column descriptors; used to locate the Age and
        cumulative-39Ar columns
    :param label: summary kind, e.g. 'plateau'
    """
    row = self._current_row
    # locate the Age and cumulative 39Ar columns (default to 0 if absent)
    age_idx = next((i for i, c in enumerate(cols) if c.label == 'Age'), 0)
    cum_idx = next(
        (i for i, c in enumerate(cols) if c.attr == 'cumulative_ar39'), 0)
    fmt = self._get_number_format('summary_age')
    kcafmt = self._get_number_format('summary_kca')
    fmt.set_bottom(1)
    kcafmt.set_bottom(1)
    fmt2 = self._workbook.add_format({'bottom': 1, 'bold': True})
    border = self._workbook.add_format({'bottom': 1})
    # blank the leading cells so the bottom border spans the whole row
    for i in range(age_idx + 1):
        sh.write_blank(row, i, '', fmt)
    startcol = 1
    sh.write(row, startcol, '{:02n}'.format(ag.aliquot), fmt2)
    sh.write_string(row, startcol + 1, label, fmt2)
    cols[startcol + 1].calculate_width(label)
    age = ag.uage
    tn = ag.total_n
    if label == 'plateau':
        if not ag.plateau_steps:
            # no plateau defined; suppresses the age cells below
            age = None
        else:
            txt = 'n={}/{} steps={}'.format(ag.nsteps, tn,
                                            ag.plateau_steps_str)
            sh.write(row, startcol + 2, txt, border)
            sh.write(row, cum_idx + 1,
                     format_mswd(ag.get_plateau_mswd_tuple()), border)
    else:
        txt = 'n={}/{}'.format(ag.nanalyses, tn)
        sh.write(row, startcol + 2, txt, border)
        sh.write(row, cum_idx + 1, format_mswd(ag.get_mswd_tuple()), border)
    if age is not None:
        sh.write_number(row, age_idx, nominal_value(age), fmt)
        sh.write_number(row, age_idx + 1, std_dev(age), fmt)
    else:
        sh.write(row, age_idx, 'No plateau', border)
    sh.write_number(row, age_idx + 2, nominal_value(ag.kca), kcafmt)
    sh.write_number(row, age_idx + 3, std_dev(ag.kca), kcafmt)
    # cumulative 39Ar total depends on the summary kind
    if label == 'plateau':
        sh.write_number(row, cum_idx, ag.plateau_total_ar39(), fmt)
    else:
        sh.write_number(row, cum_idx, ag.valid_total_ar39(), fmt)
    self._current_row += 1
def _value_string(self, t):
    """Return the (value, error) pair for the attribute named ``t``."""
    if t == 'uF':
        # F and its error are cached directly on the instance
        return self.f, self.f_err
    if t == 'uage':
        uv = self.uage
    else:
        uv = self.get_value(t)
        if isinstance(uv, Isotope):
            # unwrap an isotope to its intensity
            uv = uv.get_intensity()
    return nominal_value(uv), std_dev(uv)
def _value_string(self, t):
    """Return the (value, error) pair for the attribute named ``t``."""
    if t == 'uF':
        # F and its error are cached directly on the instance
        return self.F, self.F_err
    if t == 'uage':
        uv = self.uage
    else:
        uv = self.get_value(t)
        if isinstance(uv, Isotope):
            # unwrap an isotope to its intensity
            uv = uv.get_intensity()
    return nominal_value(uv), std_dev(uv)
def _make_intermediate_summary(self, sh, ag, cols, label):
    """Write an intermediate (per-aliquot) summary row to worksheet ``sh``.

    :param sh: xlsxwriter worksheet
    :param ag: analysis group providing ages, K/Ca and MSWD
    :param cols: column descriptors; used to locate the Age and
        cumulative-39Ar columns
    :param label: summary kind, e.g. 'plateau'
    """
    row = self._current_row
    # locate the Age and cumulative 39Ar columns (default to 0 if absent)
    age_idx = next((i for i, c in enumerate(cols) if c.label == 'Age'), 0)
    cum_idx = next((i for i, c in enumerate(cols)
                    if c.attr == 'cumulative_ar39'), 0)
    fmt = self._get_number_format('summary_age')
    kcafmt = self._get_number_format('summary_kca')
    fmt.set_bottom(1)
    kcafmt.set_bottom(1)
    fmt2 = self._workbook.add_format({'bottom': 1, 'bold': True})
    border = self._workbook.add_format({'bottom': 1})
    # blank the leading cells so the bottom border spans the whole row
    for i in range(age_idx + 1):
        sh.write_blank(row, i, '', fmt)
    startcol = 1
    sh.write(row, startcol, '{:02n}'.format(ag.aliquot), fmt2)
    sh.write_rich_string(row, startcol + 1, label, fmt2)
    cols[startcol + 1].calculate_width(label)
    age = ag.uage
    tn = ag.total_n
    if label == 'plateau':
        if not ag.plateau_steps:
            # no plateau defined; suppresses the age cells below
            age = None
        else:
            txt = 'n={}/{} steps={}'.format(ag.nsteps, tn,
                                            ag.plateau_steps_str)
            sh.write(row, startcol + 2, txt, border)
            sh.write(row, cum_idx + 1,
                     format_mswd(ag.get_plateau_mswd_tuple()), border)
    else:
        txt = 'n={}/{}'.format(ag.nanalyses, tn)
        sh.write(row, startcol + 2, txt, border)
        sh.write(row, cum_idx + 1, format_mswd(ag.get_mswd_tuple()), border)
    if age is not None:
        sh.write_number(row, age_idx, nominal_value(age), fmt)
        sh.write_number(row, age_idx + 1, std_dev(age), fmt)
    else:
        sh.write(row, age_idx, 'No plateau', border)
    sh.write_number(row, age_idx + 2, nominal_value(ag.kca), kcafmt)
    sh.write_number(row, age_idx + 3, std_dev(ag.kca), kcafmt)
    # cumulative 39Ar total depends on the summary kind
    if label == 'plateau':
        sh.write_number(row, cum_idx, ag.plateau_total_ar39(), fmt)
    else:
        sh.write_number(row, cum_idx, ag.valid_total_ar39(), fmt)
    self._current_row += 1
def __residenceTime(self): """ Calculate the residence time of a single step event. """ # if previous errors were detected, the # event is already rejected, don't process it # any further if self.mdProcessingStatus != 'normal': return # set numpy warning handling. # raise divide by zero errors so we # can catch them np.seterr(divide='raise') ocmu = np.abs(uncertainties.nominal_value(self.mdOpenChCurrent)) ocsd = np.abs(uncertainties.std_dev(self.mdOpenChCurrent)) bcmu = np.abs(uncertainties.nominal_value(self.mdBlockedCurrent)) bcsd = np.abs(uncertainties.std_dev(self.mdBlockedCurrent)) # refine the start estimate idx = self.eStartEstimate try: while np.abs((np.abs(self.eventData[idx]) - ocmu) / ocsd) > 5.0: idx -= 1 # Set the start point self.mdEventStart = idx + 1 # Next move the count forward so we are in the blocked channel region of the pulse while np.abs((np.abs(self.eventData[idx]) - bcmu) / bcsd) > 0.5: idx += 1 # Search for the event end. 7*sigma allows us to prevent false # positives while np.abs((np.abs(self.eventData[idx]) - bcmu) / bcsd) < 7.0: idx += 1 # Finally backtrack to find the true event end while np.abs((np.abs(self.eventData[idx]) - bcmu) / bcsd) > 5.0: idx -= 1 except (IndexError, FloatingPointError): self.rejectEvent('eResTime') return self.mdEventEnd = idx - 1 # residence time in ms self.mdResTime = 1000. * ( (self.mdEventEnd - self.mdEventStart) / float(self.Fs))
def __init__(self, ratios, name, *args, **kw):
    """Summarize ``ratios`` (mean, weighted mean, MSWD) under ``name``."""
    super(Result, self).__init__(*args, **kw)
    self.name = name

    values = array([nominal_value(r) for r in ratios])
    errors = array([std_dev(r) for r in ratios])

    # straight mean of the uncertain ratios
    mean_ratio = ratios.mean()
    self.value = nominal_value(mean_ratio)
    self.error = std_dev(mean_ratio)

    # error-weighted mean plus its goodness-of-fit statistic
    wmean, werror = calculate_weighted_mean(values, errors)
    self.wm_value = wmean
    self.wm_error = werror
    self.mswd = calculate_mswd(values, errors, wm=wmean)
def test_access_to_std_dev():
    "Uniform access to the standard deviation"

    x = ufloat((1, 0.1))
    y = 2*x

    # Variable and AffineScalarFunc objects expose a std_dev() method that
    # the module-level accessor must agree with:
    for obj in (x, y):
        assert uncertainties.std_dev(obj) == obj.std_dev()

    # any other object falls back to a zero standard deviation:
    for obj in ([], None):
        assert uncertainties.std_dev(obj) == 0
def _get_error(self, attr, n=3, **kw):
    """Return the formatted std dev of ``attr`` on the wrapped item.

    Falls back to the item's isotope intensities; falsy attribute values
    are returned unformatted and a missing attribute yields "".
    """
    item = self.item
    result = ""
    if hasattr(item, attr):
        value = getattr(self.item, attr)
        if value:
            result = floatfmt(std_dev(value), n=n, **kw)
        else:
            result = value
    elif hasattr(item, "isotopes") and attr in item.isotopes:
        intensity = item.isotopes[attr].get_intensity()
        result = floatfmt(std_dev(intensity), n=n, **kw)
    return result
def _get_error(self, attr, n=3, **kw):
    """Return the formatted std dev of ``attr`` on the wrapped item.

    Falls back to the item's isotope intensities; falsy attribute values
    are returned unformatted and a missing attribute yields ''.
    """
    item = self.item
    result = ''
    if hasattr(item, attr):
        value = getattr(self.item, attr)
        if value:
            result = floatfmt(std_dev(value), n=n, **kw)
        else:
            result = value
    elif hasattr(item, 'isotopes') and attr in item.isotopes:
        intensity = item.isotopes[attr].get_intensity()
        result = floatfmt(std_dev(intensity), n=n, **kw)
    return result
def _air_ratio(self):
    """Build the display lines for the non-IC-corrected air ratios."""
    group = self.isotope_group
    a4038 = group.get_ratio('Ar40/Ar38', non_ic_corr=True)
    a4036 = group.get_ratio('Ar40/Ar36', non_ic_corr=True)

    v36, e36 = nominal_value(a4036), std_dev(a4036)
    v38, e38 = nominal_value(a4038), std_dev(a4038)

    lines = [self._make_header('Ratios'),
             'Ar40/Ar36= {} {}'.format(floatfmt(v36), errorfmt(v36, e36)),
             'Ar40/Ar38= {} {}'.format(floatfmt(v38), errorfmt(v38, e38))]
    return self._make_lines(lines)
def test_access_to_std_dev():
    "Uniform access to the standard deviation"

    x = ufloat((1, 0.1))
    y = 2 * x

    # Variable and AffineScalarFunc objects expose a std_dev() method that
    # the module-level accessor must agree with:
    for obj in (x, y):
        assert uncertainties.std_dev(obj) == obj.std_dev()

    # any other object falls back to a zero standard deviation:
    for obj in ([], None):
        assert uncertainties.std_dev(obj) == 0
def __residenceTime(self): """ Calculate the residence time of a single step event. """ # if previous errors were detected, the # event is already rejected, don't process it # any further if self.mdProcessingStatus != 'normal': return # set numpy warning handling. # raise divide by zero errors so we # can catch them np.seterr(divide='raise') ocmu=np.abs(uncertainties.nominal_value(self.mdOpenChCurrent)) ocsd=np.abs(uncertainties.std_dev(self.mdOpenChCurrent)) bcmu=np.abs(uncertainties.nominal_value(self.mdBlockedCurrent)) bcsd=np.abs(uncertainties.std_dev(self.mdBlockedCurrent)) # refine the start estimate idx=self.eStartEstimate try: while np.abs((np.abs(self.eventData[idx])-ocmu)/ocsd) > 5.0: idx-=1 # Set the start point self.mdEventStart=idx+1 # Next move the count forward so we are in the blocked channel region of the pulse while np.abs((np.abs(self.eventData[idx])-bcmu)/bcsd) > 0.5: idx+=1 # Search for the event end. 7*sigma allows us to prevent false # positives while np.abs((np.abs(self.eventData[idx])-bcmu)/bcsd) < 7.0: idx+=1 # Finally backtrack to find the true event end while np.abs((np.abs(self.eventData[idx])-bcmu)/bcsd) > 5.0: idx-=1 except ( IndexError, FloatingPointError ): self.rejectEvent('eResTime') return self.mdEventEnd=idx-1 # residence time in ms self.mdResTime=1000.*((self.mdEventEnd-self.mdEventStart)/float(self.Fs))
def _make_summary(self, sh, cols, group):
    """Write the summary rows (K/Ca, weighted mean age, plateau, isochron,
    integrated age) for an analysis group to worksheet ``sh``.

    :param sh: xlsxwriter worksheet
    :param cols: column descriptors; c[1] is the column label
    :param group: analysis group providing the summary statistics
    """
    fmt = self._bold
    start_col = 0
    if self._options.include_kca:
        idx = next((i for i, c in enumerate(cols) if c[1] == 'K/Ca'))
        sh.write_rich_string(self._current_row, start_col,
                             u'K/Ca {}'.format(PLUSMINUS_ONE_SIGMA), fmt)
        # weighted or arithmetic K/Ca, per options
        kca = group.weighted_kca if self._options.use_weighted_kca else group.arith_kca
        sh.write(self._current_row, idx, nominal_value(kca))
        sh.write(self._current_row, idx + 1, std_dev(kca))
        self._current_row += 1
    # the Age column anchors all of the age rows below
    idx = next((i for i, c in enumerate(cols) if c[1] == 'Age'))
    sh.write_rich_string(
        self._current_row, start_col,
        u'Weighted Mean Age {}'.format(PLUSMINUS_ONE_SIGMA), fmt)
    sh.write(self._current_row, idx, nominal_value(group.weighted_age))
    sh.write(self._current_row, idx + 1, std_dev(group.weighted_age))
    self._current_row += 1
    if self._options.include_plateau_age and hasattr(group, 'plateau_age'):
        sh.write_rich_string(self._current_row, start_col,
                             u'Plateau {}'.format(PLUSMINUS_ONE_SIGMA), fmt)
        # NOTE(review): plateau steps go in hard-coded column 3, unlike the
        # other rows -- confirm intentional
        sh.write(self._current_row, 3,
                 'steps {}'.format(group.plateau_steps_str))
        sh.write(self._current_row, idx, nominal_value(group.plateau_age))
        sh.write(self._current_row, idx + 1, std_dev(group.plateau_age))
        self._current_row += 1
    if self._options.include_isochron_age:
        sh.write_rich_string(
            self._current_row, start_col,
            u'Isochron Age {}'.format(PLUSMINUS_ONE_SIGMA), fmt)
        sh.write(self._current_row, idx, nominal_value(group.isochron_age))
        sh.write(self._current_row, idx + 1, std_dev(group.isochron_age))
        self._current_row += 1
    if self._options.include_integrated_age and hasattr(
            group, 'integrated_age'):
        sh.write_rich_string(
            self._current_row, start_col,
            u'Integrated Age {}'.format(PLUSMINUS_ONE_SIGMA), fmt)
        sh.write(self._current_row, idx, nominal_value(group.integrated_age))
        sh.write(self._current_row, idx + 1, std_dev(group.integrated_age))
        self._current_row += 1
def _update_ratios(self, an):
    """Refresh each computed ratio row from its detector pair."""
    for row in self.computed_values:
        num_name, den_name = row.detectors.split('/')
        num_iso = self._get_isotope(num_name)
        den_iso = self._get_isotope(den_name)

        raw = self._get_non_corrected_ratio(num_iso, den_iso)
        corrected, ic = self._get_corrected_ratio(num_iso, den_iso)

        row.trait_set(value=floatfmt(nominal_value(corrected)),
                      error=floatfmt(std_dev(corrected)),
                      noncorrected_value=nominal_value(raw),
                      noncorrected_error=std_dev(raw),
                      ic_factor=nominal_value(ic))
def _get_value(self, item, attr):
    """Map a display column name to the corresponding value on ``item``.

    'age'/'kca' return the nominal value, their ' error' variants the
    std dev. Unknown columns yield None.
    """
    if attr in ('aliquot', 'step'):
        return getattr(item, attr)
    if attr == 'run date':
        return item.rundate
    if attr in ('age', 'age error'):
        uage = getattr(item, 'uage')
        return nominal_value(uage) if attr == 'age' else std_dev(uage)
    if attr in ('kca', 'kca error'):
        kca = getattr(item, 'kca')
        return nominal_value(kca) if attr == 'kca' else std_dev(kca)
    return None
def _linear_error_propagation(self, age, r, sr):
    """Linear (first-order Taylor) error propagation for the age.

    age in years
    :param age: age in years
    :param r: ratio entering the age equation
    :param sr: 1-sigma uncertainty of ``r``
    :return: linear error propagation age error in years
    """
    lambda_total = self._lambda_t
    b = self._lambda_b
    el = self._lambda_ec
    f = self._f

    # partial derivatives of the age equation w.r.t. each input
    pd_el = -(1. / lambda_total) * (age + (b * f * r / (
        (el ** 2) * umath.exp(lambda_total * age))))
    pd_b = (1 / lambda_total) * (
        (f * r / (el * umath.exp(lambda_total * age))) - age)
    pd_f = r / (el * umath.exp(lambda_total * age))
    pd_r = f / (el * umath.exp(lambda_total * age))

    sel = std_dev(el)
    sb = std_dev(b)
    sf = std_dev(self._f)
    # sr = std_dev(r)

    # (partial derivatives x sigma) ** 2
    pd_el2 = (pd_el * sel) ** 2
    pd_b2 = (pd_b * sb) ** 2
    pd_f2 = (pd_f * sf) ** 2
    pd_r2 = (pd_r * sr) ** 2
    sum_pd = pd_el2 + pd_b2 + pd_f2 + pd_r2

    # covariances (hard-coded constants; provenance not shown here --
    # TODO confirm source)
    cov_f_el = 7.1903e-19
    cov_f_b = -6.5839e-19
    cov_el_b = -3.4711e-26

    # cross terms. FIX: the third term previously reassigned ``cov_el_b``,
    # shadowing the constant above; renamed to ``cov_el_b2`` for clarity
    # (numerically identical behavior).
    cov_f_el2 = 2. * cov_f_el * pd_f * pd_el
    cov_f_b2 = 2. * cov_f_b * pd_f * pd_b
    cov_el_b2 = 2. * cov_el_b * pd_el * pd_b
    sum_cov = cov_f_el2 + cov_f_b2 + cov_el_b2

    ss = sum_pd + sum_cov
    # uncertainty in age
    st = ss ** 0.5
    return nominal_value(st)
def load_measurement(self, an, ar):
    """Build the list of MeasurementValue rows for the measurement pane.

    :param an: analysis record (ids, sample metadata, sensitivity)
    :param ar: Ar/Ar view providing J, constants and decay factors
    """
    # j = self._get_j(an)
    j = ar.j
    jf = 'NaN'
    if j is not None:
        # render J as 'value +/-error(percent)'
        jj = floatfmt(nominal_value(j), n=7, s=5)
        pe = format_percent_error(nominal_value(j), std_dev(j),
                                  include_percent_sign=True)
        jf = u'{} \u00b1{:0.2e}({})'.format(jj, std_dev(j), pe)
    a39 = ar.ar39decayfactor
    a37 = ar.ar37decayfactor
    ms = [MeasurementValue(name='Branch', value=an.branch),
          MeasurementValue(name='DAQ Version', value=an.collection_version),
          MeasurementValue(name='UUID', value=an.uuid),
          MeasurementValue(name='RepositoryID',
                           value=an.repository_identifier),
          MeasurementValue(name='Spectrometer', value=an.mass_spectrometer),
          MeasurementValue(name='Run Date',
                           value=an.rundate.strftime('%Y-%m-%d %H:%M:%S')),
          MeasurementValue(name='Irradiation',
                           value=self._get_irradiation(an)),
          MeasurementValue(name='J', value=jf),
          MeasurementValue(name='Position Error',
                           value=floatfmt(an.position_jerr,
                                          use_scientific=True)),
          MeasurementValue(name='Lambda K',
                           value=nominal_value(ar.arar_constants.lambda_k),
                           units='1/a'),
          MeasurementValue(name='Project', value=an.project),
          MeasurementValue(name='Sample', value=an.sample),
          MeasurementValue(name='Material', value=an.material),
          MeasurementValue(name='Comment', value=an.comment),
          MeasurementValue(name='Ar39Decay', value=floatfmt(a39)),
          MeasurementValue(name='Ar37Decay', value=floatfmt(a37)),
          MeasurementValue(name='Sens.',
                           value=floatfmt(an.sensitivity,
                                          use_scientific=True),
                           units=an.sensitivity_units)]
    self.measurement_values = ms
def _monte_carlo_error_propagation(self, vr, m):
    """Monte Carlo propagation of decay-constant and f uncertainties.

    :param vr: sampled ratio values; assumed broadcastable against the
        ``self._n`` perturbed constants -- TODO confirm shape
    :param m: number of Monte Carlo columns
    :return: (mean age, std of age) over the simulated trials
    """
    lambda_total = self._lambda_t
    el = self._lambda_ec
    f = self._f

    # perturb each constant with gaussian noise scaled by its 1-sigma
    vel = nominal_value(el) + std_dev(el) * randn(self._n)
    vt = nominal_value(lambda_total) + std_dev(lambda_total) * randn(self._n)
    vf = nominal_value(f) + std_dev(f) * randn(self._n)

    # BUG FIX: numpy's ones() takes a shape tuple; ones(1, m) passed ``m``
    # as the dtype and raised TypeError. ones((1, m)) builds the intended
    # MATLAB-style (1, m) tiling matrix.
    vt_mc = ones((1, m)) * vt
    vf_mc = ones((1, m)) * vf
    vel_mc = ones((1, m)) * vel

    # age equation solved for t over all trials
    t_mc = log(vt_mc / vel_mc * vf_mc * vr + 1) / vt_mc
    return mean(t_mc), std(t_mc)
def _linear_error_propagation(self, age, r, sr):
    """Linear (first-order Taylor) error propagation for the age.

    age in years
    :param age: age in years
    :param r: ratio entering the age equation
    :param sr: 1-sigma uncertainty of ``r``
    :return: linear error propagation age error in years
    """
    lambda_total = self._lambda_t
    b = self._lambda_b
    el = self._lambda_ec
    f = self._f

    # partial derivatives of the age equation w.r.t. each input
    pd_el = -(1. / lambda_total) * (age + (b * f * r / ((el ** 2) * umath.exp(lambda_total * age))))
    pd_b = (1 / lambda_total) * ((f * r / (el * umath.exp(lambda_total * age))) - age)
    pd_f = r / (el * umath.exp(lambda_total * age))
    pd_r = f / (el * umath.exp(lambda_total * age))

    sel = std_dev(el)
    sb = std_dev(b)
    sf = std_dev(self._f)
    # sr = std_dev(r)

    # (partial derivatives x sigma) ** 2
    pd_el2 = (pd_el * sel) ** 2
    pd_b2 = (pd_b * sb) ** 2
    pd_f2 = (pd_f * sf) ** 2
    pd_r2 = (pd_r * sr) ** 2
    sum_pd = pd_el2 + pd_b2 + pd_f2 + pd_r2

    # covariances (hard-coded constants; provenance not shown here --
    # TODO confirm source)
    cov_f_el = 7.1903e-19
    cov_f_b = -6.5839e-19
    cov_el_b = -3.4711e-26

    # cross terms. FIX: the third term previously reassigned ``cov_el_b``,
    # shadowing the constant above; renamed to ``cov_el_b2`` for clarity
    # (numerically identical behavior).
    cov_f_el2 = 2. * cov_f_el * pd_f * pd_el
    cov_f_b2 = 2. * cov_f_b * pd_f * pd_b
    cov_el_b2 = 2. * cov_el_b * pd_el * pd_b
    sum_cov = cov_f_el2 + cov_f_b2 + cov_el_b2

    ss = sum_pd + sum_cov
    # uncertainty in age
    st = ss ** 0.5
    return nominal_value(st)
def _air_ratio(self):
    """Build the display lines for the non-IC-corrected air ratios.

    BUG FIX: the Ar40/Ar38 line previously passed ``std_dev(a4036)`` to
    ``errorfmt`` -- i.e. the 40/36 error was shown against the 40/38
    value. It now uses ``std_dev(a4038)`` (matching the sibling
    implementation of this method elsewhere in the codebase).
    """
    a4038 = self.isotope_group.get_ratio('Ar40/Ar38', non_ic_corr=True)
    a4036 = self.isotope_group.get_ratio('Ar40/Ar36', non_ic_corr=True)
    # e4038 = uformat_percent_error(a4038, include_percent_sign=True)
    # e4036 = uformat_percent_error(a4036, include_percent_sign=True)
    lines = [
        self._make_header('Ratios'),
        'Ar40/Ar36= {} {}'.format(
            floatfmt(nominal_value(a4036)),
            errorfmt(nominal_value(a4036), std_dev(a4036))),
        'Ar40/Ar38= {} {}'.format(
            floatfmt(nominal_value(a4038)),
            errorfmt(nominal_value(a4038), std_dev(a4038)))
    ]
    return self._make_lines(lines)
def _plot(self, rs, tag, n, plotid):
    """Scatter-plot the uncertain ratios ``rs`` with a weighted-mean fit
    and a 2-sigma error bar overlay."""
    graph = self.graph
    plot = graph.new_plot(padding_left=100)
    plot.y_axis.title = tag

    xs = arange(n)
    values = array([nominal_value(r) for r in rs])
    errors = array([std_dev(r) for r in rs])

    _, scatter, _ = graph.new_series(xs, values, yerror=errors,
                                     fit='weighted mean', type='scatter')

    # attach a 2-sigma, end-capped error bar overlay
    overlay = ErrorBarOverlay(component=scatter, orientation='y', nsigma=2,
                              visible=True, use_end_caps=True)
    scatter.underlays.append(overlay)
    scatter.yerror = ArrayDataSource(errors)

    graph.set_x_limits(pad='0.1', plotid=plotid)
    # pad the y range to cover the full 2-sigma envelope
    lower = min(values - 2 * errors)
    upper = max(values + 2 * errors)
    graph.set_y_limits(min_=lower, max_=upper, pad='0.1', plotid=plotid)
def _plot_ratio(self, po, i):
    """Plot one ratio series described by plot-options ``po`` with a
    linear fit and optional x/y error bars."""
    xs = [nominal_value(v) for v in self._unpack_attr(po.xtitle)]
    ys = [nominal_value(v) for v in self._unpack_attr(po.ytitle)]
    plot, scatter, line = self.graph.new_series(x=array(xs),
                                                y=array(ys),
                                                fit='linear',
                                                add_inspector=False,
                                                marker=po.marker,
                                                marker_size=po.marker_size)
    opt = self.options
    nsigma = opt.error_bar_nsigma
    # add error bars per axis, honoring per-axis visibility/end-cap options
    for axis in 'xy':
        caps = getattr(opt, '{}_end_caps'.format(axis))
        visible = getattr(po, '{}_error'.format(axis))
        title = getattr(po, '{}title'.format(axis))
        errors = [std_dev(v) for v in self._unpack_attr(title)]
        self._add_error_bars(scatter, errors, axis, nsigma,
                             end_caps=caps, visible=visible)
def _get_baseline_corrected(self, analysis, k):
    """Return (value, error) of isotope ``k``'s baseline-corrected signal,
    or (0, 0) when the isotope is absent from the analysis."""
    try:
        iso = analysis.isotopes[k]
    except KeyError:
        return 0, 0
    corrected = iso.get_baseline_corrected_value()
    return nominal_value(corrected), std_dev(corrected)
def _calculate_integrated_mean_error(self, weighting, ks, rs):
    """Integrate ``rs``/``ks`` into a single uncertain value.

    :param weighting: 'Volume', 'Variance', or anything else for the
        plain ratio of sums
    :param ks: array of denominator signals
    :param rs: array of numerator signals
    :return: ufloat of the (possibly weighted) integrated mean
    """
    sks = ks.sum()
    weights = None
    fs = rs / ks
    errors = array([std_dev(f) for f in fs])
    values = array([nominal_value(f) for f in fs])
    if weighting == 'Volume':
        # weight by squared (volume fraction * error)
        # NOTE(review): larger errors increase the weight here -- confirm
        # this is the intended volume weighting
        vpercent = ks / sks
        weights = [nominal_value(wi) for wi in (vpercent * errors) ** 2]
    elif weighting == 'Variance':
        # classic inverse-variance weights
        weights = 1 / errors ** 2
    if weights is not None:
        wmean, sum_weights = average(values, weights=weights, returned=True)
        # NOTE(review): Volume uses sum_weights**0.5, Variance the usual
        # sum_weights**-0.5 -- confirm the Volume form is intended
        if weighting == 'Volume':
            werr = sum_weights ** 0.5
        else:
            werr = sum_weights ** -0.5
        f = ufloat(wmean, werr)
    else:
        # unweighted: simple ratio of sums
        f = rs.sum() / sks
    return f
def _add_isotope(self, analysis, spec, iso, det, refdet):
    """Add an isotope record (and, if needed, its detector) to the db.

    :param analysis: database analysis record
    :param spec: export spec with the isotope data
    :param iso: isotope name, e.g. 'Ar40'
    :param det: detector name the isotope was measured on
    :param refdet: database record for the reference detector
    :return: (new isotope record, detector record used)
    """
    db = self.db
    # the reference detector label lives in different places depending on
    # the schema version
    if DBVERSION >= 16.3:
        rdet = analysis.reference_detector.detector_type.Label
    else:
        rdet = analysis.ReferenceDetectorLabel
    if det == rdet:
        dbdet = refdet
    else:
        if spec.is_peak_hop:
            """
            if is_peak_hop
            fool mass spec. e.g Ar40 det = H1 not CDD
            det=PEAK_HOP_MAP['Ar40']=='CDD'
            """
            if iso in PEAK_HOP_MAP:
                det = PEAK_HOP_MAP[iso]
        if DBVERSION >= 16.3:
            dbdet = db.add_detector(det)
        else:
            dbdet = db.add_detector(det, Label=det)
        # store the detector's IC factor alongside the new detector record
        ic = spec.isotopes[iso].ic_factor
        dbdet.ICFactor = float(nominal_value(ic))
        dbdet.ICFactorEr = float(std_dev(ic))
        db.flush()
    n = spec.get_ncounts(iso)
    return db.add_isotope(analysis, dbdet, iso, NumCnts=n), dbdet
def _get_values(self, attr):
    """Collect ``attr`` from the clean analyses as (values, errors) arrays.

    Returns None when no analysis provides the attribute.
    """
    collected = [v
                 for v in (a.get_value(attr) for a in self.clean_analyses())
                 if v is not None]
    if not collected:
        return None
    values = array([nominal_value(v) for v in collected])
    errors = array([std_dev(v) for v in collected])
    return values, errors
def _get_j_err(self):
    """Return J's relative error (std dev / nominal).

    0 when J is unset; NaN when the nominal value is zero.
    """
    j = self.j
    if j is None:
        return 0
    try:
        return std_dev(j) / nominal_value(j)
    except ZeroDivisionError:
        return nan
def load_isotopes(self):
    """Populate the isotope/blank/baseline/IC-factor lists from the
    editor's model.

    Marks every isotope static, collects its blank and baseline, and
    builds one editable ICFactor per distinct detector.
    """
    isos = self.editor.model.isotopes
    ns = []
    bks = []
    bs = []
    ics = []
    dets = []
    for k in self.editor.model.isotope_keys:
        iso = isos[k]
        iso.use_static = True
        ns.append(iso)
        bks.append(iso.blank)
        bs.append(iso.baseline)
        det = iso.detector
        # one IC-factor entry per detector
        # (idiom fix: "if not det in dets" -> "if det not in dets")
        if det not in dets:
            v, e = nominal_value(iso.ic_factor), std_dev(iso.ic_factor)
            ics.append(ICFactor(value=v, error=e,
                                ovalue=v, oerror=e,
                                name=det))
            dets.append(det)
    self.isotopes = ns
    self.blanks = bks
    self.baselines = bs
    self.ic_factors = ics
def add_interpreted_age(self, ln, ia):
    """Persist an interpreted age ``ia`` for labnumber ``ln``.

    Creates an interpreted-age history, the interpreted-age record, and
    one set entry per analysis (flagging plateau steps).
    """
    db = self.processor.db
    with db.session_ctx():
        hist = db.add_interpreted_age_history(ln)
        a = ia.get_ma_scaled_age()
        mswd = ia.preferred_mswd
        if isnan(mswd):
            # store 0 instead of NaN so the column stays numeric
            mswd = 0
        db_ia = db.add_interpreted_age(
            hist, age=float(nominal_value(a)),
            age_err=float(std_dev(a)),
            display_age_units=ia.age_units,
            age_kind=ia.preferred_age_kind,
            kca_kind=ia.preferred_kca_kind,
            kca=float(ia.preferred_kca_value),
            kca_err=float(ia.preferred_kca_error),
            mswd=float(mswd),
            include_j_error_in_mean=ia.include_j_error_in_mean,
            include_j_error_in_plateau=ia.include_j_error_in_plateau,
            include_j_error_in_individual_analyses=ia.include_j_error_in_individual_analyses)
        for ai in ia.all_analyses:
            plateau_step = ia.get_is_plateau_step(ai)
            # NOTE(review): ``ai`` is rebound to the db record, so ``tag``
            # is read from the db record rather than the original analysis
            # -- confirm intended
            ai = db.get_analysis_uuid(ai.uuid)
            db.add_interpreted_age_set(db_ia, ai, tag=ai.tag,
                                       plateau_step=plateau_step)
def _assemble(self, step, unk, cum39):
    """
    step, T(C), t(min), 39mol, %error 39, cum ar39, age, age_er,
    age_er_w_j, cl_age, cl_age_er

    cl_age and cl_age_er are currently ignored. Ar/Ar age is used as a
    placeholder instead
    :param unk:
    :return:
    """
    moles = unk.moles_k39
    mol_39 = nominal_value(moles)
    # percent error of the 39K moles
    mol_39_perr = std_dev(moles) / mol_39 * 100

    fields = (step + 1,
              unk.extract_value - self.temp_offset,
              unk.extract_duration / 60. - self.time_offset,
              mol_39,
              mol_39_perr,
              cum39,
              unk.age,
              unk.age_err_wo_j,
              unk.age_err,
              # cl_age/cl_age_er placeholders (see docstring)
              unk.age,
              unk.age_err_wo_j)
    return '{}\n'.format(','.join(str(v) for v in fields))
def add_interpreted_age(self, ia):
    """Serialize interpreted age ``ia`` to a dict and delegate storage to
    ``_add_interpreted_age``."""
    a = ia.get_ma_scaled_age()
    mswd = ia.preferred_mswd
    if isnan(mswd):
        # store 0 instead of NaN so the value stays serializable/numeric
        mswd = 0
    d = dict(age=float(nominal_value(a)),
             age_err=float(std_dev(a)),
             display_age_units=ia.age_units,
             age_kind=ia.preferred_age_kind,
             kca_kind=ia.preferred_kca_kind,
             kca=float(ia.preferred_kca_value),
             kca_err=float(ia.preferred_kca_error),
             mswd=float(mswd),
             include_j_error_in_mean=ia.include_j_error_in_mean,
             include_j_error_in_plateau=ia.include_j_error_in_plateau,
             include_j_error_in_individual_analyses=ia.
             include_j_error_in_individual_analyses,
             sample=ia.sample,
             material=ia.material,
             identifier=ia.identifier,
             nanalyses=ia.nanalyses,
             irradiation=ia.irradiation)
    # one entry per analysis, flagging plateau membership
    d['analyses'] = [
        dict(uuid=ai.uuid, tag=ai.tag,
             plateau_step=ia.get_is_plateau_step(ai))
        for ai in ia.all_analyses
    ]
    self._add_interpreted_age(ia, d)
def _export_spec_factory(self):
    """Build a MassSpecExportSpec from the current per-spec state and
    load the run record into it."""
    # dc = self.collector
    # fb = dc.get_fit_block(-1, self.fits)
    # rs_name, rs_text = self._assemble_script_blob()
    rid = self.per_spec.run_spec.runid
    # blanks = self.get_previous_blanks()
    # dkeys = [d.name for d in self._active_detectors]
    # sf = dict(zip(dkeys, fb))
    # p = self._current_data_frame

    # NOTE(review): IC factor hard-coded to the 'CDD' detector -- confirm
    ic = self.per_spec.isotope_group.get_ic_factor('CDD')
    exp = MassSpecExportSpec(runid=rid,
                             runscript_name=self.per_spec.runscript_name,
                             runscript_text=self.per_spec.runscript_blob,
                             # signal_fits=sf,
                             mass_spectrometer=self.per_spec.run_spec.mass_spectrometer.capitalize(),
                             # blanks=blanks,
                             # data_path=p,
                             isotopes=self.per_spec.isotope_group.isotopes,
                             # signal_intercepts=si,
                             # signal_intercepts=self._processed_signals_dict,
                             is_peak_hop=self.per_spec.save_as_peak_hop,
                             ic_factor_v=float(nominal_value(ic)),
                             ic_factor_e=float(std_dev(ic)))
    exp.load_record(self.per_spec.run_spec)
    return exp
def small_sep02(mx, mxErr, lag=0):
    """
    Given a frequency matrix and errors calculates the (scaled) small
    separation d02 and propagates errors.

    Notes
    -----
    The parameter lag is the difference between the radial orders of the
    first modes of l=0 and l=2.
    """
    (Nn, Nl) = np.shape(mx)
    d02 = np.zeros((1, Nn))
    d02Err = np.zeros((1, Nn))
    d02.fill(None)  # values that can't be calculated are NaN
    for n in range(1 - lag, Nn):
        # only compute where both the l=0 and l=2 modes are present
        # (zero frequency marks a missing mode)
        if (mx[n, 0] != 0. and mx[n - 1 + lag, 2] != 0.):
            # legacy uncertainties API: ufloat takes a (value, sigma) tuple
            a = un.ufloat((mx[n, 0], mxErr[n, 0]))
            b = un.ufloat((mx[n - 1 + lag, 2], mxErr[n - 1 + lag, 2]))
            # scaled small separation: (nu_{n,0} - nu_{n-1+lag,2}) / 3
            result = (a - b) / 3.
            d02[0, n - 1 + lag] = un.nominal_value(result)
            d02Err[0, n - 1 + lag] = un.std_dev(result)
    return d02, d02Err
def small_sep13(mx, mxErr, lag=0):
    """
    Given a frequency matrix and errors calculates the (scaled) small
    separation d13 and propagates errors.

    Notes
    -----
    The parameter lag is the difference between the radial orders of the
    first modes of l=1 and l=3.
    """
    (Nn, Nl) = np.shape(mx)
    d13 = np.zeros((1, Nn))
    d13Err = np.zeros((1, Nn))
    d13.fill(None)  # values that can't be calculated remain NaN
    for n in range(1 - lag, Nn):
        # only compute where both the l=1 and l=3 modes are present
        # (zero frequency marks a missing mode)
        if (mx[n, 1] != 0. and mx[n - 1 + lag, 3] != 0.):
            # legacy uncertainties API: ufloat takes a (value, sigma) tuple
            a = un.ufloat((mx[n, 1], mxErr[n, 1]))
            b = un.ufloat((mx[n - 1 + lag, 3], mxErr[n - 1 + lag, 3]))
            # scaled small separation: (nu_{n,1} - nu_{n-1+lag,3}) / 5
            result = (a - b) / 5.
            d13[0, n - 1 + lag] = un.nominal_value(result)
            d13Err[0, n - 1 + lag] = un.std_dev(result)
    return d13, d13Err
def add_irradiation_production(self, name, pr, ifc):
    """Add an irradiation production-ratio record if an identical one
    does not already exist.

    Python 2 code (``iteritems``/``itervalues``).

    :param name: production label
    :param pr: dict with 'Cl_K' and 'Ca_K' multipliers
    :param ifc: interference correction factors keyed by pychron names
    :return: the ProductionRatiosID used for lookup/insert
    """
    kw = {}
    for k, v in ifc.iteritems():
        # translate pychron keys to the database column names
        if k == 'cl3638':
            k = 'P36Cl38Cl'
        else:
            k = k.capitalize()
        kw[k] = float(nominal_value(v))
        kw['{}Er'.format(k)] = float(std_dev(v))
    kw['ClOverKMultiplier'] = pr['Cl_K']
    kw['ClOverKMultiplierEr'] = 0
    kw['CaOverKMultiplier'] = pr['Ca_K']
    kw['CaOverKMultiplierEr'] = 0
    # identity hash of the production values
    # NOTE(review): dict iteration order is arbitrary in py2, so this crc
    # may differ between runs/interpreters for equal values -- confirm
    # acceptable
    v = binascii.crc32(''.join([str(v) for v in kw.itervalues()]))
    with self.session_ctx() as sess:
        q = sess.query(IrradiationProductionTable)
        q = q.filter(IrradiationProductionTable.ProductionRatiosID == v)
        if not self._query_one(q):
            i = IrradiationProductionTable(Label=name,
                                           ProductionRatiosID=v,
                                           **kw)
            self._add_item(i)
    return v
def calculate_ylimits(self, po, s39, vs, pma=None):
    """Widen ``po``'s calculated y-limits to cover ``vs`` +/- n-sigma.

    Steps contributing less than 1% of the total 39Ar are ignored.
    ``pma`` optionally forces a minimum upper bound.  Limits are only
    applied when ``po`` has no explicit y-limits set.
    """
    fraction = s39 / s39.sum()
    vs = vs[fraction > 0.01]  # filter ys,es if 39Ar < 1% of total

    try:
        values = array([nominal_value(vi) for vi in vs])
        errors = array([std_dev(vi) for vi in vs])
        band = errors * self.options.step_nsigma
        lower = min(values - band)
        upper = max(values + band)
        if pma:
            upper = max(pma, upper)
    except ValueError:
        # nothing survived the filter; min/max of an empty sequence
        # raises ValueError -- fall back to a default range
        lower, upper = 0, 1

    if not po.has_ylimits():
        po.calculated_ymin = (lower if po.calculated_ymin is None
                              else min(po.calculated_ymin, lower))
        po.calculated_ymax = (upper if po.calculated_ymax is None
                              else max(po.calculated_ymax, upper))
def _load_air_computed(self, an, new_list):
    """Populate the computed ratio values and summary string for an
    air analysis.

    an: the analysis; for Ar/Ar its arar_constants supply the
        atmospheric reference ratios
    new_list: when true, rebuild self.computed_values from scratch
    """
    if self.experiment_type == AR_AR:
        if new_list:
            c = an.arar_constants
            ratios = [('40Ar/36Ar', 'Ar40/Ar36', nominal_value(c.atm4036)),
                      ('40Ar/38Ar', 'Ar40/Ar38', nominal_value(c.atm4038))]
            cv = self._make_ratios(ratios)
            self.computed_values = cv

        self._update_ratios()

        try:
            niso, diso = self._get_ratio('Ar40/Ar36')
            if niso and diso:
                noncorrected = self._get_non_corrected_ratio(niso, diso)
                v, e = nominal_value(noncorrected), std_dev(noncorrected)
                # NOTE(review): hard-coded atmospheric 40Ar/36Ar; presumably
                # this should track c.atm4036 -- confirm before changing
                ref = 295.5
                self.summary_str = u'Ar40/Ar36={} {}{}({}%) IC={:0.5f}'.format(
                    floatfmt(v), PLUSMINUS, floatfmt(e),
                    format_percent_error(v, e),
                    nominal_value(noncorrected / ref))
        except Exception:
            # FIX: was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit. The summary string is
            # best-effort, so failures are still silently ignored.
            pass
    else:
        # todo add ratios for other isotopes. e.g Ne
        pass
def _monte_carlo_error_propagation(self, vr, m):
    """Propagate uncertainty through the age equation by Monte Carlo.

    Draws self._n normal samples of the total decay constant, the
    electron-capture decay constant and F, evaluates
    t = ln(lt/lec * F * vr + 1) / lt over the ensemble and returns
    (mean, std) of the resulting ages.

    vr: ratio entering the age equation
    m:  replication count for the sample matrices
        (assumes m == 1 or m == self._n so broadcasting against the
        length-n sample vectors is valid -- TODO confirm)
    """
    lambda_total = self._lambda_t
    el = self._lambda_ec
    f = self._f

    # n samples of each input, drawn from N(nominal, std_dev)
    vel = nominal_value(el) + std_dev(el) * randn(self._n)
    vt = nominal_value(lambda_total) + std_dev(lambda_total) * randn(
        self._n)
    vf = nominal_value(f) + std_dev(f) * randn(self._n)

    # BUG FIX: ones(1, m) passes m as numpy's dtype argument
    # (numpy.ones(shape, dtype)); the MATLAB-style (1, m) matrix of
    # ones was intended, i.e. shape must be the tuple (1, m).
    vt_mc = ones((1, m)) * vt
    vf_mc = ones((1, m)) * vf
    vel_mc = ones((1, m)) * vel

    t_mc = log(vt_mc / vel_mc * vf_mc * vr + 1) / vt_mc
    return mean(t_mc), std(t_mc)
def get_mean_raw(self, tau=None):
    """Return weighted-mean raw 40Ar/36Ar ratios grouped by shot count.

    tau: optional deadtime in nanoseconds; when given, the 36Ar signal
         is deadtime-corrected before the ratio is formed.

    Returns (mxs, mys, mes): shot counts, weighted means and weighted
    errors, sorted by shot count.
    """
    from operator import itemgetter

    vs = []
    corrfunc = self._deadtime_correct
    for r in six.itervalues(self._cp):
        n = int(r['NShots'])
        # 6240 presumably converts the stored units to counts/ions --
        # TODO confirm against the data source
        nv = ufloat(float(r['Ar40']), float(r['Ar40err'])) * 6240
        dv = ufloat(float(r['Ar36']), float(r['Ar36err'])) * 6240
        if tau:
            dv = corrfunc(dv, tau * 1e-9)
        vs.append((n, nv / dv))

    # FIX: lambda assigned to a name (PEP 8 E731) -> itemgetter.
    # groupby requires its input sorted by the same key.
    key = itemgetter(0)
    vs = sorted(vs, key=key)

    mxs = []
    mys = []
    mes = []
    for n, gi in groupby(vs, key=key):
        mxs.append(n)
        ys, es = list(zip(*[(nominal_value(xi[1]), std_dev(xi[1]))
                            for xi in gi]))
        wm, werr = calculate_weighted_mean(ys, es)
        mys.append(wm)
        mes.append(werr)
    return mxs, mys, mes
def _export_spec_factory(self):
    """Build a MassSpecExportSpec populated from the current per-spec
    state and load its run record.
    """
    per_spec = self.per_spec
    run_spec = per_spec.run_spec

    # IC factor is taken from the CDD detector
    ic = per_spec.isotope_group.get_ic_factor('CDD')

    spec = MassSpecExportSpec(
        runid=run_spec.runid,
        runscript_name=per_spec.runscript_name,
        runscript_text=per_spec.runscript_blob,
        mass_spectrometer=run_spec.mass_spectrometer.capitalize(),
        isotopes=per_spec.isotope_group.isotopes,
        is_peak_hop=per_spec.save_as_peak_hop,
        ic_factor_v=float(nominal_value(ic)),
        ic_factor_e=float(std_dev(ic)))

    spec.load_record(run_spec)
    return spec
def _calculate_integrated_age(self, ans, weighting):
    """Calculate the integrated (total-gas) age of a set of analyses.

    ans: analyses; skipped entirely if any is an InterpretedAgeGroup
    weighting: None/'' for a plain sum, 'Volume' for 39K-fraction
        weights, or 'Variance' for inverse-variance weights

    Returns a ufloat age; ufloat(0, 0) when it cannot be computed.
    """
    ret = ufloat(0, 0)
    if ans and all(not isinstance(a, InterpretedAgeGroup) for a in ans):
        rs = array([a.get_computed_value('rad40') for a in ans])
        ks = array([a.get_computed_value('k39') for a in ans])
        sks = ks.sum()

        weights = None
        if weighting == 'Volume':
            weights = ks / sks
        elif weighting == 'Variance':
            # NOTE(review): a zero std_dev here raises
            # ZeroDivisionError outside the try below -- confirm inputs
            weights = [1 / std_dev(k) ** 2 for k in rs / ks]

        if weights is not None:
            wmean, sum_weights = average([nominal_value(fi) for fi in rs / ks],
                                         weights=weights,
                                         returned=True)
            werr = sum_weights ** -0.5
            f = ufloat(wmean, werr)
        else:
            f = rs.sum() / sks

        a = ans[0]
        j = a.j
        try:
            # FIX: age_equation takes (j, f, ...) -- see _set_age_values --
            # and arar_constants is a keyword argument. The original call
            # age_equation(f, j, a.arar_constants) swapped j/f and passed
            # the constants into the include_decay_error slot.
            ret = age_equation(j, f, arar_constants=a.arar_constants)  # / self.age_scalar
        except ZeroDivisionError:
            pass
    return ret
def add_interpreted_age(self, ia):
    """Persist an interpreted age to the database and link each of its
    analyses (with their plateau-step flags) to the new record.
    """
    db = self.db
    scaled_age = ia.get_ma_scaled_age()

    mswd = ia.preferred_mswd
    if isnan(mswd):
        mswd = 0

    payload = dict(
        age=float(nominal_value(scaled_age)),
        age_err=float(std_dev(scaled_age)),
        display_age_units=ia.age_units,
        age_kind=ia.preferred_age_kind,
        kca_kind=ia.preferred_kca_kind,
        kca=float(ia.preferred_kca_value),
        kca_err=float(ia.preferred_kca_error),
        mswd=float(mswd),
        include_j_error_in_mean=ia.include_j_error_in_mean,
        include_j_error_in_plateau=ia.include_j_error_in_plateau,
        include_j_error_in_individual_analyses=ia.include_j_error_in_individual_analyses)

    db_ia = db.add_interpreted_age(**payload)

    payload['analyses'] = [dict(uuid=ai.uuid,
                                tag=ai.tag,
                                plateau_step=ia.get_is_plateau_step(ai))
                           for ai in ia.all_analyses]
    self._add_interpreted_age(ia, payload)

    for ai in ia.all_analyses:
        step = ia.get_is_plateau_step(ai)
        db_an = db.get_analysis_uuid(ai.uuid)
        db.add_interpreted_age_set(db_ia, db_an,
                                   tag=ai.tag,
                                   plateau_step=step)