def __init__(self, x=None, y=None, w=None, bbox=(None, None), k=3, s=0,
             filename=None):
    """
    Fit a smoothing spline to (x, y) data, or load a previously saved one.

    Parameters
    ----------
    x, y : 1-d sequences of data points (x must be in strictly ascending
        order). x is production rate, y is corresponding depth.

    Optional input:
        w        - positive 1-d sequence of weights
        bbox     - 2-sequence specifying the boundary of the approximation
                   interval. By default, bbox=[x[0], x[-1]]
        k        - degree of the univariate spline (defaults to 3)
        s        - positive smoothing factor defined for estimation
                   condition: sum((w[i]*(y[i]-s(x[i])))**2, axis=0) <= s
                   Default s=len(w) which should be a good value if 1/w[i]
                   is an estimate of the standard deviation of y[i].
        filename - file to load a saved spline from
    """
    # Load a pickled spline only when a filename was actually supplied AND
    # the data points are missing.  The original condition
    #     (x is None) or (y is None) and (filename is not None)
    # was parsed as  x_missing or (y_missing and have_file)  because `and`
    # binds tighter than `or`, so a call with x=None and no filename tried
    # to unpickle None.
    if ((x is None) or (y is None)) and (filename is not None):
        self._data = util.unpickle(filename)
    else:
        # Fit the spline directly from the data.  bbox defaults to a tuple
        # rather than the original mutable list default; it is only ever
        # indexed, so callers see no difference.
        self._data = dfitpack.fpcurf0(x, y, k, w=w, xb=bbox[0], xe=bbox[1],
                                      s=s)
    self._reset_class()
def __init__(self, *args, **kwargs):
    """
    Construct a production spline, either from data or from a saved file.

    Parameters
    ----------
    x, y : 1-d sequences of data points (x must be in strictly ascending
        order). x is production rate, y is corresponding depth.

    Optional input:
        w        - positive 1-d sequence of weights
        bbox     - 2-sequence specifying the boundary of the approximation
                   interval. By default, bbox=[x[0], x[-1]]
        k        - degree of the univariate spline (defaults to 3)
        s        - positive smoothing factor defined for estimation
                   condition: sum((w[i]*(y[i]-s(x[i])))**2, axis=0) <= s
                   Default s=len(w) which should be a good value if 1/w[i]
                   is an estimate of the standard deviation of y[i].
        filename - file to load a saved spline from
    """
    self.filename = None
    self.ext = None
    if 'filename' in kwargs:
        # Consume 'filename' so it is not forwarded to the parent class.
        # pop() replaces the original lookup-then-del pair.
        self.filename = kwargs.pop('filename')
        data = util.unpickle(self.filename)
        # isinstance() rather than type(...) == tuple: correct idiom for
        # type checks and tolerant of tuple subclasses.
        if isinstance(data, tuple):
            # Old style data format: the raw spline data tuple.
            self._data = data
            self.ext = 0
        else:
            # New style data format is a dict carrying the spline data
            # plus the extrapolation mode.
            self._data = data['_data']
            self.ext = data['ext']
        self._reset_class()
    else:
        # No file given: defer to the normal spline-fitting constructor.
        super(ProductionSpline, self).__init__(*args, **kwargs)
import nuclide
import sim

# Nuclide model for Be-10 in quartz; supplies the measurement-error model
# used below.
n = nuclide.Be10Qtz()

# get the data from the simulation loaded into memory
# NOTE(review): `np` and `util` are not imported in this chunk — presumably
# imported earlier in the file; verify.
concs = np.genfromtxt("concs.txt")
errors = np.genfromtxt("errors.txt")
models = np.genfromtxt("models.txt")
conc_true = np.genfromtxt("conc_true.txt")
conc_meas = np.genfromtxt("conc_meas.txt")
conc_meas_err = n.measurement_error(conc_meas)
ms = np.genfromtxt("ms.txt")
misfits = np.genfromtxt("misfits.txt")
constraints = util.unpickle("constraints.dat")
con = constraints  # short alias used throughout the rest of the script
p = util.unpickle("production_rate.dat")
dof = con["n_gl"]  # degrees of freedom: number of glaciations

# denormalize the models
# (convert normalized erosion-depth models to physical units; presumably
# cm of rock via density rho — confirm units against sim module)
ms *= con["max_dz"] / con["rho"] / 100.0
dz_true_m = con["dz_true_m"]

# get data for plotting a depth vs time curve
t_true, z_true = sim.glacial_depth_v_time(con["t_gl"], con["t_int"],
                                          con["t_postgl"], dz_true_m,
                                          n_gl=con["n_gl"])
# Two points (enter/exit) per glaciation plus endpoints.
dvt_len = 2 * (con["n_gl"] + 1)
fit_t = np.zeros((misfits.size, dvt_len))
fit_z = np.empty((misfits.size, dvt_len))
# Build a depth-vs-time history for every sampled model.
# NOTE(review): this call is truncated in the visible chunk and continues
# beyond it.
for i in range(misfits.size):
    fit_t[i, :], fit_z[i, :] = sim.glacial_depth_v_time(
import nuclide
import sim

# Nuclide model for Be-10 in quartz; supplies the measurement-error model
# used below.
n = nuclide.Be10Qtz()

# get the data from the simulation loaded into memory
# NOTE(review): `np` and `util` are not imported in this chunk — presumably
# imported earlier in the file; verify.
concs = np.genfromtxt('concs.txt')
errors = np.genfromtxt('errors.txt')
models = np.genfromtxt('models.txt')
conc_true = np.genfromtxt('conc_true.txt')
conc_meas = np.genfromtxt('conc_meas.txt')
conc_meas_err = n.measurement_error(conc_meas)
ms = np.genfromtxt('ms.txt')
misfits = np.genfromtxt('misfits.txt')
constraints = util.unpickle('constraints.dat')
con = constraints  # short alias used throughout the rest of the script
p = util.unpickle('production_rate.dat')
dof = con['n_gl']  # degrees of freedom: number of glaciations

# denormalize the models
# (convert normalized erosion-depth models to physical units; presumably
# cm of rock via density rho — confirm units against sim module)
ms *= con['max_dz'] / con['rho'] / 100.0
dz_true_m = con['dz_true_m']

# get data for plotting a depth vs time curve
t_true, z_true = sim.glacial_depth_v_time(con['t_gl'], con['t_int'],
                                          con['t_postgl'], dz_true_m,
                                          n_gl=con['n_gl'])
# Two points (enter/exit) per glaciation plus endpoints.
dvt_len = 2 * (con['n_gl'] + 1)
import numpy as np
import numpy.random
import matplotlib.pyplot as plt
from cosmogenic import util
import nuclide
import sim

# get the data from the simulation loaded into memory
concs = np.genfromtxt('concs.txt')
errors = np.genfromtxt('errors.txt')
models = np.genfromtxt('models.txt')
# atleast_2d / atleast_1d guard against genfromtxt collapsing a single
# row/value to a lower-dimensional array.
ms = np.atleast_2d(np.genfromtxt('ms.txt'))
misfits = np.atleast_1d(np.genfromtxt('misfits.txt'))
con = util.unpickle('con.dat')
p = util.unpickle('production_rate.dat')
# Undo the [min_dz, max_dz] -> [0, 1] normalization of the sampled models.
ms_denorm = ms * (con['max_dz'] - con['min_dz']) + con['min_dz']
# denormalize the models
# (presumably converts to meters via density rho — confirm units against
# the sim module)
ms_m = ms_denorm / con['rho'] / 100.0
# Two points (enter/exit) per glaciation plus endpoints.
dvt_len = 2 * (con['n_gl'] + 1)
fit_t = np.zeros((misfits.size, dvt_len))
fit_z = np.empty((misfits.size, dvt_len))
# Build a depth-vs-time history for every sampled model.
for i in range(misfits.size):
    fit_t[i, :], fit_z[i, :] = sim.glacial_depth_v_time(con['t_gl'],
                                                        con['t_int'],
                                                        con['t_postgl'],
                                                        ms_m[i],
                                                        n_gl=con['n_gl'])
from __future__ import division import numpy as np from cosmogenic import util import sim import nuclide import na import production con = util.unpickle('con.dat') print con p = util.unpickle('production_rate.dat') # we need a way to measure error between models def chi2(a, b, sigma): """ Chi squared of two vectors """ return (((a - b) / sigma)**2).sum() # degrees of freedom in our problem print 'Degrees of freedom =', con['dof'] try: perm_err = chi2(con['C_meas'], con['C_target'], con['C_meas_err']) print 'Error from permutation =', perm_err except: pass # limits of the parameter space, normalized to be in [0, 1] hi_lim = np.ones(con['n_gl']) lo_lim = np.zeros(con['n_gl'])