Example #1
    def test_bad_model(self):
        X = data.sim_data(10)
        Y = models.bad_model(X)
        assert pl.allclose(Y.sum(axis=2), 1), 'should be all ones, (%s found)' % str(Y.sum(axis=2))

        # test again for a 10x1x3 dataset
        X = data.sim_data(10, [[.1, .4, .5]], [[.1, .1, .1]])
        Y = models.bad_model(X)
        assert pl.allclose(Y.sum(axis=2), 1), 'should be all ones, (%s found)' % str(Y.sum(axis=2))
Example #2
    def test_bad_model(self):
        X = data.sim_data(10)
        Y = models.bad_model(X)
        assert pl.allclose(Y.sum(axis=2), 1), 'should be all ones, (%s found)' % str(Y.sum(axis=2))

        # test again for a 10x1x3 dataset
        X = data.sim_data(10, [[.1, .4, .5]], [[.1, .1, .1]])
        Y = models.bad_model(X)
        assert pl.allclose(Y.sum(axis=2), 1), 'should be all ones, (%s found)' % str(Y.sum(axis=2))
Example #3
import os

import pylab as pl
# File and Parameters are assumed to come from DOLFIN/FEniCS, which reads
# stored parameter sets via File("parameters.xml") >> params
from dolfin import File, Parameters


def find_data(items, values):

    for subdir, dirs, files in os.walk("hole-results-old/"):

        if not os.path.isfile(subdir + "/parameters.xml"):
            continue

        # Extract the parameters used
        param = Parameters()
        File(subdir + "/parameters.xml") >> param

        # If the stored parameters match the requested values, extract the data
        if pl.allclose(pl.array([eval("param." + item) for item in items]),
                       pl.array(values)):
            data = pl.loadtxt(subdir + "/energies.txt")
            t = data[:, 0]
            S = data[:, 2]

            # Ultimate stress
            tf = (t[-1] + t[-2]) / 2

            # Elastic limit
            ind = pl.find(S > 0)[0]
            te = (t[ind] + t[ind - 1]) / 2

            return te, tf
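
A hypothetical call for illustration; the parameter names and target values below are made up and must correspond to attributes stored in each run's parameters.xml:

result = find_data(["radius", "pressure"], [0.1, 2.0])
if result is not None:
    te, tf = result
    print "elastic limit: %g, ultimate stress: %g" % (te, tf)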
Example #4
def sim_data(N,
             true_cf=[[.3, .6, .1], [.3, .5, .2]],
             true_std=[[.2, .05, .05], [.3, 0.1, 0.1]],
             sum_to_one=True):
    """ 
    Create an NxTxJ matrix of simulated data (T is determined by the length 
    of true_cf, J by the length of the elements of true_cf). 

    true_cf - a list of lists of true cause fractions (each must sum to one)
    true_std - a list of lists of the standard deviations corresponding to the true csmf's 
             for each time point. Can either be a list of length J inside a list of length
             1 (in this case, the same standard deviation is used for all time points) or 
             can be T lists of length J (in this case, a separate standard deviation
             is specified and used for each time point). 
    """

    if sum_to_one:
        assert pl.allclose(pl.sum(true_cf, 1),
                           1), 'The sum of elements of true_cf must equal 1'
    T = len(true_cf)
    J = len(true_cf[0])

    ## if only one std provided, duplicate for all time points
    if len(true_std) == 1 and len(true_cf) > 1:
        true_std = [true_std[0] for i in range(len(true_cf))]

    ## transform the mean and std to logit space
    transformed_std = []
    for t in range(T):
        pi_i = pl.array(true_cf[t])
        sigma_pi_i = pl.array(true_std[t])
        transformed_std.append(
            ((1 / (pi_i * (pi_i - 1)))**2 * sigma_pi_i**2)**0.5)

    ## find minimum standard deviation (by cause across time) and draw from this
    min_std = pl.array(transformed_std).min(0)
    common_perturbation = [
        pl.ones([T, J]) * mc.rnormal(mu=0, tau=min_std**-2) for n in range(N)
    ]

    ## draw from remaining variation
    tau = pl.array(transformed_std)**2 - min_std**2
    tau[tau == 0] = 0.000001
    additional_perturbation = [
        [mc.rnormal(mu=0, tau=tau[t]**-1) for t in range(T)] for n in range(N)
    ]

    result = pl.zeros([N, T, J])
    for n in range(N):
        result[n, :, :] = [
            mc.invlogit(
                mc.logit(true_cf[t]) + common_perturbation[n][t] +
                additional_perturbation[n][t]) for t in range(T)
        ]

    return result
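
A minimal usage sketch, assuming import pylab as pl and import pymc as mc (PyMC 2) as in the function body:

X = sim_data(5)  # default true_cf: T=2 time points, J=3 causes
print X.shape    # (5, 2, 3)
# a single list of standard deviations is reused for every time point:
Y = sim_data(5, true_cf=[[.3, .6, .1], [.2, .2, .6]], true_std=[[.2, .05, .05]])
print Y.shape    # (5, 2, 3)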
Example #5
    def test_sim_data_2(self):
        sims = 10000
        return  # skip for now
        test1 = pl.zeros(3, dtype='f').view(pl.recarray)
        for i in range(sims):
            temp = data.sim_data(1, [0.1, 0.1, 0.8], [0.01, 0.01, 0.01])
            test1 = pl.vstack((test1, temp))
        test1 = test1[1:, ]
        test2 = data.sim_data(sims, [0.1, 0.1, 0.8], [0.01, 0.01, 0.01])
        diff = (test1.mean(0) - test2.mean(0)) / test1.mean(0)
        assert pl.allclose(diff, 0, atol=0.01), 'should be close to zero, (%s found)' % str(diff)
Example #6
    def test_sim_data_2(self):
        sims = 10000
        return  # skip for now
        test1 = pl.zeros(3, dtype='f').view(pl.recarray)
        for i in range(sims):
            temp = data.sim_data(1, [0.1, 0.1, 0.8], [0.01, 0.01, 0.01])
            test1 = pl.vstack((test1, temp))
        test1 = test1[1:, ]
        test2 = data.sim_data(sims, [0.1, 0.1, 0.8], [0.01, 0.01, 0.01])
        diff = (test1.mean(0) - test2.mean(0)) / test1.mean(0)
        assert pl.allclose(diff, 0, atol=0.01), 'should be close to zero, (%s found)' % str(diff)
Example #7
def sim_data(N,
             true_cf=[[.3, .6, .1], [.3, .5, .2]],
             true_std=[[.2, .05, .05], [.3, 0.1, 0.1]],
             sum_to_one=True):
    """ 
    Create an NxTxJ matrix of simulated data (T is determined by the length 
    of true_cf, J by the length of the elements of true_cf). 

    true_cf - a list of lists of true cause fractions (each must sum to one)
    true_std - a list of lists of the standard deviations corresponding to the true csmf's 
             for each time point. Can either be a list of length J inside a list of length
             1 (in this case, the same standard deviation is used for all time points) or 
             can be T lists of length J (in this case, a separate standard deviation
             is specified and used for each time point). 
    """

    if sum_to_one:
        assert pl.allclose(pl.sum(true_cf, 1), 1), 'The sum of elements of true_cf must equal 1'
    T = len(true_cf)
    J = len(true_cf[0])
    
    ## if only one std provided, duplicate for all time points
    if len(true_std) == 1 and len(true_cf) > 1:
        true_std = [true_std[0] for i in range(len(true_cf))]

    ## transform the mean and std to logit space
    transformed_std = []
    for t in range(T): 
        pi_i = pl.array(true_cf[t])
        sigma_pi_i = pl.array(true_std[t])
        transformed_std.append( ((1/(pi_i*(pi_i-1)))**2 * sigma_pi_i**2)**0.5 )
            
    ## find minimum standard deviation (by cause across time) and draw from this
    min_std = pl.array(transformed_std).min(0)
    common_perturbation = [pl.ones([T, J]) * mc.rnormal(mu=0, tau=min_std**-2) for n in range(N)]

    ## draw from remaining variation
    tau = pl.array(transformed_std)**2 - min_std**2
    tau[tau == 0] = 0.000001
    additional_perturbation = [[mc.rnormal(mu=0, tau=tau[t]**-1) for t in range(T)] for n in range(N)]

    result = pl.zeros([N, T, J])
    for n in range(N):
        result[n, :, :] = [mc.invlogit(mc.logit(true_cf[t]) + common_perturbation[n][t] + additional_perturbation[n][t]) for t in range(T)]

    return result
Example #8
File: view.py  Project: jwblin/qtcm
import copy  # needed below for copy.copy(lon)
import pylab as p
from matplotlib.toolkits.basemap import Basemap
import os
import Scientific.IO.NetCDF as S


#--- Get data (assuming lon goes from 0 to 360):  Make arrays cyclic
#    in longitude:

time_idx = 5   #@@@ USER ADJUSTABLE

f = S.NetCDFFile('qm_seasonal_1yr.nc', mode='r')
lat = f.variables['lat'].getValue()
lon = f.variables['lon'].getValue()
u1_all = f.variables['u1'].getValue()
if p.allclose(lon[0], 0):
    u1 = p.zeros( (p.size(lat), p.size(lon)+1) )
    u1[:,0:-1] = u1_all[time_idx,:,:]
    u1[:,-1] = u1_all[time_idx,:,0]

tmp = copy.copy(lon)
lon = p.zeros((p.size(tmp)+1,))
lon[0:-1] = tmp[:]
lon[-1] = tmp[0]+360
del tmp

f.close()


#--- Mapping information:
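
For reference, later Basemap releases ship an addcyclic helper that performs the same wrap-around padding in one call; a sketch assuming the newer mpl_toolkits.basemap import path:

from mpl_toolkits.basemap import addcyclic

u1, lon = addcyclic(u1_all[time_idx, :, :], lon)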
Example #9
import pandas
import networkx as nx
import pylab as pl
import pymc as mc

import data_model  # the project module under test


def test_data_model_sim():
    # generate simulated data
    n = 50
    sigma_true = .025

    # start with truth
    a = pl.arange(0, 100, 1)
    pi_age_true = .0001 * (a * (100. - a) + 100.)

    # choose age intervals to measure
    age_start = pl.array(mc.runiform(0, 100, n), dtype=int)
    age_start.sort()  # sort to make it easy to discard the edges when testing
    age_end = pl.array(mc.runiform(age_start+1, pl.minimum(age_start+10,100)), dtype=int)

    # find truth for the integral across the age intervals
    import scipy.integrate
    pi_interval_true = [scipy.integrate.trapz(pi_age_true[a_0i:(a_1i+1)]) / (a_1i - a_0i) 
                        for a_0i, a_1i in zip(age_start, age_end)]

    # generate covariates that add explained variation
    X = mc.rnormal(0., 1.**2, size=(n,3))
    beta_true = [-.1, .1, .2]
    Y_true = pl.dot(X, beta_true)

    # calculate the true value of the rate in each interval
    pi_true = pi_interval_true*pl.exp(Y_true)

    # simulate the noisy measurement of the rate in each interval
    p = mc.rnormal(pi_true, 1./sigma_true**2.)

    # store the simulated data in a pandas DataFrame
    data = pandas.DataFrame(dict(value=p, age_start=age_start, age_end=age_end,
                                 x_0=X[:,0], x_1=X[:,1], x_2=X[:,2]))
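    # effective sample size from inverting the binomial variance
    # sigma^2 = p(1-p)/n, floored at 1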
    data['effective_sample_size'] = pl.maximum(p*(1-p)/sigma_true**2, 1.)

    data['standard_error'] = pl.nan
    data['upper_ci'] = pl.nan
    data['lower_ci'] = pl.nan

    data['year_start'] = 2005.  # TODO: make these vary
    data['year_end'] = 2005.
    data['sex'] = 'total'
    data['area'] = 'all'

    # generate a moderately complicated hierarchy graph for the model
    hierarchy = nx.DiGraph()
    hierarchy.add_node('all')
    hierarchy.add_edge('all', 'super-region-1', weight=.1)
    hierarchy.add_edge('super-region-1', 'NAHI', weight=.1)
    hierarchy.add_edge('NAHI', 'CAN', weight=.1)
    hierarchy.add_edge('NAHI', 'USA', weight=.1)
    output_template=pandas.DataFrame(dict(year=[1990, 1990, 2005, 2005, 2010, 2010]*2,
                                          sex=['male', 'female']*3*2,
                                          x_0=[.5]*6*2,
                                          x_1=[0.]*6*2,
                                          x_2=[.5]*6*2,
                                          pop=[50.]*6*2,
                                          area=['CAN']*6 + ['USA']*6))
    

    # create model and priors
    vars = data_model.data_model('test', data, hierarchy, 'all')


    # fit model
    mc.MAP(vars).fit(method='fmin_powell', verbose=1)
    m = mc.MCMC(vars)
    m.use_step_method(mc.AdaptiveMetropolis, [m.gamma_bar, m.gamma, m.beta])
    m.sample(30000, 15000, 15)

    # check estimates
    pi_usa = data_model.predict_for(output_template, hierarchy, 'all', 'USA', 'male', 1990, vars)
    assert pl.allclose(pi_usa.mean(), (m.mu_age.trace()*pl.exp(.05)).mean(), rtol=.1)

    # check convergence
    print 'gamma mc error:', m.gamma_bar.stats()['mc error'].round(2), m.gamma.stats()['mc error'].round(2)


    # plot results
    for a_0i, a_1i, p_i in zip(age_start, age_end, p):
        pl.plot([a_0i, a_1i], [p_i,p_i], 'rs-', mew=1, mec='w', ms=4)
    pl.plot(a, pi_age_true, 'g-', linewidth=2)
    pl.plot(pl.arange(101), m.mu_age.stats()['mean'], 'k-', drawstyle='steps-post', linewidth=3)
    pl.plot(pl.arange(101), m.mu_age.stats()['95% HPD interval'], 'k:', drawstyle='steps-post')
    pl.plot(pl.arange(101), pi_usa.mean(0), 'r-', linewidth=2, drawstyle='steps-post')
    pl.savefig('age_integrating_sim.png')

    # compare estimate to ground truth (skip endpoints, because they are extra hard to get right)
    assert pl.allclose(m.pi.stats()['mean'][10:-10], pi_true[10:-10], rtol=.2)
    lb, ub = m.pi.stats()['95% HPD interval'].T
    assert pl.mean((lb <= pi_true)[10:-10] & (pi_true <= ub)[10:-10]) > .75
Example #10
import copy
import pylab as p
from matplotlib.toolkits.basemap import Basemap
import os
import Scientific.IO.NetCDF as S

#--- Get data (assuming lon goes from 0 to 360):  Make arrays cyclic
#    in longitude:

time_idx = 5  #@@@ USER ADJUSTABLE

f = S.NetCDFFile('qm_seasonal_1yr.nc', mode='r')
lat = f.variables['lat'].getValue()
lon = f.variables['lon'].getValue()
u1_all = f.variables['u1'].getValue()
if p.allclose(lon[0], 0):
    u1 = p.zeros((p.size(lat), p.size(lon) + 1))
    u1[:, 0:-1] = u1_all[time_idx, :, :]
    u1[:, -1] = u1_all[time_idx, :, 0]

tmp = copy.copy(lon)
lon = p.zeros((p.size(tmp) + 1, ))
lon[0:-1] = tmp[:]
lon[-1] = tmp[0] + 360
del tmp

f.close()

#--- Mapping information:

map = Basemap(projection='cyl',
Example #11
File: slip.py  Project: MMaus/mutils
def SLIP_step3D(IC, SLIP_params, use_legacy=False):
    """
    simulates the SLIP in 3D

    :args:
        IC: initial state vector, containing y0, vx0, vz0
            (x0 is assumed to be 0;
             z0  is assumed to be 0; 
             vy0 = 0 (apex);
            also ground level = 0)
        SLIP_params(dict):
            k
            L0
            m
            alpha : "original" angle of attack
            beta  : lateral leg turn
                    foot position relative to CoM in flight:
                        xF = vx0*t_flight + L0*cos(alpha)*cos(beta)
                        yF = -L0*sin(alpha)
                        zF = vz0*t_flight - L0*cos(alpha)*sin(beta)
            dE: energy change in "midstance" by changing k and L0
            g: gravity (negative! should be ~ -9.81 for SI)
        use_legacy (bool): Use legacy code or SLIP model in C

    :returns:
        sim_data : *dict*
            the simulation results: time series 't', 'x', 'y', 'z', 'vx',
            'vy', 'vz', the stiffness and rest-length adjustments 'dk' and
            'dL', and a boolean 'sim_fail' flag (the series are None when
            the simulation failed)

    """

    alpha = SLIP_params['alpha']
    beta = SLIP_params['beta']
    k = SLIP_params['k']
    L0 = SLIP_params['L0']
    dE = SLIP_params['dE']
    g = SLIP_params['g']
    m = SLIP_params['m']

    y0 = IC[0]
    vx0 = IC[1]
    vz0 = IC[2]

    # model code in C

    #if models == None:
    #    models = []
    #    models.append(mo.makeODE(model1_code, '_slip3D_1',skiphash=skiphash))
    #    models.append(mo.makeODE(model2_code, '_slip3D_2',skiphash=skiphash))

    if not allclose(g, -9.81):
        raise ValueError, "currently gravity is hard-coded to -9.81"
    # concatenate state vector of four elements:
    # (1) time to touchdown
    # (2) time to vy = 0
    # (3) time to takeoff
    # (4) time to apex
    # (1) and (4) are analytically described

    y_land = L0 * sin(alpha)
    if y0 < y_land:
        raise ValueError, "invalid starting condition"

    # before starting, define the model:
    def SLIP_ode(y, t, params):
        """
        defines the ODE of the SLIP, under stance condition
        state: 
            [x
             y
             z
             vx
             vy
             vz]
        params:
            {'L0' : leg rest length
             'x0' : leg touchdown position
             'k'  : spring stiffness
             'm'  : mass
             'xF' : anterior foot position
             'zF' : lateral foot position }
        """

        dy0 = y[3]
        dy1 = y[4]
        dy2 = y[5]
        L = sqrt((y[0] - params['xF'])**2 + y[1]**2 + (y[2] - params['zF'])**2)
        F = params['k'] * (params['L0'] - L)
        Fx = F * (y[0] - params['xF']) / L
        Fy = F * y[1] / L
        Fz = F * (y[2] - params['zF']) / L
        dy3 = Fx / m
        dy4 = Fy / m + params['g']
        dy5 = Fz / m
        return hstack([dy0, dy1, dy2, dy3, dy4, dy5])

    def sim_until(IC, params, stop_fcn, tmax=2.):
        """
        simulates the SLIP_ode until stop_fcn has a zero-crossing
        includes a refinement of the time at this instant
        stop_fcn must be a function of the system state, e.g.
        stop_fcn(IC) must exist
        
        this function is especially adapted to the SLIP state,
        so it uses dot(x1) = x3, dot(x2) = x4
        tmax: maximal simulation time [s]
        """
        init_sign = sign(stop_fcn(IC))
        #1st: evaluate a certain fraction
        tvec_0 = .001 * arange(50)
        sim_results = []
        sim_tvecs = []
        newIC = IC
        sim_results.append(
            odeint(SLIP_ode, newIC, tvec_0, args=(params, ), rtol=1e-9))
        sim_tvecs.append(tvec_0)
        check_vec = [init_sign * stop_fcn(x) for x in sim_results[-1]]
        t_tot = 0.
        while min(check_vec) > 0:
            newIC = sim_results[-1][-1, :]
            sim_results.append(
                odeint(SLIP_ode, newIC, tvec_0, args=(params, ), rtol=1e-9))
            sim_tvecs.append(tvec_0)
            check_vec = [init_sign * stop_fcn(x) for x in sim_results[-1]]
            t_tot += tvec_0[-1]
            # time exceeded or ground hit
            if t_tot > tmax or min(sim_results[-1][:, 1]) < 0:
                raise SimFailError, "simulation failed"

        # now: zero-crossing detected
        # -> refine!
        minidx = find(array(check_vec) < 0)[0]
        if minidx == 0:
            # this should not happen because the first value in
            # check_vec should be BEFORE the zero_crossing by
            # construction
            raise ValueError, "ERROR: this should not happen!"
        # refine simulation around the crossing (n_refine steps), but only
        # for two adjacent original time frames
        newIC = sim_results[-1][minidx - 1, :]
        sim_results[-1] = sim_results[-1][:minidx, :]
        sim_tvecs[-1] = sim_tvecs[-1][:minidx]
        # avoid that last position can be the zero-crossing
        n_refine = 100
        tvec_0 = linspace(tvec_0[0], tvec_0[1] + 2. / n_refine, n_refine + 2)
        sim_results.append(
            odeint(SLIP_ode, newIC, tvec_0, args=(params, ), rtol=1e-9))
        sim_tvecs.append(tvec_0)

        # linearly interpolate to zero
        check_vec = [init_sign * stop_fcn(x) for x in sim_results[-1]]
        minidx = find(array(check_vec) < 0)[0]
        if minidx == 0:
            # this should not happen because the first value in
            # check_vec should be BEFORE the zero_crossing by
            # construction
            raise ValueError, "ERROR: this should not happen! (2)"

        # compute location of zero-crossing
        y0 = sim_results[-1][minidx - 1, :]
        y1 = sim_results[-1][minidx, :]
        fcn0 = stop_fcn(y0)
        fcn1 = stop_fcn(y1)
        t0 = tvec_0[minidx - 1]
        t1 = tvec_0[minidx]
        t_zero = t0 - (t1 - t0) * fcn0 / (fcn1 - fcn0)
        # cut last simulation result and replace last values
        # by interpolated values
        sim_results[-1] = sim_results[-1][:minidx + 1, :]
        sim_tvecs[-1] = sim_tvecs[-1][:minidx + 1]

        for coord in arange(sim_results[-1].shape[1]):
            sim_results[-1][-1, coord] = interp(
                t_zero, [t0, t1],
                [sim_results[-1][-2, coord], sim_results[-1][-1, coord]])
        sim_tvecs[-1][-1] = t_zero
        #newIC = sim_results[-1][minidx-1,:]
        #sim_results[-1] = sim_results[-1][:minidx,:]
        #sim_tvecs[-1] = sim_tvecs[-1][:minidx]
        #tvec_0 = linspace(tvec_0[0],tvec_0[1],100)
        #sim_results.append ( odeint(SLIP_ode, newIC, tvec_0,
        #            args=(params,),rtol=1e-9))
        #sim_tvecs.append(tvec_0)

        # concatenate lists
        sim_data = vstack(
            [x[:-1, :] for x in sim_results[:-1] if x.shape[0] > 1] + [
                sim_results[-1],
            ])
        sim_time = [
            sim_tvecs[0],
        ]
        for idx in arange(1, len(sim_tvecs)):
            sim_time.append(sim_tvecs[idx] + sim_time[-1][-1])
        sim_time = hstack([x[:-1] for x in sim_time[:-1]] + [
            sim_time[-1],
        ])

        return sim_data, sim_time

    # --- Section 1: time to touchdown
    # TODO: make sampling frequency regular
    t_flight1 = sqrt(-2. * (y0 - y_land) / g)
    #t_flight = sqrt()
    tvec_flight1 = .01 * arange(t_flight1 * 100.)
    vy_flight1 = tvec_flight1 * g
    y_flight1 = y0 + .5 * g * (tvec_flight1**2)
    x_flight1 = vx0 * tvec_flight1
    vx_flight1 = vx0 * ones_like(tvec_flight1)
    z_flight1 = vz0 * tvec_flight1
    vz_flight1 = vz0 * ones_like(tvec_flight1)
    x_TD = vx0 * t_flight1
    z_TD = vz0 * t_flight1

    # --- Section 2: time to vy = 0
    # approach: calculate forward -> estimate interval of
    # zero position of vy -> refine simulation in that interval
    # until a point with vy sufficiently close to zero is in the
    # resulting vector
    params = {
        'L0': L0,
        'xF': t_flight1 * vx0 + L0 * cos(alpha) * cos(beta),
        'zF': t_flight1 * vz0 - L0 * cos(alpha) * sin(beta),
        'k': k,
        'm': m,
        'g': g
    }

    buffsize = 8000
    buf = zeros((buffsize, _slipmdl1.WIDTH), dtype=np.float64)
    IC = array([x_TD, y_land, z_TD, vx0, t_flight1 * g, vz0])
    buf[0, 1:] = IC
    buf[0, 0] = t_flight1

    # for the model in C
    xF = t_flight1 * vx0 + L0 * cos(alpha) * cos(beta)
    zF = t_flight1 * vz0 - L0 * cos(alpha) * sin(beta)
    #print k, L0, m, xF, zF

    #print "IC=", IC
    #print "t0:", t_flight1
    if use_legacy:  # skip original code

        # initial guess: L0*cos(alpha)/vx0
        #t_sim1 = L0*cos(alpha)/vx0
        # TODO: implement sim_fail check!
        sim_fail = False
        try:
            sim_phase2, t_phase2 = sim_until(IC, params, lambda x: x[4])
            t_phase2 += t_flight1
        except SimFailError:
            print 'simulation aborted (phase 2)\n'
            sim_fail = True

        #print "t_min:", t_phase2[-1]
        #print "y_min:", sim_phase2[-1,1]

    else:
        pars = array([k, L0, m, xF, zF], dtype=np.float64)
        #print "pars=", pars

        sim_fail = False
        N = _slipmdl1.odeOnce(buf, 2., dt=5e-3, pars=pars)
        #print "FS (landing): ", buf[N, [0,2,5]]
        #print "N=", N
        if N >= buffsize - 1 or buf[N, 0] >= 2.:
            sim_fail = True
            print "simulation aborted (phase 2)"

        if not allclose(buf[N, 5], 0):
            print "WARNING: nadir not found"

        sim_phase2 = buf[:N, 1:].copy()
        t_phase2 = buf[:N, 0].copy()

    # Phase 3:

    #return t_phase2, sim_phase2
    if not sim_fail:
        L = sqrt(sim_phase2[-1, 1]**2 + (sim_phase2[-1, 0] - params['xF'])**2 +
                 (sim_phase2[-1, 2] - params['zF'])**2)

        if use_legacy:  # skip original code
            dk, dL = dk_dL(L0, k, L, dE)
            params2 = deepcopy(params)
            params2['k'] += dk
            params2['L0'] += dL
            IC = sim_phase2[-1, :]
            compression = (lambda x: sqrt(
                (x[0] - params2['xF'])**2 + x[1]**2 +
                (x[2] - params['zF'])**2) - params2['L0'])
            #print ('L:', L, 'dk', dk, 'dL', dL, 'dE', dE, '\ncompression:', compression(IC),
            #      'IC', IC)
            try:
                sim_phase3, t_phase3 = sim_until(IC, params2, compression)
                sim_phase3 = sim_phase3[1:, :]
                t_phase3 = t_phase3[1:] + t_phase2[-1]
            except SimFailError:
                print 'simulation aborted (phase 3)\n'
                sim_fail = True
        else:

            dk, dL = dk_dL(L0, k, L, dE)
            pars[0] += dk
            pars[1] += dL

            # new IC and start time is final state and time from previous
            # simulation step

            buf[0, :] = buf[N, :]
            buf[0, 5] = 0.  # hard-code vertical velocity set to 0.

            N = _slipmdl2.odeOnce(buf, 2., dt=5e-3, pars=pars)
            if N >= buffsize - 1 or buf[N, 0] >= 2.:
                sim_fail = True
                print "simulation aborted (phase 3)"
            else:
                sim_phase3 = buf[1:N, 1:]
                t_phase3 = buf[1:N, 0]

    # Phase 4:
    if not sim_fail:
        # time to apex
        # TODO: make sampling frequency regular
        vy_liftoff = sim_phase3[-1, 4]
        #vz_liftoff = sim_phase3[-1,5]
        t_flight2 = -1. * vy_liftoff / g
        #t_flight = sqrt()
        tvec_flight2 = arange(t_flight2, 0, -.01)[::-1]
        vy_flight2 = tvec_flight2 * g + vy_liftoff
        y_flight2 = (sim_phase3[-1, 1] + vy_liftoff * tvec_flight2 + .5 * g *
                     (tvec_flight2**2))
        x_flight2 = sim_phase3[-1, 0] + sim_phase3[-1, 3] * tvec_flight2
        vx_flight2 = sim_phase3[-1, 3] * ones_like(tvec_flight2)
        z_flight2 = sim_phase3[-1, 2] + sim_phase3[-1, 5] * tvec_flight2
        vz_flight2 = sim_phase3[-1, 5] * ones_like(tvec_flight2)
        #print tvec_flight2
        tvec_flight2 += t_phase3[-1]

    # todo: return data until error
    if sim_fail:
        return {
            't': None,
            'x': None,
            'y': None,
            'z': None,
            'vx': None,
            'vy': None,
            'vz': None,
            'sim_fail': sim_fail,
            'dk': None,
            'dL': None
        }

    # finally: concatenate phases
    x_final = hstack(
        [x_flight1, sim_phase2[:, 0], sim_phase3[:, 0], x_flight2])
    y_final = hstack(
        [y_flight1, sim_phase2[:, 1], sim_phase3[:, 1], y_flight2])
    z_final = hstack(
        [z_flight1, sim_phase2[:, 2], sim_phase3[:, 2], z_flight2])
    vx_final = hstack(
        [vx_flight1, sim_phase2[:, 3], sim_phase3[:, 3], vx_flight2])
    vy_final = hstack(
        [vy_flight1, sim_phase2[:, 4], sim_phase3[:, 4], vy_flight2])
    vz_final = hstack(
        [vz_flight1, sim_phase2[:, 5], sim_phase3[:, 5], vz_flight2])
    tvec_final = hstack([tvec_flight1, t_phase2, t_phase3, tvec_flight2])

    return {
        't': tvec_final,
        'x': x_final,
        'y': y_final,
        'z': z_final,
        'vx': vx_final,
        'vy': vy_final,
        'vz': vz_final,
        'sim_fail': sim_fail,
        'dk': dk,
        'dL': dL,
        #'sim_res':sim_res,
        #'sim_phase2': sim_phase2_cut,
        #'t_phase2': t_phase2_cut
    }
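
A hypothetical usage sketch; the parameter values are illustrative only, and the module's pylab-style numerical imports are assumed to be in place:

IC = [1.0, 5.0, 0.0]  # apex height y0 [m], vx0 [m/s], vz0 [m/s]
SLIP_params = {'k': 20000., 'L0': 1., 'm': 80., 'alpha': 1.2,
               'beta': 0., 'dE': 0., 'g': -9.81}
res = SLIP_step3D(IC, SLIP_params, use_legacy=True)
if not res['sim_fail']:
    print "final apex: y=%.3f m, vx=%.3f m/s" % (res['y'][-1], res['vx'][-1])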
Example #12
import warnings

import pylab as pl
import scipy.sparse as sp
# assumed alias: scipy's sparse least-squares solver returns (x, istop, itn, ...)
from scipy.sparse.linalg import lsqr as sparse_lsqr


def intersect(h1, h2):
    if h1.ambiant_dimension != h2.ambiant_dimension:
        raise ValueError("Different ambiant spaces dimensions ({}!={})".format(
            h1.ambiant_dimension, h2.ambiant_dimension))


    # Trivial cases could be handled up front, e.g.:
    # elif pl.allclose(w1, w2) and pl.allclose(b1, b2):
    #     print("return same object")
    #     return copy(h1)
    # elif pl.allclose(w1, w2) and not pl.allclose(b1, b2):
    #     print("return empty set")
    #     return None

    # Now that the trivial cases have been removed, there exists an
    # intersection of the two hyperplanes. To obtain the new form of the
    # hyperplane we perform the following 3 steps:
    # (1) consider the intersection of two affine spaces as the intersection
    #     of a linear space and an affine space with a modified bias
    # (2) express the bias of the affine space in the linear space; this
    #     gives us the bias of the new affine space
    # (3) compute the basis of the intersection of the two linear spaces;
    #     this gives us the span of the linear space which, jointly with the
    #     above bias, defines the affine intersection

    # (1) we set h2 to be without bias and thus alter h1's bias as follows
    bp = h1.bias - h2.bias
    # (2)
    A = sp.hstack([h1.basis, h2.basis])
    output = sparse_lsqr(A, bp)
    alpha_beta_star, istop, itn = output[:3]
    if istop == 2:
        warnings.warn("In Intersection of {} with {},\
                least square solution is approximate".format(h1.name, h2.name))
    # since we set h2 to be without bias we need to express the bias vector w.r.t.
    # the h2 basis as follows
    bpp = h2.basis.dot(alpha_beta_star[h1.dim:])
    print("New bias is {}".format(bpp))
    # (3) --------- Implement the Zassenhaus algorithm -------------
    # assumes each space basis is in the same basis
    # first need to create the original matrix
    print(h1.basis.todense(), '\n\n')
    print(h2.basis.todense(), '\n\n')
    matrix = sp.bmat([[h1.basis, h2.basis], [h1.basis, None]]).T.todense()
    # Now we need to put the top left half in row echelon form
    # loop over the columns of the left half
    rows = range(matrix.shape[0])
    for column in range(h1.dim + h2.dim):
        #        print('start column:{}\nmatrix:\n{}\n\n'.format(column,matrix))
        # compute current index and value of pivot A_{column,column}
        pivot = matrix[rows[column], column]
        # check for active pivoting to switch current row
        # with the one with maximum value
        maximum_index = matrix[rows[column:], column].argmax() + column
        if maximum_index > column:
            print('needs preventive partial pivoting')
            rows[column] = maximum_index
            rows[maximum_index] = column
            pivot = matrix[rows[column], column]  # re-read pivot from the swapped-in row
        # Loop over the rows
        multiplicators = matrix[rows[column + 1:], column] / pivot
        matrix[rows[column + 1:]] -= multiplicators.reshape(
            (-1, 1)) * matrix[rows[column]].reshape((1, -1))
        if pl.allclose(matrix[rows[column + 1:]], 0):
            return matrix
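
Step (3) above names the Zassenhaus algorithm. A compact exact-arithmetic sketch with sympy dense matrices (illustrative only; the function above works on scipy sparse matrices in floating point):

import sympy

def zassenhaus_intersection(U, W):
    # U, W: lists of basis row vectors of two subspaces of R^n
    U, W = sympy.Matrix(U), sympy.Matrix(W)
    n = U.cols
    # row-reduce the block matrix [[U, U], [W, 0]]
    block = U.row_join(U).col_join(W.row_join(sympy.zeros(W.rows, n)))
    R, _ = block.rref()
    # rows whose left half vanished carry a basis of the
    # intersection in their right half
    return [R[i, n:] for i in range(R.rows)
            if all(x == 0 for x in R[i, :n])
            and any(x != 0 for x in R[i, n:])]

# intersecting the xy-plane with the yz-plane recovers span{e_y}:
# zassenhaus_intersection([[1, 0, 0], [0, 1, 0]], [[0, 1, 0], [0, 0, 1]])
# -> [Matrix([[0, 1, 0]])]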
Example #13
import pandas
import networkx as nx
import pylab as pl
import pymc as mc

import data_model  # the project module under test


def test_data_model_sim():
    # generate simulated data
    n = 50
    sigma_true = .025

    # start with truth
    a = pl.arange(0, 100, 1)
    pi_age_true = .0001 * (a * (100. - a) + 100.)

    # choose age intervals to measure
    age_start = pl.array(mc.runiform(0, 100, n), dtype=int)
    age_start.sort()  # sort to make it easy to discard the edges when testing
    age_end = pl.array(mc.runiform(age_start + 1,
                                   pl.minimum(age_start + 10, 100)),
                       dtype=int)

    # find truth for the integral across the age intervals
    import scipy.integrate
    pi_interval_true = [
        scipy.integrate.trapz(pi_age_true[a_0i:(a_1i + 1)]) / (a_1i - a_0i)
        for a_0i, a_1i in zip(age_start, age_end)
    ]

    # generate covariates that add explained variation
    X = mc.rnormal(0., 1.**2, size=(n, 3))
    beta_true = [-.1, .1, .2]
    Y_true = pl.dot(X, beta_true)

    # calculate the true value of the rate in each interval
    pi_true = pi_interval_true * pl.exp(Y_true)

    # simulate the noisy measurement of the rate in each interval
    p = mc.rnormal(pi_true, 1. / sigma_true**2.)

    # store the simulated data in a pandas DataFrame
    data = pandas.DataFrame(
        dict(value=p,
             age_start=age_start,
             age_end=age_end,
             x_0=X[:, 0],
             x_1=X[:, 1],
             x_2=X[:, 2]))
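    # effective sample size from inverting the binomial variance
    # sigma^2 = p(1-p)/n, floored at 1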
    data['effective_sample_size'] = pl.maximum(p * (1 - p) / sigma_true**2, 1.)

    data['standard_error'] = pl.nan
    data['upper_ci'] = pl.nan
    data['lower_ci'] = pl.nan

    data['year_start'] = 2005.  # TODO: make these vary
    data['year_end'] = 2005.
    data['sex'] = 'total'
    data['area'] = 'all'

    # generate a moderately complicated hierarchy graph for the model
    hierarchy = nx.DiGraph()
    hierarchy.add_node('all')
    hierarchy.add_edge('all', 'super-region-1', weight=.1)
    hierarchy.add_edge('super-region-1', 'NAHI', weight=.1)
    hierarchy.add_edge('NAHI', 'CAN', weight=.1)
    hierarchy.add_edge('NAHI', 'USA', weight=.1)
    output_template = pandas.DataFrame(
        dict(year=[1990, 1990, 2005, 2005, 2010, 2010] * 2,
             sex=['male', 'female'] * 3 * 2,
             x_0=[.5] * 6 * 2,
             x_1=[0.] * 6 * 2,
             x_2=[.5] * 6 * 2,
             pop=[50.] * 6 * 2,
             area=['CAN'] * 6 + ['USA'] * 6))

    # create model and priors
    vars = data_model.data_model('test', data, hierarchy, 'all')

    # fit model
    mc.MAP(vars).fit(method='fmin_powell', verbose=1)
    m = mc.MCMC(vars)
    m.use_step_method(mc.AdaptiveMetropolis, [m.gamma_bar, m.gamma, m.beta])
    m.sample(30000, 15000, 15)

    # check estimates
    pi_usa = data_model.predict_for(output_template, hierarchy, 'all', 'USA',
                                    'male', 1990, vars)
    assert pl.allclose(pi_usa.mean(), (m.mu_age.trace() * pl.exp(.05)).mean(),
                       rtol=.1)

    # check convergence
    print 'gamma mc error:', m.gamma_bar.stats()['mc error'].round(2), m.gamma.stats()['mc error'].round(2)

    # plot results
    for a_0i, a_1i, p_i in zip(age_start, age_end, p):
        pl.plot([a_0i, a_1i], [p_i, p_i], 'rs-', mew=1, mec='w', ms=4)
    pl.plot(a, pi_age_true, 'g-', linewidth=2)
    pl.plot(pl.arange(101),
            m.mu_age.stats()['mean'],
            'k-',
            drawstyle='steps-post',
            linewidth=3)
    pl.plot(pl.arange(101),
            m.mu_age.stats()['95% HPD interval'],
            'k:',
            drawstyle='steps-post')
    pl.plot(pl.arange(101),
            pi_usa.mean(0),
            'r-',
            linewidth=2,
            drawstyle='steps-post')
    pl.savefig('age_integrating_sim.png')

    # compare estimate to ground truth (skip endpoints, because they are extra hard to get right)
    assert pl.allclose(m.pi.stats()['mean'][10:-10], pi_true[10:-10], rtol=.2)
    lb, ub = m.pi.stats()['95% HPD interval'].T
    assert pl.mean((lb <= pi_true)[10:-10] & (pi_true <= ub)[10:-10]) > .75