Example #1
def test_covariate_model_dispersion():
    # simulate negative binomial data
    n = 100

    model = data.ModelData()
    model.hierarchy, model.output_template = data_simulation.small_output()

    Z = mc.rcategorical([.5, .5], n)
    zeta_true = -.2

    pi_true = .1
    ess = 10000.*pl.ones(n)
    eta_true = pl.log(50)
    delta_true = 50 + pl.exp(eta_true)

    p = mc.rnegative_binomial(pi_true*ess, delta_true*pl.exp(Z*zeta_true)) / ess

    
    model.input_data = pandas.DataFrame(dict(value=p, z_0=Z))
    model.input_data['area'] = 'all'
    model.input_data['sex'] = 'total'
    model.input_data['year_start'] = 2000
    model.input_data['year_end'] = 2000



    # create model and priors
    vars = dict(mu=mc.Uninformative('mu_test', value=pi_true))
    vars.update(covariate_model.mean_covariate_model('test', vars['mu'], model.input_data, {}, model, 'all', 'total', 'all'))
    vars.update(covariate_model.dispersion_covariate_model('test', model.input_data, .1, 10.))
    vars.update(rate_model.neg_binom_model('test', vars['pi'], vars['delta'], p, ess))

    # fit model
    m = mc.MCMC(vars)
    m.sample(2)
Example #2
def calcAUC(data, y0, lag, mgr, asym, time):
    """
    Calculate the area under the curve of the logistic function
    using its integrated formula
    [ A( [A-y0] log[ exp( [4m(l-t)/A]+2 )+1 ]) / 4m ] + At
    """

    # First check that max growth rate is not zero
    # If so, calculate using the data instead of the equation
    if mgr == 0:
        auc = calcAUCData(data, time)
    else:
        timeS = time[0]
        timeE = time[-1]
        t1 = asym - y0
        #try:
        t2_s = py.log(py.exp((4 * mgr * (lag - timeS) / asym) + 2) + 1)
        t2_e = py.log(py.exp((4 * mgr * (lag - timeE) / asym) + 2) + 1)
        #except RuntimeWarning as rw:
            # Exponent is too large, setting to 10^3
        #    newexp = 1000
        #    t2_s = py.log(newexp + 1)
        #    t2_e = py.log(newexp + 1)
        t3 = 4 * mgr
        t4_s = asym * timeS
        t4_e = asym * timeE

        start = (asym * (t1 * t2_s) / t3) + t4_s
        end = (asym * (t1 * t2_e) / t3) + t4_e
        auc = end - start

    if py.isinf(auc):
        x = py.diff(time)
        auc = py.sum(x * data[1:])
    return auc
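Since the closed-form antiderivative above is easy to get wrong, one quick sanity check is to compare it against a numerical trapezoidal integral of the same logistic curve. A minimal sketch (parameter values hypothetical; assumes pylab is imported as py, as the snippet does, and uses the logistic parameterization of Example #13 below):

import pylab as py

y0, asym, mgr, lag = 0.05, 1.0, 0.3, 2.0           # hypothetical growth parameters
time = py.linspace(0, 20, 2001)
curve = y0 + (asym - y0) / (1 + py.exp((4 * mgr / asym) * (lag - time) + 2))

closed_form = calcAUC(curve, y0, lag, mgr, asym, time)
numeric = py.trapz(curve, time)
print(closed_form, numeric)                         # the two estimates should agree closely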
Example #3
def duxbury_cdf(X,L,s):
	"""
	Returns the duxbury cdf evaluated at X.
	The duxbury CDF is 1 - exp( -(L^2)*exp( - (s/x)^2 ) )
	"""
	
	return 1 - pylab.exp( -L*L*pylab.exp( -((s/X)**2.0) )) 
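A quick illustration of the shape (a minimal sketch; the L and s values are hypothetical): the CDF climbs from ~0 at small stress X to ~1 at large X.

import pylab

X = pylab.linspace(0.1, 5.0, 50)       # stress values; avoid X=0 (division by zero)
F = duxbury_cdf(X, 10.0, 1.0)          # hypothetical lattice size L and scale s
print(F[0], F[-1])                     # ~0 at the low end, ~1 at the high end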
Example #4
    def diagnostic(self, kmin=1, kmax=8, k=None, ymax=None):
        self.run(kmin=kmin, kmax=kmax)
        pylab.clf()
        pylab.subplot(3,1,2)
        self.plot()
        mf = GaussianMixtureFitting(self.fitting.data)
        if k is None:
            mf.estimate(k=self.best_k)
        else:
            mf.estimate(k=k)
        pylab.subplot(3,1,1)
        mf.plot()
        if ymax is not None:
            pylab.ylim([0, ymax])

        pylab.subplot(3,1,3)
        min_value = np.array([self.all_results[x]['AICc'] for x in self.x]).min()
        pylab.plot(self.x, [pylab.exp((min_value-self.all_results[k]['AICc'])/2)
            for k in  self.x], 'o-', label='AICc')
        min_value = np.array([self.all_results[x]['AIC'] for x in self.x]).min()
        pylab.plot(self.x, [pylab.exp((min_value-self.all_results[k]['AIC'])/2)
            for k in  self.x], 'o-', label='AIC')
        
        pylab.xlabel('probability of information loss (based on AICc)')
        pylab.legend()
Example #5
 def fresnelSingleTransformFW(self,d) :
     i2 = Intensity2D(self.nx,self.startx,self.endx,
                      self.ny,self.starty,self.endy,
                      self.wl)
     u1p   = self.i*pl.exp(-1j*pl.pi/(d*self.wl)*(self.xgrid**2+self.ygrid**2))
     ftu1p = pl.fftshift(pl.fft2(pl.fftshift(u1p)))
     i2.i  = ftu1p*1j/(d*self.wl)*pl.exp(-1j*pl.pi/(d*self.wl)*(self.xgrid**2+self.ygrid**2))
     return i2
Example #6
 def wave_gen(self,ploti=1):
     if self.wave_type=="pulse":
         self.wave_origin=p.exp(-5e-2*(p.arange(self.iter_total)-20)**2)
     elif self.wave_type=="sine":
         self.wave_origin=(1-p.exp(-1e-7*(p.arange(self.iter_total))))*p.sin(2*p.pi*p.arange(self.iter_total)/(20))
     
     if ploti==1:
         p.figure(3)
         p.plot(self.wave_origin)
Example #7
def beta(v, gate):
   """
   backward rate of the Hudgkin-Huxley potassium gate
   """
   if gate=='n':
      return 0.125 * p.exp( (v+65)/-80. )
   elif gate=='m':
      return 4 * p.exp(-(v+65) / 18)
   elif gate=='h':
      return 1 / (1 + p.exp( -(v+35) / 10 ))
Example #8
def plot_jp_tmax_surf(mu,c,phi,pmax,smax,ks):
    # @brief tau max based on varying slip rate, normal pressure
    s_dot = py.arange(0,smax,smax/100.)
    prange = py.arange(0,pmax,pmax/100.)
    kap = 1-py.exp(-s_dot/ks)   #  kappa
    
    TMAX = py.zeros((len(kap),len(prange)))
    tphi = py.tan(phi)  # keep tan(phi) handy
    for k_i in range(0,len(kap)):
        k_tmp = kap[k_i]    
        for p_j in range(0,len(prange)):
            p_tmp = prange[p_j]
            TMAX[k_i][p_j] = k_tmp*(c+p_tmp*tphi) + (1-k_tmp)*p_tmp*mu
            
    fig = plt.figure()           
    ax = fig.add_subplot(121)
    # should be ok to plot the surface
    S, P = py.meshgrid(s_dot, prange)
    CS = plt.contour(S,P,TMAX,8,colors='k',linewidths=1.5)
    plt.clabel(CS,inline=1,fontsize=16)
    img = plt.imshow(TMAX, interpolation='bilinear', origin='lower',
                     cmap=cm.jet,extent=(min(s_dot),max(s_dot),min(prange),max(prange)))
    CBI = plt.colorbar(img, orientation='vertical',shrink=0.8)
    CBI.set_label(r'$\tau ,max $[psi]')
    ax.set_title(r'$\tau ,max = f(\sigma,\kappa), ks=%.2f $'%ks)
    ax.set_xlabel('slip rate [in/sec]')
    ax.set_ylabel(r'$\sigma_z $',size=24)
    
    # use twice ks, re-calc what's necessary, then replot
    ks2 = ks * 2
    kap2 = 1-py.exp(-s_dot/ks2)
    
    TMAX2 = py.zeros((len(kap2),len(prange)))
    # tphi = py.tan(phi)  # keep tan(phi) handy
    for k_i in range(0,len(kap2)):
        k2_tmp = kap2[k_i]    
        for p_j in range(0,len(prange)):
            p_tmp = prange[p_j]
            TMAX2[k_i][p_j] = k2_tmp*(c+p_tmp*tphi) + (1-k2_tmp)*p_tmp*mu
            
    
    #fig = plt.figure()           
    ax = fig.add_subplot(122)
    # should be ok to plot the surface
    # S, P = py.meshgrid(s_dot, prange)
    CS2 = plt.contour(S,P,TMAX2,8,colors='k',linewidths=1.5)
    plt.clabel(CS2,inline=1,fontsize=16)
    img2 = plt.imshow(TMAX2, interpolation='bilinear', origin='lower',
                     cmap=cm.jet,extent=(min(s_dot),max(s_dot),min(prange),max(prange)))
    CBI2 = plt.colorbar(img2, orientation='vertical',shrink=0.8)
    CBI2.set_label(r'$\tau ,max $[psi]')
    ax.set_title(r'$\tau ,max = f(\sigma,\kappa), ks=%.2f $'%ks2)
    ax.set_xlabel('slip rate [in/sec]')
    ax.set_ylabel(r'$\sigma_z $',size=24)
Example #9
def alpha(v,gate):
   """
   forward rate of the Hudgkin-Huxley potassium gate
   """
   if gate=='n':
      v_centered = v + 55
      return 0.01 * v_centered / (1 - p.exp(-v_centered/10.))
   elif gate=='m':
      return 0.1 * (v + 40) / (1 - p.exp( -(v + 40)/10))
   elif gate=='h':
      return 0.07 * p.exp( - (v + 65) / 20)
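With both rate functions in hand, each gate's steady-state activation and time constant follow as x_inf = alpha/(alpha+beta) and tau = 1/(alpha+beta). A minimal sketch (assumes pylab imported as p, matching the snippets; the voltage grid is offset to avoid the removable singularities of alpha at v = -55 and v = -40 mV):

import pylab as p

v = p.linspace(-99.5, 39.5, 140)         # membrane potential in mV, 1 mV steps
for gate in ['n', 'm', 'h']:
    a, b = alpha(v, gate), beta(v, gate)
    x_inf = a / (a + b)                  # steady-state activation
    tau = 1. / (a + b)                   # time constant in ms
    p.plot(v, x_inf, label='%s_inf' % gate)
p.legend()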
Example #10
def test_predict_for_wo_data():
    """ Approach to testing predict_for function:

    1. Create model with known mu_age, known covariate values, known effect coefficients
    2. Setup MCMC with NoStepper for all stochs
    3. Sample to generate trace with known values
    4. Predict for results, and confirm that they match expected values
    """
    
    
    d = data.ModelData()
    d.hierarchy, d.output_template = data_simulation.small_output()


    # create model and priors
    vars = ism.age_specific_rate(d, 'p', 'all', 'total', 'all', None, None, None)

    # fit model
    m = mc.MCMC(vars)
    m.sample(1)


    ### Prediction case 1: constant zero random effects, zero fixed effect coefficients

    # check estimates with priors on random effects
    d.parameters['p']['random_effects'] = {}
    for node in ['USA', 'NAHI', 'super-region-1', 'all']:
        d.parameters['p']['random_effects'][node] = dict(dist='Constant', mu=0, sigma=1.e-9) # zero out REs to see if test passes
        
    pred = covariate_model.predict_for(d, d.parameters['p'],
                                         'all', 'total', 'all',
                                         'USA', 'male', 1990,
                                         0., vars['p'], 0., pl.inf)


    ### Prediction case 2: constant non-zero random effects, zero fixed effect coefficients
    # FIXME: this test was failing because PyMC is drawing from the prior of beta[0] even though I asked for NoStepper
                                                      
    # check estimates with priors on random effects
    for i, node in enumerate(['USA', 'NAHI', 'super-region-1']):
        d.parameters['p']['random_effects'][node]['mu'] = (i+1.)/10.
        
    pred = covariate_model.predict_for(d, d.parameters['p'],
                                         'all', 'total', 'all',
                                         'USA', 'male', 1990,
                                         0., vars['p'], 0., pl.inf)

    # test that the predicted value is as expected
    fe_usa_1990 = pl.exp(.5*vars['p']['beta'][0].value) # beta[0] is drawn from prior, even though I set it to NoStepper, see FIXME above
    re_usa_1990 = pl.exp(.1+.2+.3)
    assert_almost_equal(pred,
                        vars['p']['mu_age'].trace() * fe_usa_1990 * re_usa_1990)
Example #11
def Fraunhofer(i, z) :
    print "Propagation:Fraunhofer"
    ft = pl.fftshift(pl.fftn(pl.fftshift(i.i)))
    dx = i.wl*z/(i.nx*i.dx)
    dy = i.wl*z/(i.ny*i.dy)
    po = pl.exp(1j*2*pl.pi/i.wl*i.dx*i.dx)/(1j*i.wl*z)
    p = pl.arange(0,i.nx)-(i.nx+0.5)/2.0
    q = pl.arange(0,i.ny)-(i.ny+0.5)/2.0
    [pp,qq] = pl.meshgrid(p,q)
    pm = pl.exp(1j*pl.pi/(i.wl*z)*((pp*dx)**2+(qq*dy)**2))
    i2 = Intensity.Intensity2D(i.nx,-i.nx*dx/2,i.nx*dx/2,i.ny,-i.ny*dy/2,i.ny*dy/2)
    i2.i = po*pm*ft
    print "Propagation:Fraunhofer>",dx,dy,i.nx*dx,i.ny*dy
    return i2
Example #12
    def fresnelSingleTransformVW(self,d) :
        # compute new window
        x2 = self.nx*pl.absolute(d)*self.wl/(self.endx-self.startx)
        y2 = self.ny*pl.absolute(d)*self.wl/(self.endy-self.starty)

        # create new intensity object
        i2 = Intensity2D(self.nx,-x2/2,x2/2,
                         self.ny,-y2/2,y2/2,
                         self.wl)

        # compute intensity
        u1p   = self.i*pl.exp(-1j*pl.pi/(d*self.wl)*(self.xgrid**2+self.ygrid**2))
        ftu1p = pl.fftshift(pl.fft2(pl.fftshift(u1p)))
        i2.i  = ftu1p*1j/(d*i2.wl)*pl.exp(-1j*pl.pi/(d*i2.wl)*(i2.xgrid**2+i2.ygrid**2))
        return i2
Example #13
def logistic(t, y0, a, mgr, l):
    """Logistic modeling"""
    startOD = y0
    exponent = ((4 * mgr / a) * (l - t)) + 2
    try:
        denom = 1 + py.exp(exponent)
        lg = startOD + ((a - startOD) / denom)
    except RuntimeWarning as rw:
        util.printStatus("*" * 55)
        util.printStatus("RuntimeWarning: {}".format(rw))
        util.printStatus("   Exponent value for logistic "
                         "equation is {:.3f}".format(exponent[0]))
        util.printStatus("   This produces a large value in the denominator "
                         "of the logistic equation, probably due to a small "
                         "asymptote value and large max growth rate")
        util.printStatus("   Now setting denominator to value of 10^3")
        util.printStatus("   Predicted parameters:")
        util.printStatus("      y0: {:.3f}".format(y0))
        util.printStatus("      Lag: {:.3f}".format(l))
        util.printStatus("      MGR: {:.3f}".format(mgr))
        util.printStatus("      A: {:.3f}".format(a))
        util.printStatus("*" * 55)
        newdenom = []
        for e in exponent:
            if e > 500:
                newdenom.append(1000.)  # overflow guard: clamp denominator to 10^3
            else:
                newdenom.append(1 + py.exp(e))
        denom = py.array(newdenom)
        lg = startOD + ((a - startOD) / denom)

    return lg
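A brief usage sketch (hypothetical parameter values; assumes pylab imported as py, as the snippet does): the curve starts near y0, turns upward after the lag l, and saturates at the asymptote a.

import pylab as py

t = py.linspace(0., 24., 100)                       # time, e.g. hours
od = logistic(t, y0=0.05, a=1.2, mgr=0.25, l=4.0)
print(od[0], od[-1])                                # ~y0 at t=0, approaching a at large t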
Example #14
    def __init__(self, cambParam=None, **kws):
        """
        Initialize cosmological parameters
        """
        if cambParam is None:
            self.cp = param.CambParams(**kws)
        else:
            self.cp = cambParam

        (self.h, self.om0, self.omB, self.omL, self.omR, self.n) = (
            self.cp.hubble / 100.0,
            self.cp.omega_cdm,
            self.cp.omega_baryon,
            self.cp.omega_lambda,
            4.17e-5 / (self.cp.hubble / 100.0) ** 2,
            self.cp.scalar_spectral_index[0],
        )
        self.omC = self.om0 - self.omB
        self.delH = (
            1.94e-5
            * self.om0 ** (-0.785 - 0.05 * M.log(self.om0))
            * M.exp(-0.95 * (self.n - 1) - 0.169 * (self.n - 1) ** 2)
        )
        self.t = self.bbks
        ##   h^-1 normalization for sigma8=0.9
        self.bondEfsNorm = 216622.0
        ##      bare normalization by Pan (apparently correct for h_1)
        self.bondEfsNorm = 122976.0
Example #15
def gaussian(x,c,w):
	""" Analytic Gaussian function with amplitude 'a', center 'c', width 'w'.
		The FWHM of this fn is 2*sqrt(2*log(2))*w 
		NOT NORMALISED """
	G = exp(-(x-c)**2/(2*w**2))
	G /= G.max()
	return G
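The FWHM claim in the docstring is easy to verify numerically (a minimal sketch):

from pylab import sqrt, log, linspace

c, w = 0.0, 2.0
x = linspace(-10, 10, 100001)
G = gaussian(x, c, w)                              # unit peak by construction
measured = x[G >= 0.5].max() - x[G >= 0.5].min()   # full width at half maximum
print(measured, 2 * sqrt(2 * log(2)) * w)          # both ~4.71 for w = 2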
Example #16
 def _pvoc2(self, X_hat, Phi_hat=None, R=None):
     """
     ::
       alternate (batch) implementation of phase vocoder - time-stretch
       inputs:
         X_hat - estimate of signal magnitude
         [Phi_hat] - estimate of signal phase
         [R] - resynthesis hop ratio
       output:
         updates self.X_hat with modified complex spectrum
     """
     N, W, H = self.nfft, self.wfft, self.nhop
     R = 1.0 if R is None else R
     dphi = P.atleast_2d((2*P.pi * H * P.arange(N/2+1)) / N).T
     print "Phase Vocoder Resynthesis...", N, W, H, R
     A = P.angle(self.STFT) if Phi_hat is None else Phi_hat
     U = P.diff(A,1) - dphi
     U = U - P.np.round(U/(2*P.pi))*2*P.pi
     n_cols = X_hat.shape[1]
     t = P.arange(0, n_cols-1, R)                  # fractional analysis-frame positions
     tf = t - P.floor(t)                           # linear interpolation weights
     idx = P.np.int32(P.floor(t))                  # integer frame indices
     # accumulate phase frame-by-frame, as in the loop version _pvoc below
     phs = P.np.cumsum(P.c_[A[:,0], U + dphi], axis=1)
     Xh = (1-tf)*X_hat[:,idx] + tf*X_hat[:,idx+1]  # interpolated magnitudes
     self.X_hat = Xh * P.exp(1j * phs[:,idx])
Example #17
    def Logistic(self):
        '''Create logistic model from data'''
        tStep = self.time[1] - self.time[0]

        # Time vector for calculating lag phase
        timevec = py.arange(self.time[0], self.time[-1], tStep / 2)

        # Try using to find logistic model with optimal lag phase
        # y = p2 + (A-p2) / (1 + exp(( (um/A) * (L-t) ) + 2))
        sse = 0
        sseF = 0

        # Attempt to use every possible value in the time vector as the lag
        # Choose lag that creates best-fit model
        for idx, lag in enumerate(timevec):
            logDataTemp = [self.startOD + ((self.asymptote - self.startOD) /
                                           (1 + py.exp((((self.maxgrowth /
                                                          self.asymptote) *
                                                         (lag - t)) + 2)
                                                       ))
                                           ) for t in self.time]
            sse = py.sum([((self.data[i] - logDataTemp[i]) ** 2)
                          for i in xrange(len(self.data) - 1)])
            if idx == 0 or sse < sseF:
                logisticData = logDataTemp
                lagF = lag
                sseF = sse

        return logisticData, lagF, sseF
Example #18
 def _pvoc(self, X_hat, Phi_hat=None, R=None):
     """
     ::
       a phase vocoder - time-stretch
       inputs:
         X_hat - estimate of signal magnitude
         [Phi_hat] - estimate of signal phase
         [R] - resynthesis hop ratio
       output:
         updates self.X_hat with modified complex spectrum
     """
     N = self.nfft
     W = self.wfft
     H = self.nhop
     R = 1.0 if R is None else R
     dphi = (2*P.pi * H * P.arange(N/2+1)) / N
     print "Phase Vocoder Resynthesis...", N, W, H, R
     A = P.angle(self.STFT) if Phi_hat is None else Phi_hat
     phs = A[:,0]
     self.X_hat = []
     n_cols = X_hat.shape[1]
     t = 0
     while P.floor(t) < n_cols:
         tf = t - P.floor(t)            
         idx = P.arange(2)+int(P.floor(t))
         idx[1] = n_cols-1 if t >= n_cols-1 else idx[1]
         Xh = X_hat[:,idx]
         Xh = (1-tf)*Xh[:,0] + tf*Xh[:,1]
         self.X_hat.append(Xh*P.exp( 1j * phs))
         U = A[:,idx[1]] - A[:,idx[0]] - dphi
         U = U - P.np.round(U/(2*P.pi))*2*P.pi
         phs += (U + dphi)
         t += P.randn()*P.sqrt(PVOC_VAR*R) + R # 10% variance
     self.X_hat = P.np.array(self.X_hat).T
Example #19
def growth_rate(params):
    x0,y0,c,k = params
    xmid, ymid = midpoint(params)
    e = pylab.exp(-k*(xmid-x0))
    rate = k*c*e / (1+e)**2
    offset = ymid - rate*xmid
    return rate, offset
Example #20
        def obs_lb(value=value, N=N,
                   Xa=Xa, Xb=Xb,
                   alpha=alpha, beta=beta, gamma=gamma,
                   bounds_func=vars['bounds_func'],
                   delta=delta,
                   age_indices=ai,
                   age_weights=aw):

            # calculate study-specific rate function
            shifts = pl.exp(pl.dot(Xa, alpha) + pl.dot(Xb, pl.atleast_1d(beta)))
            exp_gamma = pl.exp(gamma)
            mu_i = [pl.dot(weights, bounds_func(s_i * exp_gamma[ages], ages)) for s_i, ages, weights in zip(shifts, age_indices, age_weights)]  # TODO: try vectorizing this loop to increase speed
            rate_param = mu_i*N
            violated_bounds = pl.nonzero(rate_param < value)
            logp = mc.negative_binomial_like(value[violated_bounds], rate_param[violated_bounds], delta)
            return logp
Example #21
    def _istftm(self, X_hat=None, Phi_hat=None, pvoc=False, usewin=True, resamp=None):
        """
        :: 
            Inverse short-time Fourier transform magnitude. Make a signal from a |STFT| transform.
            Uses phases from self.STFT if Phi_hat is None.

            Inputs:
            X_hat - N/2+1 magnitude STFT [None=abs(self.STFT)]
            Phi_hat - N/2+1 phase STFT   [None=exp(1j*angle(self.STFT))]
            pvoc - whether to use phase vocoder [False]      
            usewin - whether to use overlap-add [True]

            Returns:
             x_hat - estimated signal
        """
        if not self._have_stft:
                return None
        X_hat = P.np.abs(self.STFT) if X_hat is None else P.np.abs(X_hat)
        if pvoc:
            self._pvoc(X_hat, Phi_hat, pvoc)
        else:
            Phi_hat = P.angle(self.STFT) if Phi_hat is None else Phi_hat
            self.X_hat = X_hat *  P.exp( 1j * Phi_hat )
        if usewin:
            self.win = P.hanning(self.nfft) 
            self.win *= 1.0 / ((float(self.nfft)*(self.win**2).sum())/self.nhop)
        else:
            self.win = P.ones(self.nfft)
        if resamp:
            self.win = sig.resample(self.win, int(P.np.round(self.nfft * resamp)))
        fp = self._check_feature_params()
        self.x_hat = self._overlap_add(P.real(self.nfft * P.irfft(self.X_hat.T)), usewin=usewin, resamp=resamp)
        if self.verbosity:
            print "Extracted iSTFTM->self.x_hat"        
        return self.x_hat
Example #22
def test_random_effect_priors():
    model = data.ModelData()

    # set prior on sex
    parameters = dict(random_effects={'USA': dict(dist='TruncatedNormal', mu=.1, sigma=.5, lower=-10, upper=10)})


    # simulate normal data
    n = 32
    area_list = pl.array(['all', 'USA', 'CAN'])
    area = area_list[mc.rcategorical([.3, .3, .4], n)]
    alpha_true = dict(all=0., USA=.1, CAN=-.2)
    pi_true = pl.exp([alpha_true[a] for a in area])
    sigma_true = .05
    p = mc.rnormal(pi_true, 1./sigma_true**2.)

    model.input_data = pandas.DataFrame(dict(value=p, area=area))
    model.input_data['sex'] = 'male'
    model.input_data['year_start'] = 2010
    model.input_data['year_end'] = 2010

    model.hierarchy.add_edge('all', 'USA')
    model.hierarchy.add_edge('all', 'CAN')

    # create model and priors
    vars = {}
    vars.update(covariate_model.mean_covariate_model('test', 1, model.input_data, parameters, model,
                                                     'all', 'total', 'all'))

    print vars['alpha']
    print vars['alpha'][1].parents['mu']
    assert vars['alpha'][1].parents['mu'] == .1
Example #23
def test_fixed_effect_priors():
    model = data.ModelData()

    # set prior on sex
    parameters = dict(fixed_effects={'x_sex': dict(dist='TruncatedNormal', mu=1., sigma=.5, lower=-10, upper=10)})

    # simulate normal data
    n = 32
    sex_list = pl.array(['male', 'female', 'total'])
    sex = sex_list[mc.rcategorical([.3, .3, .4], n)]
    beta_true = dict(male=-1., total=0., female=1.)
    pi_true = pl.exp([beta_true[s] for s in sex])
    sigma_true = .05
    p = mc.rnormal(pi_true, 1./sigma_true**2.)

    model.input_data = pandas.DataFrame(dict(value=p, sex=sex))
    model.input_data['area'] = 'all'
    model.input_data['year_start'] = 2010
    model.input_data['year_end'] = 2010



    # create model and priors
    vars = {}
    vars.update(covariate_model.mean_covariate_model('test', 1, model.input_data, parameters, model,
                                                     'all', 'total', 'all'))

    print vars['beta']
    assert vars['beta'][0].parents['mu'] == 1.
Example #24
    def _make_log_freq_map(self):
        """
        ::

            For the given ncoef (bands-per-octave) and nfft, calculate the center frequencies
            and bandwidths of linear and log-scaled frequency axes for a constant-Q transform.
        """
        fp = self.feature_params
        bpo = float(self.nbpo) # Bands per octave
        self._fftN = float(self.nfft)
        hi_edge = float( self.hi )
        lo_edge = float( self.lo )
        f_ratio = 2.0**( 1.0 / bpo ) # Constant-Q bandwidth
        self._cqtN = float( P.floor(P.log(hi_edge/lo_edge)/P.log(f_ratio)) )
        self._dctN = self._cqtN
        self._outN = float(self.nfft/2+1)
        if self._cqtN<1: print "warning: cqtN not positive"
        mxnorm = P.empty(self._cqtN) # Normalization coefficients        
        fftfrqs = self._fftfrqs #P.array([i * self.sample_rate / float(self._fftN) for i in P.arange(self._outN)])
        logfrqs=P.array([lo_edge * P.exp(P.log(2.0)*i/bpo) for i in P.arange(self._cqtN)])
        logfbws=P.array([max(logfrqs[i] * (f_ratio - 1.0), self.sample_rate / float(self._fftN)) 
                         for i in P.arange(self._cqtN)])
        #self._fftfrqs = fftfrqs
        self._logfrqs = logfrqs
        self._logfbws = logfbws
        self._make_cqt()
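The center frequencies above are just a geometric series between lo and hi; the same arithmetic can be sketched outside the class (edge values hypothetical):

import pylab as P

bpo, lo_edge, hi_edge = 12.0, 55.0, 440.0   # hypothetical: 12 bands/octave over 3 octaves
f_ratio = 2.0**(1.0 / bpo)                  # constant-Q frequency ratio between bands
cqtN = float(P.floor(P.log(hi_edge / lo_edge) / P.log(f_ratio)))
logfrqs = P.array([lo_edge * P.exp(P.log(2.0) * i / bpo) for i in P.arange(cqtN)])
print(cqtN, logfrqs[0], logfrqs[-1])        # 36 bands, from 55 Hz up to ~415 Hz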
Example #25
def test_covariate_model_sim_no_hierarchy():
    # simulate normal data
    model = data.ModelData()
    model.hierarchy, model.output_template = data_simulation.small_output()

    X = mc.rnormal(0., 1.**2, size=(128,3))

    beta_true = [-.1, .1, .2]
    Y_true = pl.dot(X, beta_true)

    pi_true = pl.exp(Y_true)
    sigma_true = .01*pl.ones_like(pi_true)

    p = mc.rnormal(pi_true, 1./sigma_true**2.)

    model.input_data = pandas.DataFrame(dict(value=p, x_0=X[:,0], x_1=X[:,1], x_2=X[:,2]))
    model.input_data['area'] = 'all'
    model.input_data['sex'] = 'total'
    model.input_data['year_start'] = 2000
    model.input_data['year_end'] = 2000

    # create model and priors
    vars = {}
    vars.update(covariate_model.mean_covariate_model('test', 1, model.input_data, {}, model, 'all', 'total', 'all'))
    vars.update(rate_model.normal_model('test', vars['pi'], 0., p, sigma_true))

    # fit model
    m = mc.MCMC(vars)
    m.sample(2)
Example #26
	def Q_calc(self,X):

		"""
			calculates Q (n_x by n_theta) matrix of the IDE model at  each time step
	
			Arguments
			----------
			X: list of ndarray
				state vectors

			Returns
			---------
			Q : list of ndarray (n_x by n_theta)
		"""

		Q=[]	
		T=len(X)
		Psi=self.model.Gamma_inv_psi_conv_Phi
		Psi_T=pb.transpose(self.model.Gamma_inv_psi_conv_Phi,(0,2,1))

		for t in range(T):

			firing_rate_temp=pb.dot(X[t].T,self.model.Phi_values)
			firing_rate=self.model.act_fun.fmax/(1.+pb.exp(self.model.act_fun.varsigma*(self.model.act_fun.v0-firing_rate_temp)))	

			#calculate q
			g=pb.dot(firing_rate,Psi_T)

			g *=(self.model.spacestep**2)	
			q=self.model.Ts*g
			q=q.reshape(self.model.nx,self.model.n_theta)
			Q.append(q)
		return Q
Example #27
 def E(self,z):
     "H(z) = H(0) E(z)"
     onez = 1+z
     onez2 = onez*onez
     onez3 = onez*onez2
     arg = self.m0*onez3 + self.k0*onez2 + self.q0*M.exp(3*self._X)
     return M.sqrt(arg)
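For the common case of a cosmological constant (w = -1, so the exp(3X) factor is 1), E(z) reduces to the familiar Friedmann form. A standalone sketch of that special case (density parameters hypothetical; pylab stands in for the module's M):

import pylab as M

def E_lcdm(z, m0=0.3, l0=0.7, k0=0.0):
    # E(z) = sqrt( Om (1+z)^3 + Ok (1+z)^2 + OL )
    onez = 1.0 + z
    return M.sqrt(m0 * onez**3 + k0 * onez**2 + l0)

print(E_lcdm(0.0), E_lcdm(1.0))   # 1.0 at z=0; ~1.76 at z=1 for these parameters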
Example #28
    def fresnelConvolutionTransform(self,d) :
        # make intensity distribution
        i2 = Intensity2D(self.nx,self.startx,self.endx,
                         self.ny,self.starty,self.endy,
                         self.wl)       

        # FT on inital distribution 
        u1ft = pl.fft2(self.i)

        # 2d convolution kernel
        k = 2*pl.pi/i2.wl
        
        # make spatial frequency matrix
        maxsfx = 2*pl.pi/self.dx
        maxsfy = 2*pl.pi/self.dy
        
        dsfx = 2*maxsfx/(self.nx)
        dsfy = 2*maxsfy/(self.ny)
        
        self.sfx = pl.arange(-maxsfx/2,maxsfx/2+1e-15,dsfx/2)
        self.sfy = pl.arange(-maxsfy/2,maxsfy/2+1e-15,dsfy/2)

        [self.sfxgrid, self.sfygrid] = pl.fftshift(pl.meshgrid(self.sfx,self.sfy))
                
        # make convolution kernel 
        kern = pl.exp(1j*d*(self.sfxgrid**2+self.sfygrid**2)/(2*k))
        
        # apply convolution kernel and invert
        i2.i = pl.ifft2(kern*u1ft) 

        return i2
Example #29
def getMassFunction(h,c):
    """
    Get n(m,z) from a halo model instance for which nu(m) has already been calculated,
    and a Camb instance.
    """
    
    nuprime2 = h.p.st_little_a * h.nu**2

    nufnu = 2.*(1.+ 1./nuprime2**h.p.stq)*M.sqrt(nuprime2/(2.*M.pi))* \
                M.exp(-nuprime2/2.) # hold off on normalization

    dlognu = h.m*0.

    for i in range(len(h.nu)):
        dlognu[i] = 0.5*M.log(h.nu_pad[i+2]/h.nu_pad[i])

    nmz_unnorm = (dlognu/h.dlogm)*nufnu/h.m**2

    w = N.where(nmz_unnorm < 1.7e308)[0]
    lw = len(w)
    if lw < len(nmz_unnorm):
        print "Warning! the mass function's blowing up!"

    h.nmz = nmz_unnorm*1.
    totaln = halo.generalIntOverMassFn(1,1,1.,h,whichp='mm')
    if h.p.st_big_a == 0.:
        h.nmz /= totaln
    else:
        h.nmz *= h.p.st_big_a

    print 'Normalization const (integrated):',1./totaln
    # if this isn't close to what you expect (~0.322 for Sheth-Tormen, 0.5 for Press-Schechter),
    # you need to expand the mass integration range, the mass bins per dex, or extrapolate c.pk.
    if h.p.st_big_a != 0.:
        print 'Used:',h.p.st_big_a
Example #30
def weibull_lsq(data):
	"""
	Returns the weibull parameters estimated by using 
	the least square method for the given data.
	The weibull CDF is 1 - exp(-(x/l)^k).
	One should be aware of the fact that this approach weighs 
	the extreme (small or large) observations more than the 
	bulk.
	"""

	# Evaluate the empirical CDF at the observations
	# and rescale to convert into empirical probability
	n = len(data)
	print type(data)
	print type(empCDF(data,data))	
	ecdf = empCDF(data,data)*n/(1.0  + n)	

	# Make the array of "infered" variables and independent variables
	y = pylab.log(-pylab.log(1-ecdf))
	x = pylab.log(data)

	# estimate regression coefficients of y = a*x + b
	a, b = lsqReg(x,y)

	# Extract the weibull parameters
	k = a 
	l = pylab.exp(-b/k)

	return k, l
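Since empCDF and lsqReg are defined elsewhere, a self-contained sketch of the same linearization, log(-log(1-F)) = k*log(x) - k*log(l), can use pylab.polyfit on simulated Weibull draws (all names and values below are illustrative assumptions):

import numpy as np
import pylab

k_true, l_true, n = 1.5, 2.0, 5000
data = np.sort(l_true * np.random.weibull(k_true, n))

# empirical CDF at the sorted observations, rescaled as in weibull_lsq
ecdf = np.arange(1, n + 1) / float(n) * n / (1.0 + n)

y = pylab.log(-pylab.log(1 - ecdf))
x = pylab.log(data)
a, b = pylab.polyfit(x, y, 1)      # regression coefficients of y = a*x + b
k_est, l_est = a, pylab.exp(-b / a)
print(k_est, l_est)                # should land near (1.5, 2.0)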
Example #31
def Tau_K_Calc(D,T,day, weights=(.5,.5)):
    P = pl.exp(1.81+(17.27*D)/(D+237.3)) # water vapor partial pressure
    h = 324.7*P/(T+273.15) # PWV in mm
    tau_w = 3.8 + 0.23*h + 0.065*h**2 # tau from weather, in %, at 22GHz
    if day > 199: day = day - 365.
    m = day + 165. # modified day of the year
    tau_d = 22.1 - 0.178*m + 0.00044*m**2 # tau from seasonal model, in %
    tau_k = weights[0]*tau_w + weights[1]*tau_d # the average, with equal weights (as in the AIPS default)
    return tau_k, h
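A brief worked example (weather values hypothetical): D is the dew point in deg C, T the air temperature in deg C, and day the day of year; the function returns the 22 GHz opacity estimate in percent together with the precipitable water vapor in mm.

tau_k, h = Tau_K_Calc(10., 25., 180)
print(tau_k, h)   # roughly 16% opacity and ~13 mm PWV for these inputs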
Example #32
def get_KOP(x1plot, y1plot, index, nbn):
    total = 0  # avoid shadowing the builtin sum()
    for n in range(nbn):
        x = x1plot[n]
        y = y1plot[n]
        #get polar coordinate from (x,y) euclidean coordinates
        r, theta = cmath.polar(complex(x, y))
        total += pb.exp(complex(0, theta))
    return cmath.polar(total / nbn)
Example #33
def normal_2d(dx, dy, scale, fwhm=None, sigma=None):
    # not yet bivariate
    if fwhm is None and sigma is None:
        raise Exception("must specify fwhm or sigma")
    if fwhm is not None and sigma is not None:
        raise Exception("can't specify both fwhm and sigma")
    if fwhm is not None and sigma is None:
        sigma = fwhm / (2.0*pylab.sqrt(2.0*pylab.log(2.0)))
    return scale*pylab.exp(-(dx*dx+dy*dy)/(2.0*sigma*sigma))
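The fwhm-to-sigma conversion here is the standard Gaussian relation FWHM = 2*sqrt(2*ln 2)*sigma (about 2.355*sigma); a quick numeric check (values hypothetical):

peak = normal_2d(0.0, 0.0, 1.0, fwhm=2.355)         # sigma ~ 1.0
half = normal_2d(2.355 / 2.0, 0.0, 1.0, fwhm=2.355)
print(peak, half)   # ~1.0 and ~0.5: the value halves at dx = fwhm/2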
Example #34
        def smooth_gamma(gamma=flat_gamma, knots=knots, tau=smoothing**-2):
            # the following is to include a "noise floor" so that level value
            # zero prior does not exert undue influence on age pattern
            # smoothing
            gamma = gamma.clip(
                pl.log(pl.exp(gamma).mean() / 10.),
                pl.inf)  # only include smoothing on values within 10x of mean

            return mc.normal_like(
                pl.sqrt(pl.sum(pl.diff(gamma)**2 / pl.diff(knots))), 0, tau)
Example #35
def firingratecurve(spikes,T=[],dt=1.0,gauss_std=5.0):
  if type(spikes) is list:
    spikes = pylab.array(spikes)
  if type(T) is list and len(T) == 0:
    T = max(spikes[0,:])
  FRs = pylab.zeros([int(T/dt),1])
  FRts = [dt*(i+0.5) for i in range(0,len(FRs))]
  for iFRt in range(0,len(FRs)):
    FRs[iFRt] = sum(pylab.exp(-0.5*(FRts[iFRt]-spikes[0,:])**2/gauss_std**2))
  return [FRs,FRts]
Example #36
def gauss_kern(size, sizey = None):
    """ Returns a normalized 2D gauss kernel array for convolutions """
    size = int(size)
    if not sizey:
        sizey = size
    else:
        sizey = int(sizey)
    x, y = mgrid[-size:size+1, -sizey:sizey+1]
    g = exp(-(x**2/float(size)+y**2/float(sizey)))
    return g / g.sum()
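Typical use is smoothing a 2-D array by convolving it with the normalized kernel. A minimal sketch (the scipy.signal dependency is an assumption; gauss_kern itself relies on pylab's mgrid and exp being in scope):

import numpy as np
from scipy import signal

img = np.random.rand(64, 64)       # hypothetical noisy image
g = gauss_kern(3)                  # 7x7 normalized Gaussian kernel
smoothed = signal.convolve2d(img, g, mode='same', boundary='symm')
print(img.std(), smoothed.std())   # the smoothed image has lower variance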
Example #37
def GaussianFit(xs, ys):

    x = sum(xs * ys) / sum(ys)
    width = pl.sqrt(abs(sum((xs - x)**2 * ys) / sum(ys)))  #Standard Deviation

    ymax = ys.max()  # avoid shadowing the builtin max()

    fit = lambda t: ymax * pl.exp(-(t - x)**2 / (2 * width**2))

    return fit(xs), width
Example #38
def plot_kappa(ks, s_dot_max):
    # @brief kappa scales tau_max based on slip velocity, constant ks
    s_dot = py.arange(0, s_dot_max, s_dot_max / 100.)
    kap = 1 - py.exp(-s_dot / ks)
    fig = plt.figure()
    ax = plt.subplot(111)
    ax.plot(s_dot, kap, linewidth=1.5)
    py.xlabel('s_tb')
    py.ylabel(r'$\kappa $')
    py.grid('on')
    py.title('mod. max shear for slip rate, k = %.2f' % ks)
Example #39
def _singleExp(x, dt):
    # Create the convolution kernel
    tau=float(x[0])
    Delta=float(x[1])
    t = pl.arange(0, (10*tau), dt)
    h = pl.zeros(len(t))
    
    h = 1/tau*pl.exp(-(t-Delta)/tau)
    h[pl.find(t<Delta)]=0

    return [h]
Example #40
def gauss_pdf(n,mu=0.0,sigma=1.0):
    """
    ::

        Generate a gaussian kernel
         n - number of points to generate
         mu - mean
         sigma - standard deviation
    """
    var = sigma**2
    return 1.0 / pylab.sqrt(2 * pylab.pi * var) * pylab.exp( -(pylab.r_[0:n] - mu )**2 / ( 2.0 * var ) )
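Because this samples a normal density on the integer grid 0..n-1, the values should sum to ~1 whenever the distribution's mass fits inside the window (a quick check with hypothetical values):

g = gauss_pdf(100, mu=50., sigma=5.)
print(g.sum())   # ~1.0: unit-spaced samples of the density integrate to one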
Example #41
 def computeEigenFunctions(self):
     # generate matrix with M x-rows
     XX = (py.ones([len(self.x), self.M]).T @ py.diag(self.x)).T
     argument = 1j * XX @ py.diag(2 * py.pi * self.m)
     eigenfuncs = []
     for l in range(self.M):
         eigenf = py.sum(py.exp(argument) @ py.diag(self.Cq[:, l]),
                         axis=1).T
         eigenf /= py.sqrt(self.Ns)
         eigenfuncs.append(eigenf)
     self.eigenfuncs = eigenfuncs
Example #42
 def mu_age_X(r=rate['r']['mu_age'],
              m=rate['m']['mu_age'],
              f=rate['f']['mu_age']):
     hazard = r + m + f
     pr_not_exit = pl.exp(-hazard)
     X = pl.empty(len(hazard))
     X[-1] = 1 / hazard[-1]
     for i in reversed(range(len(X) - 1)):
         X[i] = pr_not_exit[i] * (X[i + 1] + 1) + 1 / hazard[i] * (
             1 - pr_not_exit[i]) - pr_not_exit[i]
     return X
Example #43
def makeNdPlot(dat, ndLabel, tmLabel):
    pylab.plot(dat[:, 0], dat[:, 1], 'k-')  # history data: column 0 is time, column 1 is density
    # exact solution at selected points
    Tex = dat[:, 0]
    nEx = dat[0, 1] * pylab.exp(-0.5 * Tex**2)
    pylab.plot(Tex, nEx, 'r-')
    pylab.gca().set_ylim([-1., 1])
    if tmLabel:
        pylab.xlabel('Time [s]')
    if ndLabel:
        pylab.ylabel('Number Density')
Example #44
 def computeMomentumEvolution(self):
     self.momentumPhase = py.exp(-1j * 2 * py.pi * py.diag(
         self.x) @ py.ones([len(self.x), len(self.k)]) @ py.diag(self.k))
     self.momentumEvolution = self.timeEvolution @ self.momentumPhase
     # normalisation of momentum
     self.momentumEvolution *= self.dx / py.sqrt(2 * py.pi)
     self.momentumDensity = abs(self.momentumEvolution)**2
     X = py.array(py.sum(self.momentumDensity, axis=1)**(-1))[:, py.newaxis]
     # normalisation of momentum density
     self.momentumDensity = self.momentumDensity * \
         py.concatenate(len(self.k) * [X], axis=1) / self.dk
Example #45
        def rates(S=data_sample,
                Xa=Xa, Xb=Xb,
                alpha=alpha, beta=beta, gamma=gamma,
                bounds_func=vars['bounds_func'],
                age_indices=ai,
                age_weights=aw):

            # calculate study-specific rate function
            shifts = pl.exp(pl.dot(Xa[S], alpha) + pl.dot(Xb[S], pl.atleast_1d(beta)))
            exp_gamma = pl.exp(gamma)
            mu = pl.zeros_like(shifts)
            for i,s in enumerate(S):
                mu[i] = pl.dot(age_weights[s], bounds_func(shifts[i] * exp_gamma[age_indices[s]], age_indices[s]))
                # TODO: evaluate speed increase and accuracy decrease of the following:
                #midpoint = age_indices[s][len(age_indices[s])/2]
                #mu[i] = bounds_func(shifts[i] * exp_gamma[midpoint], midpoint)
                # TODO: evaluate speed increase and accuracy decrease of the following: (to see speed increase, need to code this up using difference of running sums
                #mu[i] = pl.dot(pl.ones_like(age_weights[s]) / float(len(age_weights[s])),
                #               bounds_func(shifts[i] * exp_gamma[age_indices[s]], age_indices[s]))
            return mu
Example #46
def makestim(isi=1,
             variation=0,
             width=0.05,
             weight=10,
             start=0,
             finish=1,
             stimshape='gaussian'):
    from pylab import r_, convolve, shape, rand, exp, zeros, array, hstack

    # Create event times
    timeres = 0.005  # Time resolution = 5 ms = 200 Hz
    pulselength = 10  # Length of pulse in units of width
    currenttime = 0
    timewindow = finish - start
    allpts = int(timewindow / timeres)
    output = []
    while currenttime < timewindow:
        if currenttime >= 0 and currenttime < timewindow:
            output.append(currenttime)
        currenttime = currenttime + isi + variation * (rand() - 0.5)

    # Create single pulse
    npts = min(pulselength * width / timeres,
               allpts)  # Calculate the number of points to use
    x = (r_[0:npts] - npts / 2 + 1) * timeres
    if stimshape == 'gaussian':
        pulse = exp(-(x / width * 2 - 2)**
                    2)  # Offset by 2 standard deviations from start
        pulse = pulse / max(pulse)
    elif stimshape == 'square':
        pulse = zeros(shape(x))
        pulse[int(npts / 2):int(npts / 2) +
              int(width / timeres)] = 1  # Start exactly on time
    else:
        raise Exception('Stimulus shape "%s" not recognized' % stimshape)

    # Create full stimulus
    events = zeros((allpts))
    events[array(array(output) / timeres, dtype=int)] = 1
    fulloutput = convolve(
        events, pulse, mode='same'
    ) * weight  # Calculate the convolved input signal, scaled by rate
    fulltime = (r_[0:allpts] * timeres +
                start) * 1e3  # Create time vector and convert to ms
    fulltime = hstack(
        (0, fulltime, fulltime[-1] + timeres *
         1e3))  # Create "bookends" so always starts and finishes at zero
    fulloutput = hstack(
        (0, fulloutput,
         0))  # Set weight to zero at either end of the stimulus period
    events = hstack((0, events, 0))  # Ditto
    stimvecs = [fulltime, fulloutput, events]  # Combine vectors into a matrix

    return stimvecs
Example #47
def assembleMvv(A, B, M, P):
    #A	?
    #B	?
    #M Mesh object
    #P Physics object

    nodes = 4
    sa0, ma0 = localStiffness(M)

    etabflat = M.etab.reshape(M.nelx * M.nelz, nodes)
    Aflat = A.flatten()[pl.newaxis].T
    Bflat = B.flatten()[pl.newaxis].T

    inew = pl.kron(etabflat, pl.ones((1, nodes))).flatten()
    jnew = pl.kron(etabflat, pl.ones((nodes, 1))).flatten()
    dnew = pl.dot(Aflat, sa0.reshape(
        (1, nodes * nodes))).flatten() - P.k0**2 * pl.dot(
            Bflat, ma0.reshape((1, nodes * nodes))).flatten()
    i = list(inew)
    j = list(jnew)
    d = list(dnew)

    #Periodic Bloch-Floquet BC:
    pen = BCpen
    n1 = list(range(0, (M.nelz + 1) * (M.nelx + 1), M.nelx + 1))
    n2 = list(range(M.nelx, (M.nelz + 1) * (M.nelx + 1) + M.nelx, M.nelx + 1))
    i += n1 + n2 + n1 + n2
    j += n1 + n2 + n2 + n1
    d += [pen]*2*len(n1)+\
       [-pen*exp(1.j*P.kInx*M.lx)]*len(n1)+\
       [-pen*exp(1.j*P.kInx*M.lx).conj()]*len(n1)

    #Calculate the matrices given in Eq (43) in Dossou2006. See Fuchi2010 for a "nicer" way
    #	of writing them. The elements used are the same as in Andreassen2011

    Mvv = coo_matrix((d, (i, j)), shape=(M.ndof, M.ndof),
                     dtype='complex').toarray()
    return Mvv
Example #48
    def diagnostic(self, kmin=1, kmax=8, k=None, ymax=None):
        self.run(kmin=kmin, kmax=kmax)
        pylab.clf()
        pylab.subplot(3, 1, 2)
        self.plot()
        mf = GaussianMixtureFitting(self.fitting.data)
        if k is None:
            mf.estimate(k=self.best_k)
        else:
            mf.estimate(k=k)
        pylab.subplot(3, 1, 1)
        mf.plot()
        if ymax is not None:
            pylab.ylim([0, ymax])

        pylab.subplot(3, 1, 3)
        min_value = np.array([self.all_results[x]["AICc"]
                              for x in self.x]).min()
        pylab.plot(
            self.x,
            [
                pylab.exp((min_value - self.all_results[k]["AICc"]) / 2)
                for k in self.x
            ],
            "o-",
            label="AICc",
        )
        min_value = np.array([self.all_results[x]["AIC"]
                              for x in self.x]).min()
        pylab.plot(
            self.x,
            [
                pylab.exp((min_value - self.all_results[k]["AIC"]) / 2)
                for k in self.x
            ],
            "o-",
            label="AIC",
        )

        pylab.xlabel("probability of information loss (based on AICc")
        pylab.legend()
Example #49
def gaussian(radial, x):
  """Return gaussian radial function.
  Args:
    radial: (num, num) of gaussian (base, width^2) pair
    x: num of input
  Returns:
    num of gaussian output
  """
  base, width2 = radial
  power = -1 / width2 / 2 * (x-base)**2
  y = pylab.exp(power)
  return y
Example #50
def HRF(ms=1.):
    ''' The Heamodynamic response function
    '''
    T = 10
    tau_s = 0.8
    tau_f = 0.4
    scale = 1000. / ms
    v_time = linspace(0., T, scale * T)
    sqrt_tfts = sqrt(1. / tau_f - 1. / (4. * tau_s**2))
    exp_ts = exp(-0.5 * (v_time / tau_s))
    h = exp_ts * sin(v_time * sqrt_tfts) / sqrt_tfts
    return h
Example #51
def Vpsp_curr_exp(weight=None, neuron_params=None, set_singlepara=None,
                  title=None, tdur=None, plot=False):
    '''
    Excitatory PSP of IF_curr_exp
    '''
    if neuron_params is None:
        neuron_params = {  # for LIF sampling
            'cm': .2,
            'tau_m': .1,  # 1.,
            'v_thresh': -50.,
            'tau_syn_E': 10.,  # 30.,
            'v_rest': -50.,
            'tau_syn_I': 10.,  # 30.,
            'v_reset': -50.01,  # -50.1,
            'tau_refrac': 10.,  # 30.
            "i_offset": 0.,
        }

    if set_singlepara is not None:
        for i in range(len(set_singlepara) / 2):
            neuron_params[set_singlepara[i * 2]] = set_singlepara[i * 2 + 1]

    if weight is None:
        weight = .02

    c_m = neuron_params['cm']
    tau_m = neuron_params['tau_m']

    tau_syn = neuron_params['tau_syn_E']
    v_rest = neuron_params['v_rest']

    scalfac = 1.  # to mV scale factor
    if tdur is not None:
        tdur = np.arange(0, tdur, 0.1)  # 0.1 ms
    else:
        tdur = np.arange(0, 150, 0.1)  # 0.1 ms

    A_se = weight * (1. / (c_m * (1. / tau_syn - 1. / tau_m))) * \
        (np.exp(- tdur / tau_m) - np.exp(- tdur / tau_syn)) * scalfac

    if title == 'rec_weight':  # return the rectangular psp weight which will create the same under PSP area within tau_syn as the double exponential PSP
        lif_weight = weight
        rec_weight = lif_weight * (1. / (c_m * (1. - tau_syn / tau_m))) * (
            tau_syn * (np.exp(-1) - 1) - tau_m * (np.exp(- tau_syn / tau_m) - 1))
        return rec_weight

    tmax = np.log(tau_syn / tau_m) / (1 / tau_m - 1 / tau_syn)
    if plot:
        p.ion()
        ax = p.gca()  # plot on the current pylab axes
        ax.plot(A_se)
        print('Theo tmax: ', tmax)
        print('Theo Amax: ', weight * (1. / (c_m * (1. / tau_syn - 1. / tau_m))) * (p.exp(- tmax / tau_m) - p.exp(- tmax / tau_syn)) * scalfac)
    return A_se
Example #52
 def obs(value=value,
         S=data_sample,
         N=N,
         mu_i=rates,
         Xz=Xz,
         zeta=zeta,
         delta=delta):
     #zeta_i = .001
     #residual = pl.log(value[S] + zeta_i) - pl.log(mu_i*N[S] + zeta_i)
     #return mc.normal_like(residual, 0, 100. + delta)
     logp = mc.negative_binomial_like(value[S], N[S]*mu_i, delta*pl.exp(Xz*zeta))
     return logp
Example #53
def hellinger_distance(x, y):
    # estimating hellinger distance from https://en.wikipedia.org/wiki/Hellinger_distance
    mu1 = x[0]
    sigma1 = x[1]
    mu2 = y[0]
    sigma2 = y[1]

    BC = pl.sqrt(2.0 * sigma1 * sigma2 /
                 (sigma1**2 + sigma2**2)) * pl.exp(-1 / 4.0 * (mu1 - mu2)**2 /
                                                   (sigma1**2 + sigma2**2))

    return pl.sqrt(1 - BC)
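Two quick sanity checks (a sketch, assuming pylab is imported as pl where the function lives): identical Gaussians give distance 0, and widely separated means push the distance toward 1.

print(hellinger_distance((0.0, 1.0), (0.0, 1.0)))    # 0.0 for identical distributions
print(hellinger_distance((0.0, 1.0), (50.0, 1.0)))   # ~1.0 for essentially disjoint ones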
Example #54
def simulate_age_group_data(N=50, delta_true=150, pi_true=true_rate_function):
    """ generate simulated data
    """
    # start with a simple model with N rows of data
    model = data_simulation.simple_model(N)

    # record the true age-specific rates
    model.ages = pl.arange(0, 101, 1)
    model.pi_age_true = pi_true(model.ages)

    # choose age groups randomly
    age_width = mc.runiform(1, 100, size=N)
    age_mid = mc.runiform(age_width / 2, 100 - age_width / 2, size=N)
    age_width[:10] = 10
    age_mid[:10] = pl.arange(5, 105, 10)
    #age_width[10:20] = 10
    #age_mid[10:20] = pl.arange(5, 105, 10)

    age_start = pl.array(age_mid - age_width / 2, dtype=int)
    age_end = pl.array(age_mid + age_width / 2, dtype=int)

    model.input_data['age_start'] = age_start
    model.input_data['age_end'] = age_end

    # choose effective sample size uniformly at random
    n = mc.runiform(100, 10000, size=N)
    model.input_data['effective_sample_size'] = n

    # integrate true age-specific rate across age groups to find true group rate
    model.input_data['true'] = pl.nan
    model.input_data['age_weights'] = ''

    for i in range(N):
        beta = mc.rnormal(0., .025**-2)

        # TODO: clean this up, it is computing more than is necessary
        age_weights = pl.exp(beta * model.ages)
        sum_pi_wt = pl.cumsum(model.pi_age_true * age_weights)
        sum_wt = pl.cumsum(age_weights)
        p = (sum_pi_wt[age_end] - sum_pi_wt[age_start]) / (sum_wt[age_end] -
                                                           sum_wt[age_start])

        model.input_data.ix[i, 'true'] = p[i]
        model.input_data.ix[i, 'age_weights'] = ';'.join(
            ['%.4f' % w for w in age_weights[age_start[i]:(age_end[i] + 1)]])

    # sample observed rate values from negative binomial distribution
    model.input_data['value'] = mc.rnegative_binomial(
        n * model.input_data['true'], delta_true) / n

    print model.input_data.drop(['standard_error', 'upper_ci', 'lower_ci'],
                                axis=1)
    return model
Example #55
    def generatePath(self, T):
        """
        r: rate of return
        sigma: standard deviation
        dt: time steps
        drift: mean movement price
        zn: array of random numbers with dimension(nPaths,nSteps)
        ld: poisson arrival rate
        a: mean drift of jump
        d: standard deviation of jump
        k: expected value of jump

        """
        assert (T > 0), 'Time needs to be a positive number'
        try:
            S0 = self.initialPrice
            r = self.rateOfReturn
            sigma = self.stdev
            ld = self.jumpParameters[0]
            a = self.jumpParameters[1]
            d = self.jumpParameters[2]
            k = pylab.exp(a+0.5*(d**2))-1
            nPaths = self.nPaths
            nSteps = self.nSteps
            dt = T/float(nSteps)
            drift = r-ld*k-0.5*(sigma**2)
            zn = pylab.randn(nPaths, nSteps)
            zn = np.vstack((zn, -zn))
            zp = pylab.randn(nPaths, nSteps)
            zp = np.vstack((zp, -zp))
            S = pylab.zeros((2*nPaths, nSteps))
            p = pylab.poisson(ld*dt, (2*nPaths, nSteps))
            j = a*p+d*pylab.sqrt(p)*zp
            start = S0*pylab.ones((2*nPaths, 1))
            path = S0*pylab.cumprod(pylab.exp(drift*dt +
                                              sigma*pylab.sqrt(dt)*zn + j), 1)
        except ValueError:
            return 'Please check the value ' \
                   'of the properties.'
        return pylab.hstack((start, path))
Example #56
    def CalculateRates(self, times, levels):
        N = len(levels)
        t_mat = pylab.matrix(times).T

        # normalize the cell_count data by its minimum
        count_matrix = pylab.matrix(levels).T
        norm_counts = count_matrix - min(levels)
        c_mat = pylab.matrix(norm_counts)
        if c_mat[-1, 0] == 0:
            ge_zero = c_mat[pylab.find(c_mat > 0)]
            if ge_zero.any():
                c_mat[-1, 0] = min(ge_zero)

        for i in pylab.arange(N - 1, 0, -1):
            if c_mat[i - 1, 0] <= 0:
                c_mat[i - 1, 0] = c_mat[i, 0]

        c_mat = pylab.log(c_mat)

        res_mat = pylab.zeros(
            (N, 5))  # columns are: slope, offset, error, avg_value, max_value
        for i in xrange(N - self.window_size):
            i_range = range(i, i + self.window_size)
            x = pylab.hstack(
                [t_mat[i_range, 0],
                 pylab.ones((len(i_range), 1))])
            y = c_mat[i_range, 0]

            # Measurements in window must all be above the min.
            if min(pylab.exp(y)) < self.minimum_level:
                continue

            (a, residues) = pylab.lstsq(x, y)[0:2]
            res_mat[i, 0] = a[0]
            res_mat[i, 1] = a[1]
            res_mat[i, 2] = residues
            res_mat[i, 3] = pylab.mean(count_matrix[i_range, 0])
            res_mat[i, 4] = max(pylab.exp(y))

        return res_mat
Example #57
    def broaden(self, fwhm=1e-10, linetype=1, eta=[]):
        output = Spectrum(self.wavelengths)
        window = max(output.wavelengths) - min(output.wavelengths)
        center = max(output.wavelengths) - window/2

        if fwhm <= 0:
            raise ValueError('The linewidth must be positive and nonzero.')

        # Trapezoidal 
        elif linetype == 0:
            raise NotImplementedError('Trapezoidal line shape not implemented.')

        # Gaussian
        elif linetype == 1:
            sigma = fwhm / (2 * (2 * log(2))**0.5)  # FWHM = 2*sqrt(2 ln2)*sigma
            slit = exp(-(output.wavelengths - center)**2/(2 * sigma**2))

        # Lorentzian
        elif linetype == 2:
            gamma = fwhm
            slit = 0.5 * (gamma / ((output.wavelengths - center)**2 \
                                    + 0.25 * gamma**2)) / pi

        # Pseudo-Voigt
        elif linetype == 3:
            if not eta:
                print "Eta required for pseudo-Voigt profile, quitting"
                sys.exit(0)
            sigma = fwhm
            sigma_g = fwhm / (2 * (2 * log(2))**0.5)  # FWHM = 2*sqrt(2 ln2)*sigma
            g = exp(-(output.wavelengths - center)**2/(2 * sigma_g**2)) \
                / (sigma_g * (2*pi)**0.5)
            l = (0.5 * sigma / pi) \
                / ((output.wavelengths - center)**2 + .25 * sigma**2)
            slit = eta * l + (1 - eta) * g


        output.intensities = pylab.convolve(self.intensities, slit, 'same')

        return output
Example #58
def weight_tbox(obj_rect, offset, tbbs, psrs, sigma_factor = 1./4):
    '''
    center means the gaussian prior's maximum position, offset should be add before this call
    sigma_w, sigma_h 2d gaussian has two sigmas for row and col
    tbbs, tracked_bounding_box'list to np.array.
    psrs, tbbs' psr score list to np.array.
    :param center:
    :param sigma_w: shoulb be set by blob_seg's roi windows' shape, roi_w*sigma_factor
    :param sigma_h:
    :param tbbs:
    :param psrs:
    :return: maximum posterior probability box
    '''
    if tbbs.shape[0] == 0 or psrs.shape[0] == 0:
        print ('Error: empty tracker_tbbs list or psr list in weight_tbox function.\n Return last frame\'s position \n')
        return obj_rect

    #position array
    posxs = tbbs[:,0] + tbbs[:,2]/2
    posys = tbbs[:,1] + tbbs[:,3]/2

    cx, cy = (obj_rect[0]+obj_rect[2]/2, obj_rect[1]+obj_rect[3]/2)
    w      = obj_rect[2] * 2
    h      = obj_rect[3] * 2

    sigma_w = sigma_factor*w
    sigma_h = sigma_factor*h

    ofx, ofy = offset[0:2]
    distxs = np.float32(posxs) -(cx + ofx)
    distys = np.float32(posys) -(cy + ofy)

    #priority probability
    pprob = pylab.exp(-0.5 * ((distxs / sigma_w) ** 2 + (distys / sigma_h) ** 2))

    psrs = psrs / (np.sum(psrs)+np.spacing(1))
    # psrs = psrs.reshape((len(psrs), 1))
    # psrs = np.tile(psrs, (1, 4))

    #NOTE, here the pprob are normalized before using!!! so it's not gaussian distribution anymore.
    norm_pprob = pprob / (np.sum(pprob) + np.spacing(1))
    weights = psrs * norm_pprob

    weights = weights / np.sum(weights)
    weights = weights.reshape(len(weights), 1)
    weights = np.tile(weights, (1, 4))
    # pbs = pbs.reshape(len(pbs),1)
    # pbs = np.tile(pbs,(1,4))
    # obj_bbox is the average rect of all the tbbs with responding weights, given by tracker's psr and prio-probability
    obj_bbox_array = np.int0(np.sum(tbbs * weights, axis=0))
    obj_bbox = (obj_bbox_array[0], obj_bbox_array[1],obj_bbox_array[2], obj_bbox_array[3])
    return obj_bbox
Example #59
def get_KOP_noRest(x1s, y1s, nbn, baseVal):
    total = 0  # avoid shadowing the builtin sum()
    for n in range(nbn):
        #x=x1s[n]; y=y1s[n];
        #get polar coordinate from (x,y) euclidean coordinates
        r, theta = cmath.polar(complex(x1s[n],
                                       y1s[n]))  #returns   -pi < theta < pi
        if theta > -pb.pi / 2:
            total += pb.exp(complex(0, theta))
        else:
            total += baseVal

    return cmath.polar(total / nbn)
Example #60
    def to_dissonance(self, tuning, f0=440., num_harmonics=6):
        """
        ::

            Convert scale to dissonance values for num_harmonics harmonics.
            Assume an exponentially decaying harmonic series.
            Returns dissonance scores for each interval in given tuning system.
        """
        harm = pylab.arange(num_harmonics)+1
        h0 = [f0 * k for k in harm]
        a = [pylab.exp(-0.5 * k) for k in harm]
        diss = [dissonance_fun(h0 + [p*k for k in harm]) for p in self.to_scale_freqs(tuning, f0)]
        return diss