Example #1
	def correct( self ):	
		self.Z = np.mat([ 
			self.sensors['accelerometer'].roll, 
			self.sensors['accelerometer'].pitch,
			self.sensors['dummy_yaw'].yaw 
			]).transpose()

		self.StateMap = np.mat( tf.transformations.euler_from_quaternion( self.quaternion.get_quaternion(), axes = 'sxyz'  ) ).transpose();

		c11 = 1 - 2 * (self.quaternion.y**2 + self.quaternion.z**2)
		c33 = 1 - 2 * (self.quaternion.x**2 + self.quaternion.y**2)
		c21 = 2 * ( self.quaternion.x*self.quaternion.y + self.quaternion.w*self.quaternion.z )
		c31 = 2 * ( self.quaternion.x*self.quaternion.z - self.quaternion.w*self.quaternion.y )
		c32 = 2 * ( self.quaternion.y*self.quaternion.z + self.quaternion.w*self.quaternion.x )


		self.MeasurementJacobian = np.asmatrix( 
			matlib.diag( [2 * c33/sqrt(c33 ** 2 + c32 ** 2), 2 / sqrt(1 - c31**2), 2 * c11/sqrt(c11 ** 2 + c21 ** 2)] ) 
		) * np.mat([
			[ self.quaternion.x , self.quaternion.w, self.quaternion.z, self.quaternion.y],
			[ self.quaternion.y, -self.quaternion.z, self.quaternion.w, -self.quaternion.x ],
			[ self.quaternion.z, self.quaternion.y, self.quaternion.x, self.quaternion.w]
		 ]) + np.asmatrix(
		 	matlib.diag( [4 * c32/sqrt(c33 ** 2 + c32 ** 2), 0 , 4 * c21/sqrt(c11 ** 2 + c21 ** 2) ]) 
		 ) *  np.mat([
		 	[ 0, self.quaternion.x, self.quaternion.y, 0],
		 	[ 0, 0, 0, 0],
		 	[ 0, 0 , self.quaternion.y, self.quaternion.z]
		]) 

		super(IMU_Kalman, self).correct()
		self.set_quaternion()
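For reference, the c-terms above are entries of the quaternion's direction cosine matrix, and the measurement function whose Jacobian is assembled here is the standard 'sxyz' roll/pitch/yaw extraction. A minimal standalone sketch of that mapping (euler_from_dcm is only an illustrative helper, not part of the class; the quaternion column ordering in the Jacobian follows the class's own convention):

from math import atan2, asin

def euler_from_dcm(c11, c21, c31, c32, c33):
    # roll about x, pitch about y, yaw about z ('sxyz' convention)
    roll = atan2(c32, c33)
    pitch = -asin(c31)
    yaw = atan2(c21, c11)
    return roll, pitch, yaw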
Example #2
    def refine_R1(self, smoothR1, smoothk2, smoothk2a, h):
        '''
        Ridge regression to get better R1, k2, k2a estimates

        Args:
            smoothR1 (numpy.ndarray): 1-D array of R1 values to drive the estimates toward
            smoothk2 (numpy.ndarray): 1-D array of k2 values to drive the estimates toward
            smoothk2a (numpy.ndarray): 1-D array of k2a values to drive the estimates toward
            h (numpy.ndarray): 2-D array whose k-th row holds the diagonal elements
                               of the matrix used to compute the weighted norm
        '''

        if not smoothR1.ndim==smoothk2.ndim==smoothk2a.ndim==1:
            raise ValueError('smoothR1, smoothk2, smoothk2a must be 1-D')
        if not len(smoothR1)==len(smoothk2)==len(smoothk2a)==self.TAC.shape[0]:
            raise ValueError('Length of smoothR1, smoothk2, smoothk2a must be \
                             equal to the number of rows of TAC')
        if not h.ndim==2:
            raise ValueError('h must be 2-D')
        if not h.shape==(self.TAC.shape[0], 3):
            raise ValueError('Number of rows of h must equal the number of rows of TAC, \
                             and the number of columns of h must be 3')

        # Numerical integration of reference TAC
        intrefTAC = km_integrate(self.refTAC,self.t,self.startActivity)
        for k, TAC in enumerate(self.TAC):
            W = mat.diag(self.weights[k,:])

            # Numerical integration of target TAC
            intTAC = km_integrate(TAC,self.t,self.startActivity)

            # ----- Get R1 incorporating spatial constraint -----
            # Set up the ridge regression model
            # based on Eq. 11 in Zhou et al.
            #X = np.mat(np.column_stack((self.refTAC,intrefTAC,-intTAC)))
            X = np.column_stack((self.refTAC,intrefTAC,-intTAC))
            #y = np.mat(TAC).T
            y = TAC
            H = mat.diag(h[k,:])
            b_sc = np.array((smoothR1[k],smoothk2[k],smoothk2a[k])) #.reshape(-1,1)

            try:
                b = solve(X.T @ W @ X + H, X.T @ W @ y + H @ b_sc)

                R1_lrsc = b[0]
                k2_lrsc = b[1]
                k2a_lrsc = b[2]
            except Exception:
                R1_lrsc = k2_lrsc = k2a_lrsc = 0

            self.results['R1_lrsc'][k] = R1_lrsc
            self.results['k2_lrsc'][k] = k2_lrsc
            self.results['k2a_lrsc'][k] = k2a_lrsc

        return self
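Written out, the solve call above is the closed-form ridge (spatially constrained) estimate, with W the frame weights, H = diag(h[k, :]) the penalty, and b_sc the smoothed (R1, k2, k2a) targets:

\hat{b} = (X^{\top} W X + H)^{-1} (X^{\top} W y + H\, b_{sc})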
Example #3
def EMForMoG(datam, n, pl, muvl, covml, max_iters=1):
    assert len(pl)==len(muvl) and len(muvl)==len(covml) 
    d=datam.shape[0]
    N=datam.shape[1]
    #start EM posteriorm n*N
    posteriorm=matlib.zeros((n, N), dtype=np.float32)
    for it in range(max_iters):
        print('Iter: ' + str(it))
        #create Gaussian kernels
        NormalFl=[makeNormalF(muv, covm) for muv, covm in zip(muvl, covml)]
        #probability
        px=matlib.zeros((1, N), dtype=np.float32)
        posteriorm.fill(0)
        #calculate posterior responsibilities (E-step)
        for j in range(N):
            cur_data=datam[:, j]
            for i in range(n):
                #print(i, j)
                posteriorm[i, j]=pl[i]*NormalFl[i](cur_data)
                px[0, j]+=posteriorm[i, j]
#        print 'px:', px
        posteriorm/=px
        #update parameters
        #soft num n*1
        softnum=matlib.sum(posteriorm, 1)
        print(softnum)
        softnum_inv=1.0/softnum
        pl=np.array((softnum/N)).reshape(-1).tolist()
        mum=datam*posteriorm.T*matlib.diag(np.array(softnum_inv).reshape(-1))
        muvl=[mum[:, k] for k in range(n)]
        mum=[]#release         
        for k in range(n):
            datam_temp=datam-muvl[k]
            covml[k]=softnum_inv[k, 0]*datam_temp*matlib.diag(np.array(posteriorm[k, :]).reshape(-1))\
            *datam_temp.T
    return pl, muvl, covml
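In scalar notation, the matrix expressions above are the standard EM updates for a Gaussian mixture: E-step responsibilities, then M-step weights, means, and covariances:

\gamma_{ij} = \frac{\pi_i\,\mathcal{N}(x_j \mid \mu_i, \Sigma_i)}{\sum_{l} \pi_l\,\mathcal{N}(x_j \mid \mu_l, \Sigma_l)}, \qquad
N_i = \sum_{j} \gamma_{ij}, \qquad \pi_i = \frac{N_i}{N}, \qquad
\mu_i = \frac{1}{N_i} \sum_{j} \gamma_{ij}\, x_j, \qquad
\Sigma_i = \frac{1}{N_i} \sum_{j} \gamma_{ij}\, (x_j - \mu_i)(x_j - \mu_i)^{\top}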
Example #4
def design_controller():

    # Control input weighting
    R = 1.0

    # State weighting matrix Q
    Q = diag([(10 * pi / 180)**(-2), (10 * pi / 180)**(-2),
              (10 * pi / 180)**(-2), (10 * pi / 180)**(-2)]) / R

    # Always keep this at 1.0 and adjust the scaling of Q
    R = np.array([[1.0]])

    # State error covariance
    W = diag([0.0, 0.0, 1e-6, 1e-6]) * dt

    # Measurement error covariance
    V = .01 * diag([
        (1.0 / 20000 * 2.0 * pi)**2.,  # square of 1.0 count
        (0.00227631723111)**2
    ])  # square of std deviation from static

    # Calculate closed loop eigenvalues and gains
    data = compute_gains(Q, np.array([[1.0]]), W, V, dt)

    matlab_data = {
        'A_cl_w': data['A_cl'],
        'B_cl_w': data['B_cl'],
        'C_cl_w': data['C_cl'],
        'A_w': data['A'],
        'B_c_w': data['B_c'],
        'C_m_w': data['C_m'],
        'C_z_w': data['C_z'],
        'K_c_w': data['K_c'],
        'A_e_w': data['A_e'],
        'B_e_w': data['B_e']
    }
    sio.savemat("cl_data.mat", mdict=matlab_data)
    matlab = "/home/hazelnusse/usr/matlab2012b/bin/matlab"
    import os
    os.system(matlab + " -nosplash -nodesktop -nojvm -r 'tune_pi; exit;'")
    pi_gains = sio.loadmat('pi_gains.mat')
    data['Kp'] = pi_gains['Kp_w'].reshape((N, ))
    data['Ki'] = pi_gains['Ki_w'].reshape((N, ))
    data['Kp_fit'] = smoother(data['theta_R_dot'], data['Kp'])
    data['Ki_fit'] = smoother(data['theta_R_dot'], data['Ki'])

    form_PI_cl(data)

    return data
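The diagonal of Q appears to follow Bryson's rule: each state is weighted by the inverse square of its maximum acceptable deviation, here 10 degrees for every state, so Q_{ii} = (10\pi/180)^{-2} \approx 32.8, while R is pinned to 1 so that all tuning happens through the scaling of Q, as the comment notes.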
Example #5
def belief_dynamics(b_t, u_t, z_tp1, model,profiler=None,profile=False):
    dynamics_func = model.dynamics_func
    obs_func = model.obs_func
    qDim = model.qDim
    rDim = model.rDim

    x_t, SqrtSigma_t = decompose_belief(b_t, model)
    Sigma_t = SqrtSigma_t*SqrtSigma_t

    if z_tp1 is None:
        # Maximum likelihood observation assumption
        z_tp1 = obs_func(dynamics_func(x_t, u_t, ml.zeros([qDim,1])), ml.zeros([rDim,1]))
    if profile: profiler.start('ekf')
    x_tp1, Sigma_tp1 = ekf(x_t, Sigma_t, u_t, z_tp1, model)
    if profile: profiler.stop('ekf')
    # Compute square root for storage
    # Several different choices available -- we use the principal square root
    if profile: profiler.start('eigh')
    D_diag, V = ml.linalg.eigh(Sigma_tp1)
    if profile: profiler.stop('eigh')
    SqrtSigma_tp1 = V*np.sqrt(ml.diag(D_diag))*V.T

    b_tp1 = compose_belief(x_tp1, SqrtSigma_tp1, model)

    return b_tp1
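The eigendecomposition step stores the principal matrix square root of the updated covariance. A minimal standalone check with plain numpy arrays (the function above works with numpy.matlib matrices instead):

import numpy as np

Sigma = np.array([[2.0, 0.3],
                  [0.3, 1.0]])                 # symmetric positive definite covariance
lam, V = np.linalg.eigh(Sigma)                 # symmetric eigendecomposition
SqrtSigma = V @ np.diag(np.sqrt(lam)) @ V.T    # principal square root
assert np.allclose(SqrtSigma @ SqrtSigma, Sigma)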
Example #6
def belief_dynamics(b_t, u_t, z_tp1, model, profiler=None, profile=False):
    dynamics_func = model.dynamics_func
    obs_func = model.obs_func
    qDim = model.qDim
    rDim = model.rDim

    x_t, SqrtSigma_t = decompose_belief(b_t, model)
    Sigma_t = SqrtSigma_t * SqrtSigma_t

    if z_tp1 is None:
        # Maximum likelihood observation assumption
        z_tp1 = obs_func(dynamics_func(x_t, u_t, ml.zeros([qDim, 1])),
                         ml.zeros([rDim, 1]))
    if profile: profiler.start('ekf')
    x_tp1, Sigma_tp1 = ekf(x_t, Sigma_t, u_t, z_tp1, model)
    if profile: profiler.stop('ekf')
    # Compute square root for storage
    # Several different choices available -- we use the principal square root
    if profile: profiler.start('eigh')
    D_diag, V = ml.linalg.eigh(Sigma_tp1)
    if profile: profiler.stop('eigh')
    SqrtSigma_tp1 = V * np.sqrt(ml.diag(D_diag)) * V.T

    b_tp1 = compose_belief(x_tp1, SqrtSigma_tp1, model)

    return b_tp1
Example #7
def srtm(vol,  int_vol, ref, int_ref, time_frames, opts={}, header=None ):
    '''
    TODO: Add reference to author of code
    '''
    try :
        isotope=header["Info"]["Tracer"]["Isotope"][0]
        print(isotope)
        isotope=re.sub('-', '', isotope)
        print(isotope)
        half_life = half_life_dict[ isotope  ]
    except KeyError:
        half_life = opts.get("quant_half_life")
        if half_life is None:
            print("Error: isotope was either not found in the .json header or is not one of", half_life_dict.keys(), "and was not specified manually with the --half-life option.")
            exit(1)

    decayConstant = np.log(2) / half_life 
    time_widths = np.array(header["Time"]["FrameTimes"]["Duration"]).reshape(-1,) / 50.
    time_frames = np.array(time_frames).reshape(-1,) / 60.
    pet_sum = np.sum(vol,axis=0)
    weights = time_widths / (pet_sum * np.exp(decayConstant * time_frames))
    startActivity=0
    ref = ref.reshape(-1,)
    R1 = []
    BP = []

    for k in range(vol.shape[0]):
        TAC = vol[k,:]
        W = mat.diag(weights)
        y = np.mat(TAC).T
        
        def energy_fun(theta3):
            exp_theta3_t = np.exp(np.asscalar(theta3)*time_frames)
            integrant = ref * exp_theta3_t
            conv=np.zeros_like(time_frames)
            for t in range(1, time_frames.shape[0]):
                conv[t] = simps(integrant[:t], time_frames[:t]) # km_integrate(integrant,time_frames, startActivity) / exp_theta3_t
            X = np.mat(np.column_stack((ref, conv)))
            thetas = solve(X.T * W * X, X.T * W * y)
            residual = y - X * thetas
            rss = residual.T * W * residual
            return rss

        res = minimize_scalar(energy_fun, bounds=(0.06, 0.6), method='bounded', options=dict(xatol=1e-1))
        theta3 = np.asscalar(res.x)

        exp_theta3_t = np.exp(theta3*time_frames)
        integrant = ref * exp_theta3_t
        conv = simps(integrant,time_frames) / exp_theta3_t
        X = np.mat(np.column_stack((ref, conv)))
        thetas = solve(X.T * W * X, X.T * W * y)

        R1.append(float(thetas[0]))
        k2 = float(thetas[1]) + R1[-1]*theta3
        BP.append(k2 / theta3 - 1)

    if opts["quant_R1"] :
        return R1

    return BP
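Per voxel, the inner solve is a weighted least-squares fit of the linearized SRTM model for a fixed theta3, and minimize_scalar searches theta3 on [0.06, 0.6] for the smallest weighted residual sum of squares. In the notation of the final fit step,

X = \Big[\, C_R(t),\ \ e^{-\theta_3 t} \!\int_0^{t} C_R(\tau)\, e^{\theta_3 \tau}\, d\tau \,\Big], \qquad
\hat{\theta} = (X^{\top} W X)^{-1} X^{\top} W\, C_T, \qquad
R_1 = \hat{\theta}_1, \quad k_2 = \hat{\theta}_2 + R_1 \theta_3, \quad BP = \frac{k_2}{\theta_3} - 1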
Example #8
def design_controller():

    # Control input weighting
    R = 1.0

    # State weighting matrix Q
    Q = diag([(10*pi/180)**(-2), (10*pi/180)**(-2),
              (10*pi/180)**(-2), (10*pi/180)**(-2)]) / R

    # Always keep this at 1.0 and adjust the scaling of Q
    R = np.array([[1.0]])

    # State error covariance
    W = diag([0.0, 0.0, 1e-6, 1e-6]) * dt

    # Measurement error covariance
    V = .01*diag([(1.0/20000*2.0*pi)**2., # square of 1.0 count
              (0.00227631723111)**2]) # square of std deviation from static

    # Calculate closed loop eigenvalues and gains
    data = compute_gains(Q, np.array([[1.0]]), W, V, dt)

    matlab_data = {'A_cl_w' : data['A_cl'],
                   'B_cl_w' : data['B_cl'],
                   'C_cl_w' : data['C_cl'],
                   'A_w' : data['A'],
                   'B_c_w' : data['B_c'],
                   'C_m_w' : data['C_m'],
                   'C_z_w' : data['C_z'],
                   'K_c_w' : data['K_c'],
                   'A_e_w' : data['A_e'],
                   'B_e_w' : data['B_e']}
    sio.savemat("cl_data.mat", mdict=matlab_data)
    matlab = "/home/hazelnusse/usr/matlab2012b/bin/matlab"
    import os
    os.system(matlab + " -nosplash -nodesktop -nojvm -r 'tune_pi; exit;'")
    pi_gains = sio.loadmat('pi_gains.mat')
    data['Kp'] = pi_gains['Kp_w'].reshape((N,))
    data['Ki'] = pi_gains['Ki_w'].reshape((N,))
    data['Kp_fit'] = smoother(data['theta_R_dot'], data['Kp'])
    data['Ki_fit'] = smoother(data['theta_R_dot'], data['Ki'])

    form_PI_cl(data)

    return data
Example #9
File: qz.py  Project: npalmer/dolo
def qzdiv(stake, A2, B2, Q, Z):
    Aout = A2.copy(); Bout = B2.copy(); Qout = Q.copy(); Zout = Z.copy()
    n, jnk = A2.shape
    # remember diag returns 1d
    root = mat(abs(c_[diag(A2)[:,newaxis], diag(B2)[:,newaxis]]))
    root[:,1] /= where(root[:,0]<1e-13, -root[:,1], root[:,0])
    for i in range(1,n+1)[::-1]:        # always first i rows, decreasing
        m = None
        for j in range(1,i+1)[::-1]:    # search backwards in the first i rows
            #print root.shape
            #print n, i, j
            #print 'i,j in qzdiv', i,j
            if root[j-1,1] > stake or root[j-1,1] < -0.1:
                m = j                   # get last relevant row
                break
        if m is None: return (Aout, Bout, Qout, Zout)
        for k in range(m,i):            # from relev. row to end of first part
            (Aout, Bout, Qout, Zout) = qzswitch(k, Aout, Bout, Qout, Zout)
            root[k-1:k+1, 1] = root[k-1:k+1, 1][::-1]
    return (Aout, Bout, Qout, Zout)
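After the normalization above, root[:, 1] holds the magnitudes of the generalized eigenvalues of the pencil, \lambda_i = |B_{ii}| / |A_{ii}| (set to -1 as a flag when |A_{ii}| is numerically zero), and the loop uses qzswitch to push every root with \lambda_i > stake, or a flagged one, toward the lower-right block of the factorization.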
Example #10
def qzdiv(stake, A2, B2, Q, Z):
    Aout = A2.copy(); Bout = B2.copy(); Qout = Q.copy(); Zout = Z.copy()
    n, jnk = A2.shape
    # remember diag returns 1d
    root = mat(abs(c_[diag(A2)[:,newaxis], diag(B2)[:,newaxis]]))
    root[:,1] /= where(root[:,0]<1e-13, -root[:,1], root[:,0])
    for i in range(1,n+1)[::-1]:        # always first i rows, decreasing
        m = None
        for j in range(1,i+1)[::-1]:    # search backwards in the first i rows
            #print root.shape
            #print n, i, j
            #print 'i,j in qzdiv', i,j
            if root[j-1,1] > stake or root[j-1,1] < -0.1:
                m = j                   # get last relevant row
                break
        if m is None: return (Aout, Bout, Qout, Zout)
        for k in range(m,i):            # from relev. row to end of first part
            (Aout, Bout, Qout, Zout) = qzswitch(k, Aout, Bout, Qout, Zout)
            root[k-1:k+1, 1] = root[k-1:k+1, 1][::-1]
    return (Aout, Bout, Qout, Zout)
Example #11
def GetCovm2D(axis1_len, axis2_len, axis1_angle, nsigma=3):
    'axis1_angle measured in degrees'
    assert axis1_len>0 and axis2_len>0
    w=np.array([axis1_len, axis2_len], dtype=np.float32)
    w=(w/nsigma)**2
    v=matlib.zeros((2, 2), dtype=np.float32)
    axis1_rad=math.radians(axis1_angle)
    axis2_rad=math.radians(axis1_angle+90)
    v[:, 0]=matlib.mat((math.cos(axis1_rad), math.sin(axis1_rad))).T
    v[:, 1]=matlib.mat((math.cos(axis2_rad), math.sin(axis2_rad))).T
    #print 'v get2D', v
    return v*matlib.diag(w)*v.T
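A usage sketch, assuming math, numpy as np, and numpy.matlib as matlib are imported as the function above requires; the axis lengths and angle are illustrative values, interpreted as the 3-sigma extents of the ellipse:

cov = GetCovm2D(axis1_len=6.0, axis2_len=2.0, axis1_angle=30.0)
samples = np.random.multivariate_normal(mean=[0.0, 0.0], cov=np.asarray(cov), size=500)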
Example #12
    def fit(self):
        n = len(self.t)
        m = 3

        # diagonal matrix with diagonal elements corresponding to the duration
        # of each time frame
        W = mat.diag(self.dt)

        # Numerical integration of target TAC
        intTAC = km_integrate(self.TAC,self.t,self.startActivity)
        # Numerical integration of reference TAC
        intrefTAC = km_integrate(self.refTAC,self.t,self.startActivity)

        # ----- Get DVR, BP -----
        # Set up the weighted linear regression model
        # based on Eq. 9 in Zhou et al.
        # Per the recommendation in first paragraph on p. 979 of Zhou et al.,
        # smoothed TAC is used in the design matrix, if provided.
        if self.smoothTAC is None:
            X = np.mat(np.column_stack((intrefTAC, self.refTAC, self.TAC)))
        else:
            X = np.mat(np.column_stack((intrefTAC, self.refTAC, self.smoothTAC)))
        y = np.mat(intTAC).T
        b = linalg.solve(X.T * W * X, X.T * W * y)
        residual = y - X * b
        var_b = residual.T * W * residual / (n-m)

        DVR = b[0]
        BP = DVR - 1

        # ----- Get R1 -----
        # Set up the weighted linear regression model
        # based on Eq. 8 in Zhou et al.
        X = np.mat(np.column_stack((self.refTAC,intrefTAC,-intTAC)))
        y = np.mat(self.TAC).T
        b = linalg.solve(X.T * W * X, X.T * W * y)
        residual = y - X * b
        var_b = residual.T * W * residual / (n-m)

        R1 = b[0]

        self.BP = BP
        self.R1 = R1

        return self
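Both regressions are plain weighted least squares with W = diag(dt). Matching the design matrices above,

\hat{b} = (X^{\top} W X)^{-1} X^{\top} W y, \qquad
\text{Eq. 9: } X = \big[\textstyle\int C_R,\ C_R,\ C_T\big],\ y = \textstyle\int C_T,\ \mathrm{DVR} = \hat{b}_1,\ \mathrm{BP} = \mathrm{DVR} - 1, \qquad
\text{Eq. 8: } X = \big[C_R,\ \textstyle\int C_R,\ -\!\textstyle\int C_T\big],\ y = C_T,\ R_1 = \hat{b}_1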
Example #13
    def fit(self):
        #print(self.TAC.shape)
        for k, TAC in enumerate(self.TAC):
            W = mat.diag(self.weights[k,:])
            #y = np.mat(TAC).T
            y = TAC

            def energy_fun(theta3):
                exp_theta3_t = np.exp(np.asscalar(theta3)*self.t)
                integrant = self.refTAC * exp_theta3_t
                conv = km_integrate(integrant,self.t,self.startActivity) / exp_theta3_t
                #X = np.mat(np.column_stack((self.refTAC, conv)))
                X = np.column_stack((self.refTAC, conv))
                #thetas = solve(X.T * W * X, X.T * W * y)
                thetas = solve(X.T @ W @ X, X.T @ W @ y)
                #residual = y - X * thetas
                residual = y - X @ thetas
                #rss = residual.T * W * residual
                rss = residual.T @ W @ residual
                return rss

            res = minimize_scalar(energy_fun, bounds=(0.06, 0.6),
                                  method='bounded', options=dict(xatol=1e-1))
            theta3 = np.asscalar(res.x)

            exp_theta3_t = np.exp(theta3*self.t)
            integrant = self.refTAC * exp_theta3_t
            conv = km_integrate(integrant,self.t,self.startActivity) / exp_theta3_t
            #X = np.mat(np.column_stack((self.refTAC, conv)))
            X = np.column_stack((self.refTAC, conv))
            #thetas = solve(X.T * W * X, X.T * W * y)
            thetas = solve(X.T @ W @ X, X.T @ W @ y)

            R1 = thetas[0]
            k2 = thetas[1] + R1*theta3
            BP = k2 / theta3 - 1

            self.results['BP'][k] = BP
            self.results['R1'][k] = R1
            self.results['k2'][k] = k2

        return self
Example #14
File: core.py  Project: josipd/Gpy
    def inf(self, x, meanonly=False):
        x = np.asmatrix(x)
        assert x.shape[1] == self.d
        n = x.shape[0]
        # Handle empty test set
        if n == 0:
            return (np.zeros((0, 1)), np.zeros((0, 1)))
        ms = self.kernel.mean*np.ones((n, 1))
        Kbb = self.kernel(x, diag=True)
        # Handle empty training set
        if len(self) == 0:
            return (ms, np.asmatrix(np.diag(Kbb)).T)
        Kba = self.kernel(x, self.x)
        m = self.kernel.mean*np.ones((len(self), 1))
        fm = ms + Kba*scipy.linalg.cho_solve((self.L, True), self.y - m,
                                             overwrite_b=True)
        if meanonly:
            return fm
        else:
            W = scipy.linalg.cho_solve((self.L, True), Kba.T)
            fv = np.asmatrix(Kbb - np.sum(np.multiply(Kba.T, W), axis=0).T)
            # W = np.asmatrix(scipy.linalg.solve(self.L, Kba.T, lower=True))
            # fv = np.asmatrix(Kbb - np.sum(np.power(W, 2), axis=0).T)
            return (fm, fv)
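With self.L the lower Cholesky factor of the training covariance K_aa (as the (self.L, True) argument to cho_solve assumes), this is the usual GP posterior evaluated without forming an explicit inverse:

\mu_* = m_* + K_{*a} K_{aa}^{-1} (y - m), \qquad
\sigma_*^2 = \operatorname{diag}(K_{**}) - \operatorname{diag}\!\big(K_{*a} K_{aa}^{-1} K_{a*}\big)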
Example #15
def MStaircase(Y, Z, precision=1e-12):
    """
    Computes a smaller representation using the staircase 
    algorithm.
    
    Notes
    -----
    This function should not be called directly.
    It is used by 'MinimalRepFromME' and 'MinimalRepFromRAP'.
    
    References
    ----------
    .. [1]  P. Buchholz, M. Telek, "On minimal representation 
            of rational arrival processes." Madrid Conference
            on Queueing Theory (MCQT), June 2010.
    """

    X = []
    for y in Y:
        X.append(ml.matrix(y))
    M = len(X)
    m = X[0].shape[0]
    U = ml.eye(m)

    #The sum of the ranks calculated in every loop
    ranksum = 0
    crit = True  # the stopping criterion
    while crit:
        r = la.matrix_rank(Z, tol=precision)
        ranksum += r
        [Ui, S, T] = la.svd(Z)

        Transf = ml.eye(ranksum - r + Ui.shape[0])
        Transf[-Ui.shape[1]:, -Ui.shape[0]:] = Ui.T
        U = (Transf * U.T).T

        for i in range(M):
            TEMP = Ui.T * X[i] * Ui
            X[i] = TEMP[r:, r:]
            if i == 0:
                Z = TEMP[r:, 0:r]
            else:
                Z = np.hstack((Z, TEMP[r:, 0:r]))

        if la.norm(Z) < precision or la.matrix_rank(
                Z, tol=precision) == m - ranksum:
            crit = False

    n = ranksum
    if la.norm(Z) < precision:
        n = ranksum
        x = np.sum(U.T, 1)
        x = x[0:n]

        #does x have a 0 value somewhere
        yes = False
        zeroloc = ml.zeros((n, 1))
        nonzero = -1  # this will indicate a row of x for which x's value is non-zero
        for l in range(n):
            if abs(x[l]) < precision:
                yes = True
                zeroloc[l, 0] = 1
            elif nonzero == -1:
                nonzero = l

        R = ml.eye(n)
        if yes:
            for l in range(n):
                if zeroloc[l, 0] == 1:
                    R[l, nonzero] = 1
        y = R * x
        Gamma = ml.diag(np.array(y).flatten())
        TEMP1 = ml.eye(m)
        TEMP1[0:n, 0:n] = la.inv(Gamma)
        TEMP2 = ml.eye(m)
        TEMP2[0:n, 0:n] = R

        B = la.inv(TEMP1 * TEMP2 * U.T)
    else:
        n = m
        B = ml.eye(m)
    return (B, n)
Example #16
        caldatafile.readline())  # second line is number of data to read
    data = nm.zeros((numdata, 4))

    for i in range(0, numdata):
        zap = caldatafile.readline().split()
        data[i, 0] = float(zap[0])
        data[i, 1] = float(zap[1])
        data[i, 2] = float(zap[2])
        data[i, 3] = float(zap[3])

    Minima = [0, 0, 0, 0, 0, 0, 0, 0, 0]
    pMinima = nm.reshape(Minima, [9])
    Maxima = [10, 10, 6, 6, 6, 0.5, 0.5, 0.5, 0.5]
    pMaxima = nm.reshape(Maxima, [9])  # max and min for prior

    vcovProposal = ml.diag(
        (0.005 * (pMaxima - pMinima))**2)  # variance of the proposal

    param = core.import_pools('KL_FaunalParams')
    gmax = param[0, :]  # original values in the parameter set are the starting values
    #Values        = [1.06,1.14,1.04,1.540,1.188,0.038,0.072,0.028,0.14] ; pValues=nm.reshape(Values,[9])  # starting values of the 9 model parameters being optimised (gmax)
    priorChain = nm.zeros([chainLength, len(gmax)], 'd')
    priorChain[0, :] = gmax  # start the chain
    posteriorChain = nm.zeros([chainLength, len(gmax)], 'd')
    posteriorChain[0, :] = gmax  # start the chain

    logPrior0 = nm.sum(nm.log(stats.uniform.pdf(gmax, pMinima, pMaxima)))

    # this is the call to the model with the initial parameter values
    y, PWt, PVt, pores = core.KeylinkModel(gmax)  # returns biomasses of all pools on all days in y
Example #17
n = 200
a = nm.linspace(0, nm.pi, n // 2)
x_u = np.c_[nm.cos(a) + 0.5, nm.cos(a) - 0.5].reshape(n, 1)
u = -10 * x_u + nm.randn(n, 1)
x_v = np.c_[nm.sin(a), -nm.sin(a)].reshape(n, 1)
v = 10 * x_v + nm.randn(n, 1)
x = np.c_[u, v]
y = np.zeros((n, 1))
y[0] = 1
y[n - 1] = -1
x2 = np.sum(np.power(x, 2), 1)
hh = 2 * 1**2
k = nm.exp(-(nm.repmat(x2, 1, n) + nm.repmat(x2.T, n, 1) - 2 * x * x.T) / hh)
w = k
t_tmp1 = k**2 + 1 * np.eye(n) + 10 * k * (nm.diag(sum(w)) - w) * k
t = np.linalg.inv(t_tmp1) * (k * y)

m = 100
X = nm.linspace(-20, 20, m).T
X2 = np.power(X, 2)
U = nm.exp(
    -(nm.repmat(np.power(u, 2), 1, m) + nm.repmat(X2.T, n, 1) - 2 * u * X.T) /
    hh)
V = nm.exp(
    -(nm.repmat(np.power(v, 2), 1, m) + nm.repmat(X2.T, n, 1) - 2 * v * X.T) /
    hh)

plt.figure()
plt.xlim(-20, 20)
plt.ylim(-20, 20)
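The linear solve above appears to be the closed form of Laplacian-regularized kernel least squares: with similarity weights w = k, degree matrix D = diag of the row/column sums of the symmetric weight matrix w, and graph Laplacian L = D - w, the coefficients t solve

t = \big( K^{2} + \lambda I + \nu\, K L K \big)^{-1} K y, \qquad \lambda = 1,\ \nu = 10,

so the two labels y[0] = 1 and y[n-1] = -1 are spread to the unlabeled points through the kernel and the Laplacian smoothness penalty.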
Example #18
    def fit(self, smoothTAC=None):
        '''
        Estimate parameters of the SRTM Zhou 2003 model.

        Args:
            smoothTAC (numpy.ndarray): optional. 1- or 2-D array, where each row
                corresponds to a (spatially) smoothed time activity curve
        '''
        if smoothTAC is not None:
            if smoothTAC.ndim==1:
                if not len(smoothTAC)==len(self.t):
                    raise ValueError('smoothTAC and t must have same length')
                # make smoothTAC into a row vector
                smoothTAC = smoothTAC[np.newaxis,:]
            elif smoothTAC.ndim==2:
                if not smoothTAC.shape==self.TAC.shape:
                    raise ValueError('smoothTAC and TAC must have same shape')
            else:
                raise ValueError('smoothTAC must be 1- or 2-dimensional')

        n = len(self.t)
        m = 3

        # Numerical integration of reference TAC
        intrefTAC = km_integrate(self.refTAC,self.t,self.startActivity)

        # Compute BP/DVR, R1, k2, k2a
        for k, TAC in enumerate(self.TAC):
            W = mat.diag(self.weights[k,:])

            # Numerical integration of target TAC
            intTAC = km_integrate(TAC,self.t,self.startActivity)

            # ----- Get DVR -----
            # Set up the weighted linear regression model
            # based on Eq. 9 in Zhou et al.
            # Per the recommendation in first paragraph on p. 979 of Zhou et al.,
            # smoothed TAC is used in the design matrix, if provided.
            if smoothTAC is None:
                X = np.column_stack((intrefTAC, self.refTAC, -TAC))
            else:
                X = np.column_stack((intrefTAC, self.refTAC, -smoothTAC[k,:].flatten()))

            y = intTAC
            try:
                b = solve(X.T @ W @ X, X.T @ W @ y)
                residual = y - X @ b
                # unbiased estimator of noise variance
                noiseVar_eqDVR = residual.T @ W @ residual / (n-m)

                DVR = b[0]
                #R1 = b[1] / b[2]
                #k2 = b[0] / b[2]
                BP = DVR - 1
            except Exception:
                DVR = BP = noiseVar_eqDVR = 0

            # ----- Get R1 -----
            # Set up the weighted linear regression model
            # based on Eq. 8 in Zhou et al.
            #X = np.mat(np.column_stack((self.refTAC,intrefTAC,-intTAC)))
            X = np.column_stack((self.refTAC,intrefTAC,-intTAC))
            #y = np.mat(TAC).T
            y = TAC
            try:
                b = solve(X.T @ W @ X, X.T @ W @ y)
                residual = y - X @ b
                # unbiased estimator of noise variance
                noiseVar_eqR1 = residual.T @ W @ residual / (n-m)

                R1 = b[0]
                k2 = b[1]
                k2a = b[2]
            except Exception:
                R1 = k2 = k2a = noiseVar_eqR1 = 0

            self.results['BP'][k] = BP
            self.results['DVR'][k] = DVR
            self.results['R1'][k] = R1
            self.results['k2'][k] = k2
            self.results['k2a'][k] = k2a

            self.results['noiseVar_eqDVR'][k] = noiseVar_eqDVR
            self.results['noiseVar_eqR1'][k] = noiseVar_eqR1

        return self