Example #1
def firstInitElem(m33,sortEigs,alpha,A):

    l1=sortEigs[0]
    l2=sortEigs[1]
    l3=sortEigs[2]
    
    m1=-m33+l1+l2+l3
    m2=-((l2-l3)*(l1**2-l1*l2-l1*l3+l2*l3)*(m33**3-2*m33**2*l1+m33*l1**2-2*m33**2*l2+3*m33*l1*l2-l1**2*l2+m33*l2**2- \
      l1*l2**2-2*m33**2*l3+3*m33*l1*l3-l1**2*l3+3*m33*l2*l3-2*l1*l2*l3-l2**2*l3+m33*l3**2-l1*l3**2-l2*l3**2))/ \
      (2*m33*l1**2*l2+2*m33**2*l1**2*l2-l1**3*l2-3*m33*l1**3*l2+l1**4*l2-2*m33*l1*l2**2-2*m33**2*l1*l2**2+l1**3*l2**2+ \
      l1*l2**3+3*m33*l1*l2**3-l1**2*l2**3-l1*l2**4-2*m33*l1**2*l3-2*m33**2*l1**2*l3+l1**3*l3+3*m33*l1**3*l3-l1**4*l3+ \
      2*m33*l2**2*l3+2*m33**2*l2**2*l3-l2**3*l3-3*m33*l2**3*l3+l2**4*l3+2*m33*l1*l3**2+2*m33**2*l1*l3**2-l1**3*l3**2- \
      2*m33*l2*l3**2-2*m33**2*l2*l3**2+l2**3*l3**2-l1*l3**3-3*m33*l1*l3**3+l1**2*l3**3+l2*l3**3+3*m33*l2*l3**3-l2**2*l3**3+ \
      l1*l3**4-l2*l3**4)
    m3=((l2-l3)*(l1**2-l1*l2-l1*l3+l2*l3)*(m33**3+m33**4-m33**2*l1-2*m33**3*l1+m33**2*l1**2-m33**2*l2-2*m33**3*l2+ \
      m33*l1*l2+3*m33**2*l1*l2-m33*l1**2*l2+m33**2*l2**2-m33*l1*l2**2-m33**2*l3-2*m33**3*l3+m33*l1*l3+3*m33**2*l1*l3- \
      m33*l1**2*l3+m33*l2*l3+3*m33**2*l2*l3-l1*l2*l3-4*m33*l1*l2*l3+l1**2*l2*l3-m33*l2**2*l3+l1*l2**2*l3+m33**2*l3**2- \
      m33*l1*l3**2-m33*l2*l3**2+l1*l2*l3**2))/(-2*m33*l1**2*l2-2*m33**2*l1**2*l2-m33**3*l1**2*l2+l1**3*l2+3*m33*l1**3*l2+ \
      2*m33**2*l1**3*l2-l1**4*l2-m33*l1**4*l2+2*m33*l1*l2**2+2*m33**2*l1*l2**2+m33**3*l1*l2**2-l1**3*l2**2-2*m33*l1**3*l2**2+ \
      l1**4*l2**2-l1*l2**3-3*m33*l1*l2**3-2*m33**2*l1*l2**3+l1**2*l2**3+2*m33*l1**2*l2**3+l1*l2**4+m33*l1*l2**4-l1**2*l2**4+ \
      2*m33*l1**2*l3+2*m33**2*l1**2*l3+m33**3*l1**2*l3-l1**3*l3-3*m33*l1**3*l3-2*m33**2*l1**3*l3+l1**4*l3+m33*l1**4*l3- \
      2*m33*l2**2*l3-2*m33**2*l2**2*l3-m33**3*l2**2*l3+l2**3*l3+3*m33*l2**3*l3+2*m33**2*l2**3*l3-l2**4*l3-m33*l2**4*l3- \
      2*m33*l1*l3**2-2*m33**2*l1*l3**2-m33**3*l1*l3**2+l1**3*l3**2+2*m33*l1**3*l3**2-l1**4*l3**2+2*m33*l2*l3**2+2*m33**2*l2*l3**2+ \
      m33**3*l2*l3**2-l2**3*l3**2-2*m33*l2**3*l3**2+l2**4*l3**2+l1*l3**3+3*m33*l1*l3**3+2*m33**2*l1*l3**3-l1**2*l3**3- \
      2*m33*l1**2*l3**3-l2*l3**3-3*m33*l2*l3**3-2*m33**2*l2*l3**3+l2**2*l3**3+2*m33*l2**2*l3**3-l1*l3**4-m33*l1*l3**4+ \
      l1**2*l3**4+l2*l3**4+m33*l2*l3**4-l2**2*l3**4)
    b3=np.sum(ml.eye(3)-A,1)/(1-m3-m33)
    b2=(-m33*ml.eye(3)+A)*b3/(1-m2)
    b1=(-m3*b3+A*b2)/(1-m1)
    B=ml.hstack((b1,b2,b3))
    a1=alpha*b1
    return (a1,m1,m2,m3,B)
Example #2
def firstInitElem(m33, sortEigs, alpha, A):

    l1 = sortEigs[0]
    l2 = sortEigs[1]
    l3 = sortEigs[2]

    m1 = -m33 + l1 + l2 + l3
    m2=-((l2-l3)*(l1**2-l1*l2-l1*l3+l2*l3)*(m33**3-2*m33**2*l1+m33*l1**2-2*m33**2*l2+3*m33*l1*l2-l1**2*l2+m33*l2**2- \
      l1*l2**2-2*m33**2*l3+3*m33*l1*l3-l1**2*l3+3*m33*l2*l3-2*l1*l2*l3-l2**2*l3+m33*l3**2-l1*l3**2-l2*l3**2))/ \
      (2*m33*l1**2*l2+2*m33**2*l1**2*l2-l1**3*l2-3*m33*l1**3*l2+l1**4*l2-2*m33*l1*l2**2-2*m33**2*l1*l2**2+l1**3*l2**2+ \
      l1*l2**3+3*m33*l1*l2**3-l1**2*l2**3-l1*l2**4-2*m33*l1**2*l3-2*m33**2*l1**2*l3+l1**3*l3+3*m33*l1**3*l3-l1**4*l3+ \
      2*m33*l2**2*l3+2*m33**2*l2**2*l3-l2**3*l3-3*m33*l2**3*l3+l2**4*l3+2*m33*l1*l3**2+2*m33**2*l1*l3**2-l1**3*l3**2- \
      2*m33*l2*l3**2-2*m33**2*l2*l3**2+l2**3*l3**2-l1*l3**3-3*m33*l1*l3**3+l1**2*l3**3+l2*l3**3+3*m33*l2*l3**3-l2**2*l3**3+ \
      l1*l3**4-l2*l3**4)
    m3=((l2-l3)*(l1**2-l1*l2-l1*l3+l2*l3)*(m33**3+m33**4-m33**2*l1-2*m33**3*l1+m33**2*l1**2-m33**2*l2-2*m33**3*l2+ \
      m33*l1*l2+3*m33**2*l1*l2-m33*l1**2*l2+m33**2*l2**2-m33*l1*l2**2-m33**2*l3-2*m33**3*l3+m33*l1*l3+3*m33**2*l1*l3- \
      m33*l1**2*l3+m33*l2*l3+3*m33**2*l2*l3-l1*l2*l3-4*m33*l1*l2*l3+l1**2*l2*l3-m33*l2**2*l3+l1*l2**2*l3+m33**2*l3**2- \
      m33*l1*l3**2-m33*l2*l3**2+l1*l2*l3**2))/(-2*m33*l1**2*l2-2*m33**2*l1**2*l2-m33**3*l1**2*l2+l1**3*l2+3*m33*l1**3*l2+ \
      2*m33**2*l1**3*l2-l1**4*l2-m33*l1**4*l2+2*m33*l1*l2**2+2*m33**2*l1*l2**2+m33**3*l1*l2**2-l1**3*l2**2-2*m33*l1**3*l2**2+ \
      l1**4*l2**2-l1*l2**3-3*m33*l1*l2**3-2*m33**2*l1*l2**3+l1**2*l2**3+2*m33*l1**2*l2**3+l1*l2**4+m33*l1*l2**4-l1**2*l2**4+ \
      2*m33*l1**2*l3+2*m33**2*l1**2*l3+m33**3*l1**2*l3-l1**3*l3-3*m33*l1**3*l3-2*m33**2*l1**3*l3+l1**4*l3+m33*l1**4*l3- \
      2*m33*l2**2*l3-2*m33**2*l2**2*l3-m33**3*l2**2*l3+l2**3*l3+3*m33*l2**3*l3+2*m33**2*l2**3*l3-l2**4*l3-m33*l2**4*l3- \
      2*m33*l1*l3**2-2*m33**2*l1*l3**2-m33**3*l1*l3**2+l1**3*l3**2+2*m33*l1**3*l3**2-l1**4*l3**2+2*m33*l2*l3**2+2*m33**2*l2*l3**2+ \
      m33**3*l2*l3**2-l2**3*l3**2-2*m33*l2**3*l3**2+l2**4*l3**2+l1*l3**3+3*m33*l1*l3**3+2*m33**2*l1*l3**3-l1**2*l3**3- \
      2*m33*l1**2*l3**3-l2*l3**3-3*m33*l2*l3**3-2*m33**2*l2*l3**3+l2**2*l3**3+2*m33*l2**2*l3**3-l1*l3**4-m33*l1*l3**4+ \
      l1**2*l3**4+l2*l3**4+m33*l2*l3**4-l2**2*l3**4)
    b3 = np.sum(ml.eye(3) - A, 1) / (1 - m3 - m33)
    b2 = (-m33 * ml.eye(3) + A) * b3 / (1 - m2)
    b1 = (-m3 * b3 + A * b2) / (1 - m1)
    B = ml.hstack((b1, b2, b3))
    a1 = alpha * b1
    return (a1, m1, m2, m3, B)
Example #3
 def elementary (erep, b, k):
     bestdist = evalfun (erep, k)
     bestrep = erep
     repSize = erep[0].shape[1]
     for i in range(repSize):
         for j in range(repSize):
             if i!=j:
                 # create elementary transformation matrix with +b
                 B = ml.eye(repSize)
                 B[i,j] = b
                 B[i,i] = 1.0 - b
                 # apply similarity transform
                 newrep = transfun (erep, B)
                 newdist = evalfun (newrep, k)
                 # store result if better
                 if newdist < bestdist:
                     bestrep = newrep
                     bestdist = newdist
                 # create elementary transformation matrix with -b
                 B = ml.eye(repSize)
                 B[i,j] = -b
                 B[i,i] = 1.0 + b
                 # apply similarity transform
                 newrep = transfun (erep, B)
                 newdist = evalfun (newrep, k)
                 # store result if better
                 if newdist < bestdist:
                     bestrep = newrep
                     bestdist = newdist
     return (bestrep, bestdist)
Example #4
	def __init__(self, **kwargs):
		super(Sensor, self).__init__( **kwargs )

		self.properties.update( measurementList = kwargs.get('measurementList', self.measurementList ),
			stateList = kwargs.get('stateList',self.stateList ) )

		for attribute in self.measurementList:
			setattr(self, attribute, kwargs.get(attribute, 0.) )

		for key in self.stateList:
			attribute = key.split('.')[1]
			setattr( self, attribute, kwargs.get(attribute, 0.) )

		self.properties.update(
			Z = kwargs.get('Z', matlib.zeros( [len(self), 1] )),
			StateMap = kwargs.get('StateMap', matlib.zeros( [len(self) , 1] )),
			MeasurementJacobian = kwargs.get('MeasurementJacobian', matlib.eye( len(self) ) ),
			MeasurementCovariance = kwargs.get('MeasurementCovariance', 0.3 * matlib.eye( len(self) )),
			)

		# self.Z = kwargs.get('Z', matlib.zeros( [len(self), 1] ))
		# self.StateMap = kwargs.get('StateMap', matlib.zeros( [len(self) , 1] ))
		# self.MeasurementJacobian = kwargs.get('MeasurementJacobian', matlib.eye( len(self) ) )
		# self.MeasurementCovariance = kwargs.get('MeasurementCovariance', 0.3 * matlib.eye( len(self) ))

		self.set_Z()
Example #5
    def __init__(self):
        # model dimensions
        self.xDim = 2 # state space dimension
        self.uDim = 2 # control input dimension
        self.qDim = 2 # dynamics noise dimension
        self.zDim = 2 # observation dimension
        self.rDim = 2 # observation noise dimension

        # belief space dimension
        # note that we only store the lower (or upper) triangular portion
        # of the covariance matrix to eliminate redundancy
        self.bDim = int(self.xDim + self.xDim*((self.xDim+1)/2.))

        self.dT = 1. # time step for dynamics function
        self.T = 15 # number of time steps in trajectory

        self.alpha_belief = 10. # weighting factor for penalizing uncertainty at intermediate time steps
        self.alpha_final_belief = 10. # weighting factor for penalizing uncertainty at final time step
        self.alpha_control = 1. # weighting factor for penalizing control cost
        
        self.xMin = ml.vstack([-5,-3]) # minimum limits on state (xMin <= x)
        self.xMax = ml.vstack([5,3]) # maximum limits on state (x <= xMax)
        self.uMin = ml.vstack([-1,-1]) # minimum limits on control (uMin <= u)
        self.uMax = ml.vstack([1,1]) # maximum limits on control (u <= uMax)

        self.Q = ml.eye(self.qDim) # dynamics noise variance
        self.R = ml.eye(self.rDim) # observation noise variance

        self.start = ml.zeros([self.xDim,1]) # start state, OVERRIDE
        self.goal = ml.zeros([self.xDim,1]) # end state, OVERRIDE

        self.sqpParams = LightDarkSqpParams()
Example #6
def GS_basis(Kc, verbose=False):
    (n, n_) = Kc.shape
    assert n == n_
    if verbose: print "Beginning GS process. n = {}".format(n)
    G = mat.eye(n,n)
    deleted = 0
    for ell in xrange(n):
        if verbose: 
            if ell%10 == 0: print "ell = {}".format(ell)
        ell -= deleted
        for i in xrange(ell):
            G[:,ell] -= (G.T * Kc * G)[ell,i] * G[:,i]
        cf = (G.T*Kc*G)[ell,ell]
        if cf < (10**-8):
            if verbose: print "Deleting column " + str(ell)
            G = mat.delete(G, ell, 1)
            deleted += 1
        else:
            G[:,ell] /= cf ** .5
    GKG = G.T*Kc*G
    (dummy, g_dim) = G.shape
    assert not mat.isnan(G).any()
    if verbose: 
        print "GKG Divergence: {:e}".format(sum(abs(GKG - mat.eye(g_dim,g_dim))))
    return G
Example #7
 def elementary(erep, b, k):
     bestdist = evalfun(erep, k)
     bestrep = erep
     repSize = erep[0].shape[1]
     for i in range(repSize):
         for j in range(repSize):
             if i != j:
                 # create elementary transformation matrix with +b
                 B = ml.eye(repSize)
                 B[i, j] = b
                 B[i, i] = 1.0 - b
                 # apply similarity transform
                 newrep = transfun(erep, B)
                 newdist = evalfun(newrep, k)
                 # store result if better
                 if newdist < bestdist:
                     bestrep = newrep
                     bestdist = newdist
                 # create elementary transformation matrix with -b
                 B = ml.eye(repSize)
                 B[i, j] = -b
                 B[i, i] = 1.0 + b
                 # apply similarity transform
                 newrep = transfun(erep, B)
                 newdist = evalfun(newrep, k)
                 # store result if better
                 if newdist < bestdist:
                     bestrep = newrep
                     bestdist = newdist
     return (bestrep, bestdist)
Example #8
    def __init__(self):
        # model dimensions
        self.xDim = 2 # state space dimension
        self.uDim = 2 # control input dimension
        self.qDim = 2 # dynamics noise dimension
        self.zDim = 2 # observation dimension
        self.rDim = 2 # observation noise dimension

        # belief space dimension
        # note that we only store the lower (or upper) triangular portion
        # of the covariance matrix to eliminate redundancy
        self.bDim = int(self.xDim + self.xDim*((self.xDim+1)/2.))

        self.dT = 1. # time step for dynamics function
        self.T = 15 # number of time steps in trajectory

        self.alpha_belief = 10. # weighting factor for penalizing uncertainty at intermediate time steps
        self.alpha_final_belief = 10. # weighting factor for penalizing uncertainty at final time step
        self.alpha_control = 1. # weighting factor for penalizing control cost
        
        self.xMin = ml.vstack([-5,-3]) # minimum limits on state (xMin <= x)
        self.xMax = ml.vstack([5,3]) # maximum limits on state (x <= xMax)
        self.uMin = ml.vstack([-1,-1]) # minimum limits on control (uMin <= u)
        self.uMax = ml.vstack([1,1]) # maximum limits on control (u <= uMax)

        self.Q = ml.eye(self.qDim) # dynamics noise variance
        self.R = ml.eye(self.rDim) # observation noise variance

        self.start = ml.zeros([self.xDim,1]) # start state, OVERRIDE
        self.goal = ml.zeros([self.xDim,1]) # end state, OVERRIDE

        self.sqpParams = LightDarkSqpParams()
Example #9
    def updatePsi(self):
        self.PsicXc = np.zeros((self.nc,self.nc), dtype=np.float)
        self.PsicXe = np.zeros((self.ne,self.ne), dtype=np.float)
        self.PsicXcXe = np.zeros((self.nc,self.ne), dtype=np.float)
        #
        # print self.thetac
        # print self.pc
        # print self.distanceXc
        newPsicXc = np.exp(-np.sum(self.thetac*np.power(self.distanceXc,self.pc), axis=2))
        self.PsicXc = np.triu(newPsicXc,1)
        self.PsicXc = self.PsicXc + self.PsicXc.T + np.mat(eye(self.nc))+np.multiply(np.mat(eye(self.nc)),np.spacing(1))
        self.UPsicXc = np.linalg.cholesky(self.PsicXc)
        self.UPsicXc = self.UPsicXc.T

        newPsicXe = np.exp(-np.sum(self.thetac*np.power(self.distanceXe,self.pc), axis=2))
        self.PsicXe = np.triu(newPsicXe,1)
        self.PsicXe = self.PsicXe + self.PsicXe.T + np.mat(eye(self.ne))+np.multiply(np.mat(eye(self.ne)),np.spacing(1))
        self.UPsicXe = np.linalg.cholesky(self.PsicXe)
        self.UPsicXe = self.UPsicXe.T

        newPsiXeXc = np.exp(-np.sum(self.thetad*np.power(self.distanceXcXe,self.pd), axis=2))
        self.PsicXcXe = np.triu(newPsiXeXc,1)
Example #10
def rel2abs(pred):
    # relative to absolute transform
    data_size = pred.shape[0]
    #print(pred.shape)
    #print(pred)
    R = rb.rpy2tr(pred[:, 0:3])
    #print(R[0])
    t = rb.transl(pred[:, 3:6])
    Tl = []
    Tl.append(npmat.eye(4))  # T0
    Tl.append(
        npmat.mat(
            np.concatenate((np.concatenate(
                (R[0][0:3, 0:3], t[0][0:3, 3]), 1), npmat.mat([0, 0, 0, 1])),
                           0)))  #T1
    T = np.zeros((data_size + 1, 4, 4))
    T[0, :, :] = npmat.eye(4)
    T[1, :, :] = npmat.mat(
        np.concatenate((np.concatenate(
            (R[0][0:3, 0:3], t[0][0:3, 3]), 1), npmat.mat([0, 0, 0, 1])), 0))
    for k in range(2, data_size + 1):
        Tn = npmat.mat(
            np.concatenate(
                (np.concatenate((R[k - 1][0:3, 0:3], t[k - 1][0:3, 3]),
                                1), npmat.mat([0, 0, 0, 1])),
                0))  #relative transform from k-1 to k
        Tl.append(Tl[k - 1].dot(Tn))
        T[k, :, :] = Tl[k]
    return Tl, T
Example #11
def compute_x_T_and_two_integrals(A, a, x0, T):
    '''
    Define a new variable:
      z(t) = (1, x(t), w(t), y(t))
    where:
      w(t) = int_0^t x(s) ds
      y(t) = int_0^t w(s) ds
    and then the dynamic of z(t) is:
      d1 = 0
      dx = a*1 + A*x
      dw = x
      dy = w
    which we can re-write in matrix form as:
           (0 0 0 0)
           (a A 0 0)
      dz = (0 I 0 0) z
           (0 0 I 0)
      dz =     C     z
    So we can compute x(t), w(t), y(t) by computing
      z(t) = e^{tC} z(0)
    with z(0) = (1, x(0), 0, 0)
    '''
    n = A.shape[0]
    C = matlib.zeros((3 * n + 1, 3 * n + 1))
    C[1:1 + n, 0] = a
    C[1:1 + n, 1:1 + n] = A
    C[1 + n:1 + 2 * n, 1:1 + n] = matlib.eye(n)
    C[1 + 2 * n:, 1 + n:1 + 2 * n] = matlib.eye(n)
    z0 = np.vstack((1, x0, matlib.zeros((2 * n, 1))))
    e_TC = expm(T * C)
    z = e_TC * z0
    x = z[1:1 + n, 0]
    int_x = z[1 + n:2 * n + 1, 0]
    int2_x = z[1 + 2 * n:, 0]
    return x, int_x, int2_x
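A minimal usage sketch for the function above, on a made-up 2x2 stable system; it assumes the snippet's module-level imports (numpy as np, numpy.matlib as matlib, scipy.linalg.expm) are in place:

import numpy.matlib as matlib

# Hypothetical system dx = A*x + a with initial state x0
A = matlib.matrix([[0.0, 1.0], [-1.0, -1.0]])
a = matlib.matrix([[0.0], [1.0]])
x0 = matlib.matrix([[1.0], [0.0]])
x_T, int_x, int2_x = compute_x_T_and_two_integrals(A, a, x0, T=2.0)
print(x_T.T, int_x.T, int2_x.T)   # x(T), its integral and its double integral over [0, T]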
Example #12
def GS_basis(Kc, verbose=False):
    (n, n_) = Kc.shape
    assert n == n_
    if verbose: print "Beginning GS process. n = {}".format(n)
    G = mat.eye(n, n)
    deleted = 0
    for ell in xrange(n):
        if verbose:
            if ell % 10 == 0: print "ell = {}".format(ell)
        ell -= deleted
        for i in xrange(ell):
            G[:, ell] -= (G.T * Kc * G)[ell, i] * G[:, i]
        cf = (G.T * Kc * G)[ell, ell]
        if cf < (10**-8):
            if verbose: print "Deleting column " + str(ell)
            G = mat.delete(G, ell, 1)
            deleted += 1
        else:
            G[:, ell] /= cf**.5
    GKG = G.T * Kc * G
    (dummy, g_dim) = G.shape
    assert not mat.isnan(G).any()
    if verbose:
        print "GKG Divergence: {:e}".format(
            sum(abs(GKG - mat.eye(g_dim, g_dim))))
    return G
Example #13
        def test_basic(self):
            # test basic construction
            T = Transform()
            assert_allclose(T, np.matrix(np.eye(4)))

            # test initializing with rotation matrix
            T = Transform(Rpitch(pi / 4))
            N = np.eye(4)
            N[:3, :3] = Rpitch(pi / 4)
            assert_allclose(T, N)

            # test initializing with position vector
            pos = np.matrix([5, 2, 4]).T
            T = Transform(p=pos)
            N = np.eye(4)
            N[:3, 3] = pos
            assert_allclose(T, N)

            # test initializing with both rotation and position vector
            R = Ryaw(5)
            p = np.matrix('3;2;4')
            T = Transform(R, p)
            N = np.eye(4)
            N[:3, :3] = R
            N[:3, 3] = p
            assert_allclose(T, N)
Example #14
 def __new__(cls, R=np.eye(3), p=np.matrix([0, 0, 0]).T):
     if R is None and p is None:
         obj = super(Transform, cls).__new__(cls, np.eye(4))
     else:
         obj = super(Transform,
                     cls).__new__(cls, np.block([[R, p], [0, 0, 0, 1]]))
         # obj = np.block([[R, p], [0, 0, 0, 1]]) appears to be about the same
     return obj.view(cls)
Example #15
def getOrthColumns(m):
    '''
    Constructs the orthogonally complementing columns of the input.

    Input of the form pxr is assumed to have r<=p,
    and have either full column rank r or rank 0 (scalar or matrix)
    Output is of the form px(p-r), except:
    a) if M square and full rank p, returns scalar 0
    b) if rank(M)=0 (zero matrix), returns I_p
    (Note you cannot pass scalar zero, because dimension info would be
    missing.)
    Return type is as input type.
    '''
    if type(m) == type(asarray(m)):
        m = mat(m)
        output = 'array'
    else: output = 'matrix'
    p, r = m.shape
    # first catch the stupid input case
    if p < r: raise ValueError('need at least as many rows as columns')
    # we use lstsq(M, ones) just to exploit its rank-finding algorithm,
    rk = lstsq(m, ones(p).T)[2]
    # first the square and full rank case:
    if rk == p: result = zeros((p,0))   # note the shape! hopefully octave-like
    # then the zero-matrix case (within machine precision):
    elif rk == 0: result = eye(p)
    # now the rank-deficient case:
    elif rk < r:
        raise ValueError('sorry, matrix does not have full column rank')
    # (what's left should be ok)
    else:
        # we have to watch out for zero rows in M,
        # if they are in the first p-r positions!
        # so the (probably inefficient) algorithm:
            # 1. check the rank of each row
            # 2. if zero, then also put a zero row in c
            # 3. if not, put the next unit vector in c-row
        idr = eye(r)
        idpr = eye(p-r)
        c = empty([0,r])    # starting point  
        co = empty([0, p-r]) # will hold orth-compl.
        idrcount = 0
        for row in range(p):
            # (must be ones() instead of 1 because of 2d-requirement
            if lstsq( m[row,:], ones(1) )[2] == 0 or idrcount >= r:
                c = r_[ c, zeros(r) ]
                co = r_[ co, idpr[row-idrcount, :] ]
            else:     # row is non-zero, and we haven't used all unit vecs 
                c = r_[ c, idr[idrcount, :] ] 
                co = r_[ co, zeros(p-r) ]
                idrcount += 1
        # earlier non-general (=bug) line: c = mat(r_[eye(r), zeros((p-r, r))])
        # and:  co = mat( r_[zeros((r, p-r)), eye(p-r)] )
        # old:
        # result = ( eye(p) - c * (M.T * c).I * M.T ) * co
        result = co - c * solve(m.T * c, m.T * co)
    if output == 'array': return result.A
    else: return result
Example #16
def DMRAPFromMoments(moms, Nm):
    """
    Creates a discrete marked rational arrival process that
    has the same marginal and lag-1 joint moments as given 
    (see [1]_).
    
    Parameters
    ----------
    moms : vector of doubles
        The list of marginal moments. To obtain a discrete 
        marked rational process of order M, 2*M-1 marginal 
        moments are required.
    Nm : list of matrices, shape (M,M)
        The list of lag-1 joint moment matrices. The 
        length of the list determines K, the number of arrival 
        types of the discrete rational process.
    
    Returns
    -------
    H : list of matrices, shape (M,M)
        The H0, H1, ..., HK matrices of the discrete marked
        rational process
    
    References
    ----------
    .. [1] Andras Horvath, Gabor Horvath, Miklos Telek, "A 
           traffic based decomposition of two-class queueing
           networks with priority service," Computer Networks 
           53:(8) pp. 1235-1248. (2009)
    """

    v, H0 = MGFromMoments(moms)
    H0i = la.inv(ml.eye(H0.shape[0]) - H0)

    Ge = ml.zeros(H0.shape)
    G1 = ml.zeros(H0.shape)

    H0ip = ml.eye(H0.shape[0])
    for i in range(H0.shape[0]):
        Ge[i, :] = v * H0ip
        G1[:, i] = np.sum(H0ip, 1)
        H0ip *= (i + 1) * H0i
        if i > 0:
            H0ip *= H0

    Gei = la.inv(Ge)
    G1i = la.inv(G1)

    H = [H0]
    for i in range(1, len(Nm) + 1):
        Nmi = Nm[i - 1]
        row1 = np.array([FactorialMomsFromMoms(Nmi[0, 1:].A.flatten())])
        col1 = np.array([FactorialMomsFromMoms(Nmi[1:, 0].A.flatten())]).T
        mid = JFactorialMomsFromJMoms(Nmi[1:, 1:])
        Nmi = np.bmat([[[[Nmi[0, 0]]], row1], [col1, mid]])
        H.append((ml.eye(H0.shape[0]) - H0) * Gei * Nmi * G1i)
    return H
Example #17
 def regupdatePsi(self):
     self.Psi = np.zeros((self.n,self.n), dtype=np.float)
     self.one = np.ones(self.n)
     self.psi = np.zeros((self.n,1))
     newPsi = np.exp(-np.sum(self.theta*np.power(self.distance,self.pl), axis=2))
     self.Psi = np.triu(newPsi,1)
     self.Psi = self.Psi + self.Psi.T + eye(self.n) + eye(self.n) * (self.Lambda)
     self.U = np.linalg.cholesky(self.Psi)
     self.U = np.matrix(self.U.T)
Example #18
 def regupdatePsi(self):
     self.Psi = np.zeros((self.n, self.n), dtype=np.float)
     self.one = np.ones(self.n)
     self.psi = np.zeros((self.n, 1))
     newPsi = np.exp(-np.sum(self.theta * np.power(self.distance, self.pl), axis=2))
     self.Psi = np.triu(newPsi, 1)
     self.Psi = self.Psi + self.Psi.T + eye(self.n) + eye(self.n) * (self.Lambda)
     self.U = np.linalg.cholesky(self.Psi)
     self.U = np.matrix(self.U.T)
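The regupdatePsi/updatePsi snippets in this listing all follow the same pattern: fill only the strict upper triangle of the correlation matrix, mirror it, add the unit diagonal plus a small regularization term, and take the Cholesky factor. A standalone sketch of that pattern with a Gaussian kernel on made-up 1-D sample locations:

import numpy as np
from numpy.matlib import eye

theta, n, Lambda = 2.0, 5, 1e-10
X = np.linspace(0.0, 1.0, n)                  # hypothetical sample locations
D = np.abs(X[:, None] - X[None, :])           # pairwise distances
Psi = np.triu(np.exp(-theta * D ** 2), 1)     # strict upper triangle of the kernel
Psi = Psi + Psi.T + eye(n) + eye(n) * Lambda  # symmetric, unit diagonal, small nugget
U = np.linalg.cholesky(Psi).T                 # upper-triangular factor, Psi = U.T * U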
Example #19
def DMRAPFromMoments (moms, Nm):
    """
    Creates a discrete marked rational arrival process that
    has the same marginal and lag-1 joint moments as given 
    (see [1]_).
    
    Parameters
    ----------
    moms : vector of doubles
        The list of marginal moments. To obtain a discrete 
        marked rational process of order M, 2*M-1 marginal 
        moments are required.
    Nm : list of matrices, shape (M,M)
        The list of lag-1 joint moment matrices. The 
        length of the list determines K, the number of arrival 
        types of the discrete rational process.
    
    Returns
    -------
    H : list of matrices, shape (M,M)
        The H0, H1, ..., HK matrices of the discrete marked
        rational process
    
    References
    ----------
    .. [1] Andras Horvath, Gabor Horvath, Miklos Telek, "A 
           traffic based decomposition of two-class queueing
           networks with priority service," Computer Networks 
           53:(8) pp. 1235-1248. (2009)
    """

    v, H0 = MGFromMoments (moms)
    H0i = la.inv(ml.eye(H0.shape[0])-H0)

    Ge = ml.zeros(H0.shape)
    G1 = ml.zeros(H0.shape)

    H0ip = ml.eye(H0.shape[0])
    for i in range(H0.shape[0]):
        Ge[i,:] = v * H0ip
        G1[:,i] = np.sum(H0ip, 1)
        H0ip *= (i+1) * H0i
        if i>0:
            H0ip *= H0

    Gei = la.inv(Ge)
    G1i = la.inv(G1)
    
    H = [H0]
    for i in range(1,len(Nm)+1):
        Nmi = Nm[i-1]
        row1 = np.array([FactorialMomsFromMoms(Nmi[0,1:].A.flatten())])
        col1 = np.array([FactorialMomsFromMoms(Nmi[1:,0].A.flatten())]).T
        mid = JFactorialMomsFromJMoms(Nmi[1:,1:])
        Nmi = np.bmat([[[[Nmi[0,0]]], row1 ], [col1, mid]])
        H.append((ml.eye(H0.shape[0])-H0)*Gei*Nmi*G1i)
    return H
Example #20
 def updatePsi(self):
     self.Psi = np.zeros((self.n,self.n), dtype=np.float)
     self.one = np.ones(self.n)
     self.psi = np.zeros((self.n,1))
     newPsi = np.exp(-np.sum(self.theta*np.power(self.distance,self.pl), axis=2))
     self.Psi = np.triu(newPsi,1)
     self.Psi = self.Psi + self.Psi.T + np.mat(eye(self.n))+np.multiply(np.mat(eye(self.n)),np.spacing(1))
     self.U = np.linalg.cholesky(self.Psi)
     self.U = self.U.T
Example #21
 def updatePsi(self):
     self.Psi = np.zeros((self.n,self.n), dtype=np.float)
     self.one = np.ones(self.n)
     self.psi = np.zeros((self.n,1))
     newPsi = np.exp(-np.sum(self.theta*np.power(self.distance,self.pl), axis=2))
     self.Psi = np.triu(newPsi,1)
     self.Psi = self.Psi + self.Psi.T + np.mat(eye(self.n))+np.multiply(np.mat(eye(self.n)),np.spacing(1))
     self.U = np.linalg.cholesky(self.Psi)
     self.U = self.U.T
Example #22
 def regupdatePsi(self):
     self.Psi = np.zeros((self.n,self.n), dtype=np.float)
     self.one = np.ones(self.n)
     for i in xrange(self.n):
         for j in xrange(i+1,self.n):
             self.Psi[i,j]=np.exp(-np.sum(self.theta*np.power(np.abs((self.X[i]-self.X[j])),self.pl)))
     self.Psi = self.Psi + self.Psi.T + eye(self.n) + eye(self.n) * (self.Lambda)
     self.U = np.linalg.cholesky(self.Psi)
     self.U = np.matrix(self.U.T)
Example #23
 def test_advanced(self):
     T = Transform()
     # right multiplying matrix by a transform results in a matrix
     self.assertEqual(type((np.eye(4) * T)), np.matrix)
     # right multiplying transform by a matrix results in a transform
     self.assertEqual(type(T * np.eye(4)), Transform)
     # right multiplying transform by a non 4x4 matrix results in a matrix
     p = np.matrix('1;3;2')
     print type(T * p)
     self.assertEqual(type(T * p), type(p))
Example #24
 def _compute_Apc(self, q, com_est):
     Mlf, Mrf = self.robot.get_Mlf_Mrf(q, False)
     pyl, pzl = Mlf.translation[1:].A1
     pyr, pzr = Mrf.translation[1:].A1
     cy, cz = com_est.A1
     A_pc = matlib.zeros((3, 4))
     A_pc[:2, :2] = matlib.eye(2)
     A_pc[:2, 2:] = matlib.eye(2)
     A_pc[2, :] = np.matrix([-(pzl - cz), pyl - cy, -(pzr - cz), pyr - cy])
     b_g = np.vstack((self.m * self.g, matlib.zeros(1)))
     return A_pc, b_g
Example #25
 def getMfeature(self):
     mata = npmatrix.empty((3, 4))  # random data
     mata = npmatrix.zeros((3, 4))  # zeros
     mata = npmatrix.ones((3, 4))  # one
     mata = npmatrix.eye(3)  # one along diagonal
     mata = npmatrix.eye(3, 5)  # one along diagonal
     mata = npmatrix.identity(3)  # identity square matrix
     mata = npmatrix.rand(3, 7)  # rand data
     mata = npmatrix.ones((3, 1))  # one
     print(mata)
     print(mata.shape)
     print(mata.dtype)
Example #26
def analyze_gain_tuning_tsid_rigid(conf):
    nc = conf.nc
    (H, A, B) = compute_integrator_dynamics(matlib.zeros((nc, 4 * nc)))
    In = matlib.eye(nc)
    A[nc:, :] = 0.0
    B[nc:2 * nc, :] = In
    B[3 * nc:, :] = 0.0
    P = matlib.eye(5 * nc)

    def compute_system_matrices(gains):
        K = np.hstack([gains[0] * In, gains[1] * In, 0.0 * In, 0.0 * In])
        return A, B, K

    analyze_results(conf, compute_system_matrices, P)
Example #27
def compute_double_integral_x_T(A,
                                a,
                                x0,
                                T,
                                dt=None,
                                compute_also_integral=False,
                                invertible_A=True):
    if (dt is not None):
        N = int(T / dt)
        int2_x = matlib.zeros_like(x0)
        for i in range(1, N):
            int_x = compute_integral_x_T(A, a, x0, i * dt)
            int2_x += dt * int_x
        return int2_x

    if (invertible_A):
        e_TA = expm(T * A)
        Ainv_a = solve(A, a)
        Ainv_x0_plus_Ainv_a = solve(A, x0 + Ainv_a)
        Ainv2_x0_plus_Ainv_a = solve(A, Ainv_x0_plus_Ainv_a)
        I = matlib.eye(A.shape[0])
        int2_x = (
            e_TA - I
        ) * Ainv2_x0_plus_Ainv_a - T * Ainv_x0_plus_Ainv_a - 0.5 * T * T * Ainv_a
        if compute_also_integral:
            int_x = (e_TA - I) * Ainv_x0_plus_Ainv_a - T * Ainv_a
            return int_x, int2_x
        return int2_x

    n = A.shape[0]
    C = matlib.zeros((n + 3, n + 3))
    C[0:n, 0:n] = A
    C[0:n, n] = a
    C[0:n, n + 1] = x0
    C[n:n + 2, n + 1:] = matlib.eye(2)
    z0 = matlib.zeros((n + 3, 1))
    z0[-1, 0] = 1.0
    e_TC = expm(T * C)
    z = e_TC * z0
    int2_x = z[:n, 0]

    # print("A\n", A)
    # print("a\n", a.T)
    # print("x0\n", x0.T)
    # print("C\n", C)
    # print("z0\n", z0.T)
    # print("z\n", z.T)

    return int2_x
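When A is invertible, the closed-form branch and the augmented-matrix branch above compute the same double integral, so a quick consistency check is possible; the values below are made up, and the snippet's module-level imports (numpy.matlib as matlib, scipy.linalg.expm, numpy.linalg.solve) are assumed:

import numpy as np
import numpy.matlib as matlib

A = matlib.matrix([[-1.0, 0.5], [0.0, -2.0]])
a = matlib.matrix([[1.0], [0.0]])
x0 = matlib.matrix([[0.2], [-0.1]])
v1 = compute_double_integral_x_T(A, a, x0, T=1.5, invertible_A=True)
v2 = compute_double_integral_x_T(A, a, x0, T=1.5, invertible_A=False)
print(np.allclose(v1, v2))   # expected: True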
Example #28
	def __init__(self,*args, **kwargs):
		super(Gyroscope, self).__init__()
		self.properties = dict()
		if len(args)==1:
			arg = args[0]
			if type(arg)==dict:
				for key, value in arg.items():
					setattr(self, key, value)
			elif type(arg) == tuple or type(arg) == list:
				self.yaw = arg[0]
				self.pitch = arg[1]
				self.roll = arg[2]
			else: #np vector
				self.yaw = arg.item(0)
				self.pitch = arg.item(1)
				self.roll = arg.item(2)
		elif len(args)==3:
			self.yaw = args[0]
			self.pitch = args[1]
			self.roll = args[2]

		for key, value in kwargs.items():
			setattr(self, key, value)


		self.Covariance = kwargs.get('Covariance', 0.4 * matlib.eye(3) )
Example #29
def compute_quadratic_state_integral_ALDS(H, x0, T, dt=None):
    ''' Assuming the state x(t) evolves in time according to a linear dynamic:
            dx(t)/dt = H * x(t)
        Compute the following integral:
            int_{0}^{T} x(t)^T * x(t) dt
    '''
    if (dt is None):
        w, V = eig(H)  # H = V*matlib.diagflat(w)*V^{-1}
        print "Eigenvalues H:", np.sort_complex(w).T
        Lambda_inv = matlib.diagflat(1.0 / w)
        e_2T_Lambda = matlib.diagflat(np.exp(2 * T * w))
        int_e_2T_Lambda = 0.5 * Lambda_inv * (e_2T_Lambda - matlib.eye(H.shape[0]))
        #    V_inv = np.linalg.inv(V)
        #    cost = x0.T*(V_inv.T*(int_e_2T_Lambda*(V_inv*x0)))
        V_inv_x0 = np.linalg.solve(V, x0)
        cost = V_inv_x0.T * int_e_2T_Lambda * V_inv_x0
        return cost[0, 0]

    N = int(T / dt)
    x = simulate_ALDS(H, x0, dt, N)
    cost = 0.0
    not_finite_warning_printed = False
    for i in range(N):
        if (np.all(np.isfinite(x[:, i]))):
            cost += dt * (x[:, i].T * x[:, i])[0, 0]
        elif (not not_finite_warning_printed):
            print 'WARNING: x is not finite at time step %d' % (i)  #, x[:,i].T
            not_finite_warning_printed = True
    return cost
Example #30
def compute_integrator_dynamics(K):
    ''' Compute the matrices associated to an n-th order continuous time integrator.
        The form of the dynamics is: 
            dx = A*x + B*u
        The control law is a linear state feedback:
            u = -K*x
        Input parameters:
            K : n-dimensional vector of feedback gains
        Returns a tuple containing the following elements:
            H: closed-loop dynamics matrix (A-B*K)
            A: state transition matrix
            B: control input matrix
    '''

    m = K.shape[0]  # size of pos vector
    n = K.shape[1] // m  # integrator order
    H = matlib.zeros((m * n, m * n))
    A = matlib.zeros((m * n, m * n))
    B = matlib.zeros((m * n, m))
    H[-m:, :] = -K
    I = matlib.eye(m)
    B[-m:, :] = I
    for i in range(n - 1):
        H[m * i:m * (i + 1), m * (i + 1):m * (i + 2)] = I
        A[m * i:m * (i + 1), m * (i + 1):m * (i + 2)] = I
    return (H, A, B)
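For a single position coordinate with proportional-derivative feedback this reduces to the familiar closed-loop double integrator; a small usage sketch with made-up gains:

import numpy.matlib as matlib

K = matlib.matrix([[4.0, 2.0]])   # m = 1, n = 2, i.e. u = -4*x - 2*dx
H, A, B = compute_integrator_dynamics(K)
print(H)                          # [[ 0.  1.]
                                  #  [-4. -2.]]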
Example #31
	def __init__(self,nextState,measurement,uncertainty,state,covariance):
		self.F=nextState
		self.H=measurement
		self.R=uncertainty
		self.state=state
		self.covariance=covariance
		self.I=eye(self.F.shape[1])
Example #32
 def __init__(self, R=npm.eye(3), t=npm.zeros(3)):
     """
     Pose is defined as p_cw (pose of the world wrt camera)
     """
     p = RigidTransform.from_Rt(R, t)
     RigidTransform.__init__(self, xyzw=p.quat.to_xyzw(), tvec=p.tvec)
     self.__cached_inverse = None
Example #33
def MarginalDistributionFromDMRAP(H):
    """
    Returns the matrix geometrically distributed marginal 
    distribution of a discrete marked rational arrival 
    process.
    
    Parameters
    ----------
    H : list/cell of matrices of shape(M,M), length(N)
        The H0...HN matrices of the DMRAP
    precision : double, optional
        Numerical precision for checking if the input is valid.
        The default value is 1e-14
    
    Returns
    -------
    alpha : matrix, shape (1,M)
        The initial vector of the matrix geometrically
        distributed marginal distribution
    A : matrix, shape (M,M)
        The matrix parameter of the matrix geometrically
        distributed marginal distribution    
    """

    if butools.checkInput and not CheckDMRAPRepresentation(H):
        raise Exception(
            "MarginalDistributionFromDMRAP: Input is not a valid DMRAP representation!"
        )

    return (DRPSolve(
        la.inv(ml.eye(H[0].shape[0]) - H[0]) * SumMatrixList(H[1:])), H[0])
Example #34
def DRPSolve (P):
    """
    Computes the stationary solution of a discrete time 
    Markov chain.
    
    Parameters
    ----------
    P : matrix, shape (M,M)
        The matrix parameter of the rational process
        
    Returns
    -------
    pi : row vector, shape (1,M)
        The vector that satisfies 
        `\pi\, P = \pi, \sum_i \pi_i=1`
    
    Notes
    -----
    Discrete time rational processes are like discrete time 
    Markov chains, but the P matrix does not have to pass 
    the :func:`CheckProbMatrix` test (but the rowsums still 
    have to be ones).
    """

    if butools.checkInput and np.any(np.sum(P,1)-1.0>butools.checkPrecision):
        raise Exception("DRPSolve: The matrix has a rowsum which isn't 1!")

    if not isinstance(P,np.ndarray):
        P = np.array(P)

    return CRPSolve(P-ml.eye(P.shape[0]))
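DRPSolve delegates the actual computation to CRPSolve, which is not shown here. For a proper stochastic matrix the same stationary vector can also be obtained directly from the left eigenvector of eigenvalue 1, as in this standalone sketch (the transition matrix is made up):

import numpy as np
import numpy.matlib as ml

P = ml.matrix([[0.9, 0.1],
               [0.4, 0.6]])
w, V = np.linalg.eig(P.T)                         # columns of V are left eigenvectors of P
pi = np.real(V[:, np.argmin(np.abs(w - 1.0))]).T  # eigenvector for eigenvalue 1
pi = pi / np.sum(pi)                              # normalize so the entries sum to 1
print(pi)   # approximately [[0.8 0.2]], satisfying pi * P = pi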
Example #35
	def __init__(self, **kwargs):
		super(BasicProcess, self).__init__( **kwargs )
		self.properties.update( stateList = kwargs.get('stateList', self.stateList) )  #List from property to index in X state
		self.properties.update(
			X = kwargs.get('X', matlib.zeros( [len(self), 1] )),
			ProcessJacobian = kwargs.get('ProcessJacobian', matlib.eye( len(self) ) ),
			Covariance = kwargs.get('Covariance', 0.3 * matlib.eye( len(self) ) ),
			Ts = kwargs.get('Ts', 0.0),
			position = kwargs.get('position', SixDofObject()),
			quaternion = kwargs.get('quaternion', Quaternion()),
			velocity = kwargs.get('velocity', SixDofObject()),
			acceleration = kwargs.get('acceleration', SixDofObject()) 
			)

		self.time = rospy.get_time()
		"""
Example #36
def MarginalDistributionFromDRAP(H0, H1):
    """
    Returns the matrix geometrically distributed marginal 
    distribution of a discrete rational arrival process.
    
    Parameters
    ----------
    H0 : matrix, shape (M,M)
        The H0 matrix of the discrete rational arrival process
    H1 : matrix, shape (M,M)
        The H1 matrix of the discrete rational arrival process
    precision : double, optional
        Numerical precision for checking if the input is valid.
        The default value is 1e-14
    
    Returns
    -------
    alpha : matrix, shape (1,M)
        The initial vector of the matrix geometrically
        distributed marginal distribution
    A : matrix, shape (M,M)
        The matrix parameter of the matrix geometrically
        distributed marginal distribution    
    """

    if butools.checkInput and not CheckDRAPRepresentation(H0, H1):
        raise Exception(
            "MarginalDistributionFromDRAP: Input is not a valid DRAP representation!"
        )

    return (DRPSolve(la.inv(ml.eye(H0.shape[0]) - H0) * H1), H0)
Example #37
	def __init__(self, **kwargs):
		super(GPS, self).__init__()
		self.latitude = kwargs.get('latitude', 0.0)
		self.longitude = kwargs.get('longitude', 0.0)
		self.altitude = kwargs.get('altitude', 0.0)

		easting, northing, number, letter = utm.from_latlon( self.latitude, self.longitude )

		self.easting = easting
		self.northing = northing

		self.zero = dict( 
			latitude = 0.0,
			longitude = 0.0,
			altitude = 0.0,
			easting = 0.0, 
			northing = 0.0, 
			x = 0.0,
			y = 0.0, 
			z = 0.0,
			yaw = 0.0,)
		#self.set_enu()
		self.calibrated = False
		self.valid = False

		self.Covariance = kwargs.get('Covariance',  matlib.eye(3) )
Example #38
	def __init__(self, *args, **kwargs):
		super(Accelerometer, self).__init__()
		self.properties = dict()
		if len(args)==1:
			arg = args[0]
			if type(arg)==dict:
				for key, value in arg.items():
					setattr(self, key, value)
			elif type(arg) == tuple or type(arg) == list:
				self.x = arg[0]
				self.y = arg[1]
				self.z = arg[2]
			else: #np vector
				self.x = arg.item(0)
				self.y = arg.item(1)
				self.z = arg.item(2)
		elif len(args)==3:
			self.x = args[0]
			self.y = args[1]
			self.z = args[2]

		for key, value in kwargs.items():
			setattr(self, key, value)

		self.g = sqrt( self.x ** 2 + self.y ** 2 + self.z ** 2 )

		self.Covariance = kwargs.get('Covariance', 0.4 * matlib.eye(3) )
Example #39
def FluidStationaryDistr (mass0, ini, K, clo, x):
    """
    Returns the stationary distribution of a Markovian 
    fluid model at the given points.
    
    Parameters
    ----------
    mass0 : matrix, shape (1,Np+Nm)
        The stationary probability vector of zero level
    ini : matrix, shape (1,Np)
        The initial vector of the stationary density
    K : matrix, shape (Np,Np)
        The matrix parameter of the stationary density
    clo : matrix, shape (Np,Np+Nm)
        The closing matrix of the stationary density
    x : vector, length (K)
        The distribution function is computed at these 
        points.
    
    Returns
    -------
    pi : matrix, shape (K,Nm+Np)
        The ith row of pi is the probability that the fluid
        level is less than or equal to x(i), while being in
        different states of the background process.
    """

    m = clo.shape[1]
    y = ml.empty((len(x),m))
    closing = -K.I*clo
    for i in range(len(x)):
        y[i,:] = mass0 + ini*(ml.eye(K.shape[0])-la.expm(K*x[i]))*closing

    return y
Example #40
def compute_integral_x_T(A, a, x0, T, dt=None, invertible_A=True):
    if (dt is not None):
        N = int(T / dt)
        int_x = dt * x0
        for i in range(1, N):
            x = compute_x_T(A, a, x0, i * dt)
            int_x += dt * x
        return int_x

    if (invertible_A):
        e_TA = expm(T * A)
        Ainv_a = solve(A, a)
        Ainv_x0_plus_Ainv_a = solve(A, x0 + Ainv_a)
        I = matlib.eye(A.shape[0])
        return (e_TA - I) * Ainv_x0_plus_Ainv_a - T * Ainv_a

    n = A.shape[0]
    C = matlib.zeros((n + 2, n + 2))
    C[0:n, 0:n] = A
    C[0:n, n] = a
    C[0:n, n + 1] = x0
    C[n:n + 1, n + 1:] = 1.0
    z0 = matlib.zeros((n + 2, 1))
    z0[-1, 0] = 1.0
    e_TC = expm(T * C)
    z = e_TC * z0
    int_x = z[:n, 0]
    return int_x
Example #41
def vecm2varcoeffs(gammas, maxlag, alpha, beta):
    '''
    Converts Vecm coeffs to levels VAR representation.

    Gammas need to be coeffs in shape #endo x (maxlag-1)*#endo,
    such that contemp_diff = alpha*ect + Gammas * lagged_diffs 
    is okay when contemp_diff is  #endo x 1.
    We expect matrix input!
    '''
    if alpha.shape != beta.shape:  # hope this computes for tuples
        raise ValueError('alpha and beta must have equal dim')
    N_y = alpha.shape[0]
    if beta.shape[0] != N_y:
        raise ValueError("alpha or beta dim doesn't match")
    if gammas.shape[0] != N_y:
        raise ValueError("alpha or gammas dim doesn't match")
    if gammas.shape[1] != (maxlag - 1) * N_y:
        raise ValueError("maxlag or gammas dim doesn't match")

    # starting point first lag:
    levelscoeffs = eye(N_y) + alpha * beta.T + gammas[:, :N_y]
    # intermediate lags:
    for lag in range(1, maxlag - 1):
        levelscoeffs = c_[ levelscoeffs, gammas[:, N_y*lag : N_y*(lag+1)] - \
                          gammas[:,  N_y*(lag-1) : N_y*lag ] ]
    # last diff-lag, now this should be N_y x maxlags*N_y:
    return c_[levelscoeffs, -gammas[:, -N_y:]]
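A minimal usage sketch for a hypothetical two-variable system with maxlag=2 (a single lagged-difference block); it assumes eye and c_ are imported from numpy at module level, as the snippet requires:

import numpy as np

alpha = np.matrix([[0.10], [0.05]])   # made-up adjustment coefficients
beta = np.matrix([[1.0], [-1.0]])     # made-up cointegrating vector
gammas = np.matrix([[0.3, 0.0],
                    [0.1, 0.2]])      # one N_y x N_y block of lagged-difference coefficients
A = vecm2varcoeffs(gammas, maxlag=2, alpha=alpha, beta=beta)
print(A.shape)                        # (2, 4): the two lag-coefficient matrices of the levels VAR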
Example #42
def DRPSolve(P):
    """
    Computes the stationary solution of a discrete time 
    Markov chain.
    
    Parameters
    ----------
    P : matrix, shape (M,M)
        The matrix parameter of the rational process
        
    Returns
    -------
    pi : row vector, shape (1,M)
        The vector that satisfies 
        `\pi\, P = \pi, \sum_i \pi_i=1`
    
    Notes
    -----
    Discrete time rational processes are like discrete time 
    Markov chains, but the P matrix does not have to pass 
    the :func:`CheckProbMatrix` test (but the rowsums still 
    have to be ones).
    """

    if butools.checkInput and np.any(
            np.sum(P, 1) - 1.0 > butools.checkPrecision):
        raise Exception("DRPSolve: The matrix has a rowsum which isn't 1!")

    if not isinstance(P, np.ndarray):
        P = np.array(P)

    return CRPSolve(P - ml.eye(P.shape[0]))
Example #43
def permutation_matrix(dim):
    '''
    Generate haar permuation matrix.
    '''
    I = M.eye(dim, dim)
    return M.concatenate((I[::2][:],
                          I[1::2][:]))
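For example, with dim = 4 the rows of the identity are reordered to even indices first, then odd (a usage sketch, assuming the numpy.matlib import aliased as M above):

P = permutation_matrix(4)
print(P)
# [[1. 0. 0. 0.]
#  [0. 0. 1. 0.]
#  [0. 1. 0. 0.]
#  [0. 0. 0. 1.]]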
Example #44
def MarginalDistributionFromDMRAP (H):
    """
    Returns the matrix geometrically distributed marginal 
    distribution of a discrete marked rational arrival 
    process.
    
    Parameters
    ----------
    H : list/cell of matrices of shape(M,M), length(N)
        The H0...HN matrices of the DMRAP
    precision : double, optional
        Numerical precision for checking if the input is valid.
        The default value is 1e-14
    
    Returns
    -------
    alpha : matrix, shape (1,M)
        The initial vector of the matrix geometrically
        distributed marginal distribution
    A : matrix, shape (M,M)
        The matrix parameter of the matrix geometrically
        distributed marginal distribution    
    """

    if butools.checkInput and not CheckDMRAPRepresentation (H):
        raise Exception("MarginalDistributionFromDMRAP: Input is not a valid DMRAP representation!")    

    return (DRPSolve(la.inv(ml.eye(H[0].shape[0])-H[0])*SumMatrixList(H[1:])), H[0])
Example #45
def eig_ortho(Kc, Beta):
    """Takes Beta, a matrix with coefficients for eigenvectors of two datasets. This function returns Omega, which has coefficients for the 'combined' orthonormal eigenvector set.
    Specifically, Beta[:,0:r] represents the coefficients for an orthonormal set of eigenvectors, and Beta[:,r:2r] represents the coefficients for another orthonormal set of eigenvectors. Omega has the coefficients for the combined orthonormal basis
    Omega[:,0:r] = Beta[:,0:r] since those eigenvectors were already orthonormal. Omega[:,r:2*r] is the second set of eigenvectors, orthogonalized relative to the first set"""
    (n, rr) = Beta.shape
    # rr literally stands for '2r'
    (n_, n__) = Kc.shape
    assert n == n_ == n__
    Gamma = mat.eye(rr)
    r = rr / 2
    for i in xrange(r, rr):  # Skips the first r columns since they're already orthonormal
        for j in xrange(i):  # Make sure the vector is orthogonal to each previous vector
            Omega = Beta * Gamma
            g = float(Omega.T[i, :] * Kc * Omega[:, j])
            Gamma[:, i] -= g * Gamma[:, j]
        Omega = Beta * Gamma
        nrm = float(Omega.T[i, :] * Kc * Omega[:, i])
        assert nrm > 0
        nrm **= .5
        Gamma[:, i] /= nrm
    return Beta * Gamma
Example #46
def nk_bhatta(X1, X2, eta):
    # Returns the non-kernelized Bhattacharyya coefficient,
    # i.e. fits normal distributions in input space and calculates the Bhattacharyya overlap between them
    (n1, d1) = X1.shape
    (n2, d ) = X2.shape
    assert d1 == d
    mu1 = mat.sum(X1,0) / n1
    mu2 = mat.sum(X2,0) / n2
    X1c = X1 - mat.tile(mu1, (n1,1))
    X2c = X2 - mat.tile(mu2, (n2,1))
    Eta = mat.eye(d) * eta
    S1 = X1c.T * X1c / n1 + Eta
    S2 = X2c.T * X2c / n2 + Eta

    mu3 = .5 * (S1.I * mu1.T + S2.I * mu2.T).T
    S3  = 2  * (S1.I + S2.I).I

    d1 = la.det(S1) ** -.25
    d2 = la.det(S2) ** -.25
    d3 = la.det(S3) ** .5
    dterm = d1 * d2 * d3

    e1 = -.25 * mu1 * S1.I * mu1.T
    e2 = -.25 * mu2 * S2.I * mu2.T
    e3 = .5   * mu3 * S3   * mu3.T

    eterm = math.exp(e1 + e2 + e3)

    return float(dterm * eterm)
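As a sanity check, passing the same dataset twice should give an overlap close to 1, since the determinant and exponential terms both reduce to 1; a small sketch with made-up data, assuming the snippet's module-level imports (numpy.matlib as mat, numpy.linalg as la, math):

import numpy as np

rng = np.random.RandomState(0)
X = np.matrix(rng.randn(40, 3))    # made-up dataset
print(nk_bhatta(X, X, eta=1e-6))   # ~1.0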
Example #47
def vecm2varcoeffs(gammas, maxlag, alpha, beta):
    '''
    Converts Vecm coeffs to levels VAR representation.

    Gammas need to be coeffs in shape #endo x (maxlag-1)*#endo,
    such that contemp_diff = alpha*ect + Gammas * lagged_diffs 
    is okay when contemp_diff is  #endo x 1.
    We expect matrix input!
    '''
    if alpha.shape != beta.shape:   # hope this computes for tuples
        raise ValueError('alpha and beta must have equal dim')
    N_y = alpha.shape[0]
    if beta.shape[0] != N_y:
        raise ValueError("alpha or beta dim doesn't match")
    if gammas.shape[0] != N_y:
        raise ValueError("alpha or gammas dim doesn't match")
    if gammas.shape[1] != (maxlag-1)*N_y:
        raise ValueError("maxlag or gammas dim doesn't match")

    # starting point first lag:
    levelscoeffs = eye(N_y) + alpha * beta.T + gammas[ : , :N_y ]
    # intermediate lags:
    for lag in range(1, maxlag-1):
        levelscoeffs = c_[ levelscoeffs, gammas[:, N_y*lag : N_y*(lag+1)] - \
                          gammas[:,  N_y*(lag-1) : N_y*lag ] ]
    # last diff-lag, now this should be N_y x maxlags*N_y:
    return c_[ levelscoeffs, -gammas[:, -N_y: ] ]
Example #48
def MarginalDistributionFromDRAP (H0, H1):
    """
    Returns the matrix geometrically distributed marginal 
    distribution of a discrete rational arrival process.
    
    Parameters
    ----------
    H0 : matrix, shape (M,M)
        The H0 matrix of the discrete rational arrival process
    H1 : matrix, shape (M,M)
        The H1 matrix of the discrete rational arrival process
    precision : double, optional
        Numerical precision for checking if the input is valid.
        The default value is 1e-14
    
    Returns
    -------
    alpha : matrix, shape (1,M)
        The initial vector of the matrix geometrically
        distributed marginal distribution
    A : matrix, shape (M,M)
        The matrix parameter of the matrix geometrically
        distributed marginal distribution    
    """

    if butools.checkInput and not CheckDRAPRepresentation (H0, H1):
        raise Exception("MarginalDistributionFromDRAP: Input is not a valid DRAP representation!")    

    return (DRPSolve(la.inv(ml.eye(H0.shape[0])-H0)*H1), H0)
Example #49
def Jacobi(A, N, TOL = 0.000000000005, max_it = 100):
    it = 1
    while (it <= max_it):
        for i in range(N - 1):
            j = i + 1
            for k in range(j):
                P = matlib.eye(N, N)
                if (A[j, j] != A[k, k]):
                    c = 2 * A[j, k] * sgn(A[j, j] - A[k, k])
                    b = math.fabs(A[j, j] - A[k, k])
                    P[j, j] = P[k, k] = math.sqrt((1 + b / math.sqrt(c*c + b*b)) / 2)
                    P[k, j] = c / (2 * P[j, j] * math.sqrt(c*c + b*b))
                    P[j, k] = - P[k, j]
                else:
                    P[j, j] = P[k, k] = math.sqrt(1/2)
                    P[k, j] = P[j, j]
                    P[j, k] = - P[k, j]
                A = np.transpose(P).dot(A).dot(P)
        if (sum_off_diagonal(A, N) < TOL):
            print('it = ' + str(it) + ',    A = ')
            for r in range(N):
                row = ' '
                for c in range(N):
                    row += (' ' + str(A[r, c])).rjust(28)
                print(row)
            return A
        it += 1
    print("Maximum number of iterations exceeded")
    return
Example #50
def sde_coupled(x0, y0, t0, b, sigma, N, h, delta=None):
    if delta is None:
        delta = h
#    t = np.arange(T[0], T[1]+h/2, h)
    t = np.arange(t0, t0 + N * h, h)
    t.shape = (t.shape[0], 1)
    #    N = t.size
    d = x0.shape[0]
    dB = np.mat(np.random.normal(0, np.sqrt(h), (d, N)))
    dB[:, 0] = 0
    X = mat.zeros((d, N))
    X[:, 0] = x0
    Y = mat.zeros((d, N))
    Y[:, 0] = y0
    Id = mat.eye(d)
    dBhat = mat.zeros((d, N))
    for i in range(0, N - 1):
        dBhat[:, i + 1] = (Id - 2 * proj(sigma, X[:, i] - Y[:, i])) * dB[:, i + 1]
        X[:, i + 1] = X[:, i] + h * b(t[i], X[:, i]) + sigma * dB[:, i + 1]
        Y[:, i + 1] = Y[:, i] + h * b(t[i], Y[:, i]) + sigma * dBhat[:, i + 1]
        if la.norm(X[:, i + 1] - Y[:, i + 1]) < delta:
            Y[:, i + 1] = X[:, i + 1]
        if d == 1 and (X[:, i] - Y[:, i]) * (X[:, i + 1] - Y[:, i + 1]) < 0:
            Y[:, i + 1] = X[:, i + 1]
    B = np.cumsum(dB, axis=1)
    Bhat = np.cumsum(dBhat, axis=1)
    return (X, Y, B, Bhat, t.T)
Example #51
    def hotCmntsForTest(self, postId, nCmnts = 5):
        self.buildgraph(postId)
        
        testsizes = [shape(self.prg)[0], 800, 600, 400, 200]
        
        for size in testsizes:
            
            self.prg = self.prg[0:size,0:size]
            lil = lil_matrix(self.prg)
            
            start = clock()
            #eig  = eigs(self.prg, k=1, return_eigenvectors =False)
            eig = eigs(lil, return_eigenvectors =False, maxiter=10, tol=1E-5)
            eig = eig[0].real
            eig = 1/eig
            eigTime = clock() - start            
            print 'test_size:',size, 'eigTime:',eigTime        

            one = ones(size)
            m = eye(size) - eig*lil  
            
            start = clock()
            cmnts_ranking = lu_solve((m, one), one)
            solveTime = clock() - start
            
            print 'test_size:',size, 'solveTime:',solveTime
Example #52
def GM1StationaryDistr(B, R, K):
    """
    Returns the stationary distribution of the G/M/1 type
    Markov chain up to a given level K.
    
    Parameters
    ----------
    A : length(M) list of matrices of shape (N,N)
        Matrix blocks of the G/M/1 type generator in the 
        regular part, from 0 to M-1.
    B : length(M) list of matrices of shape (N,N)
        Matrix blocks of the G/M/1 type generator at the
    R : matrix, shape (N,N)
        Matrix R of the G/M/1 type Markov chain
    K : integer
        The stationary distribution is returned up to
        this level.
    
    Returns
    -------
    pi : array, shape (1,(K+1)*N)
        The stationary probability vector up to level K
    """

    if not isinstance(B, np.ndarray):
        B = np.vstack(B)

    m = R.shape[0]
    I = ml.eye(m)

    temp = (I - R).I
    if np.max(temp < -100 * butools.checkPrecision):
        raise Exception(
            "The spectral radius of R is not below 1: GM1 is not pos. recurrent"
        )

    maxb = B.shape[0] // m
    BR = B[(maxb - 1) * m:, :]
    for i in range(maxb - 1, 0, -1):
        BR = R * BR + B[(i - 1) * m:i * m, :]

    pix = DTMCSolve(BR)
    pix = pix / np.sum(pix * temp)

    pi = [pix]
    sumpi = np.sum(pix)
    numit = 1
    while sumpi < 1.0 - 1e-10 and numit < 1 + K:
        pix = pix * R
        # compute pi_(numit+1)
        numit += 1
        sumpi += np.sum(pix)
        pi.append(ml.matrix(pix))
        if butools.verbose:
            print("Accumulated mass after ", numit, " iterations: ", sumpi)

    if butools.verbose and numit == K + 1:
        print("Maximum Number of Components ", numit - 1, " reached")

    return np.hstack(pi)
Example #53
def MomentsFromMG (alpha, A, K=0):
    """
    Returns the first K moments of a matrix geometric 
    distribution.
    
    Parameters
    ----------
    alpha : vector, shape (1,M)
        The initial vector of the matrix-geometric distribution.
        The sum of the entries of alpha is less or equal to 1.
    A : matrix, shape (M,M)
        The matrix parameter of the matrix-geometric 
        distribution.
    K : int, optional
        Number of moments to compute. If K=0, 2*M-1 moments are
        computed. The default value is 0.
    prec : double, optional
        Numerical precision for checking the input.
        The default value is 1e-14.
    
    Returns
    -------
    moms : row vector of doubles
        The vector of moments.
        
    """

    if butools.checkInput and not CheckMGRepresentation (alpha, A):
        raise Exception("MomentsFromMG: Input is not a valid MG representation!")

    m = A.shape[0]
    if K==0:
        K = 2*m-1
    Ai = la.inv(ml.eye(m)-A)
    return MomsFromFactorialMoms([math.factorial(i)*np.sum(alpha*Ai**i*A**(i-1)) for i in range(1,K+1)])
Example #54
	def __init__(self,nextState,measurement,uncertainty,state,covariance):
		self.F=matrix(nextState)
		self.H=matrix(measurement)
		self.R=matrix(uncertainty)
		self.state=matrix(state)
		self.covariance=matrix(covariance)
		self.I=eye(self.F.shape[1])
Example #55
 def _update(self):
     self.K = self.kernel(self.x)
     if self.K.shape[0] == 0:
         self.L = np.zeros((0, 0))
     else:
         sn2 = np.exp(2*self.kernel.lik)
         self.K = self.K + sn2*np.eye(len(self))
         self.L = scipy.linalg.cholesky(self.K, lower=True)
Example #56
def LagkJointMomentsFromMRAP (H, K=0, L=1):
    """
    Returns the lag-L joint moments of a marked rational 
    arrival process.
    
    Parameters
    ----------
    H : list/cell of matrices of shape(M,M), length(N)
        The H0...HN matrices of the MRAP to check
    K : int, optional
        The dimension of the matrix of joint moments to 
        compute. If K=0, the MxM joint moments will be 
        computed. The default value is 0
    L : int, optional
        The lag at which the joint moments are computed.
        The default value is 1
    prec : double, optional
        Numerical precision to check if the input is valid. 
        The default value is 1e-14
    
    Returns
    -------
    Nm : list/cell of matrices of shape(K+1,K+1), length(N)
        The matrices containing the lag-L joint moments,
        starting from moment 0.
    """

    if butools.checkInput and not CheckMRAPRepresentation (H):
        raise Exception("LagkJointMomentsFromMRAP: Input is not a valid MRAP representation!")    

    if K==0:
        K = H[0].shape[0]-1
    M = len(H)-1
    H0 = H[0]
    sumH = ml.zeros(H[0].shape)
    for i in range(M):
        sumH += H[i+1]

    H0i = la.inv(-H0)
    P = H0i*sumH
    pi = DRPSolve(P)
    
    Pw = ml.eye(H0.shape[0])
    H0p = [ml.matrix(Pw)]
    for i in range(1,K+1):
        Pw *= i*H0i
        H0p.append(ml.matrix(Pw))

    Pl = la.matrix_power (P, L-1)

    Nm = []
    for m in range(M):
        Nmm = ml.zeros ((K+1,K+1))
        for i in range(K+1):
            for j in range(K+1):
                Nmm[i,j] = np.sum (pi * H0p[i] * H0i * H[m+1] * Pl * H0p[j])
        Nm.append(ml.matrix(Nmm))
    return Nm
Example #57
def GM1StationaryDistr (B, R, K):
    """
    Returns the stationary distribution of the G/M/1 type
    Markov chain up to a given level K.
    
    Parameters
    ----------
    A : length(M) list of matrices of shape (N,N)
        Matrix blocks of the G/M/1 type generator in the 
        regular part, from 0 to M-1.
    B : length(M) list of matrices of shape (N,N)
        Matrix blocks of the G/M/1 type generator at the
    R : matrix, shape (N,N)
        Matrix R of the G/M/1 type Markov chain
    K : integer
        The stationary distribution is returned up to
        this level.
    
    Returns
    -------
    pi : array, shape (1,(K+1)*N)
        The stationary probability vector up to level K
    """

    if not isinstance(B,np.ndarray):
        B = np.vstack(B)

    m = R.shape[0]
    I = ml.eye(m)   

    temp = (I-R).I
    if np.max(temp<-100*butools.checkPrecision):
        raise Exception("The spectral radius of R is not below 1: GM1 is not pos. recurrent")
    
    maxb = B.shape[0]//m
    BR = B[(maxb-1)*m:,:]
    for i in range(maxb-1,0,-1):
        BR = R * BR + B[(i-1)*m:i*m,:]

    pix = DTMCSolve(BR)
    pix = pix / np.sum(pix*temp)
    
    pi = [pix]    
    sumpi = np.sum(pix)
    numit = 1
    while sumpi < 1.0-1e-10 and numit < 1+K:
        pix = pix*R; # compute pi_(numit+1)
        numit += 1
        sumpi += np.sum(pix);
        pi.append(ml.matrix(pix))
        if butools.verbose:
            print("Accumulated mass after ", numit, " iterations: ", sumpi)

    if butools.verbose and numit == K+1:
        print("Maximum Number of Components ", numit-1, " reached")
    
    return np.hstack(pi)
Example #58
 def bhatta_matrix(self):
     """Returns a matrix of Bhattacharrya kernel evaluations between datasets"""
     n = len(self.datasets)
     BM = mat.eye(n)
     for i in xrange(n):
         for j in xrange(i):
             BM[i,j] = self.bhatta(i,j)
             BM[j,i] = BM[i,j]
     return BM
Example #59
	def __init__(self, **kwargs):
		super(GPS, self).__init__(**kwargs)

		self.MeasurementCovariance= 0.4 * matlib.eye( len(self.measurementList) )

		easting, northing, number, letter = utm.from_latlon( kwargs.get('latitude', 0) , kwargs.get('longitude', 0) )

		self.gps_zero = dict( x = easting, y = northing, z = kwargs.get('altitude', utm.conversion.R), yaw = pi/2)

		self.calibrated = False