Example #1
 def segmentIntersect1(k):
     p3, p4 = lineArray[k]
     line2 = '[' + vcode(4)(p3) + ',' + vcode(4)(p4) + ']'
     (x3, y3), (x4, y4) = p3, p4
     b1, b2, b3, b4 = boxes[k]
     if not (b3 < B1 or B3 < b1 or b4 < B2 or B4 < b2):
         #if True:
         m23 = mat([p2, p3])
         m14 = mat([p1, p4])
         m = m23 - m14
         v3 = mat([p3])
         v1 = mat([p1])
         v = v3 - v1
         a = m[0, 0]
         b = m[0, 1]
         c = m[1, 0]
         d = m[1, 1]
         det = a * d - b * c
         if det != 0:
             m_inv = mat([[d, -b], [-c, a]]) * (1. / det)
             alpha, beta = (v * m_inv).tolist()[0]
             #alpha, beta = (v*m.I).tolist()[0]
             if -0.0 <= alpha <= 1 and -0.0 <= beta <= 1:
                 pointStorage[line1] += [alpha]
                 pointStorage[line2] += [beta]
                 return list(
                     array(p1) + alpha * (array(p2) - array(p1)))
     return None
Example #2
def con2vert(A, b):
    """
    Convert sets of constraints to a list of vertices (of the feasible region).
    If the shape is open, con2vert returns False for the closed property.
    """
    # Python implementation of con2vert.m by Michael Kleder (July 2005),
    #  available: http://www.mathworks.com/matlabcentral/fileexchange/7894
    #  -con2vert-constraints-to-vertices
    # Author: Michael Kleder (Original)
    #         Andre Campher (Python implementation)
    c = linalg.lstsq(mat(A), mat(b))[0]
    btmp = mat(b)-mat(A)*c
    D = mat(A)/matlib.repmat(btmp, 1, A.shape[1])

    fmatv = qhull(D, "Ft") #vertices on facets

    G  = zeros((fmatv.shape[0], D.shape[1]))
    for ix in range(0, fmatv.shape[0]):
        F = D[fmatv[ix, :], :].squeeze()
        G[ix, :] = linalg.lstsq(F, ones((F.shape[0], 1)))[0].transpose()

    V = G + matlib.repmat(c.transpose(), G.shape[0], 1)
    ux = uniqm(V)

    eps = 1e-13
    Av = dot(A, ux.T)
    bv = tile(b, (1, ux.shape[0]))
    closed = sciall(Av - bv <= eps)

    return ux, closed
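A hypothetical usage sketch for con2vert (not part of the original example): it assumes the module-level helpers used above (qhull, uniqm, mat, matlib, linalg) are importable and that the qhull command-line tool is installed. The constraints x >= 0, y >= 0, x + y <= 1 should recover the triangle's three corners.

import numpy as np

A = np.array([[-1.0, 0.0],    # -x     <= 0   (i.e. x >= 0)
              [0.0, -1.0],    #    -y  <= 0   (i.e. y >= 0)
              [1.0, 1.0]])    #  x + y <= 1
b = np.array([[0.0], [0.0], [1.0]])

vertices, closed = con2vert(A, b)
print(vertices)   # expected: the corners (0,0), (1,0), (0,1) in some order
print(closed)     # expected: True, since the feasible region is bounded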
Example #3
    def __init__(self,
                 m,
                 n,
                 l1_weight,
                 eval_func,
                 eval_func_for_test_set=None,
                 output_func=None,
                 tol=None):
        self.M = m
        self.N = n

        d = np.ones((n))

        self.diagH = sparse.diags(d, 0)
        # to reference the element, use diag.data[0,0]

        self.iter = 0

        self.l1_weight = l1_weight
        self.eval_func = eval_func

        self.eval_func_for_test_set = eval_func_for_test_set
        self.output_func = output_func

        self.x = scipy.mat([0] * n).T
        self.grad = scipy.mat([0] * n).T
        self.dir = scipy.mat([0] * n).T
        self.loss = 0

        if tol is None:
            self.tol = 1e-4
        else:
            self.tol = tol
Example #4
def gradient_per_datum_from_coeffs(coeffs, R, kernel, phi0=False, 
    regularized=True):
    """ For optimizer. Computes gradient from coefficients. """
    if not isinstance(phi0,np.ndarray):
        phi0 = np.zeros(R.size)
    else:
        assert all(np.isreal(phi0))

    phi = coeffs_to_field(coeffs, kernel)
    quasiQ = utils.field_to_quasiprob(phi+phi0)
    G = len(phi)

    R_row = sp.mat(R) # 1 x G
    quasiQ_row = sp.mat(quasiQ) # 1 x G
    reg_row = (1./G)*sp.mat(phi)/(PHI_STD_REG**2) # 1 x G
    kernel_mat = sp.mat(kernel) # G x kernel_dim

    mu_R_row = R_row*kernel_mat # 1 x kernel_dim
    mu_quasiQ_row = quasiQ_row*kernel_mat # 1 x kernel_dim
    mu_reg_row = reg_row*kernel_mat

    if regularized:
        grad_row = mu_R_row - mu_quasiQ_row + mu_reg_row
    else:
        grad_row = mu_R_row - mu_quasiQ_row

    return sp.array(grad_row).ravel() # Returns an array
Example #5
def BuildDataSetfromList(filelist, xlabel='t'):
    """Loads data from *.mat files and builds a datastructure with
    matrices of the signals.  The files in filelist are loaded using
    loadmat(filename) without any attempt to search for the file.  So,
    filelist should either contain fullpaths or the files should be in
    the current directory."""
    mydict={}
    mydict['filenames']=filelist
    mydict['ynames']=[]
    firstfile=1

    for filename in filelist:
#        print("filename="+filename)
        d=loadmat(filename)
        if firstfile==1:
            mydict[xlabel]=d[xlabel]
            mydict['xname']=xlabel
            for name, vector in d.items():
                if name!=xlabel:
                    mydict[name]=c_[mat(vector).T]
                    mydict['ynames'].append(name)
            firstfile=0
        else:
            for name, vector in d.items():
                if name!=xlabel:
                    tempmat=mydict[name]
                    mydict[name]=c_[tempmat,mat(vector).T]
    return mydict
Example #6
    def __init__(self,m,n,l1_weight,eval_func, eval_func_for_test_set = None, output_func = None, tol = None):
        self.M = m
        self.N = n
        
        d = np.ones((n))
       
        self.diagH = sparse.diags(d,0)
        # to reference the element, use diag.data[0,0]

        self.iter = 0
        
        self.l1_weight = l1_weight
        self.eval_func = eval_func
        
        self.eval_func_for_test_set  = eval_func_for_test_set
        self.output_func = output_func

        self.x = scipy.mat([0]*n).T
        self.grad = scipy.mat([0]*n).T
        self.dir = scipy.mat([0]*n).T
        self.loss = 0
        
        if tol is None:
            self.tol = 1e-4
        else:
            self.tol = tol
Example #7
def steepest_descent(A, b, x0, tol=1e-8):
    """
    Uses the steepest descent method to find the x that satisfies Ax = b.

    Inputs:
        A: An m x n NumPy array
        b: An m x 1 NumPy array
        x0: An n x 1 NumPy array that represents the initial guess at a
            solution.
        tol (optional): The tolerance level for convergence. This is compared
                        against the norm(x_n+1 - x_n) each iteration.

    Outputs:
        x: The x that satisfies the equation.
    """
    A = sp.mat(A)
    b = sp.reshape(sp.mat(b),(b.size,1))


    def grad(A, b, x):
        """
        Find the gradient of ||Ax - b||
        Inputs:
            A: An m x n NumPy matrix.
            b: An m x 1 NumPy matrix.
            x: An n x a NumPy matrix.

        Outputs:
            grad: A NumPy matrix representing the gradient of ||Ax - b||
        """
        return np.mat(2  * A.T*(A*x - b))

    def solve_alpha_k(A, b, x):
        """
        Solves for alpha in the steepest descent algorithm
        x_n+1 = x_n - alpha * grad(x_n)

        Inputs:
            A: An m x n NumPy array
            b: An m x 1 NumPy array
            x: The x value where you want alpha to be defined for.

        Outputs:
            alpha: The alpha satisfying the algorithm above.
        """

        gradient = grad(A, b, x)
        return np.array(
            (gradient.T * gradient)/(2 * gradient.T * A.T * A * gradient))[0]



    xold = sp.reshape(sp.mat(x0),(x0.size,1))
    xnew = xold - grad(A, b, xold) * solve_alpha_k(A,b,xold)

    while la.norm(xold - xnew) > tol:
        xold = xnew
        xnew = xold - grad(A, b, xold) * solve_alpha_k(A,b,xold)

    return xnew
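A hypothetical usage sketch (not from the original source), assuming steepest_descent above and its module-level imports (scipy as sp, numpy as np, numpy.linalg as la) are available. For a symmetric positive-definite A, the minimizer of ||Ax - b|| solves Ax = b.

import numpy as np

A = np.array([[3.0, 1.0],
              [1.0, 2.0]])            # symmetric positive definite
b = np.array([9.0, 8.0])
x0 = np.zeros(2)

x = steepest_descent(A, b, x0, tol=1e-10)
print(x)                              # should approach the solution [2, 3] of Ax = b
print(np.allclose(A.dot(np.asarray(x).ravel()), b))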
Example #8
def similarity(k1, k2):

    a1 = mat(k1)
    b1 = mat(k2)
    c = dot(a1, b1.T) / linalg.norm(a1) / linalg.norm(b1)
    print('\n\n\nMatrices #1 and #2 are {}{} similar.'.format(
        c[0, 0] * 100, '%'))
Example #9
def bb_dcgain(sys):
    """Return the steady state value of the step response os sys

    Usage
    =====
    dcgain=dcgain(sys)

    Inputs
    ------

    sys: system

    Outputs
    -------
    dcgain : steady state value
    """

    a = mat(sys.A)
    b = mat(sys.B)
    c = mat(sys.C)
    d = mat(sys.D)
    nx = shape(a)[0]
    if sys.dt != 0.0:
        a = a - eye(nx, nx)
    r = rank(a)
    if r < nx:
        gm = []
    else:
        gm = -c * inv(a) * b + d
    return array(gm)
Example #10
def full_obs(sys, poles):
    """Full order observer of the system sys

    Call:
    obs=full_obs(sys,poles)

    Parameters
    ----------
    sys : System in State Space form
    poles: desired observer poles

    Returns
    -------
    obs: ss
    Observer

    """
    if isinstance(sys, TransferFunction):
        print("System must be in state space form")
        return
    a = mat(sys.A)
    b = mat(sys.B)
    c = mat(sys.C)
    d = mat(sys.D)
    L = place(a.T, c.T, poles)
    L = mat(L).T
    Ao = a - L * c
    Bo = hstack((b - L * d, L))
    n = shape(Ao)
    m = shape(Bo)
    Co = eye(n[0], n[1])
    Do = zeros((n[0], m[1]))
    obs = StateSpace(Ao, Bo, Co, Do, sys.dt)
    return obs
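A hypothetical usage sketch for full_obs, assuming the module already imports mat, shape, hstack, eye, zeros, place, TransferFunction and StateSpace (e.g. from numpy and the python-control package); the second-order system below is invented for illustration.

from control import StateSpace

A = [[0.0, 1.0], [-2.0, -3.0]]
B = [[0.0], [1.0]]
C = [[1.0, 0.0]]
D = [[0.0]]
sys = StateSpace(A, B, C, D)          # observable pair (A, C)

obs = full_obs(sys, [-5.0, -6.0])     # observer poles at -5 and -6
print(obs.A)                          # should equal A - L*C for the computed gain L
print(obs.B)                          # [B - L*D, L]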
Example #11
def accuracy(p, colname='ClassLabel'):
    # find out how many classes are in the experiment
    numSamples = {}      # dict mapping each class label to its sample count
    labels = p.column(col=2)[1]   # get the column of the actual labels
    for l in labels:
        if not(l in numSamples.keys()):
            numSamples.update({l:0})
    # count number of samples per class
    for l in labels:
        numSamples[l] = numSamples[l] + 1
    numLabels = len(numSamples.keys())
    confusionMatrix = scipy.mat( numpy.zeros( (numLabels,numLabels) ) )
    hdr_actual = '%s-actual' % colname
    hdr_predic = '%s-prediction' % colname
    mapLabels = {}
    cnt = 0
    for l in numSamples.keys():
       mapLabels.update({l:cnt})
       cnt = cnt + 1
    actualLabels = p.column(hdr_actual)
    predictLabels = p.column(hdr_predic)
    for cnt in range(0,len(actualLabels[0])):
        pLabel = mapLabels[predictLabels[1][cnt]]     # predicted label
        aLabel = mapLabels[actualLabels[1][cnt]]      # actual Label
        if not(predictLabels[0][cnt] == actualLabels[0][cnt]):     # sanity check: this should never happen; it just makes sure we are comparing the same subject
              assert False, "This event should NOT happen ever!!! Are you sure you are using the correct Pyxel version??? I am comparing labels of two different subjects!!"
        confusionMatrix[aLabel,pLabel] = confusionMatrix[aLabel,pLabel] + 1.0
    return confusionMatrix
Example #12
def dsimul(sys, u):
    """Simulate the discrete system sys
    Only for discrete systems!!!

    Call:
    y=dsimul(sys,u)

    Parameters
    ----------
    sys : Discrete System in State Space form
    u   : input vector
    Returns
    -------
    y: ndarray
    Simulation results

    """
    a = mat(sys.A)
    b = mat(sys.B)
    c = mat(sys.C)
    d = mat(sys.D)
    nx = shape(a)[0]
    ns = shape(u)[1]
    xk = zeros((nx, 1))
    for i in arange(0, ns):
        uk = u[:, i]
        xk_1 = a * xk + b * uk
        yk = c * xk + d * uk
        xk = xk_1
        if i == 0:
            y = yk
        else:
            y = hstack((y, yk))
    y = array(y).T
    return y
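A hypothetical usage sketch for dsimul, assuming the module-level imports used above (mat, shape, zeros, arange, hstack, array) plus StateSpace from the python-control package; the first-order system below is made up for illustration.

import numpy as np
from control import StateSpace

# x[k+1] = 0.5*x[k] + u[k],  y[k] = x[k],  sampling time 0.1 s
sysd = StateSpace([[0.5]], [[1.0]], [[1.0]], [[0.0]], 0.1)
u = np.ones((1, 20))        # unit step input, 20 samples
y = dsimul(sysd, u)
print(y.shape)              # (20, 1): one row per sample
print(y[-1])                # should approach the DC gain 1/(1 - 0.5) = 2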
Example #13
def qnwcheb1(n, a, b):
    """ Univariate Gauss-Chebyshev quadrature nodes and weights

    Parameters
    -----------
    n : int
        number of nodes
    a : float
        left endpoint
    b : float
        right endpoint

    Returns
    ---------
    x : array, shape (n,)
        nodes
    w : array, shape (n,)
        weights

    Notes
    ---------
    
    Port of the qnwcheb1 function in the compecon matlab toolbox.
    """
    x = ((b + a) / 2 - (b - a) / 2
         * sp.cos(sp.pi / n * sp.arange(0.5, n + 0.5, 1)))
    w2 =  sp.r_[1, -2. / (sp.r_[1:(n - 1):2] * sp.r_[3:(n + 1):2])]
    w1 = (sp.cos(sp.pi / n * sp.mat((sp.r_[0:n] + 0.5)).T *
                 sp.mat((sp.r_[0:n:2]))).A)
    w0 = (b - a) / n
    w = w0 * sp.dot(w1, w2)
    return x, w
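A hypothetical usage sketch, assuming qnwcheb1 above and a scipy version that still exposes the numpy aliases (sp.cos, sp.mat, ...) it relies on. The nodes and weights approximate a plain integral, so integrating sin over [0, pi] should give roughly 2.

import numpy as np

x, w = qnwcheb1(20, 0.0, np.pi)
approx = np.dot(w, np.sin(x))   # quadrature approximation of the integral of sin on [0, pi]
print(approx)                   # should be close to the exact value 2.0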
Example #14
def dcgain(sys):
    """Return the steady state value of the step response of sys

    Usage
    =====
    dcgain=dcgain(sys)

    Inputs
    ------

    sys: system

    Outputs
    -------
    dcgain : steady state value
    """

    a=mat(sys.A)
    b=mat(sys.B)
    c=mat(sys.C)
    d=mat(sys.D)
    nx=shape(a)[0]
    if sys.Tsamp!=0.0:
        a=a-eye(nx,nx)
    r=rank(a)
    if r<nx:
        gm=[]
    else:
        gm=-c*inv(a)*b+d
    return array(gm)
Example #15
def qhull(V, qstring):
    """
    Use qhull to determine convex hull / volume / normals.
     V - [matrix] vertices
     qstring - [string] arguments to pass to qhull
    """
    try:
        qhullp = subprocess.Popen(["qhull", qstring],
                              stdin=subprocess.PIPE, stdout=subprocess.PIPE)
        Vc = qhullp.communicate(qhullstr(V))[0] #qhull output to Vc
        
        if qstring == "FS": #calc area and volume
            ks = Vc.split('\n')[-2]
            Vol = float(ks.split(' ')[-2]) #get volume of D-hull
            return Vol
        elif qstring == "Ft": #calc vertices and facets
            ks = Vc.split('\n')
            fms = int(ks[1].split(' ')[1]) #get size of facet matrix
            fmat = ks[-fms-1:-1]
            fmat = mat(';'.join(fmat)) #generate matrix
            fmatv = fmat[:, 1:] #vertices on facets
            return array(fmatv)
        elif qstring == "n": #calc convex hull and get normals
            ks = ';'.join(Vc.split('\n')[2:]) #remove leading dimension output
            k = mat(ks[:-1]) #convert to matrix with vertices
            return array(k)
        else:
            exit(1)
    except:
        raise NameError('QhullError')
Example #16
def dsimul(sys,u):
    """Simulate the discrete system sys
    Only for discrete systems!!!

    Call:
    y=dsimul(sys,u)

    Parameters
    ----------
    sys : Discrete System in State Space form
    u   : input vector
    Returns
    -------
    y: ndarray
    Simulation results

    """
    a=mat(sys.A)
    b=mat(sys.B)
    c=mat(sys.C)
    d=mat(sys.D)
    nx=shape(a)[0]
    ns=shape(u)[1]
    xk=zeros((nx,1))
    for i in arange(0,ns):
        uk=u[:,i]
        xk_1=a*xk+b*uk
        yk=c*xk+d*uk
        xk=xk_1
        if i==0:
            y=yk
        else:
            y=hstack((y,yk))
    y=array(y).T
    return y
Example #17
def testGauss(k1=1.3):
    dataArr, labelArr = loadData('testSetRBF.txt')

    # train to obtain the parameters
    b, alphas = smoP(dataArr, labelArr, 200, 0.0001, 10000, ('Gauss', k1))
    datMat = sp.mat(dataArr)
    labelMat = sp.mat(labelArr).transpose()
    svInd = sp.nonzero(alphas.A > 0)[0]  # indices of the support vectors
    sVs = datMat[svInd]
    labelSV = labelMat[svInd]
    print("there are %d Support Vectors" % np.shape(sVs)[0])
    m, n = np.shape(datMat)
    errorCount = 0
    for i in range(m):
        kernelEval = kernelTrans(sVs, datMat[i, :], ('Gauss', k1))
        predict = kernelEval.T * (sp.multiply(labelSV, alphas[svInd])) + b
        if np.sign(predict) != np.sign(labelArr[i]):
            errorCount += 1
    print("the training error rate is: %f" % (float(errorCount) / m))

    # test how the parameters perform on new data
    dataArr, labelArr = loadData('testSetRBF2.txt')
    errorCount = 0
    datMat = sp.mat(dataArr)
    labelMat = sp.mat(labelArr).transpose()
    m, n = np.shape(datMat)
    for i in range(m):
        kernelEval = kernelTrans(sVs, datMat[i, :], ('Gauss', k1))
        predict = kernelEval.T * (sp.multiply(labelSV, alphas[svInd])) + b
        if np.sign(predict) != np.sign(labelArr[i]):
            errorCount += 1
    print("the test error rate is: %f" % (float(errorCount) / m))
Example #18
def cssBlk(pin, pout, sys, X0=[]):
    """ 

    Continuous state space block

    Call: cssBlk(pin,pout, sys,X0)

    Parameters
    ----------
        pin : connected input ports
        pout: connected output ports
        sys: Continuous system in SS form
        X0: Initial conditions

    Returns
    -------
        blk  : RCPblk

    """
    if isinstance(sys, TransferFunction):
        sys = tf2ss(sys)

    nin = size(pin)
    ni = shape(sys.B)[1]
    if (nin != ni):
        raise ValueError("Block Robi have %i inputs: received %i input ports" %
                         (nin, ni))

    no = shape(sys.C)[0]
    nout = size(pout)
    if (no != nout):
        raise ValueError("Block have %i outputs: received %i output ports" %
                         (nout, no))

    a = reshape(sys.A, (1, size(sys.A)), 'C')
    b = reshape(sys.B, (1, size(sys.B)), 'C')
    c = reshape(sys.C, (1, size(sys.C)), 'C')
    d = reshape(sys.D, (1, size(sys.D)), 'C')
    nx = shape(sys.A)[0]

    if (size(X0) == nx):
        X0 = reshape(X0, (1, size(X0)), 'C')
    else:
        X0 = mat(zeros((1, nx)))

    indA = 1
    indB = indA + nx * nx
    indC = indB + nx * ni
    indD = indC + nx * no
    indX = indD + ni * no
    intPar = [nx, ni, no, indA, indB, indC, indD, indX]
    realPar = hstack((mat([0.0]), a, b, c, d, X0))

    if d.any():
        uy = 1
    else:
        uy = 0

    blk = RCPblk('css', pin, pout, [nx, 0], uy, realPar, intPar)
    return blk
Example #19
def smoP(dataMatIn, classLabels, C, toler, maxIter, kTup=('normal', 0)):
    # Outer loop, analogous to smoSimple but with more conditions for exiting; it seems to stop after about 6 iterations.
    oS = optStruct(sp.mat(dataMatIn),
                   sp.mat(classLabels).transpose(), C, toler, kTup)
    iterm = 0
    entireSet = True
    alphaPairsChanged = 0
    while (iterm < maxIter) and ((alphaPairsChanged > 0) or (entireSet)):
        alphaPairsChanged = 0
        if entireSet:
            for i in range(oS.m):  # iterate over all samples to pick the first alpha
                alphaPairsChanged += innerL(i, oS)  # pick the second alpha
                print("fullSet, iter: %d i:%d, pairs changed %d" %
                      (iterm, i, alphaPairsChanged))
            iterm += 1
        else:  # iterate over the non-bound samples (those with 0 < alpha < C) to pick the first alpha
            nonBoundIs = sp.nonzero((oS.alphas.A > 0) * (oS.alphas.A < C))[0]
            for i in nonBoundIs:
                alphaPairsChanged += innerL(i, oS)  # pick the second alpha
                print("non-bound, iter: %d i:%d, pairs changed %d" %
                      (iterm, i, alphaPairsChanged))
            iterm += 1
        if entireSet:  # toggle between full-set and non-bound passes
            entireSet = False
        elif (alphaPairsChanged == 0):
            entireSet = True
        print("iteration number: %d" % iterm)
    return oS.b, oS.alphas
Example #20
def Feynman_diagrams(phi_t, R, Delta, t, N):
    # Prepare the stuff for the case of maxent or finite t
    if not np.isfinite(t):
        G = len(phi_t)
        alpha = Delta._kernel_dim
        # Evaluate propagator matrix
        Delta_sparse = Delta.get_sparse_matrix()
        Delta_mat = Delta_sparse.todense() * (N / G)
        Delta_diagonalized = np.linalg.eigh(Delta_mat)
        kernel_basis = np.zeros([G, alpha])
        for i in range(alpha):
            kernel_basis[:, i] = Delta_diagonalized[1][:, i].ravel()
        M_mat = diags(sp.exp(-phi_t), 0).todense() * (N / G)
        M_mat_on_kernel = sp.mat(kernel_basis).T * M_mat * sp.mat(kernel_basis)
        M_inv_on_kernel = sp.linalg.inv(M_mat_on_kernel)
        P_mat = sp.mat(kernel_basis) * M_inv_on_kernel * sp.mat(kernel_basis).T
        # Evaluate vertex vector
        V = sp.exp(-phi_t) * (N / G)
    else:
        G = len(phi_t)
        # Evaluate propagator matrix
        H = deft_core.hessian(phi_t, R, Delta, t, N)
        A_mat = H.todense() * (N / G)
        P_mat = np.linalg.inv(A_mat)
        # Evaluate vertex vector
        V = sp.exp(-phi_t) * (N / G)

    # Calculate Feynman diagrams
    correction = diagrams_1st_order(G, P_mat, V)

    # Return the correction and other stuff
    w_sample_mean = 1.0
    w_sample_mean_std = 0.0
    return correction, w_sample_mean, w_sample_mean_std
Example #21
def calcInvFisher(sigma, invSigma=None, factorSigma=None):
    """ Efficiently compute the exact inverse of the FIM of a Gaussian.
    Returns a list of the diagonal blocks. """
    if invSigma is None:
        invSigma = inv(sigma)
    if factorSigma is None:
        factorSigma = cholesky(sigma)
    dim = sigma.shape[0]

    invF = [mat(1 / (invSigma[-1, -1] + factorSigma[-1, -1]**-2))]
    invD = 1 / invSigma[-1, -1]
    for k in reversed(list(range(dim - 1))):
        v = invSigma[k + 1:, k]
        w = invSigma[k, k]
        wr = w + factorSigma[k, k]**-2
        u = dot(invD, v)
        s = dot(v, u)
        q = 1 / (w - s)
        qr = 1 / (wr - s)
        t = -(1 + q * s) / w
        tr = -(1 + qr * s) / wr
        invF.append(
            blockCombine([[qr, tr * u],
                          [mat(tr * u).T, invD + qr * outer(u, u)]]))
        invD = blockCombine([[q, t * u],
                             [mat(t * u).T, invD + q * outer(u, u)]])

    invF.append(sigma)
    invF.reverse()
    return invF
Example #22
def calcInvFisher(sigma, invSigma=None, factorSigma=None):
    """ Efficiently compute the exact inverse of the FIM of a Gaussian.
    Returns a list of the diagonal blocks. """
    if invSigma is None:
        invSigma = inv(sigma)
    if factorSigma is None:
        factorSigma = cholesky(sigma)
    dim = sigma.shape[0]

    invF = [mat(1 / (invSigma[-1, -1] + factorSigma[-1, -1] ** -2))]
    invD = 1 / invSigma[-1, -1]
    for k in reversed(list(range(dim - 1))):
        v = invSigma[k + 1:, k]
        w = invSigma[k, k]
        wr = w + factorSigma[k, k] ** -2
        u = dot(invD, v)
        s = dot(v, u)
        q = 1 / (w - s)
        qr = 1 / (wr - s)
        t = -(1 + q * s) / w
        tr = -(1 + qr * s) / wr
        invF.append(blockCombine([[qr, tr * u], [mat(tr * u).T, invD + qr * outer(u, u)]]))
        invD = blockCombine([[q , t * u], [mat(t * u).T, invD + q * outer(u, u)]])

    invF.append(sigma)
    invF.reverse()
    return invF
Example #23
def ned2ecef(lat, lon, alt, n, e, d):
    X0, Y0, Z0 = coord.geodetic2ecef(lat, lon, alt)
    lat, lon = radians(lat), radians(lon)
    
    pitch = math.pi/2 + lat
    yaw = -lon 
    
    my = mat('[%f %f %f; %f %f %f; %f %f %f]' %
        (cos(pitch), 0, -sin(pitch),
         0,1,0,
         sin(pitch), 0, cos(pitch)))
    
    mz = mat('[%f %f %f; %f %f %f; %f %f %f]' %
        (cos(yaw), sin(yaw),0,
         -sin(yaw),cos(yaw),0,
         0,0,1))
    
    mr = mat('[%f %f %f; %f %f %f; %f %f %f]' %
        (-cos(lon)*sin(lat), -sin(lon), -cos(lat) * cos(lon), 
         -sin(lat)*sin(lon), cos(lon), -sin(lon)*cos(lat),
         cos(lat), 0, -sin(lat)))
    
    geo = mat('[%f; %f; %f]' % (X0, Y0, Z0))
    ned = mat('[%f; %f; %f]' % (n, e, d))
    res = mr*ned + geo
    return res[0], res[1], res[2]  
Example #24
def arnoldi(A, v0, k):
    """
    Arnoldi algorithm (Krylov approximation of a matrix)
        input: 
            A: matrix to approximate
            v0: initial vector (should be in matrix form) 
            k: number of Krylov steps 
        output: 
            V: matrix (large, N*k) containing the orthogonal vectors
            H: matrix (small, k*k) containing the Krylov approximation of A

    Author: Vasile Gradinaru, 14.12.2007 (Rennes)
    """
    #print 'ARNOLDI METHOD'
    inputtype = A.dtype.type
    V = mat( v0.copy() / norm(v0), dtype=inputtype)
    H = mat( zeros((k+1,k), dtype=inputtype) )
    for m in range(k):
        vt = A*V[ :, m]
        for j in xrange( m+1):
            H[ j, m] = (V[ :, j].H * vt )[0,0]
            vt -= H[ j, m] * V[:, j]
        H[ m+1, m] = norm(vt)
        if m != k-1:
            V =  hstack( (V, vt.copy() / H[ m+1, m] ) ) 
    return V,  H
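A hypothetical usage sketch, assuming arnoldi above with its module-level names (mat, zeros, norm, hstack) already imported from numpy / numpy.linalg; the diagonal test matrix is made up for illustration.

import numpy as np

A = np.mat(np.diag([1.0, 2.0, 3.0, 4.0, 5.0]))
v0 = np.mat(np.ones((5, 1)))

V, H = arnoldi(A, v0, k=3)
print(V.shape)    # (5, 3): orthonormal Krylov basis vectors
print(H.shape)    # (4, 3): upper Hessenberg Krylov approximation of A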
Example #25
def testDigits(kTup=('normal', 10)):
    # essentially the same as testGauss
    dataArr, labelArr = loadImage('trainingDigits')
    b, alphas = smoP(dataArr, labelArr, 200, 0.0001, 10000, kTup)
    datMat = sp.mat(dataArr)
    labelMat = sp.mat(labelArr).transpose()
    svInd = sp.nonzero(alphas.A > 0)[0]  # indices of the support vectors
    sVs = datMat[svInd]
    labelSV = labelMat[svInd]
    print("there are %d Support Vectors" % np.shape(sVs)[0])
    m, n = np.shape(datMat)
    errorCount = 0
    for i in range(m):
        kernelEval = kernelTrans(sVs, datMat[i, :], kTup)
        predict = kernelEval.T * (sp.multiply(labelSV, alphas[svInd])) + b
        if np.sign(predict) != np.sign(labelArr[i]):
            errorCount += 1
    print("the training error rate is: %f" % (float(errorCount) / m))

    # test how the parameters perform on new data
    dataArr, labelArr = loadImage('testDigits')
    errorCount = 0
    datMat = sp.mat(dataArr)
    labelMat = sp.mat(labelArr).transpose()
    m, n = np.shape(datMat)
    for i in range(m):
        kernelEval = kernelTrans(sVs, datMat[i, :], kTup)
        predict = kernelEval.T * (sp.multiply(labelSV, alphas[svInd])) + b
        if np.sign(predict) != np.sign(labelArr[i]):
            errorCount += 1
    print("the test error rate is: %f" % (float(errorCount) / m))
Example #26
File: nes.py Project: HKou/pybrain
 def genInitSigmaFactor(self):
     """ depending on the algorithm settings, we start out with in identity matrix, or perturb it """
     if self.perturbedInitSigma:
         res = mat(eye(self.xdim)*self.initSigmaCoeff+randn(self.xdim, self.xdim)*self.initSigmaRandCoeff)            
     else:
         res = mat(eye(self.xdim)*self.initSigmaCoeff)
     return res   
Example #27
def make_rot(a):
    """Make 3D rotation (around z, then new x, then new y)
    """
    a1 = a[0]
    a2 = a[1]
    a3 = a[2]

    ca1 = math.cos(a1)
    sa1 = math.sin(a1)
    ca2 = math.cos(a2)
    sa2 = math.sin(a2)
    ca3 = math.cos(a3)
    sa3 = math.sin(a3)

    # rot: new y, by a3
    roty = augment_3x3_matrix(
        mat([[ca3, 0., sa3], [0., 1., 0.], [-sa3, 0, ca3]]))

    # rot: new x, by a2
    rotx = augment_3x3_matrix(
        mat([[1., 0., 0.], [0., ca2, -sa2], [0., sa2, ca2]]))

    # rot: z, by a1
    rotz = augment_3x3_matrix(
        mat([[ca1, -sa1, 0.], [sa1, ca1, 0.], [0., 0., 1.]]))
    return roty * rotx * rotz
Example #28
def acker(A,B,poles):
    """Pole placement using Ackermann method

    Call:
    k=acker(A,B,poles)

    Parameters
    ----------
    A, B : State and input matrix of the system
    poles: desired poles

    Returns
    -------
    k: matrix
    State feedback gains

    """
    a=mat(A)
    b=mat(B)
    p=real(poly(poles))
    ct=ctrb(A,B)
    if det(ct)==0:
        k=0
        print "Pole placement invalid"
    else:
        n=size(p)
        pmat=p[n-1]*a**0
        for i in arange(1,n):
            pmat=pmat+p[n-i-1]*a**i
        k=inv(ct)*pmat
        k=k[-1][:]
    return k
Example #29
def acker(A,B,poles):
    """Pole placemenmt using Ackermann method

    Call:
    k=acker(A,B,poles)

    Parameters
    ----------
    A, B : State and input matrix of the system
    poles: desired poles

    Returns
    -------
    k: matrix
    State feedback gains

    """
    a=mat(A)
    b=mat(B)
    p=real(poly(poles))
    ct=ctrb(A,B)
    if det(ct)==0:
        k=0
        print "Pole placement invalid"
    else:
        n=size(p)
        pmat=p[n-1]*a**0
        for i in arange(1,n):
            pmat=pmat+p[n-i-1]*a**i
        k=inv(ct)*pmat
        k=k[-1][:]
    return k
Example #30
def full_obs(sys,poles):
    """Full order observer of the system sys

    Call:
    obs=full_obs(sys,poles)

    Parameters
    ----------
    sys : System in State Space form
    poles: desired observer poles

    Returns
    -------
    obs: ss
    Observer

    """
    if isinstance(sys, TransferFunction):
        print("System must be in state space form")
        return
    a=mat(sys.A)
    b=mat(sys.B)
    c=mat(sys.C)
    d=mat(sys.D)
    L=place(a.T,c.T,poles)
    L=mat(L).T
    Ao=a-L*c
    Bo=hstack((b-L*d,L))
    n=shape(Ao)
    m=shape(Bo)
    Co=eye(n[0],n[1])
    Do=zeros((n[0],m[1]))
    obs=StateSpace(Ao,Bo,Co,Do,sys.dt)
    return obs
Example #31
def minreal(sys):
    """Minimal representation for state space systems

    Usage
    =====
    sysmin=minreal(sys)

    Inputs
    ------

    sys: system in ss or tf form

    Outputs
    -------
    sysmin: system in state space form
    """
    a=mat(sys.A)
    b=mat(sys.B)
    c=mat(sys.C)
    d=mat(sys.D)
    nx=shape(a)[0]
    ni=shape(b)[1]
    no=shape(c)[0]

    out=tb03ad(nx,no,ni,a,b,c,d,'R')

    nr=out[3]
    A=out[0][:nr,:nr]
    B=out[1][:nr,:ni]
    C=out[2][:no,:nr]
    sysf=ss(A,B,C,sys.D,sys.Tsamp)
    return sysf
Example #32
def get_projection_matrix(u,z,v,k=TOP_NUM_SINGULAR_VALUES):
    """
        generate the projection matrix which contains a 
        score vector for the patterns for each word pair 
    """ 
    #determine the best patterns for comparison   
    column_indexes=get_top_k_column_indexes(z)
    
    #using the column indexes of the best patterns recreate the
    #u & z matrices containing only those corresponding columns

    #creating the uk matrix
    uk=[]
    for r in range(len(u)):
        uk.append([])
        for index in column_indexes:
            uk[r].append(u[r][index])

    #creating the zk matrix
    zk=[]
    for index in column_indexes:
        zk.append([])
        for col in range(len(v)):
            if (col==index):
                zk[len(zk)-1].append(z[index])
            else: 
                zk[len(zk)-1].append(0)
    
    #calculate the projection matrix as u.z
    return mat(uk)*mat(zk)
Example #33
def TMSolve(C, T, Number):
	NewCMatrix = []
	NewTMatrix = []
	#print len(T)
	#Cff = scipy.mat(C)
	#print Cff
	#Tff= scipy.mat(T)
	#print Tff
	nodelen = len(Number)
	i = 1
	Num = 0
	while i < nodelen:
		TList = []
		CList = []
		j = 1
		while j < nodelen:
			#print j - 1
			#Ftt = 'T' + str(Num+i-1) + '_' + str(Num+j-1)
			Ftt = T[Number[i]-1][Number[j]-1]
			TList.append(Ftt)
			#Fcc = 'C' + str(Num+i-1) + '_' + str(Num+j-1)
			Fcc = C[Number[i]-1][Number[j]-1]
			CList.append(Fcc)	
			j = j + 1
		i = i + 1
		NewTMatrix.append(TList)
		NewCMatrix.append(CList)
	Tff = scipy.mat(NewTMatrix)
	Cff = scipy.mat(NewCMatrix)
	TffI = Tff.I
	AMat = TffI*Cff
	lam = scipy.linalg.eigvals(AMat)
	return lam
Example #34
def coeffs_to_field(coeffs, kernel):
    """ For maxent algorithm. """

    # Get number of gridpoints and dimension of kernel
    G = kernel.shape[0]
    kernel_dim = kernel.shape[1]

    # Make sure coeffs is valid
    if not (len(coeffs) == kernel_dim):
        raise ControlledError(
            '/coeffs_to_field/ coeffs must have length %d: len(coeffs) = %d' %
            (kernel_dim, len(coeffs)))
    if not all(np.isreal(coeffs)):
        raise ControlledError(
            '/coeffs_to_field/ coeffs is not real: coeffs = %s' % coeffs)
    if not all(np.isfinite(coeffs)):
        raise ControlledError(
            '/coeffs_to_field/ coeffs is not finite: coeffs = %s' % coeffs)

    # Convert to matrices
    kernel_mat = sp.mat(kernel)  # G x kernel_dim matrix
    coeffs_col = sp.mat(coeffs).T  # kernel_dim x 1 matrix
    field_col = kernel_mat * coeffs_col  # G x 1 matrix

    return sp.array(field_col).ravel()  # Returns an array
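A hypothetical usage sketch, assuming coeffs_to_field above together with the module's imports (sp, np, ControlledError); the toy kernel below is invented for illustration.

import numpy as np

kernel = np.eye(5)[:, :3]            # G = 5 gridpoints, kernel_dim = 3
coeffs = np.array([1.0, -2.0, 0.5])

phi = coeffs_to_field(coeffs, kernel)
print(phi)                           # length-5 array: kernel @ coeffs = [1, -2, 0.5, 0, 0]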
Example #35
def TESolve(C, T, Number, PNodes):
	NewCMatrix = []
	NewTMatrix = []
	for x in PNodes:
		Number.append(x)
	Number.sort()
	Cff = scipy.mat(C)
	#print Cff
	Tff= scipy.mat(T)
	#print Tff
	nodelen = len(Number)
	i = 0
	Num = 0
	while i < nodelen:
		TList = []
		CList = []
		j = 0
		while j < nodelen:
			#Ftt = 'T' + str(Num+i-1) + '_' + str(Num+j-1)
			Ftt = T[Number[i]-1][Number[j]-1]
			TList.append(Ftt)
			#Fcc = 'C' + str(Num+i-1) + '_' + str(Num+j-1)
			Fcc = C[Number[i]-1][Number[j]-1]
			CList.append(Fcc)	
			j = j + 1
		i = i + 1
		NewTMatrix.append(TList)
		NewCMatrix.append(CList)
	Tff = scipy.mat(NewTMatrix)
	Cff = scipy.mat(NewCMatrix)
	#print Cff
	TffI = Tff.I
	AMat = TffI*Cff
	lam = scipy.linalg.eigvals(AMat)
	return lam
Example #36
 def cosine(self, list1, list2):
     a = mat(list1)
     b = mat(list2)
     if linalg.norm(a) < 1e-3 or linalg.norm(b) < 1e-3:
         return 10
     c = dot(a, b.T) / linalg.norm(a) / linalg.norm(b)
     return c[0, 0]
Example #37
def kNN(segment_list, k, emb_dict):
  
  pred_matrix = []
  gold_word_list = []
  
  '''
  # TEST
  segment_list = []
  counter = 0
  for key, value in emb_dict.items():
    segment_list.append(key)
    counter += 1
    if counter == 5:
      break
  '''
  
  for seg in segment_list:
    print(len(emb_dict[seg]))
    print(seg)
    pred_matrix.append(emb_dict[seg])
    gold_word_list.append(seg)
    
  # Obtain now the closest neighbors.
  word_list = []
  emb_matrix = []
  for key, value in emb_dict.items():
    if containsPunctuation(key + " "):
      continue
    word_list.append(key)
    emb_matrix.append(value)
 
  pred_matrix=mat(pred_matrix)
  emb_matrix = mat(emb_matrix)
  rows=len(segment_list) + 1

  print('Calculating simi_matrix...')
  print(pred_matrix.shape)
  print(emb_matrix.shape)
  simi_matrix=1-cdist(pred_matrix, emb_matrix, 'cosine')
  print('...simi_matrix done!')
    
  max_index_matrix=simi_matrix.argsort()[:,-k-1:]
  #max_index_matrix=simi_matrix.argsort()[:,k+1:]

  pred_word_matrix=[]
  for row in range(max_index_matrix.shape[0]):
    pred_list=[word_list[i] for i in max_index_matrix[row]] 
    pred_word_matrix.append(pred_list)

  for i in range(len(segment_list)):
    print('The original segment: ' + gold_word_list[i])
    #for j in [4,3,2,1,0]: 
    for ij in range(k):
            j = k - ij
            #if pred_word_matrix[i][j] == gold_word_list[i]:
            #    continue
            print('Predicted neighbor segment: ' + pred_word_matrix[i][j])
            #print('Distance: ' + str(cdist([word_list[i]], [emb_dict[pred_word_matrix[i][j]]], 'cosine')))
            print('Similarity: ' + str(1-cdist([emb_dict[gold_word_list[i]]], [emb_dict[pred_word_matrix[i][j]]], 'cosine')))
Example #38
def nonna_lsq(target, aux, idx=(), names=(), order=2):
	"""
	This function returns the coefficients of the least square prediction of the target
	signal, using the auxiliary signals and their powers, as specified by the order argument.
	
	Input arguments:
	target = target signal
	aux    = matrix of auxiliary signals
	idx    = boolean vector to select a subset of the data for the LSQ fit
	order  = order of the polynomial of aux signals to be used in the fit, default is 2
	names  = list of the auxiliary signal names
	
	Output:
	p      = list of coefficients
	X      = matrix of the signals used in the reconstruction
	cnames = list of the corresponding signals
	
	Note that the mean will be removed from the auxiliary signals. 
	"""
	# number of auxiliary channels
	naux = scipy.shape(aux)[1]
	
	if len(names) == 0:
		# since the user didn't provide signal names, let's build some
		names = ['S'+str(x) for x in scipy.arange(naux)+1]
		
	if len(idx) == 0:
		# no index means use all
		idx = numpy.array(target, dtype=bool)
		idx[:] = True
	
	##### PREPARE CHANNELS FOR LSQ PREDICTION 

	# prepare channels and their squared values
	X = scipy.zeros((scipy.shape(aux)[0], order*scipy.shape(aux)[1]+1))
	cnames = []
	for i in range(scipy.shape(aux)[1]):
		for j in range(order):
			# add the (j+1)th power of the signal after removing the mean
			X[:,order*i+j] = numpy.power((aux[:,i] - scipy.mean(aux[idx,i])), j+1)
			# then remove the mean of the result
			X[:,order*i+j] = X[:,order*i+j] - scipy.mean(X[idx,order*i+j])
			# save the name, including the power
			if j==0:
				cnames.append(names[i])
			else:
				cnames.append(names[i]+'^'+str(j+1))
				
	# add a constant at the end of the list
	X[:,-1] = 1
	cnames.append('1')
	# convert to matrix object for simpler manipulation
	X = scipy.mat(X)
	
	##### best estimate of coefficients to minimize the squared error
	p = scipy.linalg.inv(X[idx,:].T * X[idx,:]) * X[idx,:].T * scipy.mat(target[idx]).T

	# return all the results
	return p, X, cnames
Example #39
def scipy_cosine_similarity(tf_idf1, tf_idf2):
  # input: vectors 1 and 2 (if the vectors have different lengths, missing keys are zero-filled so both end up the same length)
  # output: cosine similarity
  # computed using the scipy library

  # http://stackoverflow.com/questions/21980644/calculate-cosine-similarity-of-two-matrices-python
  a, b = mat(tf_idf1), mat(tf_idf2)
  c = dot(a, b.T) / linalg.norm(a) / linalg.norm(b)
  return c.A1[0]    # http://stackoverflow.com/questions/3337301/numpy-matrix-to-array
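A hypothetical usage sketch, assuming scipy_cosine_similarity above and its module-level imports (mat, dot, linalg); the tf-idf vectors are made up.

tf_idf1 = [0.1, 0.0, 0.3, 0.6]
tf_idf2 = [0.2, 0.1, 0.3, 0.4]

sim = scipy_cosine_similarity(tf_idf1, tf_idf2)
print(sim)    # value in [0, 1]; 1.0 means the vectors point in the same direction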
Example #40
    def test_approximate_spectral_radius(self):
        cases = []

        cases.append(matrix([[-4 - 4.0j]]))
        cases.append(matrix([[-4 + 8.2j]]))

        cases.append(matrix([[2.0 - 2.9j, 0], [0, 1.5]]))
        cases.append(matrix([[-2.0 - 2.4j, 0], [0, 1.21]]))

        cases.append(
            matrix([[100 + 1.0j, 0, 0], [0, 101 - 1.0j, 0], [0, 0,
                                                             99 + 9.9j]]))

        for i in range(1, 6):
            cases.append(matrix(rand(i, i) + 1.0j * rand(i, i)))

        # method should be almost exact for small matrices
        for A in cases:
            Asp = csr_matrix(A)
            [E, V] = linalg.eig(A)
            E = abs(E)
            largest_eig = (E == E.max()).nonzero()[0]
            expected_eig = E[largest_eig]
            expected_vec = V[:, largest_eig]

            assert_almost_equal(approximate_spectral_radius(A), expected_eig)
            assert_almost_equal(approximate_spectral_radius(Asp), expected_eig)
            vec = approximate_spectral_radius(A, return_vector=True)[1]
            rayleigh = abs(
                dot(ravel(A * vec), ravel(vec)) / dot(ravel(vec), ravel(vec)))
            assert_almost_equal(rayleigh, expected_eig, decimal=4)
            vec = approximate_spectral_radius(Asp, return_vector=True)[1]
            rayleigh = abs(
                dot(ravel(Asp * vec), ravel(vec)) /
                dot(ravel(vec), ravel(vec)))
            assert_almost_equal(rayleigh, expected_eig, decimal=4)

            AA = mat(A).H * mat(A)
            AAsp = csr_matrix(AA)
            [E, V] = linalg.eig(AA)
            E = abs(E)
            largest_eig = (E == E.max()).nonzero()[0]
            expected_eig = E[largest_eig]
            expected_vec = V[:, largest_eig]

            assert_almost_equal(approximate_spectral_radius(AA), expected_eig)
            assert_almost_equal(approximate_spectral_radius(AAsp),
                                expected_eig)
            vec = approximate_spectral_radius(AA, return_vector=True)[1]
            rayleigh = abs(
                dot(ravel(AA * vec), ravel(vec)) / dot(ravel(vec), ravel(vec)))
            assert_almost_equal(rayleigh, expected_eig, decimal=4)
            vec = approximate_spectral_radius(AAsp, return_vector=True)[1]
            rayleigh = abs(
                dot(ravel(AAsp * vec), ravel(vec)) /
                dot(ravel(vec), ravel(vec)))
            assert_almost_equal(rayleigh, expected_eig, decimal=4)
Example #41
 def genInitSigmaFactor(self):
     """ depending on the algorithm settings, we start out with in identity matrix, or perturb it """
     if self.perturbedInitSigma:
         res = mat(
             eye(self.xdim) * self.initSigmaCoeff +
             randn(self.xdim, self.xdim) * self.initSigmaRandCoeff)
     else:
         res = mat(eye(self.xdim) * self.initSigmaCoeff)
     return res
Example #42
def calcWs(alphas, dataArr, classLabels):
    # compute w from the alphas combined with the data set
    X = sp.mat(dataArr)
    labelMat = sp.mat(classLabels).transpose()
    m, n = np.shape(X)
    w = np.zeros((n, 1))
    for i in range(m):
        w += sp.multiply(alphas[i] * labelMat[i], X[i, :].T)
    return w
Example #43
def hessian_per_datum_from_coeffs(coeffs,
                                  R,
                                  kernel,
                                  phi0=False,
                                  regularized=False):
    """ For optimizer. Computes hessian from coefficients. """

    # Get number of gridpoints and dimension of kernel
    G = kernel.shape[0]
    kernel_dim = kernel.shape[1]

    # Make sure coeffs is valid
    if not (len(coeffs) == kernel_dim):
        raise ControlledError(
            '/hessian_per_datum_from_coeffs/ coeffs must have length %d: len(coeffs) = %d'
            % (kernel_dim, len(coeffs)))
    if not all(np.isreal(coeffs)):
        raise ControlledError(
            '/hessian_per_datum_from_coeffs/ coeffs is not real: coeffs = %s' %
            coeffs)
    if not all(np.isfinite(coeffs)):
        raise ControlledError(
            '/hessian_per_datum_from_coeffs/ coeffs is not finite: coeffs = %s'
            % coeffs)
    # Make sure phi0 is valid
    if not isinstance(phi0, np.ndarray):
        phi0 = np.zeros(G)
    else:
        if not all(np.isreal(phi0)):
            raise ControlledError(
                '/hessian_per_datum_from_coeffs/ phi0 is not real: phi0 = %s' %
                phi0)
        if not all(np.isfinite(phi0)):
            raise ControlledError(
                '/hessian_per_datum_from_coeffs/ phi0 is not finite: phi0 = %s'
                % phi0)
    # Make sure regularized is valid
    if not isinstance(regularized, bool):
        raise ControlledError(
            '/hessian_per_datum_from_coeffs/ regularized must be a boolean: regularized = %s'
            % type(regularized))

    phi = coeffs_to_field(coeffs, kernel)
    quasiQ = utils.field_to_quasiprob(phi + phi0)

    kernel_mat = sp.mat(kernel)  # G x kernel_dim
    H = sp.mat(sp.diag(quasiQ))  # G x G

    if regularized:
        H += (1. / G) * sp.diag(np.ones(G)) / (PHI_STD_REG**2)

    hessian_mat = kernel_mat.T * H * kernel_mat  # kernel_dim x kernel_dim

    # Make sure hessian_array is valid ?

    return sp.array(hessian_mat)  # Returns an array
Example #44
 def distanceState(self, belief1, belief2):
     '''
     Calculate distance between two beliefs by 1 - (cosine similarity)
     '''
     if len(belief1) != len(belief2):
         return 2.0
     b1 = mat(belief1)
     b2 = mat(belief2)
     cosSim = dot(b1,b2.T)/linalg.norm(b1)/linalg.norm(b2)
     return 1.0 - cosSim[0,0]
Example #45
def ecef2enu(X, Y, Z, lat, lon, alt):
    X0, Y0, Z0 = coord.geodetic2ecef(lat, lon, alt)
    lat, lon = radians(lat), radians(lon)
    mx = mat('[%f %f %f; %f %f %f; %f %f %f]' %
        (-sin(lon), -sin(lat) * cos(lon), cos(lat) * cos(lon), cos(lon),
         -sin(lat) * sin(lon), cos(lat) * sin(lon), 0, cos(lat), sin(lat)))
    geo = mat('[%f; %f; %f]' % (X0, Y0, Z0))
    res = mat('[%f; %f; %f]' % (X, Y, Z))
    enu = mx.transpose()*(res - geo)
    return enu[1], enu[0], -enu[2]
Example #46
def comp_form_i(sys,obs,K,Ts,Cy=[[1]]):
    """Compact form Conroller+Observer+Integral part
    Only for discrete systems!!!

    Call:
    contr=comp_form_i(sys,obs,K,Ts[,Cy])

    Parameters
    ----------
    sys : System in State Space form
    obs : Observer in State Space form
    K: State feedback gains
    Ts: Sampling time
    Cy: feedback matrix to choose the output for integral part

    Returns
    -------
    contr: ss
    Controller

    """
    if sys.dt==0.0:
        print "contr_form_i works only with discrete systems!"
        return

    ny=shape(sys.C)[0]
    nu=shape(sys.B)[1]
    nx=shape(sys.A)[0]
    no=shape(obs.A)[0]
    ni=shape(mat(Cy))[0]

    B_obsu = mat(obs.B[:,0:nu])
    B_obsy = mat(obs.B[:,nu:nu+ny])
    D_obsu = mat(obs.D[:,0:nu])
    D_obsy = mat(obs.D[:,nu:nu+ny])

    k=mat(K)
    nk=shape(k)[1]
    Ke=k[:,nk-ni:]
    K=k[:,0:nk-ni]
    X = inv(eye(nu,nu)+K*D_obsu)

    a=mat(obs.A)
    c=mat(obs.C)
    Cy=mat(Cy)

    tmp1=hstack((a-B_obsu*X*K*c,-B_obsu*X*Ke))

    tmp2=hstack((zeros((ni,no)),eye(ni,ni)))
    A_ctr=vstack((tmp1,tmp2))

    tmp1=hstack((zeros((no,ni)),-B_obsu*X*K*D_obsy+B_obsy))
    tmp2=hstack((eye(ni,ni)*Ts,-Cy*Ts))
    B_ctr=vstack((tmp1,tmp2))

    C_ctr=hstack((-X*K*c,-X*Ke))
    D_ctr=hstack((zeros((nu,ni)),-X*K*D_obsy))

    contr=StateSpace(A_ctr,B_ctr,C_ctr,D_ctr,sys.dt)
    return contr
Example #47
def comp_form_i(sys, obs, K, Ts, Cy=[[1]]):
    """Compact form Conroller+Observer+Integral part
    Only for discrete systems!!!

    Call:
    contr=comp_form_i(sys,obs,K,Ts[,Cy])

    Parameters
    ----------
    sys : System in State Space form
    obs : Observer in State Space form
    K: State feedback gains
    Ts: Sampling time
    Cy: feedback matrix to choose the output for integral part

    Returns
    -------
    contr: ss
    Controller

    """
    if sys.dt == 0.0:
        print "contr_form_i works only with discrete systems!"
        return

    ny = shape(sys.C)[0]
    nu = shape(sys.B)[1]
    nx = shape(sys.A)[0]
    no = shape(obs.A)[0]
    ni = shape(mat(Cy))[0]

    B_obsu = mat(obs.B[:, 0:nu])
    B_obsy = mat(obs.B[:, nu:nu + ny])
    D_obsu = mat(obs.D[:, 0:nu])
    D_obsy = mat(obs.D[:, nu:nu + ny])

    k = mat(K)
    nk = shape(k)[1]
    Ke = k[:, nk - ni:]
    K = k[:, 0:nk - ni]
    X = inv(eye(nu, nu) + K * D_obsu)

    a = mat(obs.A)
    c = mat(obs.C)
    Cy = mat(Cy)

    tmp1 = hstack((a - B_obsu * X * K * c, -B_obsu * X * Ke))

    tmp2 = hstack((zeros((ni, no)), eye(ni, ni)))
    A_ctr = vstack((tmp1, tmp2))

    tmp1 = hstack((zeros((no, ni)), -B_obsu * X * K * D_obsy + B_obsy))
    tmp2 = hstack((eye(ni, ni) * Ts, -Cy * Ts))
    B_ctr = vstack((tmp1, tmp2))

    C_ctr = hstack((-X * K * c, -X * Ke))
    D_ctr = hstack((zeros((nu, ni)), -X * K * D_obsy))

    contr = StateSpace(A_ctr, B_ctr, C_ctr, D_ctr, sys.dt)
    return contr
Example #48
    def plot_phen_relatedness(self, k, k_accessions, plot_file_prefix, pids=None):
        import kinship
        import pylab
        import scipy as sp
        from scipy import linalg

        if not pids:
            pids = self.get_pids()
        self.convert_to_averages(pids)
        self.filter_ecotypes_2(k_accessions, pids)
        for pid in pids:
            ets = self.get_ecotypes(pid)
            vals = self.get_values(pid)
            k_m = kinship.prepare_k(k, k_accessions, ets)
            c = sp.sum((sp.eye(len(k_m)) - (1.0 / len(k_m)) * sp.ones(k_m.shape)) * sp.array(k_m))
            k_scaled = (len(k) - 1) * k / c
            p_her = self.get_pseudo_heritability(pid, k_m)
            x_list = []
            y_list = []
            for i in range(len(ets)):
                for j in range(i):
                    x_list.append(k_m[i, j])
                    y_list.append(vals[i] - vals[j])
            ys = sp.array(y_list)
            ys = ys * ys
            xs = sp.array(x_list)
            phen_name = self.get_name(pid)
            phen_name = phen_name.replace("<i>", "")
            phen_name = phen_name.replace("</i>", "")
            phen_name = phen_name.replace("+", "_plus_")
            phen_name = phen_name.replace("/", "_div_")
            file_name = plot_file_prefix + "_%d_%s.png" % (pid, phen_name)
            pylab.figure()
            pylab.plot(xs, ys, "k.", alpha=0.2)
            pylab.xlabel("Relatedness")
            pylab.ylabel("Squared phenotypic difference")
            # Plot regression line
            Y_mat = sp.mat(ys).T
            X_mat = sp.hstack((sp.mat(sp.ones(len(xs))).T, sp.mat(xs).T))
            (betas, residues, rank, s) = linalg.lstsq(X_mat, Y_mat)
            x_min, x_max = pylab.xlim()
            pylab.plot([x_min, x_max], [betas[0] + x_min * betas[1], betas[0] + x_max * betas[1]])
            corr = sp.corrcoef(xs, ys)[0, 1]
            y_min, y_max = pylab.ylim()
            x_range = x_max - x_min
            y_range = y_max - y_min
            pylab.axis(
                [x_min - 0.025 * x_range, x_max + 0.025 * x_range, y_min - 0.025 * y_range, y_max + 0.15 * y_range]
            )
            pylab.text(x_min + 0.1 * x_range, y_max + 0.03 * y_range, "Correlation: %0.4f" % (corr))
            pylab.text(x_min + 0.5 * x_range, y_max + 0.03 * y_range, "Pseudo-heritability: %0.4f" % (p_her))
            pylab.savefig(file_name)
            del k_m
            del k_scaled
Example #49
 def get_incidence_matrix(self):
     ets = sp.array(self.ecotypes)
     unique_ets = []
     i = 0
     while i < len(ets):
         et = ets[i]
         unique_ets.append(et)
         while i < len(ets) and ets[i] == et: #The ecotypes are assumed to be sorted
             i += 1
     Z = sp.int8(sp.mat(ets).T == sp.mat(unique_ets))
     return Z
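A standalone sketch of the incidence-matrix trick used above (not part of the original class), assuming a scipy version that still exposes the numpy aliases sp.mat and sp.int8.

import scipy as sp

ets = ['e1', 'e1', 'e2', 'e3', 'e3', 'e3']        # sorted ecotype ids with replicates
unique_ets = ['e1', 'e2', 'e3']
Z = sp.int8(sp.mat(ets).T == sp.mat(unique_ets))  # 6 x 3 incidence matrix
print(Z)    # each row has a single 1 marking that sample's ecotype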
Example #50
def centroid (obj,face):
	""" To compute the centroid od a d-face.

	`face` is the canonical representation of face (list of vertex indices)
	Return a n-point, convex combination of d+1 n-points.
	"""
	simplex = [ obj.vertices.points[v]  for v in face ]
	d = len(simplex)
	A = mat(simplex, dtype=scipy.float32)
	C = mat(d*[1.0/d], dtype=scipy.float32)
	point = (C * A).tolist()[0]
	return point
Example #51
def enu2ecef(lat, lon, alt, n, e, d):
    """NED (north/east/down) to ECEF coordinate system conversion."""
    x, y, z = e, n, -d
    lat, lon = radians(lat), radians(lon)
    X, Y, Z = geodetic2ecef(lat, lon, alt)
    mx = mat('[%f %f %f; %f %f %f; %f %f %f]' %
        (-sin(lon), -sin(lat) * cos(lon), cos(lat) * cos(lon), cos(lon),
         -sin(lat) * sin(lon), cos(lat) * sin(lon), 0, cos(lat), sin(lat)))
    enu = mat('[%f; %f; %f]' % (x, y, z))
    geo = mat('[%f; %f; %f]' % (X, Y, Z))
    res = mx * enu + geo
    return float(res[0]), float(res[1]), float(res[2])
Example #52
def prepare_k(k, k_accessions, accessions):
    if k_accessions == accessions:
        return sp.mat(k)
    indices_to_keep = []
    for acc in accessions:
        try:
            i = k_accessions.index(acc)
            indices_to_keep.append(i)
        except ValueError:
            continue
    k = k[indices_to_keep, :][:, indices_to_keep]
    return sp.mat(k)
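A hypothetical usage sketch, assuming prepare_k above and scipy imported as sp; the toy kinship matrix and accession ids are invented.

import numpy as np

k = np.arange(16.0).reshape(4, 4)       # toy 4 x 4 kinship matrix
k_accessions = ['a1', 'a2', 'a3', 'a4']
accessions = ['a2', 'a4']               # keep only these accessions

k_sub = prepare_k(k, k_accessions, accessions)
print(k_sub)                            # rows/columns 1 and 3 of k, as a 2 x 2 matrix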
Example #53
File: lsa2.py Project: dx88968/NLP
 def get_usv(self, num_dimension):
     """
      return matrix of s,u,v in a new dimension
     """
     if num_dimension > len(self.S):
         num_dimension = len(self.S)
     s = mat(zeros([num_dimension, num_dimension]))
     for i in range(num_dimension):
         s[i, i] = self.S[i]
     u = mat(self.U[:, 0:num_dimension])
     v = mat(self.Vt[0:num_dimension, :])
     return (u, s, v)
Example #54
    def get_incidence_matrix(self, pid):
        ets = sp.array(self.phen_dict[pid]['ecotypes'])
        unique_ets = []
        i = 0
        while i < len(ets):
            et = ets[i]
            unique_ets.append(et)
            while i < len(ets) and ets[i] == et: #The ecotypes are assumed to be sorted
                i += 1
#        unique_ets = sp.mat(sp.unique(ets))
        Z = sp.int8(sp.mat(ets).T == sp.mat(unique_ets))
        #print Z
        return Z
Example #55
def ecef2ned(lat, lon, alt, X, Y, Z):
    X0, Y0, Z0 = coord.geodetic2ecef(lat, lon, alt)
    lat, lon = radians(lat), radians(lon)
    
    mr = mat('[%f %f %f; %f %f %f; %f %f %f]' %
        (-cos(lon)*sin(lat), -sin(lon), -cos(lat) * cos(lon), 
         -sin(lat)*sin(lon), cos(lon), -sin(lon)*cos(lat),
         cos(lat), 0, -sin(lat)))
    
    geo = mat('[%f; %f; %f]' % (X0, Y0, Z0))
    res = mat('[%f; %f; %f]' % (X, Y, Z))
    res = mr.transpose()*(res - geo)
    return float(res[0]), float(res[1]), float(res[2])