Example #1
def roots_of_matrix(M, x=sympy.abc.x):
    '''
    Find the roots of the equation det(M(x)) = 0, where M(x) = A + B*x.
    
    Parameters
    ----------
    M : sympy.Matrix
        Square matrix that depends linearly on a single variable
    x : sympy.Symbol, optional
        Symbol on which M depends

    Returns
    -------
    numpy.ndarray
        1-D array of roots
    '''
    M_wo_x = M.replace(x, 0.)
    A = ml.empty(M.shape, dtype=complex)
    for (i, j), element in np.ndenumerate(M_wo_x):
        A[i, j] = complex(element)

    M_coeffs_x = (M - M_wo_x).replace(x, 1.)
    B = ml.empty(M.shape, dtype=complex)
    for (i, j), element in np.ndenumerate(M_coeffs_x):
        B[i, j] = complex(element)

    C = -A.I * B

    evs = np.linalg.eigvals(C)
    roots = 1 / evs[np.nonzero(evs)]
    return roots
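A minimal usage sketch with hypothetical data: it assumes the imports the function above relies on (numpy as np, numpy.matlib as ml, sympy, and sympy.abc for the default argument) are already in place.

from sympy.abc import x
import sympy

M = sympy.Matrix([[1, x], [x, 1]])    # M(x) = I + [[0, 1], [1, 0]] * x
print(roots_of_matrix(M, x))          # det(M(x)) = 1 - x**2, so the roots are near -1 and 1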
Example #2
def getOrthColumns(m):
    '''
    Constructs the orthogonally complementing columns of the input.

    Input of the form pxr is assumed to have r<=p,
    and have either full column rank r or rank 0 (scalar or matrix)
    Output is of the form px(p-r), except:
    a) if M is square with full rank p, returns an empty px0 matrix
    b) if rank(M)=0 (zero matrix), returns I_p
    (Note you cannot pass scalar zero, because dimension info would be
    missing.)
    Return type is as input type.
    '''
    if type(m) == type(asarray(m)):
        m = mat(m)
        output = 'array'
    else: output = 'matrix'
    p, r = m.shape
    # first catch the stupid input case
    if p < r: raise ValueError('need at least as many rows as columns')
    # we use lstsq(M, ones) just to exploit its rank-finding algorithm,
    rk = lstsq(m, ones(p).T)[2]
    # first the square and full rank case:
    if rk == p: result = zeros((p,0))   # note the shape! hopefully octave-like
    # then the zero-matrix case (within machine precision):
    elif rk == 0: result = eye(p)
    # now the rank-deficient case:
    elif rk < r:
        raise ValueError('sorry, matrix does not have full column rank')
    # (what's left should be ok)
    else:
        # we have to watch out for zero rows in M,
        # if they are in the first p-r positions!
        # so the (probably inefficient) algorithm:
            # 1. check the rank of each row
            # 2. if zero, then also put a zero row in c
            # 3. if not, put the next unit vector in c-row
        idr = eye(r)
        idpr = eye(p-r)
        c = empty([0,r])    # starting point  
        co = empty([0, p-r]) # will hold orth-compl.
        idrcount = 0
        for row in range(p):
            # (must be ones() instead of 1 because of 2d-requirement
            if lstsq( m[row,:], ones(1) )[2] == 0 or idrcount >= r:
                c = r_[ c, zeros(r) ]
                co = r_[ co, idpr[row-idrcount, :] ]
            else:     # row is non-zero, and we haven't used all unit vecs 
                c = r_[ c, idr[idrcount, :] ] 
                co = r_[ co, zeros(p-r) ]
                idrcount += 1
        # earlier non-general (=bug) line: c = mat(r_[eye(r), zeros((p-r, r))])
        # and:  co = mat( r_[zeros((r, p-r)), eye(p-r)] )
        # old:
        # result = ( eye(p) - c * (M.T * c).I * M.T ) * co
        result = co - c * solve(m.T * c, m.T * co)
    if output == 'array': return result.A
    else: return result
Example #3
def simulate_LDS_with_LPF(A, B, K, x0, DT, N, ndt, fc, plot):
    '''Simulate a Linear Dynamical System (LDS) forward in time assuming
       the control inputs are low-pass filtered
        A: transition matrix
        B: control matrix
        K: feedback gain matrix
        x0: initial state
        DT: time step
        N: number of time steps to simulate
        ndt: number of sub time steps (to make simulation more accurate)
        fc: cut-off frequency of the low-pass filter (no filtering if fc <= 0)
        plot: if True, plot the time evolution of states and controls
    '''
    n = A.shape[0]
    m = B.shape[1]
    x = matlib.empty((n, N))
    u = matlib.empty((m, N - 1))
    u_filt = matlib.empty((m, N - 1))
    x[:, 0] = x0
    u[:, 0] = -K * x0
    u_filt[:, 0] = -K * x0
    dt = DT / ndt
    lpf = FirstOrderLowPassFilter(dt, fc, u[:, 0].A1)
    for i in range(N - 1):
        u[:, i] = -K * x[:, i]
        x_pre = x[:, i]
        for ii in range(ndt):
            if (fc > 0):
                u_filt[:, i] = np.matrix(lpf.filter_data(u[:, i].A1)).T
            else:
                u_filt[:, i] = u[:, i]
            x_post = x_pre + dt * (A * x_pre + B * u_filt[:, i])
            x_pre = x_post
        x[:, i + 1] = x_post

    if plot:
        max_rows = 4
        n_cols = 1 + (n + m + 1) // max_rows
        n_rows = int(np.ceil(float(n + m) / n_cols))
        f, ax = plt.subplots(n_rows, n_cols, sharex=True)
        ax = ax.reshape(n_cols * n_rows)
        time = np.arange(N * DT, step=DT)
        for i in range(n):
            ax[i].plot(time, x[i, :].A1)
            ax[i].set_title('x ' + str(i))
        for i in range(m):
            ax[n + i].plot(time[:-1], u[i, :].A1, label='u')
            ax[n + i].plot(time[:-1],
                           u_filt[i, :].A1,
                           '--',
                           label='u filtered')
            ax[n + i].set_title('u ' + str(i))
            ax[n + i].legend(loc='best')
    return (x, u)
Example #4
    def record_activity(self, inputs):
        """
            Returns states and outputs through time after being fed the inputs
        """
        
        nb_inputs = len(inputs)
        states = npmat.empty((self.state_size, nb_inputs))
        outputs = npmat.empty((self.output_size, nb_inputs))

        for i in range(nb_inputs):
           self.next_output(inputs[i])
           states[:, i] = self.state
           outputs[:, i] = self.output_values
           
        return states, outputs
Example #5
def simulate_ALDS(H, x0, dt, N, plot=False, show_plot=None):
    '''Simulate an Autonomous Linear Dynamical System (ALDS) forward in time 
        H: transition matrix
        x0: initial state
        dt: time step
        N: number of time steps to simulate
        plot: if True it plots the time evolution of the state
    '''
    n = H.shape[0]
    x = matlib.empty((n, N))
    x[:, 0] = x0
    e_dtH = expm(dt * H)
    for i in range(N - 1):
        x[:, i + 1] = e_dtH * x[:, i]

    if plot:
        max_rows = 4
        n_cols = 1 + (n + 1) // max_rows
        n_rows = int(np.ceil(float(n) / n_cols))
        f, ax = plt.subplots(n_rows, n_cols, sharex=True)
        ax = ax.reshape(n_cols * n_rows)
        time = np.arange(N * dt, step=dt)
        for i in range(n):
            ax[i].plot(time, x[i, :].A1)
            ax[i].set_title(str(i))
        if (show_plot is None):
            plt.show()
    return x
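An illustrative call with a hypothetical system, assuming numpy as np, numpy.matlib as matlib and scipy.linalg.expm are imported as the function above requires.

import numpy as np

H = np.matrix([[0., 1.], [-1., -0.5]])    # damped oscillator
x0 = np.matrix([[1.], [0.]])
x = simulate_ALDS(H, x0, dt=0.01, N=1000)
print(x[:, -1])                           # the trajectory decays toward the origin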
Example #6
def FluidStationaryDistr (mass0, ini, K, clo, x):
    """
    Returns the stationary distribution of a Markovian 
    fluid model at the given points.
    
    Parameters
    ----------
    mass0 : matrix, shape (1,Np+Nm)
        The stationary probability vector of zero level
    ini : matrix, shape (1,Np)
        The initial vector of the stationary density
    K : matrix, shape (Np,Np)
        The matrix parameter of the stationary density
    clo : matrix, shape (Np,Np+Nm)
        The closing matrix of the stationary density
    x : vector, length (K)
        The distribution function is computed at these 
        points.
    
    Returns
    -------
    pi : matrix, shape (K,Nm+Np)
        The ith row of pi is the probability that the fluid
        level is less than or equal to x(i), while being in
        different states of the background process.
    """

    m = clo.shape[1]
    y = ml.empty((len(x),m))
    closing = -K.I*clo
    for i in range(len(x)):
        y[i,:] = mass0 + ini*(ml.eye(K.shape[0])-la.expm(K*x[i]))*closing

    return y
Example #7
def QBDStationaryDistr (pi0, R, K):
    """
    Returns the stationary distribution of a QBD up to a
    given level K.
    
    Parameters
    ----------
    pi0 : matrix, shape (1,N)
        The stationary probability vector of level zero
    R : matrix, shape (N,N)
        The matrix parameter of the matrix geometrical
        distribution of the QBD 
    K : integer
        The stationary distribution is returned up to
        this level.
    
    Returns
    -------
    pi : array, length (K+1)*N
        The stationary probability vector up to level K
    """

    m = R.shape[0]    
    qld = ml.empty((1,(K+1)*m))
    qld[0,0:m] = pi0
    pix = pi0
    for k in range(1,K+1):
        pix = pix*R
        qld[0,k*m:(k+1)*m] = pix
    return qld
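A small usage sketch with hypothetical QBD data, assuming numpy.matlib as ml is imported as above; R must have spectral radius below one for the matrix-geometric recursion pi_k = pi_{k-1} * R to converge.

import numpy.matlib as ml

pi0 = ml.matrix([[0.6, 0.4]])               # level-zero stationary vector
R = ml.matrix([[0.2, 0.1], [0.05, 0.3]])    # matrix-geometric parameter
pi = QBDStationaryDistr(pi0, R, 3)          # levels 0..3, concatenated into one row
print(pi.reshape((4, 2)))                   # one row per level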
Example #8
def compute_closed_loop_eigenvales(robot, gains_array):
    ''' Compute eigenvalues of linear part of closed-loop system:
            d3f = -Kd_bar*d2f - (K*Upsilon+Kp_bar)*df - Kp_bar*K*A*Kf*e_f + ...
    '''
    ny=3
    ei_cls_f = matlib.empty((3*nf,T), dtype=complex)*np.nan
    ei_cls   = matlib.empty((3*nf+2*ny,T), dtype=complex)*np.nan
    
    for t in range(T):
        H = compute_closed_loop_transition_matrix(gains_array, robot, q[:,t], v[:,t])
        H_f = H[2*ny:, 2*ny:]
        ei_cls_f[:,t] = np.sort_complex(eigvals(H_f)).reshape((3*nf,1))
        ei_cls[:,t] = np.sort_complex(eigvals(H)).reshape((3*nf+2*ny,1))
    
    plot_stats_eigenvalues(ei_cls_f, name='Closed-loop force tracking')
    plot_stats_eigenvalues(ei_cls, name='Closed-loop momentum tracking')
    return ei_cls
Example #9
    def __init__(self,g,d,sigma,L,U,Q):
        """
        defines a generative model for diffusion of disease

        Arguments
        =========
        g : function
            describes the individual contribution of a location to the dynamics
        d : function
            defines the distance between two locations
        sigma : scalar
            controls the scale of the diffusion
        L : list of ntuples
            the spatial locations (in R^n) over which the model is defined
        U : matrix
            input matrix
        Q : matrix
            disturbance covariance matrix
        """
        # number of states
        self.n = len(L)
        # store state locations
        self.L = L
        # diffusion matrix
        F = mb.empty((self.n,self.n))
        for i,li in enumerate(L):
            for j,lj in enumerate(L):
                F[i,j] = (1./np.sqrt(2*np.pi*sigma**2))*np.exp(-d(li,lj)/(2*sigma**2))
        # normalise columns
        for j,col in enumerate(F.T):
            F[:,j] = F[:,j]/np.sum(col)
        # initialise A matrix
        self.A = mb.empty((self.n,self.n)) 
        # populate A matrix
        for i,li in enumerate(L):
            for j,lj in enumerate(L):
                self.A[i,j] = F[i,j] * g(lj)
        # store U matrix
        self.U = U
        # store disturbance covariance matrix
        self.Q = Q
        # store distance function
        self.d = d
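A hypothetical construction sketch for the class this __init__ belongs to; the class name DiseaseModel below is made up for illustration, and numpy as np plus numpy.matlib as mb are assumed to be imported.

import numpy as np

g = lambda loc: 1.0                       # uniform local contribution
d = lambda a, b: (a[0] - b[0]) ** 2       # squared distance on the real line
L = [(0.0,), (1.0,), (2.0,)]              # three locations in R^1
U = np.matrix(np.eye(3))
Q = np.matrix(0.01 * np.eye(3))
model = DiseaseModel(g, d, 0.5, L, U, Q)  # sigma = 0.5
print(model.A)                            # columns of the underlying F matrix are normalised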
Example #10
 def predict(self, X, *args, **kwargs):
     m = X.shape[0]
     RBF_result = matlib.empty((m, self._n_RBF + 1))
     
     for x_index in range(m):
         x_output = matlib.empty((1, self._n_RBF + 1))
         
         # RBF process
         for RBF_index in range(self._n_RBF):
             x_output[0, RBF_index + 1] = Euler_distance(X[x_index], self._Theta1[RBF_index])
         
         sigmoid_output = sigmoid(x_output)
         sigmoid_output[0, 0] = 1              # bias term
         
         RBF_result[x_index] = sigmoid_output  # store the activated RBF row
     
     # Logistic Regression Process
     raw_output = RBF_result * self._Theta2
     return sigmoid(raw_output) > 0.5
Example #11
def simulate_LDS(A, B, K, x0, dt, N):
    '''Simulate a Linear Dynamical System (LDS) forward in time 
        A: transition matrix
        B: control matrix
        K: feedback gain matrix
        x0: initial state
        dt: time step
        N: number of time steps to simulate
    '''
    n = A.shape[0]
    m = B.shape[1]
    x = matlib.empty((n, N))
    u = matlib.empty((m, N - 1))
    x[:, 0] = x0
    H = A - B * K
    e_dtH = expm(dt * H)
    for i in range(N - 1):
        x[:, i + 1] = e_dtH * x[:, i]
        u[:, i] = -K * x[:, i]
    return (x, u)
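A usage sketch on a double integrator with hypothetical gains, assuming numpy as np, numpy.matlib as matlib and scipy.linalg.expm are available as the function above requires.

import numpy as np

A = np.matrix([[0., 1.], [0., 0.]])    # double-integrator dynamics
B = np.matrix([[0.], [1.]])
K = np.matrix([[1., 2.]])              # stabilising state-feedback gain
x0 = np.matrix([[1.], [0.]])
x, u = simulate_LDS(A, B, K, x0, dt=0.01, N=500)
print(x[:, -1])                        # the closed-loop state decays toward zero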
Example #12
 def make_C_matrix(self, r_t, alpha_t, o_t):
     c = len(o_t)
     # initialise C matrix
     C = mb.empty((c, self.n))
     # populate C matrix
     for i in range(c):
         for j in range(self.n):
             rti2 = r_t[i]**2
             const = alpha_t[i] / np.sqrt(2*np.pi*rti2)
             C[i,j] = const * np.exp(-self.d(o_t[i], self.L[j]) / (2*rti2)) 
     return C
Example #13
def trouverParenthesageOptimalAvecStockage(dim, frontiere, i, j):
    """Calcule récursivement le meilleure parenthésage pour une
       série de multiplications de matrices. Il effectue le calcul
       de façon récursive
       frontiere: matrice de position des parentheses pour deux matrices
       dim: tableau de dimensions des matrices à multiplier
       m: résultats déjà calculés
    """
    m = matlib.empty((j, j), dtype=int)
    m.fill(-1)
    return __trouverParenthesageOptimalAvecStockage(frontiere, dim, m, i, j)
Example #14
    def colliding_boxes(self):
        
        nb_solids = self.nb_solids
        points = npmat.hstack([s.center_position for s in self.solids])
        center = npmat.asmatrix(npmat.average(points, axis = 1)).T
        centered_points = points - center
        correlation_matrix = (centered_points * centered_points.T) / nb_solids
        u = iterate(correlation_matrix, self.NB_ITERATIONS)

    
        if self.projected_bounds is None:
            
            self.projected_bounds = []
            
            for i in range(nb_solids):
                self.projected_bounds.append((i, 0, 0.))
                self.projected_bounds.append((i, 1, 0.))
                
        
        min_max_projections = npmat.empty((nb_solids, 2))
        
        for solid_id in range(nb_solids):
            solid_AABB_corners = self.solids[solid_id].AABB_corners()
            corners_projections = u.T * solid_AABB_corners
            min_max_projections[solid_id, 0] = np.min(corners_projections)
            min_max_projections[solid_id, 1] = np.max(corners_projections)
            
        for i in range(2 * nb_solids):            
            solid_id, begin_or_end_id, value = self.projected_bounds[i]
            new_value = min_max_projections[solid_id, begin_or_end_id]
            self.projected_bounds[i] = (solid_id, begin_or_end_id, new_value)
                
        # TODO: linear sorting
        self.projected_bounds.sort(key = lambda x : x[2])
        
        output = []
        active_solids = []
    
        for i in range(2 * nb_solids):
            
            solid_id, begin_or_end_id, value = self.projected_bounds[i]
            
            if begin_or_end_id == 0:
                for active_solid_id in active_solids:
                    if self.solids[active_solid_id].AABB_intersect_with(self.solids[solid_id]):
                        output.append((active_solid_id, solid_id))
                        
                active_solids.append(solid_id)
                
            else:
                active_solids.remove(solid_id)
                
        return output
Example #15
 def getMfeature(self):
     mata = npmatrix.empty((3, 4))  # uninitialized data
     mata = npmatrix.zeros((3, 4))  # zeros
     mata = npmatrix.ones((3, 4))  # ones
     mata = npmatrix.eye(3)  # ones along the diagonal
     mata = npmatrix.eye(3, 5)  # ones along the diagonal of a 3x5 matrix
     mata = npmatrix.identity(3)  # identity (square) matrix
     mata = npmatrix.rand(3, 7)  # random data
     mata = npmatrix.ones((3, 1))  # ones
     print(mata)
     print(mata.shape)
     print(mata.dtype)
Example #16
def compute_costs(x, u, dt, P, Q_x, Q_u):
    N = x.shape[1]
    T = N * dt
    xu_proj = matlib.empty((P.shape[0], N))  # com state
    state_cost, control_cost = 0.0, 0.0
    for t in range(N):
        if (t < N - 1):
            xup = P * np.vstack([x[:, t], u[:, t]])
        else:
            xup = P * np.vstack([x[:, t], matlib.zeros_like(u[:, 0])])
        xu_proj[:, t] = xup
        state_cost += dt * (xup.T * Q_x * xup)[0, 0]
        control_cost += dt * (xup.T * Q_u * xup)[0, 0]
    return xu_proj, np.sqrt(state_cost / T), np.sqrt(control_cost / T)
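An illustrative call with toy trajectories; every matrix below is hypothetical, and numpy as np plus numpy.matlib as matlib are assumed to be imported as above.

import numpy as np

N, dt = 5, 0.1
x = np.matrix(np.random.randn(2, N))        # toy state trajectory
u = np.matrix(np.random.randn(1, N - 1))    # toy control trajectory
P = np.matrix(np.eye(3))                    # project [x; u] onto itself
Q_x = np.matrix(np.diag([1., 1., 0.]))      # weight only the state part
Q_u = np.matrix(np.diag([0., 0., 1.]))      # weight only the control part
xu_proj, state_rms, control_rms = compute_costs(x, u, dt, P, Q_x, Q_u)
print(state_rms, control_rms)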
Example #17
    def AABB_corners(self):

        AABB = self.AABB
        output = npmat.empty((3, 8))

        i = 0

        for x_i in range(2):
            for y_i in range(2):
                for z_i in range(2):
                    output[0, i] = AABB[0, x_i]
                    output[1, i] = AABB[1, y_i]
                    output[2, i] = AABB[2, z_i]
                    i += 1

        return output
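For reference, a self-contained version of the same corner enumeration on a concrete, hypothetical box, where AABB[k, 0] and AABB[k, 1] hold the minimum and maximum coordinate along axis k.

import numpy as np
import numpy.matlib as npmat

AABB = np.array([[0., 1.],    # x range
                 [0., 2.],    # y range
                 [0., 3.]])   # z range
corners = npmat.empty((3, 8))
i = 0
for x_i in range(2):
    for y_i in range(2):
        for z_i in range(2):
            corners[0, i] = AABB[0, x_i]
            corners[1, i] = AABB[1, y_i]
            corners[2, i] = AABB[2, z_i]
            i += 1
print(corners)    # the 8 corners of the box, one per column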
Example #18
def getImpulseDummies(sampledateslist, periodslist):
    '''
    Returns a (numpy-)matrix of impulse dummies for the specified periods.

    sampledateslist must consist of 1999.25 -style dates (quarterly or monthly).
    However, because periodslist is probably human-made, it expects strings
     such as '1999q3' or '1999M12'.
    Variables in columns.
    So far only for quarterly and monthly data.
    '''
    nobs = len(sampledateslist)
    result = empty([nobs, 0])
    for periodstring in periodslist:
        period = dateString2dateFloat(periodstring)
        result = c_[result, getDeterministics(nobs, 'i', \
                            sampledateslist.index(period))]
    return result
Example #20
def multiplierMatrice(a, b):
    """Multiplie les matrices a et b, et retourne
       le résultat dans une nouvelle matrice
    """

    # Check that the matrices can actually be multiplied
    dimA = a.shape
    dimB = b.shape
    if dimA[1] != dimB[0]:
        raise ValueError("incompatible matrix dimensions")

    # Multiply -- the result matrix uses the dtype of a
    c = matlib.empty((dimA[0], dimB[1]), dtype=a.dtype)
    for i in range(0, dimA[0]):
        for j in range(0, dimB[1]):
            total = 0
            for k in range(0, dimA[1]):
                total += a[i,k] * b[k,j]
            c[i,j] = total
    return c
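A quick sanity check with hypothetical data: the hand-rolled product should match numpy's own matrix multiplication (numpy as np and numpy.matlib as matlib are assumed to be imported).

import numpy as np

a = np.matrix([[1., 2.], [3., 4.]])
b = np.matrix([[5., 6.], [7., 8.]])
print(multiplierMatrice(a, b))
print(a * b)                         # same result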
Example #21
    def betas(prices_m):
        """
        Compute beta parameters for all stocks described by the 'prices_m' matrix,
        according to 'index' benchmark.

        :param prices_m: matrix of prices. Each column represents a stock.
        Each row represents price at successive time stamp
        :return: matrix with betas. Each column represents a stock. Each row
        represents beta at successive time period
        """
        returns_m = ml.divide(ml.subtract(prices_m[1:], prices_m[:-1]),
                              prices_m[:-1])
        index_m = ml.matrix(index).T
        index_returns_m = ml.divide(ml.subtract(index_m[1:], index_m[:-1]),
                                    index_m[:-1])
        result = ml.empty((k, prices_m.shape[1]))
        for i in range(k):
            for j in range(stock_amount):
                x = returns_m[:, j]
                y = index_returns_m[:, i]
                result[i, j] = np.cov(x, y, rowvar=0)[0][1]/np.var(y)
        return result
Example #22
 def generate(self, x0, V, O, R, Alpha):
     """
     Arguments
     ========
     x0 : nx1 matrix
         initial state
     V : T-length list of nx1 matrices
         inputs
     O : T-length list of lists
         location of each clinic reporting at time t
     R : T-length list of lists
         radius proportional to the catchment of clinic i at time t
     Alpha : T-length list of lists
         some number proportional to the daily throughput of clinic i at time t
     """
     # check that x0 is the right size and type
     assert type(x0) is np.matrix
     assert x0.shape == (self.n,1)
     x = x0
     # assign the multivariate normal function to a local function (faster and prettier!)
     # we can do a bit of wrapping to make life easier too
     def N(mean,sigma):
         mean = np.array(mean).flatten()
         x = np.random.multivariate_normal(mean, sigma)
         return np.matrix(x).T
     # yield each new state variable by looping through the input
     for t, v in enumerate(V):
         # draw next state
         x = N(self.A*x + self.U*v, self.Q)
         # build observation matrix
         C = self.make_C_matrix(R[t], Alpha[t], O[t])
         # calculate rate parameters at each clinic
         rates = np.array(C*x).flatten()**2
         # initialise next y
         y = mb.empty((C.shape[0],1))
         for i,rate in enumerate(rates):
             # draw from the poisson
             y[i]=np.random.poisson(rate)
         yield x,y
Example #23
    def _record_state_matrices(self, inputs, target_outputs, nb_washing_steps = 0):
        
        training_size = len(inputs)
        
        state_recording_matrix = npmat.empty((self.state_size, training_size - nb_washing_steps))
        
        for i in range(training_size):
      
            self.next_output(inputs[i])
            
            if self.use_feedback:
                #teacher forcing : output_values are changed for the next self.next_output() call
                self.output_values = target_outputs[i]
            
            if i < nb_washing_steps:
                continue
                                    
            state_recording_matrix[:, i - nb_washing_steps] = self.state
            
        output_target_matrix = np.hstack(target_outputs[nb_washing_steps : training_size])

        return state_recording_matrix, output_target_matrix
Example #24
def dare(F, G1, G2, H):
    """Solves the discrete-time algebraic Riccati equation

    0 = F ^ T * X * F
        - X - F ^ T * X * G1 * (G2 + G1 ^ T * X * G1) ^ -1 * G1 ^ T * X * F + H

    Under the assumption that X ^ -1 exists, this equation is equivalent to

    0 = F ^ T * (X ^ -1 + G1 * G2 ^ -1 * G1 ^ T) ^ -1 * F - X + H

    Parameters
    ==========
    Inputs are real matrices:

    F : n x n
    G1 : n x m
    G2 : m x m, symmetric, positive definite
    H : n x n, symmetric, positive semi-definite

    Assumptions
    ===========
    (F, G1) is a stabilizable pair
    (C, F) is a detectable pair (where C is a full-rank factorization of H, i.e.,
        C ^ T * C = H and rank(C) = rank(H))
    F is invertible

    Returns
    =======

    Unique nonnegative definite solution of the discrete algebraic Riccati
    equation.

    Notes
    =====
    This is an implementation of the Schur method for solving algebraic Riccati
    equations as described in dx.doi.org/10.1109/TAC.1979.1102178

    """
    # Verify that F is non-singular
    u, s, v = la.svd(F)
    assert(np.all(s > 0.0))
    # Verify that (F, G1) controllable
    C = ctrb(F, G1)
    u, s, v = la.svd(C)
    assert(np.all(s > 0.0))
    # Verify that (H**.5, F) is observable
    O = obsv(H**.5, F)
    u, s, v = la.svd(O)
    assert(np.all(s > 0.0))
    
    n = F.shape[0]
    m = G2.shape[0]

    G = np.dot(G1, np.dot(inv(G2), G1.T))
    Finv = inv(F)
    Finvt = Finv.T

    # Form symplectic matrix
    Z = empty((2*n, 2*n))
    Z[:n, :n] = F + np.dot(G, np.dot(Finvt, H))
    Z[:n, n:] = -np.dot(G, Finvt)
    Z[n:, :n] = -np.dot(Finvt, H)
    Z[n:, n:] = Finvt

    S, U, sdim = schur(Z, sort='iuc')

    # Verify that the n eigenvalues of the upper-left block are stable
    assert(sdim == n)

    U11 = U[:n, :n]
    U21 = U[n:, :n]
    return solve(U11.T, U21.T).T
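A hypothetical usage sketch: solve a small DARE and check the residual of the equation quoted in the docstring. It assumes the helpers dare() relies on (ctrb, obsv, inv, solve, schur, la, empty) are importable, e.g. from scipy.linalg and a controls toolbox, and that numpy is available as np.

import numpy as np

F = np.array([[1.0, 0.1], [0.0, 1.0]])
G1 = np.array([[0.0], [0.1]])
G2 = np.array([[1.0]])
H = np.eye(2)
X = dare(F, G1, G2, H)
res = (F.T @ X @ F - X
       - F.T @ X @ G1 @ np.linalg.solve(G2 + G1.T @ X @ G1, G1.T @ X @ F)
       + H)
print(np.allclose(res, 0.0))    # expected: True, up to numerical tolerance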
Example #25
#!/usr/bin/python
# Generate absorbance data and save testing file for A->B
import random
from numpy import matrix, matlib
import csv
from frange import frange

t_0 = matrix(frange(-1e-6, 0, 5e-11)).transpose()
t_1 = matrix(frange(0, 1.9999500e-6, 5e-11)).transpose()
k = [2.2e7, 3.124e7] # rate constant

a_0 = 1e-3 # initial concentration of A
a_1 = 2e-3 # initial concentration of C
c = matlib.empty([t_1.size, 2])
c[:,0] = a_0 * matlib.exp(-k[0] * t_1)
c[:,1] = a_1 * matlib.exp(-k[1] * t_1)

# molar absorption of species A
a = matlib.empty([2, 1])
a[0,0] = 1e3
a[1,0] = 1e3

y_1 = matlib.dot(c, a)
y_1 = y_1.transpose().tolist()[0]
y_1 = map(lambda y: y + (0.04 * random.random() - 0.02), y_1)

t_0 = t_0.transpose().tolist()[0]
t_1 = t_1.transpose().tolist()[0]

fullLightVoltage = -0.0951192897786
y_1 = map(lambda y:fullLightVoltage*(10**-y), y_1)
Example #26
#!/usr/bin/python
# Generate absorbance data and save testing file for A->B
import random
from numpy import matrix, matlib
import csv
from frange import frange

t_0 = matrix(frange(-1e-6, 0, 5e-11)).transpose()
t_1 = matrix(frange(0, 1.9999500e-6, 5e-11)).transpose()
k = 2.2e7  # rate constant

a_0 = 1e-3  # initial concentration of A
c = a_0 * matlib.exp(-k * t_1)

# molar absorption of species A
a = matlib.empty([1, 1])
a[0, 0] = 1e3

y_1 = matlib.dot(c, a)
y_1 = y_1.transpose().tolist()[0]
y_1 = map(lambda y: y + (0.04 * random.random() - 0.02), y_1)

t_0 = t_0.transpose().tolist()[0]
t_1 = t_1.transpose().tolist()[0]

fullLightVoltage = -0.0951192897786
y_1 = map(lambda y: fullLightVoltage * (10**-y), y_1)
y_0 = []
for i in range(0, len(t_0)):
    y_0.append(fullLightVoltage + 0.01 * random.random() - 0.005)
Example #27
#   All seeds the same
# 
#===============================================================================

mainPath = 'C:/DataSets/Results/Fraust'

os.chdir(mainPath)

# Parameters List  (of Dictionaries)
param = {'alpha' : 0.996,
         'rr' : 4,
         'sci' : 1}

# Results list (of dictionaries)
# results = []
e_qq_mat = npm.empty((10000,numRuns))

for i in range(numRuns):
    # Generate artificial data streams
    streams = genCosSignals(i, -3)
    
    # Run Fast row householder subspace tracker
    Q_t, S_t, rr, E_t, E_dash_t, hid_var, z_dash, RSRE, no_inp_count, \
    no_inp_marker = FRHH(streams, param['rr'], param['alpha'], param['sci'])
    
    # Calculate deviations from orthogonality and subspace
    e_qq, f_qq  = plotEqqFqq(streams, Q_t, param['alpha'],0, 0)
    
    # Store results in Dictionary
    dic_name = 'res_' + str(i) # string of the name of the Dictionary
    vars()[dic_name] = {'param' : param,
Example #28
#!/usr/bin/python
# Generate absorbance data and save testing file for A->B
import random
from numpy import matrix, matlib
import csv
from frange import frange

t_0 = matrix(frange(-1e-6, 0, 5e-11)).transpose()
t_1 = matrix(frange(0, 1.9999500e-6, 5e-11)).transpose()
k = 2.2e7 # rate constant

a_0 = 1e-3 # initial concentration of A
c = a_0 * matlib.exp(-k * t_1)

# molar absorption of species A
a = matlib.empty([1, 1])
a[0,0] = 1e3

y_1 = matlib.dot(c, a)
y_1 = y_1.transpose().tolist()[0]
y_1 = map(lambda y: y + (0.04 * random.random() - 0.02), y_1)

t_0 = t_0.transpose().tolist()[0]
t_1 = t_1.transpose().tolist()[0]

fullLightVoltage = -0.0951192897786
y_1 = map(lambda y:fullLightVoltage*(10**-y), y_1)
y_0 = []
for i in range(0, len(t_0)):
    y_0.append(fullLightVoltage + 0.01 * random.random() - 0.005)
Example #29
# -*- encoding: utf-8 -*-
"""
4.6.1 创建矩阵
"""

import numpy as np
import numpy.matlib as mat

print(np.mat([[1, 2, 3], [4, 5, 6]], dtype=int))  # create a matrix from a list
print(np.mat(np.arange(6).reshape((2, 3))))  # create a matrix from an array
print(np.mat('1 4 7; 2 5 8; 3 6 9'))  # create a matrix from a MATLAB-style string

print(mat.zeros((2, 3)))  # all-zeros matrix
print(mat.ones((2, 3)))  # all-ones matrix
print(mat.eye(3))  # identity matrix
print(mat.empty((2, 3)))  # uninitialized matrix
print(mat.rand((2, 3)))  # matrix of random numbers in [0, 1)
print(mat.randn((2, 3)))  # matrix of samples from a standard normal (Gaussian) distribution
Example #30
def MG1FundamentalMatrix (A, precision=1e-14, maxNumIt=50, method="ShiftPWCR", maxNumRoot=2048, shiftType="one"):
    """
    Returns matrix G corresponding to the M/G/1 type Markov
    chain defined by matrices A.
    
    Matrix G is the minimal non-negative solution of the 
    following matrix equation:
    
    .. math::
        G = A_0 + A_1 G + A_2 G^2 + A_3 G^3 + \dots.
    
    The implementation is based on [1]_, please cite it if
    you use this method.
    
    Parameters
    ----------
    A : length(M) list of matrices of shape (N,N)
        Matrix blocks of the M/G/1 type generator from
        0 to M-1.
    precision : double, optional
        Matrix G is computed iteratively up to this
        precision. The default value is 1e-14
    maxNumIt : int, optional
        The maximal number of iterations. The default value
        is 50.
    method : {"CR", "RR", "NI", "FI", "IS"}, optional
        The method used to solve the matrix-quadratic
        equation (CR: cyclic reduction, RR: Ramaswami
        reduction, NI: Newton iteration, FI: functional
        iteration, IS: invariant subspace method). The 
        default is "CR".
    
    Returns
    -------
    G : matrix, shape (N,N)
        The G matrix of the M/G/1 type Markov chain.
        (G is stochastic.)
    
    References
    ----------
    .. [1] Bini, D. A., Meini, B., Steffé, S., Van Houdt,
           B. (2006, October). Structured Markov chains 
           solver: software tools. In Proceeding from the
           2006 workshop on Tools for solving structured 
           Markov chains (p. 14). ACM.
    """

    if not isinstance(A,np.ndarray):
        D = np.hstack(A)
    else:
        D = ml.matrix(A)
   
    if method=="ShiftPWCR":
        if butools.verbose:
            Dold = ml.matrix(D)
        D, drift, tau, v = MG1TypeShifts (D, shiftType)

    m = D.shape[0]
    M = D.shape[1]
    I = ml.eye(m)
    D = D.T
    D = np.vstack((D, ml.zeros(((2**(1+math.floor(math.log(M/m-1,2)))+1)*m-M,m))))

    # Step 0
    G = ml.zeros((m,m))
    Aeven = D[np.remainder(np.kron(np.arange(D.shape[0]/m),np.ones(m)),2)==0,:]
    Aodd  = D[np.remainder(np.kron(np.arange(D.shape[0]/m),np.ones(m)),2)==1,:]
    
    Ahatodd = np.vstack((Aeven[m:,:], D[-m:,:]))
    Ahateven = Aodd

    Rj = D[m:2*m,:]
    for i in range(3,M//m+1):
        Rj = Rj + D[(i-1)*m:i*m,:]
    Rj = la.inv(I-Rj)
    Rj = D[:m,:] * Rj
    
    numit = 0

    while numit < maxNumIt:
        numit+=1
        nj=Aodd.shape[0]//m
        if nj > 0:
            # Evaluate the 4 functions in the nj+1 roots using FFT
            # prepare for FFTs (such that they can be performed in 4 calls)
            
            temp1=np.reshape(Aodd[:nj*m,:].T,(m*m,nj),order='F').T
            temp2=np.reshape(Aeven[:nj*m,:].T,(m*m,nj),order='F').T
            temp3=np.reshape(Ahatodd[:nj*m,:].T,(m*m,nj),order='F').T
            temp4=np.reshape(Ahateven[:nj*m,:].T,(m*m,nj),order='F').T
            # FFTs           
            temp1=fft(temp1,nj,0)
            temp2=fft(temp2,nj,0)
            temp3=fft(temp3,nj,0)
            temp4=fft(temp4,nj,0)            
            # reform the 4*nj matrices
            temp1=np.reshape(temp1.T,(m,m*nj),order='F').T
            temp2=np.reshape(temp2.T,(m,m*nj),order='F').T
            temp3=np.reshape(temp3.T,(m,m*nj),order='F').T
            temp4=np.reshape(temp4.T,(m,m*nj),order='F').T
            
            # Next, we perform a point-wise evaluation of (6.20) - Thesis Meini
            Ahatnew = ml.empty((nj*m,m), dtype=complex)
            Anew = ml.empty((nj*m,m), dtype=complex)
            for cnt in range(1,nj+1):
                Ahatnew[(cnt-1)*m:cnt*m,:] = temp4[(cnt-1)*m:cnt*m,:] + temp2[(cnt-1)*m:cnt*m,:] * la.inv(I-temp1[(cnt-1)*m:cnt*m,:]) * temp3[(cnt-1)*m:cnt*m,:]
                Anew[(cnt-1)*m:cnt*m,:] = cmath.exp(-(cnt-1)*2.0j*math.pi/nj) * temp1[(cnt-1)*m:cnt*m,:] + temp2[(cnt-1)*m:cnt*m,:] * la.inv(I-temp1[(cnt-1)*m:cnt*m,:]) * temp2[(cnt-1)*m:cnt*m,:]
    
            # We now invert the FFTs to get Pz and Phatz   
            # prepare for IFFTs (in 2 calls)
            Ahatnew = np.reshape(Ahatnew[:nj*m,:].T,(m*m,nj),order='F').T
            Anew = np.reshape(Anew[:nj*m,:].T,(m*m,nj),order='F').T    
            # IFFTs
            Ahatnew = np.real(ifft(Ahatnew,nj,0))
            Anew = np.real(ifft(Anew,nj,0))
            # reform matrices Pi and Phati
            Ahatnew = np.reshape(Ahatnew.T,(m,m*nj),order='F').T
            Anew = np.reshape(Anew.T,(m,m*nj),order='F').T            
        else: # series Aeven, Aodd, Ahateven and Ahatodd are constant
            temp = Aeven * la.inv(I-Aodd)
            Ahatnew = Ahateven + temp*Ahatodd
            Anew = np.vstack((temp*Aeven, Aodd))
    
        nAnew = 0
        deg = Anew.shape[0]//m
        for i in range(deg//2,deg):
            nAnew = max(nAnew, la.norm(Anew[i*m:(i+1)*m,:],np.inf))

        nAhatnew = 0
        deghat = Ahatnew.shape[0]//m
        for i in range(deghat//2,deghat):
            nAhatnew = max(nAhatnew, la.norm(Ahatnew[i*m:(i+1)*m,:],np.inf))
        
        # c) the test
        while (nAnew > nj*precision or nAhatnew > nj*precision) and nj < maxNumRoot:
    
            nj *= 2
            stopv = min(nj, Aodd.shape[0]//m)
    
            # prepare for FFTs
            temp1=np.reshape(Aodd[:stopv*m,:].T,(m*m,stopv),order='F').T
            temp2=np.reshape(Aeven[:stopv*m,:].T,(m*m,stopv),order='F').T
            temp3=np.reshape(Ahatodd[:stopv*m,:].T,(m*m,stopv),order='F').T
            temp4=np.reshape(Ahateven[:stopv*m,:].T,(m*m,stopv),order='F').T
            # FFTs
            temp1=fft(temp1,nj,0)
            temp2=fft(temp2,nj,0)
            temp3=fft(temp3,nj,0)
            temp4=fft(temp4,nj,0)
            # reform the 4*(nj+1) matrices
            temp1=np.reshape(temp1.T,(m,m*nj),order='F').T
            temp2=np.reshape(temp2.T,(m,m*nj),order='F').T
            temp3=np.reshape(temp3.T,(m,m*nj),order='F').T
            temp4=np.reshape(temp4.T,(m,m*nj),order='F').T
    
            # Next, we perform a point-wise evaluation of (6.20) - Thesis Meini
            Ahatnew = ml.empty((nj*m,m), dtype=complex)
            Anew = ml.empty((nj*m,m), dtype=complex)
            for cnt in range(1,nj+1):
                Ahatnew[(cnt-1)*m:cnt*m,:] = temp4[(cnt-1)*m:cnt*m,:] + temp2[(cnt-1)*m:cnt*m,:] * la.inv(I-temp1[(cnt-1)*m:cnt*m,:]) * temp3[(cnt-1)*m:cnt*m,:]
                Anew[(cnt-1)*m:cnt*m,:] = cmath.exp(-(cnt-1)*2j*math.pi/nj) * temp1[(cnt-1)*m:cnt*m,:] + temp2[(cnt-1)*m:cnt*m,:] * la.inv(I-temp1[(cnt-1)*m:cnt*m,:]) * temp2[(cnt-1)*m:cnt*m,:]

            # We now invert the FFTs to get Pz and Phatz
            # prepare for IFFTs
            Ahatnew = np.reshape(Ahatnew[:nj*m,:].T,(m*m,nj),order='F').T
            Anew = np.reshape(Anew[:nj*m,:].T,(m*m,nj),order='F').T   
            # IFFTs
            Ahatnew = ml.matrix(np.real(ifft(Ahatnew,nj,0)))
            Anew = ml.matrix(np.real(ifft(Anew,nj,0)))
            # reform matrices Pi and Phati
            Ahatnew = np.reshape(Ahatnew.T,(m,m*nj),order='F').T
            Anew = np.reshape(Anew.T,(m,m*nj),order='F').T
    
            vec1 = ml.zeros((1,m))
            vec2 = ml.zeros((1,m))
            for i in range(1,Anew.shape[0]//m):
                vec1 += i*np.sum(Anew[i*m:(i+1)*m,:],0)
                vec2 += i*np.sum(Ahatnew[i*m:(i+1)*m,:],0)

            nAnew = 0
            deg = Anew.shape[0]//m
            for i in range(deg//2,deg):
                nAnew = max(nAnew, la.norm(Anew[i*m:(i+1)*m,:],np.inf))

            nAhatnew = 0
            deghat = Ahatnew.shape[0]//m
            for i in range(deghat//2,deghat):
                nAhatnew = max(nAhatnew, la.norm(Ahatnew[i*m:(i+1)*m,:],np.inf))
        if (nAnew > nj*precision or nAhatnew > nj*precision) and nj >= maxNumRoot:
            print("MaxNumRoot reached, accuracy might be affected!")
    
        if nj > 2:
            Anew = Anew[:m*nj//2,:]
            Ahatnew = Ahatnew[:m*nj//2,:]
        
        # compute Aodd, Aeven, ...
        Aeven = Anew[np.remainder(np.kron(np.arange(Anew.shape[0]/m),np.ones(m)),2)==0,:]
        Aodd = Anew[np.remainder(np.kron(np.arange(Anew.shape[0]/m),np.ones(m)),2)==1,:]
        
        Ahateven = Ahatnew[np.remainder(np.kron(np.arange(Ahatnew.shape[0]/m),np.ones(m)),2)==0,:]
        Ahatodd = Ahatnew[np.remainder(np.kron(np.arange(Ahatnew.shape[0]/m),np.ones(m)),2)==1,:]
        
        if butools.verbose==True:
            if method == "PWCR":
                print("The Point-wise evaluation of Iteration ", numit, " required ", nj, " roots")
            else:
                print("The Shifted PWCR evaluation of Iteration ", numit, " required ", nj, " roots")
        
        # test stopcriteria
        if method == "PWCR":
            Rnewj = Anew[m:2*m,:]
            for i in range (3, Anew.shape[0]//m+1):
                Rnewj = Rnewj + Anew[(i-1)*m:i*m,:]
            Rnewj = la.inv(I-Rnewj)
            Rnewj = Anew[:m,:]*Rnewj

            if np.max(np.abs(Rj-Rnewj)) < precision or np.max(np.sum(I-Anew[:m,:]*la.inv(I-Anew[m:2*m,:]),0)) < precision:
                G = Ahatnew[:m,:]
                for i in range (2,Ahatnew.shape[0]//m+1):
                    G = G + Rnewj * Ahatnew[(i-1)*m:i*m,:]
                G = D[:m,:]*la.inv(I-G)
                break
            
            Rj = Rnewj
            # second condition tests whether Ahatnew is degree 0 (numerically)
            if la.norm(Anew[:m,:m]) < precision or np.sum(Ahatnew[m:,:]) < precision or np.max(np.sum(I-D[:m,:]*la.inv(I-Ahatnew[:m,:]),0)) < precision:
                G = D[0:m,:] * la.inv(I-Ahatnew[:m,:])
                break
        else:
            Gold = G
            G = D[:m,:]*la.inv(I-Ahatnew[:m,:])
            if la.norm(G-Gold,np.inf) < precision or la.norm(Ahatnew[m:,:],np.inf) < precision:
                break
    
    if numit == maxNumIt and np.all(G == ml.zeros((m, m))):
        print("Maximum Number of Iterations reached!")
        G = D[:m,:] * la.inv(I-Ahatnew[:m,:])
    
    G=G.T
    
    if method=="ShiftPWCR":
        if shiftType=="one":
            G = G + (drift<1)*ml.ones((m,m))/m
        elif shiftType=="tau":
            G = G + (drift>1)*tau*v*ml.ones((1,m))
        elif shiftType=="dbl":
            G = G + (drift<1)*ml.ones((m,m))/m+(drift>1)*tau*v*ml.ones((1,m))
        
    if butools.verbose:
        if method=="PWCR":
            D = D.T
        else:
            D = Dold
        temp = D[:,-m:]
        for i in range (D.shape[1]//m-1,0,-1):
            temp = D[:,(i-1)*m:i*m] + temp*G
        res_norm = la.norm(G-temp,np.inf)
        print("Final Residual Error for G: ", res_norm)

    return G
Example #31
def getDeterministics(nobs, which = 'c', date = 0.5):
    '''
    Returns various useful deterministic terms for a given sample length T.

    Return object is a numpy-matrix-type of dimension Tx(len(which));
    (early periods first, where relevant).
    In the 'which' argument pass a string composed of the following letters,
    in arbitrary order:
    c - constant (=1) term
    t - trend (starting with 0)
    q - centered quarterly seasonal dummies (starting with 0.75, -0.25...)
    m - centered monthly seasonal dummies (starting with 11/12, -1/12, ...)
    l - level shift (date applies) 
    s - slope shift (date applies) 
    i - impulse dummy (date applies)

    If the date argument is a floating point number (between 0 and 1),
    it is treated as the fraction of the sample where the break occurs.
    If instead it is an integer between 0 and T, then that observation is
    treated as the shift date.    
    '''
    # some input checks (as well as assignment of shiftperiod):
    if type(nobs) != type(4):  # is not an integer
        raise TypeError('need integer for sample length')
    if nobs <= 0: raise ValueError('need positive sample length')
    if type(date) == type(0.5):     # is a float, treat as break fraction
        if date < 0 or date > 1:
            raise ValueError('need break fraction between 0 and 1')
        shiftperiod = int(date * nobs)
    elif type(date) == type(4):     # is an integer, treat as period number
        if date not in range(1, nobs+1):
            raise ValueError('need period within sample range')
        shiftperiod = date
    else: raise TypeError('need float or integer input for date')
    if type(which) != type('a string'):
        raise TypeError('need string for case spec')
    # end input checks

    out = empty([nobs,0])   # create starting point
    if 'c' in which: out = c_[ out, ones(nobs).T ]
    if 't' in which: out = c_[ out, r_['c', :nobs] ]
    if 'l' in which:
        shift = r_[ zeros(shiftperiod).T, ones(nobs-shiftperiod).T ]
        out = c_[ out, shift ]
    if 's' in which:
        slopeshift = r_[ zeros(shiftperiod).T, r_['c', 1:(nobs - shiftperiod + 1)] ]
        out = c_[ out, slopeshift ]
    if 'i' in which:
        impulse = r_[ zeros(shiftperiod).T, ones(1), zeros(nobs-shiftperiod-1).T ]
        out = c_[ out, impulse ]
    if 'q' in which or 'Q' in which:
        # to end of next full year, thus need to slice at T below:
        q1 = [0.75, -0.25, -0.25, -0.25] * (1 + nobs//4)
        q2 = [-0.25, 0.75, -0.25, -0.25] * (1 + nobs//4)
        q3 = [-0.25, -0.25, 0.75, -0.25] * (1 + nobs//4)
        out = c_[ out, mat(q1[:nobs]).T, mat(q2[:nobs]).T, mat(q3[:nobs]).T ]
    if 'm' in which or 'M' in which:
        temp = [-1./12] * 11 
        for month in range(11):
            temp.insert(month, 1-temp[0])
            # again, to end of next full year, thus need to slice at T below:
            monthly = temp * (1 + nobs//12)  # temp is still a list here!
            out = c_[ out, mat(monthly[:nobs]).T ]
    return out
Example #32
def kernal(x, gamma, n):
    k = ml.empty((n, n))
    for i in range(n):
        for j in range(n):
            k[i, j] = np.exp(-gamma * la.norm(x[i, :] - x[j, :]) ** 2)
    return k
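An illustrative call with hypothetical data, assuming the abbreviations the function uses are numpy as np, numpy.matlib as ml and numpy.linalg as la.

import numpy as np

x = np.matrix([[0., 0.], [1., 0.], [0., 1.]])
K = kernal(x, gamma=0.5, n=3)
print(K)    # symmetric RBF kernel matrix with ones on the diagonal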
Example #33
#-*-coding:utf-8-*-
"""
    NumPy - 矩阵库
    NumPy 包包含一个 Matrix库numpy.matlib。此模块的函数返回矩阵而不是返回ndarray对象。
"""
import numpy.matlib as mt
import numpy as np

if __name__ == '__main__':
    # numpy.matlib.empty(shape, dtype, order) returns a new matrix without initializing its entries
    print(mt.empty((2, 2)))
    # numpy.matlib.zeros() returns a matrix filled with zeros.
    print(mt.zeros((3, 3)))
    # numpy.matlib.ones() returns a matrix filled with ones.
    print(mt.ones((4, 4)))
    # numpy.matlib.eye(n, M, k, dtype) returns a matrix with ones on the diagonal and zeros elsewhere.
    print(mt.eye(n=5, M=5, k=0, dtype=float))
    # numpy.matlib.identity() returns the identity matrix of the given size,
    # i.e. a square matrix with ones on the main diagonal.
    print(mt.identity(5))
    # numpy.matlib.rand() returns a matrix of the given size filled with random values.
    print(mt.rand(3, 3))
    # A matrix is always two-dimensional, whereas an ndarray is an n-dimensional array.
    # The two objects are interchangeable.
    m = np.matrix('1,2;3,4')
    print(m)
    print(type(m))
    print(np.asarray(m))
Example #34
def signal(prices, index):
    """
    Signals to buy/sell stocks.

    Returns a tuple (X1, X2, ..., Xn), where n is the number of stocks and
    Xi is one of 'sell', 'buy', or None.
    :param prices: list of price lists of stocks, in time ascending order
    :param index: list of k(!) price lists of benchmark values for k last
    periods, in time ascending order, with same time frame as stocks.
    """
    def betas(prices_m):
        """
        Compute beta parameters for all stocks described by the 'prices_m' matrix,
        according to 'index' benchmark.

        :param prices_m: matrix of prices. Each column represents a stock.
        Each row represents price at successive time stamp
        :return: matrix with betas. Each column represents a stock. Each row
        represents beta at successive time period
        """
        returns_m = ml.divide(ml.subtract(prices_m[1:], prices_m[:-1]),
                              prices_m[:-1])
        index_m = ml.matrix(index).T
        index_returns_m = ml.divide(ml.subtract(index_m[1:], index_m[:-1]),
                                    index_m[:-1])
        result = ml.empty((k, prices_m.shape[1]))
        for i in range(k):
            for j in range(stock_amount):
                x = returns_m[:, j]
                y = index_returns_m[:, i]
                result[i, j] = np.cov(x, y, rowvar=0)[0][1]/np.var(y)
        return result

    def regime(reduced_returns_m):
        """
        Make a regime switch based on PCA standard deviation acceleration.

        :param reduced_returns_m: matrix of PCA. Each column represents a
        stock. Each row represents price at successive time stamp
        :return: one of the strings, 'momentum' - if the trend is meant to
        continue its movement, 'mean_reversion' - otherwise
        """
        cross_sect_vol = np.std(reduced_returns_m, axis=1)
        changes = cross_sect_vol[1:] - cross_sect_vol[:-1]
        squared_changes = np.square(changes)

        distance_times = reduced_returns_m.shape[0] - 1  # because there are
                                                         # T - 1 changes
        distance = np.zeros(distance_times)
        for t in range(distance_times):
            sum_amount = min(t + 1, H)
            for i in range(sum_amount):
                distance[t] += squared_changes[t - i, 0]
            distance[t] = np.sqrt(distance[t])
        signal = distance[1:] - distance[:-1]
        if np.max(signal) > 0:
            return 'momentum'
        else:
            return 'mean_reversion'

    prices_m = ml.matrix(prices).T

    # Preparing main matrices for further computations
    try:
        log_returns_m = np.log(ml.divide(prices_m[1:], prices_m[:-1]))
    except TypeError as e:
        raise WrongPricesError(prices_m)

    time_period, stock_amount = log_returns_m.shape
    mean_log_returns_m = ml.average(log_returns_m, axis=0)
    demeaned_log_returns_m = log_returns_m - mean_log_returns_m
    covariation_m = demeaned_log_returns_m.T * demeaned_log_returns_m

    # Count eigenvectors of covariation matrix and compose PCA matrix from them
    e_values, e_vectors = eig(covariation_m)
    abs_e_values = np.absolute(e_values)
    # TODO: np.absolute(e_vectors) or something like that
    indexed_abs_e_values = [(i, v) for i, v in enumerate(abs_e_values)]
    w = sorted(indexed_abs_e_values, reverse=False,
               key=lambda x: x[1])
    e_vectors_m = ml.empty((stock_amount, k))
    for j in range(k):
        e_vectors_m[:, j] = e_vectors[:, w[j][0]]

    # Main part: project returns on PCA universe
    reduced_returns_m = (e_vectors_m.T * demeaned_log_returns_m.T).T

    # Count beta parameters with respect to given benchmark index
    betas_m = betas(prices_m)

    time = time_period - time_shift
    if time < H:
        raise WrongParameterException("time_shift should be less than H")

    # Collect data from returns in one vector
    accumulated_reduced_returns = ml.zeros((1, k))
    for i in range(H):
        accumulated_reduced_returns += reduced_returns_m[time - 1 - i]

    # Make a prediction about further returns behaviour
    estimation = accumulated_reduced_returns * betas_m + \
                 mean_log_returns_m

    if regime_switcher:
        regime = regime(reduced_returns_m)
    else:
        regime = 'mean_reversion'

    # Finally, decide for each stock, whether we need to sell it as
    # overvalued or buy as undervalued. Other way around for momentum switch
    max_recent_log_returns = log_returns_m[-H:].max(0)
    result = []
    for i in range(stock_amount):
        if max_recent_log_returns[0, i] > estimation[0, i] + epsilon:
            if regime == 'mean_reversion':
                result.append('sell')
            else:
                result.append('buy')
        elif max_recent_log_returns[0, i] < estimation[0, i] - epsilon:
            if regime == 'mean_reversion':
                result.append('buy')
            else:
                result.append('sell')
        else:
            result.append(None)
    return result
Example #35
#!/usr/bin/python
# Generate absorbance data and save testing file for A->B
import random
from numpy import matrix, matlib
import csv
from frange import frange

t_0 = matrix(frange(-1e-6, 0, 5e-11)).transpose()
t_1 = matrix(frange(0, 1.9999500e-6, 5e-11)).transpose()
#t_1 = matrix(frange(0, 4000, 25)).transpose()
# the following constants work well
k = [2.2e7, 3.17e6] # rate constant
#k = [0.003, 0.0015]

a_0 = 1e-3 # initial concentration of A
c = matlib.empty([t_1.size, 3])
c[:,0] = a_0 * matlib.exp(-k[0] * t_1) # concentration of A
c[:,1] = a_0 * (k[0] / (k[1] - k[0])) * (matlib.exp(-k[0] * t_1) - matlib.exp(-k[1] * t_1)) # concentration of B
c[:,2] = a_0 - c[:,0] - c[:,1] # concentration of C

a = matlib.empty([3, 1])
a[0,0] = 1e3 # molar absorption of species A
a[1,0] = 4e2 # molar absorption of species B
a[2,0] = 7e2 # molar absorption of species C

y_1 = matlib.dot(c, a)
y_1 = y_1.transpose().tolist()[0]
y_1 = map(lambda y: y + (0.04 * random.random() - 0.02), y_1)

t_0 = t_0.transpose().tolist()[0]
t_1 = t_1.transpose().tolist()[0]
Example #36
 def mesh(self, lsid: int, lpid: int):
     from pygeom.geom3d import Vector
     from numpy.matlib import empty
     nums = len(self.sects)
     self.shts = []
     for i in range(nums - 1):
         a, b = i, i + 1
         secta = self.sects[a]
         sectb = self.sects[b]
         self.shts.append(LatticeSheet(secta, sectb))
     self.strps = []
     for sht in self.shts:
         lsid = sht.mesh_strips(lsid)
         self.strps += sht.strps
     pnts = [strp.pnt1 for strp in self.strps]
     pnts.append(self.strps[-1].pnt2)
     crds = [strp.crd1 for strp in self.strps]
     crds.append(self.strps[-1].crd2)
     lenb = len(pnts)
     lenc = len(self.cspc)
     self.pnts = empty((lenb, lenc + 1), dtype=Vector)
     for i in range(lenb):
         minx = pnts[i].x
         y = pnts[i].y
         z = pnts[i].z
         c = crds[i]
         cd = self.cspc[0][0]
         x = minx + cd * c
         self.pnts[i, 0] = Vector(x, y, z)
         for j in range(1, lenc + 1):
             cd = self.cspc[j - 1][-1]
             x = minx + cd * c
             self.pnts[i, j] = Vector(x, y, z)
     self.pnls = empty((lenb - 1, lenc), dtype=LatticePanel)
     for i, strp in enumerate(self.strps):
         for j in range(lenc):
             pnts = [
                 self.pnts[i, j], self.pnts[i + 1, j], self.pnts[i, j + 1],
                 self.pnts[i + 1, j + 1]
             ]
             cspc = self.cspc[j]
             pnl = LatticePanel(lpid, pnts, cspc, strp)
             self.pnls[i, j] = pnl
             lpid += 1
     if self.mirror:
         self.sgrp = [[], []]
         numstrp = len(self.strps)
         hlfstrp = int(numstrp / 2)
         for i in range(hlfstrp):
             lstrp = self.strps[numstrp - 1 - i]
             mstrp = self.strps[i]
             self.sgrp[0].append(lstrp.lsid)
             self.sgrp[1].append(mstrp.lsid)
     else:
         self.sgrp = [[]]
         numstrp = len(self.strps)
         for i in range(numstrp):
             lstrp = self.strps[numstrp - 1 - i]
             self.sgrp[0].append(lstrp.lsid)
     bpos = [0.0]
     for sht in self.shts:
         sht.inherit_panels()
         sht.set_control_panels()
         bpos.append(bpos[-1] + sht.width)
     if self.mirror:
         numsht = len(self.shts)
         wmir = bpos[int(numsht / 2)]
         for i in range(len(bpos)):
             bpos[i] = bpos[i] - wmir
     for i, sect in enumerate(self.sects):
         sect.bpos = bpos[i]
     for sht in self.shts:
         sht.set_strip_bpos()
     bmax = max(bpos)
     for func in self.funcs:
         func.set_spline(bmax)
         var = func.var
         if var == 'twist':
             var = '_ang'
         if self.mirror:
             for i in range(hlfstrp):
                 strp = self.strps[numstrp - 1 - i]
                 mstrp = self.strps[i]
                 bpos = strp.bpos
                 val = func.interpolate(bpos)
                 strp.__dict__[var] = val
                 mstrp.__dict__[var] = val
         else:
             for strp in self.strps:
                 bpos = strp.bpos
                 val = func.interpolate(bpos)
                 strp.__dict__[var] = val
     self.area = 0.0
     for sht in self.shts:
         if not sht.noload:
             self.area += sht.area
     return lsid, lpid
Example #37
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time    : 2019/9/21 14:23
# @Author  : ganliang
# @File    : npmatlib.py
# @Desc    : Matrices
import numpy as np
import numpy.matlib as ml

print ("empty")
print(ml.empty((3, 3), dtype=int, order='F'))
print(ml.empty((3, 3), dtype=int, order='C'))

print ("\nzeros")
print(ml.zeros((3, 3), dtype=int, order='C'))

print ("\nones")
print(ml.ones((3, 3), dtype=int, order='C'))

print ("\neye")
print(ml.eye(3, dtype=int, order='C'))

print ("\nidentity")
print(ml.identity(3, dtype=int))

print ("\nrand")
print(ml.rand(2, 3))

print ("\nmatrix")
a = np.arange(12).reshape(3, 4)
mr = ml.matrix(a)
Example #38
            finalState = oldState
    else:
        finalState = newState
        t = sum(finalState)
    #print(finalState)
    return finalState, t


def print20x20(
        array):  # prints the 20x20 row by row rather than as a single line
    for i in range(0, 20):
        print(array[i])


z = 0
m = empty((20, 20), int)
mplot = []
# creates the 20x20 canvas
for i in range(0, 20):
    #    m.append([])
    for j in range(0, 20):
        #        # m[i].append([])
        x = random.rand()
        if x > 0.5:
            m[i, j] = -1
        else:
            m[i, j] = 1
z = m.sum()  # net magnetization: sum of all spins in the lattice
#print20x20(m)
print(m)
print(z)
Example #39
#numpy.matlib is used for matrices instead of numpy ndarray objects
#matrices functions

#importing the numpy module
import numpy as np

#importing the numpy.matlib module
import numpy.matlib as nm

#matlib empty function
print(
    "\nprinting the matrix with uninitializing the values using empty function:"
)
print(nm.empty((3, 3)))

#matlib zeros function
print(
    "\n\nprinting the matrix with initializing the values with value \"0\" using zero function:"
)
print(nm.zeros((3, 3), dtype=int))

#matlib ones function
print(
    "\n\nprinting the matrix with initializing the values with value \"1\" using ones function:"
)
print(nm.ones((3, 4), dtype=int))

#matlib eye function
print("\n\nprinting the matrix with diagonal elements values equals to 1:")
print(nm.eye(n=4, k=0, dtype=int, M=5))
Example #40
def graph2featureVect(training_files,featureNames):
	graphs=[]
	for training_file in training_files:
		try:
			graphs.append( graph_analysis.input_graph(training_file))  	# open graph data and copy contents to memory
			print ("Succefully read in %s\n" %training_file)	
		except:
			print("Error reading in %s\n" %training_file)
			
	#### initialize the feature_vector to be zeros(#datapoints=sum(len(g.edge_list()) w label!=0 , len(featureNames))
	##	initialize the target_vector to be zeros(#datapoints=sum(len(g.edge_list()) w label!=0 , 1)
	MRI_labels=[]
	if 'mri_label_dist' in featureNames:
		MRI_labels=[l[0] for l in graphs[0].edge_property(graphs[0].edge_list()[0],'mri_label_dist')]
		feature_vector = Matlib.empty((0,len(featureNames)+len(MRI_labels)-1),float)
	else:
		feature_vector = Matlib.empty((0,len(featureNames)),float)
	target_vector = []	#Matlib.empty((0,1),float)
	labelNumerics = []
	edge_w_indx = {}
	
	g_cnt=-1
	e_cnt = 0
	for g in graphs:
		g_cnt =g_cnt+1
		for e in g.edge_list():
			if 'label' not in g.edge_properties(e).keys():
				print ("ERROR! The edge (%d,%d) in graph %s doesn't have label property.\nAborted!\n" %(e[0],e[1],training_files[g_cnt]))
				exit (0)
			#else:
			if g.edge_property(e,'label')>-1:	#including the edges with label 0
				#target_vector= numpy.vstack([target_vector, array(g.edge_property(e,'label'))])
				target_vector.append(int(g.edge_property(e,'label')))
				if g.edge_property(e,'label') not in labelNumerics and g.edge_property(e,'label')>0:
					labelNumerics.append(int(g.edge_property(e,'label')))
				features=[]
				for f in featureNames:
					if f not in g.edge_properties(e).keys():
						print ("ERROR! The edge (%d,%d) in graph %s doesn't have feature %s property.\nAborted!\n" %(e[0],e[1],training_files[g_cnt],f))
						exit (0)
					if not f== 'mri_label_dist' and not f=='rel_dir' and not f=='rel_diameter':	
						features.append(g.edge_property(e,f))
					elif f== 'mri_label_dist':
						for mr_l in MRI_labels:
							#if type(g.edge_property(e,'mri_label_dist'))==list:
								#print mr_l,":",e, " list ", training_files[g_cnt]
								#ind=[l[0] for l in g.edge_property(e,'mri_label_dist')].index(mr_l)
							#else:
								#print mr_l,":",e, ", ", training_files[g_cnt]
								#ind=[l[0] for l in g.edge_property(e,'mri_label_dist').tolist()].index(mr_l)
							mri_label_dist_list = [int(l[0]) for l in g.edge_property(e,'mri_label_dist')]
							if mr_l in mri_label_dist_list:
								ind = mri_label_dist_list.index(mr_l)
								features.append(g.edge_property(e,'mri_label_dist')[ind][1])
					elif f=='rel_dir' or f=='rel_diameter':
						rel_f = 1.0
						for rel_i in range(min(len(g.edge_property(e,f)),4)):	#we only consider up to 4 adjacent edges, if there are less 1.0 will be used
							rel_f = rel_f * g.edge_property(e,f)[rel_i]
						features.append(rel_f)	
							
				feature_vector	= numpy.vstack([feature_vector, array(features)])		#hstack #a = matrix([[10,20,30]]); a=append(a,[[1,2,3]],axis=0); a=append(a,[[15],[15]],axis=1)
			edge_w_indx[e_cnt]= e 
			e_cnt = e_cnt+1
			
	
	#### !!find neighbouring edges and in iterations find their
	
	adjacency_matrix = (1.0/len(labelNumerics))*Matlib.ones((len(labelNumerics),len(labelNumerics)+1),float)		#### smoothing (for 0s in the adj_matrix)! to be minimum of 1 occurance for the nieghbourhood!
	edge_neighboring_indx = {}
	
	g_cnt=-1
	e_cnt = 0
	for g in graphs:
		g_cnt =g_cnt+1
		for e in g.edge_list():
			#if 'label' not in g.edge_properties(e).keys():
				#print ("ERROR! The edge (%d,%d) in graph %s doesn't have label property.\nAborted!\n" %(e[0],e[1],training_files[g_cnt]))
				#exit (0)
			#else:
			if g.edge_property(e,'label')>0:
				indx0= labelNumerics.index(g.edge_property(e,'label'))
					
				e1_neighbours= g.vertices[e[0]].edges
				#e1_neighbours.remove(e2)
				for v in e1_neighbours:
					if v!=e[1]:
						neighbor_edge=tuple((min(e[0],v),max(e[0],v)))
						if g.edge_property(neighbor_edge,'label')>0:
							indx1= labelNumerics.index(g.edge_property(neighbor_edge,'label'))
							adjacency_matrix[indx0,indx1]= adjacency_matrix[indx0,indx1]+1
				e2_neighbours= g.vertices[e[1]].edges
				#e2_neighbours.remove(e1)
				for v in e2_neighbours:
					if v!=e[0]:
						neighbor_edge=tuple((min(e[1],v),max(e[1],v)))
						if g.edge_property(neighbor_edge,'label')>0:
							indx1= labelNumerics.index(g.edge_property(neighbor_edge,'label'))
							adjacency_matrix[indx0,indx1]= adjacency_matrix[indx0,indx1]+1
					
				if len(g.vertices[e[0]].edges)==1:		#end point
					adjacency_matrix[indx0,len(labelNumerics)] +=1
				if len(g.vertices[e[1]].edges)==1:		#end point
					adjacency_matrix[indx0,len(labelNumerics)] +=1					
			#find edge neighbouring indeces
			neighbor_indx=[]
			e1_neighbours= g.vertices[e[0]].edges
			for v in e1_neighbours:
				if v!=e[1]:
					neighbor_edge=tuple((min(e[0],v),max(e[0],v)))
					neighbor_indx.append(find_key(edge_w_indx, neighbor_edge, e_cnt))
			e2_neighbours= g.vertices[e[1]].edges
			for v in e2_neighbours:
				if v!=e[0]:
					neighbor_edge=tuple((min(e[1],v),max(e[1],v)))
					neighbor_indx.append(find_key(edge_w_indx, neighbor_edge, e_cnt))
					
			#if (len(neighbor_indx)!=2 and len(neighbor_indx)!=4):
				#print ("\n\nERROR! edge (%d,%d) has %d neighbouring vertices!" %(e[0],e[1],len(neighbor_indx)))
				#if (len(neighbor_indx)!=0):
					#print "Aborted!\n"
					#exit(0)
			edge_neighboring_indx[e_cnt]=neighbor_indx
			e_cnt = e_cnt+1
			
	#### adjacency matrix normalization 
	for i in range(adjacency_matrix.shape[0]):
		adjacency_matrix[i,:]= adjacency_matrix[i,:]/sum(adjacency_matrix[i,:])
		
	
	print_adj_mat (adjacency_matrix,labelNumerics)
	
	return labelNumerics, feature_vector, target_vector, adjacency_matrix, edge_w_indx, edge_neighboring_indx 	#featureVect
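The function above relies on a pattern worth calling out: a (0, k)-shaped matrix created with numpy.matlib.empty acts as an empty container whose rows are appended with numpy.vstack. A stand-alone sketch of just that pattern (the values below are made up for illustration):

import numpy as np
import numpy.matlib as Matlib

k = 4                                          # number of features per edge
feature_vector = Matlib.empty((0, k), float)   # zero rows to start with
for row in ([1.0, 2.0, 3.0, 4.0], [5.0, 6.0, 7.0, 8.0]):
    feature_vector = np.vstack([feature_vector, np.array(row)])
print(feature_vector.shape)                    # (2, 4)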
Example #41
0
def MG1FundamentalMatrix(A,
                         precision=1e-14,
                         maxNumIt=50,
                         method="ShiftPWCR",
                         maxNumRoot=2048,
                         shiftType="one"):
    """
    Returns matrix G corresponding to the M/G/1 type Markov
    chain defined by matrices A.
    
    Matrix G is the minimal non-negative solution of the 
    following matrix equation:
    
    .. math::
        G = A_0 + A_1 G + A_2 G^2 + A_3 G^3 + \dots.
    
    The implementation is based on [1]_, please cite it if
    you use this method.
    
    Parameters
    ----------
    A : length(M) list of matrices of shape (N,N)
        Matrix blocks of the M/G/1 type generator from
        0 to M-1.
    precision : double, optional
        Matrix G is computed iteratively up to this
        precision. The default value is 1e-14
    maxNumIt : int, optional
        The maximal number of iterations. The default value
        is 50.
    method : {"PWCR", "ShiftPWCR"}, optional
        The method used to solve the matrix equation:
        point-wise cyclic reduction ("PWCR") or its
        shifted variant ("ShiftPWCR"). The default is
        "ShiftPWCR".
    maxNumRoot : int, optional
        The maximal number of roots used by the point-wise
        evaluation. The default value is 2048.
    shiftType : {"one", "tau", "dbl"}, optional
        The type of shift applied when method is
        "ShiftPWCR". The default is "one".
    
    Returns
    -------
    G : matrix, shape (N,N)
        The G matrix of the M/G/1 type Markov chain.
        (G is stochastic.)
    
    References
    ----------
    .. [1] Bini, D. A., Meini, B., Steffé, S., Van Houdt,
           B. (2006, October). Structured Markov chains 
           solver: software tools. In Proceeding from the
           2006 workshop on Tools for solving structured 
           Markov chains (p. 14). ACM.
    """

    if not isinstance(A, np.ndarray):
        D = np.hstack(A)
    else:
        D = ml.matrix(A)

    if method == "ShiftPWCR":
        if butools.verbose:
            Dold = ml.matrix(D)
        D, drift, tau, v = MG1TypeShifts(D, shiftType)

    m = D.shape[0]
    M = D.shape[1]
    I = ml.eye(m)
    D = D.T
    D = np.vstack(
        (D,
         ml.zeros(
             ((2**(1 + math.floor(math.log(M / m - 1, 2))) + 1) * m - M, m))))

    # Step 0
    G = ml.zeros((m, m))
    Aeven = D[np.remainder(np.kron(np.arange(D.shape[0] /
                                             m), np.ones(m)), 2) == 0, :]
    Aodd = D[np.remainder(np.kron(np.arange(D.shape[0] /
                                            m), np.ones(m)), 2) == 1, :]

    Ahatodd = np.vstack((Aeven[m:, :], D[-m:, :]))
    Ahateven = Aodd

    Rj = D[m:2 * m, :]
    for i in range(3, M // m + 1):
        Rj = Rj + D[(i - 1) * m:i * m, :]
    Rj = la.inv(I - Rj)
    Rj = D[:m, :] * Rj

    numit = 0

    while numit < maxNumIt:
        numit += 1
        nj = Aodd.shape[0] // m
        if nj > 0:
            # Evaluate the 4 functions in the nj+1 roots using FFT
            # prepare for FFTs (such that they can be performed in 4 calls)

            temp1 = np.reshape(Aodd[:nj * m, :].T, (m * m, nj), order='F').T
            temp2 = np.reshape(Aeven[:nj * m, :].T, (m * m, nj), order='F').T
            temp3 = np.reshape(Ahatodd[:nj * m, :].T, (m * m, nj), order='F').T
            temp4 = np.reshape(Ahateven[:nj * m, :].T, (m * m, nj),
                               order='F').T
            # FFTs
            temp1 = fft(temp1, nj, 0)
            temp2 = fft(temp2, nj, 0)
            temp3 = fft(temp3, nj, 0)
            temp4 = fft(temp4, nj, 0)
            # reform the 4*nj matrices
            temp1 = np.reshape(temp1.T, (m, m * nj), order='F').T
            temp2 = np.reshape(temp2.T, (m, m * nj), order='F').T
            temp3 = np.reshape(temp3.T, (m, m * nj), order='F').T
            temp4 = np.reshape(temp4.T, (m, m * nj), order='F').T

            # Next, we perform a point-wise evaluation of (6.20) - Thesis Meini
            Ahatnew = ml.empty((nj * m, m), dtype=complex)
            Anew = ml.empty((nj * m, m), dtype=complex)
            for cnt in range(1, nj + 1):
                Ahatnew[(cnt - 1) * m:cnt *
                        m, :] = temp4[(cnt - 1) * m:cnt * m, :] + temp2[
                            (cnt - 1) * m:cnt * m, :] * la.inv(I - temp1[
                                (cnt - 1) * m:cnt * m, :]) * temp3[
                                    (cnt - 1) * m:cnt * m, :]
                Anew[(cnt - 1) * m:cnt * m, :] = cmath.exp(
                    -(cnt - 1) * 2.0j * math.pi / nj) * temp1[
                        (cnt - 1) * m:cnt * m, :] + temp2[
                            (cnt - 1) * m:cnt * m, :] * la.inv(I - temp1[
                                (cnt - 1) * m:cnt * m, :]) * temp2[
                                    (cnt - 1) * m:cnt * m, :]

            # We now invert the FFTs to get Pz and Phatz
            # prepare for IFFTs (in 2 calls)
            Ahatnew = np.reshape(Ahatnew[:nj * m, :].T, (m * m, nj),
                                 order='F').T
            Anew = np.reshape(Anew[:nj * m, :].T, (m * m, nj), order='F').T
            # IFFTs
            Ahatnew = np.real(ifft(Ahatnew, nj, 0))
            Anew = np.real(ifft(Anew, nj, 0))
            # reform matrices Pi and Phati
            Ahatnew = np.reshape(Ahatnew.T, (m, m * nj), order='F').T
            Anew = np.reshape(Anew.T, (m, m * nj), order='F').T
        else:  # series Aeven, Aodd, Ahateven and Ahatodd are constant
            temp = Aeven * la.inv(I - Aodd)
            Ahatnew = Ahateven + temp * Ahatodd
            Anew = np.vstack((temp * Aeven, Aodd))

        nAnew = 0
        deg = Anew.shape[0] // m
        for i in range(deg // 2, deg):
            nAnew = max(nAnew, la.norm(Anew[i * m:(i + 1) * m, :], np.inf))

        nAhatnew = 0
        deghat = Ahatnew.shape[0] // m
        for i in range(deghat // 2, deghat):
            nAhatnew = max(nAhatnew,
                           la.norm(Ahatnew[i * m:(i + 1) * m, :], np.inf))

        # c) the test
        while (nAnew > nj * precision
               or nAhatnew > nj * precision) and nj < maxNumRoot:

            nj *= 2
            stopv = min(nj, Aodd.shape[0] // m)  # integer block count (used as a slice bound)

            # prepare for FFTs
            temp1 = np.reshape(Aodd[:stopv * m, :].T, (m * m, stopv),
                               order='F').T
            temp2 = np.reshape(Aeven[:stopv * m, :].T, (m * m, stopv),
                               order='F').T
            temp3 = np.reshape(Ahatodd[:stopv * m, :].T, (m * m, stopv),
                               order='F').T
            temp4 = np.reshape(Ahateven[:stopv * m, :].T, (m * m, stopv),
                               order='F').T
            # FFTs
            temp1 = fft(temp1, nj, 0)
            temp2 = fft(temp2, nj, 0)
            temp3 = fft(temp3, nj, 0)
            temp4 = fft(temp4, nj, 0)
            # reform the 4*(nj+1) matrices
            temp1 = np.reshape(temp1.T, (m, m * nj), order='F').T
            temp2 = np.reshape(temp2.T, (m, m * nj), order='F').T
            temp3 = np.reshape(temp3.T, (m, m * nj), order='F').T
            temp4 = np.reshape(temp4.T, (m, m * nj), order='F').T

            # Next, we perform a point-wise evaluation of (6.20) - Thesis Meini
            Ahatnew = ml.empty((nj * m, m), dtype=complex)
            Anew = ml.empty((nj * m, m), dtype=complex)
            for cnt in range(1, nj + 1):
                Ahatnew[(cnt - 1) * m:cnt *
                        m, :] = temp4[(cnt - 1) * m:cnt * m, :] + temp2[
                            (cnt - 1) * m:cnt * m, :] * la.inv(I - temp1[
                                (cnt - 1) * m:cnt * m, :]) * temp3[
                                    (cnt - 1) * m:cnt * m, :]
                Anew[(cnt - 1) * m:cnt *
                     m, :] = cmath.exp(-(cnt - 1) * 2j * math.pi / nj) * temp1[
                         (cnt - 1) * m:cnt * m, :] + temp2[
                             (cnt - 1) * m:cnt * m, :] * la.inv(I - temp1[
                                 (cnt - 1) * m:cnt * m, :]) * temp2[
                                     (cnt - 1) * m:cnt * m, :]

            # We now invert the FFTs to get Pz and Phatz
            # prepare for IFFTs
            Ahatnew = np.reshape(Ahatnew[:nj * m, :].T, (m * m, nj),
                                 order='F').T
            Anew = np.reshape(Anew[:nj * m, :].T, (m * m, nj), order='F').T
            # IFFTs
            Ahatnew = ml.matrix(np.real(ifft(Ahatnew, nj, 0)))
            Anew = ml.matrix(np.real(ifft(Anew, nj, 0)))
            # reform matrices Pi and Phati
            Ahatnew = np.reshape(Ahatnew.T, (m, m * nj), order='F').T
            Anew = np.reshape(Anew.T, (m, m * nj), order='F').T

            vec1 = ml.zeros((1, m))
            vec2 = ml.zeros((1, m))
            for i in range(1, Anew.shape[0] // m):
                vec1 += i * np.sum(Anew[i * m:(i + 1) * m, :], 0)
                vec2 += i * np.sum(Ahatnew[i * m:(i + 1) * m, :], 0)

            nAnew = 0
            deg = Anew.shape[0] // m
            for i in range(deg // 2, deg):
                nAnew = max(nAnew, la.norm(Anew[i * m:(i + 1) * m, :], np.inf))

            nAhatnew = 0
            deghat = Ahatnew.shape[0] // m
            for i in range(deghat // 2, deghat):
                nAhatnew = max(nAhatnew,
                               la.norm(Ahatnew[i * m:(i + 1) * m, :], np.inf))
        if (nAnew > nj * precision
                or nAhatnew > nj * precision) and nj >= maxNumRoot:
            print("MaxNumRoot reached, accuracy might be affected!")

        if nj > 2:
            Anew = Anew[:m * nj // 2, :]
            Ahatnew = Ahatnew[:m * nj // 2, :]

        # compute Aodd, Aeven, ...
        Aeven = Anew[
            np.remainder(np.kron(np.arange(Anew.shape[0] /
                                           m), np.ones(m)), 2) == 0, :]
        Aodd = Anew[
            np.remainder(np.kron(np.arange(Anew.shape[0] /
                                           m), np.ones(m)), 2) == 1, :]

        Ahateven = Ahatnew[
            np.remainder(np.kron(np.arange(Ahatnew.shape[0] /
                                           m), np.ones(m)), 2) == 0, :]
        Ahatodd = Ahatnew[
            np.remainder(np.kron(np.arange(Ahatnew.shape[0] /
                                           m), np.ones(m)), 2) == 1, :]

        if butools.verbose == True:
            if method == "PWCR":
                print("The Point-wise evaluation of Iteration ", numit,
                      " required ", nj, " roots")
            else:
                print("The Shifted PWCR evaluation of Iteration ", numit,
                      " required ", nj, " roots")

        # test stopcriteria
        if method == "PWCR":
            Rnewj = Anew[m:2 * m, :]
            for i in range(3, Anew.shape[0] // m + 1):
                Rnewj = Rnewj + Anew[(i - 1) * m:i * m, :]
            Rnewj = la.inv(I - Rnewj)
            Rnewj = Anew[:m, :] * Rnewj

            if np.max(np.abs(Rj - Rnewj)) < precision or np.max(
                    np.sum(I - Anew[:m, :] * la.inv(I - Anew[m:2 * m, :]),
                           0)) < precision:
                G = Ahatnew[:m, :]
                for i in range(2, Ahatnew.shape[0] // m + 1):
                    G = G + Rnewj * Ahatnew[(i - 1) * m:i * m, :]
                G = D[:m, :] * la.inv(I - G)
                break

            Rj = Rnewj
            # second condition tests whether Ahatnew is degree 0 (numerically)
            if la.norm(Anew[:m, :m]) < precision or np.sum(
                    Ahatnew[m:, :]) < precision or np.max(
                        np.sum(I - D[:m, :] * la.inv(I - Ahatnew[:m, :]),
                               0)) < precision:
                G = D[0:m, :] * la.inv(I - Ahatnew[:m, :])
                break
        else:
            Gold = G
            G = D[:m, :] * la.inv(I - Ahatnew[:m, :])
            if la.norm(G - Gold, np.inf) < precision or la.norm(
                    Ahatnew[m:, :], np.inf) < precision:
                break

    if numit == maxNumIt and not G.any():  # G was never updated (still all zeros)
        print("Maximum Number of Iterations reached!")
        G = D[:m, :] * la.inv(I - Ahatnew[:m, :])

    G = G.T

    if method == "ShiftPWCR":
        if shiftType == "one":
            G = G + (drift < 1) * ml.ones((m, m)) / m
        elif shiftType == "tau":
            G = G + (drift > 1) * tau * v * ml.ones((1, m))
        elif shiftType == "dbl":
            G = G + (drift < 1) * ml.ones(
                (m, m)) / m + (drift > 1) * tau * v * ml.ones((1, m))

    if butools.verbose:
        if method == "PWCR":
            D = D.T
        else:
            D = Dold
        temp = D[:, -m:]
        for i in range(D.shape[1] // m - 1, 0, -1):
            temp = D[:, (i - 1) * m:i * m] + temp * G
        res_norm = la.norm(G - temp, np.inf)
        print("Final Residual Error for G: ", res_norm)

    return G
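As a sanity check on the equation quoted in the docstring, G is the fixed point of G = A0 + A1*G + A2*G^2 + .... A minimal stand-alone sketch of the naive functional iteration for that fixed point follows; the toy blocks are made up (their sum is stochastic), and the cyclic-reduction code above converges far faster on real problems:

import numpy as np

A = [np.array([[0.4, 0.1], [0.2, 0.2]]),   # A0
     np.array([[0.1, 0.1], [0.1, 0.2]]),   # A1
     np.array([[0.2, 0.1], [0.2, 0.1]])]   # A2; rows of A0+A1+A2 sum to 1
G = np.zeros((2, 2))
for _ in range(2000):
    # G <- A0 + A1*G + A2*G^2, starting from G = 0
    G_new = sum(Ai @ np.linalg.matrix_power(G, i) for i, Ai in enumerate(A))
    if np.abs(G_new - G).max() < 1e-12:
        break
    G = G_new
print(G)   # approximates the minimal non-negative solution; rows sum to ~1 here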
Example #42
0
def Kent_calculation (Lpos):	#receiving Lpos= feature_vector[indx,axialfeatureidx], Li=[lx,ly,lz]) of type numpy.matrix
	print "Kent_calculation:"
	for i in range(Lpos.shape[0]):
		Lpos[i] = Lpos[i]/numpy.linalg.norm(Lpos[i])

	

	Lneg=-Lpos
	L=bmat('Lpos; Lneg')	#?matrix(numpy.negative(array(Lpos)))	#[a b; c d] {MATLAB}= vstack([hstack([a,b]),hstack([c,d])]) {numpy.array}= bmat('a b; c d') {numpy.matrix}		
#f(x) = 1/c(k,beta) exp(k*gamma1*x + beta*((gamma2*x)^2 - (gamma3*x)^2))
#g(x) = exp(k*gamma1*x + beta*((gamma2*x)^2 - (gamma3*x)^2))
#LL = log likelihood = k*gamma1*x + beta*((gamma2*x)^2 - (gamma3*x)^2)
#dLL/dk = gamma1*x
#dLL/dbeta = (gamma2*x)^2 - (gamma3*x)^2

	T= (L.transpose())*L
	D,V = numpy.linalg.eig(T)		# a: array_like shape (M, M), D:eigenvalues ndarray shape (M,) NOT ordered, V: eignevectors ndarray shape (M, M) The normalized (unit “length”) eigenvectors, column V[:,i] is the eigenvector corresponding to the eigenvalue D[i]			 
	#i = numpy.argsort(D)	#Returns the indices that would sort an array, ascending
	i = numpy.argsort(D)[::-1]	#indices that sort the eigenvalues in descending order
	Tau = D[i]	#sorted eigenvals
	t = V[:,i]	#sorted a=eigenvects
	Tau_bar = Tau/float(L.shape[0])
	gamma1 = t[:,0]
	gamma2 = t[:,1]
	gamma3 = t[:,2]
	
	noOutlierL = Matlib.empty((0,L.shape[1]),float)
	for i in range(L.shape[0]):
		if ( arccos((dot(L[i,:],gamma1))/(linalg.norm(L[i,:])*linalg.norm(gamma1))) < (pi/4.0)):
			noOutlierL = numpy.vstack([noOutlierL, array(L[i,:])])
	
	
	#### run EM to find k and beta
	#### 1.initialize
	k0 = 10
	beta0 = 0 
	v0 = [k0,beta0]
	v = scipy.optimize.fmin(fp,v0, args=(noOutlierL,gamma1,gamma2,gamma3),maxiter=10000,maxfun=10000)
	
	k=v[0]
	beta=v[1]
	
	if (k==0):
		k=1
	elif (k<0):
		k=-k
	if (beta<0):
		beta=-beta
	#we should have 2*abs(b) < abs(k)
	if (2*beta >= k):
		beta = (k-1)/2
	if (beta <0):
		beta = 0  

	
	ck = sqrt((k-2*beta)*(k+2*beta))/(2*pi*exp(k))
	
	if math.isnan(ck) or math.isnan(k) or math.isnan(beta) or math.isinf(ck) or math.isinf(k) or math.isinf(beta) or (k <0) or (beta< 0):
		print "Error!\n"
		print "ck :", ck , "\nbeta : ", beta, "\nk :", k, "\ngamma :", gamma1, gamma2, gamma3
		#print "\nAborted!"
		#exit (0)
		#put unit kent distribution there!
		k = 0.1
		beta = 0
		
		

	#print type(gamma1), " ", gamma1
	return [k,beta,ck,gamma1,gamma2,gamma3]
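The objective fp minimized by scipy.optimize.fmin above is not included in this snippet. Purely as an assumption about what such a helper could look like, here is a hypothetical negative log-likelihood consistent with the density and normalizing constant written in the comments above (the name fp_sketch and everything in it are illustrative, not the original code):

import numpy as np

def fp_sketch(v, L, gamma1, gamma2, gamma3):
    # Hypothetical stand-in for the missing 'fp': negative log-likelihood of
    # f(x) = c(k,beta)^-1 * exp(k*g1.x + beta*((g2.x)^2 - (g3.x)^2))
    k, beta = v
    if k <= 2.0 * abs(beta):          # outside the valid Kent parameter region
        return np.inf
    # same large-k normalizing constant as used in Kent_calculation above
    log_c = np.log(2.0 * np.pi) + k - 0.5 * np.log((k - 2.0 * beta) * (k + 2.0 * beta))
    x = np.asarray(L)
    g1, g2, g3 = (np.asarray(g).ravel() for g in (gamma1, gamma2, gamma3))
    ll = k * x.dot(g1) + beta * (x.dot(g2) ** 2 - x.dot(g3) ** 2) - log_c
    return -np.sum(ll)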
Example #43
0
import numpy as np
import numpy.matlib as matlib
from numpy import nan
from numpy.linalg import norm as norm
import matplotlib.pyplot as plt
import plot_utils as plut
import time
import romeo_conf as conf
from tsid_biped import TsidBiped

print "".center(conf.LINE_WIDTH, '#')
print " Test Task Space Inverse Dynamics ".center(conf.LINE_WIDTH, '#')
print "".center(conf.LINE_WIDTH, '#'), '\n'

tsid = TsidBiped(conf)

N = conf.N_SIMULATION
com_pos = matlib.empty((3, N)) * nan
com_vel = matlib.empty((3, N)) * nan
com_acc = matlib.empty((3, N)) * nan

com_pos_ref = matlib.empty((3, N)) * nan
com_vel_ref = matlib.empty((3, N)) * nan
com_acc_ref = matlib.empty((3, N)) * nan
com_acc_des = matlib.empty(
    (3, N)) * nan  # acc_des = acc_ref - Kp*pos_err - Kd*vel_err

offset = tsid.robot.com(tsid.formulation.data())
amp = np.matrix([0.0, 0.05, 0.0]).T
two_pi_f = 2 * np.pi * np.matrix([0.0, 0.5, 0.0]).T
two_pi_f_amp = np.multiply(two_pi_f, amp)
two_pi_f_squared_amp = np.multiply(two_pi_f, two_pi_f_amp)
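The snippet stops before the simulation loop. The lines below are only a sketch of how the sinusoidal CoM reference and the acc_des formula from the comment above are typically evaluated at a single time instant; the time value, the gains and the measured-state placeholders are assumptions, while offset, amp and the two_pi_f* terms come from the snippet:

t, kp, kd = 0.0, 100.0, 20.0                            # hypothetical time instant and PD gains
com_pos_ref_t = offset + np.multiply(amp, np.sin(two_pi_f * t))
com_vel_ref_t = np.multiply(two_pi_f_amp, np.cos(two_pi_f * t))
com_acc_ref_t = -np.multiply(two_pi_f_squared_amp, np.sin(two_pi_f * t))
com_pos_t = offset                                      # stand-in for the measured CoM position
com_vel_t = matlib.zeros((3, 1))                        # stand-in for the measured CoM velocity
com_acc_des_t = com_acc_ref_t - kp * (com_pos_t - com_pos_ref_t) - kd * (com_vel_t - com_vel_ref_t)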
Example #44
0
    if variable == 0:
        filepath = './Run' + str(N) + '.dat'
        with open(filepath, 'r') as myfile:
            result = pickle.load(myfile)
    else:
        filepath = './Run' + str(N) + '.dat'
        with open(filepath, 'r') as myfile:
            temp = pickle.load(myfile)
        result = temp[variable]
    return result


if __name__ == '__main__':
    numRuns = 10

    e_qq_mat = npm.empty((10000, 10))
    f_qq_mat = npm.empty((10001, 10))

    for i in range(numRuns):
        result = viewResult(i)
        figure(1)
        plot(result['e_qq'])
        e_qq_mat[:, i] = result['e_qq']

        figure(2)
        plot(result['f_qq'])
        f_qq_mat[:, i] = result['f_qq']

    figure(1)
    title('Deviation of true tracked subspace')
    figure(2)
Example #45
0
'''
Transposing a matrix

Besides the numpy.transpose function, NumPy arrays also provide the T attribute
for swapping dimensions. For example, the T attribute turns an m-by-n matrix
into an n-by-m matrix.
'''
a = np.arange(12).reshape(3, 4)

print('Original array:')
print(a)

print('Transposed array:')
print(a.T)
print('\n')
'''
matlib.empty()
The matlib.empty() function returns a new matrix without initializing its
entries. The syntax is:
numpy.matlib.empty(shape, dtype, order)

Parameters:

    shape: integer or tuple of integers defining the shape of the new matrix
    dtype: optional, the data type of the matrix entries
    order: C (row-major) or F (column-major)

numpy.matlib.zeros()
The numpy.matlib.zeros() function creates a matrix filled with 0.

numpy.matlib.ones()
The numpy.matlib.ones() function creates a matrix filled with 1.
'''
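A short sketch of the three calls described above, including the optional dtype and order arguments:

import numpy.matlib as matlib

print(matlib.empty((2, 3)))                # entries are uninitialized (arbitrary values)
print(matlib.zeros((2, 3), dtype=int))     # 2x3 matrix of integer zeros
print(matlib.ones((2, 3), order='F'))      # 2x3 matrix of ones, column-major (Fortran) storage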
Example #46
0
def runBatch(numRuns,BatchName,param):
    """
    Run a batch of 'numRuns' initial conditions for FRHH using the
    'param' parameters.
    """
    
    # Directory in which to create new data directory
    mainPath = 'C:/DataSets/Results/Fraust/'   
    os.chdir(mainPath)
             
    # Results matrix
    e_qq_mat = npm.empty((10000,numRuns))
    f_qq_mat = npm.empty((10001,numRuns))    
    g_qq_mat = npm.empty((10000,numRuns))    
     
    for i in range(numRuns):
        # Generate artificial data streams
        streams = genSimSignalsA(i, -3.0)
        
        # Run Basic Fast row householder subspace tracker
        Q_t, S_t = frhh_min(streams, param['alpha'], param['rr'])
        
        #Q_t, S_t, rr, E_t, E_dash_t, hid_var, z_dash, RSRE, no_inp_count, \
        #no_inp_marker = FRHH32(streams, param['rr'], param['alpha'], param['sci'])
         
        # Calculate deviations from orthogonality and subspace
        e_qq, f_qq, g_qq  = plotEqqFqqA(streams, Q_t, param['alpha'])
     
        # Store results in Dictionary
        dic_name = 'res_' + str(i) # string of the name of the Dictionary
        vars()[dic_name] = {'param' : param,
                            'Q_t' : Q_t,
                            'S_t': S_t,
                            'e_qq' : e_qq,
                            'f_qq' : f_qq,
                            'g_qq' : g_qq} 
                            
                           # 'rr' : rr,
                           # 'E_t' : E_t, 
                           # 'E_dash_t' : E_dash_t, 
                           # 'hid_var' : hid_var, 
                           # 'z_dash' : z_dash,
                           # 'RSRE' : RSRE, 
                           # 'no_inp_count' : no_inp_count,
                           # 'no_inp_marker' : no_inp_marker,
       
        myDic =  vars()[dic_name]
       
        e_qq_mat[:,i] = e_qq   
        f_qq_mat[:,i] = f_qq
        g_qq_mat[:,i] = g_qq
        
        # Save data files  
        mypath = os.path.join(mainPath, BatchName)
    
        if not os.path.exists(mypath):
            os.makedirs(mypath)    
    
        runfile = mypath + '/Run' + str(i) + '.dat'
        with open(runfile, 'w') as outfile:  
            pickle.dump(myDic, outfile)
        
        streamfile = mypath + '/stream_inputs'  + str(i) + '.dat'
        streams.tofile(streamfile)
        
        print 'finished ', i, 'th batch'
        
        del myDic, vars()[dic_name] # saves memory
    
    # Save summary matrices
    e_qq_path = mypath + '/e_qq_mat.dat'
    e_qq_mat.tofile(e_qq_path)
    f_qq_path = mypath + '/f_qq_mat.dat'
    f_qq_mat.tofile(f_qq_path)
    g_qq_path = mypath + '/g_qq_mat.dat'
    g_qq_mat.tofile(g_qq_path)

    return e_qq_mat, f_qq_mat, g_qq_mat