Code example #1
File: util.py Project: 3lectrologos/LimnoPlanner
 def __init__(self, x1, x2, fclass=None):
     super(Graph, self).__init__()
     self.xres = x1.shape[0]
     self.yres = x1.shape[1]
     self.fclass = fclass
     n = 2*x1.size - 2*self.yres
     self.add_nodes_from(range(1, n+1))
     x1 = np.vstack((x1.reshape((-1, 1)), x1[-2:0:-1].reshape((-1, 1))))
     x2 = np.vstack((x2.reshape((-1, 1)), x2[-2:0:-1].reshape((-1, 1))))
     self.x = np.hstack((x1, x2))
     self.pos = dict(zip(range(1, n+1),
                         zip(x1.T.tolist()[0], x2.T.tolist()[0])))
     self.full = nx.DiGraph(self)
     for i in range(1, n+1):
         for j in self.column(i):
             dx1 = np.absolute(self.pos[j][0] - self.pos[i][0])
             dx2 = np.absolute(self.pos[j][1] - self.pos[i][1])
             if dx2 <= dx1 / _GRAPH_ASPECT_RATIO:
                 self.full.add_edge(i, j)
     tmp = 1
     self.basic = set()
     step = self.yres // _GRAPH_YRES_BASIC  # integer step for the slice below
     while tmp < n+1:
         self.basic = self.basic | set(self.column(tmp)[0::step])
         tmp = tmp + self.yres
     self.update_active()
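
A note on the two vstack calls above: they append the interior rows of the grid, in reverse order, after the flattened grid, which is where the node count n = 2*x1.size - 2*self.yres comes from. A minimal, self-contained sketch of that stacking on a small grid (plain NumPy only; the Graph class and the _GRAPH_* constants are not needed here):

import numpy as np

# A small 4 x 3 grid of coordinates (rows = xres, columns = yres).
x1 = np.arange(12.0).reshape(4, 3)

# Same stacking as in the constructor above: the flattened grid followed by
# the interior rows (indices xres-2 down to 1) in reverse order.
stacked = np.vstack((x1.reshape((-1, 1)), x1[-2:0:-1].reshape((-1, 1))))

n = 2 * x1.size - 2 * x1.shape[1]
print(stacked.shape)   # (18, 1)
print(n)               # 18, matching the node count used above
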
Code example #2
File: util.py Project: 3lectrologos/LimnoPlanner
 def __init__(self, x1, x2, fclass=None):
     super(Graph, self).__init__()
     self.xres = x1.shape[0]
     self.yres = x1.shape[1]
     self.fclass = fclass
     n = 2 * x1.size - 2 * self.yres
     self.add_nodes_from(range(1, n + 1))
     x1 = np.vstack((x1.reshape((-1, 1)), x1[-2:0:-1].reshape((-1, 1))))
     x2 = np.vstack((x2.reshape((-1, 1)), x2[-2:0:-1].reshape((-1, 1))))
     self.x = np.hstack((x1, x2))
     self.pos = dict(
         zip(range(1, n + 1), zip(x1.T.tolist()[0],
                                  x2.T.tolist()[0])))
     self.full = nx.DiGraph(self)
     for i in range(1, n + 1):
         for j in self.column(i):
             dx1 = np.absolute(self.pos[j][0] - self.pos[i][0])
             dx2 = np.absolute(self.pos[j][1] - self.pos[i][1])
             if dx2 <= dx1 / _GRAPH_ASPECT_RATIO:
                 self.full.add_edge(i, j)
     tmp = 1
     self.basic = set()
     step = self.yres // _GRAPH_YRES_BASIC  # integer step for the slice below
     while tmp < n + 1:
         self.basic = self.basic | set(self.column(tmp)[0::step])
         tmp = tmp + self.yres
     self.update_active()
Code example #3
    def __init__(self):
        # model dimensions
        self.xDim = 2 # state space dimension
        self.uDim = 2 # control input dimension
        self.qDim = 2 # dynamics noise dimension
        self.zDim = 2 # observation dimension
        self.rDim = 2 # observation noise dimension

        # belief space dimension
        # note that we only store the lower (or upper) triangular portion
        # of the covariance matrix to eliminate redundancy
        self.bDim = int(self.xDim + self.xDim*((self.xDim+1)/2.))

        self.dT = 1. # time step for dynamics function
        self.T = 15 # number of time steps in trajectory

        self.alpha_belief = 10. # weighting factor for penalizing uncertainty at intermediate time steps
        self.alpha_final_belief = 10. # weighting factor for penalizing uncertainty at final time step
        self.alpha_control = 1. # weighting factor for penalizing control cost
        
        self.xMin = ml.vstack([-5,-3]) # minimum limits on state (xMin <= x)
        self.xMax = ml.vstack([5,3]) # maximum limits on state (x <= xMax)
        self.uMin = ml.vstack([-1,-1]) # minimum limits on control (uMin <= u)
        self.uMax = ml.vstack([1,1]) # maximum limits on control (u <= uMax)

        self.Q = ml.eye(self.qDim) # dynamics noise variance
        self.R = ml.eye(self.rDim) # observation noise variance

        self.start = ml.zeros([self.xDim,1]) # start state, OVERRIDE
        self.goal = ml.zeros([self.xDim,1]) # end state, OVERRIDE

        self.sqpParams = LightDarkSqpParams()
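
The bDim comment above works out to bDim = xDim + xDim*(xDim+1)/2 = 2 + 3 = 5 for this model: the state mean plus the lower-triangular half of the covariance. A minimal sketch of one possible mean-plus-lower-triangle packing (the exact layout used by the planner is not shown in this snippet, so treat the ordering here as an assumption):

import numpy as np

xDim = 2
bDim = int(xDim + xDim * (xDim + 1) / 2.)   # 2 + 3 = 5

x = np.array([1.0, -0.5])                   # state mean
Sigma = np.array([[0.20, 0.05],
                  [0.05, 0.10]])            # symmetric covariance

# Pack: mean followed by the lower-triangular entries of the covariance.
il = np.tril_indices(xDim)
b = np.concatenate([x, Sigma[il]])
assert b.shape[0] == bDim

# Unpack: rebuild the symmetric covariance from the stored triangle.
Sigma_rec = np.zeros((xDim, xDim))
Sigma_rec[il] = b[xDim:]
Sigma_rec = Sigma_rec + Sigma_rec.T - np.diag(np.diag(Sigma_rec))
assert np.allclose(Sigma_rec, Sigma)
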
Code example #4
    def __init__(self):
        # model dimensions
        self.xDim = 2 # state space dimension
        self.uDim = 2 # control input dimension
        self.qDim = 2 # dynamics noise dimension
        self.zDim = 2 # observation dimension
        self.rDim = 2 # observation noise dimension

        # belief space dimension
        # note that we only store the lower (or upper) triangular portion
        # of the covariance matrix to eliminate redundancy
        self.bDim = int(self.xDim + self.xDim*((self.xDim+1)/2.))

        self.dT = 1. # time step for dynamics function
        self.T = 15 # number of time steps in trajectory

        self.alpha_belief = 10. # weighting factor for penalizing uncertainty at intermediate time steps
        self.alpha_final_belief = 10. # weighting factor for penalizing uncertainty at final time step
        self.alpha_control = 1. # weighting factor for penalizing control cost
        
        self.xMin = ml.vstack([-5,-3]) # minimum limits on state (xMin <= x)
        self.xMax = ml.vstack([5,3]) # maximum limits on state (x <= xMax)
        self.uMin = ml.vstack([-1,-1]) # minimum limits on control (uMin <= u)
        self.uMax = ml.vstack([1,1]) # maximum limits on control (u <= uMax)

        self.Q = ml.eye(self.qDim) # dynamics noise variance
        self.R = ml.eye(self.rDim) # observation noise variance

        self.start = ml.zeros([self.xDim,1]) # start state, OVERRIDE
        self.goal = ml.zeros([self.xDim,1]) # end state, OVERRIDE

        self.sqpParams = LightDarkSqpParams()
Code example #5
 def increase_output_size(self, nb_added_output = 1):
     """
         Increases the number of outputs the network produces
     """
     
     self.output_size += nb_added_output
     self.output_weights = npmat.vstack([self.output_weights, npmat.zeros((nb_added_output, self.state_size))])
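
The resizing idiom here is to grow a weight matrix with zero-initialised rows (increase_input_size in example #10 below does the same with columns), so the existing mappings are untouched and the new outputs start with no contribution. A standalone sketch of the same pattern with numpy.matlib, independent of the EchoStateNetwork class:

import numpy.matlib as npmat

state_size, output_size = 4, 2
output_weights = npmat.rand(output_size, state_size)

# Add one new output: append a zero row, leaving existing outputs unchanged.
nb_added_output = 1
output_weights = npmat.vstack(
    [output_weights, npmat.zeros((nb_added_output, state_size))])

print(output_weights.shape)   # (3, 4)
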
Code example #6
File: solid.py Project: aJuvenn/Quike
    def _update_orientation(self, total_force_moment, dt):
        """
            Integrates the provided force momentum to change angular momentum,
            uses solid inertia to compute the angular speed and uses it to
            update the quaternion representing the object orientation
        """

        total_force_moment = Vect(total_force_moment)

        # Integration of the force momentum to get the angular momentum
        self.angular_momentum += dt * total_force_moment

        # Rotation matrix corresponding to the current object angle
        R = self.rotation_matrix

        # Inertia matrix inverse is obtained from the initial one by a change of basis
        current_inertia_matrix_inverse = R * self.initial_inertia_matrix_inverse * R.T

        # Angular speed vector is obtained from the momentum and the inertia
        w = current_inertia_matrix_inverse * self.angular_momentum
        w_as_quaternion = npmat.vstack([0., w])

        # Rotation quaternion integration from a derivative computed with a quaternion multiplication
        self.rotation_quaternion += dt * 0.5 * quaternion_mult(
            w_as_quaternion, self.rotation_quaternion)

        # Normalization of the quaternion to ensure numerical stability through time steps
        self.rotation_quaternion /= npmat.linalg.norm(self.rotation_quaternion)

        # Update of the rotation matrix from the new quaternion
        self.rotation_matrix = rotation_matrix_from_quaternion(
            self.rotation_quaternion)
Code example #7
    def __new__(cls, c):
        """
        """

        c = c.view(matlib.matrix).reshape(1, -1)

        T = matlib.vstack((
            matlib.hstack((matlib.identity(c.size), c)),
            matlib.hstack((matlib.zeros(c.size), matlib.ones(1))),
        ))
        return super().__new__(cls, T)
Code example #8
File: transform.py Project: rmaguire31/ernoe-cube
 def __new__(cls, c):
     """
     """
     
     c = c.view(matlib.matrix).reshape(1, -1)
         
     T = matlib.vstack((
         matlib.hstack((matlib.identity(c.size), c)),
         matlib.hstack((matlib.zeros(c.size), matlib.ones(1))),
     ))
     return super().__new__(cls, T)
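
For reference, a homogeneous translation matrix has the block form [[I, c], [0, 1]] with the translation c as a column on the right. As written above, c is reshaped to a row, so the hstack with the identity block only lines up when c has a single element; a minimal sketch of the block construction with an explicit column vector (a sketch only, not the project's Transform class):

import numpy.matlib as matlib

c = matlib.matrix([1.0, 2.0, 3.0]).reshape(-1, 1)   # translation as a column
n = c.size

T = matlib.vstack((
    matlib.hstack((matlib.identity(n), c)),
    matlib.hstack((matlib.zeros((1, n)), matlib.ones((1, 1)))),
))
print(T)   # 4 x 4 homogeneous translation matrix
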
Code example #9
File: solid.py Project: aJuvenn/Quike
def quaternion_mult(q1, q2):
    """
        Returns the quaternion product of two quaternions
        represented by a 4 dimensional vector
    """
    s1 = q1[0]
    v1 = q1[1:]
    s2 = q2[0]
    v2 = q2[1:]
    return npmat.vstack(
        [s1 * s2 - v1.T * v2, v2 * s1 + v1 * s2 + cross_product(v1, v2)])
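
quaternion_mult above relies on a cross_product helper that is not part of this snippet; assuming it is the ordinary 3-D cross product, the same formula can be checked with plain 1-D arrays (a sketch, not the project's code):

import numpy as np

def quat_mult(q1, q2):
    # Quaternion product of two 4-vectors [s, vx, vy, vz]; same formula as
    # quaternion_mult above, with np.cross standing in for cross_product.
    s1, v1 = q1[0], q1[1:]
    s2, v2 = q2[0], q2[1:]
    return np.concatenate(([s1 * s2 - np.dot(v1, v2)],
                           s1 * v2 + s2 * v1 + np.cross(v1, v2)))

# 90-degree rotations about x and about z; their product stays a unit quaternion.
qx = np.array([np.cos(np.pi / 4), np.sin(np.pi / 4), 0.0, 0.0])
qz = np.array([np.cos(np.pi / 4), 0.0, 0.0, np.sin(np.pi / 4)])
print(np.linalg.norm(quat_mult(qx, qz)))   # ~1.0
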
Code example #10
 def increase_input_size(self, nb_added_input = 1):
     """
         Increases the number of inputs the network needs
     """
     
     added_input_weights = self.input_scaling * EchoStateNetwork._rand_matrix(self.reservoir_size, nb_added_input)
     self.input_weights = npmat.hstack([self.input_weights, added_input_weights])
     self.input_size += nb_added_input
     
     if not self.use_raw_input:
         return
     
     self.state_size += nb_added_input
     self.state = npmat.vstack([self.state, npmat.zeros((nb_added_input, 1))])
     self.output_weights = npmat.hstack([self.output_weights, npmat.zeros((self.output_size, nb_added_input))])
Code example #11
 def __new__(cls, m, n):
     """
     """
     
     if not all((
             isinstance(m, int),
             isinstance(n, int),
         )):
         raise ValueError
     
     data = matlib.hstack([
         matlib.vstack((
             matlib.hstack((matlib.identity(n, dtype=int), matlib.matrix(p, dtype=int).T)),
             matlib.hstack((matlib.matrix(p, dtype=int), matlib.ones(1))),
         ))
         for p in product(range(m), repeat=n)
     ])
     return super().__new__(cls, data, dtype=int).view(cls)
Code example #12
File: fluid.py Project: ghorvath78/butools
def GeneralFluidSolve (Q, R, Q0=[], prec=1e-14):
    """
    Returns the parameters of the matrix-exponentially 
    distributed stationary distribution of a general 
    Markovian fluid model, where the fluid rates associated
    with the states of the background process can be
    arbitrary (zero is allowed as well).
    
    Using the returned 4 parameters the stationary
    solution can be obtained as follows.
    
    The probability that the fluid level is zero while 
    being in different states of the background process
    is given by vector mass0.
    
    The density that the fluid level is x while being in
    different states of the background process is
    
    .. math::
        \pi(x)=ini\cdot e^{K x}\cdot clo.    
    
    Parameters
    ----------
    Q : matrix, shape (N,N)
        The generator of the background Markov chain
    R : diagonal matrix, shape (N,N)
        The diagonal matrix of the fluid rates associated
        with the different states of the background process
    Q0 : matrix, shape (N,N), optional
        The generator of the background Markov chain at 
        level 0. If not provided, or empty, then Q0=Q is 
        assumed. The default value is empty.
    prec : double, optional
        Numerical precision for computing the fundamental
        matrix. The default value is 1e-14
    
    Returns
    -------
    mass0 : matrix, shape (1,Np+Nm)
        The stationary probability vector of zero level
    ini : matrix, shape (1,Np)
        The initial vector of the stationary density
    K : matrix, shape (Np,Np)
        The matrix parameter of the stationary density
    clo : matrix, shape (Np,Np+Nm)
        The closing matrix of the stationary density
    """
    
    N = Q.shape[0]
    # partition the state space according to zero, positive and negative fluid rates
    ix = np.arange(N)
    ixz = ix[np.abs(np.diag(R))<=prec]
    ixp = ix[np.diag(R)>prec]
    ixn = ix[np.diag(R)<-prec]
    Nz = len(ixz)
    Np = len(ixp)
    Nn = len(ixn)
    # permutation matrix that converts between the original and the partitioned state ordering
    P = ml.zeros((N,N))
    for i in range(Nz):
        P[i,ixz[i]]=1
    for i in range(Np):
        P[Nz+i,ixp[i]]=1
    for i in range(Nn):
        P[Nz+Np+i,ixn[i]]=1
    iP = P.I
    Qv = P*Q*iP
    Rv = P*R*iP

    # new fluid process censored to states + and -
    iQv00 = la.pinv(-Qv[:Nz,:Nz])
    Qbar = Qv[Nz:, Nz:] + Qv[Nz:,:Nz]*iQv00*Qv[:Nz,Nz:]
    absRi = Diag(np.abs(1./np.diag(Rv[Nz:,Nz:])))
    Qz = absRi * Qbar

    Psi, K, U = FluidFundamentalMatrices (Qz[:Np,:Np], Qz[:Np,Np:], Qz[Np:,:Np], Qz[Np:,Np:], "PKU", prec)

    # closing matrix
    Pm = np.hstack((ml.eye(Np), Psi)) * absRi
    iCn = absRi[Np:,Np:]
    iCp = absRi[:Np,:Np]
    clo = np.hstack(((iCp*Qv[Nz:Nz+Np,:Nz]+Psi*iCn*Qv[Nz+Np:,:Nz])*iQv00, Pm))
    
    if len(Q0)==0: # regular boundary behavior
        clo = clo * P # go back to the original state ordering

        # calculate boundary vector   
        Ua = iCn*Qv[Nz+Np:,:Nz]*iQv00*ml.ones((Nz,1)) + iCn*ml.ones((Nn,1)) + Qz[Np:,:Np]*la.inv(-K)*clo*ml.ones((Nz+Np+Nn,1))
        pm = Linsolve (ml.hstack((U,Ua)).T, ml.hstack((ml.zeros((1,Nn)),ml.ones((1,1)))).T).T

        # create the result
        mass0 = ml.hstack((pm*iCn*Qv[Nz+Np:,:Nz]*iQv00, ml.zeros((1,Np)), pm*iCn))*P
        ini = pm*Qz[Np:,:Np]        
    else:
        # solve a linear system for ini(+), pm(-) and pm(0)        
        Q0v = P*Q0*iP
        M = ml.vstack((-clo*Rv, Q0v[Nz+Np:,:], Q0v[:Nz,:]))
        Ma = ml.vstack((np.sum(la.inv(-K)*clo,1), ml.ones((Nz+Nn,1))))
        sol = Linsolve (ml.hstack((M,Ma)).T, ml.hstack((ml.zeros((1,N)),ml.ones((1,1)))).T).T
        ini = sol[:,:Np]
        clo = clo * P
        mass0 = ml.hstack((sol[:,Np+Nn:], ml.zeros((1,Np)), sol[:,Np:Np+Nn]))*P

    return mass0, ini, K, clo
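
A minimal usage sketch for the function above (assuming the BuTools package is installed and exposes GeneralFluidSolve, e.g. under butools.fluid); with the returned factors the stationary density is pi(x) = ini * exp(K x) * clo, as stated in the docstring.

import numpy.matlib as ml
from scipy.linalg import expm
from butools.fluid import GeneralFluidSolve   # assumed import path

# Two-state background chain; the fluid rises in state 0 and drains in state 1.
Q = ml.matrix([[-2.0,  2.0],
               [ 3.0, -3.0]])
R = ml.matrix([[ 1.0,  0.0],
               [ 0.0, -2.0]])    # mean drift is negative, so the model is stable

mass0, ini, K, clo = GeneralFluidSolve(Q, R)

# Stationary density of the fluid level at x = 0.5.
x = 0.5
print(mass0)
print(ini * ml.matrix(expm(K * x)) * clo)
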
Code example #13
def QuadProg(H, c, Ae, be, Ai, bi, x0):
    epsilon = 1e-9
    err = 1e-6
    k = 0
    x = x0
    n = len(x)
    kmax = 1e2
    ne = len(be)
    ni = len(bi)
    index = mat.ones((ni, 1))
    for i in range(0, ni):
        if Ai[i,:] * x > bi[i] + epsilon:
            index[i] = 0

    while k <= kmax:
        Aee = []
        #if ne > 0:
        #    Aee = Ae
        AeeList = []
        if ne > 0:
            AeeList.append(Ae)
        for j in range(0, ni):
            if index[j] > 0:
                AeeList.append(Ai[j, :])
                #Aee = mat.vstack((Aee, Ai[j,:]))
        if len(AeeList) == 0:
            Aee = []
        else:
            Aee = mat.vstack(AeeList)
            m1, n1 = mat.shape(Aee)
        gk = H * x + c
        dk, lamk = qsubp(H, gk, Aee, mat.zeros((m1, 1)))
        if np.linalg.norm(dk) <= err:
            y = 0.0
            if len(lamk) > ne:
                jk = np.argmin(lamk[ne:len(lamk)])
                y = lamk[jk]
            if y >= 0:
                exitflag = 0
            else:
                exitflag = 1
                for i in range(0, ni):
                    if index[i] and (ne + sum(index[0:i]))  == jk:
                        index[i] = 0
                        break
            #k += 1
        else:
            exitflag = 1
            alpha = 1.0
            tm = 1.0
            for i in range(0, ni):
                if index[i] == 0 and Ai[i, :] * dk < 0:
                    tmm = (bi[i] - Ai[i, :] * x) / (Ai[i, :] * dk)
                    tm1 = abs(tmm[0,0])
                    if tm1 < tm:
                        tm = tm1
                        ti = i
            alpha = min(alpha, abs(tm))
            x = x + alpha * dk
            if tm < 1:
                index[ti] = 1
        if exitflag == 0:
            break
        k += 1
    print(k)
    return x
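
QuadProg above is an active-set method: each iteration calls qsubp to solve the equality-constrained subproblem min 0.5*d'Hd + g'd subject to Aee*d = 0 and to return the Lagrange multipliers that drive the constraint-dropping test. qsubp itself is not included; the sketch below shows one way such a subproblem solver could look, via the KKT system (an assumption, not the author's implementation, and the multiplier sign convention has to match the y >= 0 test above):

import numpy as np
import numpy.matlib as mat

def qsubp(H, g, Ae, be):
    # Equality-constrained QP: min 0.5*d'Hd + g'd  s.t.  Ae d = be.
    # Solved through the KKT system; assumes the KKT matrix is nonsingular.
    H = np.asarray(H, dtype=float)
    g = np.asarray(g, dtype=float).reshape(-1, 1)
    n = H.shape[0]
    if Ae is None or len(Ae) == 0:                 # unconstrained Newton step
        return mat.matrix(np.linalg.solve(H, -g)), mat.matrix(np.zeros((0, 1)))
    Ae = np.asarray(Ae, dtype=float).reshape(-1, n)
    be = np.asarray(be, dtype=float).reshape(-1, 1)
    m = Ae.shape[0]
    KKT = np.block([[H, Ae.T], [Ae, np.zeros((m, m))]])
    sol = np.linalg.solve(KKT, np.vstack([-g, be]))
    d, lam = sol[:n], -sol[n:]                     # lam satisfies Ae' lam = H d + g
    return mat.matrix(d), mat.matrix(lam)
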
Code example #14
File: SPIRIT.py Project: ngoluuduythai/Old-PhD-Code
def SPIRIT(A, lamb, energy, k0=1, holdOffTime=0, reorthog=False, evalMetrics="F"):

    A = np.mat(A)

    n = A.shape[1]
    totalTime = A.shape[0]
    Proj = npm.ones((totalTime, n)) * np.nan
    recon = npm.zeros((totalTime, n))

    # initialize w_i to unit vectors
    W = npm.eye(n)
    d = 0.01 * npm.ones((n, 1))
    m = k0  # number of eigencomponents

    relErrors = npm.zeros((totalTime, 1))

    sumYSq = 0.0
    E_t = []
    sumXSq = 0.0
    E_dash_t = []

    res = {}
    k_hist = []
    W_hist = []
    anomalies = []

    # incremental update W
    lastChangeAt = 0

    for t in range(totalTime):

        k_hist.append(m)

        # update W for each y_t
        x = A[t, :].T  # new data as column vector

        for j in range(m):
            W[:, j], d[j], x = updateW(x, W[:, j], d[j], lamb)
            Wj = W[:, j]

        # Gram-Schmidt reorthogonalization
        if reorthog == True:
            W[:, :m], R = npm.linalg.qr(W[:, :m])

        # compute low-D projection, reconstruction and relative error
        Y = W[:, :m].T * A[t, :].T  # project to m-dimensional space
        xActual = A[t, :].T  # actual vector of the current time
        xProj = W[:, :m] * Y  # reconstruction of the current time
        Proj[t, :m] = Y.T
        recon[t, :] = xProj.T
        xOrth = xActual - xProj
        relErrors[t] = npm.sum(npm.power(xOrth, 2)) / npm.sum(npm.power(xActual, 2))

        # update energy
        sumYSq = lamb * sumYSq + npm.sum(npm.power(Y, 2))
        E_dash_t.append(sumYSq)
        sumXSq = lamb * sumXSq + npm.sum(npm.power(A[t, :], 2))
        E_t.append(sumXSq)

        # Record RSRE
        if t == 0:
            top = 0.0
            bot = 0.0

        top = top + npm.power(npm.linalg.norm(xActual - xProj), 2)

        bot = bot + npm.power(npm.linalg.norm(xActual), 2)

        new_RSRE = top / bot

        if t == 0:
            RSRE = new_RSRE
        else:
            RSRE = npm.vstack((RSRE, new_RSRE))

        ### Metric EVALUATION ###
        # deviation from truth
        if evalMetrics == "T":

            Qt = W[:, :m]

            if t == 0:
                res["subspace_error"] = npm.zeros((totalTime, 1))
                res["orthog_error"] = npm.zeros((totalTime, 1))
                res["angle_error"] = npm.zeros((totalTime, 1))
                Cov_mat = npm.zeros([n, n])

            # Calculate covariance matrix of data up to time t
            Cov_mat = lamb * Cov_mat + npm.dot(xActual, xActual.T)
            # Get eigenvalues and eigenvectors
            WW, V = npm.linalg.eig(Cov_mat)
            # Use this to sort eigenvectors according to descending eigenvalue
            eig_idx = WW.argsort()  # Get sort index
            eig_idx = eig_idx[::-1]  # Reverse order (default is ascending)
            # V_k = the m leading eigenvectors (according to their eigenvalues).
            V_k = V[:, eig_idx[:m]]
            # Calculate subspace error
            C = npm.dot(V_k, V_k.T) - npm.dot(Qt, Qt.T)
            res["subspace_error"][t, 0] = 10 * np.log10(npm.trace(npm.dot(C.T, C)))  # frobenius norm in dB
            # Calculate angle between projection matrices
            D = npm.dot(npm.dot(npm.dot(V_k.T, Qt), Qt.T), V_k)
            eigVal, eigVec = npm.linalg.eig(D)
            angle = npm.arccos(np.sqrt(max(eigVal)))
            res["angle_error"][t, 0] = angle

            # Calculate deviation from orthonormality
            F = npm.dot(Qt.T, Qt) - npm.eye(m)
            res["orthog_error"][t, 0] = 10 * np.log10(npm.trace(npm.dot(F.T, F)))  # frobenius norm in dB

        # Energy thresholding
        ######################
        # check the lower bound of energy level
        if sumYSq < energy[0] * sumXSq and lastChangeAt < t - holdOffTime and m < n:
            lastChangeAt = t
            m = m + 1
            anomalies.append(t)
        # print 'Increasing m to %d at time %d (ratio %6.2f)\n' % (m, t, 100 * sumYSq/sumXSq)
        # check the upper bound of energy level
        elif sumYSq > energy[1] * sumXSq and lastChangeAt < t - holdOffTime and m < n and m > 1:
            lastChangeAt = t
            m = m - 1
        # print 'Decreasing m to %d at time %d (ratio %6.2f)\n' % (m, t, 100 * sumYSq/sumXSq)
        W_hist.append(W[:, :m])
    # set outputs

    # Gram-Schmidt reorthogonalization
    if reorthog == True:
        W[:, :m], R = npm.linalg.qr(W[:, :m])

    # Data Stores
    res2 = {
        "hidden": Proj,  # Array for hidden Variables
        "E_t": np.array(E_t),  # total energy of data
        "E_dash_t": np.array(E_dash_t),  # hidden var energy
        "e_ratio": np.array(E_dash_t) / np.array(E_t),  # Energy ratio
        "rel_orth_err": relErrors,  # orthoX error
        "RSRE": RSRE,  # Relative squared Reconstruction error
        "recon": recon,  # reconstructed data
        "r_hist": k_hist,  # history of r values
        "W_hist": W_hist,  # history of Weights
        "anomalies": anomalies,
    }

    res.update(res2)

    return res
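
SPIRIT above delegates the per-component update to an updateW helper that is not part of this snippet. In the published SPIRIT/PAST tracking rule (Papadimitriou, Sun and Faloutsos, VLDB 2005) each principal direction w with energy d is updated from the incoming vector x roughly as below; the project's own updateW may differ in details (normalization, damping), so treat this as a sketch:

import numpy.matlib as npm

def updateW(x, w, d, lamb):
    # One SPIRIT/PAST-style update of a single principal direction.
    y = float(w.T * x)           # projection onto the current direction
    d = float(lamb * d + y * y)  # exponentially weighted energy of this component
    e = x - y * w                # reconstruction error
    w = w + (y / d) * e          # move the direction towards the new data
    x = x - float(w.T * x) * w   # residual handed to the next component
    return w, d, x

# Tiny demo: feed a few vectors dominated by the first coordinate.
w, d = npm.eye(3)[:, 0], 0.01
for _ in range(50):
    x = npm.matrix([[1.0], [0.1], [0.0]]) + 0.01 * npm.randn(3, 1)
    w, d, _ = updateW(x, w, d, 0.96)
print(w.T)   # roughly the leading principal direction of the stream
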
Code example #15
def run_simu():
    global push_robot_active
    i, t = 0, 0.0
    q, v = tsid.q, tsid.v
    time_avg = 0.0
    while True:
        time_start = time.time()

        tsid.comTask.setReference(tsid.trajCom.computeNext())
        tsid.postureTask.setReference(tsid.trajPosture.computeNext())
        tsid.rightFootTask.setReference(tsid.trajRF.computeNext())
        tsid.leftFootTask.setReference(tsid.trajLF.computeNext())

        HQPData = tsid.formulation.computeProblemData(t, q, v)

        sol = tsid.solver.solve(HQPData)
        if (sol.status != 0):
            print("QP problem could not be solved! Error code:", sol.status)
            break

    #    tau = tsid.formulation.getActuatorForces(sol)
        dv = tsid.formulation.getAccelerations(sol)
        q, v = tsid.integrate_dv(q, v, dv, conf.dt)
        i, t = i + 1, t + conf.dt

        if (push_robot_active):
            push_robot_active = False
            data = tsid.formulation.data()
            if (tsid.contact_LF_active):
                J_LF = tsid.contactLF.computeMotionTask(0.0, q, v, data).matrix
            else:
                J_LF = matlib.zeros((0, tsid.model.nv))
            if (tsid.contact_RF_active):
                J_RF = tsid.contactRF.computeMotionTask(0.0, q, v, data).matrix
            else:
                J_RF = matlib.zeros((0, tsid.model.nv))
            J = matlib.vstack((J_LF, J_RF))
            J_com = tsid.comTask.compute(t, q, v, data).matrix
            A = matlib.vstack((J_com, J))
            b = matlib.vstack(
                (np.matrix(push_robot_com_vel).T, matlib.zeros(
                    (J.shape[0], 1))))
            v = np.linalg.lstsq(A, b, rcond=-1)[0]

        if i % conf.DISPLAY_N == 0:
            tsid.robot_display.display(q)
            x_com = tsid.robot.com(tsid.formulation.data()).A1
            x_com_ref = tsid.trajCom.getSample(t).pos().A1
            H_lf = tsid.robot.position(tsid.formulation.data(), tsid.LF)
            H_rf = tsid.robot.position(tsid.formulation.data(), tsid.RF)
            x_lf_ref = tsid.trajLF.getSample(t).pos().A1[:3]
            x_rf_ref = tsid.trajRF.getSample(t).pos().A1[:3]
            tsid.gui.applyConfiguration('world/com',
                                        x_com.tolist() + [0, 0, 0, 1.])
            tsid.gui.applyConfiguration('world/com_ref',
                                        x_com_ref.tolist() + [0, 0, 0, 1.])
            tsid.gui.applyConfiguration('world/rf',
                                        pin.se3ToXYZQUATtuple(H_rf))
            tsid.gui.applyConfiguration('world/lf',
                                        pin.se3ToXYZQUATtuple(H_lf))
            tsid.gui.applyConfiguration('world/rf_ref',
                                        x_rf_ref.tolist() + [0, 0, 0, 1.])
            tsid.gui.applyConfiguration('world/lf_ref',
                                        x_lf_ref.tolist() + [0, 0, 0, 1.])

        if i % 1000 == 0:
            print("Average loop time: %.1f (expected is %.1f)" %
                  (1e3 * time_avg, 1e3 * conf.dt))

        time_spent = time.time() - time_start
        time_avg = (i * time_avg + time_spent) / (i + 1)

        if (time_avg < 0.9 * conf.dt): time.sleep(conf.dt - time_avg)
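
The push-recovery branch above resets the joint velocity by a stacked least-squares solve: the CoM Jacobian should realize the pushed CoM velocity while the active contact Jacobians should see zero velocity. A self-contained sketch of that pattern with placeholder matrices (the TSID objects are not available outside the example):

import numpy as np
import numpy.matlib as matlib

nv = 12                                      # velocity dimension (placeholder)
J_com = matlib.rand(3, nv)                   # CoM Jacobian (placeholder)
J_contacts = matlib.rand(6, nv)              # stacked contact Jacobians (placeholder)
com_vel_des = np.matrix([0.2, 0.0, 0.0]).T   # desired CoM velocity after the push

A = matlib.vstack((J_com, J_contacts))
b = matlib.vstack((com_vel_des, matlib.zeros((J_contacts.shape[0], 1))))

# Least-squares joint velocity: the CoM tracks the push, contacts stay still.
v = np.linalg.lstsq(A, b, rcond=-1)[0]
print(v.shape)   # (12, 1)
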
Code example #16
File: npprio.py Project: ghorvath78/butools
def MMAPPH1NPPR(D, sigma, S, *argv):
    """
    Returns various performance measures of a continuous time 
    MMAP[K]/PH[K]/1 non-preemptive priority queue, see [1]_.
    
    Parameters
    ----------
    D : list of matrices of shape (N,N), length (K+1)
        The D0...DK matrices of the arrival process.
        D1 corresponds to the lowest, DK to the highest priority.
    sigma : list of row vectors, length (K)
        The list containing the initial probability vectors of the service
        time distributions of the various customer types. The length of the
        vectors does not have to be the same.
    S : list of square matrices, length (K)
        The transient generators of the phase type distributions representing
        the service time of the jobs belonging to various types.
    further parameters : 
        The rest of the function parameters specify the options
        and the performance measures to be computed.
    
        The supported performance measures and options in this 
        function are:
    
        +----------------+--------------------+----------------------------------------+
        | Parameter name | Input parameters   | Output                                 |
        +================+====================+========================================+
        | "ncMoms"       | Number of moments  | The moments of the number of customers |
        +----------------+--------------------+----------------------------------------+
        | "ncDistr"      | Upper limit K      | The distribution of the number of      |
        |                |                    | customers from level 0 to level K-1    |
        +----------------+--------------------+----------------------------------------+
        | "stMoms"       | Number of moments  | The sojourn time moments               |
        +----------------+--------------------+----------------------------------------+
        | "stDistr"      | A vector of points | The sojourn time distribution at the   |
        |                |                    | requested points (cumulative, cdf)     |
        +----------------+--------------------+----------------------------------------+
        | "prec"         | The precision      | Numerical precision used as a stopping |
        |                |                    | condition when solving the Riccati and |
        |                |                    | the matrix-quadratic equations         |
        +----------------+--------------------+----------------------------------------+
        | "erlMaxOrder"  | Integer number     | The maximal Erlang order used in the   |
        |                |                    | erlangization procedure. The default   |
        |                |                    | value is 200.                          |
        +----------------+--------------------+----------------------------------------+
        | "classes"      | Vector of integers | Only the performance measures          |
        |                |                    | belonging to these classes are         |
        |                |                    | returned. If not given, all classes    |
        |                |                    | are analyzed.                          |
        +----------------+--------------------+----------------------------------------+
        
        (The quantities related to the number of customers in 
        the system include the customer in the server, and the 
        sojourn time related quantities include the service 
        times as well)
    
    Returns
    -------
    Ret : list of the performance measures
        Each entry of the list corresponds to a performance 
        measure requested. Each entry is a matrix, where the
        columns belong to the various job types.
        If there is just a single item, 
        then it is not put into a list.
    
    References
    ----------
    .. [1] G. Horvath, "Efficient analysis of the MMAP[K]/PH[K]/1
           priority queue", European Journal of Operational 
           Research, 246(1), 128-139, 2015.
    """
    
    K = len(D)-1

    # parse options
    eaten = []
    erlMaxOrder = 200
    precision = 1e-14
    classes = np.arange(0,K)
    for i in range(len(argv)):
        if argv[i]=="prec":
            precision = argv[i+1]
            eaten.append(i)
            eaten.append(i+1) 
        elif argv[i]=="erlMaxOrder":
            erlMaxOrder = argv[i+1]
            eaten.append(i)
            eaten.append(i+1) 
        elif argv[i]=="classes":
            classes = np.array(argv[i+1])-1
            eaten.append(i)
            eaten.append(i+1) 
    
    if butools.checkInput and not CheckMMAPRepresentation(D):
        raise Exception('MMAPPH1NPPR: The arrival process is not a valid MMAP representation!')
    
    if butools.checkInput:
        for k in range(K):
            if not CheckPHRepresentation(sigma[k],S[k]):
                raise Exception('MMAPPH1NPPR: the vector and matrix describing the service times do not form a valid PH representation!')

    # some preparation
    D0 = D[0]
    N = D0.shape[0]
    I = ml.eye(N)
    sD = ml.zeros((N,N))
    for Di in D:
        sD += Di
    
    s = []
    M = np.empty(K)
    for i in range(K):
        s.append(np.sum(-S[i],1))
        M[i] = sigma[i].size
    
    # step 1. solution of the workload process of the joint queue
    # ===========================================================
    sM = np.sum(M)
    Qwmm = ml.matrix(D0)
    Qwpm = ml.zeros((N*sM, N))
    Qwmp = ml.zeros((N, N*sM))
    Qwpp = ml.zeros((N*sM, N*sM)) 
    kix = 0
    for i in range(K):
        Qwmp[:,kix:kix+N*M[i]] = np.kron(D[i+1], sigma[i])
        Qwpm[kix:kix+N*M[i],:] = np.kron(I,s[i])
        Qwpp[kix:kix+N*M[i],:][:,kix:kix+N*M[i]] = np.kron(I,S[i])
        kix += N*M[i]

    # calculate fundamental matrices
    Psiw, Kw, Uw = FluidFundamentalMatrices (Qwpp, Qwpm, Qwmp, Qwmm, 'PKU', precision)
    
    # calculate boundary vector
    Ua = ml.ones((N,1)) + 2*np.sum(Qwmp*(-Kw).I,1)
    pm = Linsolve (ml.hstack((Uw,Ua)).T, ml.hstack((ml.zeros((1,N)),ml.ones((1,1)))).T).T

    ro =  ((1.0-np.sum(pm))/2.0)/(np.sum(pm)+(1.0-np.sum(pm))/2.0) # calc idle time with weight=1, and the busy time with weight=1/2
    kappa = pm/np.sum(pm)
    
    pi = CTMCSolve (sD)
    lambd = []
    for i in range(K):
        lambd.append(np.sum(pi*D[i+1]))

    Psiw = []
    Qwmp = []
    Qwzp = []
    Qwpp = []
    Qwmz = []
    Qwpz = []
    Qwzz = []
    Qwmm = []
    Qwpm = []
    Qwzm = []
    for k in range(K):
        # step 2. construct a workload process for classes k...K
        # ======================================================
        Mlo = np.sum(M[:k])
        Mhi = np.sum(M[k:])

        Qkwpp = ml.zeros((N*Mlo*Mhi+N*Mhi, N*Mlo*Mhi+N*Mhi))
        Qkwpz = ml.zeros((N*Mlo*Mhi+N*Mhi, N*Mlo)) 
        Qkwpm = ml.zeros((N*Mlo*Mhi+N*Mhi, N))
        Qkwmz = ml.zeros((N, N*Mlo))
        Qkwmp = ml.zeros((N, N*Mlo*Mhi+N*Mhi))
        Dlo = ml.matrix(D0)
        for i in range(k):
            Dlo = Dlo + D[i+1]
        Qkwmm = Dlo
        Qkwzp = ml.zeros((N*Mlo, N*Mlo*Mhi+N*Mhi))
        Qkwzm = ml.zeros((N*Mlo, N))
        Qkwzz = ml.zeros((N*Mlo, N*Mlo))
        kix = 0
        for i in range(k,K):
            kix2 = 0
            for j in range(k):
                bs = N*M[j]*M[i]
                bs2 = N*M[j]
                Qkwpp[kix:kix+bs,kix:kix+bs] = np.kron(I,np.kron(ml.eye(M[j]),S[i]))
                Qkwpz[kix:kix+bs,kix2:kix2+bs2] = np.kron(I,np.kron(ml.eye(M[j]),s[i]))
                Qkwzp[kix2:kix2+bs2,kix:kix+bs] = np.kron(D[i+1],np.kron(ml.eye(M[j]), sigma[i]))
                kix += bs
                kix2 += bs2
        for i in range(k,K):
            bs = N*M[i]
            Qkwpp[kix:kix+bs,:][:,kix:kix+bs] = np.kron(I,S[i])
            Qkwpm[kix:kix+bs,:] = np.kron(I,s[i])
            Qkwmp[:,kix:kix+bs] = np.kron(D[i+1],sigma[i])
            kix += bs
        kix = 0
        for j in range(k):
            bs = N*M[j]
            Qkwzz[kix:kix+bs,kix:kix+bs] = np.kron(Dlo, ml.eye(M[j])) + np.kron(I, S[j])
            Qkwzm[kix:kix+bs,:] = np.kron(I, s[j])
            kix += bs

        if Qkwzz.shape[0]>0:
            Psikw = FluidFundamentalMatrices (Qkwpp+Qkwpz*(-Qkwzz).I*Qkwzp, Qkwpm+Qkwpz*(-Qkwzz).I*Qkwzm, Qkwmp, Qkwmm, 'P', precision)
        else:
            Psikw = FluidFundamentalMatrices (Qkwpp, Qkwpm, Qkwmp, Qkwmm, 'P', precision)
        Psiw.append(Psikw)
        
        Qwzp.append(Qkwzp)
        Qwmp.append(Qkwmp)
        Qwpp.append(Qkwpp)
        Qwmz.append(Qkwmz)
        Qwpz.append(Qkwpz)
        Qwzz.append(Qkwzz)
        Qwmm.append(Qkwmm)
        Qwpm.append(Qkwpm)
        Qwzm.append(Qkwzm)
    
    # step 3. calculate Phi vectors
    # =============================
    lambdaS = sum(lambd)
    phi = [(1-ro)*kappa*(-D0) / lambdaS]
    q0 = [[]]
    qL = [[]]
    for k in range(K-1):
        sDk = ml.matrix(D0)
        for j in range(k+1):
            sDk = sDk + D[j+1]
        # pk
        pk = sum(lambd[:k+1])/lambdaS - (1-ro)*kappa*np.sum(sDk,1)/lambdaS
        # A^(k,1)
        Qwzpk = Qwzp[k+1]
        vix = 0
        Ak = []
        for ii in range(k+1):
            bs = N*M[ii]
            V1 = Qwzpk[vix:vix+bs,:]
            Ak.append (np.kron(I,sigma[ii]) * (-np.kron(sDk,ml.eye(M[ii]))-np.kron(I,S[ii])).I * (np.kron(I,s[ii]) + V1*Psiw[k+1]))
            vix += bs
        # B^k
        Qwmpk = Qwmp[k+1]
        Bk = Qwmpk * Psiw[k+1]
        ztag = phi[0]*((-D0).I*D[k+1]*Ak[k] - Ak[0] + (-D0).I*Bk)
        for i in range(k):
            ztag += phi[i+1]*(Ak[i]-Ak[i+1]) + phi[0]*(-D0).I*D[i+1]*Ak[i]
        Mx = ml.eye(Ak[k].shape[0])-Ak[k]
        Mx[:,0] = ml.ones((N,1))
        phi.append(ml.hstack((pk, ztag[:,1:]))*Mx.I)  # phi(k) = Psi^(k)_k * p(k). Psi^(k)_i = phi(i) / p(k)

        q0.append(phi[0]*(-D0).I)
        qLii = []
        for ii in range(k+1):
            qLii.append((phi[ii+1] - phi[ii] + phi[0]*(-D0).I*D[ii+1]) * np.kron(I,sigma[ii]) * (-np.kron(sDk,ml.eye(M[ii]))-np.kron(I,S[ii])).I)
        qL.append(ml.hstack(qLii))
    
    
    # step 4. calculate performance measures
    # ======================================
    Ret = []
    for k in classes:

        sD0k = ml.matrix(D0)
        for i in range(k):
            sD0k +=  D[i+1]     
       
        if k<K-1:
            # step 4.1 calculate distribution of the workload process right 
            # before the arrivals of class k jobs
            # ============================================================
            if Qwzz[k].shape[0]>0:
                Kw = Qwpp[k]+Qwpz[k]*(-Qwzz[k]).I*Qwzp[k] + Psiw[k]*Qwmp[k]
            else:
                Kw = Qwpp[k] + Psiw[k]*Qwmp[k]
            BM = ml.zeros((0,0))
            CM = ml.zeros((0,N))
            DM = ml.zeros((0,0))
            for i in range(k):
                BM = la.block_diag(BM,np.kron(I,S[i]))
                CM = ml.vstack((CM, np.kron(I,s[i])))
                DM = la.block_diag(DM,np.kron(D[k+1],ml.eye(M[i])))
            if k>0:
                Kwu = ml.vstack((ml.hstack((Kw, (Qwpz[k]+Psiw[k]*Qwmz[k])*(-Qwzz[k]).I*DM)), ml.hstack((ml.zeros((BM.shape[0],Kw.shape[1])), BM))))
                Bwu = ml.vstack((Psiw[k]*D[k+1], CM))
                iniw = ml.hstack((q0[k]*Qwmp[k]+qL[k]*Qwzp[k], qL[k]*DM))
                pwu = q0[k]*D[k+1]
            else:
                Kwu = Kw
                Bwu = Psiw[k]*D[k+1]
                iniw = pm*Qwmp[k]
                pwu = pm*D[k+1]

            norm = np.sum(pwu) + np.sum(iniw*(-Kwu).I*Bwu)
            pwu = pwu / norm
            iniw = iniw / norm

            # step 4.2 create the fluid model whose first passage time equals the
            # WAITING time of the low priority customers
            # ==================================================================
            KN = Kwu.shape[0]
            Qspp = ml.zeros((KN+N*np.sum(M[k+1:]), KN+N*np.sum(M[k+1:])))
            Qspm = ml.zeros((KN+N*np.sum(M[k+1:]), N))
            Qsmp = ml.zeros((N, KN+N*np.sum(M[k+1:])))
            Qsmm = sD0k + D[k+1]
            kix = 0
            for i in range(k+1,K):
                bs = N*M[i]
                Qspp[KN+kix:KN+kix+bs,:][:,KN+kix:KN+kix+bs] = np.kron(I,S[i])
                Qspm[KN+kix:KN+kix+bs,:] = np.kron(I,s[i])
                Qsmp[:,KN+kix:KN+kix+bs] = np.kron(D[i+1],sigma[i])
                kix += bs

            Qspp[:KN,:][:,:KN] = Kwu
            Qspm[:KN,:] = Bwu
            inis = ml.hstack((iniw, ml.zeros((1,N*np.sum(M[k+1:])))))

            # calculate fundamental matrix
            Psis = FluidFundamentalMatrices (Qspp, Qspm, Qsmp, Qsmm, 'P', precision)

            # step 4.3. calculate the performance measures
            # ==========================================   
            argIx = 0
            while argIx<len(argv):
                if argIx in eaten:
                    argIx += 1
                    continue
                elif type(argv[argIx]) is str and argv[argIx]=="stMoms":
                    # MOMENTS OF THE SOJOURN TIME
                    # ~~~~~~~~~~~~~~~~~~~~~~~~~~~
                    numOfSTMoms = argv[argIx+1]
                    # calculate waiting time moments
                    Pn = [Psis]
                    wtMoms = []
                    for n in range(1,numOfSTMoms+1):
                        A = Qspp + Psis*Qsmp
                        B = Qsmm + Qsmp*Psis
                        C = -2*n*Pn[n-1]
                        bino = 1
                        for i in range(1,n):
                            bino = bino * (n-i+1) / i
                            C += bino * Pn[i]*Qsmp*Pn[n-i]
                        P = la.solve_sylvester(A, B, -C)
                        Pn.append(P)
                        wtMoms.append(np.sum(inis*P*(-1)**n) / 2**n)
                    # calculate RESPONSE time moments
                    Pnr = [np.sum(inis*Pn[0])*sigma[k]]
                    rtMoms = []
                    for n in range(1,numOfSTMoms+1):
                        P =  n*Pnr[n-1]*(-S[k]).I + (-1)**n*np.sum(inis*Pn[n])*sigma[k] / 2**n
                        Pnr.append(P)
                        rtMoms.append(np.sum(P)+np.sum(pwu)*math.factorial(n)*np.sum(sigma[k]*(-S[k]).I**n))
                    Ret.append(rtMoms)
                    argIx += 1
                elif type(argv[argIx]) is str and argv[argIx]=="stDistr":
                    # DISTRIBUTION OF THE SOJOURN TIME
                    # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
                    stCdfPoints = argv[argIx+1]
                    res = []
                    for t in stCdfPoints:
                        L = erlMaxOrder
                        lambdae = L/t/2
                        Psie = FluidFundamentalMatrices (Qspp-lambdae*ml.eye(Qspp.shape[0]), Qspm, Qsmp, Qsmm-lambdae*ml.eye(Qsmm.shape[0]), 'P', precision)
                        Pn = [Psie]
                        pr = (np.sum(pwu) + np.sum(inis*Psie)) * (1-np.sum(sigma[k]*(ml.eye(S[k].shape[0])-S[k]/2/lambdae).I**L))
                        for n in range(1,L):
                            A = Qspp + Psie*Qsmp - lambdae*ml.eye(Qspp.shape[0])
                            B = Qsmm + Qsmp*Psie - lambdae*ml.eye(Qsmm.shape[0])
                            C = 2*lambdae*Pn[n-1]
                            for i in range(1,n):
                                C += Pn[i]*Qsmp*Pn[n-i]
                            P = la.solve_sylvester(A, B, -C)
                            Pn.append(P)
                            pr += np.sum(inis*P) * (1-np.sum(sigma[k]*(np.eye(S[k].shape[0])-S[k]/2/lambdae).I**(L-n)))
                        res.append(pr)
                    Ret.append(np.array(res))
                    argIx += 1
                elif type(argv[argIx]) is str and (argv[argIx]=="ncMoms" or argv[argIx]=="ncDistr"):
                    W = (-np.kron(sD-D[k+1],ml.eye(M[k]))-np.kron(I,S[k])).I*np.kron(D[k+1],ml.eye(M[k]))
                    iW = (ml.eye(W.shape[0])-W).I
                    w = np.kron(ml.eye(N),sigma[k])
                    omega = (-np.kron(sD-D[k+1],ml.eye(M[k]))-np.kron(I,S[k])).I*np.kron(I,s[k])
                    if argv[argIx]=="ncMoms":
                        # MOMENTS OF THE NUMBER OF JOBS
                        # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
                        numOfQLMoms = argv[argIx+1]
                        # first calculate it at departure instants
                        Psii = [Psis]
                        QLDPn = [inis*Psii[0]*w*iW]
                        for n in range(1,numOfQLMoms+1):
                            A = Qspp + Psis*Qsmp
                            B = Qsmm + Qsmp*Psis
                            C = n*Psii[n-1]*D[k+1]
                            bino = 1
                            for i in range(1,n):
                                bino = bino * (n-i+1) / i
                                C = C + bino * Psii[i]*Qsmp*Psii[n-i]
                            P = la.solve_sylvester(A, B, -C)
                            Psii.append(P)
                            QLDPn.append(n*QLDPn[n-1]*iW*W + inis*P*w*iW)
                        for n in range(numOfQLMoms+1):
                            QLDPn[n] = (QLDPn[n] + pwu*w*iW**(n+1)*W**n)*omega
                        # now calculate it at random time instance
                        QLPn = [pi]
                        qlMoms = []
                        iTerm = (ml.ones((N,1))*pi - sD).I
                        for n in range(1,numOfQLMoms+1):
                            sumP = np.sum(QLDPn[n]) + n*np.sum((QLDPn[n-1] - QLPn[n-1]*D[k+1]/lambd[k])*iTerm*D[k+1])
                            P = sumP*pi + n*(QLPn[n-1]*D[k+1] - QLDPn[n-1]*lambd[k])*iTerm
                            QLPn.append(P)
                            qlMoms.append(np.sum(P))
                        qlMoms = MomsFromFactorialMoms(qlMoms)
                        Ret.append(qlMoms)
                        argIx += 1
                    elif argv[argIx]=="ncDistr":
                        # DISTRIBUTION OF THE NUMBER OF JOBS
                        # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
                        numOfQLProbs = argv[argIx+1]
                        Psid = FluidFundamentalMatrices (Qspp, Qspm, Qsmp, sD0k, 'P', precision)
                        Pn = [Psid]
                        XDn = inis*Psid*w
                        dqlProbs = (XDn+pwu*w)*omega
                        for n in range(1,numOfQLProbs):
                            A = Qspp + Psid*Qsmp
                            B = sD0k + Qsmp*Psid
                            C = Pn[n-1]*D[k+1]
                            for i in range(1,n):
                                C += Pn[i]*Qsmp*Pn[n-i]
                            P = la.solve_sylvester(A, B, -C)
                            Pn.append(P)
                            XDn = XDn*W + inis*P*w
                            dqlProbs = ml.vstack((dqlProbs, (XDn+pwu*w*W**n)*omega))
                        # now calculate it at random time instance
                        iTerm = -(sD-D[k+1]).I
                        qlProbs = lambd[k]*dqlProbs[0,:]*iTerm
                        for n in range(1,numOfQLProbs):
                            P = (qlProbs[n-1,:]*D[k+1]+lambd[k]*(dqlProbs[n,:]-dqlProbs[n-1,:]))*iTerm
                            qlProbs = ml.vstack((qlProbs, P))
                        qlProbs = np.sum(qlProbs,1).A.flatten()
                        Ret.append(qlProbs)
                        argIx += 1
                else:
                    raise Exception("MMAPPH1NPPR: Unknown parameter "+str(argv[argIx]))
                argIx += 1
        elif k==K-1:
            # step 3. calculate the performance measures
            # ==========================================   
            argIx = 0
            while argIx<len(argv):
                if argIx in eaten:
                    argIx += 1
                    continue
                elif type(argv[argIx]) is str and (argv[argIx]=="stMoms" or argv[argIx]=="stDistr"):
                    Kw = Qwpp[k]+Qwpz[k]*(-Qwzz[k]).I*Qwzp[k] + Psiw[k]*Qwmp[k]
                    AM = ml.zeros((0,0))
                    BM = ml.zeros((0,0))
                    CM = ml.zeros((0,1))
                    DM = ml.zeros((0,0))
                    for i in range(k):
                        AM = la.block_diag(AM,np.kron(ml.ones((N,1)),np.kron(ml.eye(M[i]),s[k])))
                        BM = la.block_diag(BM,S[i])
                        CM = ml.vstack((CM, s[i]))
                        DM = la.block_diag(DM,np.kron(D[k+1],ml.eye(M[i])))                        
                    Z = ml.vstack((ml.hstack((Kw, ml.vstack((AM,ml.zeros((N*M[k],AM.shape[1])))))), ml.hstack((ml.zeros((BM.shape[0],Kw.shape[1])), BM))))
                    z = ml.vstack((ml.zeros((AM.shape[0],1)), np.kron(ml.ones((N,1)),s[k]), CM))
                    iniw = ml.hstack((q0[k]*Qwmp[k]+qL[k]*Qwzp[k], ml.zeros((1,BM.shape[0]))))
                    zeta = iniw/np.sum(iniw*(-Z).I*z)
                    if argv[argIx]=="stMoms":
                        # MOMENTS OF THE SOJOURN TIME
                        # ~~~~~~~~~~~~~~~~~~~~~~~~~~~
                        numOfSTMoms = argv[argIx+1]
                        rtMoms = []
                        for i in range(1,numOfSTMoms+1):
                            rtMoms.append(np.sum(math.factorial(i)*zeta*(-Z).I**(i+1)*z))
                        Ret.append(rtMoms)
                        argIx += 1
                    if argv[argIx]=="stDistr":
                        # DISTRIBUTION OF THE SOJOURN TIME
                        # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
                        stCdfPoints = argv[argIx+1]
                        rtDistr = []
                        for t in stCdfPoints:
                            rtDistr.append (np.sum(zeta*(-Z).I*(ml.eye(Z.shape[0])-la.expm(Z*t))*z))
                        Ret.append(np.array(rtDistr))
                        argIx += 1
                elif type(argv[argIx]) is str and (argv[argIx]=="ncMoms" or argv[argIx]=="ncDistr"):
                    L = ml.zeros((N*np.sum(M),N*np.sum(M)))
                    B = ml.zeros((N*np.sum(M),N*np.sum(M)))
                    F = ml.zeros((N*np.sum(M),N*np.sum(M)))
                    kix = 0
                    for i in range(K):
                        bs = N*M[i]
                        F[kix:kix+bs,:][:,kix:kix+bs] = np.kron(D[k+1],ml.eye(M[i]))
                        L[kix:kix+bs,:][:,kix:kix+bs] = np.kron(sD0k,ml.eye(M[i])) + np.kron(I,S[i])
                        if i<K-1:
                            L[kix:kix+bs,:][:,N*np.sum(M[:k]):] = np.kron(I,s[i]*sigma[k])
                        else:
                            B[kix:kix+bs,:][:,N*np.sum(M[:k]):] = np.kron(I,s[i]*sigma[k])
                        kix += bs
                    R = QBDFundamentalMatrices (B, L, F, 'R', precision)
                    p0 = ml.hstack((qL[k], q0[k]*np.kron(I,sigma[k])))
                    p0 = p0/np.sum(p0*(ml.eye(R.shape[0])-R).I)
                    if argv[argIx]=="ncMoms":
                        # MOMENTS OF THE NUMBER OF JOBS
                        # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
                        numOfQLMoms = argv[argIx+1]
                        qlMoms = []
                        for i in range(1,numOfQLMoms+1):
                            qlMoms.append(np.sum(math.factorial(i)*p0*R**i*(ml.eye(R.shape[0])-R).I**(i+1)))
                        Ret.append(MomsFromFactorialMoms(qlMoms))
                    elif argv[argIx]=="ncDistr":
                        # DISTRIBUTION OF THE NUMBER OF JOBS
                        # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
                        numOfQLProbs = argv[argIx+1]
                        qlProbs = [np.sum(p0)]
                        for i in range(1,numOfQLProbs):
                            qlProbs.append(np.sum(p0*R**i))
                        Ret.append(np.array(qlProbs))
                    argIx += 1
                else:
                    raise Exception("MMAPPH1NPPR: Unknown parameter "+str(argv[argIx]))
                argIx += 1

    if len(Ret)==1:
        return Ret[0]
    else:
        return Ret
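
A minimal usage sketch for a two-class non-preemptive priority queue (assuming BuTools is installed and exposes MMAPPH1NPPR, e.g. under butools.queues). D1 carries the low-priority and D2 the high-priority arrivals, the two service times are PH distributed, and the chosen rates keep the total load below one.

import numpy.matlib as ml
from butools.queues import MMAPPH1NPPR   # assumed import path

# Two-class MMAP arrival process (the rows of D0+D1+D2 sum to zero).
D0 = ml.matrix([[-5., 1.], [2., -7.]])
D1 = ml.matrix([[ 2., 1.], [1.,  1.]])   # low-priority arrivals
D2 = ml.matrix([[ 1., 0.], [1.,  2.]])   # high-priority arrivals

# PH service time distributions of the two classes.
sigma = [ml.matrix([[1.0]]), ml.matrix([[0.6, 0.4]])]
S = [ml.matrix([[-8.0]]), ml.matrix([[-10., 4.], [0., -12.]])]

# First three moments of the number of customers, per class.
ncMoms = MMAPPH1NPPR([D0, D1, D2], sigma, S, "ncMoms", 3)
print(ncMoms)
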
Code example #17
File: SPIRIT_faulty.py Project: MrKriss/Old-PhD-Code
def SPIRIT(streams, energyThresh, lamb, evalMetrics):

    # Make an iterator over the input streams
    if type(streams) == np.ndarray:
        streams_iter = iter(streams)

    # Max No. Streams
    if streams.ndim == 1:
        streams = np.expand_dims(streams, axis=1)
        num_streams = streams.shape[1]
    else: 
        num_streams = streams.shape[1]

    count_over = 0
    count_under = 0

#===============================================================================
#      Initialise k, w and d, lamb
#===============================================================================

    k = 1 # Hidden Variables, initialise to one 
    
    # Weights
    pc_weights = npm.zeros(num_streams)
    pc_weights[0, 0] = 1
    
    # initialise outputs
    res = {}
    all_weights = []
    k_hist = []
    anomalies = []
    x_dash = npm.zeros((1,num_streams))
    
    Eng = mat([0.00000001, 0.00000001])    
    
    E_xt = 0  # Energy of X at time t
    E_rec_i = mat([0.000000000000001]) # Energy of reconstruction

    Y = npm.zeros(num_streams)
    
    timeSteps = streams.shape[0]
    
#===============================================================================
# Main Loop 
#===============================================================================
    for t in range(1, timeSteps + 1): # t = 1,...,200

        k_hist.append(k)

        x_t_plus_1 = mat(next(streams_iter)) # Read in next signals

        d_i = E_rec_i * t

        # Step 1 - Update Weights 
        pc_weights, y_t_i, error = track_W(x_t_plus_1, 
                                               k, pc_weights, d_i,
                                               num_streams, 
                                               lamb)
        # Record hidden variables
        padding = num_streams - k
        y_bar_t = npm.hstack((y_t_i, mat([nan] * padding)))
        Y = npm.vstack((Y,y_bar_t))
        
        # Record Weights
        all_weights.append(pc_weights)  
        # Record reconstructed z and RSRE
        x_dash = npm.vstack((x_dash, y_t_i * pc_weights))
               
        # Record RSRE
        if t == 1:
            top = 0.0
            bot = 0.0
            
        top = top + (norm(x_t_plus_1 - x_dash) ** 2 )

        bot = bot + (norm(x_t_plus_1) ** 2)
        
        new_RSRE = top / bot   
                  
        if t == 1:
            RSRE = new_RSRE
        else:                  
            RSRE = npm.vstack((RSRE, new_RSRE))

        ### FOR EVALUATION ###
        #deviation from truth
        if evalMetrics == 'T' :
            
            Qt = pc_weights.T            
            
            if t == 1 :
                res['subspace_error'] = npm.zeros((timeSteps,1))
                res['orthog_error'] = npm.zeros((timeSteps,1))                


                res['angle_error'] = npm.zeros((timeSteps,1))
                Cov_mat = npm.zeros([num_streams,num_streams])
                
            # Calculate covariance matrix of data up to time t
            Cov_mat = lamb * Cov_mat +  npm.dot(x_t_plus_1,  x_t_plus_1.T)
            # Get eigenvalues and eigenvectors             
            W , V = eig(Cov_mat)
            # Use this to sort eigenvectors according to descending eigenvalue
            eig_idx = W.argsort() # Get sort index
            eig_idx = eig_idx[::-1] # Reverse order (default is ascending)
            # V_k = the k leading eigenvectors (according to their eigenvalues).
            V_k = V[:, eig_idx[:k]]          
            # Calculate subspace error        
            C = npm.dot(V_k , V_k.T) - npm.dot(Qt , Qt.T)  
            res['subspace_error'][t-1,0] = 10 * np.log10(npm.trace(npm.dot(C.T , C))) #frobenius norm in dB
        
            # Calculate angle between projection matrices
            D = npm.dot(npm.dot(npm.dot(V_k.T, Qt), Qt.T), V_k) 
            eigVal, eigVec = eig(D)
            angle = npm.arccos(np.sqrt(max(eigVal)))        
            res['angle_error'][t-1,0] = angle        
    
            # Calculate deviation from orthonormality
            F = npm.dot(Qt.T , Qt) - npm.eye(k)
            res['orthog_error'][t-1,0] = 10 * np.log10(npm.trace(npm.dot(F.T , F))) #frobenius norm in dB
              

        # Step 2 - Update Energy estimate
        E_xt = ((lamb * (t-1) * E_xt) + norm(x_t_plus_1) ** 2) / t
    
        for i in range(k):
            E_rec_i[0, i] = ((lamb * (t-1) * E_rec_i[0, i]) + (y_t_i[0, i] ** 2)) / t

        # Step 3 - Estimate the retained energy
        E_retained = npm.sum(E_rec_i,1)
    
        # Record Energy  
        Eng_new = npm.hstack((E_xt, E_retained[0,0]))
        Eng = npm.vstack((Eng, Eng_new))
    
        if E_retained < energyThresh[0] * E_xt:
            if k != num_streams:
                k = k + 1       
                # Initialise Ek+1 <-- 0
                E_rec_i = npm.hstack((E_rec_i, mat([0]))) 
                # Initialise W_i+1
                new_weight_vec = npm.zeros(num_streams)  
                new_weight_vec[0, k-1] = 1
                pc_weights = npm.vstack((pc_weights, new_weight_vec))
                anomalies.append(t -1)
            else:
                count_over += 1
        elif E_retained > energyThresh[1] * E_xt:
            if k > 1 :
                k = k - 1
                # discard w_k and error
                pc_weights = delete(pc_weights, -1, 0)    
                # Discard E_rec_i[k]
                E_rec_i = delete(E_rec_i, -1)
            else:
                count_under += 1
          
          
    # Data Stores
    res2 = {'hidden' :  Y,                        # Array for hidden Variables
           'weights' : all_weights,
           'E_t' : Eng[:,0],                     # total energy of data 
           'E_dash_t' : Eng[:,1],                # hidden var energy
           'e_ratio' : np.divide(Eng[:,1], Eng[:,0]),      # Energy ratio 
           'RSRE' : RSRE,                        # Relative squared Reconstruction error 
           'recon' : x_dash,                     # reconstructed data
           'r_hist' : k_hist, # history of r values 
           'anomalies' : anomalies}  
           
    res.update(res2)
              
    return res, all_weights
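
A minimal call sketch, with the SPIRIT function above in scope and assuming its module-level dependencies (track_W and the numpy/matlib names it uses) are importable:

import numpy as np

# 200 time steps of 5 correlated, noisy streams.
t = np.linspace(0, 10, 200)
streams = np.column_stack([np.sin(t + p) for p in np.linspace(0, 1, 5)])
streams += 0.05 * np.random.randn(*streams.shape)

res, weights = SPIRIT(streams, energyThresh=[0.95, 0.98], lamb=0.96, evalMetrics='F')
print(res['r_hist'][-1], res['anomalies'])
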
Code example #18
File: K_means_test.py Project: timeloveboy/pyimg
# coding:utf-8

from matplotlib.pyplot import plot, show, figure
from numpy.matlib import randn, array, vstack, where
from scipy.cluster.vq import kmeans, vq

# Generate two classes of normally distributed 2-D data
class1 = 1.5 * randn(100, 2)
class2 = randn(100, 2) + array([5, 5])
features = vstack((class1, class2))

# Cluster the data with k = 2

centroids, variance = kmeans(features, 2)

# Assign each data point to a cluster using vector quantization:
code, distance = vq(features, centroids)

figure()
ndx = where(code == 0)[0]
plot(features[ndx, 0], features[ndx, 1], '*')
ndx = where(code == 1)[0]
plot(features[ndx, 0], features[ndx, 1], 'r.')
plot(features[:, 0], features[:, 1], 'go')
show()
Code example #19
def QuadProg(H, c, Ae, be, Ai, bi, x0):
    epsilon = 1e-9
    err = 1e-6
    k = 0
    x = x0
    n = len(x)
    kmax = 1e2
    ne = len(be)
    ni = len(bi)
    index = mat.ones((ni, 1))
    for i in range(0, ni):
        if Ai[i, :] * x > bi[i] + epsilon:
            index[i] = 0

    while k <= kmax:
        Aee = []
        #if ne > 0:
        #    Aee = Ae
        AeeList = []
        if ne > 0:
            AeeList.append(Ae)
        for j in range(0, ni):
            if index[j] > 0:
                AeeList.append(Ai[j, :])
                #Aee = mat.vstack((Aee, Ai[j,:]))
        if len(AeeList) == 0:
            Aee = []
        else:
            Aee = mat.vstack(AeeList)
            m1, n1 = mat.shape(Aee)
        gk = H * x + c
        dk, lamk = qsubp(H, gk, Aee, mat.zeros((m1, 1)))
        if np.linalg.norm(dk) <= err:
            y = 0.0
            if len(lamk) > ne:
                jk = np.argmin(lamk[ne:len(lamk)])
                y = lamk[jk]
            if y >= 0:
                exitflag = 0
            else:
                exitflag = 1
                for i in range(0, ni):
                    if index[i] and (ne + sum(index[0:i])) == jk:
                        index[i] = 0
                        break
            #k += 1
        else:
            exitflag = 1
            alpha = 1.0
            tm = 1.0
            for i in range(0, ni):
                if index[i] == 0 and Ai[i, :] * dk < 0:
                    tmm = (bi[i] - Ai[i, :] * x) / (Ai[i, :] * dk)
                    tm1 = abs(tmm[0, 0])
                    if tm1 < tm:
                        tm = tm1
                        ti = i
            alpha = min(alpha, abs(tm))
            x = x + alpha * dk
            if tm < 1:
                index[ti] = 1
        if exitflag == 0:
            break
        k += 1
    print(k)
    return x
Code example #20
def SPIRIT_pedro(A,
                 k0=1,
                 lamb=0.96,
                 holdOffTime=0,
                 energy_low=0.95,
                 energy_high=0.98):

    n = A.shape[1]
    totalTime = A.shape[0]

    W = npm.eye(n)  #initialize the k (up to n) w_i to unit vectors
    d = 0.01 * npm.ones(
        (n, 1))  #energy associated with given eigenvalue of covariance X_t'X_t
    m = k0  # number of eigencomponents

    sumYSq = 0
    sumXSq = 0

    #data structures for evaluating (~ totalTime)
    print("Running incremental simulation on", n, "streams with a total of", totalTime, "ticks.\n")
    anomalies = []
    hidden = npm.zeros((totalTime, n)) * nan
    m_hist = npm.zeros((totalTime, 1)) * nan
    ratio_energy_hist = npm.zeros((totalTime, 1)) * nan
    Proj = npm.zeros((totalTime, n))
    recon = npm.zeros((totalTime, n))
    relErrors = npm.zeros((totalTime, 1))
    W_hist = []
    errors = npm.zeros((totalTime, 1))
    angle_error = []
    E_t = []
    E_dash_t = []

    #incremental update W
    lastChangeAt = 1
    for t in range(totalTime):
        #actual vector (transposed) of the current time
        xActual = matrix(A[t, :]).T

        # project onto the m-dimensional subspace
        Y = W[:, :m].T * xActual

        #reconstruction of the current time
        xProj = W[:, :m] * Y
        Proj[t, :m] = Y.T
        recon[t, :] = xProj.T
        xOrth = xActual - xProj
        errors[t] = sum(xOrth**2)
        relErrors[t] = sum(xOrth**2) / sum(xActual**2)

        #update W for each y_t
        x = xActual
        for j in range(m):
            w, d_j, x = updateW(x, W[:, j], d[j], lamb)
            W[:, j] = w
            d[j] = d_j

        # optionally keep the weights orthogonal via a QR step (ported from R, not enabled here)
        # W[:, :m] = npm.linalg.qr(W[:, :m])[0]

        # evaluation
        Y = W[:, :m].T * xActual
        hidden[t, :m] = Y.T
        ang_err = W[:, :m].T * W[:, :m] - eye(m)
        ang_err = sqrt(sum(diag(ang_err.T * ang_err)))  # Frobenius norm
        angle_error.append(ang_err)

        # Record RSRE
        if t == 0:
            top = 0.0
            bot = 0.0

        top = top + (norm(xActual - xProj)**2)

        bot = bot + (norm(xActual)**2)

        new_RSRE = top / bot

        if t == 0:
            RSRE = new_RSRE
        else:
            RSRE = npm.vstack((RSRE, new_RSRE))

        #update energy
        sumYSq = lamb * sumYSq + sum(Y**2)
        sumXSq = lamb * sumXSq + sum(xActual**2)

        E_t.append(sumXSq)
        E_dash_t.append(sumYSq)

        #for evaluating:
        m_hist[t] = m
        ratio_energy_hist[t] = sumYSq / sumXSq

        # check the lower bound of energy level
        if (sumYSq < energy_low * sumXSq and lastChangeAt < t - holdOffTime
                and m < n):
            lastChangeAt = t
            m = m + 1
            print("Increasing m to", m, "at time", t, "(energy ratio",
                  100 * sumYSq / sumXSq, ")\n")
            print("Max stream for each hidden variable",
                  (W[:, :m].T).argmax(axis=0), "\n")
            anomalies.append(t)
            W_hist.append(W[:, :m])

        # check the upper bound of energy level
        elif (sumYSq >= energy_high * sumXSq and lastChangeAt < t - holdOffTime
              and m > 1):
            lastChangeAt = t
            m = m - 1
            print("Decreasing m to", m, "at time", t, "(energy ratio",
                  100 * sumYSq / sumXSq, ")\n")
            print("Max stream for each hidden variable",
                  (W[:, :m].T).argmax(axis=0), "\n")

            W_hist.append(W[:, :m])

    # Data Stores
    res = {
        'hidden': hidden,                # Array of hidden variables
        'Proj': Proj,                    # projections onto the tracked subspace
        'weights': W_hist,
        'E_t': np.array(E_t),            # total energy of data
        'E_dash_t': np.array(E_dash_t),  # hidden variable energy
        'e_ratio': ratio_energy_hist,    # energy ratio
        'relErrors': relErrors,          # relative reconstruction errors
        'errors': errors,                # squared reconstruction errors
        'RSRE': RSRE,                    # relative squared reconstruction error
        'recon': recon,                  # reconstructed data
        'r_hist': m_hist,                # history of m (number of hidden variables)
        'angle_err': angle_error,
        'anomalies': anomalies
    }

    return res
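Both SPIRIT variants in this file rely on an updateW helper that is not shown; a minimal sketch, assuming the classic single-component SPIRIT/PAST update rule and the interface updateW(x, w, d, lamb) -> (w, d, x) used above, with x and w column vectors (numpy matrices), d the tracked energy for this component, and lamb the forgetting factor:

def updateW_sketch(x, w, d, lamb):
    # Hypothetical stand-in for the updateW helper: project, update the energy
    # estimate, correct the direction, and pass the residual to the next component.
    y = float(w.T * x)            # projection of x onto the current direction w
    d = lamb * float(d) + y ** 2  # exponentially weighted energy along w
    e = x - y * w                 # reconstruction error for this component
    w = w + (y / d) * e           # move w towards x, step size proportional to y/d
    x = x - y * w                 # residual passed on to the next component
    return w, d, x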
Code example #21
0
def FRHH32(streams, rr, alpha, sci=0):
    """ Fast row-Householder Subspace Tracking Algorithm, non-adaptive version.
    """
#===============================================================================
#     #Initialise variables and data structures 
#===============================================================================
    # check input is type float32     
    
    streams = float32(streams)
    alpha = float32(alpha)
    
    N = streams.shape[1] # No. of streams 
    
    # Data Stores
    E_t = [float32(0)] # time series of total energy 
    E_dash_t = [float32(0)] # time series of reconstructed energy
    z_dash = npm.zeros((1,N), dtype = float32) # time series of reconstructed data 
    RSRE = mat([float32(0)])  # time series of Root squared Reconstruction Error
    hid_var = npm.zeros((streams.shape[0], N), dtype = float32) # Array of hidden Variables 
    
    seed(111)
     
    # Initial Q(0) - either random or I
    
    # Random     
    qq,RR = qr(rand(N,rr))   # generate random orthonormal matrix N x r 
    Q_t = [mat(float32(qq))]   # Initialise Q_t - N x r
    
    # Identity     
    # q_I = npm.eye(N, rr) 
    # Q_t = [q_I]

    S_t = [npm.ones((rr,rr), dtype = float32) * float32(0.00001)]   # Initialise S_t - r x r 
    
    No_inp_count = 0 # count of number of times there was no input i.e. z_t = [0,...,0]
    No_inp_marker = zeros((1,streams.shape[0] + 1))
    
    v_vec_min_1 = npm.zeros((rr,1), dtype = float32)
    
    iter_streams = iter(streams)
    
    for t in range(1, streams.shape[0] + 1):
        
        z_vec = mat(next(iter_streams))
        
        z_vec = z_vec.T # Now a column Vector
        
        hh = Q_t[t-1].T * z_vec                  # 13a

        Z = z_vec.T * z_vec - hh.T * hh           # 13b
        
        # Z = float(Z)  # check that Z is really a scalar
        
        if Z > 0.00000000001:
            
            # Refined version, sci accounts better for tracked eigen values
            if sci != 0: 
                u_vec = S_t[t-1] * v_vec_min_1 
                extra_term = 2 * alpha * sci * u_vec * v_vec_min_1.T
                extra_term = float32(extra_term)
            else:
                extra_term = float32(0)
                
            X = alpha * S_t[t-1]  + hh * hh.T - extra_term
            
            # QR method - hopefully more stable 
            aa = X.T
            b = sqrt(Z[0,0]) * hh
            
            # b_vec = solve(aa,b)
            b_vec = QRsolveM(aa,b)   
            
            b_vec = float32(b_vec)
            
            beta = float32(4) * (b_vec.T * b_vec + 1)
            
            phi_sq_t = float32(0.5) + (float32(1.0) / sqrt(beta))
            
            phi_t = sqrt(phi_sq_t) 
            
            gamma = (float32(1) - float32(2) * phi_sq_t) / (float32(2) * phi_t)   
            
            delta = phi_t / sqrt(Z)          
            
            v_vec_t = multiply(gamma , b_vec)
            
            S_t.append(X - multiply(float32(1) /delta , v_vec_t * hh.T))         
            
            w_vec = multiply(delta , hh) - v_vec_t        
            
            e_vec = multiply(delta, z_vec) - (Q_t[t-1] * w_vec)
            
            Q_t.append(Q_t[t-1] - float32(2) * (e_vec * v_vec_t.T))
        
            v_vec_min_1 = v_vec_t # update for next time step
        
            # Record hidden variables
            hid_var[t-1,:hh.shape[0]] = hh.T
                
            # Record reconstructed z
            new_z_dash = Q_t[t-1] * hh
            z_dash = npm.vstack((z_dash, new_z_dash.T))
        
            # Record RSRE
            new_RSRE = RSRE[0,-1] + (((norm(new_z_dash - z_vec)) ** 2) / 
                                        (norm(z_vec) ** 2))                           
            RSRE = npm.vstack((RSRE, mat(new_RSRE))) 
        
        else:
            
            # Record hidden variables
            hid_var[t-1,:hh.shape[0]] = hh.T
            
            # Record reconstructed z
            new_z_dash = Q_t[t-1] * hh
            z_dash = npm.vstack((z_dash, new_z_dash.T))
        
            # Record RSRE
            new_RSRE = RSRE[0,-1] + (((norm(new_z_dash - z_vec)) ** 2) / 
                                    (norm(z_vec) ** 2))                           
            RSRE = npm.vstack((RSRE, mat(new_RSRE)))            
            
            # Repeat last entries
            Q_t.append(Q_t[-1])
            S_t.append(S_t[-1])            
                         
            # increment count
            No_inp_count += 1                        
            No_inp_marker[t-1] = 1 
            
    # convert to tuples to save memory        
    Q_t = tuple(Q_t)
    S_t = tuple(S_t)
    rr = array(rr)
    E_t = array(E_t)
    E_dash_t = array(E_dash_t)
            
    return  Q_t, S_t, rr, E_t, E_dash_t, hid_var, z_dash, RSRE, No_inp_count, No_inp_marker
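FRHH32 relies on a QRsolveM helper that is not part of this snippet; a minimal sketch, assuming it is a QR-based drop-in for numpy.linalg.solve, as the commented-out solve(aa, b) call above suggests:

import numpy as np

def QRsolveM_sketch(A, b):
    # Hypothetical stand-in for QRsolveM: solve A x = b via a QR factorisation,
    # which is typically better conditioned than forming an explicit inverse.
    A = np.asarray(A, dtype=np.float32)
    b = np.asarray(b, dtype=np.float32)
    Q, R = np.linalg.qr(A)                 # A = Q R, Q orthonormal, R upper triangular
    return np.linalg.solve(R, Q.T.dot(b))  # solve R x = Q^T b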
Code example #22
0
def MMAPPH1NPPR(D, sigma, S, *argv):
    """
    Returns various performance measures of a continuous time
    MMAP[K]/PH[K]/1 non-preemptive priority queue, see [1]_.
    
    Parameters
    ----------
    D : list of matrices of shape (N,N), length (K+1)
        The D0...DK matrices of the arrival process.
        D1 corresponds to the lowest, DK to the highest priority.
    sigma : list of row vectors, length (K)
        The list containing the initial probability vectors of the service
        time distributions of the various customer types. The length of the
        vectors does not have to be the same.
    S : list of square matrices, length (K)
        The transient generators of the phase type distributions representing
        the service time of the jobs belonging to various types.
    further parameters : 
        The rest of the function parameters specify the options
        and the performance measures to be computed.
    
        The supported performance measures and options in this 
        function are:
    
        +----------------+--------------------+----------------------------------------+
        | Parameter name | Input parameters   | Output                                 |
        +================+====================+========================================+
        | "ncMoms"       | Number of moments  | The moments of the number of customers |
        +----------------+--------------------+----------------------------------------+
        | "ncDistr"      | Upper limit K      | The distribution of the number of      |
        |                |                    | customers from level 0 to level K-1    |
        +----------------+--------------------+----------------------------------------+
        | "stMoms"       | Number of moments  | The sojourn time moments               |
        +----------------+--------------------+----------------------------------------+
        | "stDistr"      | A vector of points | The sojourn time distribution at the   |
        |                |                    | requested points (cumulative, cdf)     |
        +----------------+--------------------+----------------------------------------+
        | "prec"         | The precision      | Numerical precision used as a stopping |
        |                |                    | condition when solving the Riccati and |
        |                |                    | the matrix-quadratic equations         |
        +----------------+--------------------+----------------------------------------+
        | "erlMaxOrder"  | Integer number     | The maximal Erlang order used in the   |
        |                |                    | erlangization procedure. The default   |
        |                |                    | value is 200.                          |
        +----------------+--------------------+----------------------------------------+
        | "classes"      | Vector of integers | Only the performance measures          |
        |                |                    | belonging to these classes are         |
        |                |                    | returned. If not given, all classes    |
        |                |                    | are analyzed.                          |
        +----------------+--------------------+----------------------------------------+
        
        (The quantities related to the number of customers in 
        the system include the customer in the server, and the 
        sojourn time related quantities include the service 
        times as well)
    
    Returns
    -------
    Ret : list of the performance measures
        Each entry of the list corresponds to a performance 
        measure requested. Each entry is a matrix, where the
        columns belong to the various job types.
        If there is just a single item, 
        then it is not put into a list.
    
    References
    ----------
    .. [1] G. Horvath, "Efficient analysis of the MMAP[K]/PH[K]/1
           priority queue", European Journal of Operational 
           Research, 246(1), 128-139, 2015.
    """

    K = len(D) - 1

    # parse options
    eaten = []
    erlMaxOrder = 200
    precision = 1e-14
    classes = np.arange(0, K)
    for i in range(len(argv)):
        if argv[i] == "prec":
            precision = argv[i + 1]
            eaten.append(i)
            eaten.append(i + 1)
        elif argv[i] == "erlMaxOrder":
            erlMaxOrder = argv[i + 1]
            eaten.append(i)
            eaten.append(i + 1)
        elif argv[i] == "classes":
            classes = np.array(argv[i + 1]) - 1
            eaten.append(i)
            eaten.append(i + 1)

    if butools.checkInput and not CheckMMAPRepresentation(D):
        raise Exception(
            'MMAPPH1NPPR: The arrival process is not a valid MMAP representation!'
        )

    if butools.checkInput:
        for k in range(K):
            if not CheckPHRepresentation(sigma[k], S[k]):
                raise Exception(
                    'MMAPPH1NPPR: the vector and matrix describing the service times is not a valid PH representation!'
                )

    # some preparation
    D0 = D[0]
    N = D0.shape[0]
    I = ml.eye(N)
    sD = ml.zeros((N, N))
    for Di in D:
        sD += Di

    s = []
    M = np.empty(K, dtype=int)  # integer block sizes, used below in index arithmetic
    for i in range(K):
        s.append(np.sum(-S[i], 1))
        M[i] = sigma[i].size

    # step 1. solution of the workload process of the joint queue
    # ===========================================================
    sM = np.sum(M)
    Qwmm = ml.matrix(D0)
    Qwpm = ml.zeros((N * sM, N))
    Qwmp = ml.zeros((N, N * sM))
    Qwpp = ml.zeros((N * sM, N * sM))
    kix = 0
    for i in range(K):
        Qwmp[:, kix:kix + N * M[i]] = np.kron(D[i + 1], sigma[i])
        Qwpm[kix:kix + N * M[i], :] = np.kron(I, s[i])
        Qwpp[kix:kix + N * M[i], :][:, kix:kix + N * M[i]] = np.kron(I, S[i])
        kix += N * M[i]

    # calculate fundamental matrices
    Psiw, Kw, Uw = FluidFundamentalMatrices(Qwpp, Qwpm, Qwmp, Qwmm, 'PKU',
                                            precision)

    # calculate boundary vector
    Ua = ml.ones((N, 1)) + 2 * np.sum(Qwmp * (-Kw).I, 1)
    pm = Linsolve(
        ml.hstack((Uw, Ua)).T,
        ml.hstack((ml.zeros((1, N)), ml.ones((1, 1)))).T).T

    ro = ((1.0 - np.sum(pm)) / 2.0) / (
        np.sum(pm) + (1.0 - np.sum(pm)) / 2.0
    )  # calc idle time with weight=1, and the busy time with weight=1/2
    kappa = pm / np.sum(pm)

    pi = CTMCSolve(sD)
    lambd = []
    for i in range(K):
        lambd.append(np.sum(pi * D[i + 1]))

    Psiw = []
    Qwmp = []
    Qwzp = []
    Qwpp = []
    Qwmz = []
    Qwpz = []
    Qwzz = []
    Qwmm = []
    Qwpm = []
    Qwzm = []
    for k in range(K):
        # step 2. construct a workload process for classes k...K
        # ======================================================
        Mlo = np.sum(M[:k])
        Mhi = np.sum(M[k:])

        Qkwpp = ml.zeros((N * Mlo * Mhi + N * Mhi, N * Mlo * Mhi + N * Mhi))
        Qkwpz = ml.zeros((N * Mlo * Mhi + N * Mhi, N * Mlo))
        Qkwpm = ml.zeros((N * Mlo * Mhi + N * Mhi, N))
        Qkwmz = ml.zeros((N, N * Mlo))
        Qkwmp = ml.zeros((N, N * Mlo * Mhi + N * Mhi))
        Dlo = ml.matrix(D0)
        for i in range(k):
            Dlo = Dlo + D[i + 1]
        Qkwmm = Dlo
        Qkwzp = ml.zeros((N * Mlo, N * Mlo * Mhi + N * Mhi))
        Qkwzm = ml.zeros((N * Mlo, N))
        Qkwzz = ml.zeros((N * Mlo, N * Mlo))
        kix = 0
        for i in range(k, K):
            kix2 = 0
            for j in range(k):
                bs = N * M[j] * M[i]
                bs2 = N * M[j]
                Qkwpp[kix:kix + bs,
                      kix:kix + bs] = np.kron(I, np.kron(ml.eye(M[j]), S[i]))
                Qkwpz[kix:kix + bs,
                      kix2:kix2 + bs2] = np.kron(I,
                                                 np.kron(ml.eye(M[j]), s[i]))
                Qkwzp[kix2:kix2 + bs2,
                      kix:kix + bs] = np.kron(D[i + 1],
                                              np.kron(ml.eye(M[j]), sigma[i]))
                kix += bs
                kix2 += bs2
        for i in range(k, K):
            bs = N * M[i]
            Qkwpp[kix:kix + bs, :][:, kix:kix + bs] = np.kron(I, S[i])
            Qkwpm[kix:kix + bs, :] = np.kron(I, s[i])
            Qkwmp[:, kix:kix + bs] = np.kron(D[i + 1], sigma[i])
            kix += bs
        kix = 0
        for j in range(k):
            bs = N * M[j]
            Qkwzz[kix:kix + bs, kix:kix +
                  bs] = np.kron(Dlo, ml.eye(M[j])) + np.kron(I, S[j])
            Qkwzm[kix:kix + bs, :] = np.kron(I, s[j])
            kix += bs

        if Qkwzz.shape[0] > 0:
            Psikw = FluidFundamentalMatrices(
                Qkwpp + Qkwpz * (-Qkwzz).I * Qkwzp,
                Qkwpm + Qkwpz * (-Qkwzz).I * Qkwzm, Qkwmp, Qkwmm, 'P',
                precision)
        else:
            Psikw = FluidFundamentalMatrices(Qkwpp, Qkwpm, Qkwmp, Qkwmm, 'P',
                                             precision)
        Psiw.append(Psikw)

        Qwzp.append(Qkwzp)
        Qwmp.append(Qkwmp)
        Qwpp.append(Qkwpp)
        Qwmz.append(Qkwmz)
        Qwpz.append(Qkwpz)
        Qwzz.append(Qkwzz)
        Qwmm.append(Qkwmm)
        Qwpm.append(Qkwpm)
        Qwzm.append(Qkwzm)

    # step 3. calculate Phi vectors
    # =============================
    lambdaS = sum(lambd)
    phi = [(1 - ro) * kappa * (-D0) / lambdaS]
    q0 = [[]]
    qL = [[]]
    for k in range(K - 1):
        sDk = ml.matrix(D0)
        for j in range(k + 1):
            sDk = sDk + D[j + 1]
        # pk
        pk = sum(lambd[:k + 1]) / lambdaS - (1 - ro) * kappa * np.sum(
            sDk, 1) / lambdaS
        # A^(k,1)
        Qwzpk = Qwzp[k + 1]
        vix = 0
        Ak = []
        for ii in range(k + 1):
            bs = N * M[ii]
            V1 = Qwzpk[vix:vix + bs, :]
            Ak.append(
                np.kron(I, sigma[ii]) *
                (-np.kron(sDk, ml.eye(M[ii])) - np.kron(I, S[ii])).I *
                (np.kron(I, s[ii]) + V1 * Psiw[k + 1]))
            vix += bs
        # B^k
        Qwmpk = Qwmp[k + 1]
        Bk = Qwmpk * Psiw[k + 1]
        ztag = phi[0] * ((-D0).I * D[k + 1] * Ak[k] - Ak[0] + (-D0).I * Bk)
        for i in range(k):
            ztag += phi[i + 1] * (Ak[i] - Ak[i + 1]) + phi[0] * (
                -D0).I * D[i + 1] * Ak[i]
        Mx = ml.eye(Ak[k].shape[0]) - Ak[k]
        Mx[:, 0] = ml.ones((N, 1))
        phi.append(
            ml.hstack((pk, ztag[:, 1:])) *
            Mx.I)  # phi(k) = Psi^(k)_k * p(k). Psi^(k)_i = phi(i) / p(k)

        q0.append(phi[0] * (-D0).I)
        qLii = []
        for ii in range(k + 1):
            qLii.append((phi[ii + 1] - phi[ii] + phi[0] *
                         (-D0).I * D[ii + 1]) * np.kron(I, sigma[ii]) *
                        (-np.kron(sDk, ml.eye(M[ii])) - np.kron(I, S[ii])).I)
        qL.append(ml.hstack(qLii))

    # step 4. calculate performance measures
    # ======================================
    Ret = []
    for k in classes:

        sD0k = ml.matrix(D0)
        for i in range(k):
            sD0k += D[i + 1]

        if k < K - 1:
            # step 4.1 calculate distribution of the workload process right
            # before the arrivals of class k jobs
            # ============================================================
            if Qwzz[k].shape[0] > 0:
                Kw = Qwpp[k] + Qwpz[k] * (
                    -Qwzz[k]).I * Qwzp[k] + Psiw[k] * Qwmp[k]
            else:
                Kw = Qwpp[k] + Psiw[k] * Qwmp[k]
            BM = ml.zeros((0, 0))
            CM = ml.zeros((0, N))
            DM = ml.zeros((0, 0))
            for i in range(k):
                BM = la.block_diag(BM, np.kron(I, S[i]))
                CM = ml.vstack((CM, np.kron(I, s[i])))
                DM = la.block_diag(DM, np.kron(D[k + 1], ml.eye(M[i])))
            if k > 0:
                Kwu = ml.vstack((ml.hstack(
                    (Kw, (Qwpz[k] + Psiw[k] * Qwmz[k]) * (-Qwzz[k]).I * DM)),
                                 ml.hstack((ml.zeros(
                                     (BM.shape[0], Kw.shape[1])), BM))))
                Bwu = ml.vstack((Psiw[k] * D[k + 1], CM))
                iniw = ml.hstack(
                    (q0[k] * Qwmp[k] + qL[k] * Qwzp[k], qL[k] * DM))
                pwu = q0[k] * D[k + 1]
            else:
                Kwu = Kw
                Bwu = Psiw[k] * D[k + 1]
                iniw = pm * Qwmp[k]
                pwu = pm * D[k + 1]

            norm = np.sum(pwu) + np.sum(iniw * (-Kwu).I * Bwu)
            pwu = pwu / norm
            iniw = iniw / norm

            # step 4.2 create the fluid model whose first passage time equals the
            # WAITING time of the low priority customers
            # ==================================================================
            KN = Kwu.shape[0]
            Qspp = ml.zeros(
                (KN + N * np.sum(M[k + 1:]), KN + N * np.sum(M[k + 1:])))
            Qspm = ml.zeros((KN + N * np.sum(M[k + 1:]), N))
            Qsmp = ml.zeros((N, KN + N * np.sum(M[k + 1:])))
            Qsmm = sD0k + D[k + 1]
            kix = 0
            for i in range(k + 1, K):
                bs = N * M[i]
                Qspp[KN + kix:KN + kix + bs, :][:, KN + kix:KN + kix +
                                                bs] = np.kron(I, S[i])
                Qspm[KN + kix:KN + kix + bs, :] = np.kron(I, s[i])
                Qsmp[:, KN + kix:KN + kix + bs] = np.kron(D[i + 1], sigma[i])
                kix += bs

            Qspp[:KN, :][:, :KN] = Kwu
            Qspm[:KN, :] = Bwu
            inis = ml.hstack((iniw, ml.zeros((1, N * np.sum(M[k + 1:])))))

            # calculate fundamental matrix
            Psis = FluidFundamentalMatrices(Qspp, Qspm, Qsmp, Qsmm, 'P',
                                            precision)

            # step 4.3. calculate the performance measures
            # ==========================================
            argIx = 0
            while argIx < len(argv):
                if argIx in eaten:
                    argIx += 1
                    continue
                elif type(argv[argIx]) is str and argv[argIx] == "stMoms":
                    # MOMENTS OF THE SOJOURN TIME
                    # ~~~~~~~~~~~~~~~~~~~~~~~~~~~
                    numOfSTMoms = argv[argIx + 1]
                    # calculate waiting time moments
                    Pn = [Psis]
                    wtMoms = []
                    for n in range(1, numOfSTMoms + 1):
                        A = Qspp + Psis * Qsmp
                        B = Qsmm + Qsmp * Psis
                        C = -2 * n * Pn[n - 1]
                        bino = 1
                        for i in range(1, n):
                            bino = bino * (n - i + 1) / i
                            C += bino * Pn[i] * Qsmp * Pn[n - i]
                        P = la.solve_sylvester(A, B, -C)
                        Pn.append(P)
                        wtMoms.append(np.sum(inis * P * (-1)**n) / 2**n)
                    # calculate RESPONSE time moments
                    Pnr = [np.sum(inis * Pn[0]) * sigma[k]]
                    rtMoms = []
                    for n in range(1, numOfSTMoms + 1):
                        P = n * Pnr[n - 1] * (-S[k]).I + (-1)**n * np.sum(
                            inis * Pn[n]) * sigma[k] / 2**n
                        Pnr.append(P)
                        rtMoms.append(
                            np.sum(P) + np.sum(pwu) * math.factorial(n) *
                            np.sum(sigma[k] * (-S[k]).I**n))
                    Ret.append(rtMoms)
                    argIx += 1
                elif type(argv[argIx]) is str and argv[argIx] == "stDistr":
                    # DISTRIBUTION OF THE SOJOURN TIME
                    # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
                    stCdfPoints = argv[argIx + 1]
                    res = []
                    for t in stCdfPoints:
                        L = erlMaxOrder
                        lambdae = L / t / 2
                        Psie = FluidFundamentalMatrices(
                            Qspp - lambdae * ml.eye(Qspp.shape[0]), Qspm, Qsmp,
                            Qsmm - lambdae * ml.eye(Qsmm.shape[0]), 'P',
                            precision)
                        Pn = [Psie]
                        pr = (np.sum(pwu) + np.sum(inis * Psie)) * (1 - np.sum(
                            sigma[k] *
                            (ml.eye(S[k].shape[0]) - S[k] / 2 / lambdae).I**L))
                        for n in range(1, L):
                            A = Qspp + Psie * Qsmp - lambdae * ml.eye(
                                Qspp.shape[0])
                            B = Qsmm + Qsmp * Psie - lambdae * ml.eye(
                                Qsmm.shape[0])
                            C = 2 * lambdae * Pn[n - 1]
                            for i in range(1, n):
                                C += Pn[i] * Qsmp * Pn[n - i]
                            P = la.solve_sylvester(A, B, -C)
                            Pn.append(P)
                            pr += np.sum(inis * P) * (
                                1 - np.sum(sigma[k] *
                                           (np.eye(S[k].shape[0]) -
                                            S[k] / 2 / lambdae).I**(L - n)))
                        res.append(pr)
                    Ret.append(np.array(res))
                    argIx += 1
                elif type(argv[argIx]) is str and (argv[argIx] == "ncMoms" or
                                                   argv[argIx] == "ncDistr"):
                    W = (-np.kron(sD - D[k + 1], ml.eye(M[k])) -
                         np.kron(I, S[k])).I * np.kron(D[k + 1], ml.eye(M[k]))
                    iW = (ml.eye(W.shape[0]) - W).I
                    w = np.kron(ml.eye(N), sigma[k])
                    omega = (-np.kron(sD - D[k + 1], ml.eye(M[k])) -
                             np.kron(I, S[k])).I * np.kron(I, s[k])
                    if argv[argIx] == "ncMoms":
                        # MOMENTS OF THE NUMBER OF JOBS
                        # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
                        numOfQLMoms = argv[argIx + 1]
                        # first calculate it at departure instants
                        Psii = [Psis]
                        QLDPn = [inis * Psii[0] * w * iW]
                        for n in range(1, numOfQLMoms + 1):
                            A = Qspp + Psis * Qsmp
                            B = Qsmm + Qsmp * Psis
                            C = n * Psii[n - 1] * D[k + 1]
                            bino = 1
                            for i in range(1, n):
                                bino = bino * (n - i + 1) / i
                                C = C + bino * Psii[i] * Qsmp * Psii[n - i]
                            P = la.solve_sylvester(A, B, -C)
                            Psii.append(P)
                            QLDPn.append(n * QLDPn[n - 1] * iW * W +
                                         inis * P * w * iW)
                        for n in range(numOfQLMoms + 1):
                            QLDPn[n] = (QLDPn[n] +
                                        pwu * w * iW**(n + 1) * W**n) * omega
                        # now calculate it at random time instance
                        QLPn = [pi]
                        qlMoms = []
                        iTerm = (ml.ones((N, 1)) * pi - sD).I
                        for n in range(1, numOfQLMoms + 1):
                            sumP = np.sum(QLDPn[n]) + n * np.sum(
                                (QLDPn[n - 1] - QLPn[n - 1] * D[k + 1] /
                                 lambd[k]) * iTerm * D[k + 1])
                            P = sumP * pi + n * (
                                QLPn[n - 1] * D[k + 1] -
                                QLDPn[n - 1] * lambd[k]) * iTerm
                            QLPn.append(P)
                            qlMoms.append(np.sum(P))
                        qlMoms = MomsFromFactorialMoms(qlMoms)
                        Ret.append(qlMoms)
                        argIx += 1
                    elif argv[argIx] == "ncDistr":
                        # DISTRIBUTION OF THE NUMBER OF JOBS
                        # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
                        numOfQLProbs = argv[argIx + 1]
                        Psid = FluidFundamentalMatrices(
                            Qspp, Qspm, Qsmp, sD0k, 'P', precision)
                        Pn = [Psid]
                        XDn = inis * Psid * w
                        dqlProbs = (XDn + pwu * w) * omega
                        for n in range(1, numOfQLProbs):
                            A = Qspp + Psid * Qsmp
                            B = sD0k + Qsmp * Psid
                            C = Pn[n - 1] * D[k + 1]
                            for i in range(1, n):
                                C += Pn[i] * Qsmp * Pn[n - i]
                            P = la.solve_sylvester(A, B, -C)
                            Pn.append(P)
                            XDn = XDn * W + inis * P * w
                            dqlProbs = ml.vstack(
                                (dqlProbs, (XDn + pwu * w * W**n) * omega))
                        # now calculate it at random time instance
                        iTerm = -(sD - D[k + 1]).I
                        qlProbs = lambd[k] * dqlProbs[0, :] * iTerm
                        for n in range(1, numOfQLProbs):
                            P = (qlProbs[n - 1, :] * D[k + 1] + lambd[k] *
                                 (dqlProbs[n, :] - dqlProbs[n - 1, :])) * iTerm
                            qlProbs = ml.vstack((qlProbs, P))
                        qlProbs = np.sum(qlProbs, 1).A.flatten()
                        Ret.append(qlProbs)
                        argIx += 1
                else:
                    raise Exception("MMAPPH1NPPR: Unknown parameter " +
                                    str(argv[argIx]))
                argIx += 1
        elif k == K - 1:
            # step 3. calculate the performance measures
            # ==========================================
            argIx = 0
            while argIx < len(argv):
                if argIx in eaten:
                    argIx += 1
                    continue
                elif type(argv[argIx]) is str and (argv[argIx] == "stMoms" or
                                                   argv[argIx] == "stDistr"):
                    Kw = Qwpp[k] + Qwpz[k] * (
                        -Qwzz[k]).I * Qwzp[k] + Psiw[k] * Qwmp[k]
                    AM = ml.zeros((0, 0))
                    BM = ml.zeros((0, 0))
                    CM = ml.zeros((0, 1))
                    DM = ml.zeros((0, 0))
                    for i in range(k):
                        AM = la.block_diag(
                            AM,
                            np.kron(ml.ones((N, 1)),
                                    np.kron(ml.eye(M[i]), s[k])))
                        BM = la.block_diag(BM, S[i])
                        CM = ml.vstack((CM, s[i]))
                        DM = la.block_diag(DM, np.kron(D[k + 1], ml.eye(M[i])))
                    Z = ml.vstack((ml.hstack(
                        (Kw, ml.vstack((AM, ml.zeros(
                            (N * M[k], AM.shape[1])))))),
                                   ml.hstack((ml.zeros(
                                       (BM.shape[0], Kw.shape[1])), BM))))
                    z = ml.vstack((ml.zeros(
                        (AM.shape[0], 1)), np.kron(ml.ones((N, 1)), s[k]), CM))
                    iniw = ml.hstack((q0[k] * Qwmp[k] + qL[k] * Qwzp[k],
                                      ml.zeros((1, BM.shape[0]))))
                    zeta = iniw / np.sum(iniw * (-Z).I * z)
                    if argv[argIx] == "stMoms":
                        # MOMENTS OF THE SOJOURN TIME
                        # ~~~~~~~~~~~~~~~~~~~~~~~~~~~
                        numOfSTMoms = argv[argIx + 1]
                        rtMoms = []
                        for i in range(1, numOfSTMoms + 1):
                            rtMoms.append(
                                np.sum(
                                    math.factorial(i) * zeta *
                                    (-Z).I**(i + 1) * z))
                        Ret.append(rtMoms)
                        argIx += 1
                    if argv[argIx] == "stDistr":
                        # DISTRIBUTION OF THE SOJOURN TIME
                        # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
                        stCdfPoints = argv[argIx + 1]
                        rtDistr = []
                        for t in stCdfPoints:
                            rtDistr.append(
                                np.sum(zeta * (-Z).I *
                                       (ml.eye(Z.shape[0]) - la.expm(Z * t)) *
                                       z))
                        Ret.append(np.array(rtDistr))
                        argIx += 1
                elif type(argv[argIx]) is str and (argv[argIx] == "ncMoms" or
                                                   argv[argIx] == "ncDistr"):
                    L = ml.zeros((N * np.sum(M), N * np.sum(M)))
                    B = ml.zeros((N * np.sum(M), N * np.sum(M)))
                    F = ml.zeros((N * np.sum(M), N * np.sum(M)))
                    kix = 0
                    for i in range(K):
                        bs = N * M[i]
                        F[kix:kix + bs, :][:, kix:kix + bs] = np.kron(
                            D[k + 1], ml.eye(M[i]))
                        L[kix:kix + bs, :][:, kix:kix + bs] = np.kron(
                            sD0k, ml.eye(M[i])) + np.kron(I, S[i])
                        if i < K - 1:
                            L[kix:kix + bs, :][:,
                                               N * np.sum(M[:k]):] = np.kron(
                                                   I, s[i] * sigma[k])
                        else:
                            B[kix:kix + bs, :][:,
                                               N * np.sum(M[:k]):] = np.kron(
                                                   I, s[i] * sigma[k])
                        kix += bs
                    R = QBDFundamentalMatrices(B, L, F, 'R', precision)
                    p0 = ml.hstack((qL[k], q0[k] * np.kron(I, sigma[k])))
                    p0 = p0 / np.sum(p0 * (ml.eye(R.shape[0]) - R).I)
                    if argv[argIx] == "ncMoms":
                        # MOMENTS OF THE NUMBER OF JOBS
                        # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
                        numOfQLMoms = argv[argIx + 1]
                        qlMoms = []
                        for i in range(1, numOfQLMoms + 1):
                            qlMoms.append(
                                np.sum(
                                    math.factorial(i) * p0 * R**i *
                                    (ml.eye(R.shape[0]) - R).I**(i + 1)))
                        Ret.append(MomsFromFactorialMoms(qlMoms))
                    elif argv[argIx] == "ncDistr":
                        # DISTRIBUTION OF THE NUMBER OF JOBS
                        # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
                        numOfQLProbs = argv[argIx + 1]
                        qlProbs = [np.sum(p0)]
                        for i in range(1, numOfQLProbs):
                            qlProbs.append(np.sum(p0 * R**i))
                        Ret.append(np.array(qlProbs))
                    argIx += 1
                else:
                    raise Exception("MMAPPH1NPPR: Unknown parameter " +
                                    str(argv[argIx]))
                argIx += 1

    if len(Ret) == 1:
        return Ret[0]
    else:
        return Ret
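A minimal usage sketch of the option interface documented above; D0..D2, sigma1/sigma2 and S1/S2 are placeholders assumed to define a valid two-class MMAP[2]/PH[2]/1 model (they are not values from this file):

# per class, the results come back in the order the measures were requested,
# as in the MMAPPH1PRPR session at the end of this file
stm1, ncd1, stm2, ncd2 = MMAPPH1NPPR(
    [D0, D1, D2], [sigma1, sigma2], [S1, S2],
    "stMoms", 3,       # first three sojourn time moments per class
    "ncDistr", 100,    # distribution of the number of customers, levels 0..99
    "prec", 1e-12)     # stopping precision for the Riccati / matrix-quadratic solvers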
Code example #23
0
def SPIRIT(streams, energyThresh, lamb, evalMetrics):

    # Ensure the input is a 2-D array (rows = time steps, columns = streams)
    streams = np.asarray(streams)
    if streams.ndim == 1:
        streams = np.expand_dims(streams, axis=1)

    # Max no. of streams
    num_streams = streams.shape[1]

    # iterate over the rows, one time step at a time
    streams_iter = iter(streams)

    count_over = 0
    count_under = 0

    #===============================================================================
    #      Initialise k, w and d, lamb
    #===============================================================================

    k = 1  # Hidden Variables, initialise to one

    # Weights
    pc_weights = npm.zeros(num_streams)
    pc_weights[0, 0] = 1

    # initialise outputs
    res = {}
    all_weights = []
    k_hist = []
    anomalies = []
    x_dash = npm.zeros((1, num_streams))

    Eng = mat([0.00000001, 0.00000001])

    E_xt = 0  # Energy of X at time t
    E_rec_i = mat([0.000000000000001])  # Energy of reconstruction

    Y = npm.zeros(num_streams)

    timeSteps = streams.shape[0]

    #===============================================================================
    # Main Loop
    #===============================================================================
    for t in range(1, timeSteps + 1):  # t = 1,...,200

        k_hist.append(k)

        x_t_plus_1 = mat(next(streams_iter))  # Read in next signals

        d_i = E_rec_i * t

        # Step 1 - Update Weights
        pc_weights, y_t_i, error = track_W(x_t_plus_1, k, pc_weights, d_i,
                                           num_streams, lamb)
        # Record hidden variables
        padding = num_streams - k
        y_bar_t = npm.hstack((y_t_i, mat([nan] * padding)))
        Y = npm.vstack((Y, y_bar_t))

        # Record Weights
        all_weights.append(pc_weights)
        # Record reconstructed z and RSRE
        x_dash = npm.vstack((x_dash, y_t_i * pc_weights))

        # Record RSRE
        if t == 1:
            top = 0.0
            bot = 0.0

        top = top + (norm(x_t_plus_1 - x_dash[-1, :])**2)

        bot = bot + (norm(x_t_plus_1)**2)

        new_RSRE = top / bot

        if t == 1:
            RSRE = new_RSRE
        else:
            RSRE = npm.vstack((RSRE, new_RSRE))

        ### FOR EVALUATION ###
        #deviation from truth
        if evalMetrics == 'T':

            Qt = pc_weights.T

            if t == 1:
                res['subspace_error'] = npm.zeros((timeSteps, 1))
                res['orthog_error'] = npm.zeros((timeSteps, 1))

                res['angle_error'] = npm.zeros((timeSteps, 1))
                Cov_mat = npm.zeros([num_streams, num_streams])

            # Calculate covariance matrix of data up to time t (outer product update)
            Cov_mat = lamb * Cov_mat + npm.dot(x_t_plus_1.T, x_t_plus_1)
            # Get eigenvalues and eigenvectors
            W, V = eig(Cov_mat)
            # Sort eigenvectors by descending eigenvalue
            eig_idx = W.argsort()  # Get sort index
            eig_idx = eig_idx[::-1]  # Reverse order (default is ascending)
            # V_k = top k eigenvectors (by eigenvalue)
            V_k = V[:, eig_idx[:k]]
            # Calculate subspace error
            C = npm.dot(V_k, V_k.T) - npm.dot(Qt, Qt.T)
            res['subspace_error'][t - 1, 0] = 10 * np.log10(
                npm.trace(npm.dot(C.T, C)))  #frobenius norm in dB

            # Calculate angle between projection matrixes
            D = npm.dot(npm.dot(npm.dot(V_k.T, Qt), Qt.T), V_k)
            eigVal, eigVec = eig(D)
            angle = npm.arccos(np.sqrt(max(eigVal)))
            res['angle_error'][t - 1, 0] = angle

            # Calculate deviation from orthonormality
            F = npm.dot(Qt.T, Qt) - npm.eye(k)
            res['orthog_error'][t - 1, 0] = 10 * np.log10(
                npm.trace(npm.dot(F.T, F)))  #frobenius norm in dB

        # Step 2 - Update Energy estimate
        E_xt = ((lamb * (t - 1) * E_xt) + norm(x_t_plus_1)**2) / t

        for i in range(k):
            E_rec_i[0, i] = ((lamb * (t - 1) * E_rec_i[0, i]) +
                             (y_t_i[0, i]**2)) / t

        # Step 3 - Estimate the retained energy
        E_retained = npm.sum(E_rec_i, 1)

        # Record Energy
        Eng_new = npm.hstack((E_xt, E_retained[0, 0]))
        Eng = npm.vstack((Eng, Eng_new))

        if E_retained < energyThresh[0] * E_xt:
            if k != num_streams:
                k = k + 1
                # Initialise Ek+1 <-- 0
                E_rec_i = npm.hstack((E_rec_i, mat([0])))
                # Initialise W_i+1
                new_weight_vec = npm.zeros(num_streams)
                new_weight_vec[0, k - 1] = 1
                pc_weights = npm.vstack((pc_weights, new_weight_vec))
                anomalies.append(t - 1)
            else:
                count_over += 1
        elif E_retained > energyThresh[1] * E_xt:
            if k > 1:
                k = k - 1
                # discard w_k and error
                pc_weights = delete(pc_weights, -1, 0)
                # Discard E_rec_i[k]
                E_rec_i = delete(E_rec_i, -1)
            else:
                count_under += 1

    # Data Stores
    res2 = {
        'hidden': Y,  # Array for hidden Variables
        'weights': all_weights,
        'E_t': Eng[:, 0],  # total energy of data 
        'E_dash_t': Eng[:, 1],  # hidden var energy
        'e_ratio': np.divide(Eng[:, 1], Eng[:, 0]),  # Energy ratio 
        'RSRE': RSRE,  # Relative squared Reconstruction error 
        'recon': x_dash,  # reconstructed data
        'r_hist': k_hist,  # history of r values 
        'anomalies': anomalies
    }

    res.update(res2)

    return res, all_weights
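A minimal usage sketch for this SPIRIT variant, assuming the module-level imports it depends on (npm, mat, norm, nan, delete, track_W, ...) are available; the synthetic streams and parameter values below are purely illustrative:

import numpy as np

# three correlated sinusoidal streams plus noise (illustrative data)
t = np.arange(1000)
streams = np.column_stack((np.sin(t / 10.0),
                           np.sin(t / 10.0) + 0.1 * np.random.randn(1000),
                           np.cos(t / 10.0)))

# energyThresh = [lower, upper] bounds on the retained-energy ratio, lamb = forgetting factor
res, weights = SPIRIT(streams, energyThresh=[0.95, 0.98], lamb=0.96, evalMetrics='F')
print(res['r_hist'][-1], res['anomalies'])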
Code example #24
0
def MMAPPH1FCFS(D, sigma, S, *argv):
    """
    Returns various performance measures of a MMAP[K]/PH[K]/1
    first-come-first-serve queue, see [1]_.
    
    Parameters
    ----------
    D : list of matrices of shape (N,N), length (K+1)
        The D0...DK matrices of the arrival process.
    sigma : list of row vectors, length (K)
        The list containing the initial probability vectors of the service
        time distributions of the various customer types. The length of the
        vectors does not have to be the same.
    S : list of square matrices, length (K)
        The transient generators of the phase type distributions representing
        the service time of the jobs belonging to various types.
    further parameters : 
        The rest of the function parameters specify the options
        and the performance measures to be computed.
    
        The supported performance measures and options in this 
        function are:
    
        +----------------+--------------------+----------------------------------------+
        | Parameter name | Input parameters   | Output                                 |
        +================+====================+========================================+
        | "ncMoms"       | Number of moments  | The moments of the number of customers |
        +----------------+--------------------+----------------------------------------+
        | "ncDistr"      | Upper limit K      | The distribution of the number of      |
        |                |                    | customers from level 0 to level K-1    |
        +----------------+--------------------+----------------------------------------+
        | "stMoms"       | Number of moments  | The sojourn time moments               |
        +----------------+--------------------+----------------------------------------+
        | "stDistr"      | A vector of points | The sojourn time distribution at the   |
        |                |                    | requested points (cumulative, cdf)     |
        +----------------+--------------------+----------------------------------------+
        | "stDistrME"    | None               | The vector-matrix parameters of the    |
        |                |                    | matrix-exponentially distributed       |
        |                |                    | sojourn time distribution              |
        +----------------+--------------------+----------------------------------------+
        | "stDistrPH"    | None               | The vector-matrix parameters of the    |
        |                |                    | matrix-exponentially distributed       |
        |                |                    | sojourn time distribution, converted   |
        |                |                    | to a continuous PH representation      |
        +----------------+--------------------+----------------------------------------+
        | "prec"         | The precision      | Numerical precision used as a stopping |
        |                |                    | condition when solving the Riccati     |
        |                |                    | equation                               |
        +----------------+--------------------+----------------------------------------+
        | "classes"      | Vector of integers | Only the performance measures          |
        |                |                    | belonging to these classes are         |
        |                |                    | returned. If not given, all classes    |
        |                |                    | are analyzed.                          |
        +----------------+--------------------+----------------------------------------+
        
        (The quantities related to the number of customers in 
        the system include the customer in the server, and the 
        sojourn time related quantities include the service 
        times as well)
    
    Returns
    -------
    Ret : list of the performance measures
        Each entry of the list corresponds to a performance 
        measure requested. Each entry is a matrix, where the
        columns belong to the various job types.
        If there is just a single item, 
        then it is not put into a list.
    
    References
    ----------
    .. [1] Qiming He, "Analysis of a continuous time 
           SM[K]/PH[K]/1/FCFS queue: Age process, sojourn times,
           and queue lengths", Journal of Systems Science and 
           Complexity, 25(1), pp 133-155, 2012.
    """

    K = len(D) - 1

    # parse options
    eaten = []
    precision = 1e-14
    classes = np.arange(0, K)
    for i in range(len(argv)):
        if argv[i] == "prec":
            precision = argv[i + 1]
            eaten.append(i)
            eaten.append(i + 1)
        elif argv[i] == "classes":
            classes = np.array(argv[i + 1]) - 1
            eaten.append(i)
            eaten.append(i + 1)

    if butools.checkInput and not CheckMMAPRepresentation(D):
        raise Exception(
            'MMAPPH1FCFS: The arrival process is not a valid MMAP representation!'
        )

    if butools.checkInput:
        for k in range(K):
            if not CheckPHRepresentation(sigma[k], S[k]):
                raise Exception(
                    'MMAPPH1FCFS: the vector and matrix describing the service times is not a valid PH representation!'
                )

    # some preparation
    D0 = D[0]
    N = D0.shape[0]
    Ia = ml.eye(N)
    Da = ml.zeros((N, N))
    for q in range(K):
        Da += D[q + 1]
    theta = CTMCSolve(D0 + Da)
    beta = [CTMCSolve(S[k] + ml.sum(-S[k], 1) * sigma[k]) for k in range(K)]
    lambd = [np.sum(theta * D[k + 1]) for k in range(K)]
    mu = [np.sum(beta[k] * (-S[k])) for k in range(K)]
    Nsk = [S[k].shape[0] for k in range(K)]
    ro = np.sum(np.array(lambd) / np.array(mu))
    alpha = theta * Da / sum(lambd)
    D0i = (-D0).I

    Sa = S[0]
    sa = [ml.zeros(sigma[0].shape)] * K
    sa[0] = sigma[0]
    ba = [ml.zeros(beta[0].shape)] * K
    ba[0] = beta[0]
    sv = [ml.zeros((Nsk[0], 1))] * K
    sv[0] = ml.sum(-S[0], 1)
    Pk = [D0i * D[q + 1] for q in range(K)]

    for k in range(1, K):
        Sa = la.block_diag(Sa, S[k])
        for q in range(K):
            if q == k:
                sa[q] = ml.hstack((sa[q], sigma[k]))
                ba[q] = ml.hstack((ba[q], beta[k]))
                sv[q] = ml.vstack((sv[q], -np.sum(S[k], 1)))
            else:
                sa[q] = ml.hstack((sa[q], ml.zeros(sigma[k].shape)))
                ba[q] = ml.hstack((ba[q], ml.zeros(beta[k].shape)))
                sv[q] = ml.vstack((sv[q], ml.zeros((Nsk[k], 1))))
    Sa = ml.matrix(Sa)
    P = D0i * Da
    iVec = ml.kron(D[1], sa[0])
    for k in range(1, K):
        iVec += ml.kron(D[k + 1], sa[k])
    Ns = Sa.shape[0]
    Is = ml.eye(Ns)

    # step 1. solve the age process of the queue
    # ==========================================

    # solve Y0 and calculate T
    Y0 = FluidFundamentalMatrices(ml.kron(Ia, Sa), ml.kron(Ia, -ml.sum(Sa, 1)),
                                  iVec, D0, "P", precision)
    T = ml.kron(Ia, Sa) + Y0 * iVec

    # calculate pi0 and v0
    pi0 = ml.zeros((1, T.shape[0]))
    for k in range(K):
        pi0 += ml.kron(theta * D[k + 1], ba[k] / mu[k])
    pi0 = -pi0 * T

    iT = (-T).I
    oa = ml.ones((N, 1))

    # step 2. calculate performance measures
    # ======================================
    Ret = []
    for k in classes:
        argIx = 0
        clo = iT * ml.kron(oa, sv[k])
        while argIx < len(argv):
            if argIx in eaten:
                argIx += 1
                continue
            elif type(argv[argIx]) is str and argv[argIx] == "stMoms":
                numOfSTMoms = argv[argIx + 1]
                rtMoms = []
                for m in range(1, numOfSTMoms + 1):
                    rtMoms.append(
                        math.factorial(m) * np.sum(pi0 * iT**m * clo /
                                                   (pi0 * clo)))
                Ret.append(rtMoms)
                argIx += 1
            elif type(argv[argIx]) is str and argv[argIx] == "stDistr":
                stCdfPoints = argv[argIx + 1]
                cdf = []
                for t in stCdfPoints:
                    pr = 1 - np.sum(pi0 * la.expm(T * t) * clo / (pi0 * clo))
                    cdf.append(pr)
                Ret.append(np.array(cdf))
                argIx += 1
            elif type(argv[argIx]) is str and argv[argIx] == "stDistrME":
                Bm = SimilarityMatrixForVectors(clo / (pi0 * clo),
                                                ml.ones((N * Ns, 1)))
                Bmi = Bm.I
                A = Bm * T * Bmi
                alpha = pi0 * Bmi
                Ret.append(alpha)
                Ret.append(A)
            elif type(argv[argIx]) is str and argv[argIx] == "stDistrPH":
                vv = pi0 * iT
                ix = np.arange(N * Ns)
                nz = ix[vv.flat > precision]
                delta = Diag(vv[:, nz])
                cl = -T * clo / (pi0 * clo)
                alpha = cl[nz, :].T * delta
                A = delta.I * T[nz, :][:, nz].T * delta
                Ret.append(alpha)
                Ret.append(A)
            elif type(argv[argIx]) is str and argv[argIx] == "ncDistr":
                numOfQLProbs = argv[argIx + 1]
                argIx += 1
                values = np.empty(numOfQLProbs)
                jm = ml.zeros((Ns, 1))
                jm[np.sum(Nsk[0:k]):np.sum(Nsk[0:k + 1]), :] = 1
                jmc = ml.ones((Ns, 1))
                jmc[np.sum(Nsk[0:k]):np.sum(Nsk[0:k + 1]), :] = 0
                LmCurr = la.solve_sylvester(T, ml.kron(D0 + Da - D[k + 1], Is),
                                            -ml.eye(N * Ns))
                values[0] = 1 - ro + np.sum(pi0 * LmCurr * ml.kron(oa, jmc))
                for i in range(1, numOfQLProbs):
                    LmPrev = LmCurr
                    LmCurr = la.solve_sylvester(
                        T, ml.kron(D0 + Da - D[k + 1], Is),
                        -LmPrev * ml.kron(D[k + 1], Is))
                    values[i] = np.sum(pi0 * LmCurr * ml.kron(oa, jmc) +
                                       pi0 * LmPrev * ml.kron(oa, jm))
                Ret.append(values)
            elif type(argv[argIx]) is str and argv[argIx] == "ncMoms":
                numOfQLMoms = argv[argIx + 1]
                argIx += 1
                jm = ml.zeros((Ns, 1))
                jm[np.sum(Nsk[0:k]):np.sum(Nsk[0:k + 1]), :] = 1
                ELn = [
                    la.solve_sylvester(T, ml.kron(D0 + Da, Is),
                                       -ml.eye(N * Ns))
                ]
                qlMoms = []
                for n in range(1, numOfQLMoms + 1):
                    bino = 1
                    Btag = ml.zeros((N * Ns, N * Ns))
                    for i in range(n):
                        Btag += bino * ELn[i]
                        bino *= (n - i) / (i + 1)
                    ELn.append(
                        la.solve_sylvester(T, ml.kron(D0 + Da, Is),
                                           -Btag * ml.kron(D[k + 1], Is)))
                    qlMoms.append(
                        np.sum(pi0 * ELn[n]) +
                        np.sum(pi0 * Btag * ml.kron(oa, jm)))
                Ret.append(qlMoms)
            else:
                raise Exception("MMAPPH1FCFS: Unknown parameter " +
                                str(argv[argIx]))
            argIx += 1

    if len(Ret) == 1:
        return Ret[0]
    else:
        return Ret
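The "stDistrPH" and "stDistrME" options above return a vector-matrix pair (alpha, A); a hedged sketch, using the standard phase-type / matrix-exponential formula E[X^n] = n! * alpha * (-A)^(-n) * 1, of turning that pair into sojourn time moments:

import math
import numpy.matlib as ml

def ph_moments_sketch(alpha, A, num=3):
    # moments of a PH / matrix-exponential pair (alpha, A)
    iA = (-A).I
    ones = ml.ones((A.shape[0], 1))
    return [float(math.factorial(n) * alpha * iA ** n * ones)
            for n in range(1, num + 1)]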
Code example #25
0
def SPIRIT(A,
           lamb,
           energy,
           k0=1,
           holdOffTime=0,
           reorthog=False,
           evalMetrics='F'):

    A = np.mat(A)

    n = A.shape[1]
    totalTime = A.shape[0]
    Proj = npm.ones((totalTime, n)) * np.nan
    recon = npm.zeros((totalTime, n))

    # initialize w_i to unit vectors
    W = npm.eye(n)
    d = 0.01 * npm.ones((n, 1))
    m = k0  # number of eigencomponents

    relErrors = npm.zeros((totalTime, 1))

    sumYSq = 0.
    E_t = []
    sumXSq = 0.
    E_dash_t = []

    res = {}
    k_hist = []
    W_hist = []
    anomalies = []

    # incremental update W
    lastChangeAt = 0

    for t in range(totalTime):

        k_hist.append(m)

        # update W for each y_t
        x = A[t, :].T  # new data as column vector

        for j in range(m):
            W[:, j], d[j], x = updateW(x, W[:, j], d[j], lamb)
            Wj = W[:, j]

        # Gram-Schmidt reorthogonalisation (via QR)
        if reorthog == True:
            W[:, :m], R = npm.linalg.qr(W[:, :m])

        # compute low-D projection, reconstruction and relative error
        Y = W[:, :m].T * A[t, :].T  # project to m-dimensional space
        xActual = A[t, :].T  # actual vector of the current time
        xProj = W[:, :m] * Y  # reconstruction of the current time
        Proj[t, :m] = Y.T
        recon[t, :] = xProj.T
        xOrth = xActual - xProj
        relErrors[t] = npm.sum(npm.power(xOrth, 2)) / npm.sum(
            npm.power(xActual, 2))

        # update energy
        sumYSq = lamb * sumYSq + npm.sum(npm.power(Y, 2))
        E_dash_t.append(sumYSq)
        sumXSq = lamb * sumXSq + npm.sum(npm.power(A[t, :], 2))
        E_t.append(sumXSq)

        # Record RSRE
        if t == 0:
            top = 0.0
            bot = 0.0

        top = top + npm.power(npm.linalg.norm(xActual - xProj), 2)

        bot = bot + npm.power(npm.linalg.norm(xActual), 2)

        new_RSRE = top / bot

        if t == 0:
            RSRE = new_RSRE
        else:
            RSRE = npm.vstack((RSRE, new_RSRE))

        ### Metric EVALUATION ###
        #deviation from truth
        if evalMetrics == 'T':

            Qt = W[:, :m]

            if t == 0:
                res['subspace_error'] = npm.zeros((totalTime, 1))
                res['orthog_error'] = npm.zeros((totalTime, 1))
                res['angle_error'] = npm.zeros((totalTime, 1))
                Cov_mat = npm.zeros([n, n])

            # Calculate covariance matrix of data up to time t
            Cov_mat = lamb * Cov_mat + npm.dot(xActual, xActual.T)
            # Get eigenvalues and eigenvectors
            WW, V = npm.linalg.eig(Cov_mat)
            # Sort eigenvectors by descending eigenvalue
            eig_idx = WW.argsort()  # Get sort index
            eig_idx = eig_idx[::-1]  # Reverse order (default is ascending)
            # V_k = top m eigenvectors (by eigenvalue)
            V_k = V[:, eig_idx[:m]]
            # Calculate subspace error
            C = npm.dot(V_k, V_k.T) - npm.dot(Qt, Qt.T)
            res['subspace_error'][t, 0] = 10 * np.log10(
                npm.trace(npm.dot(C.T, C)))  #frobenius norm in dB
            # Calculate angle between projection matrices
            D = npm.dot(npm.dot(npm.dot(V_k.T, Qt), Qt.T), V_k)
            eigVal, eigVec = npm.linalg.eig(D)
            angle = npm.arccos(np.sqrt(max(eigVal)))
            res['angle_error'][t, 0] = angle

            # Calculate deviation from orthonormality
            F = npm.dot(Qt.T, Qt) - npm.eye(m)
            res['orthog_error'][t, 0] = 10 * np.log10(
                npm.trace(npm.dot(F.T, F)))  #frobenius norm in dB

        # Energy thresholding
        ######################
        # check the lower bound of energy level
        if sumYSq < energy[
                0] * sumXSq and lastChangeAt < t - holdOffTime and m < n:
            lastChangeAt = t
            m = m + 1
            anomalies.append(t)
        # print 'Increasing m to %d at time %d (ratio %6.2f)\n' % (m, t, 100 * sumYSq/sumXSq)
        # check the upper bound of energy level
        elif sumYSq > energy[
                1] * sumXSq and lastChangeAt < t - holdOffTime and m < n and m > 1:
            lastChangeAt = t
            m = m - 1
        # print 'Decreasing m to %d at time %d (ratio %6.2f)\n' % (m, t, 100 * sumYSq/sumXSq)
        W_hist.append(W[:, :m])
    # set outputs

    # Gram-Schmidt reorthogonalization
    if reorthog:
        W[:, :m], R = npm.linalg.qr(W[:, :m])

    # Data Stores
    res2 = {
        'hidden': Proj,  # array of hidden variables
        'E_t': np.array(E_t),  # total energy of the data
        'E_dash_t': np.array(E_dash_t),  # hidden variable energy
        'e_ratio': np.array(E_dash_t) / np.array(E_t),  # energy ratio
        'rel_orth_err': relErrors,  # relative reconstruction error per step
        'RSRE': RSRE,  # relative squared reconstruction error
        'recon': recon,  # reconstructed data
        'r_hist': k_hist,  # history of m values
        'W_hist': W_hist,  # history of weight matrices
        'anomalies': anomalies
    }

    res.update(res2)

    return res
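
The SPIRIT routine above delegates the per-direction update to an updateW helper that is not included in this snippet. For reference, a minimal sketch of the standard SPIRIT/PAST-style update such a helper typically performs is shown below; the name updateW_sketch and the exact update rule are assumptions, not the project's own implementation.

def updateW_sketch(x, w, d, lamb):
    # One PAST-style update of a single principal direction (sketch only).
    # x: current data as a column matrix, w: tracked direction (column matrix),
    # d: energy along w (scalar), lamb: forgetting factor in (0, 1].
    y = (w.T * x).item()          # projection of x onto w
    d = float(lamb * d + y ** 2)  # exponentially weighted energy along w
    e = x - y * w                 # reconstruction error in this direction
    w = w + (y / d) * e           # adapt the direction towards the error
    x = x - y * w                 # deflate x before the next direction
    return w, d, x
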
Code example #26
 sigma2 = ml.matrix([[1.]])
 print('>>> S2 = ml.matrix([[-2.]])')
 S2 = ml.matrix([[-2.]])
 print('>>> sigma1 = ml.matrix([[0.25,0.75]])')
 sigma1 = ml.matrix([[0.25, 0.75]])
 print('>>> S1 = ml.matrix([[-2.5, 2.5],[0., -10.]])')
 S1 = ml.matrix([[-2.5, 2.5], [0., -10.]])
 print(
     '>>> ncm1, ncd1, ncm2, ncd2, ncm3, ncd3 = MMAPPH1PRPR([D0, D1, D2, D3], [sigma1, sigma2, sigma3], [S1, S2, S3], "ncMoms", 3, "ncDistr", 500)'
 )
 ncm1, ncd1, ncm2, ncd2, ncm3, ncd3 = MMAPPH1PRPR([D0, D1, D2, D3],
                                                  [sigma1, sigma2, sigma3],
                                                  [S1, S2, S3], "ncMoms", 3,
                                                  "ncDistr", 500)
 momFromDistr1 = ncd1 * ml.vstack(
     (ml.matrix(np.arange(0, 500.0, 1)), ml.matrix(
         np.arange(0, 500.0, 1)**2), ml.matrix(np.arange(0, 500.0, 1)**
                                               3))).T
 momFromDistr2 = ncd2 * ml.vstack(
     (ml.matrix(np.arange(0, 500.0, 1)), ml.matrix(
         np.arange(0, 500.0, 1)**2), ml.matrix(np.arange(0, 500.0, 1)**
                                               3))).T
 momFromDistr3 = ncd3 * ml.vstack(
     (ml.matrix(np.arange(0, 500.0, 1)), ml.matrix(
         np.arange(0, 500.0, 1)**2), ml.matrix(np.arange(0, 500.0, 1)**
                                               3))).T
 print('>>> distrPoints = [1., 5., 10.]')
 distrPoints = [1., 5., 10.]
 print(
     '>>> stm1, std1, stm2, std2, stm3, std3 = MMAPPH1PRPR([D0, D1, D2, D3], [sigma1, sigma2, sigma3], [S1, S2, S3], "stMoms", 3, "stDistr", distrPoints)'
 )
 stm1, std1, stm2, std2, stm3, std3 = MMAPPH1PRPR([D0, D1, D2, D3],
                                                  [sigma1, sigma2, sigma3],
                                                  [S1, S2, S3], "stMoms", 3,
                                                  "stDistr", distrPoints)
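
The momFromDistr lines above multiply the queue length distribution (levels 0 to 499) by a stacked matrix of the first three powers of the level index, i.e. they recover raw moments from the distribution as a consistency check against the "ncMoms" output. A small hypothetical helper (moms_from_distr is not part of butools) expressing the same computation for an arbitrary number of moments could look like this:

import numpy as np

def moms_from_distr(p, num_moms):
    # p: probabilities of having 0, 1, ..., len(p)-1 customers
    # returns the first num_moms raw moments, as computed above for three moments
    p = np.asarray(p).ravel()
    k = np.arange(len(p))
    return [float(np.dot(p, k ** m)) for m in range(1, num_moms + 1)]
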
Code example #27
File: FRAHST_M.py  Project: aanchan/Old-PhD-Code
def FRAHST_M(streams, energyThresh, alpha):
    """ Fast rank adaptive row-Householder Subspace Tracking Algorithm
    
    """
    #Initialise 
    N = streams.shape[1]
    rr = [1]    
    hiddenV = npm.zeros((streams.shape[0], N))   
    # generate random orthonormal  - N x r 
    qq,RR = qr(rand(N,1))    
    Q_t = [mat(qq)]    
    S_t = [mat([0.000001])] 
    E_t = [0]
    E_dash_t = [0]
    z_dash = npm.zeros(N)
    RSRE = mat([0])
    No_inp_count = 0
    
    iter_streams = iter(streams)
    
    for t in range(1, streams.shape[0] + 1):
        
        z_vec = mat(next(iter_streams))
        
        z_vec = z_vec.T # Now a column Vector
        
        hh = Q_t[t-1].T * z_vec                       # 13a

        Z = z_vec.T * z_vec - hh.T * hh           # 13b
        
        Z = float(Z) # check that Z is really a scalar
        
        if Z > 0.0000001:        
            
            X = alpha * S_t[t-1] + hh * hh.T               # 13c
        
            # X.T * b = sqrt(Z) * hh                           # 13d        
        
            b = multiply(inv(X.T), sqrt(Z)) * hh  # inverse method 
        
            phi_sq_t = 0.5 + (1 / sqrt(4 *((b.T * b) + 1)))   # 13e

            phi_t = sqrt(phi_sq_t)        

            delta = phi_t / sqrt(Z)                        # 13f
        
            gamma = (1 - 2 * phi_sq_t) / (2 * phi_t)         #13 g
        
            v = multiply(gamma, b)  
        
            S_t.append(X - multiply(1/delta , v * hh.T))         # 13 h  S_t[t] = 

            
            e = multiply(delta, z_vec) - (Q_t[t-1] * (multiply(delta, hh) - v))  # 13 i
            
            Q_t.append(Q_t[t-1] - 2 * (e * v.T))                 # 13 j  Q[t] = 

            # Record hidden variables
            hiddenV[t-1,:hh.shape[0]] = hh.T
            
            # Record reconstructed z
            new_z_dash = Q_t[t-1] * hh
            z_dash = npm.vstack((z_dash, new_z_dash.T))
        
            # Record RSRE
            new_RSRE = RSRE[0,-1] + (((norm(new_z_dash - z_vec)) ** 2) / 
                                    (norm(z_vec) ** 2))                           
            RSRE = npm.vstack((RSRE, mat(new_RSRE))) 
        
            E_t.append(alpha * E_t[-1] + norm(z_vec) ** 2)        # 13 k
                
            E_dash_t.append( alpha * E_dash_t[-1] + norm(hh) ** 2)  # 13 l
        
            if E_dash_t[-1] < energyThresh[0] * E_t[-1] and rr[-1] < N: # 13 m 
        
                z_dag_orthog =  z_vec - Q_t[t] * Q_t[t].T * z_vec 
            
                # try Q[t], not Q[t + 1]
                
                Q_t[t] = npm.bmat([Q_t[t], z_dag_orthog/norm(z_dag_orthog)])
                                                 
                TR = npm.zeros((S_t[t].shape[0], 1))
                BL = npm.zeros((1 ,S_t[t].shape[1]))
                BR = mat(norm(z_dag_orthog) ** 2 )
                                
                S_t[t] = npm.bmat([[S_t[t],  TR],
                                   [  BL  ,  BR]])
                  
                rr.append(rr[-1] + 1)
            
            elif E_dash_t[-1] > energyThresh[1] * E_t[-1] and rr[-1] > 1 :
            
                Q_t[t] = Q_t[t][:, :-1]   # delete the last column of Q_t
        
                S_t[t] = S_t[t][:-1, :-1] # delete last row and column of S_t
        
                rr.append(rr[-1] - 1)
        
        else:
            
            # Record hidden variables
            hiddenV[t-1,:hh.shape[0]] = hh.T
            
            # Record reconstructed z
            new_z_dash = Q_t[t-1] * hh
            z_dash = npm.vstack((z_dash, new_z_dash.T))
        
            # Record RSRE
            new_RSRE = RSRE[0,-1] + (((norm(new_z_dash - z_vec)) ** 2) / 
                                    (norm(z_vec) ** 2))                           
            RSRE = npm.vstack((RSRE, mat(new_RSRE)))            
            
            # Repeat last entries
            Q_t.append(Q_t[-1])
            S_t.append(S_t[-1])            
            rr.append(rr[-1])  
            E_t.append(E_t[-1])        
            E_dash_t.append(E_dash_t[-1])              
            
            # increment count
            No_inp_count += 1                        
            
    return Q_t, S_t, rr, E_t, E_dash_t, hiddenV, z_dash, RSRE, No_inp_count
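
A minimal call sketch for FRAHST_M on synthetic data, assuming the function and its module-level numpy imports are available; the stream matrix, energy thresholds and forgetting factor below are illustrative values only, not ones used by the original project.

import numpy as np

rng = np.random.RandomState(0)
streams = np.cumsum(rng.randn(500, 5), axis=0)   # 500 ticks of 5 drifting streams

Q_t, S_t, rr, E_t, E_dash_t, hiddenV, z_dash, RSRE, skipped = FRAHST_M(
    streams, energyThresh=(0.96, 0.98), alpha=0.96)

print("final tracked rank:", rr[-1])
print("inputs skipped (near-zero innovation):", skipped)
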
Code example #28
File: SPIRIT_Pedro.py  Project: MrKriss/Old-PhD-Code
def SPIRIT_pedro(A, k0 = 1, lamb=0.96, holdOffTime=0, 
           energy_low=0.95, energy_high=0.98):

    n = A.shape[1]
    totalTime = A.shape[0]
    
    W = npm.eye(n)      #initialize the k (up to n) w_i to unit vectors
    d = 0.01 * npm.ones((n, 1)) #energy associated with given eigenvalue of covariance X_t'X_t
    m = k0              # number of eigencomponents
        
    sumYSq=0
    sumXSq=0
    
    #data structures for evaluating (~ totalTime)
    print("Running incremental simulation on", n, "streams with a total of", totalTime, "ticks.\n")
    anomalies = []
    hidden = npm.zeros((totalTime, n)) * nan
    m_hist = npm.zeros((totalTime, 1)) * nan
    ratio_energy_hist = npm.zeros((totalTime, 1)) * nan
    Proj = npm.zeros((totalTime, n))
    recon = npm.zeros((totalTime, n))    
    relErrors = npm.zeros((totalTime, 1))
    W_hist = []    
    errors = npm.zeros((totalTime, 1)) 
    angle_error = []
    E_t = []
    E_dash_t = []
  
    
    #incremental update W
    lastChangeAt = 1
    for t in range(totalTime):      
        #actual vector (transposed) of the current time
        xActual = matrix(A[t,:]).T  
                
        #project to m-dimensional space
        Y = W[:, :m].T * xActual

        #reconstruction of the current time
        xProj = W[:, :m] * Y
        Proj[t, :m] = Y.T
        recon[t, :] = xProj.T
        xOrth = xActual - xProj
        errors[t] = np.sum(np.power(xOrth, 2))
        relErrors[t] = np.sum(np.power(xOrth, 2)) / np.sum(np.power(xActual, 2))

        #update W for each y_t
        x = xActual
        for j in range(m):
            W[:, j], d[j], x = updateW(x, W[:, j], d[j], lamb)
            
        #keep the weights orthogonal: the R original re-orthogonalised W[:, :m]
        #with a QR decomposition here; that step is omitted in this port

        #eval
        Y = W[:, :m].T * xActual
        hidden[t, :m] = Y.T
        ang_err = W[:, :m].T * W[:, :m] - eye(m)
        ang_err = sqrt(sum(diag(ang_err.T * ang_err)))  #frobenius norm
        angle_error.append(ang_err)
        
        # Record RSRE
        if t == 0:
            top = 0.0
            bot = 0.0
            
        top = top + (norm(xActual - xProj) ** 2 )

        bot = bot + (norm(xActual) ** 2)
        
        new_RSRE = top / bot   
                  
        if t == 0:
            RSRE = new_RSRE
        else:                  
            RSRE = npm.vstack((RSRE, new_RSRE))        
        

        #update energy
        sumYSq = lamb * sumYSq + np.sum(np.power(Y, 2))
        sumXSq = lamb * sumXSq + np.sum(np.power(xActual, 2))
        
        E_t.append(sumXSq)
        E_dash_t.append(sumYSq)
        
        #for evaluating:
        m_hist[t] = m        
        ratio_energy_hist[t] = sumYSq/sumXSq


        # check the lower bound of energy level
        if (sumYSq < energy_low * sumXSq and 
                    lastChangeAt < t - holdOffTime and m < n) :
            lastChangeAt = t
            m = m + 1
            print("Increasing m to", m, "at time", t,
                  "(energy ratio", 100 * sumYSq / sumXSq, ")")
            print("Max stream for each hidden variable:",
                  (W[:, :m].T).argmax(axis=0))
            anomalies.append(t)
            W_hist.append(W[:,:m])

        # check the upper bound of energy level
        elif (sumYSq >= energy_high * sumXSq and 
                    lastChangeAt < t - holdOffTime and  m > 1):
            lastChangeAt = t 
            m = m - 1
            print("Decreasing m to", m, "at time", t,
                  "(energy ratio", 100 * sumYSq / sumXSq, ")")
            print("Max stream for each hidden variable:",
                  (W[:, :m].T).argmax(axis=0))
            
            W_hist.append(W[:,:m])
        
    # Data Stores
    res = {'hidden': hidden,                # hidden variables
           'Proj': Proj,                    # projections onto the m-dim subspace
           'weights': W_hist,               # history of weight matrices
           'E_t': np.array(E_t),            # total energy of data
           'E_dash_t': np.array(E_dash_t),  # hidden variable energy
           'e_ratio': ratio_energy_hist,    # energy ratio
           'relErrors': relErrors,          # relative reconstruction errors
           'errors': errors,                # squared reconstruction errors
           'RSRE': RSRE,                    # relative squared reconstruction error
           'recon': recon,                  # reconstructed data
           'r_hist': m_hist,                # history of m values
           'angle_err': angle_error,        # deviation of W from orthonormality
           'anomalies': anomalies}
        
    return res
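
A minimal call sketch for SPIRIT_pedro, again on purely illustrative data; it assumes the function, an updateW routine and the module's numpy imports are available.

import numpy as np

rng = np.random.RandomState(1)
A = rng.randn(300, 4)            # 300 time steps of 4 streams, illustrative only

res = SPIRIT_pedro(A, k0=1, lamb=0.96, holdOffTime=10,
                   energy_low=0.95, energy_high=0.98)

print("times at which m changed:", res['anomalies'])
print("final number of hidden variables:", int(res['r_hist'][-1, 0]))
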
Code example #29
File: prprio.py  Project: wangnangg/butools-demo
def MMAPPH1PRPR(D, sigma, S, *argv):
    """
    Returns various performance measures of an MMAP[K]/PH[K]/1
    preemptive resume priority queue, see [1]_.
    
    Parameters
    ----------
    D : list of matrices of shape (N,N), length (K+1)
        The D0...DK matrices of the arrival process.
        D1 corresponds to the lowest, DK to the highest priority.
    sigma : list of row vectors, length (K)
        The list containing the initial probability vectors of the service
        time distributions of the various customer types. The length of the
        vectors does not have to be the same.
    S : list of square matrices, length (K)
        The transient generators of the phase type distributions representing
        the service time of the jobs belonging to various types.
    further parameters : 
        The rest of the function parameters specify the options
        and the performance measures to be computed.
    
        The supported performance measures and options in this 
        function are:
    
        +----------------+--------------------+----------------------------------------+
        | Parameter name | Input parameters   | Output                                 |
        +================+====================+========================================+
        | "ncMoms"       | Number of moments  | The moments of the number of customers |
        +----------------+--------------------+----------------------------------------+
        | "ncDistr"      | Upper limit K      | The distribution of the number of      |
        |                |                    | customers from level 0 to level K-1    |
        +----------------+--------------------+----------------------------------------+
        | "stMoms"       | Number of moments  | The sojourn time moments               |
        +----------------+--------------------+----------------------------------------+
        | "stDistr"      | A vector of points | The sojourn time distribution at the   |
        |                |                    | requested points (cumulative, cdf)     |
        +----------------+--------------------+----------------------------------------+
        | "prec"         | The precision      | Numerical precision used as a stopping |
        |                |                    | condition when solving the Riccati and |
        |                |                    | the matrix-quadratic equations         |
        +----------------+--------------------+----------------------------------------+
        | "erlMaxOrder"  | Integer number     | The maximal Erlang order used in the   |
        |                |                    | erlangization procedure. The default   |
        |                |                    | value is 200.                          |
        +----------------+--------------------+----------------------------------------+
        | "classes"      | Vector of integers | Only the performance measures          |
        |                |                    | belonging to these classes are         |
        |                |                    | returned. If not given, all classes    |
        |                |                    | are analyzed.                          |
        +----------------+--------------------+----------------------------------------+
        
        (The quantities related to the number of customers in 
        the system include the customer in the server, and the 
        sojourn time related quantities include the service 
        times as well)
    
    Returns
    -------
    Ret : list of the performance measures
        Each entry of the list corresponds to a performance 
        measure requested. Each entry is a matrix, where the
        columns belong to the various job types.
        If there is just a single item, 
        then it is not put into a list.
    
    References
    ----------
    .. [1] G. Horvath, "Efficient analysis of the MMAP[K]/PH[K]/1
           priority queue", European Journal of Operational 
           Research, 246(1), 128-139, 2015.
    """

    K = len(D) - 1

    # parse options
    eaten = []
    erlMaxOrder = 200
    precision = 1e-14
    classes = np.arange(0, K)
    for i in range(len(argv)):
        if argv[i] == "prec":
            precision = argv[i + 1]
            eaten.append(i)
            eaten.append(i + 1)
        elif argv[i] == "erlMaxOrder":
            erlMaxOrder = argv[i + 1]
            eaten.append(i)
            eaten.append(i + 1)
        elif argv[i] == "classes":
            classes = np.array(argv[i + 1]) - 1
            eaten.append(i)
            eaten.append(i + 1)

    if butools.checkInput and not CheckMMAPRepresentation(D):
        raise Exception(
            'MMAPPH1PRPR: The arrival process is not a valid MMAP representation!'
        )

    if butools.checkInput:
        for k in range(K):
            if not CheckPHRepresentation(sigma[k], S[k]):
                raise Exception(
                    'MMAPPH1PRPR: the vector and matrix describing the service times is not a valid PH representation!'
                )

    # some preparation
    D0 = D[0]
    N = D0.shape[0]
    I = ml.eye(N)
    sD = ml.zeros((N, N))
    for Di in D:
        sD += Di

    s = []
    M = np.empty(K, dtype=int)  # phase counts must be integers for the index arithmetic below
    for i in range(K):
        s.append(np.sum(-S[i], 1))
        M[i] = sigma[i].size

    Ret = []
    for k in classes:

        # step 1. solution of the workload process of the system
        # ======================================================
        sM = np.sum(M[k:K])
        Qwmm = ml.matrix(D0)
        for i in range(k):
            Qwmm += D[i + 1]

        Qwpm = ml.zeros((N * sM, N))
        Qwmp = ml.zeros((N, N * sM))
        Qwpp = ml.zeros((N * sM, N * sM))
        kix = 0
        for i in range(k, K):
            Qwmp[:, kix:kix + N * M[i]] = np.kron(D[i + 1], sigma[i])
            Qwpm[kix:kix + N * M[i], :] = np.kron(I, s[i])
            Qwpp[kix:kix + N * M[i], :][:,
                                        kix:kix + N * M[i]] = np.kron(I, S[i])
            kix += N * M[i]

        # calculate fundamental matrices
        Psiw, Kw, Uw = FluidFundamentalMatrices(Qwpp, Qwpm, Qwmp, Qwmm, 'PKU',
                                                precision)

        # calculate boundary vector
        Ua = ml.ones((N, 1)) + 2 * np.sum(Qwmp * (-Kw).I, 1)
        pm = Linsolve(
            ml.hstack((Uw, Ua)).T,
            ml.hstack((ml.zeros((1, N)), ml.ones((1, 1)))).T).T

        Bw = ml.zeros((N * sM, N))
        Bw[0:N * M[k], :] = np.kron(I, s[k])
        kappa = pm * Qwmp / np.sum(pm * Qwmp * (-Kw).I * Bw)

        if k < K - 1:
            # step 2. construct fluid model for the remaining sojourn time process
            # ====================================================================
            # (for each class except the highest priority)
            Qsmm = ml.matrix(D0)
            for i in range(k + 1):
                Qsmm += D[i + 1]

            Np = Kw.shape[0]
            Qspm = ml.zeros((Np + N * np.sum(M[k + 1:]), N))
            Qsmp = ml.zeros((N, Np + N * np.sum(M[k + 1:])))
            Qspp = ml.zeros(
                (Np + N * np.sum(M[k + 1:]), Np + N * np.sum(M[k + 1:])))
            Qspp[:Np, :Np] = Kw
            Qspm[:Np, :N] = Bw
            kix = Np
            for i in range(k + 1, K):
                Qsmp[:, kix:kix + N * M[i]] = np.kron(D[i + 1], sigma[i])
                Qspm[kix:kix + N * M[i], :] = np.kron(I, s[i])
                Qspp[kix:kix + N * M[i], kix:kix + N * M[i]] = np.kron(I, S[i])
                kix += N * M[i]

            inis = ml.hstack((kappa, ml.zeros((1, N * np.sum(M[k + 1:])))))
            Psis = FluidFundamentalMatrices(Qspp, Qspm, Qsmp, Qsmm, 'P',
                                            precision)

            # step 3. calculate the performance measures
            # ==========================================
            argIx = 0
            while argIx < len(argv):
                if argIx in eaten:
                    argIx += 1
                    continue
                elif type(argv[argIx]) is str and argv[argIx] == "stMoms":
                    # MOMENTS OF THE SOJOURN TIME
                    # ~~~~~~~~~~~~~~~~~~~~~~~~~~~
                    numOfSTMoms = argv[argIx + 1]
                    Pn = [Psis]
                    rtMoms = []
                    for n in range(1, numOfSTMoms + 1):
                        A = Qspp + Psis * Qsmp
                        B = Qsmm + Qsmp * Psis
                        C = -2 * n * Pn[n - 1]
                        bino = 1
                        for i in range(1, n):
                            bino = bino * (n - i + 1) / i
                            C += bino * Pn[i] * Qsmp * Pn[n - i]
                        P = la.solve_sylvester(A, B, -C)
                        Pn.append(P)
                        rtMoms.append(np.sum(inis * P * (-1)**n) / 2**n)
                    Ret.append(rtMoms)
                    argIx += 1
                elif type(argv[argIx]) is str and argv[argIx] == "stDistr":
                    # DISTRIBUTION OF THE SOJOURN TIME
                    # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
                    stCdfPoints = argv[argIx + 1]
                    res = []
                    for t in stCdfPoints:
                        L = erlMaxOrder
                        lambd = L / t / 2
                        Psie = FluidFundamentalMatrices(
                            Qspp - lambd * ml.eye(Qspp.shape[0]), Qspm, Qsmp,
                            Qsmm - lambd * ml.eye(Qsmm.shape[0]), 'P',
                            precision)
                        Pn = [Psie]
                        pr = np.sum(inis * Psie)
                        for n in range(1, L):
                            A = Qspp + Psie * Qsmp - lambd * ml.eye(
                                Qspp.shape[0])
                            B = Qsmm + Qsmp * Psie - lambd * ml.eye(
                                Qsmm.shape[0])
                            C = 2 * lambd * Pn[n - 1]
                            for i in range(1, n):
                                C += Pn[i] * Qsmp * Pn[n - i]
                            P = la.solve_sylvester(A, B, -C)
                            Pn.append(P)
                            pr += np.sum(inis * P)
                        res.append(pr)
                    Ret.append(np.array(res))
                    argIx += 1
                elif type(argv[argIx]) is str and argv[argIx] == "ncMoms":
                    # MOMENTS OF THE NUMBER OF JOBS
                    # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
                    numOfQLMoms = argv[argIx + 1]
                    # first calculate it at departure instants
                    QLDPn = [Psis]
                    dqlMoms = []
                    for n in range(1, numOfQLMoms + 1):
                        A = Qspp + Psis * Qsmp
                        B = Qsmm + Qsmp * Psis
                        C = n * QLDPn[n - 1] * D[k + 1]
                        bino = 1
                        for i in range(1, n):
                            bino = bino * (n - i + 1) / i
                            C = C + bino * QLDPn[i] * Qsmp * QLDPn[n - i]
                        P = la.solve_sylvester(A, B, -C)
                        QLDPn.append(P)
                        dqlMoms.append(np.sum(inis * P))
                    dqlMoms = MomsFromFactorialMoms(dqlMoms)
                    # now calculate it at random time instance
                    pi = CTMCSolve(sD)
                    lambdak = np.sum(pi * D[k + 1])
                    QLPn = [pi]
                    qlMoms = []
                    iTerm = (ml.ones((N, 1)) * pi - sD).I
                    for n in range(1, numOfQLMoms + 1):
                        sumP = np.sum(inis * QLDPn[n]) + n * (
                            inis * QLDPn[n - 1] - QLPn[n - 1] * D[k + 1] /
                            lambdak) * iTerm * np.sum(D[k + 1], 1)
                        P = sumP * pi + n * (QLPn[n - 1] * D[k + 1] - inis *
                                             QLDPn[n - 1] * lambdak) * iTerm
                        QLPn.append(P)
                        qlMoms.append(np.sum(P))
                    qlMoms = MomsFromFactorialMoms(qlMoms)
                    Ret.append(qlMoms)
                    argIx += 1
                elif type(argv[argIx]) is str and argv[argIx] == "ncDistr":
                    # DISTRIBUTION OF THE NUMBER OF JOBS
                    # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
                    numOfQLProbs = argv[argIx + 1]
                    sDk = ml.matrix(D0)
                    for i in range(k):
                        sDk += D[i + 1]
                    # first calculate it at departure instants
                    Psid = FluidFundamentalMatrices(Qspp, Qspm, Qsmp, sDk, 'P',
                                                    precision)
                    Pn = [Psid]
                    dqlProbs = inis * Psid
                    for n in range(1, numOfQLProbs):
                        A = Qspp + Psid * Qsmp
                        B = sDk + Qsmp * Psid
                        C = Pn[n - 1] * D[k + 1]
                        for i in range(1, n):
                            C += Pn[i] * Qsmp * Pn[n - i]
                        P = la.solve_sylvester(A, B, -C)
                        Pn.append(P)
                        dqlProbs = ml.vstack((dqlProbs, inis * P))
                    # now calculate it at random time instance
                    pi = CTMCSolve(sD)
                    lambdak = np.sum(pi * D[k + 1])
                    iTerm = -(sD - D[k + 1]).I
                    qlProbs = lambdak * dqlProbs[0, :] * iTerm
                    for n in range(1, numOfQLProbs):
                        P = (qlProbs[n - 1, :] * D[k + 1] + lambdak *
                             (dqlProbs[n, :] - dqlProbs[n - 1, :])) * iTerm
                        qlProbs = ml.vstack((qlProbs, P))
                    qlProbs = np.sum(qlProbs, 1).A.flatten()
                    Ret.append(qlProbs)
                    argIx += 1
                else:
                    raise Exception("MMAPPH1PRPR: Unknown parameter " +
                                    str(argv[argIx]))
                argIx += 1
        elif k == K - 1:
            # step 3. calculate the performance measures
            # ==========================================
            argIx = 0
            while argIx < len(argv):
                if argIx in eaten:
                    argIx += 1
                    continue
                elif type(argv[argIx]) is str and argv[argIx] == "stMoms":
                    # MOMENTS OF THE SOJOURN TIME
                    # ~~~~~~~~~~~~~~~~~~~~~~~~~~~
                    numOfSTMoms = argv[argIx + 1]
                    rtMoms = []
                    for i in range(1, numOfSTMoms + 1):
                        rtMoms.append(
                            np.sum(
                                math.factorial(i) * kappa * (-Kw).I**(i + 1) *
                                Bw))
                    Ret.append(rtMoms)
                    argIx += 1
                elif type(argv[argIx]) is str and argv[argIx] == "stDistr":
                    # DISTRIBUTION OF THE SOJOURN TIME
                    # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
                    stCdfPoints = argv[argIx + 1]
                    rtDistr = []
                    for t in stCdfPoints:
                        rtDistr.append(
                            np.sum(kappa * (-Kw).I *
                                   (ml.eye(Kw.shape[0]) - la.expm(Kw * t)) *
                                   Bw))
                    Ret.append(np.array(rtDistr))
                    argIx += 1
                elif type(argv[argIx]) is str and (argv[argIx] == "ncMoms" or
                                                   argv[argIx] == "ncDistr"):
                    L = np.kron(sD - D[k + 1], ml.eye(M[k])) + np.kron(
                        ml.eye(N), S[k])
                    B = np.kron(ml.eye(N), s[k] * sigma[k])
                    F = np.kron(D[k + 1], ml.eye(M[k]))
                    L0 = np.kron(sD - D[k + 1], ml.eye(M[k]))
                    R = QBDFundamentalMatrices(B, L, F, 'R', precision)
                    p0 = CTMCSolve(L0 + R * B)
                    p0 = p0 / np.sum(p0 * (ml.eye(R.shape[0]) - R).I)
                    if argv[argIx] == "ncMoms":
                        # MOMENTS OF THE NUMBER OF JOBS
                        # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
                        numOfQLMoms = argv[argIx + 1]
                        qlMoms = []
                        for i in range(1, numOfQLMoms + 1):
                            qlMoms.append(
                                np.sum(
                                    math.factorial(i) * p0 * R**i *
                                    (ml.eye(R.shape[0]) - R).I**(i + 1)))
                        Ret.append(MomsFromFactorialMoms(qlMoms))
                    elif argv[argIx] == "ncDistr":
                        # DISTRIBUTION OF THE NUMBER OF JOBS
                        # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
                        numOfQLProbs = argv[argIx + 1]
                        qlProbs = [np.sum(p0)]
                        for i in range(1, numOfQLProbs):
                            qlProbs.append(np.sum(p0 * R**i))
                        Ret.append(np.array(qlProbs))
                    argIx += 1
                else:
                    raise Exception("MMAPPH1PRPR: Unknown parameter " +
                                    str(argv[argIx]))
                argIx += 1

    if len(Ret) == 1:
        return Ret[0]
    else:
        return Ret
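
Because the performance measures and options are passed as a flat variadic list, they can be mixed freely in a single call. A short call sketch restricted to one priority class is given below; it reuses the D, sigma and S lists from code example #26, and the specific class, moment count and precision are illustrative choices only.

# Analyze only class 2 and request two sojourn time moments for it,
# with a tighter stopping precision for the Riccati iteration.
stm2 = MMAPPH1PRPR([D0, D1, D2, D3], [sigma1, sigma2, sigma3], [S1, S2, S3],
                   "classes", [2], "stMoms", 2, "prec", 1e-12)
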
Code example #30
def FRHH(streams, rr, alpha, sci=0):
    """ Fast row-Householder Subspace Tracking Algorithm, non-adaptive version
    
    """
    #===============================================================================
    #     #Initialise variables and data structures
    #===============================================================================
    # check input is type float32

    streams = float32(streams)
    alpha = float32(alpha)

    N = streams.shape[1]  # No. of streams

    # Data Stores
    E_t = [float32(0)]  # time series of total energy
    E_dash_t = [float32(0)]  # time series of reconstructed energy
    z_dash = npm.zeros(N, dtype=float32)  # time series of reconstructed data
    RSRE = mat([float32(0)
                ])  # time series of Root squared Reconstruction Error
    hid_var = npm.zeros((streams.shape[0], N),
                        dtype=float32)  # Array of hidden Variables

    seed(111)

    # Initial Q(0) - either random or I

    # Random
    qq, RR = qr(rand(N, rr))  # generate random orthonormal matrix N x r
    Q_t = [mat(float32(qq))]  # Initialise Q_t - N x r

    # Identity
    # q_I = npm.eye(N, rr)
    # Q_t = [q_I]

    S_t = [npm.ones(
        (rr, rr), dtype=float32) * float32(0.00001)]  # Initialise S_t - r x r

    No_inp_count = 0  # count of number of times there was no input i.e. z_t = [0,...,0]
    No_inp_marker = zeros((1, streams.shape[0] + 1))

    v_vec_min_1 = npm.zeros((rr, 1), dtype=float32)

    iter_streams = iter(streams)

    for t in range(1, streams.shape[0] + 1):

        z_vec = mat(next(iter_streams))

        z_vec = z_vec.T  # Now a column Vector

        hh = Q_t[t - 1].T * z_vec  # 13a

        Z = z_vec.T * z_vec - hh.T * hh  # 13b

        # Z = float(Z) # check that Z is really a scalar

        if Z > 0.00000000001:

            # Refined version, sci accounts better for tracked eigen values
            if sci != 0:
                u_vec = S_t[t - 1] * v_vec_min_1
                extra_term = 2 * alpha * sci * u_vec * v_vec_min_1.T
                extra_term = float32(extra_term)
            else:
                extra_term = float32(0)

            X = alpha * S_t[t - 1] + hh * hh.T - extra_term

            # QR method - hopefully more stable
            aa = X.T
            b = sqrt(Z[0, 0]) * hh

            # b_vec = solve(aa,b)
            b_vec = QRsolve(aa, b)

            b_vec = float32(b_vec)

            beta = float32(4) * (b_vec.T * b_vec + 1)

            phi_sq_t = float32(0.5) + (float32(1.0) / sqrt(beta))  # 13e

            phi_t = sqrt(phi_sq_t)

            gamma = (float32(1) - float32(2) * phi_sq_t) / (float32(2) * phi_t)

            delta = phi_t / sqrt(Z)

            v_vec_t = multiply(gamma, b_vec)

            S_t.append(X - multiply(float32(1) / delta, v_vec_t * hh.T))

            w_vec = multiply(delta, hh) - v_vec_t

            e_vec = multiply(delta, z_vec) - (Q_t[t - 1] * w_vec)

            Q_t.append(Q_t[t - 1] - float32(2) * (e_vec * v_vec_t.T))

            v_vec_min_1 = v_vec_t  # update for next time step

            # Record hidden variables
            hid_var[t - 1, :hh.shape[0]] = hh.T

            # Record reconstructed z
            new_z_dash = Q_t[t - 1] * hh
            z_dash = npm.vstack((z_dash, new_z_dash.T))

            # Record RSRE
            new_RSRE = RSRE[0, -1] + (((norm(new_z_dash - z_vec))**2) /
                                      (norm(z_vec)**2))
            RSRE = npm.vstack((RSRE, mat(new_RSRE)))

        else:

            # Record hidden variables
            hid_var[t - 1, :hh.shape[0]] = hh.T

            # Record reconstructed z
            new_z_dash = Q_t[t - 1] * hh
            z_dash = npm.vstack((z_dash, new_z_dash.T))

            # Record RSRE
            new_RSRE = RSRE[0, -1] + (((norm(new_z_dash - z_vec))**2) /
                                      (norm(z_vec)**2))
            RSRE = npm.vstack((RSRE, mat(new_RSRE)))

            # Repeat last entries
            Q_t.append(Q_t[-1])
            S_t.append(S_t[-1])

            # increment count
            No_inp_count += 1
            No_inp_marker[t - 1] = 1

    # convert to tuples to save memory
    Q_t = tuple(Q_t)
    S_t = tuple(S_t)
    rr = array(rr)
    E_t = array(E_t)
    E_dash_t = array(E_dash_t)

    return Q_t, S_t, rr, E_t, E_dash_t, hid_var, z_dash, RSRE, No_inp_count, No_inp_marker
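
FRHH relies on a QRsolve helper that is not shown in this snippet; as the commented-out solve(aa, b) line suggests, it solves the linear system aa * b_vec = b. A minimal sketch of such a solver built on a QR factorisation is given below; the name QRsolve_sketch is hypothetical and the project's own QRsolve may differ.

import numpy as np
from scipy.linalg import solve_triangular

def QRsolve_sketch(aa, b):
    # Solve aa @ x = b via QR: aa = Q R, so R x = Q.T b with R upper triangular.
    Q, R = np.linalg.qr(np.asarray(aa))
    x = solve_triangular(R, Q.T @ np.asarray(b))
    return np.asmatrix(x)
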
Code example #31
File: mtfcfs.py  Project: ghorvath78/butools
def MMAPPH1FCFS(D, sigma, S, *argv):
    """
    Returns various performance measures of an MMAP[K]/PH[K]/1
    first-come-first-serve queue, see [1]_.
    
    Parameters
    ----------
    D : list of matrices of shape (N,N), length (K+1)
        The D0...DK matrices of the arrival process.
    sigma : list of row vectors, length (K)
        The list containing the initial probability vectors of the service
        time distributions of the various customer types. The length of the
        vectors does not have to be the same.
    S : list of square matrices, length (K)
        The transient generators of the phase type distributions representing
        the service time of the jobs belonging to various types.
    further parameters : 
        The rest of the function parameters specify the options
        and the performance measures to be computed.
    
        The supported performance measures and options in this 
        function are:
    
        +----------------+--------------------+----------------------------------------+
        | Parameter name | Input parameters   | Output                                 |
        +================+====================+========================================+
        | "ncMoms"       | Number of moments  | The moments of the number of customers |
        +----------------+--------------------+----------------------------------------+
        | "ncDistr"      | Upper limit K      | The distribution of the number of      |
        |                |                    | customers from level 0 to level K-1    |
        +----------------+--------------------+----------------------------------------+
        | "stMoms"       | Number of moments  | The sojourn time moments               |
        +----------------+--------------------+----------------------------------------+
        | "stDistr"      | A vector of points | The sojourn time distribution at the   |
        |                |                    | requested points (cumulative, cdf)     |
        +----------------+--------------------+----------------------------------------+
        | "stDistrME"    | None               | The vector-matrix parameters of the    |
        |                |                    | matrix-exponentially distributed       |
        |                |                    | sojourn time distribution              |
        +----------------+--------------------+----------------------------------------+
        | "stDistrPH"    | None               | The vector-matrix parameters of the    |
        |                |                    | matrix-exponentially distributed       |
        |                |                    | sojourn time distribution, converted   |
        |                |                    | to a continuous PH representation      |
        +----------------+--------------------+----------------------------------------+
        | "prec"         | The precision      | Numerical precision used as a stopping |
        |                |                    | condition when solving the Riccati     |
        |                |                    | equation                               |
        +----------------+--------------------+----------------------------------------+
        | "classes"      | Vector of integers | Only the performance measures          |
        |                |                    | belonging to these classes are         |
        |                |                    | returned. If not given, all classes    |
        |                |                    | are analyzed.                          |
        +----------------+--------------------+----------------------------------------+
        
        (The quantities related to the number of customers in 
        the system include the customer in the server, and the 
        sojourn time related quantities include the service 
        times as well)
    
    Returns
    -------
    Ret : list of the performance measures
        Each entry of the list corresponds to a performance 
        measure requested. Each entry is a matrix, where the
        columns belong to the various job types.
        If there is just a single item, 
        then it is not put into a list.
    
    References
    ----------
    .. [1] Qiming He, "Analysis of a continuous time 
           SM[K]/PH[K]/1/FCFS queue: Age process, sojourn times,
           and queue lengths", Journal of Systems Science and 
           Complexity, 25(1), pp 133-155, 2012.
    """
    
    K = len(D)-1

    # parse options
    eaten = []
    precision = 1e-14
    classes = np.arange(0,K)
    for i in range(len(argv)):
        if argv[i]=="prec":
            precision = argv[i+1]
            eaten.append(i)
            eaten.append(i+1) 
        elif argv[i]=="classes":
            classes = np.array(argv[i+1])-1
            eaten.append(i)
            eaten.append(i+1) 
    
    if butools.checkInput and not CheckMMAPRepresentation(D):
        raise Exception('MMAPPH1FCFS: The arrival process is not a valid MMAP representation!')
    
    if butools.checkInput:
        for k in range(K):
            if not CheckPHRepresentation(sigma[k],S[k]):
                raise Exception('MMAPPH1FCFS: the vector and matrix describing the service times is not a valid PH representation!')

    # some preparation
    D0 = D[0]
    N = D0.shape[0]
    Ia = ml.eye(N)
    Da = ml.zeros((N,N))
    for q in range(K):
        Da += D[q+1]
    theta = CTMCSolve(D0+Da)
    beta = [CTMCSolve(S[k]+ml.sum(-S[k],1)*sigma[k]) for k in range(K)]
    lambd = [np.sum(theta*D[k+1]) for k in range(K)]    
    mu = [np.sum(beta[k]*(-S[k])) for k in range(K)]
    Nsk = [S[k].shape[0] for k in range(K)]    
    ro = np.sum(np.array(lambd)/np.array(mu))
    alpha = theta*Da/sum(lambd)
    D0i = (-D0).I

    Sa = S[0]
    sa = [ml.zeros(sigma[0].shape)]*K
    sa[0] = sigma[0]
    ba = [ml.zeros(beta[0].shape)]*K
    ba[0] = beta[0]
    sv = [ml.zeros((Nsk[0],1))]*K
    sv[0] = ml.sum(-S[0],1)
    Pk = [D0i*D[q+1] for q in range(K)]

    for k in range(1,K):
        Sa = la.block_diag(Sa, S[k])
        for q in range(K):
            if q==k:
                sa[q] = ml.hstack((sa[q], sigma[k]))
                ba[q] = ml.hstack((ba[q], beta[k]))
                sv[q] = ml.vstack((sv[q], -np.sum(S[k],1)))
            else:
                sa[q] = ml.hstack((sa[q], ml.zeros(sigma[k].shape)))
                ba[q] = ml.hstack((ba[q], ml.zeros(beta[k].shape)))
                sv[q] = ml.vstack((sv[q], ml.zeros((Nsk[k],1))))
    Sa = ml.matrix(Sa)
    P = D0i*Da
    iVec = ml.kron(D[1],sa[0])
    for k in range(1,K):
        iVec += ml.kron(D[k+1],sa[k])
    Ns = Sa.shape[0]
    Is = ml.eye(Ns)
    
    # step 1. solve the age process of the queue
    # ==========================================

    # solve Y0 and calculate T
    Y0 = FluidFundamentalMatrices (ml.kron(Ia,Sa), ml.kron(Ia,-ml.sum(Sa,1)), iVec, D0, "P", precision)
    T = ml.kron(Ia,Sa) + Y0 * iVec
    
    # calculate pi0 and v0
    pi0 = ml.zeros((1,T.shape[0]))
    for k in range(K):
        pi0 += ml.kron(theta*D[k+1],ba[k]/mu[k])
    pi0 = - pi0 * T

    iT = (-T).I
    oa = ml.ones((N,1))

    # step 2. calculate performance measures
    # ======================================
    Ret = []
    for k in classes:
        argIx = 0
        clo = iT*ml.kron(oa,sv[k])
        while argIx<len(argv):
            if argIx in eaten:
                argIx += 1
                continue
            elif type(argv[argIx]) is str and argv[argIx]=="stMoms":
                numOfSTMoms = argv[argIx+1]
                rtMoms = []
                for m in range(1,numOfSTMoms+1):
                    rtMoms.append(math.factorial(m) * np.sum(pi0 * iT**m * clo / (pi0*clo)))
                Ret.append(rtMoms)
                argIx += 1
            elif type(argv[argIx]) is str and argv[argIx]=="stDistr":
                stCdfPoints = argv[argIx+1]
                cdf = []
                for t in stCdfPoints:
                    pr = 1 - np.sum(pi0 * la.expm(T*t) * clo / (pi0*clo))
                    cdf.append(pr)
                Ret.append(np.array(cdf))
                argIx += 1
            elif type(argv[argIx]) is str and argv[argIx]=="stDistrME":
                Bm = SimilarityMatrixForVectors(clo/(pi0*clo),ml.ones((N*Ns,1)))
                Bmi = Bm.I
                A = Bm * T * Bmi
                alpha = pi0 * Bmi
                Ret.append(alpha)
                Ret.append(A)
            elif type(argv[argIx]) is str and argv[argIx]=="stDistrPH":
                vv = pi0*iT
                ix = np.arange(N*Ns)
                nz = ix[vv.flat>precision]
                delta = Diag(vv[:,nz])
                cl = -T*clo/(pi0*clo)
                alpha = cl[nz,:].T*delta
                A = delta.I*T[nz,:][:,nz].T*delta
                Ret.append(alpha)
                Ret.append(A)
            elif type(argv[argIx]) is str and argv[argIx]=="ncDistr":
                numOfQLProbs = argv[argIx+1]
                argIx += 1
                values = np.empty(numOfQLProbs)
                jm = ml.zeros((Ns,1))
                jm[np.sum(Nsk[0:k]):np.sum(Nsk[0:k+1]),:] = 1
                jmc = ml.ones((Ns,1))
                jmc[np.sum(Nsk[0:k]):np.sum(Nsk[0:k+1]),:] = 0
                LmCurr = la.solve_sylvester(T, ml.kron(D0+Da-D[k+1],Is), -ml.eye(N*Ns))
                values[0] = 1-ro+np.sum(pi0*LmCurr*ml.kron(oa,jmc))
                for i in range(1,numOfQLProbs):
                    LmPrev = LmCurr
                    LmCurr = la.solve_sylvester(T, ml.kron(D0+Da-D[k+1],Is), -LmPrev*ml.kron(D[k+1],Is))
                    values[i] = np.sum(pi0*LmCurr*ml.kron(oa,jmc) + pi0*LmPrev*ml.kron(oa,jm))
                Ret.append(values)
            elif type(argv[argIx]) is str and argv[argIx]=="ncMoms":
                numOfQLMoms = argv[argIx+1]
                argIx += 1
                jm = ml.zeros((Ns,1))
                jm[np.sum(Nsk[0:k]):np.sum(Nsk[0:k+1]),:] = 1
                ELn = [la.solve_sylvester(T, ml.kron(D0+Da,Is), -ml.eye(N*Ns))]
                qlMoms = []
                for n in range(1,numOfQLMoms+1):
                    bino = 1
                    Btag = ml.zeros((N*Ns,N*Ns))
                    for i in range(n):
                        Btag += bino * ELn[i]
                        bino *= (n-i) / (i+1)
                    ELn.append(la.solve_sylvester(T, ml.kron(D0+Da,Is), -Btag*ml.kron(D[k+1],Is)))
                    qlMoms.append(np.sum(pi0*ELn[n]) + np.sum(pi0*Btag*ml.kron(oa,jm)))
                Ret.append(qlMoms)
            else:
                raise Exception("MMAPPH1FCFS: Unknown parameter "+str(argv[argIx]))
            argIx += 1

    if len(Ret)==1:
        return Ret[0]
    else:
        return Ret
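
A minimal usage sketch for MMAPPH1FCFS with a two-class arrival process; all matrices below are illustrative values chosen only to form a valid MMAP and two PH service time distributions (with utilisation around 0.5), and the requested measures mirror the docstring table.

import numpy.matlib as ml

# Illustrative two-class MMAP[2]: D0 + D1 + D2 is a proper generator.
D0 = ml.matrix([[-5., 1.], [2., -7.]])
D1 = ml.matrix([[3., 0.], [1., 1.]])
D2 = ml.matrix([[1., 0.], [1., 2.]])

# Illustrative PH service time distributions for the two classes.
sigma1 = ml.matrix([[1.]])
S1 = ml.matrix([[-8.]])
sigma2 = ml.matrix([[0.5, 0.5]])
S2 = ml.matrix([[-10., 4.], [0., -12.]])

# Two sojourn time moments and the queue length distribution up to 9 customers,
# returned per class in the order the measures appear in the call.
stm1, ncd1, stm2, ncd2 = MMAPPH1FCFS([D0, D1, D2], [sigma1, sigma2], [S1, S2],
                                     "stMoms", 2, "ncDistr", 10)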