Example #1
    def __new__(cls, c):
        """
        Build the homogeneous transformation matrix that translates by the
        vector ``c``: an identity block with ``c`` appended as its last
        column, plus a final row of zeros ending in a one.
        """

        # Treat c as a column vector so it can be appended as the last column.
        c = c.view(matlib.matrix).reshape(-1, 1)

        T = matlib.vstack((
            matlib.hstack((matlib.identity(c.size), c)),
            matlib.hstack((matlib.zeros(c.size), matlib.ones(1))),
        ))
        return super().__new__(cls, T)
Example #2
 def __new__(cls, c):
     """
     Build the homogeneous transformation matrix that translates by the
     vector ``c``: an identity block with ``c`` appended as its last
     column, plus a final row of zeros ending in a one.
     """

     # Treat c as a column vector so it can be appended as the last column.
     c = c.view(matlib.matrix).reshape(-1, 1)

     T = matlib.vstack((
         matlib.hstack((matlib.identity(c.size), c)),
         matlib.hstack((matlib.zeros(c.size), matlib.ones(1))),
     ))
     return super().__new__(cls, T)
Example #3
 def increase_input_size(self, nb_added_input=1):
     """
         Increases the number of inputs the network accepts.
     """
     
     added_input_weights = self.input_scaling * EchoStateNetwork._rand_matrix(self.reservoir_size, nb_added_input)
     self.input_weights = npmat.hstack([self.input_weights, added_input_weights])
     self.input_size += nb_added_input
     
     if not self.use_raw_input:
         return
     
     self.state_size += nb_added_input
     self.state = npmat.vstack([self.state, npmat.zeros((nb_added_input, 1))])
     self.output_weights = npmat.hstack([self.output_weights, npmat.zeros((self.output_size, nb_added_input))])
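
The growth step above is plain column-wise concatenation: freshly scaled random columns are appended to the input weights and, when the raw inputs are part of the state, the readout receives matching zero columns so the new inputs initially contribute nothing. A minimal, self-contained sketch of that pattern (shapes and names are illustrative, not taken from the original class):

import numpy.matlib as npmat

reservoir_size, input_size, output_size = 4, 2, 1
input_weights = npmat.rand(reservoir_size, input_size)
output_weights = npmat.zeros((output_size, reservoir_size + input_size))

nb_added_input = 1
# Append freshly drawn columns for the new inputs ...
input_weights = npmat.hstack([input_weights,
                              npmat.rand(reservoir_size, nb_added_input)])
# ... and zero columns in the readout, so the behaviour is unchanged at first.
output_weights = npmat.hstack([output_weights,
                               npmat.zeros((output_size, nb_added_input))])

assert input_weights.shape == (reservoir_size, input_size + nb_added_input)
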
Example #4
def firstInitElem(m33, sortEigs, alpha, A):
    # Helper for CanonicalFromDPH3: given a candidate value m33 for the (3,3)
    # entry of the canonical transition matrix and the sorted eigenvalues,
    # compute the remaining canonical parameters and the transformation B.

    l1 = sortEigs[0]
    l2 = sortEigs[1]
    l3 = sortEigs[2]

    m1 = -m33 + l1 + l2 + l3
    m2=-((l2-l3)*(l1**2-l1*l2-l1*l3+l2*l3)*(m33**3-2*m33**2*l1+m33*l1**2-2*m33**2*l2+3*m33*l1*l2-l1**2*l2+m33*l2**2- \
      l1*l2**2-2*m33**2*l3+3*m33*l1*l3-l1**2*l3+3*m33*l2*l3-2*l1*l2*l3-l2**2*l3+m33*l3**2-l1*l3**2-l2*l3**2))/ \
      (2*m33*l1**2*l2+2*m33**2*l1**2*l2-l1**3*l2-3*m33*l1**3*l2+l1**4*l2-2*m33*l1*l2**2-2*m33**2*l1*l2**2+l1**3*l2**2+ \
      l1*l2**3+3*m33*l1*l2**3-l1**2*l2**3-l1*l2**4-2*m33*l1**2*l3-2*m33**2*l1**2*l3+l1**3*l3+3*m33*l1**3*l3-l1**4*l3+ \
      2*m33*l2**2*l3+2*m33**2*l2**2*l3-l2**3*l3-3*m33*l2**3*l3+l2**4*l3+2*m33*l1*l3**2+2*m33**2*l1*l3**2-l1**3*l3**2- \
      2*m33*l2*l3**2-2*m33**2*l2*l3**2+l2**3*l3**2-l1*l3**3-3*m33*l1*l3**3+l1**2*l3**3+l2*l3**3+3*m33*l2*l3**3-l2**2*l3**3+ \
      l1*l3**4-l2*l3**4)
    m3=((l2-l3)*(l1**2-l1*l2-l1*l3+l2*l3)*(m33**3+m33**4-m33**2*l1-2*m33**3*l1+m33**2*l1**2-m33**2*l2-2*m33**3*l2+ \
      m33*l1*l2+3*m33**2*l1*l2-m33*l1**2*l2+m33**2*l2**2-m33*l1*l2**2-m33**2*l3-2*m33**3*l3+m33*l1*l3+3*m33**2*l1*l3- \
      m33*l1**2*l3+m33*l2*l3+3*m33**2*l2*l3-l1*l2*l3-4*m33*l1*l2*l3+l1**2*l2*l3-m33*l2**2*l3+l1*l2**2*l3+m33**2*l3**2- \
      m33*l1*l3**2-m33*l2*l3**2+l1*l2*l3**2))/(-2*m33*l1**2*l2-2*m33**2*l1**2*l2-m33**3*l1**2*l2+l1**3*l2+3*m33*l1**3*l2+ \
      2*m33**2*l1**3*l2-l1**4*l2-m33*l1**4*l2+2*m33*l1*l2**2+2*m33**2*l1*l2**2+m33**3*l1*l2**2-l1**3*l2**2-2*m33*l1**3*l2**2+ \
      l1**4*l2**2-l1*l2**3-3*m33*l1*l2**3-2*m33**2*l1*l2**3+l1**2*l2**3+2*m33*l1**2*l2**3+l1*l2**4+m33*l1*l2**4-l1**2*l2**4+ \
      2*m33*l1**2*l3+2*m33**2*l1**2*l3+m33**3*l1**2*l3-l1**3*l3-3*m33*l1**3*l3-2*m33**2*l1**3*l3+l1**4*l3+m33*l1**4*l3- \
      2*m33*l2**2*l3-2*m33**2*l2**2*l3-m33**3*l2**2*l3+l2**3*l3+3*m33*l2**3*l3+2*m33**2*l2**3*l3-l2**4*l3-m33*l2**4*l3- \
      2*m33*l1*l3**2-2*m33**2*l1*l3**2-m33**3*l1*l3**2+l1**3*l3**2+2*m33*l1**3*l3**2-l1**4*l3**2+2*m33*l2*l3**2+2*m33**2*l2*l3**2+ \
      m33**3*l2*l3**2-l2**3*l3**2-2*m33*l2**3*l3**2+l2**4*l3**2+l1*l3**3+3*m33*l1*l3**3+2*m33**2*l1*l3**3-l1**2*l3**3- \
      2*m33*l1**2*l3**3-l2*l3**3-3*m33*l2*l3**3-2*m33**2*l2*l3**3+l2**2*l3**3+2*m33*l2**2*l3**3-l1*l3**4-m33*l1*l3**4+ \
      l1**2*l3**4+l2*l3**4+m33*l2*l3**4-l2**2*l3**4)
    b3 = np.sum(ml.eye(3) - A, 1) / (1 - m3 - m33)
    b2 = (-m33 * ml.eye(3) + A) * b3 / (1 - m2)
    b1 = (-m3 * b3 + A * b2) / (1 - m1)
    B = ml.hstack((b1, b2, b3))
    a1 = alpha * b1
    return (a1, m1, m2, m3, B)
Example #5
 def __init__(self, x1, x2, fclass=None):
     super(Graph, self).__init__()
     self.xres = x1.shape[0]
     self.yres = x1.shape[1]
     self.fclass = fclass
     n = 2 * x1.size - 2 * self.yres
     self.add_nodes_from(range(1, n + 1))
     x1 = np.vstack((x1.reshape((-1, 1)), x1[-2:0:-1].reshape((-1, 1))))
     x2 = np.vstack((x2.reshape((-1, 1)), x2[-2:0:-1].reshape((-1, 1))))
     self.x = np.hstack((x1, x2))
     self.pos = dict(
         zip(range(1, n + 1), zip(x1.T.tolist()[0],
                                  x2.T.tolist()[0])))
     self.full = nx.DiGraph(self)
     for i in range(1, n + 1):
         for j in self.column(i):
             dx1 = np.absolute(self.pos[j][0] - self.pos[i][0])
             dx2 = np.absolute(self.pos[j][1] - self.pos[i][1])
             if dx2 <= dx1 / _GRAPH_ASPECT_RATIO:
                 self.full.add_edge(i, j)
     tmp = 1
     self.basic = set()
     step = self.yres // _GRAPH_YRES_BASIC  # integer stride for the slice below
     while tmp < n + 1:
         self.basic = self.basic | set(self.column(tmp)[0::step])
         tmp = tmp + self.yres
     self.update_active()
Example #8
 def get_u(self):
     # Principal axis of the scene: build the (biased) covariance matrix of
     # the solids' center positions and extract its dominant eigenvector with
     # the iterative helper.
     points = npmat.hstack([s.center_position for s in self.solids])
     center = npmat.asmatrix(npmat.average(points, axis=1)).T
     centered_points = points - center
     correlation_matrix = (centered_points * centered_points.T) / self.nb_solids
     u = iterate(correlation_matrix, self.NB_ITERATIONS)
     return u
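
``iterate`` is not part of this listing. Given how its result is used (a single direction extracted from a symmetric covariance matrix), it is presumably a power-iteration helper; the sketch below is a plausible stand-in under that assumption (the name and signature come from the call above, the body is illustrative):

import numpy as np
import numpy.matlib as npmat

def iterate(matrix, nb_iterations):
    # Power iteration: repeatedly apply the matrix to a vector and renormalize.
    # For a symmetric positive semi-definite matrix this converges to the
    # dominant eigenvector, i.e. the principal axis of the point cloud.
    u = npmat.rand(matrix.shape[0], 1)
    for _ in range(nb_iterations):
        u = matrix * u
        u = u / np.linalg.norm(u)
    return u
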
Example #9
 def __new__(cls, m, n):
     """
     """
     
     if not all((
             isinstance(m, int),
             isinstance(n, int),
         )):
         raise ValueError("m and n must be integers")
     
     # product(range(m), repeat=n) enumerates every point of {0, ..., m-1}^n.
     data = matlib.hstack([
         matlib.vstack((
             matlib.hstack((matlib.identity(n, dtype=int), matlib.matrix(p, dtype=int).T)),
             matlib.hstack((matlib.matrix(p, dtype=int), matlib.ones(1))),
         ))
         for p in product(range(m), repeat=n)
     ])
     return super().__new__(cls, data, dtype=int).view(cls)
Example #10
    def world_to_view(self, point):
        # Orthonormal camera basis: i is the image "right" axis, k the viewing
        # direction, j completes the right-handed frame.
        i = self.top.cross(self.normal).normalize()
        k = self.normal.normalize()
        j = k.cross(i)

        # Stack the basis vectors as columns; s.T maps world coordinates into
        # the camera frame, and the camera origin's image is subtracted.
        s = hstack([i.vec3d(), j.vec3d(), k.vec3d()])
        p1 = s.T * point.vec3d()
        or1 = s.T * self.origin.vec3d()
        return p1 - or1
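
In matrix form, with S = [i j k] holding the basis vectors as columns, the value returned above is

.. math::
    S^{\top} p - S^{\top} o = S^{\top} (p - o),

i.e., the world point expressed in the camera frame relative to the camera origin o.
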
Example #12
    def colliding_boxes(self):
        # Broad-phase collision detection ("sweep and prune"): project every
        # solid's AABB onto the current principal axis of the scene, sort the
        # interval endpoints, and only test pairs whose intervals overlap.
        nb_solids = self.nb_solids
        points = npmat.hstack([s.center_position for s in self.solids])
        center = npmat.asmatrix(npmat.average(points, axis=1)).T
        centered_points = points - center
        correlation_matrix = (centered_points * centered_points.T) / nb_solids
        u = iterate(correlation_matrix, self.NB_ITERATIONS)

    
        if self.projected_bounds is None:
            
            self.projected_bounds = []
            
            for i in range(nb_solids):
                self.projected_bounds.append((i, 0, 0.))
                self.projected_bounds.append((i, 1, 0.))
                
        
        min_max_projections = npmat.empty((nb_solids, 2))
        
        for solid_id in range(nb_solids):
            solid_AABB_corners = self.solids[solid_id].AABB_corners()
            corners_projections = u.T * solid_AABB_corners
            min_max_projections[solid_id, 0] = np.min(corners_projections)
            min_max_projections[solid_id, 1] = np.max(corners_projections)
            
        for i in range(2 * nb_solids):            
            solid_id, begin_or_end_id, value = self.projected_bounds[i]
            new_value = min_max_projections[solid_id, begin_or_end_id]
            self.projected_bounds[i] = (solid_id, begin_or_end_id, new_value)
                
        # TODO: linear sorting
        self.projected_bounds.sort(key=lambda x: x[2])
        
        output = []
        active_solids = []
    
        for i in range(2 * nb_solids):
            
            solid_id, begin_or_end_id, value = self.projected_bounds[i]
            
            if begin_or_end_id == 0:
                for active_solid_id in active_solids:
                    if self.solids[active_solid_id].AABB_intersect_with(self.solids[solid_id]):
                        output.append((active_solid_id, solid_id))
                        
                active_solids.append(solid_id)
                
            else:
                active_solids.remove(solid_id)
                
        return output
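
The endpoint sweep above can be exercised on bare intervals; a minimal, self-contained sketch of the same idea (illustrative only, not the class method itself):

def overlapping_pairs(intervals):
    # intervals: list of (lo, hi) projections, one per object.
    # Sort all endpoints, keep an "active" list while sweeping left to right,
    # and report a pair whenever an interval starts while others are still open.
    events = []
    for idx, (lo, hi) in enumerate(intervals):
        events.append((lo, 0, idx))  # 0 = interval begins
        events.append((hi, 1, idx))  # 1 = interval ends
    events.sort()

    active, pairs = [], []
    for _value, kind, idx in events:
        if kind == 0:
            pairs.extend((other, idx) for other in active)
            active.append(idx)
        else:
            active.remove(idx)
    return pairs

print(overlapping_pairs([(0.0, 2.0), (1.0, 3.0), (4.0, 5.0)]))  # [(0, 1)]
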
Example #13
    def __init__(self, points, masses):
        """
            Inputs:
                points: list of 3 dimensional points
                masses: list of float (same length as points)
        """

        self.nb_points = len(points)

        if self.nb_points == 0:
            raise Exception("Provide at least one point")

        if len(masses) != self.nb_points:
            raise Exception(
                "Same number of points and masses must be provided")

        self.masses = masses
        self.initial_points = [Vect(p) for p in points]

        # Center of mass p and its velocity
        self.center_position = npmat.sum(self.initial_points,
                                         axis=0) / self.nb_points
        self.center_velocity = npmat.zeros((3, 1))

        # Position of the points relative to the center of mass, stacked into a big matrix
        self.centered_initial_points_hstack = npmat.hstack(
            self.initial_points) - self.center_position

        # Total mass and its inverse
        self.total_mass = npmat.sum(self.masses)
        self.total_mass_inverse = 1. / self.total_mass

        # Inertia matrix and its inverse
        self.initial_inertia_matrix = Solid._inertia_matrix(
            self.masses, self.centered_initial_points_hstack)
        self.initial_inertia_matrix_inverse = npmat.linalg.pinv(
            self.initial_inertia_matrix)

        # Rotation quaternion (used to construct matrix representing orientation) and angular momentum (sigma)
        self.rotation_quaternion = Vect([1., 0., 0., 0.])
        self.rotation_matrix = rotation_matrix_from_quaternion(
            self.rotation_quaternion)
        self.angular_momentum = npmat.zeros((3, 1))

        self._compute_AABB()
Example #14
 def _compute_AABB(self):
     # Axis-aligned bounding box: column 0 holds the per-axis minima and
     # column 1 the per-axis maxima of the solid's points.
     points = self.points_hstack()
     self.AABB = npmat.hstack([points.min(axis=1), points.max(axis=1)])
Example #15
def GeneralFluidSolve (Q, R, Q0=[], prec=1e-14):
    """
    Returns the parameters of the matrix-exponentially 
    distributed stationary distribution of a general 
    Markovian fluid model, where the fluid rates associated
    with the states of the background process can be
    arbitrary (zero is allowed as well).
    
    Using the returned 4 parameters the stationary
    solution can be obtained as follows.
    
    The probability that the fluid level is zero while 
    being in different states of the background process
    is given by vector mass0.
    
    The density that the fluid level is x while being in
    different states of the background process is
    
    .. math::
        \pi(x)=ini\cdot e^{K x}\cdot clo.    
    
    Parameters
    ----------
    Q : matrix, shape (N,N)
        The generator of the background Markov chain
    R : diagonal matrix, shape (N,N)
        The diagonal matrix of the fluid rates associated
        with the different states of the background process
    Q0 : matrix, shape (N,N), optional
        The generator of the background Markov chain at 
        level 0. If not provided, or empty, then Q0=Q is 
        assumed. The default value is empty.
    prec : double, optional
        Numerical precision for computing the fundamental
        matrix. The default value is 1e-14
    
    Returns
    -------
    mass0 : matrix, shape (1,N)
        The stationary probability vector of zero level
    ini : matrix, shape (1,Np)
        The initial vector of the stationary density
    K : matrix, shape (Np,Np)
        The matrix parameter of the stationary density
    clo : matrix, shape (Np,N)
        The closing matrix of the stationary density
    """
    
    N = Q.shape[0]
    # partition the state space according to zero, positive and negative fluid rates
    ix = np.arange(N)
    ixz = ix[np.abs(np.diag(R))<=prec]
    ixp = ix[np.diag(R)>prec]
    ixn = ix[np.diag(R)<-prec]
    Nz = len(ixz)
    Np = len(ixp)
    Nn = len(ixn)
    # permutation matrix that converts between the original and the partitioned state ordering
    P = ml.zeros((N,N))
    for i in range(Nz):
        P[i,ixz[i]]=1
    for i in range(Np):
        P[Nz+i,ixp[i]]=1
    for i in range(Nn):
        P[Nz+Np+i,ixn[i]]=1
    iP = P.I
    Qv = P*Q*iP
    Rv = P*R*iP

    # new fluid process censored to states + and -
    iQv00 = la.pinv(-Qv[:Nz,:Nz])
    Qbar = Qv[Nz:, Nz:] + Qv[Nz:,:Nz]*iQv00*Qv[:Nz,Nz:]
    absRi = Diag(np.abs(1./np.diag(Rv[Nz:,Nz:])))
    Qz = absRi * Qbar

    Psi, K, U = FluidFundamentalMatrices (Qz[:Np,:Np], Qz[:Np,Np:], Qz[Np:,:Np], Qz[Np:,Np:], "PKU", prec)

    # closing matrix
    Pm = np.hstack((ml.eye(Np), Psi)) * absRi
    iCn = absRi[Np:,Np:]
    iCp = absRi[:Np,:Np]
    clo = np.hstack(((iCp*Qv[Nz:Nz+Np,:Nz]+Psi*iCn*Qv[Nz+Np:,:Nz])*iQv00, Pm))
    
    if len(Q0)==0: # regular boundary behavior
        clo = clo * P # go back to the original state ordering

        # calculate boundary vector   
        Ua = iCn*Qv[Nz+Np:,:Nz]*iQv00*ml.ones((Nz,1)) + iCn*ml.ones((Nn,1)) + Qz[Np:,:Np]*la.inv(-K)*clo*ml.ones((Nz+Np+Nn,1))
        pm = Linsolve (ml.hstack((U,Ua)).T, ml.hstack((ml.zeros((1,Nn)),ml.ones((1,1)))).T).T

        # create the result
        mass0 = ml.hstack((pm*iCn*Qv[Nz+Np:,:Nz]*iQv00, ml.zeros((1,Np)), pm*iCn))*P
        ini = pm*Qz[Np:,:Np]        
    else:
        # solve a linear system for ini(+), pm(-) and pm(0)        
        Q0v = P*Q0*iP
        M = ml.vstack((-clo*Rv, Q0v[Nz+Np:,:], Q0v[:Nz,:]))
        Ma = ml.vstack((np.sum(la.inv(-K)*clo,1), ml.ones((Nz+Nn,1))))
        sol = Linsolve (ml.hstack((M,Ma)).T, ml.hstack((ml.zeros((1,N)),ml.ones((1,1)))).T).T
        ini = sol[:,:Np]
        clo = clo * P
        mass0 = ml.hstack((sol[:,Np+Nn:], ml.zeros((1,Np)), sol[:,Np:Np+Nn]))*P

    return mass0, ini, K, clo
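
A small usage sketch for the function above. The import path assumes the upstream butools package layout (butools.fluid), which is not shown in this listing, and the generator and rate values are purely illustrative:

import numpy as np
import numpy.matlib as ml
from scipy.linalg import expm
from butools.fluid import GeneralFluidSolve

# Background Markov chain and fluid rates (a zero rate is allowed).
Q = ml.matrix([[-2., 1., 1.],
               [1., -3., 2.],
               [2., 1., -3.]])
R = ml.matrix(np.diag([1., -3., 0.]))  # mean drift is negative, so the model is stable

mass0, ini, K, clo = GeneralFluidSolve(Q, R)

# Density of the fluid level at x, per background state: pi(x) = ini * e^{Kx} * clo
x = 0.5
density = ini * expm(K * x) * clo
print(mass0, density)
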
Example #16
def FluidSolve (Fpp, Fpm, Fmp, Fmm, prec=1e-14):
    """
    Returns the parameters of the matrix-exponentially 
    distributed stationary distribution of a canonical 
    Markovian fluid model.
    
    The canonical Markov fluid model is defined by the 
    matrix blocks of the generator of the background Markov
    chain partitioned according to the sign of the 
    associated fluid rates (i.e., there are "+" and "-" states).   
    
    Using the returned 4 parameters the stationary
    solution can be obtained as follows.
    
    The probability that the fluid level is zero while 
    being in different states of the background process
    is given by vector mass0.
    
    The density that the fluid level is x while being in
    different states of the background process is
    
    .. math::
        \pi(x)=ini\cdot e^{K x}\cdot clo.    
    
    Parameters
    ----------
    Fpp : matrix, shape (Np,Np)
        The matrix of transition rates between states 
        having positive fluid rates
    Fpm : matrix, shape (Np,Nm)
        The matrix of transition rates where the source
        state has a positive and the destination a
        negative fluid rate associated.
    Fmp : matrix, shape (Nm,Np)
        The matrix of transition rates where the source
        state has a negative and the destination a
        positive fluid rate associated.
    Fmm : matrix, shape (Nm,Nm)
        The matrix of transition rates between states
        having negative fluid rates
    prec : double, optional
        Numerical precision for computing the fundamental
        matrix. The default value is 1e-14
    
    Returns
    -------
    mass0 : matrix, shape (1,Np+Nm)
        The stationary probability vector of zero level
    ini : matrix, shape (1,Np)
        The initial vector of the stationary density
    K : matrix, shape (Np,Np)
        The matrix parameter of the stationary density
    clo : matrix, shape (Np,Np+Nm)
        The closing matrix of the stationary density
    """
    
    Psi, K, U = FluidFundamentalMatrices (Fpp, Fpm, Fmp, Fmm, "PKU", prec)
    
    mass0 = CTMCSolve(U)
    nr = np.sum(mass0) + 2.0*np.sum(mass0*Fmp*-K.I)
    
    return ml.hstack((ml.zeros((1,Fpp.shape[0])),mass0/nr)), mass0*Fmp/nr, K, ml.hstack((ml.eye(Fpp.shape[0]), Psi))
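
As with GeneralFluidSolve above, a brief usage sketch (FluidSolve is assumed to be importable from butools.fluid as in the upstream package; the partitioned blocks below are illustrative and chosen so that the mean drift is negative):

import numpy.matlib as ml
from butools.fluid import FluidSolve

# Generator blocks partitioned into "+" and "-" states; the rows of the full
# generator [[Fpp, Fpm], [Fmp, Fmm]] sum to zero.
Fpp = ml.matrix([[-6., 1.], [1., -7.]])
Fpm = ml.matrix([[3., 2.], [2., 4.]])
Fmp = ml.matrix([[1., 1.], [1., 1.]])
Fmm = ml.matrix([[-4., 2.], [2., -4.]])

mass0, ini, K, clo = FluidSolve(Fpp, Fpm, Fmp, Fmm)
print(mass0)   # probability of an empty buffer, per background state
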
Example #17
def MMAPPH1PRPR(D, sigma, S, *argv):
    """
    Returns various performance measures of a MMAP[K]/PH[K]/1
    preemptive resume priority queue, see [1]_.
    
    Parameters
    ----------
    D : list of matrices of shape (N,N), length (K+1)
        The D0...DK matrices of the arrival process.
        D1 corresponds to the lowest, DK to the highest priority.
    sigma : list of row vectors, length (K)
        The list containing the initial probability vectors of the service
        time distributions of the various customer types. The length of the
        vectors does not have to be the same.
    S : list of square matrices, length (K)
        The transient generators of the phase type distributions representing
        the service time of the jobs belonging to various types.
    further parameters : 
        The rest of the function parameters specify the options
        and the performance measures to be computed.
    
        The supported performance measures and options in this 
        function are:
    
        +----------------+--------------------+----------------------------------------+
        | Parameter name | Input parameters   | Output                                 |
        +================+====================+========================================+
        | "ncMoms"       | Number of moments  | The moments of the number of customers |
        +----------------+--------------------+----------------------------------------+
        | "ncDistr"      | Upper limit K      | The distribution of the number of      |
        |                |                    | customers from level 0 to level K-1    |
        +----------------+--------------------+----------------------------------------+
        | "stMoms"       | Number of moments  | The sojourn time moments               |
        +----------------+--------------------+----------------------------------------+
        | "stDistr"      | A vector of points | The sojourn time distribution at the   |
        |                |                    | requested points (cumulative, cdf)     |
        +----------------+--------------------+----------------------------------------+
        | "prec"         | The precision      | Numerical precision used as a stopping |
        |                |                    | condition when solving the Riccati and |
        |                |                    | the matrix-quadratic equations         |
        +----------------+--------------------+----------------------------------------+
        | "erlMaxOrder"  | Integer number     | The maximal Erlang order used in the   |
        |                |                    | erlangization procedure. The default   |
        |                |                    | value is 200.                          |
        +----------------+--------------------+----------------------------------------+
        | "classes"      | Vector of integers | Only the performance measures          |
        |                |                    | belonging to these classes are         |
        |                |                    | returned. If not given, all classes    |
        |                |                    | are analyzed.                          |
        +----------------+--------------------+----------------------------------------+
        
        (The quantities related to the number of customers in 
        the system include the customer in the server, and the 
        sojourn time related quantities include the service 
        times as well)
    
    Returns
    -------
    Ret : list of the performance measures
        Each entry of the list corresponds to a performance 
        measure requested. Each entry is a matrix, where the
        columns belong to the various job types.
        If there is just a single item, 
        then it is not put into a list.
    
    References
    ----------
    .. [1] G. Horvath, "Efficient analysis of the MMAP[K]/PH[K]/1
           priority queue", European Journal of Operational 
           Research, 246(1), 128-139, 2015.
    """

    K = len(D) - 1

    # parse options
    eaten = []
    erlMaxOrder = 200
    precision = 1e-14
    classes = np.arange(0, K)
    for i in range(len(argv)):
        if argv[i] == "prec":
            precision = argv[i + 1]
            eaten.append(i)
            eaten.append(i + 1)
        elif argv[i] == "erlMaxOrder":
            erlMaxOrder = argv[i + 1]
            eaten.append(i)
            eaten.append(i + 1)
        elif argv[i] == "classes":
            classes = np.array(argv[i + 1]) - 1
            eaten.append(i)
            eaten.append(i + 1)

    if butools.checkInput and not CheckMMAPRepresentation(D):
        raise Exception(
            'MMAPPH1PRPR: The arrival process is not a valid MMAP representation!'
        )

    if butools.checkInput:
        for k in range(K):
            if not CheckPHRepresentation(sigma[k], S[k]):
                raise Exception(
                    'MMAPPH1PRPR: the vector and matrix describing the service times is not a valid PH representation!'
                )

    # some preparation
    D0 = D[0]
    N = D0.shape[0]
    I = ml.eye(N)
    sD = ml.zeros((N, N))
    for Di in D:
        sD += Di

    s = []
    M = np.empty(K, dtype=int)  # keep integer so the slice arithmetic below stays integral
    for i in range(K):
        s.append(np.sum(-S[i], 1))
        M[i] = sigma[i].size

    Ret = []
    for k in classes:

        # step 1. solution of the workload process of the system
        # ======================================================
        sM = np.sum(M[k:K])
        Qwmm = ml.matrix(D0)
        for i in range(k):
            Qwmm += D[i + 1]

        Qwpm = ml.zeros((N * sM, N))
        Qwmp = ml.zeros((N, N * sM))
        Qwpp = ml.zeros((N * sM, N * sM))
        kix = 0
        for i in range(k, K):
            Qwmp[:, kix:kix + N * M[i]] = np.kron(D[i + 1], sigma[i])
            Qwpm[kix:kix + N * M[i], :] = np.kron(I, s[i])
            Qwpp[kix:kix + N * M[i], :][:,
                                        kix:kix + N * M[i]] = np.kron(I, S[i])
            kix += N * M[i]

        # calculate fundamental matrices
        Psiw, Kw, Uw = FluidFundamentalMatrices(Qwpp, Qwpm, Qwmp, Qwmm, 'PKU',
                                                precision)

        # calculate boundary vector
        Ua = ml.ones((N, 1)) + 2 * np.sum(Qwmp * (-Kw).I, 1)
        pm = Linsolve(
            ml.hstack((Uw, Ua)).T,
            ml.hstack((ml.zeros((1, N)), ml.ones((1, 1)))).T).T

        Bw = ml.zeros((N * sM, N))
        Bw[0:N * M[k], :] = np.kron(I, s[k])
        kappa = pm * Qwmp / np.sum(pm * Qwmp * (-Kw).I * Bw)

        if k < K - 1:
            # step 2. construct fluid model for the remaining sojourn time process
            # ====================================================================
            # (for each class except the highest priority)
            Qsmm = ml.matrix(D0)
            for i in range(k + 1):
                Qsmm += D[i + 1]

            Np = Kw.shape[0]
            Qspm = ml.zeros((Np + N * np.sum(M[k + 1:]), N))
            Qsmp = ml.zeros((N, Np + N * np.sum(M[k + 1:])))
            Qspp = ml.zeros(
                (Np + N * np.sum(M[k + 1:]), Np + N * np.sum(M[k + 1:])))
            Qspp[:Np, :Np] = Kw
            Qspm[:Np, :N] = Bw
            kix = Np
            for i in range(k + 1, K):
                Qsmp[:, kix:kix + N * M[i]] = np.kron(D[i + 1], sigma[i])
                Qspm[kix:kix + N * M[i], :] = np.kron(I, s[i])
                Qspp[kix:kix + N * M[i], kix:kix + N * M[i]] = np.kron(I, S[i])
                kix += N * M[i]

            inis = ml.hstack((kappa, ml.zeros((1, N * np.sum(M[k + 1:])))))
            Psis = FluidFundamentalMatrices(Qspp, Qspm, Qsmp, Qsmm, 'P',
                                            precision)

            # step 3. calculate the performance measures
            # ==========================================
            argIx = 0
            while argIx < len(argv):
                if argIx in eaten:
                    argIx += 1
                    continue
                elif type(argv[argIx]) is str and argv[argIx] == "stMoms":
                    # MOMENTS OF THE SOJOURN TIME
                    # ~~~~~~~~~~~~~~~~~~~~~~~~~~~
                    numOfSTMoms = argv[argIx + 1]
                    Pn = [Psis]
                    rtMoms = []
                    for n in range(1, numOfSTMoms + 1):
                        A = Qspp + Psis * Qsmp
                        B = Qsmm + Qsmp * Psis
                        C = -2 * n * Pn[n - 1]
                        bino = 1
                        for i in range(1, n):
                            bino = bino * (n - i + 1) / i
                            C += bino * Pn[i] * Qsmp * Pn[n - i]
                        P = la.solve_sylvester(A, B, -C)
                        Pn.append(P)
                        rtMoms.append(np.sum(inis * P * (-1)**n) / 2**n)
                    Ret.append(rtMoms)
                    argIx += 1
                elif type(argv[argIx]) is str and argv[argIx] == "stDistr":
                    # DISTRIBUTION OF THE SOJOURN TIME
                    # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
                    stCdfPoints = argv[argIx + 1]
                    res = []
                    for t in stCdfPoints:
                        L = erlMaxOrder
                        lambd = L / t / 2
                        Psie = FluidFundamentalMatrices(
                            Qspp - lambd * ml.eye(Qspp.shape[0]), Qspm, Qsmp,
                            Qsmm - lambd * ml.eye(Qsmm.shape[0]), 'P',
                            precision)
                        Pn = [Psie]
                        pr = np.sum(inis * Psie)
                        for n in range(1, L):
                            A = Qspp + Psie * Qsmp - lambd * ml.eye(
                                Qspp.shape[0])
                            B = Qsmm + Qsmp * Psie - lambd * ml.eye(
                                Qsmm.shape[0])
                            C = 2 * lambd * Pn[n - 1]
                            for i in range(1, n):
                                C += Pn[i] * Qsmp * Pn[n - i]
                            P = la.solve_sylvester(A, B, -C)
                            Pn.append(P)
                            pr += np.sum(inis * P)
                        res.append(pr)
                    Ret.append(np.array(res))
                    argIx += 1
                elif type(argv[argIx]) is str and argv[argIx] == "ncMoms":
                    # MOMENTS OF THE NUMBER OF JOBS
                    # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
                    numOfQLMoms = argv[argIx + 1]
                    # first calculate it at departure instants
                    QLDPn = [Psis]
                    dqlMoms = []
                    for n in range(1, numOfQLMoms + 1):
                        A = Qspp + Psis * Qsmp
                        B = Qsmm + Qsmp * Psis
                        C = n * QLDPn[n - 1] * D[k + 1]
                        bino = 1
                        for i in range(1, n):
                            bino = bino * (n - i + 1) / i
                            C = C + bino * QLDPn[i] * Qsmp * QLDPn[n - i]
                        P = la.solve_sylvester(A, B, -C)
                        QLDPn.append(P)
                        dqlMoms.append(np.sum(inis * P))
                    dqlMoms = MomsFromFactorialMoms(dqlMoms)
                    # now calculate it at random time instance
                    pi = CTMCSolve(sD)
                    lambdak = np.sum(pi * D[k + 1])
                    QLPn = [pi]
                    qlMoms = []
                    iTerm = (ml.ones((N, 1)) * pi - sD).I
                    for n in range(1, numOfQLMoms + 1):
                        sumP = np.sum(inis * QLDPn[n]) + n * (
                            inis * QLDPn[n - 1] - QLPn[n - 1] * D[k + 1] /
                            lambdak) * iTerm * np.sum(D[k + 1], 1)
                        P = sumP * pi + n * (QLPn[n - 1] * D[k + 1] - inis *
                                             QLDPn[n - 1] * lambdak) * iTerm
                        QLPn.append(P)
                        qlMoms.append(np.sum(P))
                    qlMoms = MomsFromFactorialMoms(qlMoms)
                    Ret.append(qlMoms)
                    argIx += 1
                elif type(argv[argIx]) is str and argv[argIx] == "ncDistr":
                    # DISTRIBUTION OF THE NUMBER OF JOBS
                    # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
                    numOfQLProbs = argv[argIx + 1]
                    sDk = ml.matrix(D0)
                    for i in range(k):
                        sDk += D[i + 1]
                    # first calculate it at departure instants
                    Psid = FluidFundamentalMatrices(Qspp, Qspm, Qsmp, sDk, 'P',
                                                    precision)
                    Pn = [Psid]
                    dqlProbs = inis * Psid
                    for n in range(1, numOfQLProbs):
                        A = Qspp + Psid * Qsmp
                        B = sDk + Qsmp * Psid
                        C = Pn[n - 1] * D[k + 1]
                        for i in range(1, n):
                            C += Pn[i] * Qsmp * Pn[n - i]
                        P = la.solve_sylvester(A, B, -C)
                        Pn.append(P)
                        dqlProbs = ml.vstack((dqlProbs, inis * P))
                    # now calculate it at random time instance
                    pi = CTMCSolve(sD)
                    lambdak = np.sum(pi * D[k + 1])
                    iTerm = -(sD - D[k + 1]).I
                    qlProbs = lambdak * dqlProbs[0, :] * iTerm
                    for n in range(1, numOfQLProbs):
                        P = (qlProbs[n - 1, :] * D[k + 1] + lambdak *
                             (dqlProbs[n, :] - dqlProbs[n - 1, :])) * iTerm
                        qlProbs = ml.vstack((qlProbs, P))
                    qlProbs = np.sum(qlProbs, 1).A.flatten()
                    Ret.append(qlProbs)
                    argIx += 1
                else:
                    raise Exception("MMAPPH1PRPR: Unknown parameter " +
                                    str(argv[argIx]))
                argIx += 1
        elif k == K - 1:
            # step 3. calculate the performance measures
            # ==========================================
            argIx = 0
            while argIx < len(argv):
                if argIx in eaten:
                    argIx += 1
                    continue
                elif type(argv[argIx]) is str and argv[argIx] == "stMoms":
                    # MOMENTS OF THE SOJOURN TIME
                    # ~~~~~~~~~~~~~~~~~~~~~~~~~~~
                    numOfSTMoms = argv[argIx + 1]
                    rtMoms = []
                    for i in range(1, numOfSTMoms + 1):
                        rtMoms.append(
                            np.sum(
                                math.factorial(i) * kappa * (-Kw).I**(i + 1) *
                                Bw))
                    Ret.append(rtMoms)
                    argIx += 1
                elif type(argv[argIx]) is str and argv[argIx] == "stDistr":
                    # DISTRIBUTION OF THE SOJOURN TIME
                    # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
                    stCdfPoints = argv[argIx + 1]
                    rtDistr = []
                    for t in stCdfPoints:
                        rtDistr.append(
                            np.sum(kappa * (-Kw).I *
                                   (ml.eye(Kw.shape[0]) - la.expm(Kw * t)) *
                                   Bw))
                    Ret.append(np.array(rtDistr))
                    argIx += 1
                elif type(argv[argIx]) is str and (argv[argIx] == "ncMoms" or
                                                   argv[argIx] == "ncDistr"):
                    L = np.kron(sD - D[k + 1], ml.eye(M[k])) + np.kron(
                        ml.eye(N), S[k])
                    B = np.kron(ml.eye(N), s[k] * sigma[k])
                    F = np.kron(D[k + 1], ml.eye(M[k]))
                    L0 = np.kron(sD - D[k + 1], ml.eye(M[k]))
                    R = QBDFundamentalMatrices(B, L, F, 'R', precision)
                    p0 = CTMCSolve(L0 + R * B)
                    p0 = p0 / np.sum(p0 * (ml.eye(R.shape[0]) - R).I)
                    if argv[argIx] == "ncMoms":
                        # MOMENTS OF THE NUMBER OF JOBS
                        # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
                        numOfQLMoms = argv[argIx + 1]
                        qlMoms = []
                        for i in range(1, numOfQLMoms + 1):
                            qlMoms.append(
                                np.sum(
                                    math.factorial(i) * p0 * R**i *
                                    (ml.eye(R.shape[0]) - R).I**(i + 1)))
                        Ret.append(MomsFromFactorialMoms(qlMoms))
                    elif argv[argIx] == "ncDistr":
                        # DISTRIBUTION OF THE NUMBER OF JOBS
                        # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
                        numOfQLProbs = argv[argIx + 1]
                        qlProbs = [np.sum(p0)]
                        for i in range(1, numOfQLProbs):
                            qlProbs.append(np.sum(p0 * R**i))
                        Ret.append(np.array(qlProbs))
                    argIx += 1
                else:
                    raise Exception("MMAPPH1PRPR: Unknown parameter " +
                                    str(argv[argIx]))
                argIx += 1

    if len(Ret) == 1:
        return Ret[0]
    else:
        return Ret
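
A usage sketch for the queue above. The import path assumes the upstream butools package layout (butools.queues); the two-class MMAP and the exponential (order-1 PH) services are illustrative:

import numpy.matlib as ml
from butools.queues import MMAPPH1PRPR

# Two-class MMAP arrival process: D0 + D1 + D2 is a generator.
D0 = ml.matrix([[-5., 1.], [2., -7.]])
D1 = ml.matrix([[2., 1.], [1., 2.]])   # low-priority arrivals
D2 = ml.matrix([[1., 0.], [1., 1.]])   # high-priority arrivals

# Exponential services written as order-1 PH distributions.
sigma = [ml.matrix([[1.]]), ml.matrix([[1.]])]
S = [ml.matrix([[-8.]]), ml.matrix([[-9.]])]

# First three moments of the number of customers, one result per class.
ncm = MMAPPH1PRPR([D0, D1, D2], sigma, S, "ncMoms", 3)
print(ncm)
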
Example #18
def CanonicalFromDPH3(alpha, A, prec=1e-14):
    """
    Returns the canonical form of an order-3 discrete phase-type 
    distribution.
    
    Parameters
    ----------
    alpha : matrix, shape (1,3)
        Initial vector of the discrete phase-type distribution
    A : matrix, shape (3,3)
        Transition probability matrix of the discrete phase-type
        distribution
    prec : double, optional
      Numerical precision for checking the input, default value
      is 1e-14
    
    Returns
    -------
    beta : matrix, shape (1,3)
      The initial probability vector of the canonical form
    B : matrix, shape (3,3)
      Transition probability matrix of the canonical form
    """

    if butools.checkInput and not CheckMGRepresentation(alpha, A, prec):
        raise Exception(
            "CanonicalFromDPH3: Input is not a valid DPH representation!")

    if butools.checkInput and (A.shape[0] != 3 or A.shape[1] != 3):
        raise Exception("CanonicalFromDPH3: Dimension must be 3!")

    ev = la.eigvals(A)
    ix = np.argsort(-np.abs(np.real(ev)))
    lambd = ev[ix]
    eye = ml.eye(3)

    a0 = -lambd[0] * lambd[1] * lambd[2]
    a1 = lambd[0] * lambd[1] + lambd[0] * lambd[2] + lambd[1] * lambd[2]
    a2 = -lambd[0] - lambd[1] - lambd[2]
    e = ml.matrix([[1, 1, 1]]).T

    if np.real(lambd[0]) > 0 and np.real(lambd[1]) >= 0 and np.real(
            lambd[2]) >= 0:
        #PPP case
        alphaout, A2 = CanonicalFromPH3(alpha, A - eye, prec)
        Aout = A2 + eye
    elif np.real(lambd[0]) > 0 and np.real(lambd[1]) >= 0 and np.real(
            lambd[2]) < 0:
        #PPN case
        x1 = lambd[0]
        x2 = lambd[1] + lambd[2]
        x3 = lambd[1] * lambd[2] / (lambd[1] + lambd[2] - 1)
        Aout = ml.matrix([[x1, 1 - x1, 0], [0, x2, 1 - x2], [0, x3, 0]])
        b3 = 1 / (1 - x3) * (e - A * e)
        b2 = 1 / (1 - x2) * A * b3
        b1 = e - b2 - b3
        B = ml.hstack((b1, b2, b3))
        alphaout = alpha * B
    elif np.real(lambd[0]) > 0 and np.real(lambd[1]) < 0 and np.real(
            lambd[2]) >= 0:
        #PNP case
        x1 = -a2
        x2 = (a0 - a1 * a2) / (a2 * (1 + a2))
        x3 = a0 * (1 + a2) / (a0 - a2 - a1 * a2 - a2**2)
        Aout = ml.matrix([[x1, 1 - x1, 0], [x2, 0, 1 - x2], [0, x3, 0]])
        b3 = 1 / (1 - x3) * (e - A * e)
        b2 = 1 / (1 - x2) * A * b3
        b1 = e - b2 - b3
        if alpha * b1 >= 0:
            B = ml.hstack((b1, b2, b3))
            alphaout = alpha * B
        else:
            #Set the initial vector first element to 0
            x33 = 0
            a1 = -1
            while x33 <= 1:
                [a1, x1, x2, x3, B] = firstInitElem(x33, lambd, alpha, A)
                if a1 >= 0 and x1 >= 0 and x2 >= 0 and x3 >= 0 and x3 + x33 < 1:
                    break
                x33 = x33 + 0.01

            if a1 >= 0:
                Aout = ml.matrix([[x1, 1 - x1, 0], [x2, 0, 1 - x2],
                                  [0, x3, x33]])
                alphaout = alpha * B
            else:
                #PNP+
                x1 = lambd[2]
                x2 = lambd[0] + lambd[1]
                x3 = lambd[0] * lambd[1] / (lambd[0] + lambd[1] - 1)
                Aout = ml.matrix([[x1, 0, 0], [0, x2, 1 - x2], [0, x3, 0]])

                p1 = alpha * (e - A * e)
                p2 = alpha * A * (e - A * e)
                d1 = (1 - lambd[0]) * ((1 - lambd[1]) * (1 - lambd[2]) +
                                       (-1 + lambd[1] + lambd[2]) * p1 -
                                       p2) / ((lambd[0] - lambd[1]) *
                                              (lambd[0] - lambd[2]))
                d2 = (lambd[1] - 1) * ((1 - lambd[0]) * (1 - lambd[2]) +
                                       (-1 + lambd[0] + lambd[2]) * p1 -
                                       p2) / ((lambd[0] - lambd[1]) *
                                              (lambd[1] - lambd[2]))
                d3 = (lambd[2] - 1) * ((1 - lambd[0]) * (1 - lambd[1]) +
                                       (-1 + lambd[0] + lambd[1]) * p1 -
                                       p2) / ((lambd[1] - lambd[2]) *
                                              (lambd[2] - lambd[0]))
                alphaout = ml.matrix([[
                    d3 / (1 - lambd[2]),
                    (d1 * lambd[0] + d2 * lambd[1]) / ((1 - lambd[0]) *
                                                       (1 - lambd[1])),
                    (d1 + d2) * (1 - lambd[0] - lambd[1]) / ((1 - lambd[0]) *
                                                             (1 - lambd[1]))
                ]])

                if np.min(alphaout) < 0 or np.min(np.min(Aout)) < 0:
                    raise Exception("DPH3Canonical: Unhandled PNP case!")
    elif np.real(lambd[0]) > 0 and np.real(lambd[1]) < 0 and np.real(
            lambd[2]) < 0:
        #PNN case
        if np.all(np.isreal(lambd)) or np.abs(
                lambd[1])**2 <= 2 * lambd[0] * (-np.real(lambd[1])):
            x1 = -a2
            x2 = -a1 / (1 + a2)
            x3 = -a0 / (1 + a1 + a2)
            Aout = ml.matrix([[x1, 1 - x1, 0], [x2, 0, 1 - x2], [x3, 0, 0]])
            b3 = 1 / (1 - x3) * (e - A * e)
            b2 = 1 / (1 - x2) * A * b3
            b1 = e - b2 - b3
            B = ml.hstack((b1, b2, b3))
            alphaout = alpha * B
        else:
            [alphaout, A2] = CanonicalFromPH3(alpha, A - eye, prec)
            Aout = A2 + eye
    return (np.real(alphaout), np.real(Aout))
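
A brief usage sketch for the transformation above (CanonicalFromDPH3 is assumed to be importable from butools.dph as in the upstream package; the order-3 DPH below is illustrative):

import numpy.matlib as ml
from butools.dph import CanonicalFromDPH3

# A valid order-3 discrete phase-type distribution: alpha sums to one and
# A is sub-stochastic (non-negative, row sums below one).
alpha = ml.matrix([[0.4, 0.3, 0.3]])
A = ml.matrix([[0.1, 0.3, 0.2],
               [0.2, 0.1, 0.4],
               [0.3, 0.2, 0.1]])

beta, B = CanonicalFromDPH3(alpha, A)
print(beta)
print(B)
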
Example #19
def SPIRIT(streams, energyThresh, lamb, evalMetrics):

    # Make the input array iterable (row by row)
    if type(streams) == np.ndarray:
        streams_iter = iter(streams)

    # Max No. Streams
    if streams.ndim == 1:
        streams = np.expand_dims(streams, axis=1)
        num_streams = streams.shape[1]
    else: 
        num_streams = streams.shape[1]

    count_over = 0
    count_under = 0

#===============================================================================
#      Initialise k, w and d, lamb
#===============================================================================

    k = 1 # Hidden Variables, initialise to one 
    
    # Weights
    pc_weights = npm.zeros(num_streams)
    pc_weights[0, 0] = 1
    
    # initialise outputs
    res = {}
    all_weights = []
    k_hist = []
    anomalies = []
    x_dash = npm.zeros((1,num_streams))
    
    Eng = mat([0.00000001, 0.00000001])    
    
    E_xt = 0  # Energy of X at time t
    E_rec_i = mat([0.000000000000001]) # Energy of reconstruction

    Y = npm.zeros(num_streams)
    
    timeSteps = streams.shape[0]
    
#===============================================================================
# Main Loop 
#===============================================================================
    for t in range(1, timeSteps + 1): # t = 1,...,200

        k_hist.append(k)

        x_t_plus_1 = mat(next(streams_iter)) # Read in next signals

        d_i = E_rec_i * t

        # Step 1 - Update Weights 
        pc_weights, y_t_i, error = track_W(x_t_plus_1, 
                                               k, pc_weights, d_i,
                                               num_streams, 
                                               lamb)
        # Record hidden variables
        padding = num_streams - k
        y_bar_t = npm.hstack((y_t_i, mat([nan] * padding)))
        Y = npm.vstack((Y,y_bar_t))
        
        # Record Weights
        all_weights.append(pc_weights)  
        # Record reconstructed z and RSRE
        x_dash = npm.vstack((x_dash, y_t_i * pc_weights))
               
        # Record RSRE
        if t == 1:
            top = 0.0
            bot = 0.0
            
        top = top + (norm(x_t_plus_1 - x_dash) ** 2 )

        bot = bot + (norm(x_t_plus_1) ** 2)
        
        new_RSRE = top / bot   
                  
        if t == 1:
            RSRE = new_RSRE
        else:                  
            RSRE = npm.vstack((RSRE, new_RSRE))

        ### FOR EVALUATION ###
        #deviation from truth
        if evalMetrics == 'T' :
            
            Qt = pc_weights.T            
            
            if t == 1 :
                res['subspace_error'] = npm.zeros((timeSteps,1))
                res['orthog_error'] = npm.zeros((timeSteps,1))                


                res['angle_error'] = npm.zeros((timeSteps,1))
                Cov_mat = npm.zeros([num_streams,num_streams])
                
            # Calculate covariance matrix of data up to time t
            Cov_mat = lamb * Cov_mat +  npm.dot(x_t_plus_1,  x_t_plus_1.T)
            # Get eigenvalues and eigenvectors             
            W, V = eig(Cov_mat)
            # Use this to sort eigenvectors in descending order of eigenvalue
            eig_idx = W.argsort() # Get sort index
            eig_idx = eig_idx[::-1] # Reverse order (default is ascending)
            # V_k = the k eigenvectors with the largest eigenvalues
            V_k = V[:, eig_idx[:k]]          
            # Calculate subspace error        
            C = npm.dot(V_k , V_k.T) - npm.dot(Qt , Qt.T)  
            res['subspace_error'][t-1,0] = 10 * np.log10(npm.trace(npm.dot(C.T , C))) #frobenius norm in dB
        
            # Calculate angle between projection matrices
            D = npm.dot(npm.dot(npm.dot(V_k.T, Qt), Qt.T), V_k) 
            eigVal, eigVec = eig(D)
            angle = npm.arccos(np.sqrt(max(eigVal)))        
            res['angle_error'][t-1,0] = angle        
    
            # Calculate deviation from orthonormality
            F = npm.dot(Qt.T , Qt) - npm.eye(k)
            res['orthog_error'][t-1,0] = 10 * np.log10(npm.trace(npm.dot(F.T , F))) #frobenius norm in dB
              

        # Step 2 - Update Energy estimate
        E_xt = ((lamb * (t-1) * E_xt) + norm(x_t_plus_1) ** 2) / t
    
        for i in range(k):
            E_rec_i[0, i] = ((lamb * (t-1) * E_rec_i[0, i]) + (y_t_i[0, i] ** 2)) / t

        # Step 3 - Estimate the retained energy
        E_retained = npm.sum(E_rec_i,1)
    
        # Record Energy  
        Eng_new = npm.hstack((E_xt, E_retained[0,0]))
        Eng = npm.vstack((Eng, Eng_new))
    
        if E_retained < energyThresh[0] * E_xt:
            if k != num_streams:
                k = k + 1       
                # Initialise E_{k+1} <-- 0
                E_rec_i = npm.hstack((E_rec_i, mat([0]))) 
                # Initialise W_i+1
                new_weight_vec = npm.zeros(num_streams)  
                new_weight_vec[0, k-1] = 1
                pc_weights = npm.vstack((pc_weights, new_weight_vec))
                anomalies.append(t -1)
            else:
                count_over += 1
        elif E_retained > energyThresh[1] * E_xt:
            if k > 1 :
                k = k - 1
                # discard w_k and error
                pc_weights = delete(pc_weights, -1, 0)    
                # Discard E_rec_i[k]
                E_rec_i = delete(E_rec_i, -1)
            else:
                count_under += 1
          
          
    # Data Stores
    res2 = {'hidden' :  Y,                        # Array for hidden Variables
           'weights' : all_weights,
           'E_t' : Eng[:,0],                     # total energy of data 
           'E_dash_t' : Eng[:,1],                # hidden var energy
           'e_ratio' : np.divide(Eng[:,1], Eng[:,0]),      # Energy ratio 
           'RSRE' : RSRE,                        # Relative squared Reconstruction error 
           'recon' : x_dash,                     # reconstructed data
           'r_hist' : k_hist, # history of r values 
           'anomalies' : anomalies}  
           
    res.update(res2)
              
    return res, all_weights
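
The rank-adaptation rule driving k in the loop above can be stated compactly: with E_t the exponentially weighted energy of the inputs and \tilde{E}_t the energy retained by the k hidden variables,

.. math::
    \tilde{E}_t < f_E\,E_t \;\Rightarrow\; k \leftarrow k+1, \qquad
    \tilde{E}_t > F_E\,E_t \;\Rightarrow\; k \leftarrow k-1,

where (f_E, F_E) is the energyThresh pair passed to SPIRIT.
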
Example #20
def MMAPPH1FCFS(D, sigma, S, *argv):
    """
    Returns various performance measures of a MMAP[K]/PH[K]/1
    first-come-first-serve queue, see [1]_.
    
    Parameters
    ----------
    D : list of matrices of shape (N,N), length (K+1)
        The D0...DK matrices of the arrival process.
    sigma : list of row vectors, length (K)
        The list containing the initial probability vectors of the service
        time distributions of the various customer types. The length of the
        vectors does not have to be the same.
    S : list of square matrices, length (K)
        The transient generators of the phase type distributions representing
        the service time of the jobs belonging to various types.
    further parameters : 
        The rest of the function parameters specify the options
        and the performance measures to be computed.
    
        The supported performance measures and options in this 
        function are:
    
        +----------------+--------------------+----------------------------------------+
        | Parameter name | Input parameters   | Output                                 |
        +================+====================+========================================+
        | "ncMoms"       | Number of moments  | The moments of the number of customers |
        +----------------+--------------------+----------------------------------------+
        | "ncDistr"      | Upper limit K      | The distribution of the number of      |
        |                |                    | customers from level 0 to level K-1    |
        +----------------+--------------------+----------------------------------------+
        | "stMoms"       | Number of moments  | The sojourn time moments               |
        +----------------+--------------------+----------------------------------------+
        | "stDistr"      | A vector of points | The sojourn time distribution at the   |
        |                |                    | requested points (cumulative, cdf)     |
        +----------------+--------------------+----------------------------------------+
        | "stDistrME"    | None               | The vector-matrix parameters of the    |
        |                |                    | matrix-exponentially distributed       |
        |                |                    | sojourn time distribution              |
        +----------------+--------------------+----------------------------------------+
        | "stDistrPH"    | None               | The vector-matrix parameters of the    |
        |                |                    | matrix-exponentially distributed       |
        |                |                    | sojourn time distribution, converted   |
        |                |                    | to a continuous PH representation      |
        +----------------+--------------------+----------------------------------------+
        | "prec"         | The precision      | Numerical precision used as a stopping |
        |                |                    | condition when solving the Riccati     |
        |                |                    | equation                               |
        +----------------+--------------------+----------------------------------------+
        | "classes"      | Vector of integers | Only the performance measures          |
        |                |                    | belonging to these classes are         |
        |                |                    | returned. If not given, all classes    |
        |                |                    | are analyzed.                          |
        +----------------+--------------------+----------------------------------------+
        
        (The quantities related to the number of customers in 
        the system include the customer in the server, and the 
        sojourn time related quantities include the service 
        times as well)
    
    Returns
    -------
    Ret : list of the performance measures
        Each entry of the list corresponds to a performance 
        measure requested. Each entry is a matrix, where the
        columns belong to the various job types.
        If there is just a single item, 
        then it is not put into a list.
    
    References
    ----------
    .. [1] Qiming He, "Analysis of a continuous time 
           SM[K]/PH[K]/1/FCFS queue: Age process, sojourn times,
           and queue lengths", Journal of Systems Science and 
           Complexity, 25(1), pp 133-155, 2012.
    """

    K = len(D) - 1

    # parse options
    eaten = []
    precision = 1e-14
    classes = np.arange(0, K)
    for i in range(len(argv)):
        if argv[i] == "prec":
            precision = argv[i + 1]
            eaten.append(i)
            eaten.append(i + 1)
        elif argv[i] == "classes":
            classes = np.array(argv[i + 1]) - 1
            eaten.append(i)
            eaten.append(i + 1)

    if butools.checkInput and not CheckMMAPRepresentation(D):
        raise Exception(
            'MMAPPH1FCFS: The arrival process is not a valid MMAP representation!'
        )

    if butools.checkInput:
        for k in range(K):
            if not CheckPHRepresentation(sigma[k], S[k]):
                raise Exception(
                    'MMAPPH1FCFS: the vector and matrix describing the service times is not a valid PH representation!'
                )

    # some preparation
    D0 = D[0]
    N = D0.shape[0]
    Ia = ml.eye(N)
    Da = ml.zeros((N, N))
    for q in range(K):
        Da += D[q + 1]
    theta = CTMCSolve(D0 + Da)
    beta = [CTMCSolve(S[k] + ml.sum(-S[k], 1) * sigma[k]) for k in range(K)]
    lambd = [np.sum(theta * D[k + 1]) for k in range(K)]
    mu = [np.sum(beta[k] * (-S[k])) for k in range(K)]
    Nsk = [S[k].shape[0] for k in range(K)]
    ro = np.sum(np.array(lambd) / np.array(mu))
    alpha = theta * Da / sum(lambd)
    D0i = (-D0).I

    Sa = S[0]
    sa = [ml.zeros(sigma[0].shape)] * K
    sa[0] = sigma[0]
    ba = [ml.zeros(beta[0].shape)] * K
    ba[0] = beta[0]
    sv = [ml.zeros((Nsk[0], 1))] * K
    sv[0] = ml.sum(-S[0], 1)
    Pk = [D0i * D[q + 1] for q in range(K)]

    for k in range(1, K):
        Sa = la.block_diag(Sa, S[k])
        for q in range(K):
            if q == k:
                sa[q] = ml.hstack((sa[q], sigma[k]))
                ba[q] = ml.hstack((ba[q], beta[k]))
                sv[q] = ml.vstack((sv[q], -np.sum(S[k], 1)))
            else:
                sa[q] = ml.hstack((sa[q], ml.zeros(sigma[k].shape)))
                ba[q] = ml.hstack((ba[q], ml.zeros(beta[k].shape)))
                sv[q] = ml.vstack((sv[q], ml.zeros((Nsk[k], 1))))
    Sa = ml.matrix(Sa)
    P = D0i * Da
    iVec = ml.kron(D[1], sa[0])
    for k in range(1, K):
        iVec += ml.kron(D[k + 1], sa[k])
    Ns = Sa.shape[0]
    Is = ml.eye(Ns)

    # step 1. solve the age process of the queue
    # ==========================================

    # solve Y0 and calculate T
    Y0 = FluidFundamentalMatrices(ml.kron(Ia, Sa), ml.kron(Ia, -ml.sum(Sa, 1)),
                                  iVec, D0, "P", precision)
    T = ml.kron(Ia, Sa) + Y0 * iVec

    # calculate pi0 and v0
    pi0 = ml.zeros((1, T.shape[0]))
    for k in range(K):
        pi0 += ml.kron(theta * D[k + 1], ba[k] / mu[k])
    pi0 = -pi0 * T

    iT = (-T).I
    oa = ml.ones((N, 1))

    # step 2. calculate performance measures
    # ======================================
    Ret = []
    for k in classes:
        argIx = 0
        clo = iT * ml.kron(oa, sv[k])
        while argIx < len(argv):
            if argIx in eaten:
                argIx += 1
                continue
            elif type(argv[argIx]) is str and argv[argIx] == "stMoms":
                numOfSTMoms = argv[argIx + 1]
                rtMoms = []
                for m in range(1, numOfSTMoms + 1):
                    rtMoms.append(
                        math.factorial(m) * np.sum(pi0 * iT**m * clo /
                                                   (pi0 * clo)))
                Ret.append(rtMoms)
                argIx += 1
            elif type(argv[argIx]) is str and argv[argIx] == "stDistr":
                stCdfPoints = argv[argIx + 1]
                cdf = []
                for t in stCdfPoints:
                    pr = 1 - np.sum(pi0 * la.expm(T * t) * clo / (pi0 * clo))
                    cdf.append(pr)
                Ret.append(np.array(cdf))
                argIx += 1
            elif type(argv[argIx]) is str and argv[argIx] == "stDistrME":
                Bm = SimilarityMatrixForVectors(clo / (pi0 * clo),
                                                ml.ones((N * Ns, 1)))
                Bmi = Bm.I
                A = Bm * T * Bmi
                alpha = pi0 * Bmi
                Ret.append(alpha)
                Ret.append(A)
            elif type(argv[argIx]) is str and argv[argIx] == "stDistrPH":
                vv = pi0 * iT
                ix = np.arange(N * Ns)
                nz = ix[vv.flat > precision]
                delta = Diag(vv[:, nz])
                cl = -T * clo / (pi0 * clo)
                alpha = cl[nz, :].T * delta
                A = delta.I * T[nz, :][:, nz].T * delta
                Ret.append(alpha)
                Ret.append(A)
            elif type(argv[argIx]) is str and argv[argIx] == "ncDistr":
                numOfQLProbs = argv[argIx + 1]
                argIx += 1
                values = np.empty(numOfQLProbs)
                jm = ml.zeros((Ns, 1))
                jm[np.sum(Nsk[0:k]):np.sum(Nsk[0:k + 1]), :] = 1
                jmc = ml.ones((Ns, 1))
                jmc[np.sum(Nsk[0:k]):np.sum(Nsk[0:k + 1]), :] = 0
                LmCurr = la.solve_sylvester(T, ml.kron(D0 + Da - D[k + 1], Is),
                                            -ml.eye(N * Ns))
                values[0] = 1 - ro + np.sum(pi0 * LmCurr * ml.kron(oa, jmc))
                for i in range(1, numOfQLProbs):
                    LmPrev = LmCurr
                    LmCurr = la.solve_sylvester(
                        T, ml.kron(D0 + Da - D[k + 1], Is),
                        -LmPrev * ml.kron(D[k + 1], Is))
                    values[i] = np.sum(pi0 * LmCurr * ml.kron(oa, jmc) +
                                       pi0 * LmPrev * ml.kron(oa, jm))
                Ret.append(values)
            elif type(argv[argIx]) is str and argv[argIx] == "ncMoms":
                numOfQLMoms = argv[argIx + 1]
                argIx += 1
                jm = ml.zeros((Ns, 1))
                jm[np.sum(Nsk[0:k]):np.sum(Nsk[0:k + 1]), :] = 1
                ELn = [
                    la.solve_sylvester(T, ml.kron(D0 + Da, Is),
                                       -ml.eye(N * Ns))
                ]
                qlMoms = []
                for n in range(1, numOfQLMoms + 1):
                    bino = 1
                    Btag = ml.zeros((N * Ns, N * Ns))
                    for i in range(n):
                        Btag += bino * ELn[i]
                        bino *= (n - i) / (i + 1)
                    ELn.append(
                        la.solve_sylvester(T, ml.kron(D0 + Da, Is),
                                           -Btag * ml.kron(D[k + 1], Is)))
                    qlMoms.append(
                        np.sum(pi0 * ELn[n]) +
                        np.sum(pi0 * Btag * ml.kron(oa, jm)))
                Ret.append(qlMoms)
            else:
                raise Exception("MMAPPH1FCFS: Unknown parameter " +
                                str(argv[argIx]))
            argIx += 1

    if len(Ret) == 1:
        return Ret[0]
    else:
        return Ret
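For illustration only, a minimal call of MMAPPH1FCFS might look like the sketch below. The two-class MMAP and PH service parameters are invented for this note, and the import path (butools.queues) is assumed from the usual butools layout; adjust both to your own setup.

import numpy.matlib as ml
from butools.queues import MMAPPH1FCFS   # assumed package layout

# hypothetical two-class arrival process (K = 2) on a 2-state background chain
D0 = ml.matrix([[-3.0, 1.0], [2.0, -5.0]])
D1 = ml.matrix([[1.5, 0.5], [1.0, 0.5]])
D2 = ml.matrix([[0.0, 0.0], [0.5, 1.0]])
# PH service times: Erlang-2 with rate 8 for class 1, hyper-exponential for class 2
sigma = [ml.matrix([[1.0, 0.0]]), ml.matrix([[0.5, 0.5]])]
S = [ml.matrix([[-8.0, 8.0], [0.0, -8.0]]),
     ml.matrix([[-6.0, 0.0], [0.0, -10.0]])]

# first three sojourn time moments, one list per class
stMomsC1, stMomsC2 = MMAPPH1FCFS([D0, D1, D2], sigma, S, "stMoms", 3)
# queue length distribution (0..9 customers) of class 2 only
ncDistrC2 = MMAPPH1FCFS([D0, D1, D2], sigma, S, "classes", [2], "ncDistr", 10)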
Example #21
def MMAPPH1NPPR(D, sigma, S, *argv):
    """
    Returns various performance measures of a continuous time 
    MMAP[K]/PH[K]/1 non-preemptive priority queue, see [1]_.
    
    Parameters
    ----------
    D : list of matrices of shape (N,N), length (K+1)
        The D0...DK matrices of the arrival process.
        D1 corresponds to the lowest, DK to the highest priority.
    sigma : list of row vectors, length (K)
        The list containing the initial probability vectors of the service
        time distributions of the various customer types. The lengths of the
        vectors do not have to be the same.
    S : list of square matrices, length (K)
        The transient generators of the phase type distributions representing
        the service time of the jobs belonging to various types.
    further parameters : 
        The rest of the function parameters specify the options
        and the performance measures to be computed.
    
        The supported performance measures and options in this 
        function are:
    
        +----------------+--------------------+----------------------------------------+
        | Parameter name | Input parameters   | Output                                 |
        +================+====================+========================================+
        | "ncMoms"       | Number of moments  | The moments of the number of customers |
        +----------------+--------------------+----------------------------------------+
        | "ncDistr"      | Upper limit K      | The distribution of the number of      |
        |                |                    | customers from level 0 to level K-1    |
        +----------------+--------------------+----------------------------------------+
        | "stMoms"       | Number of moments  | The sojourn time moments               |
        +----------------+--------------------+----------------------------------------+
        | "stDistr"      | A vector of points | The sojourn time distribution at the   |
        |                |                    | requested points (cumulative, cdf)     |
        +----------------+--------------------+----------------------------------------+
        | "prec"         | The precision      | Numerical precision used as a stopping |
        |                |                    | condition when solving the Riccati and |
        |                |                    | the matrix-quadratic equations         |
        +----------------+--------------------+----------------------------------------+
        | "erlMaxOrder"  | Integer number     | The maximal Erlang order used in the   |
        |                |                    | erlangization procedure. The default   |
        |                |                    | value is 200.                          |
        +----------------+--------------------+----------------------------------------+
        | "classes"      | Vector of integers | Only the performance measures          |
        |                |                    | belonging to these classes are         |
        |                |                    | returned. If not given, all classes    |
        |                |                    | are analyzed.                          |
        +----------------+--------------------+----------------------------------------+
        
        (The quantities related to the number of customers in 
        the system include the customer in the server, and the 
        sojourn time related quantities include the service 
        times as well)
    
    Returns
    -------
    Ret : list of the performance measures
        Each entry of the list corresponds to a performance 
        measure requested. Each entry is a matrix, where the
        columns belong to the various job types.
        If there is just a single item, 
        then it is not put into a list.
    
    References
    ----------
    .. [1] G. Horvath, "Efficient analysis of the MMAP[K]/PH[K]/1
           priority queue", European Journal of Operational 
           Research, 246(1), 128-139, 2015.
    """

    K = len(D) - 1

    # parse options
    eaten = []
    erlMaxOrder = 200
    precision = 1e-14
    classes = np.arange(0, K)
    for i in range(len(argv)):
        if argv[i] == "prec":
            precision = argv[i + 1]
            eaten.append(i)
            eaten.append(i + 1)
        elif argv[i] == "erlMaxOrder":
            erlMaxOrder = argv[i + 1]
            eaten.append(i)
            eaten.append(i + 1)
        elif argv[i] == "classes":
            classes = np.array(argv[i + 1]) - 1
            eaten.append(i)
            eaten.append(i + 1)

    if butools.checkInput and not CheckMMAPRepresentation(D):
        raise Exception(
            'MMAPPH1NPPR: The arrival process is not a valid MMAP representation!'
        )

    if butools.checkInput:
        for k in range(K):
            if not CheckPHRepresentation(sigma[k], S[k]):
                raise Exception(
                    'MMAPPH1NPPR: the vector and matrix describing the service times is not a valid PH representation!'
                )

    # some preparation
    D0 = D[0]
    N = D0.shape[0]
    I = ml.eye(N)
    sD = ml.zeros((N, N))
    for Di in D:
        sD += Di

    s = []
    M = np.empty(K, dtype=int)  # service phase counts must be integers (used as indices below)
    for i in range(K):
        s.append(np.sum(-S[i], 1))
        M[i] = sigma[i].size

    # step 1. solution of the workload process of the joint queue
    # ===========================================================
    sM = np.sum(M)
    Qwmm = ml.matrix(D0)
    Qwpm = ml.zeros((N * sM, N))
    Qwmp = ml.zeros((N, N * sM))
    Qwpp = ml.zeros((N * sM, N * sM))
    kix = 0
    for i in range(K):
        Qwmp[:, kix:kix + N * M[i]] = np.kron(D[i + 1], sigma[i])
        Qwpm[kix:kix + N * M[i], :] = np.kron(I, s[i])
        Qwpp[kix:kix + N * M[i], :][:, kix:kix + N * M[i]] = np.kron(I, S[i])
        kix += N * M[i]

    # calculate fundamental matrices
    Psiw, Kw, Uw = FluidFundamentalMatrices(Qwpp, Qwpm, Qwmp, Qwmm, 'PKU',
                                            precision)

    # calculate boundary vector
    Ua = ml.ones((N, 1)) + 2 * np.sum(Qwmp * (-Kw).I, 1)
    pm = Linsolve(
        ml.hstack((Uw, Ua)).T,
        ml.hstack((ml.zeros((1, N)), ml.ones((1, 1)))).T).T

    ro = ((1.0 - np.sum(pm)) / 2.0) / (
        np.sum(pm) + (1.0 - np.sum(pm)) / 2.0
    )  # calc idle time with weight=1, and the busy time with weight=1/2
    kappa = pm / np.sum(pm)

    pi = CTMCSolve(sD)
    lambd = []
    for i in range(K):
        lambd.append(np.sum(pi * D[i + 1]))

    Psiw = []
    Qwmp = []
    Qwzp = []
    Qwpp = []
    Qwmz = []
    Qwpz = []
    Qwzz = []
    Qwmm = []
    Qwpm = []
    Qwzm = []
    for k in range(K):
        # step 2. construct a workload process for classes k...K
        # ======================================================
        Mlo = np.sum(M[:k])
        Mhi = np.sum(M[k:])

        Qkwpp = ml.zeros((N * Mlo * Mhi + N * Mhi, N * Mlo * Mhi + N * Mhi))
        Qkwpz = ml.zeros((N * Mlo * Mhi + N * Mhi, N * Mlo))
        Qkwpm = ml.zeros((N * Mlo * Mhi + N * Mhi, N))
        Qkwmz = ml.zeros((N, N * Mlo))
        Qkwmp = ml.zeros((N, N * Mlo * Mhi + N * Mhi))
        Dlo = ml.matrix(D0)
        for i in range(k):
            Dlo = Dlo + D[i + 1]
        Qkwmm = Dlo
        Qkwzp = ml.zeros((N * Mlo, N * Mlo * Mhi + N * Mhi))
        Qkwzm = ml.zeros((N * Mlo, N))
        Qkwzz = ml.zeros((N * Mlo, N * Mlo))
        kix = 0
        for i in range(k, K):
            kix2 = 0
            for j in range(k):
                bs = N * M[j] * M[i]
                bs2 = N * M[j]
                Qkwpp[kix:kix + bs,
                      kix:kix + bs] = np.kron(I, np.kron(ml.eye(M[j]), S[i]))
                Qkwpz[kix:kix + bs,
                      kix2:kix2 + bs2] = np.kron(I,
                                                 np.kron(ml.eye(M[j]), s[i]))
                Qkwzp[kix2:kix2 + bs2,
                      kix:kix + bs] = np.kron(D[i + 1],
                                              np.kron(ml.eye(M[j]), sigma[i]))
                kix += bs
                kix2 += bs2
        for i in range(k, K):
            bs = N * M[i]
            Qkwpp[kix:kix + bs, :][:, kix:kix + bs] = np.kron(I, S[i])
            Qkwpm[kix:kix + bs, :] = np.kron(I, s[i])
            Qkwmp[:, kix:kix + bs] = np.kron(D[i + 1], sigma[i])
            kix += bs
        kix = 0
        for j in range(k):
            bs = N * M[j]
            Qkwzz[kix:kix + bs, kix:kix +
                  bs] = np.kron(Dlo, ml.eye(M[j])) + np.kron(I, S[j])
            Qkwzm[kix:kix + bs, :] = np.kron(I, s[j])
            kix += bs

        if Qkwzz.shape[0] > 0:
            Psikw = FluidFundamentalMatrices(
                Qkwpp + Qkwpz * (-Qkwzz).I * Qkwzp,
                Qkwpm + Qkwpz * (-Qkwzz).I * Qkwzm, Qkwmp, Qkwmm, 'P',
                precision)
        else:
            Psikw = FluidFundamentalMatrices(Qkwpp, Qkwpm, Qkwmp, Qkwmm, 'P',
                                             precision)
        Psiw.append(Psikw)

        Qwzp.append(Qkwzp)
        Qwmp.append(Qkwmp)
        Qwpp.append(Qkwpp)
        Qwmz.append(Qkwmz)
        Qwpz.append(Qkwpz)
        Qwzz.append(Qkwzz)
        Qwmm.append(Qkwmm)
        Qwpm.append(Qkwpm)
        Qwzm.append(Qkwzm)

    # step 3. calculate Phi vectors
    # =============================
    lambdaS = sum(lambd)
    phi = [(1 - ro) * kappa * (-D0) / lambdaS]
    q0 = [[]]
    qL = [[]]
    for k in range(K - 1):
        sDk = ml.matrix(D0)
        for j in range(k + 1):
            sDk = sDk + D[j + 1]
        # pk
        pk = sum(lambd[:k + 1]) / lambdaS - (1 - ro) * kappa * np.sum(
            sDk, 1) / lambdaS
        # A^(k,1)
        Qwzpk = Qwzp[k + 1]
        vix = 0
        Ak = []
        for ii in range(k + 1):
            bs = N * M[ii]
            V1 = Qwzpk[vix:vix + bs, :]
            Ak.append(
                np.kron(I, sigma[ii]) *
                (-np.kron(sDk, ml.eye(M[ii])) - np.kron(I, S[ii])).I *
                (np.kron(I, s[ii]) + V1 * Psiw[k + 1]))
            vix += bs
        # B^k
        Qwmpk = Qwmp[k + 1]
        Bk = Qwmpk * Psiw[k + 1]
        ztag = phi[0] * ((-D0).I * D[k + 1] * Ak[k] - Ak[0] + (-D0).I * Bk)
        for i in range(k):
            ztag += phi[i + 1] * (Ak[i] - Ak[i + 1]) + phi[0] * (
                -D0).I * D[i + 1] * Ak[i]
        Mx = ml.eye(Ak[k].shape[0]) - Ak[k]
        Mx[:, 0] = ml.ones((N, 1))
        phi.append(
            ml.hstack((pk, ztag[:, 1:])) *
            Mx.I)  # phi(k) = Psi^(k)_k * p(k). Psi^(k)_i = phi(i) / p(k)

        q0.append(phi[0] * (-D0).I)
        qLii = []
        for ii in range(k + 1):
            qLii.append((phi[ii + 1] - phi[ii] + phi[0] *
                         (-D0).I * D[ii + 1]) * np.kron(I, sigma[ii]) *
                        (-np.kron(sDk, ml.eye(M[ii])) - np.kron(I, S[ii])).I)
        qL.append(ml.hstack(qLii))

    # step 4. calculate performance measures
    # ======================================
    Ret = []
    for k in classes:

        sD0k = ml.matrix(D0)
        for i in range(k):
            sD0k += D[i + 1]

        if k < K - 1:
            # step 4.1 calculate distribution of the workload process right
            # before the arrivals of class k jobs
            # ============================================================
            if Qwzz[k].shape[0] > 0:
                Kw = Qwpp[k] + Qwpz[k] * (
                    -Qwzz[k]).I * Qwzp[k] + Psiw[k] * Qwmp[k]
            else:
                Kw = Qwpp[k] + Psiw[k] * Qwmp[k]
            BM = ml.zeros((0, 0))
            CM = ml.zeros((0, N))
            DM = ml.zeros((0, 0))
            for i in range(k):
                BM = la.block_diag(BM, np.kron(I, S[i]))
                CM = ml.vstack((CM, np.kron(I, s[i])))
                DM = la.block_diag(DM, np.kron(D[k + 1], ml.eye(M[i])))
            if k > 0:
                Kwu = ml.vstack((ml.hstack(
                    (Kw, (Qwpz[k] + Psiw[k] * Qwmz[k]) * (-Qwzz[k]).I * DM)),
                                 ml.hstack((ml.zeros(
                                     (BM.shape[0], Kw.shape[1])), BM))))
                Bwu = ml.vstack((Psiw[k] * D[k + 1], CM))
                iniw = ml.hstack(
                    (q0[k] * Qwmp[k] + qL[k] * Qwzp[k], qL[k] * DM))
                pwu = q0[k] * D[k + 1]
            else:
                Kwu = Kw
                Bwu = Psiw[k] * D[k + 1]
                iniw = pm * Qwmp[k]
                pwu = pm * D[k + 1]

            norm = np.sum(pwu) + np.sum(iniw * (-Kwu).I * Bwu)
            pwu = pwu / norm
            iniw = iniw / norm

            # step 4.2 create the fluid model whose first passage time equals the
            # WAITING time of the low priority customers
            # ==================================================================
            KN = Kwu.shape[0]
            Qspp = ml.zeros(
                (KN + N * np.sum(M[k + 1:]), KN + N * np.sum(M[k + 1:])))
            Qspm = ml.zeros((KN + N * np.sum(M[k + 1:]), N))
            Qsmp = ml.zeros((N, KN + N * np.sum(M[k + 1:])))
            Qsmm = sD0k + D[k + 1]
            kix = 0
            for i in range(k + 1, K):
                bs = N * M[i]
                Qspp[KN + kix:KN + kix + bs, :][:, KN + kix:KN + kix +
                                                bs] = np.kron(I, S[i])
                Qspm[KN + kix:KN + kix + bs, :] = np.kron(I, s[i])
                Qsmp[:, KN + kix:KN + kix + bs] = np.kron(D[i + 1], sigma[i])
                kix += bs

            Qspp[:KN, :][:, :KN] = Kwu
            Qspm[:KN, :] = Bwu
            inis = ml.hstack((iniw, ml.zeros((1, N * np.sum(M[k + 1:])))))

            # calculate fundamental matrix
            Psis = FluidFundamentalMatrices(Qspp, Qspm, Qsmp, Qsmm, 'P',
                                            precision)

            # step 4.3. calculate the performance measures
            # ==========================================
            argIx = 0
            while argIx < len(argv):
                if argIx in eaten:
                    argIx += 1
                    continue
                elif type(argv[argIx]) is str and argv[argIx] == "stMoms":
                    # MOMENTS OF THE SOJOURN TIME
                    # ~~~~~~~~~~~~~~~~~~~~~~~~~~~
                    numOfSTMoms = argv[argIx + 1]
                    # calculate waiting time moments
                    Pn = [Psis]
                    wtMoms = []
                    for n in range(1, numOfSTMoms + 1):
                        A = Qspp + Psis * Qsmp
                        B = Qsmm + Qsmp * Psis
                        C = -2 * n * Pn[n - 1]
                        bino = 1
                        for i in range(1, n):
                            bino = bino * (n - i + 1) / i
                            C += bino * Pn[i] * Qsmp * Pn[n - i]
                        P = la.solve_sylvester(A, B, -C)
                        Pn.append(P)
                        wtMoms.append(np.sum(inis * P * (-1)**n) / 2**n)
                    # calculate RESPONSE time moments
                    Pnr = [np.sum(inis * Pn[0]) * sigma[k]]
                    rtMoms = []
                    for n in range(1, numOfSTMoms + 1):
                        P = n * Pnr[n - 1] * (-S[k]).I + (-1)**n * np.sum(
                            inis * Pn[n]) * sigma[k] / 2**n
                        Pnr.append(P)
                        rtMoms.append(
                            np.sum(P) + np.sum(pwu) * math.factorial(n) *
                            np.sum(sigma[k] * (-S[k]).I**n))
                    Ret.append(rtMoms)
                    argIx += 1
                elif type(argv[argIx]) is str and argv[argIx] == "stDistr":
                    # DISTRIBUTION OF THE SOJOURN TIME
                    # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
                    stCdfPoints = argv[argIx + 1]
                    res = []
                    for t in stCdfPoints:
                        L = erlMaxOrder
                        lambdae = L / t / 2
                        Psie = FluidFundamentalMatrices(
                            Qspp - lambdae * ml.eye(Qspp.shape[0]), Qspm, Qsmp,
                            Qsmm - lambdae * ml.eye(Qsmm.shape[0]), 'P',
                            precision)
                        Pn = [Psie]
                        pr = (np.sum(pwu) + np.sum(inis * Psie)) * (1 - np.sum(
                            sigma[k] *
                            (ml.eye(S[k].shape[0]) - S[k] / 2 / lambdae).I**L))
                        for n in range(1, L):
                            A = Qspp + Psie * Qsmp - lambdae * ml.eye(
                                Qspp.shape[0])
                            B = Qsmm + Qsmp * Psie - lambdae * ml.eye(
                                Qsmm.shape[0])
                            C = 2 * lambdae * Pn[n - 1]
                            for i in range(1, n):
                                C += Pn[i] * Qsmp * Pn[n - i]
                            P = la.solve_sylvester(A, B, -C)
                            Pn.append(P)
                            pr += np.sum(inis * P) * (
                                1 - np.sum(sigma[k] *
                                           (np.eye(S[k].shape[0]) -
                                            S[k] / 2 / lambdae).I**(L - n)))
                        res.append(pr)
                    Ret.append(np.array(res))
                    argIx += 1
                elif type(argv[argIx]) is str and (argv[argIx] == "ncMoms" or
                                                   argv[argIx] == "ncDistr"):
                    W = (-np.kron(sD - D[k + 1], ml.eye(M[k])) -
                         np.kron(I, S[k])).I * np.kron(D[k + 1], ml.eye(M[k]))
                    iW = (ml.eye(W.shape[0]) - W).I
                    w = np.kron(ml.eye(N), sigma[k])
                    omega = (-np.kron(sD - D[k + 1], ml.eye(M[k])) -
                             np.kron(I, S[k])).I * np.kron(I, s[k])
                    if argv[argIx] == "ncMoms":
                        # MOMENTS OF THE NUMBER OF JOBS
                        # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
                        numOfQLMoms = argv[argIx + 1]
                        # first calculate it at departure instants
                        Psii = [Psis]
                        QLDPn = [inis * Psii[0] * w * iW]
                        for n in range(1, numOfQLMoms + 1):
                            A = Qspp + Psis * Qsmp
                            B = Qsmm + Qsmp * Psis
                            C = n * Psii[n - 1] * D[k + 1]
                            bino = 1
                            for i in range(1, n):
                                bino = bino * (n - i + 1) / i
                                C = C + bino * Psii[i] * Qsmp * Psii[n - i]
                            P = la.solve_sylvester(A, B, -C)
                            Psii.append(P)
                            QLDPn.append(n * QLDPn[n - 1] * iW * W +
                                         inis * P * w * iW)
                        for n in range(numOfQLMoms + 1):
                            QLDPn[n] = (QLDPn[n] +
                                        pwu * w * iW**(n + 1) * W**n) * omega
                        # now calculate it at random time instance
                        QLPn = [pi]
                        qlMoms = []
                        iTerm = (ml.ones((N, 1)) * pi - sD).I
                        for n in range(1, numOfQLMoms + 1):
                            sumP = np.sum(QLDPn[n]) + n * np.sum(
                                (QLDPn[n - 1] - QLPn[n - 1] * D[k + 1] /
                                 lambd[k]) * iTerm * D[k + 1])
                            P = sumP * pi + n * (
                                QLPn[n - 1] * D[k + 1] -
                                QLDPn[n - 1] * lambd[k]) * iTerm
                            QLPn.append(P)
                            qlMoms.append(np.sum(P))
                        qlMoms = MomsFromFactorialMoms(qlMoms)
                        Ret.append(qlMoms)
                        argIx += 1
                    elif argv[argIx] == "ncDistr":
                        # DISTRIBUTION OF THE NUMBER OF JOBS
                        # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
                        numOfQLProbs = argv[argIx + 1]
                        Psid = FluidFundamentalMatrices(
                            Qspp, Qspm, Qsmp, sD0k, 'P', precision)
                        Pn = [Psid]
                        XDn = inis * Psid * w
                        dqlProbs = (XDn + pwu * w) * omega
                        for n in range(1, numOfQLProbs):
                            A = Qspp + Psid * Qsmp
                            B = sD0k + Qsmp * Psid
                            C = Pn[n - 1] * D[k + 1]
                            for i in range(1, n):
                                C += Pn[i] * Qsmp * Pn[n - i]
                            P = la.solve_sylvester(A, B, -C)
                            Pn.append(P)
                            XDn = XDn * W + inis * P * w
                            dqlProbs = ml.vstack(
                                (dqlProbs, (XDn + pwu * w * W**n) * omega))
                        # now calculate it at random time instance
                        iTerm = -(sD - D[k + 1]).I
                        qlProbs = lambd[k] * dqlProbs[0, :] * iTerm
                        for n in range(1, numOfQLProbs):
                            P = (qlProbs[n - 1, :] * D[k + 1] + lambd[k] *
                                 (dqlProbs[n, :] - dqlProbs[n - 1, :])) * iTerm
                            qlProbs = ml.vstack((qlProbs, P))
                        qlProbs = np.sum(qlProbs, 1).A.flatten()
                        Ret.append(qlProbs)
                        argIx += 1
                else:
                    raise Exception("MMAPPH1NPPR: Unknown parameter " +
                                    str(argv[argIx]))
                argIx += 1
        elif k == K - 1:
            # step 3. calculate the performance measures
            # ==========================================
            argIx = 0
            while argIx < len(argv):
                if argIx in eaten:
                    argIx += 1
                    continue
                elif type(argv[argIx]) is str and (argv[argIx] == "stMoms" or
                                                   argv[argIx] == "stDistr"):
                    Kw = Qwpp[k] + Qwpz[k] * (
                        -Qwzz[k]).I * Qwzp[k] + Psiw[k] * Qwmp[k]
                    AM = ml.zeros((0, 0))
                    BM = ml.zeros((0, 0))
                    CM = ml.zeros((0, 1))
                    DM = ml.zeros((0, 0))
                    for i in range(k):
                        AM = la.block_diag(
                            AM,
                            np.kron(ml.ones((N, 1)),
                                    np.kron(ml.eye(M[i]), s[k])))
                        BM = la.block_diag(BM, S[i])
                        CM = ml.vstack((CM, s[i]))
                        DM = la.block_diag(DM, np.kron(D[k + 1], ml.eye(M[i])))
                    Z = ml.vstack((ml.hstack(
                        (Kw, ml.vstack((AM, ml.zeros(
                            (N * M[k], AM.shape[1])))))),
                                   ml.hstack((ml.zeros(
                                       (BM.shape[0], Kw.shape[1])), BM))))
                    z = ml.vstack((ml.zeros(
                        (AM.shape[0], 1)), np.kron(ml.ones((N, 1)), s[k]), CM))
                    iniw = ml.hstack((q0[k] * Qwmp[k] + qL[k] * Qwzp[k],
                                      ml.zeros((1, BM.shape[0]))))
                    zeta = iniw / np.sum(iniw * (-Z).I * z)
                    if argv[argIx] == "stMoms":
                        # MOMENTS OF THE SOJOURN TIME
                        # ~~~~~~~~~~~~~~~~~~~~~~~~~~~
                        numOfSTMoms = argv[argIx + 1]
                        rtMoms = []
                        for i in range(1, numOfSTMoms + 1):
                            rtMoms.append(
                                np.sum(
                                    math.factorial(i) * zeta *
                                    (-Z).I**(i + 1) * z))
                        Ret.append(rtMoms)
                        argIx += 1
                    if argv[argIx] == "stDistr":
                        # DISTRIBUTION OF THE SOJOURN TIME
                        # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
                        stCdfPoints = argv[argIx + 1]
                        rtDistr = []
                        for t in stCdfPoints:
                            rtDistr.append(
                                np.sum(zeta * (-Z).I *
                                       (ml.eye(Z.shape[0]) - la.expm(Z * t)) *
                                       z))
                        Ret.append(np.array(rtDistr))
                        argIx += 1
                elif type(argv[argIx]) is str and (argv[argIx] == "ncMoms" or
                                                   argv[argIx] == "ncDistr"):
                    L = ml.zeros((N * np.sum(M), N * np.sum(M)))
                    B = ml.zeros((N * np.sum(M), N * np.sum(M)))
                    F = ml.zeros((N * np.sum(M), N * np.sum(M)))
                    kix = 0
                    for i in range(K):
                        bs = N * M[i]
                        F[kix:kix + bs, :][:, kix:kix + bs] = np.kron(
                            D[k + 1], ml.eye(M[i]))
                        L[kix:kix + bs, :][:, kix:kix + bs] = np.kron(
                            sD0k, ml.eye(M[i])) + np.kron(I, S[i])
                        if i < K - 1:
                            L[kix:kix + bs, :][:,
                                               N * np.sum(M[:k]):] = np.kron(
                                                   I, s[i] * sigma[k])
                        else:
                            B[kix:kix + bs, :][:,
                                               N * np.sum(M[:k]):] = np.kron(
                                                   I, s[i] * sigma[k])
                        kix += bs
                    R = QBDFundamentalMatrices(B, L, F, 'R', precision)
                    p0 = ml.hstack((qL[k], q0[k] * np.kron(I, sigma[k])))
                    p0 = p0 / np.sum(p0 * (ml.eye(R.shape[0]) - R).I)
                    if argv[argIx] == "ncMoms":
                        # MOMENTS OF THE NUMBER OF JOBS
                        # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
                        numOfQLMoms = argv[argIx + 1]
                        qlMoms = []
                        for i in range(1, numOfQLMoms + 1):
                            qlMoms.append(
                                np.sum(
                                    math.factorial(i) * p0 * R**i *
                                    (ml.eye(R.shape[0]) - R).I**(i + 1)))
                        Ret.append(MomsFromFactorialMoms(qlMoms))
                    elif argv[argIx] == "ncDistr":
                        # DISTRIBUTION OF THE NUMBER OF JOBS
                        # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
                        numOfQLProbs = argv[argIx + 1]
                        qlProbs = [np.sum(p0)]
                        for i in range(1, numOfQLProbs):
                            qlProbs.append(np.sum(p0 * R**i))
                        Ret.append(np.array(qlProbs))
                    argIx += 1
                else:
                    raise Exception("MMAPPH1NPPR: Unknown parameter " +
                                    str(argv[argIx]))
                argIx += 1

    if len(Ret) == 1:
        return Ret[0]
    else:
        return Ret
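Again for illustration only: a minimal call of MMAPPH1NPPR, with the same invented two-class MMAP and PH parameters as in the sketch above, might look as follows (the import path butools.queues is an assumption; here D1 drives the low and D2 the high priority arrivals).

import numpy.matlib as ml
from butools.queues import MMAPPH1NPPR   # assumed package layout

# hypothetical two-priority MMAP: D1 = low priority, D2 = high priority arrivals
D0 = ml.matrix([[-3.0, 1.0], [2.0, -5.0]])
D1 = ml.matrix([[1.5, 0.5], [1.0, 0.5]])
D2 = ml.matrix([[0.0, 0.0], [0.5, 1.0]])
sigma = [ml.matrix([[1.0, 0.0]]), ml.matrix([[0.5, 0.5]])]
S = [ml.matrix([[-8.0, 8.0], [0.0, -8.0]]),
     ml.matrix([[-6.0, 0.0], [0.0, -10.0]])]

# first two sojourn time moments of the low and of the high priority class
stMomsLow, stMomsHigh = MMAPPH1NPPR([D0, D1, D2], sigma, S, "stMoms", 2)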
Example #22
def test9():
    
    ######################################
    ## STANDARD COOLEY-HANSEN CIA MODEL ##
    ######################################
    # Load and solve the manually linearized model
    rbc1 = pm.newMOD(models.abcs_rbcs.cooley_hansen_cia_linear,mesg=True)
    rbc1.modsolvers.pyuhlig.solve()
    rbc1.modsolvers.forklein.solve()
    
    # Load and solve the automatically linearized model
    rbc2 = pm.newMOD(models.abcs_rbcs.cooley_hansen_cia_cf,mesg=True)
    rbc2.modsolvers.forkleind.solve()
    
    # Check equivalence of steady states
    for keyo in rbc1.sstate.keys():
        if keyo in rbc2.sstate.keys():
            assert round(rbc1.sstate[keyo],5) == round(rbc2.sstate[keyo],5)
    
    # Check equivalence of results
    nexo = len(rbc2.vardic['exo']['var'])
    nendo = len(rbc2.vardic['endo']['var'])
    
    modlin1 = MAT.hstack((rbc1.modsolvers.pyuhlig.Q,rbc1.modsolvers.pyuhlig.P))
    modlin1 = [round(modlin1[0,i1],5) for i1 in range(modlin1.shape[1])]
    modnlin1 = rbc2.modsolvers.forkleind.P[-nendo:,:]
    modnlin1 = [round(modnlin1[0,i1],5) for i1 in range(modnlin1.shape[1])]
    print('Comparison: Standard CIA model')
    print("Linear is: ", modlin1)
    print('----------------------')
    print("Nonlinear is: ", modnlin1)
    assert modlin1 == modnlin1
    
    modlin2 = MAT.hstack((rbc1.modsolvers.pyuhlig.S,rbc1.modsolvers.pyuhlig.R))
    modlin2 = [[round(modlin2[i2,i1],5) for i1 in range(modlin2.shape[1])] for i2 in range(modlin2.shape[0])]
    modnlin2 = rbc2.modsolvers.forkleind.F
    modnlin2 = [[round(modnlin2[i2,i1],5) for i1 in range(modnlin2.shape[1])] for i2 in range(modnlin2.shape[0])]
    print(modlin2)
    print('----------------------')
    print(modnlin2)
    assert modlin2 == modnlin2
    

    ######################################################
    ## STANDARD COOLEY-HANSEN CIA MODEL WITH SEIGNORAGE ##
    ######################################################
    # Load and solve the manually linearized model
    rbc1 = pm.newMOD(models.abcs_rbcs.cooley_hansen_cia_seignorage_linear,mesg=True)
    rbc1.modsolvers.pyuhlig.solve()
    rbc1.modsolvers.forklein.solve()
    
    # Load and solve the automatically linearized model
    rbc2 = pm.newMOD(models.abcs_rbcs.cooley_hansen_cia_seignorage_cf,mesg=True)
    rbc2.modsolvers.forkleind.solve()
    
    # Check equivalence of steady states
    for keyo in rbc1.sstate.keys():
        if keyo in rbc2.sstate.keys():
            assert round(rbc1.sstate[keyo],5) == round(rbc2.sstate[keyo],5)
    
    # Check equivalence of results
    nexo = len(rbc2.vardic['exo']['var'])
    nendo = len(rbc2.vardic['endo']['var'])
    
    modlin1 = MAT.hstack((rbc1.modsolvers.pyuhlig.Q,rbc1.modsolvers.pyuhlig.P))
    modlin1 = [round(modlin1[0,i1],5) for i1 in range(modlin1.shape[1])]
    modnlin1 = rbc2.modsolvers.forkleind.P[-nendo:,:]
    modnlin1 = [round(modnlin1[0,i1],5) for i1 in range(modnlin1.shape[1])]
    print('Comparison: Standard CIA model with seignorage')
    print("Linear is: ", modlin1)
    print('----------------------')
    print("Nonlinear is: ", modnlin1)
    assert modlin1 == modnlin1
    
    modlin2 = MAT.hstack((rbc1.modsolvers.pyuhlig.S,rbc1.modsolvers.pyuhlig.R))
    modlin2 = [[round(modlin2[i2,i1],5) for i1 in range(modlin2.shape[1])] for i2 in range(modlin2.shape[0])]
    modnlin2 = rbc2.modsolvers.forkleind.F
    modnlin2 = [[round(modnlin2[i2,i1],5) for i1 in range(modnlin2.shape[1])] for i2 in range(modnlin2.shape[0])]
    print(modlin2)
    print('----------------------')
    print(modnlin2)
    assert modlin2 == modnlin2


    ######################################################################
    ## STANDARD COOLEY-HANSEN CIA MODEL WITH SEIGNORAGE AND CES UTILITY ##
    ######################################################################
    # Be careful, here in the log-linearized version we have two endogenous states, k and mg
    # But in the automatically linearized version using the Jacobian we only have one, k
    
    # Load and solve the manually linearized model
    rbc1 = pm.newMOD(models.abcs_rbcs.cooley_hansen_cia_seignorage_ces_linear,mesg=True)
    rbc1.modsolvers.pyuhlig.solve()
    rbc1.modsolvers.forklein.solve()
    
    # Load and solve the automatically linearized model
    rbc2 = pm.newMOD(models.abcs_rbcs.cooley_hansen_cia_seignorage_ces_cf,mesg=True)
    rbc2.modsolvers.forkleind.solve()
    
    # Check equivalence of steady states
    for keyo in rbc1.sstate.keys():
        if keyo in rbc2.sstate.keys():
            assert round(rbc1.sstate[keyo],5) == round(rbc2.sstate[keyo],5)
    
    # Check equivalence of results
    nexo = len(rbc2.vardic['exo']['var'])
    nendo = len(rbc2.vardic['endo']['var'])
    
    modlin1 = MAT.hstack((rbc1.modsolvers.pyuhlig.Q[:nendo,:],rbc1.modsolvers.pyuhlig.P[:nendo,:nendo]))
    modlin1 = [[round(modlin1[i2,i1],5) for i1 in range(modlin1.shape[1])] for i2 in range(modlin1.shape[0])]
    modnlin1 = rbc2.modsolvers.forkleind.P[-nendo:,:]
    modnlin1 = [[round(modnlin1[i2,i1],5) for i1 in range(modnlin1.shape[1])] for i2 in range(modnlin1.shape[0])]
    print('Comparison: Standard CIA model with seignorage and CES utility')
    print("Linear is: ", modlin1)
    print('----------------------')
    print("Nonlinear is: ", modnlin1)
    assert modlin1 == modnlin1
    
    modlin2 = MAT.hstack((rbc1.modsolvers.pyuhlig.S[:,:],rbc1.modsolvers.pyuhlig.R[:,:nendo]))
    modlin2 = [[round(modlin2[i2,i1],5) for i1 in range(modlin2.shape[1])] for i2 in range(modlin2.shape[0])]
    modnlin2 = rbc2.modsolvers.forkleind.F[:-1,:]
    modnlin2 = [[round(modnlin2[i2,i1],5) for i1 in range(modnlin2.shape[1])] for i2 in range(modnlin2.shape[0])]
    print(modlin2)
    print('----------------------')
    print(modnlin2)
    assert modlin2 == modnlin2
Example #23
def CanonicalFromDPH3 (alpha,A,prec=1e-14):
    """
    Returns the canonical form of an order-3 discrete phase-type 
    distribution.
    
    Parameters
    ----------
    alpha : matrix, shape (1,3)
        Initial vector of the discrete phase-type distribution
    A : matrix, shape (3,3)
        Transition probability matrix of the discrete phase-type
        distribution
    prec : double, optional
      Numerical precision for checking the input, default value
      is 1e-14
    
    Returns
    -------
    beta : matrix, shape (1,3)
      The initial probability vector of the canonical form
    B : matrix, shape (3,3)
      Transition probability matrix of the canonical form
    """
   
    if butools.checkInput and not CheckMGRepresentation (alpha, A, prec):
        raise Exception("CanonicalFromDPH3: Input is not a valid DPH representation!")

    if butools.checkInput and (A.shape[0]!=3 or A.shape[1]!=3):
        raise Exception("CanonicalFromDPH3: Dimension must be 3!")

    ev = la.eigvals(A)
    ix = np.argsort(-np.abs(np.real(ev)))
    lambd = ev[ix]
    eye = ml.eye(3)

    a0 = -lambd[0]*lambd[1]*lambd[2]
    a1 = lambd[0]*lambd[1]+lambd[0]*lambd[2]+lambd[1]*lambd[2]
    a2 = -lambd[0]-lambd[1]-lambd[2]
    e = ml.matrix([[1,1,1]]).T

    if np.real(lambd[0])>0 and np.real(lambd[1])>=0 and np.real(lambd[2])>=0:
        #PPP case
        alphaout,A2 = CanonicalFromPH3(alpha,A-eye,prec)
        Aout = A2+eye
    elif np.real(lambd[0])>0 and np.real(lambd[1])>=0 and np.real(lambd[2])<0:
        #PPN case
        x1 = lambd[0]
        x2 = lambd[1]+lambd[2]
        x3=lambd[1]*lambd[2]/(lambd[1]+lambd[2]-1)
        Aout=ml.matrix([[x1,1-x1,0],[0,x2,1-x2],[0,x3,0]])
        b3=1/(1-x3)*(e-A*e)
        b2=1/(1-x2)*A*b3
        b1=e-b2-b3
        B=ml.hstack((b1,b2,b3))
        alphaout=alpha*B
    elif np.real(lambd[0])>0 and np.real(lambd[1])<0 and np.real(lambd[2])>=0:
        #PNP case
        x1=-a2
        x2=(a0-a1*a2)/(a2*(1+a2))
        x3=a0*(1+a2)/(a0-a2-a1*a2-a2**2)
        Aout=ml.matrix([[x1,1-x1,0],[x2,0,1-x2],[0,x3,0]])
        b3=1/(1-x3)*(e-A*e)
        b2=1/(1-x2)*A*b3
        b1=e-b2-b3
        if alpha*b1>=0:
            B=ml.hstack((b1,b2,b3))
            alphaout = alpha*B
        else:
            #Set the initial vector first element to 0
            x33=0
            a1=-1
            while x33 <= 1:
                [a1,x1,x2,x3,B]=firstInitElem(x33,lambd,alpha,A)
                if a1 >= 0 and x1 >= 0 and x2 >= 0 and x3 >= 0 and x3+x33 < 1:
                    break
                x33=x33+0.01
            
            if a1 >= 0:
                Aout=ml.matrix([[x1,1-x1,0],[x2,0,1-x2],[0,x3,x33]])
                alphaout=alpha*B
            else:
                #PNP+
                x1=lambd[2]
                x2=lambd[0]+lambd[1]
                x3=lambd[0]*lambd[1]/(lambd[0]+lambd[1]-1)
                Aout=ml.matrix([[x1,0,0],[0,x2,1-x2],[0,x3,0]])

                p1=alpha*(e-A*e)
                p2=alpha*A*(e-A*e)
                d1=(1-lambd[0])*((1-lambd[1])*(1-lambd[2])+(-1+lambd[1]+lambd[2])*p1-p2)/((lambd[0]-lambd[1])*(lambd[0]-lambd[2]))
                d2=(lambd[1]-1)*((1-lambd[0])*(1-lambd[2])+(-1+lambd[0]+lambd[2])*p1-p2)/((lambd[0]-lambd[1])*(lambd[1]-lambd[2]))
                d3=(lambd[2]-1)*((1-lambd[0])*(1-lambd[1])+(-1+lambd[0]+lambd[1])*p1-p2)/((lambd[1]-lambd[2])*(lambd[2]-lambd[0]))
                alphaout=ml.matrix([[d3/(1-lambd[2]),(d1*lambd[0]+d2*lambd[1])/((1-lambd[0])*(1-lambd[1])),(d1+d2)*(1-lambd[0]-lambd[1])/((1-lambd[0])*(1-lambd[1]))]])

                if np.min(alphaout) < 0 or np.min(np.min(Aout)) < 0:
                    raise Exception("DPH3Canonical: Unhandled PNP case!")
    elif np.real(lambd[0])>0 and np.real(lambd[1])<0 and np.real(lambd[2])<0:
        #PNN case
        if np.all(np.isreal(lambd)) or np.abs(lambd[1])**2 <= 2*lambd[0]*(-np.real(lambd[1])):
            x1=-a2
            x2=-a1/(1+a2)
            x3=-a0/(1+a1+a2)
            Aout=ml.matrix([[x1,1-x1,0],[x2,0,1-x2],[x3,0,0]])
            b3=1/(1-x3)*(e-A*e)
            b2=1/(1-x2)*A*b3
            b1=e-b2-b3
            B=ml.hstack((b1,b2,b3))
            alphaout=alpha*B
        else:
            alphaout, A2 = CanonicalFromPH3(alpha, A-eye, prec)
            Aout = A2 + eye
    return (np.real(alphaout),np.real(Aout))
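A minimal usage sketch for CanonicalFromDPH3 (the order-3 DPH below is made up, and the import path butools.dph is assumed from the usual butools layout):

import numpy.matlib as ml
from butools.dph import CanonicalFromDPH3   # assumed package layout

# hypothetical order-3 discrete phase-type distribution
alpha = ml.matrix([[0.5, 0.3, 0.2]])
A = ml.matrix([[0.2, 0.5, 0.1],
               [0.1, 0.3, 0.4],
               [0.0, 0.2, 0.5]])

beta, B = CanonicalFromDPH3(alpha, A)
# (beta, B) represents the same distribution as (alpha, A), but in canonical form
print(beta)
print(B)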
Example #24
def MMAPPH1NPPR(D, sigma, S, *argv):
    """
    Returns various performance measures of a continuous time 
    MMAP[K]/PH[K]/1 non-preemptive priority queue, see [1]_.
    
    Parameters
    ----------
    D : list of matrices of shape (N,N), length (K+1)
        The D0...DK matrices of the arrival process.
        D1 corresponds to the lowest, DK to the highest priority.
    sigma : list of row vectors, length (K)
        The list containing the initial probability vectors of the service
        time distributions of the various customer types. The lengths of the
        vectors do not have to be the same.
    S : list of square matrices, length (K)
        The transient generators of the phase type distributions representing
        the service time of the jobs belonging to various types.
    further parameters : 
        The rest of the function parameters specify the options
        and the performance measures to be computed.
    
        The supported performance measures and options in this 
        function are:
    
        +----------------+--------------------+----------------------------------------+
        | Parameter name | Input parameters   | Output                                 |
        +================+====================+========================================+
        | "ncMoms"       | Number of moments  | The moments of the number of customers |
        +----------------+--------------------+----------------------------------------+
        | "ncDistr"      | Upper limit K      | The distribution of the number of      |
        |                |                    | customers from level 0 to level K-1    |
        +----------------+--------------------+----------------------------------------+
        | "stMoms"       | Number of moments  | The sojourn time moments               |
        +----------------+--------------------+----------------------------------------+
        | "stDistr"      | A vector of points | The sojourn time distribution at the   |
        |                |                    | requested points (cumulative, cdf)     |
        +----------------+--------------------+----------------------------------------+
        | "prec"         | The precision      | Numerical precision used as a stopping |
        |                |                    | condition when solving the Riccati and |
        |                |                    | the matrix-quadratic equations         |
        +----------------+--------------------+----------------------------------------+
        | "erlMaxOrder"  | Integer number     | The maximal Erlang order used in the   |
        |                |                    | erlangization procedure. The default   |
        |                |                    | value is 200.                          |
        +----------------+--------------------+----------------------------------------+
        | "classes"      | Vector of integers | Only the performance measures          |
        |                |                    | belonging to these classes are         |
        |                |                    | returned. If not given, all classes    |
        |                |                    | are analyzed.                          |
        +----------------+--------------------+----------------------------------------+
        
        (The quantities related to the number of customers in 
        the system include the customer in the server, and the 
        sojourn time related quantities include the service 
        times as well)
    
    Returns
    -------
    Ret : list of the performance measures
        Each entry of the list corresponds to a performance 
        measure requested. Each entry is a matrix, where the
        columns belong to the various job types.
        If there is just a single item, 
        then it is not put into a list.
    
    References
    ----------
    .. [1] G. Horvath, "Efficient analysis of the MMAP[K]/PH[K]/1
           priority queue", European Journal of Operational 
           Research, 246(1), 128-139, 2015.
    """
    
    K = len(D)-1

    # parse options
    eaten = []
    erlMaxOrder = 200
    precision = 1e-14
    classes = np.arange(0,K)
    for i in range(len(argv)):
        if argv[i]=="prec":
            precision = argv[i+1]
            eaten.append(i)
            eaten.append(i+1) 
        elif argv[i]=="erlMaxOrder":
            erlMaxOrder = argv[i+1]
            eaten.append(i)
            eaten.append(i+1) 
        elif argv[i]=="classes":
            classes = np.array(argv[i+1])-1
            eaten.append(i)
            eaten.append(i+1) 
    
    if butools.checkInput and not CheckMMAPRepresentation(D):
        raise Exception('MMAPPH1NPPR: The arrival process is not a valid MMAP representation!')
    
    if butools.checkInput:
        for k in range(K):
            if not CheckPHRepresentation(sigma[k],S[k]):
                raise Exception('MMAPPH1NPPR: the vector and matrix describing the service times is not a valid PH representation!')

    # some preparation
    D0 = D[0]
    N = D0.shape[0]
    I = ml.eye(N)
    sD = ml.zeros((N,N))
    for Di in D:
        sD += Di
    
    s = []
    M = np.empty(K, dtype=int)  # service phase counts must be integers (used as indices below)
    for i in range(K):
        s.append(np.sum(-S[i],1))
        M[i] = sigma[i].size
    
    # step 1. solution of the workload process of the joint queue
    # ===========================================================
    sM = np.sum(M)
    Qwmm = ml.matrix(D0)
    Qwpm = ml.zeros((N*sM, N))
    Qwmp = ml.zeros((N, N*sM))
    Qwpp = ml.zeros((N*sM, N*sM)) 
    kix = 0
    for i in range(K):
        Qwmp[:,kix:kix+N*M[i]] = np.kron(D[i+1], sigma[i])
        Qwpm[kix:kix+N*M[i],:] = np.kron(I,s[i])
        Qwpp[kix:kix+N*M[i],:][:,kix:kix+N*M[i]] = np.kron(I,S[i])
        kix += N*M[i]

    # calculate fundamental matrices
    Psiw, Kw, Uw = FluidFundamentalMatrices (Qwpp, Qwpm, Qwmp, Qwmm, 'PKU', precision)
    
    # calculate boundary vector
    Ua = ml.ones((N,1)) + 2*np.sum(Qwmp*(-Kw).I,1)
    pm = Linsolve (ml.hstack((Uw,Ua)).T, ml.hstack((ml.zeros((1,N)),ml.ones((1,1)))).T).T

    ro =  ((1.0-np.sum(pm))/2.0)/(np.sum(pm)+(1.0-np.sum(pm))/2.0) # calc idle time with weight=1, and the busy time with weight=1/2
    kappa = pm/np.sum(pm)
    
    pi = CTMCSolve (sD)
    lambd = []
    for i in range(K):
        lambd.append(np.sum(pi*D[i+1]))

    Psiw = []
    Qwmp = []
    Qwzp = []
    Qwpp = []
    Qwmz = []
    Qwpz = []
    Qwzz = []
    Qwmm = []
    Qwpm = []
    Qwzm = []
    for k in range(K):
        # step 2. construct a workload process for classes k...K
        # ======================================================
        Mlo = np.sum(M[:k])
        Mhi = np.sum(M[k:])

        Qkwpp = ml.zeros((N*Mlo*Mhi+N*Mhi, N*Mlo*Mhi+N*Mhi))
        Qkwpz = ml.zeros((N*Mlo*Mhi+N*Mhi, N*Mlo)) 
        Qkwpm = ml.zeros((N*Mlo*Mhi+N*Mhi, N))
        Qkwmz = ml.zeros((N, N*Mlo))
        Qkwmp = ml.zeros((N, N*Mlo*Mhi+N*Mhi))
        Dlo = ml.matrix(D0)
        for i in range(k):
            Dlo = Dlo + D[i+1]
        Qkwmm = Dlo
        Qkwzp = ml.zeros((N*Mlo, N*Mlo*Mhi+N*Mhi))
        Qkwzm = ml.zeros((N*Mlo, N))
        Qkwzz = ml.zeros((N*Mlo, N*Mlo))
        kix = 0
        for i in range(k,K):
            kix2 = 0
            for j in range(k):
                bs = N*M[j]*M[i]
                bs2 = N*M[j]
                Qkwpp[kix:kix+bs,kix:kix+bs] = np.kron(I,np.kron(ml.eye(M[j]),S[i]))
                Qkwpz[kix:kix+bs,kix2:kix2+bs2] = np.kron(I,np.kron(ml.eye(M[j]),s[i]))
                Qkwzp[kix2:kix2+bs2,kix:kix+bs] = np.kron(D[i+1],np.kron(ml.eye(M[j]), sigma[i]))
                kix += bs
                kix2 += bs2
        for i in range(k,K):
            bs = N*M[i]
            Qkwpp[kix:kix+bs,:][:,kix:kix+bs] = np.kron(I,S[i])
            Qkwpm[kix:kix+bs,:] = np.kron(I,s[i])
            Qkwmp[:,kix:kix+bs] = np.kron(D[i+1],sigma[i])
            kix += bs
        kix = 0
        for j in range(k):
            bs = N*M[j]
            Qkwzz[kix:kix+bs,kix:kix+bs] = np.kron(Dlo, ml.eye(M[j])) + np.kron(I, S[j])
            Qkwzm[kix:kix+bs,:] = np.kron(I, s[j])
            kix += bs

        if Qkwzz.shape[0]>0:
            Psikw = FluidFundamentalMatrices (Qkwpp+Qkwpz*(-Qkwzz).I*Qkwzp, Qkwpm+Qkwpz*(-Qkwzz).I*Qkwzm, Qkwmp, Qkwmm, 'P', precision)
        else:
            Psikw = FluidFundamentalMatrices (Qkwpp, Qkwpm, Qkwmp, Qkwmm, 'P', precision)
        Psiw.append(Psikw)
        
        Qwzp.append(Qkwzp)
        Qwmp.append(Qkwmp)
        Qwpp.append(Qkwpp)
        Qwmz.append(Qkwmz)
        Qwpz.append(Qkwpz)
        Qwzz.append(Qkwzz)
        Qwmm.append(Qkwmm)
        Qwpm.append(Qkwpm)
        Qwzm.append(Qkwzm)
    
    # step 3. calculate Phi vectors
    # =============================
    lambdaS = sum(lambd)
    phi = [(1-ro)*kappa*(-D0) / lambdaS]
    q0 = [[]]
    qL = [[]]
    for k in range(K-1):
        sDk = ml.matrix(D0)
        for j in range(k+1):
            sDk = sDk + D[j+1]
        # pk
        pk = sum(lambd[:k+1])/lambdaS - (1-ro)*kappa*np.sum(sDk,1)/lambdaS
        # A^(k,1)
        Qwzpk = Qwzp[k+1]
        vix = 0
        Ak = []
        for ii in range(k+1):
            bs = N*M[ii]
            V1 = Qwzpk[vix:vix+bs,:]
            Ak.append (np.kron(I,sigma[ii]) * (-np.kron(sDk,ml.eye(M[ii]))-np.kron(I,S[ii])).I * (np.kron(I,s[ii]) + V1*Psiw[k+1]))
            vix += bs
        # B^k
        Qwmpk = Qwmp[k+1]
        Bk = Qwmpk * Psiw[k+1]
        ztag = phi[0]*((-D0).I*D[k+1]*Ak[k] - Ak[0] + (-D0).I*Bk)
        for i in range(k):
            ztag += phi[i+1]*(Ak[i]-Ak[i+1]) + phi[0]*(-D0).I*D[i+1]*Ak[i]
        Mx = ml.eye(Ak[k].shape[0])-Ak[k]
        Mx[:,0] = ml.ones((N,1))
        phi.append(ml.hstack((pk, ztag[:,1:]))*Mx.I)  # phi(k) = Psi^(k)_k * p(k). Psi^(k)_i = phi(i) / p(k)

        q0.append(phi[0]*(-D0).I)
        qLii = []
        for ii in range(k+1):
            qLii.append((phi[ii+1] - phi[ii] + phi[0]*(-D0).I*D[ii+1]) * np.kron(I,sigma[ii]) * (-np.kron(sDk,ml.eye(M[ii]))-np.kron(I,S[ii])).I)
        qL.append(ml.hstack(qLii))
    
    
    # step 4. calculate performance measures
    # ======================================
    Ret = []
    for k in classes:

        sD0k = ml.matrix(D0)
        for i in range(k):
            sD0k +=  D[i+1]     
       
        if k<K-1:
            # step 4.1 calculate distribution of the workload process right 
            # before the arrivals of class k jobs
            # ============================================================
            if Qwzz[k].shape[0]>0:
                Kw = Qwpp[k]+Qwpz[k]*(-Qwzz[k]).I*Qwzp[k] + Psiw[k]*Qwmp[k]
            else:
                Kw = Qwpp[k] + Psiw[k]*Qwmp[k]
            BM = ml.zeros((0,0))
            CM = ml.zeros((0,N))
            DM = ml.zeros((0,0))
            for i in range(k):
                BM = la.block_diag(BM,np.kron(I,S[i]))
                CM = ml.vstack((CM, np.kron(I,s[i])))
                DM = la.block_diag(DM,np.kron(D[k+1],ml.eye(M[i])))
            if k>0:
                Kwu = ml.vstack((ml.hstack((Kw, (Qwpz[k]+Psiw[k]*Qwmz[k])*(-Qwzz[k]).I*DM)), ml.hstack((ml.zeros((BM.shape[0],Kw.shape[1])), BM))))
                Bwu = ml.vstack((Psiw[k]*D[k+1], CM))
                iniw = ml.hstack((q0[k]*Qwmp[k]+qL[k]*Qwzp[k], qL[k]*DM))
                pwu = q0[k]*D[k+1]
            else:
                Kwu = Kw
                Bwu = Psiw[k]*D[k+1]
                iniw = pm*Qwmp[k]
                pwu = pm*D[k+1]

            norm = np.sum(pwu) + np.sum(iniw*(-Kwu).I*Bwu)
            pwu = pwu / norm
            iniw = iniw / norm

            # step 4.2 create the fluid model whose first passage time equals the
            # WAITING time of the low priority customers
            # ==================================================================
            KN = Kwu.shape[0]
            Qspp = ml.zeros((KN+N*np.sum(M[k+1:]), KN+N*np.sum(M[k+1:])))
            Qspm = ml.zeros((KN+N*np.sum(M[k+1:]), N))
            Qsmp = ml.zeros((N, KN+N*np.sum(M[k+1:])))
            Qsmm = sD0k + D[k+1]
            kix = 0
            for i in range(k+1,K):
                bs = N*M[i]
                Qspp[KN+kix:KN+kix+bs,:][:,KN+kix:KN+kix+bs] = np.kron(I,S[i])
                Qspm[KN+kix:KN+kix+bs,:] = np.kron(I,s[i])
                Qsmp[:,KN+kix:KN+kix+bs] = np.kron(D[i+1],sigma[i])
                kix += bs

            Qspp[:KN,:][:,:KN] = Kwu
            Qspm[:KN,:] = Bwu
            inis = ml.hstack((iniw, ml.zeros((1,N*np.sum(M[k+1:])))))

            # calculate fundamental matrix
            Psis = FluidFundamentalMatrices (Qspp, Qspm, Qsmp, Qsmm, 'P', precision)

            # step 4.3. calculate the performance measures
            # ==========================================   
            argIx = 0
            while argIx<len(argv):
                if argIx in eaten:
                    argIx += 1
                    continue
                elif type(argv[argIx]) is str and argv[argIx]=="stMoms":
                    # MOMENTS OF THE SOJOURN TIME
                    # ~~~~~~~~~~~~~~~~~~~~~~~~~~~
                    numOfSTMoms = argv[argIx+1]
                    # calculate waiting time moments
                    Pn = [Psis]
                    wtMoms = []
                    for n in range(1,numOfSTMoms+1):
                        A = Qspp + Psis*Qsmp
                        B = Qsmm + Qsmp*Psis
                        C = -2*n*Pn[n-1]
                        bino = 1
                        for i in range(1,n):
                            bino = bino * (n-i+1) / i
                            C += bino * Pn[i]*Qsmp*Pn[n-i]
                        P = la.solve_sylvester(A, B, -C)
                        Pn.append(P)
                        wtMoms.append(np.sum(inis*P*(-1)**n) / 2**n)
                    # calculate RESPONSE time moments
                    Pnr = [np.sum(inis*Pn[0])*sigma[k]]
                    rtMoms = []
                    for n in range(1,numOfSTMoms+1):
                        P =  n*Pnr[n-1]*(-S[k]).I + (-1)**n*np.sum(inis*Pn[n])*sigma[k] / 2**n
                        Pnr.append(P)
                        rtMoms.append(np.sum(P)+np.sum(pwu)*math.factorial(n)*np.sum(sigma[k]*(-S[k]).I**n))
                    Ret.append(rtMoms)
                    argIx += 1
                elif type(argv[argIx]) is str and argv[argIx]=="stDistr":
                    # DISTRIBUTION OF THE SOJOURN TIME
                    # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
                    stCdfPoints = argv[argIx+1]
                    res = []
                    for t in stCdfPoints:
                        L = erlMaxOrder
                        lambdae = L/t/2
                        Psie = FluidFundamentalMatrices (Qspp-lambdae*ml.eye(Qspp.shape[0]), Qspm, Qsmp, Qsmm-lambdae*ml.eye(Qsmm.shape[0]), 'P', precision)
                        Pn = [Psie]
                        pr = (np.sum(pwu) + np.sum(inis*Psie)) * (1-np.sum(sigma[k]*(ml.eye(S[k].shape[0])-S[k]/2/lambdae).I**L))
                        for n in range(1,L):
                            A = Qspp + Psie*Qsmp - lambdae*ml.eye(Qspp.shape[0])
                            B = Qsmm + Qsmp*Psie - lambdae*ml.eye(Qsmm.shape[0])
                            C = 2*lambdae*Pn[n-1]
                            for i in range(1,n):
                                C += Pn[i]*Qsmp*Pn[n-i]
                            P = la.solve_sylvester(A, B, -C)
                            Pn.append(P)
                            pr += np.sum(inis*P) * (1-np.sum(sigma[k]*(np.eye(S[k].shape[0])-S[k]/2/lambdae).I**(L-n)))
                        res.append(pr)
                    Ret.append(np.array(res))
                    argIx += 1
                elif type(argv[argIx]) is str and (argv[argIx]=="ncMoms" or argv[argIx]=="ncDistr"):
                    W = (-np.kron(sD-D[k+1],ml.eye(M[k]))-np.kron(I,S[k])).I*np.kron(D[k+1],ml.eye(M[k]))
                    iW = (ml.eye(W.shape[0])-W).I
                    w = np.kron(ml.eye(N),sigma[k])
                    omega = (-np.kron(sD-D[k+1],ml.eye(M[k]))-np.kron(I,S[k])).I*np.kron(I,s[k])
                    if argv[argIx]=="ncMoms":
                        # MOMENTS OF THE NUMBER OF JOBS
                        # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
                        numOfQLMoms = argv[argIx+1]
                        # first calculate it at departure instants
                        Psii = [Psis]
                        QLDPn = [inis*Psii[0]*w*iW]
                        for n in range(1,numOfQLMoms+1):
                            A = Qspp + Psis*Qsmp
                            B = Qsmm + Qsmp*Psis
                            C = n*Psii[n-1]*D[k+1]
                            bino = 1
                            for i in range(1,n):
                                bino = bino * (n-i+1) / i
                                C = C + bino * Psii[i]*Qsmp*Psii[n-i]
                            P = la.solve_sylvester(A, B, -C)
                            Psii.append(P)
                            QLDPn.append(n*QLDPn[n-1]*iW*W + inis*P*w*iW)
                        for n in range(numOfQLMoms+1):
                            QLDPn[n] = (QLDPn[n] + pwu*w*iW**(n+1)*W**n)*omega
                        # now calculate it at random time instance
                        QLPn = [pi]
                        qlMoms = []
                        iTerm = (ml.ones((N,1))*pi - sD).I
                        for n in range(1,numOfQLMoms+1):
                            sumP = np.sum(QLDPn[n]) + n*np.sum((QLDPn[n-1] - QLPn[n-1]*D[k+1]/lambd[k])*iTerm*D[k+1])
                            P = sumP*pi + n*(QLPn[n-1]*D[k+1] - QLDPn[n-1]*lambd[k])*iTerm
                            QLPn.append(P)
                            qlMoms.append(np.sum(P))
                        qlMoms = MomsFromFactorialMoms(qlMoms)
                        Ret.append(qlMoms)
                        argIx += 1
                    elif argv[argIx]=="ncDistr":
                        # DISTRIBUTION OF THE NUMBER OF JOBS
                        # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
                        numOfQLProbs = argv[argIx+1]
                        Psid = FluidFundamentalMatrices (Qspp, Qspm, Qsmp, sD0k, 'P', precision)
                        Pn = [Psid]
                        XDn = inis*Psid*w
                        dqlProbs = (XDn+pwu*w)*omega
                        for n in range(1,numOfQLProbs):
                            A = Qspp + Psid*Qsmp
                            B = sD0k + Qsmp*Psid
                            C = Pn[n-1]*D[k+1]
                            for i in range(1,n):
                                C += Pn[i]*Qsmp*Pn[n-i]
                            P = la.solve_sylvester(A, B, -C)
                            Pn.append(P)
                            XDn = XDn*W + inis*P*w
                            dqlProbs = ml.vstack((dqlProbs, (XDn+pwu*w*W**n)*omega))
                        # now calculate it at random time instance
                        iTerm = -(sD-D[k+1]).I
                        qlProbs = lambd[k]*dqlProbs[0,:]*iTerm
                        for n in range(1,numOfQLProbs):
                            P = (qlProbs[n-1,:]*D[k+1]+lambd[k]*(dqlProbs[n,:]-dqlProbs[n-1,:]))*iTerm
                            qlProbs = ml.vstack((qlProbs, P))
                        qlProbs = np.sum(qlProbs,1).A.flatten()
                        Ret.append(qlProbs)
                        argIx += 1
                else:
                    raise Exception("MMAPPH1NPPR: Unknown parameter "+str(argv[argIx]))
                argIx += 1
        elif k==K-1:
            # step 3. calculate the performance measures
            # ==========================================   
            argIx = 0
            while argIx<len(argv):
                if argIx in eaten:
                    argIx += 1
                    continue
                elif type(argv[argIx]) is str and (argv[argIx]=="stMoms" or argv[argIx]=="stDistr"):
                    Kw = Qwpp[k]+Qwpz[k]*(-Qwzz[k]).I*Qwzp[k] + Psiw[k]*Qwmp[k]
                    AM = ml.zeros((0,0))
                    BM = ml.zeros((0,0))
                    CM = ml.zeros((0,1))
                    DM = ml.zeros((0,0))
                    for i in range(k):
                        AM = la.block_diag(AM,np.kron(ml.ones((N,1)),np.kron(ml.eye(M[i]),s[k])))
                        BM = la.block_diag(BM,S[i])
                        CM = ml.vstack((CM, s[i]))
                        DM = la.block_diag(DM,np.kron(D[k+1],ml.eye(M[i])))                        
                    Z = ml.vstack((ml.hstack((Kw, ml.vstack((AM,ml.zeros((N*M[k],AM.shape[1])))))), ml.hstack((ml.zeros((BM.shape[0],Kw.shape[1])), BM))))
                    z = ml.vstack((ml.zeros((AM.shape[0],1)), np.kron(ml.ones((N,1)),s[k]), CM))
                    iniw = ml.hstack((q0[k]*Qwmp[k]+qL[k]*Qwzp[k], ml.zeros((1,BM.shape[0]))))
                    zeta = iniw/np.sum(iniw*(-Z).I*z)
                    if argv[argIx]=="stMoms":
                        # MOMENTS OF THE SOJOURN TIME
                        # ~~~~~~~~~~~~~~~~~~~~~~~~~~~
                        numOfSTMoms = argv[argIx+1]
                        rtMoms = []
                        for i in range(1,numOfSTMoms+1):
                            rtMoms.append(np.sum(math.factorial(i)*zeta*(-Z).I**(i+1)*z))
                        Ret.append(rtMoms)
                        argIx += 1
                    if argv[argIx]=="stDistr":
                        # DISTRIBUTION OF THE SOJOURN TIME
                        # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
                        stCdfPoints = argv[argIx+1]
                        rtDistr = []
                        for t in stCdfPoints:
                            rtDistr.append (np.sum(zeta*(-Z).I*(ml.eye(Z.shape[0])-la.expm(Z*t))*z))
                        Ret.append(np.array(rtDistr))
                        argIx += 1
                elif type(argv[argIx]) is str and (argv[argIx]=="ncMoms" or argv[argIx]=="ncDistr"):
                    L = ml.zeros((N*np.sum(M),N*np.sum(M)))
                    B = ml.zeros((N*np.sum(M),N*np.sum(M)))
                    F = ml.zeros((N*np.sum(M),N*np.sum(M)))
                    kix = 0
                    for i in range(K):
                        bs = N*M[i]
                        F[kix:kix+bs,:][:,kix:kix+bs] = np.kron(D[k+1],ml.eye(M[i]))
                        L[kix:kix+bs,:][:,kix:kix+bs] = np.kron(sD0k,ml.eye(M[i])) + np.kron(I,S[i])
                        if i<K-1:
                            L[kix:kix+bs,:][:,N*np.sum(M[:k]):] = np.kron(I,s[i]*sigma[k])
                        else:
                            B[kix:kix+bs,:][:,N*np.sum(M[:k]):] = np.kron(I,s[i]*sigma[k])
                        kix += bs
                    R = QBDFundamentalMatrices (B, L, F, 'R', precision)
                    p0 = ml.hstack((qL[k], q0[k]*np.kron(I,sigma[k])))
                    p0 = p0/np.sum(p0*(ml.eye(R.shape[0])-R).I)
                    if argv[argIx]=="ncMoms":
                        # MOMENTS OF THE NUMBER OF JOBS
                        # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
                        numOfQLMoms = argv[argIx+1]
                        qlMoms = []
                        for i in range(1,numOfQLMoms+1):
                            qlMoms.append(np.sum(math.factorial(i)*p0*R**i*(ml.eye(R.shape[0])-R).I**(i+1)))
                        Ret.append(MomsFromFactorialMoms(qlMoms))
                    elif argv[argIx]=="ncDistr":
                        # DISTRIBUTION OF THE NUMBER OF JOBS
                        # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
                        numOfQLProbs = argv[argIx+1]
                        qlProbs = [np.sum(p0)]
                        for i in range(1,numOfQLProbs):
                            qlProbs.append(np.sum(p0*R**i))
                        Ret.append(np.array(qlProbs))
                    argIx += 1
                else:
                    raise Exception("MMAPPH1NPPR: Unknown parameter "+str(argv[argIx]))
                argIx += 1

    if len(Ret)==1:
        return Ret[0]
    else:
        return Ret
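
The moment and distribution recursions above repeatedly obtain a matrix P from an equation of the form A*P + P*B = -C via scipy's Sylvester solver. A minimal, self-contained sketch of that pattern follows; the matrices are made up purely for illustration and only stand in for blocks like Qspp + Psis*Qsmp and Qsmm + Qsmp*Psis.

import numpy as np
from scipy import linalg as la

rng = np.random.default_rng(0)
A = -5.0 * np.eye(3) + 0.1 * rng.random((3, 3))   # illustrative stand-in for Qspp + Psis*Qsmp
B = -4.0 * np.eye(2) + 0.1 * rng.random((2, 2))   # illustrative stand-in for Qsmm + Qsmp*Psis
C = rng.random((3, 2))                            # illustrative right-hand side built from lower-order terms

# solve_sylvester(A, B, Q) returns the P with A*P + P*B = Q; the recursions above call it with Q = -C.
# A unique solution exists here because no eigenvalue of A is the negative of an eigenvalue of B.
P = la.solve_sylvester(A, B, -C)
print(np.allclose(A @ P + P @ B, -C))             # True: P satisfies the equation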
Example #25
0
def test9():

    ######################################
    ## STANDARD COOLEY HANSEN CIA MODEL ##
    ######################################
    # Load and solve the manually linearized model
    rbc1 = pm.newMOD(models.abcs_rbcs.cooley_hansen_cia_linear, mesg=True)
    rbc1.modsolvers.pyuhlig.solve()
    rbc1.modsolvers.forklein.solve()

    # Load and solve the automatically linearized model
    rbc2 = pm.newMOD(models.abcs_rbcs.cooley_hansen_cia_cf, mesg=True)
    rbc2.modsolvers.forkleind.solve()

    # Check equivalence of steady states
    for keyo in rbc1.sstate.keys():
        if keyo in rbc2.sstate.keys():
            assert round(rbc1.sstate[keyo], 5) == round(rbc2.sstate[keyo], 5)

    # Check equivalence of results
    nexo = len(rbc2.vardic['exo']['var'])
    nendo = len(rbc2.vardic['endo']['var'])

    modlin1 = MAT.hstack(
        (rbc1.modsolvers.pyuhlig.Q, rbc1.modsolvers.pyuhlig.P))
    modlin1 = [round(modlin1[0, i1], 5) for i1 in range(modlin1.shape[1])]
    modnlin1 = rbc2.modsolvers.forkleind.P[-nendo:, :]
    modnlin1 = [round(modnlin1[0, i1], 5) for i1 in range(modnlin1.shape[1])]
    print('Comparison: Standard CIA model')
    print("Linear is: ", modlin1)
    print('----------------------')
    print("Nonlinear is: ", modnlin1)
    assert modlin1 == modnlin1

    modlin2 = MAT.hstack(
        (rbc1.modsolvers.pyuhlig.S, rbc1.modsolvers.pyuhlig.R))
    modlin2 = [[round(modlin2[i2, i1], 5) for i1 in range(modlin2.shape[1])]
               for i2 in range(modlin2.shape[0])]
    modnlin2 = rbc2.modsolvers.forkleind.F
    modnlin2 = [[
        round(modnlin2[i2, i1], 5) for i1 in range(modnlin2.shape[1])
    ] for i2 in range(modnlin2.shape[0])]
    print(modlin2)
    print('----------------------')
    print(modnlin2)
    assert modlin2 == modnlin2

    ######################################################
    ## STANDARD COOLEY HANSEN CIA MODEL WITH SEIGNORAGE ##
    ######################################################
    # Load and solve the manually linearized model
    rbc1 = pm.newMOD(models.abcs_rbcs.cooley_hansen_cia_seignorage_linear,
                     mesg=True)
    rbc1.modsolvers.pyuhlig.solve()
    rbc1.modsolvers.forklein.solve()

    # Load and solve the automatically linearized model
    rbc2 = pm.newMOD(models.abcs_rbcs.cooley_hansen_cia_seignorage_cf,
                     mesg=True)
    rbc2.modsolvers.forkleind.solve()

    # Check equivalence of steady states
    for keyo in rbc1.sstate.keys():
        if keyo in rbc2.sstate.keys():
            assert round(rbc1.sstate[keyo], 5) == round(rbc2.sstate[keyo], 5)

    # Check equivalence of results
    nexo = len(rbc2.vardic['exo']['var'])
    nendo = len(rbc2.vardic['endo']['var'])

    modlin1 = MAT.hstack(
        (rbc1.modsolvers.pyuhlig.Q, rbc1.modsolvers.pyuhlig.P))
    modlin1 = [round(modlin1[0, i1], 5) for i1 in range(modlin1.shape[1])]
    modnlin1 = rbc2.modsolvers.forkleind.P[-nendo:, :]
    modnlin1 = [round(modnlin1[0, i1], 5) for i1 in range(modnlin1.shape[1])]
    print('Comparison: Standard CIA model with seignorage')
    print("Linear is: ", modlin1)
    print('----------------------')
    print("Nonlinear is: ", modnlin1)
    assert modlin1 == modnlin1

    modlin2 = MAT.hstack(
        (rbc1.modsolvers.pyuhlig.S, rbc1.modsolvers.pyuhlig.R))
    modlin2 = [[round(modlin2[i2, i1], 5) for i1 in range(modlin2.shape[1])]
               for i2 in range(modlin2.shape[0])]
    modnlin2 = rbc2.modsolvers.forkleind.F
    modnlin2 = [[
        round(modnlin2[i2, i1], 5) for i1 in range(modnlin2.shape[1])
    ] for i2 in range(modnlin2.shape[0])]
    print(modlin2)
    print('----------------------')
    print(modnlin2)
    assert modlin2 == modnlin2

    ######################################################################
    ## STANDARD COOLEY HANSEN CIA MODEL WITH SEIGNORAGE AND CES UTILITY ##
    ######################################################################
    # Be careful, here in the log-linearized version we have two endogenous states, k and mg
    # But in the automatically linearized version using the Jacobian we only have one, k

    # Load and solve the manually linearized model
    rbc1 = pm.newMOD(models.abcs_rbcs.cooley_hansen_cia_seignorage_ces_linear,
                     mesg=True)
    rbc1.modsolvers.pyuhlig.solve()
    rbc1.modsolvers.forklein.solve()

    # Load and solve the automatically linearized model
    rbc2 = pm.newMOD(models.abcs_rbcs.cooley_hansen_cia_seignorage_ces_cf,
                     mesg=True)
    rbc2.modsolvers.forkleind.solve()

    # Check equivalence of steady states
    for keyo in rbc1.sstate.keys():
        if keyo in rbc2.sstate.keys():
            assert round(rbc1.sstate[keyo], 5) == round(rbc2.sstate[keyo], 5)

    # Check equivalence of results
    nexo = len(rbc2.vardic['exo']['var'])
    nendo = len(rbc2.vardic['endo']['var'])

    modlin1 = MAT.hstack((rbc1.modsolvers.pyuhlig.Q[:nendo, :],
                          rbc1.modsolvers.pyuhlig.P[:nendo, :nendo]))
    modlin1 = [[round(modlin1[i2, i1], 5) for i1 in range(modlin1.shape[1])]
               for i2 in range(modlin1.shape[0])]
    modnlin1 = rbc2.modsolvers.forkleind.P[-nendo:, :]
    modnlin1 = [[
        round(modnlin1[i2, i1], 5) for i1 in range(modnlin1.shape[1])
    ] for i2 in range(modnlin1.shape[0])]
    print('Comparison: Standard CIA model with seignorage and CES utility')
    print("Linear is: ", modlin1)
    print('----------------------')
    print("Nonlinear is: ", modnlin1)
    assert modlin1 == modnlin1

    modlin2 = MAT.hstack((rbc1.modsolvers.pyuhlig.S[:, :],
                          rbc1.modsolvers.pyuhlig.R[:, :nendo]))
    modlin2 = [[round(modlin2[i2, i1], 5) for i1 in range(modlin2.shape[1])]
               for i2 in range(modlin2.shape[0])]
    modnlin2 = rbc2.modsolvers.forkleind.F[:-1, :]
    modnlin2 = [[
        round(modnlin2[i2, i1], 5) for i1 in range(modnlin2.shape[1])
    ] for i2 in range(modnlin2.shape[0])]
    print(modlin2)
    print('----------------------')
    print(modnlin2)
    assert modlin2 == modnlin2
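
test9 repeats the same rounded element-by-element comparison for every solver output; a roughly equivalent check (a hypothetical helper, not part of the test suite above) could be written once with numpy, as sketched here.

import numpy as np

def assert_matrices_close(A, B, decimals=5):
    # Hypothetical helper mirroring the comparisons in test9: round both
    # matrices to the given number of decimals and require exact equality.
    A = np.round(np.asarray(A, dtype=float), decimals)
    B = np.round(np.asarray(B, dtype=float), decimals)
    assert A.shape == B.shape and np.array_equal(A, B)

assert_matrices_close([[1.000001, 2.0]], [[1.000002, 2.0]])  # passes at 5 decimals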
Example #26
0
def MMAPPH1FCFS(D, sigma, S, *argv):
    """
    Returns various performance measures of a MMAP[K]/PH[K]/1
    first-come-first-served queue, see [1]_.
    
    Parameters
    ----------
    D : list of matrices of shape (N,N), length (K+1)
        The D0...DK matrices of the arrival process.
    sigma : list of row vectors, length (K)
        The list containing the initial probability vectors of the service
        time distributions of the various customer types. The lengths of the
        vectors do not have to be the same.
    S : list of square matrices, length (K)
        The transient generators of the phase type distributions representing
        the service time of the jobs belonging to various types.
    further parameters : 
        The rest of the function parameters specify the options
        and the performance measures to be computed.
    
        The supported performance measures and options in this 
        function are:
    
        +----------------+--------------------+----------------------------------------+
        | Parameter name | Input parameters   | Output                                 |
        +================+====================+========================================+
        | "ncMoms"       | Number of moments  | The moments of the number of customers |
        +----------------+--------------------+----------------------------------------+
        | "ncDistr"      | Upper limit K      | The distribution of the number of      |
        |                |                    | customers from level 0 to level K-1    |
        +----------------+--------------------+----------------------------------------+
        | "stMoms"       | Number of moments  | The sojourn time moments               |
        +----------------+--------------------+----------------------------------------+
        | "stDistr"      | A vector of points | The sojourn time distribution at the   |
        |                |                    | requested points (cumulative, cdf)     |
        +----------------+--------------------+----------------------------------------+
        | "stDistrME"    | None               | The vector-matrix parameters of the    |
        |                |                    | matrix-exponentially distributed       |
        |                |                    | sojourn time distribution              |
        +----------------+--------------------+----------------------------------------+
        | "stDistrPH"    | None               | The vector-matrix parameters of the    |
        |                |                    | matrix-exponentially distributed       |
        |                |                    | sojourn time distribution, converted   |
        |                |                    | to a continuous PH representation      |
        +----------------+--------------------+----------------------------------------+
        | "prec"         | The precision      | Numerical precision used as a stopping |
        |                |                    | condition when solving the Riccati     |
        |                |                    | equation                               |
        +----------------+--------------------+----------------------------------------+
        | "classes"      | Vector of integers | Only the performance measures          |
        |                |                    | belonging to these classes are         |
        |                |                    | returned. If not given, all classes    |
        |                |                    | are analyzed.                          |
        +----------------+--------------------+----------------------------------------+
        
        (The quantities related to the number of customers in 
        the system include the customer in the server, and the 
        sojourn time related quantities include the service 
        times as well)
    
    Returns
    -------
    Ret : list of the performance measures
        Each entry of the list corresponds to a performance 
        measure requested. Each entry is a matrix, where the
        columns belong to the various job types.
        If there is just a single item, 
        then it is not put into a list.
    
    References
    ----------
    .. [1] Qi-Ming He, "Analysis of a continuous time 
           SM[K]/PH[K]/1/FCFS queue: Age process, sojourn times,
           and queue lengths", Journal of Systems Science and 
           Complexity, 25(1), pp 133-155, 2012.
    """
    
    K = len(D)-1

    # parse options
    eaten = []
    precision = 1e-14;
    classes = np.arange(0,K)
    for i in range(len(argv)):
        if argv[i]=="prec":
            precision = argv[i+1]
            eaten.append(i)
            eaten.append(i+1) 
        elif argv[i]=="classes":
            classes = np.array(argv[i+1])-1
            eaten.append(i)
            eaten.append(i+1) 
    
    if butools.checkInput and not CheckMMAPRepresentation(D):
        raise Exception('MMAPPH1FCFS: The arrival process is not a valid MMAP representation!')
    
    if butools.checkInput:
        for k in range(K):
            if not CheckPHRepresentation(sigma[k],S[k]):
                raise Exception('MMAPPH1FCFS: the vector and matrix describing the service times is not a valid PH representation!')

    # some preparation
    D0 = D[0]
    N = D0.shape[0]
    Ia = ml.eye(N);
    Da = ml.zeros((N,N))
    for q in range(K):
        Da += D[q+1]
    theta = CTMCSolve(D0+Da)
    beta = [CTMCSolve(S[k]+ml.sum(-S[k],1)*sigma[k]) for k in range(K)]
    lambd = [np.sum(theta*D[k+1]) for k in range(K)]    
    mu = [np.sum(beta[k]*(-S[k])) for k in range(K)]
    Nsk = [S[k].shape[0] for k in range(K)]    
    ro = np.sum(np.array(lambd)/np.array(mu))
    alpha = theta*Da/sum(lambd)
    D0i = (-D0).I

    Sa = S[0];
    sa = [ml.zeros(sigma[0].shape)]*K
    sa[0] = sigma[0]
    ba = [ml.zeros(beta[0].shape)]*K
    ba[0] = beta[0]
    sv = [ml.zeros((Nsk[0],1))]*K
    sv[0] = ml.sum(-S[0],1)
    Pk = [D0i*D[q+1] for q in range(K)]

    for k in range(1,K):
        Sa = la.block_diag(Sa, S[k])
        for q in range(K):
            if q==k:
                sa[q] = ml.hstack((sa[q], sigma[k]))
                ba[q] = ml.hstack((ba[q], beta[k]))
                sv[q] = ml.vstack((sv[q], -np.sum(S[k],1)))
            else:
                sa[q] = ml.hstack((sa[q], ml.zeros(sigma[k].shape)))
                ba[q] = ml.hstack((ba[q], ml.zeros(beta[k].shape)))
                sv[q] = ml.vstack((sv[q], ml.zeros((Nsk[k],1))))
    Sa = ml.matrix(Sa)
    P = D0i*Da
    iVec = ml.kron(D[1],sa[0])
    for k in range(1,K):
        iVec += ml.kron(D[k+1],sa[k])
    Ns = Sa.shape[0]
    Is = ml.eye(Ns)
    
    # step 1. solve the age process of the queue
    # ==========================================

    # solve Y0 and calculate T
    Y0 = FluidFundamentalMatrices (ml.kron(Ia,Sa), ml.kron(Ia,-ml.sum(Sa,1)), iVec, D0, "P", precision)
    T = ml.kron(Ia,Sa) + Y0 * iVec
    
    # calculate pi0 and v0
    pi0 = ml.zeros((1,T.shape[0]))
    for k in range(K):
        pi0 += ml.kron(theta*D[k+1],ba[k]/mu[k])
    pi0 = - pi0 * T

    iT = (-T).I
    oa = ml.ones((N,1))

    # step 2. calculate performance measures
    # ======================================
    Ret = []
    for k in classes:
        argIx = 0
        clo = iT*ml.kron(oa,sv[k])
        while argIx<len(argv):
            if argIx in eaten:
                argIx += 1
                continue
            elif type(argv[argIx]) is str and argv[argIx]=="stMoms":
                numOfSTMoms = argv[argIx+1]
                rtMoms = []
                for m in range(1,numOfSTMoms+1):
                    rtMoms.append(math.factorial(m) * np.sum(pi0 * iT**m * clo / (pi0*clo)))
                Ret.append(rtMoms)
                argIx += 1
            elif type(argv[argIx]) is str and argv[argIx]=="stDistr":
                stCdfPoints = argv[argIx+1]
                cdf = [];
                for t in stCdfPoints:
                    pr = 1 - np.sum(pi0 * la.expm(T*t) * clo / (pi0*clo))
                    cdf.append(pr)
                Ret.append(np.array(cdf))
                argIx += 1
            elif type(argv[argIx]) is str and argv[argIx]=="stDistrME":
                Bm = SimilarityMatrixForVectors(clo/(pi0*clo),ml.ones((N*Ns,1)))
                Bmi = Bm.I
                A = Bm * T * Bmi
                alpha = pi0 * Bmi
                Ret.append(alpha)
                Ret.append(A)
            elif type(argv[argIx]) is str and argv[argIx]=="stDistrPH":
                vv = pi0*iT
                ix = np.arange(N*Ns)
                nz = ix[vv.flat>precision]
                delta = Diag(vv[:,nz])
                cl = -T*clo/(pi0*clo)
                alpha = cl[nz,:].T*delta
                A = delta.I*T[nz,:][:,nz].T*delta
                Ret.append(alpha)
                Ret.append(A)
            elif type(argv[argIx]) is str and argv[argIx]=="ncDistr":
                numOfQLProbs = argv[argIx+1]
                argIx += 1
                values = np.empty(numOfQLProbs)
                jm = ml.zeros((Ns,1))
                jm[np.sum(Nsk[0:k]):np.sum(Nsk[0:k+1]),:] = 1
                jmc = ml.ones((Ns,1))
                jmc[np.sum(Nsk[0:k]):np.sum(Nsk[0:k+1]),:] = 0
                LmCurr = la.solve_sylvester(T, ml.kron(D0+Da-D[k+1],Is), -ml.eye(N*Ns))
                values[0] = 1-ro+np.sum(pi0*LmCurr*ml.kron(oa,jmc))
                for i in range(1,numOfQLProbs):
                    LmPrev = LmCurr
                    LmCurr = la.solve_sylvester(T, ml.kron(D0+Da-D[k+1],Is), -LmPrev*ml.kron(D[k+1],Is))
                    values[i] = np.sum(pi0*LmCurr*ml.kron(oa,jmc) + pi0*LmPrev*ml.kron(oa,jm));
                Ret.append(values)
            elif type(argv[argIx]) is str and argv[argIx]=="ncMoms":
                numOfQLMoms = argv[argIx+1]
                argIx += 1
                jm = ml.zeros((Ns,1))
                jm[np.sum(Nsk[0:k]):np.sum(Nsk[0:k+1]),:] = 1
                ELn = [la.solve_sylvester(T, ml.kron(D0+Da,Is), -ml.eye(N*Ns))]
                qlMoms = []
                for n in range(1,numOfQLMoms+1):
                    bino = 1
                    Btag = ml.zeros((N*Ns,N*Ns))
                    for i in range(n):
                        Btag += bino * ELn[i]
                        bino *= (n-i) / (i+1)
                    ELn.append(la.solve_sylvester(T, ml.kron(D0+Da,Is), -Btag*ml.kron(D[k+1],Is)))
                    qlMoms.append(np.sum(pi0*ELn[n]) + np.sum(pi0*Btag*ml.kron(oa,jm)))
                Ret.append(qlMoms)
            else:
                raise Exception("MMAPPH1FCFS: Unknown parameter "+str(argv[argIx]))
            argIx += 1

    if len(Ret)==1:
        return Ret[0]
    else:
        return Ret
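
A minimal usage sketch of MMAPPH1FCFS, assuming the function above and the butools helpers it calls are available in the namespace; the MMAP and PH parameters below are made-up illustrative values (two background states, K = 2 customer classes), not taken from any real model.

import numpy.matlib as ml

# Hypothetical 2-class MMAP: D0 holds transitions without arrivals, D1 and D2
# generate class-1 and class-2 arrivals (rows of D0+D1+D2 sum to zero).
D0 = ml.matrix([[-5.0, 1.0], [2.0, -7.0]])
D1 = ml.matrix([[ 2.0, 1.0], [1.0,  2.0]])
D2 = ml.matrix([[ 0.5, 0.5], [1.0,  1.0]])

# Hypothetical PH service time of each class: (initial vector, sub-generator).
sigma = [ml.matrix([[1.0, 0.0]]), ml.matrix([[0.6, 0.4]])]
S = [ml.matrix([[-10.0, 10.0], [0.0, -10.0]]),
     ml.matrix([[ -8.0,  2.0], [0.0, -12.0]])]

# Restricting "classes" to a single class returns exactly one entry per
# requested measure, so the result can be unpacked directly.
stMoms1, ncDistr1 = MMAPPH1FCFS([D0, D1, D2], sigma, S,
                                "stMoms", 3, "ncDistr", 10, "classes", [1])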
Example #27
0
def SPIRIT(streams, energyThresh, lamb, evalMetrics):

    # Make the input streams iterable
    if type(streams) == np.ndarray:
        streams_iter = iter(streams)

    # Max No. Streams
    if streams.ndim == 1:
        streams = np.expand_dims(streams, axis=1)
        num_streams = streams.shape[1]
    else:
        num_streams = streams.shape[1]

    count_over = 0
    count_under = 0

    #===============================================================================
    #      Initialise k, w and d, lamb
    #===============================================================================

    k = 1  # Hidden Variables, initialise to one

    # Weights
    pc_weights = npm.zeros(num_streams)
    pc_weights[0, 0] = 1

    # initialise outputs
    res = {}
    all_weights = []
    k_hist = []
    anomalies = []
    x_dash = npm.zeros((1, num_streams))

    Eng = mat([0.00000001, 0.00000001])

    E_xt = 0  # Energy of X at time t
    E_rec_i = mat([0.000000000000001])  # Energy of reconstruction

    Y = npm.zeros(num_streams)

    timeSteps = streams.shape[0]

    #===============================================================================
    # Main Loop
    #===============================================================================
    for t in range(1, timeSteps + 1):  # t = 1,...,200

        k_hist.append(k)

        x_t_plus_1 = mat(next(streams_iter))  # Read in the next signals

        d_i = E_rec_i * t

        # Step 1 - Update Weights
        pc_weights, y_t_i, error = track_W(x_t_plus_1, k, pc_weights, d_i,
                                           num_streams, lamb)
        # Record hidden variables
        padding = num_streams - k
        y_bar_t = npm.hstack((y_t_i, mat([nan] * padding)))
        Y = npm.vstack((Y, y_bar_t))

        # Record Weights
        all_weights.append(pc_weights)
        # Record reconstructed z and RSRE
        x_dash = npm.vstack((x_dash, y_t_i * pc_weights))

        # Record RSRE
        if t == 1:
            top = 0.0
            bot = 0.0

        top = top + (norm(x_t_plus_1 - x_dash)**2)

        bot = bot + (norm(x_t_plus_1)**2)

        new_RSRE = top / bot

        if t == 1:
            RSRE = new_RSRE
        else:
            RSRE = npm.vstack((RSRE, new_RSRE))

        ### FOR EVALUATION ###
        #deviation from truth
        if evalMetrics == 'T':

            Qt = pc_weights.T

            if t == 1:
                res['subspace_error'] = npm.zeros((timeSteps, 1))
                res['orthog_error'] = npm.zeros((timeSteps, 1))

                res['angle_error'] = npm.zeros((timeSteps, 1))
                Cov_mat = npm.zeros([num_streams, num_streams])

            # Calculate covariance matrix of data up to time t
            Cov_mat = lamb * Cov_mat + npm.dot(x_t_plus_1, x_t_plus_1.T)
            # Get eigenvalues and eigenvectors
            W, V = eig(Cov_mat)
            # Sort eigenvectors in descending order of eigenvalue
            eig_idx = W.argsort()  # Get sort index
            eig_idx = eig_idx[::-1]  # Reverse order (default is ascending)
            # V_k = top k eigenvectors (according to their sorted eigenvalues)
            V_k = V[:, eig_idx[:k]]
            # Calculate subspace error
            C = npm.dot(V_k, V_k.T) - npm.dot(Qt, Qt.T)
            res['subspace_error'][t - 1, 0] = 10 * np.log10(
                npm.trace(npm.dot(C.T, C)))  #frobenius norm in dB

            # Calculate angle between projection matrices
            D = npm.dot(npm.dot(npm.dot(V_k.T, Qt), Qt.T), V_k)
            eigVal, eigVec = eig(D)
            angle = npm.arccos(np.sqrt(max(eigVal)))
            res['angle_error'][t - 1, 0] = angle

            # Calculate deviation from orthonormality
            F = npm.dot(Qt.T, Qt) - npm.eye(k)
            res['orthog_error'][t - 1, 0] = 10 * np.log10(
                npm.trace(npm.dot(F.T, F)))  #frobenius norm in dB

        # Step 2 - Update Energy estimate
        E_xt = ((lamb * (t - 1) * E_xt) + norm(x_t_plus_1)**2) / t

        for i in range(k):
            E_rec_i[0, i] = ((lamb * (t - 1) * E_rec_i[0, i]) +
                             (y_t_i[0, i]**2)) / t

        # Step 3 - Estimate the retained energy
        E_retained = npm.sum(E_rec_i, 1)

        # Record Energy
        Eng_new = npm.hstack((E_xt, E_retained[0, 0]))
        Eng = npm.vstack((Eng, Eng_new))

        if E_retained < energyThresh[0] * E_xt:
            if k != num_streams:
                k = k + 1
                # Initialise Ek+1 <-- 0
                E_rec_i = npm.hstack((E_rec_i, mat([0])))
                # Initialise W_i+1
                new_weight_vec = npm.zeros(num_streams)
                new_weight_vec[0, k - 1] = 1
                pc_weights = npm.vstack((pc_weights, new_weight_vec))
                anomalies.append(t - 1)
            else:
                count_over += 1
        elif E_retained > energyThresh[1] * E_xt:
            if k > 1:
                k = k - 1
                # discard w_k and error
                pc_weights = delete(pc_weights, -1, 0)
                # Discard E_rec_i[k]
                E_rec_i = delete(E_rec_i, -1)
            else:
                count_under += 1

    # Data Stores
    res2 = {
        'hidden': Y,  # Array for hidden Variables
        'weights': all_weights,
        'E_t': Eng[:, 0],  # total energy of data 
        'E_dash_t': Eng[:, 1],  # hidden var energy
        'e_ratio': np.divide(Eng[:, 1], Eng[:, 0]),  # Energy ratio 
        'RSRE': RSRE,  # Relative squared Reconstruction error 
        'recon': x_dash,  # reconstructed data
        'r_hist': k_hist,  # history of r values 
        'anomalies': anomalies
    }

    res.update(res2)

    return res, all_weights
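
The energy-based rank adaptation at the end of SPIRIT's main loop is the heart of the method; below is a distilled, self-contained sketch of just that rule (the function name and the 95%-98% energy band are illustrative, not part of SPIRIT's interface).

import numpy as np

def adapt_k(k, E_xt, E_rec, energyThresh, num_streams):
    # Distilled version of the rule above: grow k when the retained energy of
    # the hidden variables falls below the lower threshold, shrink it when the
    # retained energy exceeds the upper threshold, otherwise keep k unchanged.
    E_retained = float(np.sum(E_rec[:k]))
    if E_retained < energyThresh[0] * E_xt and k < num_streams:
        return k + 1   # add a hidden variable
    if E_retained > energyThresh[1] * E_xt and k > 1:
        return k - 1   # drop the last hidden variable
    return k

print(adapt_k(2, E_xt=1.0, E_rec=np.array([0.70, 0.20, 0.05]),
              energyThresh=(0.95, 0.98), num_streams=3))   # -> 3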