Example #1
0
def solve_discrete_lyapunov(a, q, complex_step=False):
    r"""
    Solves the discrete Lyapunov equation using a bilinear transformation.

    Notes
    -----
    This is a modification of the version in Scipy (see
    https://github.com/scipy/scipy/blob/master/scipy/linalg/_solvers.py)
    which allows passing through the complex numbers in the matrix a
    (usually the transition matrix) in order to allow complex step
    differentiation.
    """
    eye = np.eye(a.shape[0])
    if not complex_step:
        aH = a.conj().transpose()
        aHI_inv = np.linalg.inv(aH + eye)
        b = np.dot(aH - eye, aHI_inv)
        c = 2 * np.dot(np.dot(np.linalg.inv(a + eye), q), aHI_inv)
        return solve_sylvester(b.conj().transpose(), b, -c)
    else:
        aH = a.transpose()
        aHI_inv = np.linalg.inv(aH + eye)
        b = np.dot(aH - eye, aHI_inv)
        c = 2 * np.dot(np.dot(np.linalg.inv(a + eye), q), aHI_inv)
        return solve_sylvester(b.transpose(), b, -c)
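A minimal usage sketch for the snippet above (an assumption-laden check, not part of the original project): it imports the names the function expects at module level and verifies that the returned X satisfies the discrete Lyapunov equation a X a^H - X + q = 0, which the bilinear transformation reduces to a Sylvester equation.
import numpy as np
from scipy.linalg import solve_sylvester

a = np.array([[0.5, 0.1],
              [0.0, 0.3]])    # Schur-stable transition matrix
q = np.eye(2)
x = solve_discrete_lyapunov(a, q)
# check the discrete Lyapunov residual a x a^H - x + q
assert np.allclose(a @ x @ a.conj().T - x + q, 0)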
Example #3
0
def dexpm_triu(
    a11: np.ndarray,
    a12: np.ndarray,
    a22: np.ndarray,
    da11: np.ndarray,
    da12: np.ndarray,
    da22: np.ndarray,
    dt: float,
    f11: np.ndarray,
    f22: np.ndarray,
    df11: np.ndarray,
    df22: np.ndarray,
) -> Tuple[np.ndarray, np.ndarray]:
    """Compute the exponential of the upper triangular matrix A and its derivative using the
    Parlett's method

    .. math::

        F = \\exp(A) = \\exp \\left(\\begin{bmatrix} a11 & a12 \\\\ 0 & a22 \\end{bmatrix} \\right)
        = \\begin{bmatrix} f11 & f12 \\\\ 0 & f22 \\end{bmatrix}

    Args:
        a11: Upper left input matrix
        a12: Upper right input matrix
        a22: Lower right input matrix
        da11: Partial derivative upper left input matrix
        da12: Partial derivative upper right input matrix
        da22: Partial derivative lower right input matrix
        dt: Sampling time
        f11: Upper left output matrix
        f22: Lower right output matrix
        df11: Partial derivative upper left output matrix
        df22: Partial derivative lower right output matrix

    Returns:
        2-element tuple containing
            - **F**: Matrix exponential of A
            - **dF**: Derivative of the matrix exponential of A
    """
    nj, n11, n22 = da12.shape

    f12 = solve_sylvester(a11, -a22, f11 @ a12 - a12 @ f22)
    df12 = np.zeros((nj, n11, n22))
    tmp = -da11 @ f12 + f12 @ da22 + df11 @ a12 + f11 @ da12 - da12 @ f22 - a12 @ df22
    for n in range(nj):
        if tmp[n].any():
            df12[n] = solve_sylvester(a11, -a22, tmp[n])

    F = np.zeros((n11 + n22, n11 + n22))
    F[:n11, :n11] = f11
    F[:n11, n11:] = f12
    F[n11:, n11:] = f22

    dF = np.zeros((nj, n11 + n22, n11 + n22))
    dF[:, :n11, :n11] = df11
    dF[:, :n11, n11:] = df12
    dF[:, n11:, n11:] = df22

    return F, dF
Example #4
0
    def test_simple(self):
        a = np.array([[1, 2], [0, 4]])
        b = np.array([[5, 6], [0, 8]])
        c = np.array([[9, 10], [11, 12]])

        x = solve_sylvester(a, b, c)
        assert_array_almost_equal(np.dot(a, x) + np.dot(x, b), c)

        # Test with a complex form, but effectively same matrices
        ac = complex(0, 1)*a
        bc = complex(0, 1)*b
        cc = complex(0, 1)*c

        x = solve_sylvester(ac, bc, cc)
        assert_array_almost_equal(np.dot(ac, x) + np.dot(x, bc), cc)
Example #5
0
def expm_triu(
    a11: np.ndarray, a12: np.ndarray, a22: np.ndarray, dt: float, f11: np.ndarray, f22: np.ndarray,
) -> np.ndarray:
    """Compute the exponential of the upper triangular matrix A using the Parlett's method

    .. math::

        F = \\exp(A) = \\exp \\left(\\begin{bmatrix} a11 & a12 \\\\ 0 & a22 \\end{bmatrix} \\right)
        = \\begin{bmatrix} f11 & f12 \\\\ 0 & f22 \\end{bmatrix}

    Args:
        a11: Upper left input matrix
        a12: Upper right input matrix
        a22: Lower right input matrix
        dt: Sampling time
        f11: Upper left output matrix
        f22: Lower right output matrix

    Returns:
        F: Matrix exponential of A
    """
    F = block_diag(f11, f22)
    dim = a12.shape[0]
    F[:dim, dim:] = solve_sylvester(a11, -a22, f11 @ a12 - a12 @ f22)
    return F
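A hedged check of expm_triu against scipy.linalg.expm, assuming f11 and f22 are the exponentials of the diagonal blocks (dt is passed through but unused by the snippet above); the spectrum of a22 is shifted so the Sylvester equation for the off-diagonal block is solvable.
import numpy as np
from scipy.linalg import expm, block_diag, solve_sylvester

rng = np.random.default_rng(0)
a11 = rng.standard_normal((3, 3))
a22 = rng.standard_normal((2, 2)) + 5 * np.eye(2)   # keep eig(a11) and eig(a22) disjoint
a12 = rng.standard_normal((3, 2))

F = expm_triu(a11, a12, a22, dt=1.0, f11=expm(a11), f22=expm(a22))
A = np.block([[a11, a12], [np.zeros((2, 3)), a22]])
assert np.allclose(F, expm(A))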
Example #6
0
    def test_nonsquare(self):
        a = np.array([[8, 1, 6], [3, 5, 7], [4, 9, 2]])
        b = np.array([[2, 3], [4, 5]])
        c = np.array([[1, 2], [3, 4], [5, 6]])

        x = solve_sylvester(a, b, c)
        assert_array_almost_equal(np.dot(a, x) + np.dot(x, b), c)
Example #7
0
def get_sdrgain_upd(amat, wnrm='fro', maxeps=None,
                    baseA=None, baseZ=None, baseGain=None,
                    maxfac=None):

    deltaA = amat - baseA
    # nda = npla.norm(deltaA, ord=wnrm)
    # nz = npla.norm(baseZ, ord=wnrm)
    # na = npla.norm(baseA, ord=wnrm)
    # import ipdb; ipdb.set_trace()

    epsP = spla.solve_sylvester(amat, -baseZ, -deltaA)
    # print('debugging!!!')
    # epsP = 0*amat
    eps = npla.norm(epsP, ord=wnrm)
    print('|amat - baseA|: {0} -- |E|: {1}'.
          format(npla.norm(deltaA, ord=wnrm), eps))
    if maxeps is not None:
        if eps < maxeps:
            updGaint = npla.solve(epsP+np.eye(epsP.shape[0]), baseGain.T)
            return updGaint.T, True
    elif maxfac is not None:
        if (1+eps)/(1-eps) < maxfac and eps < 1:
            updGaint = npla.solve(epsP+np.eye(epsP.shape[0]), baseGain.T)
            return updGaint.T, True

    return None, False
Example #8
0
def _w_update(x, w, Omega, b, rho, gamma, maxepochs=25):
    theta = w.copy()
    n = x.shape[0]
    z = w.copy()  #numpy.zeros(shape=w.shape)
    u = numpy.zeros(shape=w.shape)

    # cache multiplications
    xb = x.T.dot(b)
    xx = x.T.dot(x)
    for l in range(maxepochs):
        zprev = z.copy()
        # updates
        theta = solve_sylvester(rho * xx + rho * numpy.eye(xx.shape[0]),
                                2 * Omega, rho * xb + rho * (z - u))
        z = softthreshold(theta + u, 1. * gamma / rho)
        u = u + theta - z
        # compute residuals
        dualresid = numpy.linalg.norm(-rho * (z - zprev), 'fro')
        primalresid = numpy.linalg.norm(theta - z, 'fro')
        epspri = n * EPSABS + EPSREL * numpy.max(
            [numpy.linalg.norm(theta, 'fro'),
             numpy.linalg.norm(z, 'fro'), 0])
        epsdual = n * EPSABS + EPSREL * numpy.linalg.norm(rho * u, 'fro')
        # check for convergence
        if (dualresid < epsdual) and (primalresid < epspri):
            break

    return z
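The snippet relies on module-level softthreshold, EPSABS and EPSREL; a hedged smoke-test driver with minimal stand-ins for those names (not the original project's definitions) could look like this:
import numpy
from scipy.linalg import solve_sylvester

EPSABS, EPSREL = 1e-4, 1e-3

def softthreshold(a, kappa):
    # elementwise soft-thresholding used in the ADMM z-update
    return numpy.sign(a) * numpy.maximum(numpy.abs(a) - kappa, 0.0)

x = numpy.random.randn(50, 10)
b = numpy.random.randn(50, 3)
Omega = numpy.eye(3)                                  # stand-in task-relationship matrix
w = _w_update(x, numpy.zeros((10, 3)), Omega, b, rho=1.0, gamma=0.1)
print(w.shape)                                        # (10, 3)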
Example #9
0
def subdivision_eigenvectors_lower_block(N): # U1 in Stam's paper
    # Sylvester Equation:
    # A * X + X * B = Q

    # From Stam:
    # u1 * sigma - s12 * u1 = s11 * u0

    # Fit to Sylvester:
    # (u1 * sigma - s12 * u1)^T = (s11 * u0)^T
    # (u1 * sigma)^T + (-1 * s12 * u1)^T = (s11 * u0)^T
    # Transpose distributes over multiplication: (A * B)^T = B^T * A^T
    # sigma is diagonal : (sigma^T = sigma)
    # sigma * u1^T + u1^T * (-1 * s12)^T = (s11 * u0)^T
    # |___|   |__|   |__|   |__________|   |__________|
    #   A       X      X          B              Q

    sigma = np.diag(np.asarray(subdivision_eigenvalues(N)))
    s12 = np.asarray(s12_matrix())
    s11 = np.asarray(s11_matrix(N))
    u0 = np.asarray(subdivision_eigenvectors(N))

    A = sigma                     # (M, M)
    B = (-1 * s12).transpose()    # (N, N)
    Q = (s11.dot(u0)).transpose() # (M, N)
    X = la.solve_sylvester(A, B, Q).transpose() # (M, N)

    return X.tolist()
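A small self-contained check of the transpose trick described in the comments, using random stand-in matrices rather than Stam's actual blocks (sigma is kept diagonal and its entries well separated from the eigenvalues of s12 so the Sylvester equation is solvable):
import numpy as np
import scipy.linalg as la

rng = np.random.default_rng(1)
sigma = np.diag(rng.uniform(2.0, 3.0, size=4))       # diagonal eigenvalue matrix
s12 = 0.1 * rng.standard_normal((4, 4))
s11 = rng.standard_normal((4, 4))
u0 = rng.standard_normal((4, 4))

u1 = la.solve_sylvester(sigma, (-s12).T, (s11 @ u0).T).T
assert np.allclose(u1 @ sigma - s12 @ u1, s11 @ u0)   # u1*sigma - s12*u1 = s11*u0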
Example #10
0
def sylvester_project(M, A, B, r):
  """
  Project via SVD on error matrix + solving the Sylvester equation.
  """
  t = time.time()
  E = np.dot(A, M) - np.dot(M, B)
  G,H,dr = get_GH(E)

  G_r = G[:, 0:r]
  H_r = H[:, 0:r]

  lowrank = np.dot(G_r, H_r.T)

  print('norm(E-lowrank): ', np.linalg.norm(E-lowrank))

  # Sylvester solve
  M_class = solve_sylvester(A, -B, lowrank)

  print('rank(lowrank): ', np.linalg.matrix_rank(lowrank))
  print('rank(A): ', np.linalg.matrix_rank(A))
  print('rank(B): ', np.linalg.matrix_rank(B))
  print('norm(M-M_class): ', np.linalg.norm(M-M_class))

  E_class = np.dot(A, M_class) - np.dot(M_class, B)
  print('rank of E_class',np.linalg.matrix_rank(E_class))
  #print 'eigvals of E_class',np.linalg.eigvals(E_class)

  print('time of sylv project: ', time.time() - t)

  return M_class
Example #11
0
def lyapunov(A, Q=None):
    """
    Solve the equation :math:`A^T X + X A = -Q`.
    default Q is set to I

    :param A: system matrix
    :type A: np.ndarray | np.matrix | List[List]
    :param Q: matrix
    :type Q: np.ndarray | np.matrix

    :return: the matrix X if there is a solution
    :rtype: np.ndarray | None
    """
    A = np.array(A)
    if Q is None:
        Q = np.eye(A.shape[0])
    try:
        # continuous lyapunov equation is a sylvester equation
        X = solve_sylvester(A.T, A, -Q)
        if config['use_numpy_matrix']:
            return np.mat(X)
        else:
            return X
    except LinAlgError:
        return None
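A quick sanity check of lyapunov() above; a sketch only, since it assumes numpy, scipy.linalg.solve_sylvester and the original module's config dict are in scope, and uses a Hurwitz A so a solution exists.
import numpy as np

A = np.array([[-1.0, 2.0],
              [ 0.0, -3.0]])
X = np.asarray(lyapunov(A))                # Q defaults to the identity
assert np.allclose(A.T @ X + X @ A, -np.eye(2))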
Example #12
0
def adjust_reads(mat, column_spread=column_spread, row_spread=row_spread, cutoff=c, r=n_rows, c=n_cols):
    mat = np.array(mat).flatten()
    mat = mat.reshape(r,c)
    adjusted_reads = np.rint(solve_sylvester(column_spread,row_spread,mat))
    # A lower bound cutoff removes false positives (unfortunately also remove true reads with low counts in that cell)
    adjusted_reads[adjusted_reads < cutoff] = 0
    return adjusted_reads
Example #13
0
 def dy_Z(z_i):
     DFi = self.DF(z_i)[n:,:]
     df = self.df(z_i)
     A = np.linalg.solve(DFi[:,y] + DFi[:,e].dot(df),DFi[:,v].dot(Ivy))
     B = np.linalg.inv(self.dZ_Z)
     C = -np.linalg.solve(DFi[:,y] + DFi[:,e].dot(df),DFi[:,Y].dot(self.dY_Z)+DFi[:,Z].dot(V)).dot(B)
     return solve_sylvester(A,B,C)
Example #14
0
def graph_sc(X, D, Lc, beta=0.6, rho=0.01, T=None, sc_n_iter=10, **args):

    # Update code
    A = sparse_decode(X, D.T, sc_mode=3, lambda1=T).T  
    
    Z = A.copy()
    U = 0
    
    i = 0
    Lm1 = np.dot(D.T, D) + rho*np.identity(D.shape[1])
    Lm2 = beta*Lc.toarray() if  sparse.issparse(Lc) else beta*Lc
    P3  = np.dot(D.T, X)
    while i < sc_n_iter:
        i += 1
        Rm = P3 + rho*(Z - U)  
        A  = linalg.solve_sylvester(Lm1, Lm2, Rm)
        
        Z   = A + U 
        ids = np.argsort(np.abs(Z), 0)[::-1]
        ids = ids[T:, :].T
        
        for k in range(Z.shape[1]):
            Z[ids[k], k] = 0
            
        U = U + A - Z
        
        for k in range(Z.shape[1]):
            wk       = np.argwhere(Z[:,k] != 0).squeeze()
            Z[wk, k] = np.dot(linalg.pinv(D[:,wk]), X[:,k])
            
    return Z
Example #15
0
def ridge_regression_modified(X, Y, L, D):
    """
    Computes the solution of the multivariate problem given in question 1.4.
    Takes as input X and Y arrays, the parameter lambda, and the penalty
    matrix D. Returns the optimal values for beta0 and b.
    """
    # Define N, d, and q
    N = X.shape[1]
    d = X.shape[0]
    q = Y.shape[0]
    # Convert X and Y arrays
    X = np.transpose(np.vstack((np.ones(N), X)))
    Y = np.transpose(Y)
    # Calculate the optimal parameters for b
    b_opt = solve_sylvester(np.matmul(
        np.transpose(X), X), L * D, np.matmul(np.transpose(X), Y))
    # Calculate average of x1,...,xN
    total_X = np.zeros(d + 1)
    for i, j in enumerate(X):
        total_X += j
    x_bar = total_X / N
    # Calculate average of y1,..,yN
    total_Y = np.zeros(q)
    for i, j in enumerate(Y):
        total_Y += j
    y_bar = total_Y / N
    # Calculate the optimal parameters for beta0
    beta_opt = y_bar - np.matmul(np.transpose(b_opt), x_bar)
    return beta_opt, b_opt
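A hedged usage sketch with random stand-in data; judging from the Sylvester shapes, X should be d x N, Y should be q x N and the penalty matrix D must be q x q (L is the lambda weight), with numpy and solve_sylvester imported at module level as the function assumes.
import numpy as np

d, q, N = 3, 2, 50
X = np.random.randn(d, N)
Y = np.random.randn(q, N)
D = np.eye(q)
beta0, b = ridge_regression_modified(X, Y, L=0.5, D=D)
print(beta0.shape, b.shape)   # (q,) and (d + 1, q), i.e. (2,) and (4, 2)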
Example #16
0
def get_sdrefb_upd(amat, t, fbtype=None, wnrm=2,
                   B=None, R=None, Q=None, maxeps=None,
                   baseA=None, baseZ=None, baseP=None, maxfac=None, **kwargs):
    if fbtype == 'sylvupdfb' or fbtype == 'singsylvupd':
        if baseP is not None:
            deltaA = amat - baseA
            epsP = spla.solve_sylvester(amat, -baseZ, -deltaA)
            eps = npla.norm(epsP, ord=wnrm)
            print('|amat - baseA|: {0} -- |E|: {1}'.
                  format(npla.norm(deltaA, ord=wnrm), eps))
            if maxeps is not None:
                if eps < maxeps:
                    opepsPinv = npla.inv(epsP+np.eye(epsP.shape[0]))
                    return baseP.dot(opepsPinv), True
            elif maxfac is not None:
                if (1+eps)/(1-eps) < maxfac and eps < 1:
                    opepsPinv = npla.inv(epsP+np.eye(epsP.shape[0]))
                    return baseP.dot(opepsPinv), True

    # otherwise: (SDRE feedback or `eps` too large already)
    # curX = spla.solve_continuous_are(amat, B, Q, R)
    # if fbtype == 'sylvupdfb' or fbtype == 'singsylvupd':
    #     logger.debug('in `get_fb_dict`: t={0}: eps={1} too large, switch!'.
    #                  format(t, eps))
    # else:
    #     logger.debug('t={0}: computed the SDRE feedback')
    return None, False
Example #17
0
def _w_update(x, w, Omega, b, rho, gamma, maxepochs=25):
    theta = w.copy()
    n = x.shape[0]
    z = w.copy() #numpy.zeros(shape=w.shape)
    u = numpy.zeros(shape=w.shape)

    # cache multiplications
    xb = x.T.dot(b)
    xx = x.T.dot(x)
    for l in range(maxepochs):
        zprev = z.copy()
        # updates
        theta = solve_sylvester(rho*xx + rho * numpy.eye(xx.shape[0]), 2*Omega, rho*xb + rho*(z-u))
        z = softthreshold(theta + u, 1.*gamma/rho)
        u = u + theta - z
        # compute residuals
        dualresid = numpy.linalg.norm(-rho * (z - zprev), 'fro')
        primalresid = numpy.linalg.norm(theta - z, 'fro')
        epspri = n * EPSABS  + EPSREL * numpy.max([numpy.linalg.norm(theta, 'fro'), numpy.linalg.norm(z, 'fro'), 0])
        epsdual = n * EPSABS + EPSREL * numpy.linalg.norm(rho*u, 'fro')
        # check for convergence 
        if (dualresid < epsdual) and (primalresid < epspri):
            break

    return z
Example #18
0
def SAE(X, S, lamb):

    A = S.dot(S.T)
    B = lamb * (X.dot(X.T))
    C = (1 + lamb) * (S.dot(X.T))
    W = linalg.solve_sylvester(A, B, C)
    return W
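A hedged usage sketch for SAE above, where X is a d x N data matrix, S a k x N semantic matrix and W the resulting k x d projection (random stand-ins, lamb chosen arbitrarily; assumes scipy's linalg is imported at module level as the snippet requires):
import numpy as np

k, d, N = 5, 8, 100
X = np.random.randn(d, N)
S = np.random.randn(k, N)
W = SAE(X, S, lamb=0.2)
# W solves (S S^T) W + W (lamb X X^T) = (1 + lamb) S X^T
assert np.allclose(S @ S.T @ W + 0.2 * (W @ X @ X.T), 1.2 * S @ X.T)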
Example #19
0
 def sylvester_propagate(self, a, b, het, phi, y_left_bound, y_right_bound,
                         z_left_bound, z_right_bound):
     matrix_c = a * phi @ self.matrix_z + a * self.matrix_y @ phi + phi
     res = la.solve_sylvester(b * self.matrix_y,
                              b * self.matrix_z + np.eye(self.n_z),
                              matrix_c)
     return res
Example #20
0
def SAE(X, S, lamb):

    A = np.nan_to_num(S.dot(S.T))
    B = np.nan_to_num(lamb * (X.dot(X.T)))
    C = np.nan_to_num((1 + lamb) * (S.dot(X.T)))
    W = linalg.solve_sylvester(A, B, C)
    return W
Example #21
0
def _g2sSylvester(A, B, F, G, u, v):
    """
    % Purpose : Solves the semi-definite Sylvester Equation of the form
    %   A'*A * Phi + Phi * B'*B - A'*F - G*B = 0,
    %   Where the null vectors of A and B are known to be
    %   A * u = 0
    %   B * v = 0
    %
    % Use (syntax):
    %   Phi = g2sSylvester( A, B, F, G, u, v )
    %
    % Input Parameters :
    %   A, B, F, G := Coefficient matrices of the Sylvester Equation
    %   u, v := Respective null vectors of A and B
    %
    % Return Parameters :
    %   Phi := The minimal norm solution to the Sylvester Equation
    %
    % Description and algorithms:
    %   The rank deficient Sylvester equation is solved by means of Householder
    %   reflections and the Bartels-Stewart algorithm.  It uses the MATLAB
    %   function "lyap", in reference to Lyapunov Equations, a special case of
    %   the Sylvester Equation.
    """

    # Householder vectors
    m, n = len(u), len(v)

    u[0] += norm(u)
    u *= np.sqrt(2) / norm(u)

    v[0] += norm(v)
    v *= np.sqrt(2) / norm(v)

    # Apply householder updates
    A -= np.dot(np.dot(A, u), u.T)
    B -= np.dot(np.dot(B, v), v.T)
    F -= np.dot(np.dot(F, v), v.T)
    G -= np.dot(u, (np.dot(u.T, G)))

    # Solve the system of equations
    phi = np.zeros((m, n))
    phi[0, 1:] = mrdivide(G[0, :], B[:, 1:].T)
    phi[1:, 0] = mldivide(A[:, 1:], F[:, 0].T)
    phi[1:, 1:] = solve_sylvester(
        np.dot(A[:, 1:].T, A[:, 1:]),
        np.dot(B[:, 1:].T, B[:, 1:]),
        np.dot(A[:, 1:].T, F[:, 1:]) + np.dot(G[1:, :], B[:, 1:]),
    )

    # Invert the householder updates
    if u.dtype == "complex128":
        u = np.sqrt(np.real(u) ** 2 + np.imag(u) ** 2)
    if v.dtype == "complex128":
        v = np.sqrt(np.real(v) ** 2 + np.imag(v) ** 2)
    phi -= np.dot(u, (np.dot(u.T, phi)))
    phi -= np.dot(np.dot(phi, v), v.T)
    # phi += np.dot(u, np.dot(np.dot(u.T, (np.dot(phi, v))), v.T))
    return phi
Example #22
0
    def fit_dim_red(self, data, r, eigen=None, verbose=False):
        """
        Fits the emotion transfer model by a closed form solution
        with low rank matrix A based on SVD of the data covariance.
        If Y^T Y = V S V^T then uses A = V diag( eigen, 0) V^T
        Parameters
        ----------
        data: torch.Tensor of shape (n_samples, n_emotions, n_landmarks)
           Input vector of data
        r: Int
            Rank of A
        eigen: torch.Tensor of shape (r)
            Eigenvalues of A to consider. Default None -> use all eigen=1
        Returns
        -------
        Nothing
        """
        if verbose:
            print('Computing Gram matrices')
        self.initialise(data)
        self.model.compute_gram_train()

        tmp = self.model.G_xt + self.lbda * self.model.n * self.model.m * \
            torch.eye(self.model.n * self.model.m)

        if verbose:
            print('Computing SVD of empirical covariance')
        cor = 1 / self.model.n * \
            self.model.y_train.reshape(
                -1, self.model.output_dim).T @ self.model.y_train.reshape(-1, self.model.output_dim)
        u, d, v = torch.svd(cor)

        if verbose:
            print("Solving the associated linear system")

        identity_r = torch.diag_embed(torch.Tensor([1 for i in range(r)] +
                                                   [0 for i in range(self.model.output_dim - r)]))
        proj_r = identity_r[:, :r]

        if eigen is None:

            gamma_r, _ = torch.solve(
                self.model.y_train.reshape(-1, self.model.output_dim) @ v @ proj_r, tmp)
            self.model.alpha = gamma_r @ proj_r.T @ v.T
            self.model.A = v @ identity_r @ v.T

        else:
            B = torch.inverse(torch.diag(eigen)).numpy()
            Q = (self.model.y_train.reshape(-1, self.model.output_dim) @ v @ proj_r).numpy() @ B
            gamma_r = solve_sylvester(self.model.G_xt,
                                       self.lbda * self.model.n * self.model.m * B,
                                       Q)
            self.model.alpha = torch.from_numpy(
                gamma_r) @ torch.diag(eigen) @ proj_r.T @ v.T
            self.model.A = v @ proj_r @ torch.diag(eigen) @ proj_r.T @ v.T

        if verbose:
            print('Coefficients alpha fitted, empirical risk=',
                  self.training_risk())
Example #23
0
def RW_NN_ALM(alpha, Z, n, m, W1, W2, flag, struc):
    D_rec = lambda_k_1 = lambda_k_2 = E = A = D = np.zeros(
        (n, m + 1)
    )  #Initialize Lagrange multiplier and guesses for variables to 0 matrix
    W1_sq_inv = np.linalg.inv(np.dot(W1, W1))
    W2_sq = np.dot(W2, W2)
    mu_init = 1.05
    mu_k = mu_init
    gamma = 1
    k = 0
    eps = 0.0001
    step_A = step_E = np.matrix(np.identity(n))
    E = np.zeros((n, m + 1))
    while (
            k < 700
    ):  # and np.linalg.norm(step_E) > eps and np.linalg.norm(step_A) > eps ):
        WAW = np.dot(np.dot(W1, A), W2)
        D_new = soft_threshold(
            np.subtract(WAW, lambda_k_2 * (1 / (1.0 * mu_k))),
            1 / (1.0 * mu_k))
        step_D = np.subtract(D, D_new)
        D = D_new
        E_new = new_Error(alpha, mu_k, lambda_k_1, Z, A)

        # for i in range(np.shape(E)[0]):
        # 	for j in range(np.shape(E)[1]):
        # 		E_new[i][j]=soft_threshold(np.subtract(np.subtract(Z,A),1/(1.0*mu_k)*lambda_k_1), alpha/mu_k)[i][j]
        E_new = np.matrix(E_new)
        step_E = np.subtract(E, E_new)
        E = E_new
        if (struc == 1):
            for i in range(np.shape(E)[0]):
                for j in range(i):
                    E[i, j] = 0  # Projection

        RHS = np.add((1 / (1.0 * mu_k)) *
                     np.add(lambda_k_1, np.dot(np.dot(W1, lambda_k_2), W2)),
                     np.add(np.subtract(Z, E), np.dot(np.dot(W1, D), W2)))
        A_new = spla.solve_sylvester(W1_sq_inv, W2_sq, np.dot(W1_sq_inv, RHS))
        #T1 = np.dot(np.dot(W1, np.add (np.subtract(np.subtract(Z, E) , A) , 1/(1.0*mu_k)*lambda_k_1) ), W2 )
        #T2 = np.add (np.subtract(D,WAW) , 1/(1.0*mu_k)*lambda_k_2)
        #A_new = A + gamma*np.add(T1,T2)
        step_A = np.subtract(A, A_new)
        A = A_new
        lambda_k_1 = np.add(lambda_k_1,
                            mu_k * (np.subtract(np.subtract(Z, A), E)))
        lambda_k_2 = np.add(lambda_k_2, mu_k * (np.subtract(D, WAW)))
        mu_k = mu_k * mu_init
        k = k + 1
    # 	U,S,V=np.linalg.svd(A)
    # 	s=S[len(S)-1]
    # 	A=soft_threshold(A,s)

    if (flag == 1):
        print "alpha", alpha, "iterations", k, "error A", np.linalg.norm(
            A), "error norm E", np.linalg.norm(
                E), "rank A", np.linalg.matrix_rank(A)
    WAW = np.dot(np.dot(W1, np.subtract(Z, E)), W2)
    return A, E, flag, WAW, np.subtract(Z, E)
Example #24
0
def surf_height(surf_normal, penalty, estimate=0):
    """ this function reconstruct the surface using a regularized least-square
    method.
    surface_normal is the heightxwidthx3 array that stores the surface normal
    at each point
    penalty is the value of lambda"""
    
    
    def D_matrix(size):
        D_mat = np.zeros(shape=(size,size))
    
        D_mat[0,0] = -3
        D_mat[0,1] = 4
        D_mat[0,2] = -1
        D_mat[-1,-3] = 1
        D_mat[-1,-2] = -4
        D_mat[-1,-1] = 3

        for row in range(1,size-1):
            D_mat[row,row-1] = -1
            D_mat[row,row+1] = 1
    
        D_mat = D_mat/2
        return D_mat
       
    surf_normal[np.isnan(surf_normal)] = 0
    y,x = np.nonzero(surf_normal[:,:,0])
    left = np.min(x)
    right = np.max(x)
    top = np.min(y)
    bottom = np.max(y)
    surf_normal = surf_normal[top:bottom+1,left:right+1,:]
    #return surf_normal
    
    Zx = -surf_normal[:,:,0]
    Zy = surf_normal[:,:,1]

    height,width,useless = surf_normal.shape
    
    if estimate == 0:
        estimate = np.zeros(shape=(height, width))
     
    Dy = D_matrix(height)
    Dx = D_matrix(width)
    
    A = np.dot(Dy.T,Dy) + penalty**2*np.identity(height)
    B = np.dot(Dx.T,Dx) + penalty**2*np.identity(width)
    Q = np.dot(Dy.T,Zy )+ np.dot(Zx,Dx)+2*penalty**2*estimate
    
    Z = spl.solve_sylvester(A,B,Q)
    
    min_height = np.min(Z)
    Z = Z - min_height 
    
    Z = Z/masked_image[top:bottom+1,left:right+1]
    Z[Z==float('inf')] = np.nan
    Z[Z==(-float('inf'))] = np.nan

    return Z
Example #25
0
def get_mapping(ts_cls):

    X = pd.DataFrame()
    S = np.zeros((n_att, 1))
    #labels = pd.DataFrame()
    #print (ts_cls)
    tr_cls = [x for x in all_cls if (x not in ts_cls)]
    #print (tr_cls)
    #print (tr_cls)

    for cls in tr_cls:
        #print ('class', cls)
        df = data[cls]
        #attribute_mat = np.array([])
        #print (cls)
        attribute_vec = aam[class_names[cls]]
        #print (cls)
        m1 = df.shape[0]
        #print (m1)
        attribute_mat = np.repeat(np.array(attribute_vec).reshape(n_att, 1),
                                  m1,
                                  axis=1)

        #attribute_mat = attribute_mat.reshape(n_att, m1)

        #y = np.ones((m1, 1)) * cls
        #y[:, i] = 1
        #y_df = pd.DataFrame(y)
        #Y = Y.append(y_df, ignore_index = True)

        # print (m)
        S = np.concatenate((S, attribute_mat), axis=1)
        X = X.append(df, ignore_index=True)
        #labels = labels.append(y_df, ignore_index = True)

    #print (X.shape)
    X = X.T
    X = np.array(X)
    S = S[:, 1:]
    #labels = np.array(labels)

    (k, n) = S.shape
    #print (k, n)
    (d, n) = X.shape
    #print (d, n)
    #print (labels.shape)
    labda = 1
    A = np.matmul(S, S.T)
    #print (A.shape)
    B = labda * np.matmul(X, X.T)
    #print (B.shape)
    C = (1 + labda) * np.matmul(S, X.T)
    #print (C.shape)

    W = solve_sylvester(A, B, C)
    return (W)
Example #26
0
def transformation(users: List[int],
                   representation: np.ndarray, entity_embeddings: np.ndarray,
                   ratings: np.ndarray, alpha=1.0) -> np.ndarray:
    try:
        _A = representation.T @ representation
        _B = alpha * inv(entity_embeddings.T @ entity_embeddings)
        _Q = representation.T @ ratings[users] @ entity_embeddings @ inv(entity_embeddings.T @ entity_embeddings)
        return solve_sylvester(_A, _B, _Q)
    except LinAlgError as err:
        return np.zeros((representation.shape[1], entity_embeddings.shape[1]))
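A hedged usage sketch for transformation() with random stand-in shapes (10 users, 40 items, 6-dimensional user representations, 4-dimensional entity embeddings); numpy, inv, solve_sylvester and LinAlgError are assumed to be imported by the original module.
import numpy as np

users = list(range(10))
representation = np.random.randn(10, 6)
entity_embeddings = np.random.randn(40, 4)
ratings = np.random.randn(10, 40)

T = transformation(users, representation, entity_embeddings, ratings, alpha=0.5)
print(T.shape)   # (6, 4)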
Example #27
0
def SAE(X, S, lamd):
    A = np.dot(S.T, S)
    B = lamd * np.dot(X.T, X)
    C = (1 + lamd) * np.dot(S.T, X)

    try:
        W = solve_sylvester(A, B, C)
    except Exception:
        print('Solver failed')
        return None
    return W
Example #28
0
def np_solve_sylvester(tensors, sign_matrices, d_sign_matrices):
    res = np.empty_like(tensors)
    for k in range(tensors.shape[0]):
        tensor = tensors[k]
        sign_matrix = sign_matrices[k]
        d_sign_matrix = d_sign_matrices[k]
        q = np.dot(tensor, d_sign_matrix) - np.dot(d_sign_matrix, tensor)
        a = sign_matrix
        b = -sign_matrix
        res[k] = linalg.solve_sylvester(a, b, q)
    return res
Example #29
0
def sylvester(M, N, n, r):
    # Generate random rank r error matrix
    G = np.random.random((n, r))
    H = np.random.random((n, r))
    GH = np.dot(G, H.T)

    # Solve Sylvester equation to recover A
    # Such that MA - AN = GH^T
    A = solve_sylvester(M, -N, GH)

    E = np.dot(M, A) - np.dot(A, N)

    return A, G, H
Example #30
0
File: SAE.py Project: mrzhang11/zsl
    def _calculate_w(self, s, x, lambda_val):
        """
        :param s: array, (n, k)
        :param x:  array, (n, d)
        :param lambda_val: hyper-parameter
        :return: w: array, (k, d)
        """
        A = s.T @ s  # k*k
        B = lambda_val * x.T @ x  # d*d
        C = (1 + lambda_val) * s.T @ x  # k*d
        w = solve_sylvester(A, B, C)  # k*d

        return w
Example #31
0
    def sae(vis_data, sem_data, lambda_):
        """
        Computes the weight matrix that estimates the latent space of the Semantic Auto-encoder.

        @param vis_data: dxN data matrix
        @param sem_data: kxN semantic matrix
        @param lambda_: regularisation parameter
        @return: kxd projection matrix
        """
        a = sem_data.dot(sem_data.transpose())
        b = lambda_ * vis_data.dot(vis_data.transpose())
        c = (1 + lambda_) * sem_data.dot(vis_data.transpose())

        return solve_sylvester(a, b, c)
Example #32
0
def sylvester(A, B, n, r):
  # Generate random rank r error matrix
  G = np.random.random((n, r))
  H = np.random.random((n, r))
  GH = np.dot(G,H.T)

  # Solve Sylvester equation to recover M
  # Such that AM - MB = GH^T
  M = solve_sylvester(A, -B, GH)

  E = np.dot(A,M) - np.dot(M,B)

  assert np.linalg.norm(E - GH) <= 1e-10

  return M
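Hedged usage: M is recovered so that its Sylvester residual AM - MB equals the random rank-r matrix GH^T (independent random A and B almost surely share no eigenvalues, so the equation is solvable):
import numpy as np

np.random.seed(0)
n, r = 6, 2
A = np.random.random((n, n))
B = np.random.random((n, n))
M = sylvester(A, B, n, r)
print(np.linalg.matrix_rank(np.dot(A, M) - np.dot(M, B)))   # r, i.e. 2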
Example #33
0
def SAE(X, S, lamb):
    """
    SAE - Semantic Autoencoder
    :param X: d x N data matrix
    :param S: k x N semantic matrix
    :param lamb: regularization parameter
    :return: W -> k x d projection function
    """

    A = np.dot(S, S.T)
    B = lamb * np.dot(X, X.T)
    C = (1 + lamb) * np.dot(S, X.T)
    W = solve_sylvester(A, B, C)

    return W
Example #34
0
def SAE(X, S, lam):
    # For the error: MemoryError
    # We use csc_matrix in place of ndarray
    S = scipy.transpose(S)
    S = sparse.csc_matrix(S, dtype='float16')
    A = np.dot(S, S.T)
    B = lam * np.dot(X, X.T)
    C = (1 + lam) * np.dot(S, X.T)

    A = sparse.csc_matrix(A).toarray()
    B = sparse.csc_matrix(B).toarray()
    C = sparse.csc_matrix(C).toarray()

    W = linalg.solve_sylvester(A, B, C)  #Solve the Sylvester equation
    return W
Example #35
0
 def fit(self, X, y, sample_weight=None):
     lambd = self.lambd
     X, y, X_offset, y_offset, X_scale = self._preprocess_data(
         X,
         y,
         fit_intercept=self.fit_intercept,
         normalize=self.normalize,
         copy=self.copy_X,
         sample_weight=sample_weight)
     A = np.dot(y.T, y)
     B = lambd * np.dot(X.T, X)
     C = (1 + lambd) * np.dot(y.T, X)
     W = linalg.solve_sylvester(A, B, C)
     self.coef_ = W
     self._set_intercept(X_offset, y_offset, X_scale)
     return self
Example #36
0
    def find_W(self, X, S, ld):

        # INPUTS:
        # X: d x N - data matrix
        # S: Number of Attributes (k) x N - semantic matrix
        # ld: regularization parameter
        #
        # Return :
        # 	W: kxd projection matrix

        A = np.dot(S, S.T)
        B = ld * np.dot(X, X.T)
        C = (1 + ld) * np.dot(S, X.T)
        W = linalg.solve_sylvester(A, B, C)

        return W
Example #37
0
def remove_a12(As, n_stable):
    r"""Basis change to remove the (1, 2) block of the block-ordered real Schur matrix :math:`\mathbf{A}`

    Being :math:`\mathbf{A}_s\in\mathbb{R}^{m\times m}` a matrix of the form

    .. math:: \mathbf{A}_s = \begin{bmatrix} A_{11} & A_{12} \\ 0 & A_{22} \end{bmatrix}

    the (1,2) block is removed by solving the Sylvester equation

    .. math:: \mathbf{A}_{11}\mathbf{X} - \mathbf{X}\mathbf{A}_{22} + \mathbf{A}_{12} = 0

    used to build the change of basis

    .. math:: \mathbf{T} = \begin{bmatrix} \mathbf{I}_{s,s} & -\mathbf{X}_{s,u} \\ \mathbf{0}_{u, s}
        & \mathbf{I}_{u,u} \end{bmatrix}

    where :math:`s` and :math:`u` are the respective number of stable and unstable eigenvalues, such that

    .. math:: \mathbf{TA}_s\mathbf{T}^{-1} = \begin{bmatrix} A_{11} & \mathbf{0} \\ 0 & A_{22} \end{bmatrix}.

    Args:
        As (np.ndarray): Block-ordered real Schur matrix (can be built using
            :func:`sharpy.rom.utils.krylovutils.schur_ordered`).
        n_stable (int): Number of stable eigenvalues in ``As``.

    Returns:
        np.ndarray: Basis transformation :math:`\mathbf{T}\in\mathbb{R}^{m\times m}`.

    References:
        Jaimoukha, I. M., Kasenally, E. D.. Implicitly Restarted Krylov Subspace Methods for Stable Partial Realizations
        SIAM Journal of Matrix Analysis and Applications, 1997.
    """
    A11 = As[:n_stable, :n_stable]
    A12 = As[:n_stable, n_stable:]
    A22 = As[n_stable:, n_stable:]
    n = As.shape[0]

    X = sclalg.solve_sylvester(A11, -A22, -A12)

    T = np.block([[np.eye(n_stable), -X],
                  [np.zeros((n - n_stable, n_stable)),
                   np.eye(n - n_stable)]])

    T2 = np.eye(n, n_stable)
    # App = T2.T.dot(T.dot(As.dot(np.linalg.inv(T).dot(T2))))
    return T, X
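A minimal check of remove_a12 on a small block-ordered matrix (hypothetical data; assumes numpy as np and scipy.linalg as sclalg are imported as in the snippet):
import numpy as np

As = np.array([[-1.0, 0.5, 2.0],
               [ 0.0, -2.0, 1.0],
               [ 0.0, 0.0, 3.0]])   # two stable eigenvalues, one unstable
T, X = remove_a12(As, n_stable=2)
App = T @ As @ np.linalg.inv(T)
assert np.allclose(App[:2, 2:], 0.0)   # the (1, 2) block is gone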
Example #38
0
    def update(self, X, y, Omega):
        # save the previous W
        self.prev_w = self.values.copy()
        # Initialize Updates
        Theta = self.values.copy()  # dxk
        Z = self.values.copy()  ## warm start
        U = numpy.zeros(shape=Z.shape)

        # Cache multiplication
        XX = X.T.dot(X)  # dxd
        Xy = X.T.dot(y)  # dxk

        for j in range(self.w_epochs):
            Z_prev = Z.copy()

            # Update Theta
            C = Xy + self.rho * (Z - U)
            Theta = solve_sylvester(XX + self.rho * numpy.eye(XX.shape[0]),
                                    2 * Omega, C)
            Z = softthreshold(Theta + U, self.gamma / self.rho)
            U = U + Theta - Z

            # Compute residuals
            dualresid = numpy.linalg.norm(-self.rho * (Z - Z_prev), 2)
            primalresid = numpy.linalg.norm(Theta - Z, 2)
            epspri = X.shape[0] * EPSABS + EPSREL * numpy.max(
                [numpy.linalg.norm(Theta, 2),
                 numpy.linalg.norm(Z, 2), 0])
            epsdual = X.shape[0] * EPSABS + EPSREL * numpy.linalg.norm(
                self.rho * U, 2)

            # Confirm the dual is decreasing
            if (j > 0) and (prevdual < dualresid):
                #break
                pass
            prevdual = dualresid
            # Check for convergence
            if (dualresid < epsdual) and (primalresid < epspri):
                break

            if not self.quiet:
                print "Learning W, Iteration: %i, Rho: %2.2f, Dualresid: %2.4f, EPS_Dual: %2.4f, PriResid: %2.4f, EPS_Pri: %2.4f" % (
                    j, self.rho, dualresid, epsdual, primalresid, epspri)

        self.values = Z
Example #39
0
    def SAE(self, X, S, lamb):
        """
        SAE - Semantic Autoencoder
        :param X: d x N data matrix
        :param S: k x N semantic matrix
        :param lamb: regularization parameter
        :return: W -> k x d projection function
        """

        print("X shape", X.shape)
        print("S shape", S.shape)
        print("Lambda", lamb)

        A = np.dot(S.T, S)
        B = lamb * np.dot(X.T, X)
        C = (1 + lamb) * np.dot(S.T, X)
        W = solve_sylvester(A, B, C)

        return W
Example #40
0
    def update(self, X, y, Omega):
        # save the previous W
        self.prev_w = self.values.copy()
        # Initialize Updates
        Theta = self.values.copy()      # dxk
        Z = self.values.copy()  ## warm start
        U = numpy.zeros(shape=Z.shape)

        # Cache multiplication
        XX = X.T.dot(X)  # dxd
        Xy = X.T.dot(y)  # dxk

        for j in range(self.w_epochs): 
            Z_prev = Z.copy()

            # Update Theta
            C = Xy + self.rho * (Z - U)
            Theta = solve_sylvester(XX + self.rho * numpy.eye(XX.shape[0]), 2*Omega, C)
            Z = softthreshold(Theta + U, self.gamma/self.rho)
            U = U + Theta - Z

            # Compute residuals 
            dualresid = numpy.linalg.norm(-self.rho*(Z - Z_prev), 2)
            primalresid = numpy.linalg.norm(Theta - Z, 2)
            epspri = X.shape[0] * EPSABS + EPSREL * numpy.max([numpy.linalg.norm(Theta, 2), numpy.linalg.norm(Z, 2), 0])
            epsdual = X.shape[0] * EPSABS + EPSREL * numpy.linalg.norm(self.rho*U,2)

            # Confirm the dual is decreasing
            if (j > 0) and (prevdual < dualresid):
                #break
                pass
            prevdual = dualresid
            # Check for convergence
            if (dualresid < epsdual) and (primalresid < epspri):
                break

            if not self.quiet:
                print "Learning W, Iteration: %i, Rho: %2.2f, Dualresid: %2.4f, EPS_Dual: %2.4f, PriResid: %2.4f, EPS_Pri: %2.4f" % (j, self.rho, dualresid, epsdual, primalresid, epspri)

        self.values = Z
Example #41
0
 def check_case(self, a, b, c):
     x = solve_sylvester(a, b, c)
     assert_array_almost_equal(np.dot(a, x) + np.dot(x, b), c)
Example #42
0
def LinApp_Solve(AA,BB,CC,DD,FF,GG,HH,JJ,KK,LL,MM,WWW,TT,NN,Z0,Sylv):
    """
    This code takes Uhlig's original code and puts it in the form of a
    function.  This version outputs the policy function coefficients: PP,
    QQ and UU for X, and RR, SS and VV for Y.

    Inputs overview:
    The matrices of derivatives: AA - TT.
    The autoregression coefficient matrix NN from the law of motion for Z.
    Z0 is the Z-point about which the linearization is taken.  For
    linearizing about the steady state this is Zbar and normally Zbar = 0.
    Sylv is an indicator variable telling the program to use the built-in
    function sylvester() to solve for QQ and SS, if possible.  Default is
    to use Sylv=1.

    Parameters
    ----------
    AA : array_like, dtype=float, shape=(ny, nx)
        The matrix represented above by :math:`A`. It is the matrix of
        derivatives of the Y equations with respect to :math:`X_t`
    BB : array_like, dtype=float, shape=(ny, nx)
        The matrix represented above by :math:`B`. It is the matrix of
        derivatives of the Y equations with respect to
        :math:`X_{t-1}`.
    CC : array_like, dtype=float, shape=(ny, ny)
        The matrix represented above by :math:`C`. It is the matrix of
        derivatives of the Y equations with respect to :math:`Y_t`
    DD : array_like, dtype=float, shape=(ny, nz)
        The matrix represented above by :math:`D`. It is the matrix of
        derivatives of the Y equations with respect to :math:`Z_t`
    FF : array_like, dtype=float, shape=(nx, nx)
        The matrix represented above by :math:`F`. It is the matrix of
        derivatives of the model's characterizing equations with
        respect to :math:`X_{t+1}`
    GG : array_like, dtype=float, shape=(nx, nx)
        The matrix represented above by :math:`G`. It is the matrix of
        derivatives of the model's characterizing equations with
        respect to :math:`X_t`
    HH : array_like, dtype=float, shape=(nx, nx)
        The matrix represented above by :math:`H`. It is the matrix of
        derivatives of the model's characterizing equations with
        respect to :math:`X_{t-1}`
    JJ : array_like, dtype=float, shape=(nx, ny)
        The matrix represented above by :math:`J`. It is the matrix of
        derivatives of the model's characterizing equations with
        respect to :math:`Y_{t+1}`
    KK : array_like, dtype=float, shape=(nx, ny)
        The matrix represented above by :math:`K`. It is the matrix of
        derivatives of the model's characterizing equations with
        respect to :math:`Y_t`
    LL : array_like, dtype=float, shape=(nx, nz)
        The matrix represented above by :math:`L`. It is the matrix of
        derivatives of the model's characterizing equations with
        respect to :math:`Z_{t+1}`
    MM : array_like, dtype=float, shape=(nx, nz)
        The matrix represented above by :math:`M`. It is the matrix of
        derivatives of the model's characterizing equations with
        respect to :math:`Z_t`
    WWW : array, dtype=float, shape=(ny,)
        The vector of the numerical errors of first ny characterizing
        equations
    TT : array, dtype=float, shape=(nx,)
        The vector of the numerical errors of the next nx characterizing
        equations following the first ny equations
    NN : array_like, dtype=float, shape=(nz, nz)
        The autocorrelation matrix for the exogenous state vector z.
    Z0 : array, dtype=float, shape=(nz,)
        The Z-point about which the linearization is taken.  For linearizing
        about the steady state this is Zbar and normally Zbar = 0.
    Sylv: binary, dtype=int 
        An indicator variable telling the program to use the built-in
        function sylvester() to solve for QQ and SS, if possible.  Default is
        to use Sylv=1.

    Returns
    -------
    P : 2D-array, dtype=float, shape=(nx, nx)
        The matrix :math:`P` in the law of motion for endogenous state
        variables described above.
    Q : 2D-array, dtype=float, shape=(nx, nz)
        The matrix :math:`Q` in the law of motion for exogenous state
        variables described above.
    U : array, dtype=float, shape=(nx,)
        The vector of the constant term of the policy function for X, 
        the endogenous state variables
    R : 2D-array, dtype=float, shape=(ny, nx)
        The matrix :math:`R` in the law of motion for endogenous state
        variables described above.
    S : 2D-array, dtype=float, shape=(ny, nz)
        The matrix :math:`S` in the law of motion for exogenous state
        variables described above.
    V : array, dtype=float, shape=(ny,)
        The vector of the constant term of the policy function for Y, 
        the endogenous non-state variables
    References
    ----------
    .. [1] Uhlig, H. (1999): "A toolkit for analyzing nonlinear dynamic
       stochastic models easily," in Computational Methods for the Study
       of Dynamic Economies, ed. by R. Marimon, pp. 30-61. Oxford
       University Press.

    """
    #The original coding we did used the np.matrix form for our matrices so we
    #make sure to set our inputs to numpy matrices.
    AA = np.matrix(AA)
    BB = np.matrix(BB)
    CC = np.matrix(CC)
    DD = np.matrix(DD)
    FF = np.matrix(FF)
    GG = np.matrix(GG)
    HH = np.matrix(HH)
    JJ = np.matrix(JJ)
    KK = np.matrix(KK)
    LL = np.matrix(LL)
    MM = np.matrix(MM)
    NN = np.matrix(NN)
    WWW = np.array(WWW)
    TT = np.array(TT)
    Z0 = np.array(Z0)
    #Tolerance level to use
    TOL = .000001

    # Here we use matrices to get pertinent dimensions.
    nx = FF.shape[1]
    l_equ = CC.shape[0]
    ny = CC.shape[1]
    nz = min(NN.shape)

    # The following if and else blocks form the
    # Psi, Gamma, Theta Xi, Delta mats
    if l_equ == 0:
        if CC.any():
            # This block makes sure you don't throw an error with an empty CC.
            CC_plus = la.pinv(CC)
            CC_0 = _nullSpaceBasis(CC.T)
        else:
            CC_plus = np.mat([])
            CC_0 = np.mat([])
        Psi_mat = FF
        Gamma_mat = -GG
        Theta_mat = -HH
        Xi_mat = np.mat(vstack((hstack((Gamma_mat, Theta_mat)),
                        hstack((eye(nx), zeros((nx, nx)))))))
        Delta_mat = np.mat(vstack((hstack((Psi_mat, zeros((nx, nx)))),
                           hstack((zeros((nx, nx)), eye(nx))))))

    else:
        CC_plus = la.pinv(CC)
        CC_0 = _nullSpaceBasis(CC.T)
        if l_equ != ny:
            Psi_mat = vstack((zeros((l_equ - ny, nx)), FF \
                            - dot(dot(JJ, CC_plus), AA)))
            Gamma_mat = vstack((dot(CC_0, AA), dot(dot(JJ, CC_plus), BB) \
                        - GG + dot(dot(KK, CC_plus), AA)))
            Theta_mat = vstack((dot(CC_0, BB), dot(dot(KK, CC_plus), BB) - HH))
        else:
            CC_inv = la.inv(CC)
            Psi_mat = FF - dot(JJ.dot(CC_inv), AA)
            Gamma_mat = dot(JJ.dot(CC_inv), BB) - GG + dot(dot(KK, CC_inv), AA)
            Theta_mat = dot(KK.dot(CC_inv), BB) - HH
        Xi_mat = vstack((hstack((Gamma_mat, Theta_mat)), \
                            hstack((eye(nx), zeros((nx, nx))))))
        Delta_mat = vstack((hstack((Psi_mat, np.mat(zeros((nx, nx))))),\
                                hstack((zeros((nx, nx)), eye(nx)))))

    # Now we need the generalized eigenvalues/vectors for Xi with respect to
    # Delta. That is eVals and eVecs below.

    eVals, eVecs = la.eig(Xi_mat, Delta_mat)
    if npla.matrix_rank(eVecs) < nx:
        print("Error: Xi is not diagonalizable, stopping...")

    # From here to line 158 we Diagonalize Xi, form Lambda/Omega and find P.
    else:
        Xi_sortabs = np.sort(abs(eVals))
        Xi_sortindex = np.argsort(abs(eVals))
        Xi_sortedVec = np.array([eVecs[:, i] for i in Xi_sortindex]).T
        Xi_sortval = eVals[Xi_sortindex]
        Xi_select = np.arange(0, nx)
        if np.imag(Xi_sortval[nx - 1]).any():
            if (abs(Xi_sortval[nx - 1] - sp.conj(Xi_sortval[nx])) < TOL):
                drop_index = 1
                cond_1 = (abs(np.imag(Xi_sortval[drop_index-1])) > TOL)
                cond_2 = drop_index < nx
                while cond_1 and cond_2:
                    drop_index += 1
                if drop_index >= nx:
                    print("There is an error. Too many complex eigenvalues."
                          +" Quitting...")
                else:
                    print("Droping the lowest real eigenvalue. Beware of" +
                          " sunspots!")
                    Xi_select = np.array([np.arange(0, drop_index - 1),\
                                          np.arange(drop_index, nx + 1)])
        # Here Uhlig computes stuff if the user chose "Manual roots"; I skip it.
        if max(abs(Xi_sortval[Xi_select])) > 1 + TOL:
            print("It looks like we have unstable roots. This might not work...")
        if abs(max(abs(Xi_sortval[Xi_select])) - 1) < TOL:
            print("Check the model to make sure you have a unique steady" +
                  " state we are having problems with convergence.")
        Lambda_mat = np.diag(Xi_sortval[Xi_select])
        Omega_mat = Xi_sortedVec[nx:2 * nx, Xi_select]

        if npla.matrix_rank(Omega_mat) < nx:
            print("Omega matrix is not invertible, Can't solve for P; we" +
                    " proceed with the alternative, QZ-method, to get P...")

            #~~~~~~~~~ QZ-method codes from SOLVE_QZ ~~~~~~~~#
            Delta_up,Xi_up,UUU,VVV=la.qz(Delta_mat,Xi_mat, output='complex')
            UUU=UUU.T
            Xi_eigval = np.diag( np.diag(Xi_up)/np.maximum(np.diag(Delta_up),TOL))
            Xi_sortabs= np.sort(abs(np.diag(Xi_eigval)))
            Xi_sortindex= np.argsort(abs(np.diag(Xi_eigval)))
            Xi_sortval = Xi_eigval[Xi_sortindex, Xi_sortindex]
            Xi_select = np.arange(0, nx)
            stake = max(abs(Xi_sortval[Xi_select])) + TOL

            Delta_up, Xi_up, UUU, VVV = qzdiv(stake,Delta_up,Xi_up,UUU,VVV)
                    
            #Check conditions from line 49-109
            if np.imag(Xi_sortval[nx - 1]).any():
                if (abs(Xi_sortval[nx - 1] - sp.conj(Xi_sortval[nx])) < TOL):
                    print("Problem: You have complex eigenvalues! And this means"+
                        " PP matrix will contain complex numbers by this method." )
                drop_index = 1
                cond_1 = (abs(np.imag(Xi_sortval[drop_index-1])) > TOL)
                cond_2 = drop_index < nx
                while cond_1 and cond_2:
                    drop_index += 1
                if drop_index >= nx:
                    print("There is an error. Too many complex eigenvalues."
                              +" Quitting...")
                else:
                    print("Dropping the lowest real eigenvalue. Beware of" +
                          " sunspots!")
                    for i in range(drop_index,nx+1):
                        Delta_up,Xi_up,UUU,VVV = qzswitch(i,Delta_up,Xi_up,UUU,VVV)
                    Xi_select1 = np.arange(0,drop_index-1)
                    Xi_select = np.append(Xi_select1, np.arange(drop_index,nx+1))

            if Xi_sortval[max(Xi_select)] < 1 - TOL:
                print('There are stable roots NOT used. Proceeding with the' +
                        ' smallest root.')
            if max(abs(Xi_sortval[Xi_select])) > 1 + TOL:
                print("It looks like we have unstable roots. This might not work...")
            if abs(max(abs(Xi_sortval[Xi_select])) - 1) < TOL:
                print("Check the model to make sure you have a unique steady" +
                          " state we are having problems with convergence.")
            #End of checking conditions
            
            #Lambda_mat = np.diag(Xi_sortval[Xi_select]) # to help sol_out.m
            
            VVV=VVV.conj().T
            VVV_2_1 = VVV[nx : 2*nx, 0 : nx]
            VVV_2_2 = VVV[nx : 2*nx, nx :2*nx]
            UUU_2_1 = UUU[nx : 2*nx, 0 : nx]
            VVV = VVV.conj().T
            
            if abs(la.det(UUU_2_1))< TOL:
                print("One necessary condition for computing P is NOT satisfied,"+
                    " but we proceed anyways...")
            if abs(la.det(VVV_2_1))< TOL:
                print("VVV_2_1 matrix, used to compute for P, is not invertible; we"+
                    " are in trouble but we proceed anyways...")
            
            PP = np.matrix( la.solve(- VVV_2_1, VVV_2_2) )
            PP_imag = np.imag(PP)
            PP = np.real(PP)
            if (sum(sum(abs(PP_imag))) / sum(sum(abs(PP))) > .000001).any():
                print("A lot of P is complex. We will continue with the" +
                      " real part and hope we don't lose too much information.")
            #~~~~~~~~~ End of QZ-method ~~~~~~~~~#

        #This follows the original uhlig.py file
        else:
            PP = dot(dot(Omega_mat, Lambda_mat), la.inv(Omega_mat))
            PP_imag = np.imag(PP)
            PP = np.real(PP)
            if (sum(sum(abs(PP_imag))) / sum(sum(abs(PP))) > .000001).any():
                print("A lot of P is complex. We will continue with the" +
                      " real part and hope we don't lose too much information.")
    
    # The code from here to the end was from the Uhlig file calc_qrs.m.
    # I think for python it fits better here than in a separate file.

    # The if and else below make RR and VV depending on our model's setup.
    if l_equ == 0:
        RR = zeros((0, nx))
        VV = hstack((kron(NN.T, FF) + kron(eye(nz), \
            (dot(FF, PP) + GG)), kron(NN.T, JJ) + kron(eye(nz), KK))) 

    else:
        RR = - dot(CC_plus, (dot(AA, PP) + BB))
        VV = sp.vstack((hstack((kron(eye(nz), AA), \
                        kron(eye(nz), CC))), hstack((kron(NN.T, FF) +\
                        kron(eye(nz), dot(FF, PP) + dot(JJ, RR) + GG),\
                        kron(NN.T, JJ) + kron(eye(nz), KK)))))

    # Now we use LL, NN, RR, VV to get the QQ, RR, SS, VV matrices.
    # first try using Sylvester equation solver
    if ny>0:
        PM = (FF-la.solve(JJ.dot(CC),AA))
        if npla.matrix_rank(PM)< nx+ny:
            Sylv=0
            print("Sylvester equation solver condition is not satisfied;"\
                    +" proceed with the original method...")
    else:
        if npla.matrix_rank(FF)< nx:
            Sylv=0
            print("Sylvester equation solver condition is not satisfied;"\
                    +" proceed with the original method...")
    if Sylv:
        print("Using Sylvester equation solver...")
        if ny>0:
            Anew = la.solve(PM, (FF.dot(PP)+GG+JJ.dot(RR)-\
                    la.solve(KK.dot(CC), AA)) )
            Bnew = NN
            Cnew1 = la.solve(JJ.dot(CC),DD.dot(NN))+la.solve(KK.dot(CC), DD)-\
                    LL.dot(NN)-MM
            Cnew = la.solve(PM, Cnew1)
            QQ = la.solve_sylvester(Anew,Bnew,Cnew)
            SS = la.solve(-CC, (AA.dot(QQ)+DD))
        else:
            Anew = la.solve(FF, (FF.dot(PP)+GG))
            Bnew = NN
            Cnew = la.solve(FF, (-LL.dot(NN)-MM))
            QQ = la.solve_sylvester(Anew,Bnew,Cnew)
            SS = np.zeros((0,nz)) #empty matrix
    # then the Uhlig's way
    else:
        if (npla.matrix_rank(VV) < nz * (nx + ny)):
            print("Sorry but V is not invertible. Can't solve for Q and S;"+
                     " but we proceed anyways...")
        
        LL = sp.mat(LL)
        NN = sp.mat(NN)
        LLNN_plus_MM = dot(LL, NN) + MM

        if DD.any():
            impvec = vstack([DD.T, np.reshape(LLNN_plus_MM,
                                                  (nx * nz, 1), 'F')])
        else:
            impvec = np.reshape(LLNN_plus_MM, (nx * nz, 1), 'F')

        QQSS_vec = np.matrix(la.solve(-VV, impvec))

        if (max(abs(QQSS_vec)) == sp.inf).any():
            print("We have issues with Q and S. Entries are undefined." +
                      " Probably because V is no inverible.")

        #Build QQ SS
        QQ = np.reshape(np.matrix(QQSS_vec[0:nx * nz, 0]),
                            (nx, nz), 'F')

        SS = np.reshape(QQSS_vec[(nx * nz):((nx + ny) * nz), 0],\
                            (ny, nz), 'F')

    #Build WW - WW has the property [x(t)',y(t)',z(t)']=WW [x(t)',z(t)'].
    WW = sp.vstack((
        hstack((eye(nx), zeros((nx, nz)))),
        hstack((dot(RR, la.pinv(PP)), (SS - dot(dot(RR, la.pinv(PP)), QQ)))),
        hstack((zeros((nz, nx)), eye(nz)))))

    # find constant terms
    # redefine matrices to be 2D-arrays for generating vector UU and VVV
    AA = np.array(AA)
    CC = np.array(CC)
    FF = np.array(FF)
    GG = np.array(GG)
    JJ = np.array(JJ)
    KK = np.array(KK)
    LL = np.array(LL)
    NN = np.array(NN)
    RR = np.array(RR)
    QQ = np.array(QQ)
    SS = np.array(SS)
    if ny>0:
        UU1 = -(FF.dot(PP)+GG+JJ.dot(RR)+FF-(JJ+KK).dot(la.solve(CC,AA)))
        UU2 = (TT+(FF.dot(QQ)+JJ.dot(SS)+LL).dot(NN.dot(Z0)-Z0)- \
            (JJ+KK).dot(la.solve(CC,WWW)))
        UU = la.solve(UU1, UU2)
        VVV = la.solve(- CC, (WWW+AA.dot(UU)) )
    else:
        UU = la.solve( -(FF.dot(PP)+FF+GG), (TT+(FF.dot(QQ)+LL).dot(NN.dot(Z0)-Z0)) )
        VVV = np.array([])

    return np.array(PP), np.array(QQ), np.array(UU), np.array(RR), np.array(SS),\
             np.array(VVV)
Example #43
0
def MMAPPH1NPPR(D, sigma, S, *argv):
    """
    Returns various performance measures of a continuous time
    MMAP[K]/PH[K]/1 non-preemptive priority queue, see [1]_.
    
    Parameters
    ----------
    D : list of matrices of shape (N,N), length (K+1)
        The D0...DK matrices of the arrival process.
        D1 corresponds to the lowest, DK to the highest priority.
    sigma : list of row vectors, length (K)
        The list containing the initial probability vectors of the service
        time distributions of the various customer types. The length of the
        vectors does not have to be the same.
    S : list of square matrices, length (K)
        The transient generators of the phase type distributions representing
        the service time of the jobs belonging to various types.
    further parameters : 
        The rest of the function parameters specify the options
        and the performance measures to be computed.
    
        The supported performance measures and options in this 
        function are:
    
        +----------------+--------------------+----------------------------------------+
        | Parameter name | Input parameters   | Output                                 |
        +================+====================+========================================+
        | "ncMoms"       | Number of moments  | The moments of the number of customers |
        +----------------+--------------------+----------------------------------------+
        | "ncDistr"      | Upper limit K      | The distribution of the number of      |
        |                |                    | customers from level 0 to level K-1    |
        +----------------+--------------------+----------------------------------------+
        | "stMoms"       | Number of moments  | The sojourn time moments               |
        +----------------+--------------------+----------------------------------------+
        | "stDistr"      | A vector of points | The sojourn time distribution at the   |
        |                |                    | requested points (cumulative, cdf)     |
        +----------------+--------------------+----------------------------------------+
        | "prec"         | The precision      | Numerical precision used as a stopping |
        |                |                    | condition when solving the Riccati and |
        |                |                    | the matrix-quadratic equations         |
        +----------------+--------------------+----------------------------------------+
        | "erlMaxOrder"  | Integer number     | The maximal Erlang order used in the   |
        |                |                    | erlangization procedure. The default   |
        |                |                    | value is 200.                          |
        +----------------+--------------------+----------------------------------------+
        | "classes"      | Vector of integers | Only the performance measures          |
        |                |                    | belonging to these classes are         |
        |                |                    | returned. If not given, all classes    |
        |                |                    | are analyzed.                          |
        +----------------+--------------------+----------------------------------------+
        
        (The quantities related to the number of customers in 
        the system include the customer in the server, and the 
        sojourn time related quantities include the service 
        times as well)
    
    Returns
    -------
    Ret : list of the performance measures
        Each entry of the list corresponds to a performance 
        measure requested. Each entry is a matrix, where the
        columns belong to the various job types.
        If there is just a single item, 
        then it is not put into a list.
    
    References
    ----------
    .. [1] G. Horvath, "Efficient analysis of the MMAP[K]/PH[K]/1
           priority queue", European Journal of Operational 
           Research, 246(1), 128-139, 2015.
    """
    
    K = len(D)-1

    # parse options
    eaten = []
    erlMaxOrder = 200
    precision = 1e-14
    classes = np.arange(0,K)
    for i in range(len(argv)):
        if argv[i]=="prec":
            precision = argv[i+1]
            eaten.append(i)
            eaten.append(i+1) 
        elif argv[i]=="erlMaxOrder":
            erlMaxOrder = argv[i+1]
            eaten.append(i)
            eaten.append(i+1) 
        elif argv[i]=="classes":
            classes = np.array(argv[i+1])-1
            eaten.append(i)
            eaten.append(i+1) 
    
    if butools.checkInput and not CheckMMAPRepresentation(D):
        raise Exception('MMAPPH1NPPR: The arrival process is not a valid MMAP representation!')
    
    if butools.checkInput:
        for k in range(K):
            if not CheckPHRepresentation(sigma[k],S[k]):
                raise Exception('MMAPPH1NPPR: the vector and matrix describing the service times do not form a valid PH representation!')

    # some preparation
    D0 = D[0]
    N = D0.shape[0]
    I = ml.eye(N)
    sD = ml.zeros((N,N))
    for Di in D:
        sD += Di
    
    s = []
    M = np.empty(K, dtype=int)  # the phase counts are used as slice sizes below, so keep them integer
    for i in range(K):
        s.append(np.sum(-S[i],1))
        M[i] = sigma[i].size
    
    # step 1. solution of the workload process of the joint queue
    # ===========================================================
    sM = np.sum(M)
    Qwmm = ml.matrix(D0)
    Qwpm = ml.zeros((N*sM, N))
    Qwmp = ml.zeros((N, N*sM))
    Qwpp = ml.zeros((N*sM, N*sM)) 
    kix = 0
    for i in range(K):
        Qwmp[:,kix:kix+N*M[i]] = np.kron(D[i+1], sigma[i])
        Qwpm[kix:kix+N*M[i],:] = np.kron(I,s[i])
        Qwpp[kix:kix+N*M[i],:][:,kix:kix+N*M[i]] = np.kron(I,S[i])
        kix += N*M[i]

    # calculate fundamental matrices
    Psiw, Kw, Uw = FluidFundamentalMatrices (Qwpp, Qwpm, Qwmp, Qwmm, 'PKU', precision)
    
    # calculate boundary vector
    Ua = ml.ones((N,1)) + 2*np.sum(Qwmp*(-Kw).I,1)
    pm = Linsolve (ml.hstack((Uw,Ua)).T, ml.hstack((ml.zeros((1,N)),ml.ones((1,1)))).T).T

    ro =  ((1.0-np.sum(pm))/2.0)/(np.sum(pm)+(1.0-np.sum(pm))/2.0) # calc idle time with weight=1, and the busy time with weight=1/2
    kappa = pm/np.sum(pm)
    
    pi = CTMCSolve (sD)
    lambd = []
    for i in range(K):
        lambd.append(np.sum(pi*D[i+1]))

    Psiw = []
    Qwmp = []
    Qwzp = []
    Qwpp = []
    Qwmz = []
    Qwpz = []
    Qwzz = []
    Qwmm = []
    Qwpm = []
    Qwzm = []
    for k in range(K):
        # step 2. construct a workload process for classes k...K
        # ======================================================
        Mlo = np.sum(M[:k])
        Mhi = np.sum(M[k:])

        Qkwpp = ml.zeros((N*Mlo*Mhi+N*Mhi, N*Mlo*Mhi+N*Mhi))
        Qkwpz = ml.zeros((N*Mlo*Mhi+N*Mhi, N*Mlo)) 
        Qkwpm = ml.zeros((N*Mlo*Mhi+N*Mhi, N))
        Qkwmz = ml.zeros((N, N*Mlo))
        Qkwmp = ml.zeros((N, N*Mlo*Mhi+N*Mhi))
        Dlo = ml.matrix(D0)
        for i in range(k):
            Dlo = Dlo + D[i+1]
        Qkwmm = Dlo
        Qkwzp = ml.zeros((N*Mlo, N*Mlo*Mhi+N*Mhi))
        Qkwzm = ml.zeros((N*Mlo, N))
        Qkwzz = ml.zeros((N*Mlo, N*Mlo))
        kix = 0
        for i in range(k,K):
            kix2 = 0
            for j in range(k):
                bs = N*M[j]*M[i]
                bs2 = N*M[j]
                Qkwpp[kix:kix+bs,kix:kix+bs] = np.kron(I,np.kron(ml.eye(M[j]),S[i]))
                Qkwpz[kix:kix+bs,kix2:kix2+bs2] = np.kron(I,np.kron(ml.eye(M[j]),s[i]))
                Qkwzp[kix2:kix2+bs2,kix:kix+bs] = np.kron(D[i+1],np.kron(ml.eye(M[j]), sigma[i]))
                kix += bs
                kix2 += bs2
        for i in range(k,K):
            bs = N*M[i]
            Qkwpp[kix:kix+bs,:][:,kix:kix+bs] = np.kron(I,S[i])
            Qkwpm[kix:kix+bs,:] = np.kron(I,s[i])
            Qkwmp[:,kix:kix+bs] = np.kron(D[i+1],sigma[i])
            kix += bs
        kix = 0
        for j in range(k):
            bs = N*M[j]
            Qkwzz[kix:kix+bs,kix:kix+bs] = np.kron(Dlo, ml.eye(M[j])) + np.kron(I, S[j])
            Qkwzm[kix:kix+bs,:] = np.kron(I, s[j])
            kix += bs

        if Qkwzz.shape[0]>0:
            Psikw = FluidFundamentalMatrices (Qkwpp+Qkwpz*(-Qkwzz).I*Qkwzp, Qkwpm+Qkwpz*(-Qkwzz).I*Qkwzm, Qkwmp, Qkwmm, 'P', precision)
        else:
            Psikw = FluidFundamentalMatrices (Qkwpp, Qkwpm, Qkwmp, Qkwmm, 'P', precision)
        Psiw.append(Psikw)
        
        Qwzp.append(Qkwzp)
        Qwmp.append(Qkwmp)
        Qwpp.append(Qkwpp)
        Qwmz.append(Qkwmz)
        Qwpz.append(Qkwpz)
        Qwzz.append(Qkwzz)
        Qwmm.append(Qkwmm)
        Qwpm.append(Qkwpm)
        Qwzm.append(Qkwzm)
    
    # step 3. calculate Phi vectors
    # =============================
    lambdaS = sum(lambd)
    phi = [(1-ro)*kappa*(-D0) / lambdaS]
    q0 = [[]]
    qL = [[]]
    for k in range(K-1):
        sDk = ml.matrix(D0)
        for j in range(k+1):
            sDk = sDk + D[j+1]
        # pk
        pk = sum(lambd[:k+1])/lambdaS - (1-ro)*kappa*np.sum(sDk,1)/lambdaS
        # A^(k,1)
        Qwzpk = Qwzp[k+1]
        vix = 0
        Ak = []
        for ii in range(k+1):
            bs = N*M[ii]
            V1 = Qwzpk[vix:vix+bs,:]
            Ak.append (np.kron(I,sigma[ii]) * (-np.kron(sDk,ml.eye(M[ii]))-np.kron(I,S[ii])).I * (np.kron(I,s[ii]) + V1*Psiw[k+1]))
            vix += bs
        # B^k
        Qwmpk = Qwmp[k+1]
        Bk = Qwmpk * Psiw[k+1]
        ztag = phi[0]*((-D0).I*D[k+1]*Ak[k] - Ak[0] + (-D0).I*Bk)
        for i in range(k):
            ztag += phi[i+1]*(Ak[i]-Ak[i+1]) + phi[0]*(-D0).I*D[i+1]*Ak[i]
        Mx = ml.eye(Ak[k].shape[0])-Ak[k]
        Mx[:,0] = ml.ones((N,1))
        phi.append(ml.hstack((pk, ztag[:,1:]))*Mx.I)  # phi(k) = Psi^(k)_k * p(k). Psi^(k)_i = phi(i) / p(k)

        q0.append(phi[0]*(-D0).I)
        qLii = []
        for ii in range(k+1):
            qLii.append((phi[ii+1] - phi[ii] + phi[0]*(-D0).I*D[ii+1]) * np.kron(I,sigma[ii]) * (-np.kron(sDk,ml.eye(M[ii]))-np.kron(I,S[ii])).I)
        qL.append(ml.hstack(qLii))
    
    
    # step 4. calculate performance measures
    # ======================================
    Ret = []
    for k in classes:

        sD0k = ml.matrix(D0)
        for i in range(k):
            sD0k +=  D[i+1]     
       
        if k<K-1:
            # step 4.1 calculate distribution of the workload process right 
            # before the arrivals of class k jobs
            # ============================================================
            if Qwzz[k].shape[0]>0:
                Kw = Qwpp[k]+Qwpz[k]*(-Qwzz[k]).I*Qwzp[k] + Psiw[k]*Qwmp[k]
            else:
                Kw = Qwpp[k] + Psiw[k]*Qwmp[k]
            BM = ml.zeros((0,0))
            CM = ml.zeros((0,N))
            DM = ml.zeros((0,0))
            for i in range(k):
                BM = la.block_diag(BM,np.kron(I,S[i]))
                CM = ml.vstack((CM, np.kron(I,s[i])))
                DM = la.block_diag(DM,np.kron(D[k+1],ml.eye(M[i])))
            if k>0:
                Kwu = ml.vstack((ml.hstack((Kw, (Qwpz[k]+Psiw[k]*Qwmz[k])*(-Qwzz[k]).I*DM)), ml.hstack((ml.zeros((BM.shape[0],Kw.shape[1])), BM))))
                Bwu = ml.vstack((Psiw[k]*D[k+1], CM))
                iniw = ml.hstack((q0[k]*Qwmp[k]+qL[k]*Qwzp[k], qL[k]*DM))
                pwu = q0[k]*D[k+1]
            else:
                Kwu = Kw
                Bwu = Psiw[k]*D[k+1]
                iniw = pm*Qwmp[k]
                pwu = pm*D[k+1]

            norm = np.sum(pwu) + np.sum(iniw*(-Kwu).I*Bwu)
            pwu = pwu / norm
            iniw = iniw / norm

            # step 4.2 create the fluid model whose first passage time equals the
            # WAITING time of the low priority customers
            # ==================================================================
            KN = Kwu.shape[0]
            Qspp = ml.zeros((KN+N*np.sum(M[k+1:]), KN+N*np.sum(M[k+1:])))
            Qspm = ml.zeros((KN+N*np.sum(M[k+1:]), N))
            Qsmp = ml.zeros((N, KN+N*np.sum(M[k+1:])))
            Qsmm = sD0k + D[k+1]
            kix = 0
            for i in range(k+1,K):
                bs = N*M[i]
                Qspp[KN+kix:KN+kix+bs,:][:,KN+kix:KN+kix+bs] = np.kron(I,S[i])
                Qspm[KN+kix:KN+kix+bs,:] = np.kron(I,s[i])
                Qsmp[:,KN+kix:KN+kix+bs] = np.kron(D[i+1],sigma[i])
                kix += bs

            Qspp[:KN,:][:,:KN] = Kwu
            Qspm[:KN,:] = Bwu
            inis = ml.hstack((iniw, ml.zeros((1,N*np.sum(M[k+1:])))))

            # calculate fundamental matrix
            Psis = FluidFundamentalMatrices (Qspp, Qspm, Qsmp, Qsmm, 'P', precision)

            # step 4.3. calculate the performance measures
            # ==========================================   
            argIx = 0
            while argIx<len(argv):
                if argIx in eaten:
                    argIx += 1
                    continue
                elif type(argv[argIx]) is str and argv[argIx]=="stMoms":
                    # MOMENTS OF THE SOJOURN TIME
                    # ~~~~~~~~~~~~~~~~~~~~~~~~~~~
                    numOfSTMoms = argv[argIx+1]
                    # calculate waiting time moments
                    Pn = [Psis]
                    wtMoms = []
                    for n in range(1,numOfSTMoms+1):
                        A = Qspp + Psis*Qsmp
                        B = Qsmm + Qsmp*Psis
                        C = -2*n*Pn[n-1]
                        bino = 1
                        for i in range(1,n):
                            bino = bino * (n-i+1) / i
                            C += bino * Pn[i]*Qsmp*Pn[n-i]
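                        # P solves the Sylvester equation A*P + P*B = -C (the convention of scipy.linalg.solve_sylvester)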
                        P = la.solve_sylvester(A, B, -C)
                        Pn.append(P)
                        wtMoms.append(np.sum(inis*P*(-1)**n) / 2**n)
                    # calculate RESPONSE time moments
                    Pnr = [np.sum(inis*Pn[0])*sigma[k]]
                    rtMoms = []
                    for n in range(1,numOfSTMoms+1):
                        P =  n*Pnr[n-1]*(-S[k]).I + (-1)**n*np.sum(inis*Pn[n])*sigma[k] / 2**n
                        Pnr.append(P)
                        rtMoms.append(np.sum(P)+np.sum(pwu)*math.factorial(n)*np.sum(sigma[k]*(-S[k]).I**n))
                    Ret.append(rtMoms)
                    argIx += 1
                elif type(argv[argIx]) is str and argv[argIx]=="stDistr":
                    # DISTRIBUTION OF THE SOJOURN TIME
                    # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
                    stCdfPoints = argv[argIx+1]
                    res = []
                    for t in stCdfPoints:
                        L = erlMaxOrder
                        lambdae = L/t/2
                        Psie = FluidFundamentalMatrices (Qspp-lambdae*ml.eye(Qspp.shape[0]), Qspm, Qsmp, Qsmm-lambdae*ml.eye(Qsmm.shape[0]), 'P', precision)
                        Pn = [Psie]
                        pr = (np.sum(pwu) + np.sum(inis*Psie)) * (1-np.sum(sigma[k]*(ml.eye(S[k].shape[0])-S[k]/2/lambdae).I**L))
                        for n in range(1,L):
                            A = Qspp + Psie*Qsmp - lambdae*ml.eye(Qspp.shape[0])
                            B = Qsmm + Qsmp*Psie - lambdae*ml.eye(Qsmm.shape[0])
                            C = 2*lambdae*Pn[n-1]
                            for i in range(1,n):
                                C += Pn[i]*Qsmp*Pn[n-i]
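                            # each erlangization step solves the same kind of Sylvester equation, A*P + P*B = -C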
                            P = la.solve_sylvester(A, B, -C)
                            Pn.append(P)
                            pr += np.sum(inis*P) * (1-np.sum(sigma[k]*(np.eye(S[k].shape[0])-S[k]/2/lambdae).I**(L-n)))
                        res.append(pr)
                    Ret.append(np.array(res))
                    argIx += 1
                elif type(argv[argIx]) is str and (argv[argIx]=="ncMoms" or argv[argIx]=="ncDistr"):
                    W = (-np.kron(sD-D[k+1],ml.eye(M[k]))-np.kron(I,S[k])).I*np.kron(D[k+1],ml.eye(M[k]))
                    iW = (ml.eye(W.shape[0])-W).I
                    w = np.kron(ml.eye(N),sigma[k])
                    omega = (-np.kron(sD-D[k+1],ml.eye(M[k]))-np.kron(I,S[k])).I*np.kron(I,s[k])
                    if argv[argIx]=="ncMoms":
                        # MOMENTS OF THE NUMBER OF JOBS
                        # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
                        numOfQLMoms = argv[argIx+1]
                        # first calculate it at departure instants
                        Psii = [Psis]
                        QLDPn = [inis*Psii[0]*w*iW]
                        for n in range(1,numOfQLMoms+1):
                            A = Qspp + Psis*Qsmp
                            B = Qsmm + Qsmp*Psis
                            C = n*Psii[n-1]*D[k+1]
                            bino = 1
                            for i in range(1,n):
                                bino = bino * (n-i+1) / i
                                C = C + bino * Psii[i]*Qsmp*Psii[n-i]
                            P = la.solve_sylvester(A, B, -C)
                            Psii.append(P)
                            QLDPn.append(n*QLDPn[n-1]*iW*W + inis*P*w*iW)
                        for n in range(numOfQLMoms+1):
                            QLDPn[n] = (QLDPn[n] + pwu*w*iW**(n+1)*W**n)*omega
                        # now calculate it at random time instance
                        QLPn = [pi]
                        qlMoms = []
                        iTerm = (ml.ones((N,1))*pi - sD).I
                        for n in range(1,numOfQLMoms+1):
                            sumP = np.sum(QLDPn[n]) + n*np.sum((QLDPn[n-1] - QLPn[n-1]*D[k+1]/lambd[k])*iTerm*D[k+1])
                            P = sumP*pi + n*(QLPn[n-1]*D[k+1] - QLDPn[n-1]*lambd[k])*iTerm
                            QLPn.append(P)
                            qlMoms.append(np.sum(P))
                        qlMoms = MomsFromFactorialMoms(qlMoms)
                        Ret.append(qlMoms)
                        argIx += 1
                    elif argv[argIx]=="ncDistr":
                        # DISTRIBUTION OF THE NUMBER OF JOBS
                        # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
                        numOfQLProbs = argv[argIx+1]
                        Psid = FluidFundamentalMatrices (Qspp, Qspm, Qsmp, sD0k, 'P', precision)
                        Pn = [Psid]
                        XDn = inis*Psid*w
                        dqlProbs = (XDn+pwu*w)*omega
                        for n in range(1,numOfQLProbs):
                            A = Qspp + Psid*Qsmp
                            B = sD0k + Qsmp*Psid
                            C = Pn[n-1]*D[k+1]
                            for i in range(1,n):
                                C += Pn[i]*Qsmp*Pn[n-i]
                            P = la.solve_sylvester(A, B, -C)
                            Pn.append(P)
                            XDn = XDn*W + inis*P*w
                            dqlProbs = ml.vstack((dqlProbs, (XDn+pwu*w*W**n)*omega))
                        # now calculate it at random time instance
                        iTerm = -(sD-D[k+1]).I
                        qlProbs = lambd[k]*dqlProbs[0,:]*iTerm
                        for n in range(1,numOfQLProbs):
                            P = (qlProbs[n-1,:]*D[k+1]+lambd[k]*(dqlProbs[n,:]-dqlProbs[n-1,:]))*iTerm
                            qlProbs = ml.vstack((qlProbs, P))
                        qlProbs = np.sum(qlProbs,1).A.flatten()
                        Ret.append(qlProbs)
                        argIx += 1
                else:
                    raise Exception("MMAPPH1NPPR: Unknown parameter "+str(argv[argIx]))
                argIx += 1
        elif k==K-1:
            # calculate the performance measures for the highest priority class
            # ==================================================================
            argIx = 0
            while argIx<len(argv):
                if argIx in eaten:
                    argIx += 1
                    continue
                elif type(argv[argIx]) is str and (argv[argIx]=="stMoms" or argv[argIx]=="stDistr"):
                    Kw = Qwpp[k]+Qwpz[k]*(-Qwzz[k]).I*Qwzp[k] + Psiw[k]*Qwmp[k]
                    AM = ml.zeros((0,0))
                    BM = ml.zeros((0,0))
                    CM = ml.zeros((0,1))
                    DM = ml.zeros((0,0))
                    for i in range(k):
                        AM = la.block_diag(AM,np.kron(ml.ones((N,1)),np.kron(ml.eye(M[i]),s[k])))
                        BM = la.block_diag(BM,S[i])
                        CM = ml.vstack((CM, s[i]))
                        DM = la.block_diag(DM,np.kron(D[k+1],ml.eye(M[i])))                        
                    Z = ml.vstack((ml.hstack((Kw, ml.vstack((AM,ml.zeros((N*M[k],AM.shape[1])))))), ml.hstack((ml.zeros((BM.shape[0],Kw.shape[1])), BM))))
                    z = ml.vstack((ml.zeros((AM.shape[0],1)), np.kron(ml.ones((N,1)),s[k]), CM))
                    iniw = ml.hstack((q0[k]*Qwmp[k]+qL[k]*Qwzp[k], ml.zeros((1,BM.shape[0]))))
                    zeta = iniw/np.sum(iniw*(-Z).I*z)
                    if argv[argIx]=="stMoms":
                        # MOMENTS OF THE SOJOURN TIME
                        # ~~~~~~~~~~~~~~~~~~~~~~~~~~~
                        numOfSTMoms = argv[argIx+1]
                        rtMoms = []
                        for i in range(1,numOfSTMoms+1):
                            rtMoms.append(np.sum(math.factorial(i)*zeta*(-Z).I**(i+1)*z))
                        Ret.append(rtMoms)
                        argIx += 1
                    if argv[argIx]=="stDistr":
                        # DISTRIBUTION OF THE SOJOURN TIME
                        # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
                        stCdfPoints = argv[argIx+1]
                        rtDistr = []
                        for t in stCdfPoints:
                            rtDistr.append (np.sum(zeta*(-Z).I*(ml.eye(Z.shape[0])-la.expm(Z*t))*z))
                        Ret.append(np.array(rtDistr))
                        argIx += 1
                elif type(argv[argIx]) is str and (argv[argIx]=="ncMoms" or argv[argIx]=="ncDistr"):
                    L = ml.zeros((N*np.sum(M),N*np.sum(M)))
                    B = ml.zeros((N*np.sum(M),N*np.sum(M)))
                    F = ml.zeros((N*np.sum(M),N*np.sum(M)))
                    kix = 0
                    for i in range(K):
                        bs = N*M[i]
                        F[kix:kix+bs,:][:,kix:kix+bs] = np.kron(D[k+1],ml.eye(M[i]))
                        L[kix:kix+bs,:][:,kix:kix+bs] = np.kron(sD0k,ml.eye(M[i])) + np.kron(I,S[i])
                        if i<K-1:
                            L[kix:kix+bs,:][:,N*np.sum(M[:k]):] = np.kron(I,s[i]*sigma[k])
                        else:
                            B[kix:kix+bs,:][:,N*np.sum(M[:k]):] = np.kron(I,s[i]*sigma[k])
                        kix += bs
                    R = QBDFundamentalMatrices (B, L, F, 'R', precision)
                    p0 = ml.hstack((qL[k], q0[k]*np.kron(I,sigma[k])))
                    p0 = p0/np.sum(p0*(ml.eye(R.shape[0])-R).I)
                    if argv[argIx]=="ncMoms":
                        # MOMENTS OF THE NUMBER OF JOBS
                        # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
                        numOfQLMoms = argv[argIx+1]
                        qlMoms = []
                        for i in range(1,numOfQLMoms+1):
                            qlMoms.append(np.sum(math.factorial(i)*p0*R**i*(ml.eye(R.shape[0])-R).I**(i+1)))
                        Ret.append(MomsFromFactorialMoms(qlMoms))
                    elif argv[argIx]=="ncDistr":
                        # DISTRIBUTION OF THE NUMBER OF JOBS
                        # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
                        numOfQLProbs = argv[argIx+1]
                        qlProbs = [np.sum(p0)]
                        for i in range(1,numOfQLProbs):
                            qlProbs.append(np.sum(p0*R**i))
                        Ret.append(np.array(qlProbs))
                    argIx += 1
                else:
                    raise Exception("MMAPPH1NPPR: Unknown parameter "+str(argv[argIx]))
                argIx += 1

    if len(Ret)==1:
        return Ret[0]
    else:
        return Ret
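
A minimal usage sketch for MMAPPH1NPPR (added for illustration; it is not part of the
original example). The two-class MMAP and PH service parameters below are assumed
values chosen to keep the queue stable, and the call requires the butools helpers
used inside the function (FluidFundamentalMatrices, CTMCSolve, Linsolve, ...).

import numpy.matlib as ml

# Assumed arrival process: D1 drives the lowest, D2 the highest priority class.
D0 = ml.matrix([[-2.5, 1.0], [1.0, -2.5]])
D1 = ml.matrix([[1.0, 0.0], [0.0, 1.0]])
D2 = ml.matrix([[0.5, 0.0], [0.0, 0.5]])
# Assumed PH services: exponential with rate 4 for class 1, a two-phase PH for class 2.
sigma = [ml.matrix([[1.0]]), ml.matrix([[0.6, 0.4]])]
S = [ml.matrix([[-4.0]]), ml.matrix([[-6.0, 2.0], [0.0, -5.0]])]

# First three sojourn time moments; the returned list holds one entry per class.
stMoms = MMAPPH1NPPR([D0, D1, D2], sigma, S, "stMoms", 3)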
Example #44
0
def test_trivial(self):
    a = np.array([[1.0, 0.0], [0.0, 1.0]])
    b = np.array([[1.0]])
    c = np.array([2.0, 2.0]).reshape(-1,1)
    x = solve_sylvester(a, b, c)
    assert_array_almost_equal(x, np.array([1.0, 1.0]).reshape(-1,1))
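
A complementary sketch (added here, not taken from the original test suite): it checks
the defining property of the returned solution, A X + X B = Q, on the same trivial data.

import numpy as np
from scipy.linalg import solve_sylvester

a = np.eye(2)
b = np.array([[1.0]])
q = np.array([[2.0], [2.0]])
x = solve_sylvester(a, b, q)
# solve_sylvester returns X satisfying A X + X B = Q; here X should be [[1], [1]].
assert np.allclose(a @ x + x @ b, q)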
Example #45
0
def MMAPPH1FCFS(D, sigma, S, *argv):
    """
    Returns various performance measures of an MMAP[K]/PH[K]/1
    first-come-first-served queue, see [1]_.
    
    Parameters
    ----------
    D : list of matrices of shape (N,N), length (K+1)
        The D0...DK matrices of the arrival process.
    sigma : list of row vectors, length (K)
        The list containing the initial probability vectors of the service
        time distributions of the various customer types. The lengths of the
        vectors do not have to be the same.
    S : list of square matrices, length (K)
        The transient generators of the phase type distributions representing
        the service time of the jobs belonging to various types.
    further parameters : 
        The rest of the function parameters specify the options
        and the performance measures to be computed.
    
        The supported performance measures and options in this 
        function are:
    
        +----------------+--------------------+----------------------------------------+
        | Parameter name | Input parameters   | Output                                 |
        +================+====================+========================================+
        | "ncMoms"       | Number of moments  | The moments of the number of customers |
        +----------------+--------------------+----------------------------------------+
        | "ncDistr"      | Upper limit K      | The distribution of the number of      |
        |                |                    | customers from level 0 to level K-1    |
        +----------------+--------------------+----------------------------------------+
        | "stMoms"       | Number of moments  | The sojourn time moments               |
        +----------------+--------------------+----------------------------------------+
        | "stDistr"      | A vector of points | The sojourn time distribution at the   |
        |                |                    | requested points (cumulative, cdf)     |
        +----------------+--------------------+----------------------------------------+
        | "stDistrME"    | None               | The vector-matrix parameters of the    |
        |                |                    | matrix-exponentially distributed       |
        |                |                    | sojourn time distribution              |
        +----------------+--------------------+----------------------------------------+
        | "stDistrPH"    | None               | The vector-matrix parameters of the    |
        |                |                    | matrix-exponentially distributed       |
        |                |                    | sojourn time distribution, converted   |
        |                |                    | to a continuous PH representation      |
        +----------------+--------------------+----------------------------------------+
        | "prec"         | The precision      | Numerical precision used as a stopping |
        |                |                    | condition when solving the Riccati     |
        |                |                    | equation                               |
        +----------------+--------------------+----------------------------------------+
        | "classes"      | Vector of integers | Only the performance measures          |
        |                |                    | belonging to these classes are         |
        |                |                    | returned. If not given, all classes    |
        |                |                    | are analyzed.                          |
        +----------------+--------------------+----------------------------------------+
        
        (The quantities related to the number of customers in 
        the system include the customer in the server, and the 
        sojourn time related quantities include the service 
        times as well)
    
    Returns
    -------
    Ret : list of the performance measures
        Each entry of the list corresponds to a performance 
        measure requested. Each entry is a matrix, where the
        columns belong to the various job types.
        If there is just a single item, 
        then it is not put into a list.
    
    References
    ----------
    .. [1] Qiming He, "Analysis of a continuous time 
           SM[K]/PH[K]/1/FCFS queue: Age process, sojourn times,
           and queue lengths", Journal of Systems Science and 
           Complexity, 25(1), pp 133-155, 2012.
    """
    
    K = len(D)-1

    # parse options
    eaten = []
    precision = 1e-14
    classes = np.arange(0,K)
    for i in range(len(argv)):
        if argv[i]=="prec":
            precision = argv[i+1]
            eaten.append(i)
            eaten.append(i+1) 
        elif argv[i]=="classes":
            classes = np.array(argv[i+1])-1
            eaten.append(i)
            eaten.append(i+1) 
    
    if butools.checkInput and not CheckMMAPRepresentation(D):
        raise Exception('MMAPPH1FCFS: The arrival process is not a valid MMAP representation!')
    
    if butools.checkInput:
        for k in range(K):
            if not CheckPHRepresentation(sigma[k],S[k]):
                raise Exception('MMAPPH1FCFS: the vector and matrix describing the service times do not form a valid PH representation!')

    # some preparation
    D0 = D[0]
    N = D0.shape[0]
    Ia = ml.eye(N)
    Da = ml.zeros((N,N))
    for q in range(K):
        Da += D[q+1]
    theta = CTMCSolve(D0+Da)
    beta = [CTMCSolve(S[k]+ml.sum(-S[k],1)*sigma[k]) for k in range(K)]
    lambd = [np.sum(theta*D[k+1]) for k in range(K)]    
    mu = [np.sum(beta[k]*(-S[k])) for k in range(K)]
    Nsk = [S[k].shape[0] for k in range(K)]    
    ro = np.sum(np.array(lambd)/np.array(mu))
    alpha = theta*Da/sum(lambd)
    D0i = (-D0).I

    Sa = S[0]
    sa = [ml.zeros(sigma[0].shape)]*K
    sa[0] = sigma[0]
    ba = [ml.zeros(beta[0].shape)]*K
    ba[0] = beta[0]
    sv = [ml.zeros((Nsk[0],1))]*K
    sv[0] = ml.sum(-S[0],1)
    Pk = [D0i*D[q+1] for q in range(K)]

    for k in range(1,K):
        Sa = la.block_diag(Sa, S[k])
        for q in range(K):
            if q==k:
                sa[q] = ml.hstack((sa[q], sigma[k]))
                ba[q] = ml.hstack((ba[q], beta[k]))
                sv[q] = ml.vstack((sv[q], -np.sum(S[k],1)))
            else:
                sa[q] = ml.hstack((sa[q], ml.zeros(sigma[k].shape)))
                ba[q] = ml.hstack((ba[q], ml.zeros(beta[k].shape)))
                sv[q] = ml.vstack((sv[q], ml.zeros((Nsk[k],1))))
    Sa = ml.matrix(Sa)
    P = D0i*Da
    iVec = ml.kron(D[1],sa[0])
    for k in range(1,K):
        iVec += ml.kron(D[k+1],sa[k])
    Ns = Sa.shape[0]
    Is = ml.eye(Ns)
    
    # step 1. solve the age process of the queue
    # ==========================================

    # solve Y0 and calculate T
    Y0 = FluidFundamentalMatrices (ml.kron(Ia,Sa), ml.kron(Ia,-ml.sum(Sa,1)), iVec, D0, "P", precision)
    T = ml.kron(Ia,Sa) + Y0 * iVec
    
    # calculate pi0 and v0
    pi0 = ml.zeros((1,T.shape[0]))
    for k in range(K):
        pi0 += ml.kron(theta*D[k+1],ba[k]/mu[k])
    pi0 = - pi0 * T

    iT = (-T).I
    oa = ml.ones((N,1))

    # step 2. calculate performance measures
    # ======================================
    Ret = []
    for k in classes:
        argIx = 0
        clo = iT*ml.kron(oa,sv[k])
        while argIx<len(argv):
            if argIx in eaten:
                argIx += 1
                continue
            elif type(argv[argIx]) is str and argv[argIx]=="stMoms":
                numOfSTMoms = argv[argIx+1]
                rtMoms = []
                for m in range(1,numOfSTMoms+1):
                    rtMoms.append(math.factorial(m) * np.sum(pi0 * iT**m * clo / (pi0*clo)))
                Ret.append(rtMoms)
                argIx += 1
            elif type(argv[argIx]) is str and argv[argIx]=="stDistr":
                stCdfPoints = argv[argIx+1]
                cdf = []
                for t in stCdfPoints:
                    pr = 1 - np.sum(pi0 * la.expm(T*t) * clo / (pi0*clo))
                    cdf.append(pr)
                Ret.append(np.array(cdf))
                argIx += 1
            elif type(argv[argIx]) is str and argv[argIx]=="stDistrME":
                Bm = SimilarityMatrixForVectors(clo/(pi0*clo),ml.ones((N*Ns,1)))
                Bmi = Bm.I
                A = Bm * T * Bmi
                alpha = pi0 * Bmi
                Ret.append(alpha)
                Ret.append(A)
            elif type(argv[argIx]) is str and argv[argIx]=="stDistrPH":
                vv = pi0*iT
                ix = np.arange(N*Ns)
                nz = ix[vv.flat>precision]
                delta = Diag(vv[:,nz])
                cl = -T*clo/(pi0*clo)
                alpha = cl[nz,:].T*delta
                A = delta.I*T[nz,:][:,nz].T*delta
                Ret.append(alpha)
                Ret.append(A)
            elif type(argv[argIx]) is str and argv[argIx]=="ncDistr":
                numOfQLProbs = argv[argIx+1]
                argIx += 1
                values = np.empty(numOfQLProbs)
                jm = ml.zeros((Ns,1))
                jm[np.sum(Nsk[0:k]):np.sum(Nsk[0:k+1]),:] = 1
                jmc = ml.ones((Ns,1))
                jmc[np.sum(Nsk[0:k]):np.sum(Nsk[0:k+1]),:] = 0
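                # LmCurr solves the Sylvester equation T*X + X*kron(D0+Da-D[k+1], Is) = -I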
                LmCurr = la.solve_sylvester(T, ml.kron(D0+Da-D[k+1],Is), -ml.eye(N*Ns))
                values[0] = 1-ro+np.sum(pi0*LmCurr*ml.kron(oa,jmc))
                for i in range(1,numOfQLProbs):
                    LmPrev = LmCurr
                    LmCurr = la.solve_sylvester(T, ml.kron(D0+Da-D[k+1],Is), -LmPrev*ml.kron(D[k+1],Is))
                    values[i] = np.sum(pi0*LmCurr*ml.kron(oa,jmc) + pi0*LmPrev*ml.kron(oa,jm))
                Ret.append(values)
            elif type(argv[argIx]) is str and argv[argIx]=="ncMoms":
                numOfQLMoms = argv[argIx+1]
                argIx += 1
                jm = ml.zeros((Ns,1))
                jm[np.sum(Nsk[0:k]):np.sum(Nsk[0:k+1]),:] = 1
                ELn = [la.solve_sylvester(T, ml.kron(D0+Da,Is), -ml.eye(N*Ns))]
                qlMoms = []
                for n in range(1,numOfQLMoms+1):
                    bino = 1
                    Btag = ml.zeros((N*Ns,N*Ns))
                    for i in range(n):
                        Btag += bino * ELn[i]
                        bino *= (n-i) / (i+1)
                    ELn.append(la.solve_sylvester(T, ml.kron(D0+Da,Is), -Btag*ml.kron(D[k+1],Is)))
                    qlMoms.append(np.sum(pi0*ELn[n]) + np.sum(pi0*Btag*ml.kron(oa,jm)))
                Ret.append(qlMoms)
            else:
                raise Exception("MMAPPH1FCFS: Unknown parameter "+str(argv[argIx]))
            argIx += 1

    if len(Ret)==1:
        return Ret[0]
    else:
        return Ret
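
A minimal usage sketch for MMAPPH1FCFS (added for illustration; not part of the original
example), reusing the same assumed two-class MMAP/PH parameters as above; it requires the
butools helpers referenced inside the function.

import numpy.matlib as ml

D0 = ml.matrix([[-2.5, 1.0], [1.0, -2.5]])
D1 = ml.matrix([[1.0, 0.0], [0.0, 1.0]])
D2 = ml.matrix([[0.5, 0.0], [0.0, 0.5]])
sigma = [ml.matrix([[1.0]]), ml.matrix([[0.6, 0.4]])]
S = [ml.matrix([[-4.0]]), ml.matrix([[-6.0, 2.0], [0.0, -5.0]])]

# Sojourn time cdf of every class at a few points, with a tighter stopping precision
# for the Riccati iteration.
stDistr = MMAPPH1FCFS([D0, D1, D2], sigma, S, "stDistr", [0.5, 1.0, 2.0, 5.0], "prec", 1e-12)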