Code example #1
    def perform_charting(self, ctl_object):
        q = np.logspace(1, 6, 300)
        w = np.logspace(-3, 3, 600)
        PO = np.zeros([len(q), 1])
        tr = np.zeros_like(PO)
        ts = np.zeros_like(PO)
        wc = np.zeros_like(PO)
        maxu = np.zeros_like(PO)
        srmin = np.zeros_like(PO)
        rdmin = np.zeros_like(PO)
        nreg = ctl_object.Creg.shape[0]

        Qw = np.zeros_like(ctl_object.Qw)

        for i in range(len(q)):
            for j in range(nreg):
                Qw[j, j] = q[i]

            try:
                # Solve the LQR problem.
                ctl_object.Kw, S, E = control.lqr(ctl_object.Aw, ctl_object.Bw, Qw, ctl_object.Rw)
            except Exception:
                PO[i] = np.nan
                tr[i] = np.nan
                ts[i] = np.nan
                wc[i] = np.nan
                rdmin[i] = np.nan
                srmin[i] = np.nan
                maxu[i] = np.nan
                continue

            # Set the state feedback gain and the integral error gain.
            kI = ctl_object.Kw[:, :nreg][0]
            Kv = ctl_object.Kw[:, nreg:][0]

            # Build controller object to track reference theta
            sys_c = sys_setup()
            sys_c.A = np.zeros([nreg, nreg])
            sys_c.B = ctl_object.Creg
            sys_c.B2 = -np.eye(nreg)
            sys_c.C = -kI
            sys_c.D = -Kv
            sys_c.D2 = np.zeros([m, nreg])

            # Form closed loop system.
            Acl, Bcl, Ccl, Dcl = self.closed_loop_system(ctl_object.sys_p, sys_c)

            # Get the eigenvalues
            ee = np.linalg.eigvals(Acl)
            if any(np.real(ee) > 0):
                continue
Code example #2
File: kcipy.py  Project: DennisZY/CI_toolkit
import numpy as np
import numpy.linalg as la   # assumption: `la` is the linear-algebra module used below (numpy.linalg here)


def eigdec(x, N):
    """EIGDEC	Sorted eigendecomposition
        Description
        EVALS, EVEC = EIGDEC(X, N) computes the largest N eigenvalues of the
        matrix X in descending order.
        See also
        PCA, PPCA
        Copyright (c) Ian T Nabney (1996-2001)
        and Neil D. Lawrence (2009) (translation to python)"""

    # x = np.asmatrix(x)
    # Would be true if you are returning only eigenvectors, can't make
    # that decision in python
    evals_only = False

    if not N == round(N) or N < 1 or N > x.shape[1]:
        raise Exception('Number of eigenvalues must be integer, >0, < dim')

    # Find the eigenvalues of the data covariance matrix
    if evals_only:
        # This isn't called in python version.
        # Use eig function as always more efficient than eigs here
        temp_evals = np.linalg.eigvals(x)
    else:
        # Use eig function unless fraction of eigenvalues required is tiny
        if (N / x.shape[1]) > 0.04:
            temp_evals, temp_evec = la.eig(x)
        else:
            # Want to use eigs here, but it doesn't exist for python yet.
            # options.disp = 0
            # temp_evec, temp_evals = eigs(x, N, 'LM', options)
            temp_evals, temp_evec = la.eig(x)

    # Sort eigenvalues into descending order
    perm = np.argsort(-temp_evals)
    evals = temp_evals[perm[0:N]]

    if not evals_only:
        # should always come through here.
        evec = temp_evec[:, perm[0:N]]
    return evals, evec
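
A minimal usage sketch (not part of the original project), relying on the imports assumed above:

X = np.array([[4.0, 1.0, 0.0],
              [1.0, 3.0, 0.5],
              [0.0, 0.5, 1.0]])
evals, evec = eigdec(X, 2)    # two largest eigenvalues, in descending order
print(evals)                  # eigenvalues, largest first
print(evec.shape)             # (3, 2): one eigenvector column per retained eigenvalue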
Code example #3
File: fixed_formVB.py  Project: YSanchezAraujo/genus
import numpy as np


def decompose_kernel(M):
    # np.linalg.eig returns (eigenvalues, eigenvectors); unpack in that order.
    L = {'M': np.copy(M)}
    D, V = np.linalg.eig(M)
    L['V'] = np.real(V)    # eigenvectors (columns)
    L['D'] = np.real(D)    # eigenvalues as a 1-D array
    return L
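
A minimal usage sketch (hypothetical, relying on the numpy import above): for a symmetric kernel the stored eigenvectors and eigenvalues should reconstruct M.

M = np.array([[2.0, 0.5],
              [0.5, 1.0]])
L = decompose_kernel(M)
# V diag(D) V.T reconstructs a symmetric kernel.
print(np.allclose(L['V'] @ np.diag(L['D']) @ L['V'].T, L['M']))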
Code example #5
def design_regredob(sys_c_ol,
                    sampling_interval,
                    desired_settling_time,
                    desired_observer_settling_time=None,
                    spoles=None,
                    sopoles=None,
                    disp=True):
    """ Design a digital reduced order observer regulator with the desired settling time.
    
    Args:
        sys_c_ol (StateSpace): The continuous plant model
        sampling_interval: The sampling interval for the digital control system in seconds.
        desired_settling_time: The desired settling time in seconds
        desired_observer_settling_time (optional): The desired observer settling time
            in seconds. If not provided the observer settling time will be 4 times faster
            than the overall settling time. Default is None.
        spoles (optional): The desired closed loop poles. If not supplied, suitable
            poles will be chosen automatically. Default is None.
        sopoles (optional): The desired observer poles. If not supplied, suitable
            poles will be chosen automatically. Default is None.
        disp: Print debugging output. Default is True.

    Returns:
        tuple: (sys_d_ol, L, K, F, G, H), where sys_d_ol is the discrete plant, L is the stabilizing
            gain matrix, and K is the observer gain matrix.
    """

    # Make sure the system is in fact continuous and not discrete
    if sys_c_ol.dt is not None:
        print("Error: Function expects continuous plant")
        return None

    A = sys_c_ol.A
    B = sys_c_ol.B
    C = sys_c_ol.C
    D = sys_c_ol.D

    num_states = A.shape[0]
    num_inputs = B.shape[1]
    num_outputs = C.shape[0]
    num_measured_states = num_outputs
    num_unmeasured_states = num_states - num_measured_states

    # Convert to discrete system using zero order hold method
    sys_d_ol = sys_c_ol.to_discrete(sampling_interval, method="zoh")
    phi = sys_d_ol.A
    gamma = sys_d_ol.B

    # Check controlability of the discrete system
    controllability_mat = control.ctrb(phi, gamma)
    rank = LA.matrix_rank(controllability_mat)
    if rank != num_states:
        print(rank, num_states)
        print("Error: System is not controllable")
        return None

    # Check observability of the discrete system
    observability_mat = control.obsv(phi, C)
    rank = LA.matrix_rank(observability_mat)
    if rank != num_states:
        print("Error: System is not observable")
        return None

    # Choose poles if none were given
    if spoles is None:
        spoles = find_candadate_spoles(sys_c_ol, desired_settling_time, disp)

        num_spoles_left = num_states - len(spoles)
        if num_spoles_left > 0:
            # Use normalized bessel poles for the rest
            spoles.extend(
                control_poles.bessel_spoles(num_spoles_left,
                                            desired_settling_time))

    zpoles = control_poles.spoles_to_zpoles(spoles, sampling_interval)

    if disp:
        print("spoles = ", spoles)
        print("zpoles = ", zpoles)

    # place the poles such that eig(phi - gamma*L) are inside the unit circle
    full_state_feedback = signal.place_poles(phi, gamma, zpoles)

    # Check the poles for stability just in case
    for zpole in full_state_feedback.computed_poles:
        if abs(zpole) >= 1:
            print("Computed pole is not stable")
            return None

    L = full_state_feedback.gain_matrix

    # Choose poles if none were given
    if sopoles is None:
        sopoles = []
        if desired_observer_settling_time is None:
            desired_observer_settling_time = desired_settling_time / 4

        # TODO: Find existing poles based on the rules. For now just use bessel

        num_sopoles_left = num_unmeasured_states - len(sopoles)

        if num_sopoles_left > 0:
            # Use normalized bessel poles for the rest
            sopoles.extend(
                control_poles.bessel_spoles(num_sopoles_left,
                                            desired_observer_settling_time))
            if disp:
                print("Using normalized bessel for the remaining",
                      num_sopoles_left, "sopoles")

    zopoles = control_poles.spoles_to_zpoles(sopoles, sampling_interval)

    if disp:
        print("sopoles = ", sopoles)
        print("zopoles = ", zopoles)

    # partition out the phi and gamma matrix
    phi11 = phi[:num_measured_states, :num_measured_states]
    phi12 = phi[:num_measured_states, num_measured_states:]
    phi21 = phi[num_measured_states:, :num_measured_states]
    phi22 = phi[num_measured_states:, num_measured_states:]
    gamma1 = gamma[:num_measured_states]
    gamma2 = gamma[num_measured_states:]
    C1 = C[:num_measured_states, :num_measured_states]

    if num_measured_states >= num_states / 2 and LA.matrix_rank(
            phi12) == num_unmeasured_states:
        # case 1
        if num_unmeasured_states % 2 == 0:
            F = np.matrix([[zopoles[0].real, zopoles[0].imag],
                           [zopoles[1].imag, zopoles[1].real]])
        else:
            # We only support 1 real pole
            F = np.matrix([zopoles[0].real])
        cp = C1 * phi12
        cp_t = np.transpose(cp)

        K = (phi22 - F) * np.linalg.inv(cp_t * cp) * cp_t

    elif num_measured_states == 1:
        # case 2 (unsupported)
        print("unsupported design with measured states = 1")
        np.poly(np.linalg.eigvals(phi22))
    else:
        full_state_feedback = signal.place_poles(np.transpose(phi22),
                                                 np.transpose(C1 * phi12),
                                                 zopoles)
        K = np.transpose(full_state_feedback.gain_matrix)
        F = phi22 - K * C1 * phi12

    H = gamma2 - K * C1 * gamma1
    G = (phi21 - K * C1 * phi11) * np.linalg.inv(C1) + (F * K)

    # Check the poles for stability just in case
    for zopole in full_state_feedback.computed_poles:
        if abs(zopole) > 1:
            print("Computed observer pole is not stable")
            return None

    return (sys_d_ol, np.matrix(L), np.matrix(K), np.matrix(F), np.matrix(G),
            np.matrix(H))
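
A hypothetical usage sketch: the plant below is illustrative only, and it assumes the helper modules referenced above (control, control_poles, signal, LA) are importable and that the plant is a scipy.signal.StateSpace object.

import numpy as np
from scipy import signal

A = np.array([[0.0, 1.0], [-2.0, -3.0]])
B = np.array([[0.0], [1.0]])
C = np.array([[1.0, 0.0]])
D = np.array([[0.0]])
plant = signal.StateSpace(A, B, C, D)      # continuous plant, dt is None

result = design_regredob(plant, sampling_interval=0.01,
                         desired_settling_time=2.0, disp=False)
if result is not None:
    sys_d_ol, L, K, F, G, H = result       # discrete plant, regulator and observer gains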
Code example #6

# Covariance
def covarianza(x, y):
    sumaCova = 0.0
    for i in range(len(x)):
        sumaCova += (x[i] - np.mean(x)) * (y[i] - np.mean(y))
    return sumaCova / len(x)


# Covariance matrix. We will not take the ID variable into account, since it has nothing to do
# with having cancer. The Diagnosis variable is kept so we can see which variable influences
# the diagnosis the most.

cantidadVariables = len(matriz)
matrizCova = np.zeros((cantidadVariables, cantidadVariables))
for i in range(cantidadVariables):
    for j in range(cantidadVariables):
        if (i == 32):
            matrizCova[i][j] = covarianza(matriz[i], matriz[i])
        else:
            matrizCova[i][j] = covarianza(matriz[i], matriz[j])

print("MINE ", matrizCova, " COMPUTED BY NP ", np.cov(matriz))

# Step 3: compute eigenvalues and eigenvectors.

eigenValues, eigenVectors = np.linalg.eig(matrizCova)

#for i in range (len (eigenValues)):
#	print ( "ID EigenValue: "eigenValues[i], "ID EigenVector: ", eigenVectors[i], "\n Radius EigenValue: ", eigenValues[i],  " Radius EigenVector: ",eigenVectors[i], "\n Texture EigenValue: ", eigenValues[i],  " Texture EigenVector: ",eigenVectors[i], "\n Perimeter EigenValue: ", eigenValues[i],  " Perimeter EigenVector: ",eigenVectors[i], "\n Area EigenValue: ", eigenValues[i],  " Area EigenVector: ",eigenVectors[i], "\n Smoothness EigenValue: ", eigenValues[i],  " Smoothness EigenVector: ",eigenVectors[i], "\n Compactness EigenValue: ", eigenValues[i],  " Compactness EigenVector: ",eigenVectors[i], "\n Concavity EigenValue: ", eigenValues[i],  " Concavity EigenVector: ",eigenVectors[i], "\n Concave EigenValue: ", eigenValues[i],  " Concave EigenVector: ",eigenVectors[i], "\n Symmetry EigenValue: ", eigenValues[i],  " Symmetry EigenVector: ",eigenVectors[i],  "\n Fractal dimension EigenValue: ", eigenValues[i],  " Fractal dimension EigenVector: ",eigenVectors[i] )
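
A short follow-up sketch (not part of the original script): the usual next PCA step is to sort the eigenpairs by decreasing eigenvalue before inspecting them.

# Hypothetical continuation: order the eigenpairs of the covariance matrix, largest first.
orden = np.argsort(eigenValues)[::-1]
eigenValues = eigenValues[orden]
eigenVectors = eigenVectors[:, orden]
for i in range(len(eigenValues)):
    print("EigenValue:", eigenValues[i], "EigenVector:", eigenVectors[:, i])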
Code example #7
File: _findPIS.py  Project: pretl/python-deltasigma
    if np.max(xmax) > 100:
        print('findPIS: A direct simulation indicates that the modulator is unstable.')
        s = np.tile(np.Inf, (order, 1))
        e = None
        n = None
        o = None
        return
    
    x = x[:, 1+skip:N+skip+1]

    # Do scaling (coodinate transformation) to help qhull do better facet merging.
    # The scaling is based on principal component analysis (pg 105 of notebook 6).
    center = np.mean(x.T, axis=0).T
    xp = x - np.tile(center[:, 0], (1, np.shape(x)[1]))
    R = xp * xp.T / N
    L, Q = np.linalg.eig(R)
    Sc = Q*np.sqrt(L)
    Si = np.linalg.inv(Sc)
    A0, B0, C0, D0 = partitionABCD(ABCD)
    ABCD = np.array([[Si*A0*Sc, Si*B0],
                     [C0*Sc, D0]])
    x = Si*x
    xmax = np.max(np.abs(x.T)).T
    center = Si*center

    # Store original data in case I need to restart
    restart = 1
    x0 = x
    ABCD0 = ABCD
    Si0 = Si
    Sc0 = Sc
Code example #8

    n, m = X_0.shape
    predictions = []

    def preprocessing(raw_data=X_0):

        def regressor(reg_data=raw_data):
            pred = []
            j = np.arange(m)
            for i in range(n):
                slope = ((reg_data[i] @ j) - (reg_data[0, i] * j.sum())) / (j @ j)
                pred.append(slope * (m + 1) + reg_data[0, i])
            pred = np.array(pred).reshape(-1, 1)
            reg_data = np.hstack((reg_data, pred))
            return reg_data

        raw_data = regressor(raw_data)[:, 1:]
        return raw_data

    if X_1 is None:
        X_1 = preprocessing()

    # Dynamic-mode-decomposition step: economy SVD of the snapshot matrix.
    # Note: np.linalg.svd returns V transposed, so V.T below is the right-singular-vector matrix.
    U, S, V = np.linalg.svd(X_0, full_matrices=False)
    A_tilde = U.T @ X_1 @ V.T @ np.diag(1.0 / S)
    L, W = np.linalg.eig(A_tilde)          # L: eigenvalues, W: eigenvectors
    Phi = X_1 @ V.T @ np.diag(1.0 / S) @ W
    for i in range(1, threshold):
        eigenvalues = np.diag(L ** (i - 1))
        x_i = Phi @ eigenvalues @ np.linalg.pinv(Phi) @
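
The loop above is truncated in the source. A hedged sketch of the standard DMD prediction step it appears to be building, assuming X_0[:, 0] is the initial snapshot, would be:

    # Hedged sketch only (not from the original file): propagate the DMD modes from the
    # assumed initial snapshot X_0[:, 0] and collect the predictions.
    for i in range(1, threshold):
        eigenvalues = np.diag(L ** (i - 1))
        x_i = Phi @ eigenvalues @ np.linalg.pinv(Phi) @ X_0[:, 0]
        predictions.append(np.real(x_i))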
Code example #9
File: dlqr.py  Project: natolambert/si-rl-samples
def dlqr(*args, **keywords):
    """Linear quadratic regulator design for discrete systems
    Usage
    =====
    [K, S, E] = dlqr(A, B, Q, R, [N])
    [K, S, E] = dlqr(sys, Q, R, [N])
    The dlqr() function computes the optimal state feedback controller
    that minimizes the quadratic cost
        J = \sum_0^\infty x' Q x + u' R u + 2 x' N u
    Inputs
    ------
    A, B: 2-d arrays with dynamics and input matrices
    sys: linear I/O system
    Q, R: 2-d array with state and input weight matrices
    N: optional 2-d array with cross weight matrix
    Outputs
    -------
    K: 2-d array with state feedback gains
    S: 2-d array with solution to Riccati equation
    E: 1-d array with eigenvalues of the closed loop system
    """

    #
    # Process the arguments and figure out what inputs we received
    #

    # Get the system description
    if (len(args) < 3):
        raise ControlArgument("not enough input arguments")

    elif (ctrlutil.issys(args[0])):
        # We were passed a system as the first argument; extract A and B
        A = np.array(args[0].A, ndmin=2, dtype=float)
        B = np.array(args[0].B, ndmin=2, dtype=float)
        index = 1
        if args[0].dt == 0.0:
            print("dlqr works only for discrete systems!")
            return
    else:
        # Arguments should be A and B matrices
        A = np.array(args[0], ndmin=2, dtype=float)
        B = np.array(args[1], ndmin=2, dtype=float)
        index = 2

    # Get the weighting matrices (converting to matrices, if needed)
    Q = np.array(args[index], ndmin=2, dtype=float)
    R = np.array(args[index + 1], ndmin=2, dtype=float)
    if (len(args) > index + 2):
        N = np.array(args[index + 2], ndmin=2, dtype=float)
        Nflag = 1
    else:
        N = np.zeros((Q.shape[0], R.shape[1]))
        Nflag = 0

    # Check dimensions for consistency
    nstates = B.shape[0]
    ninputs = B.shape[1]
    if (A.shape[0] != nstates or A.shape[1] != nstates):
        raise ControlDimension("inconsistent system dimensions")

    elif (Q.shape[0] != nstates or Q.shape[1] != nstates or
          R.shape[0] != ninputs or R.shape[1] != ninputs or
          N.shape[0] != nstates or N.shape[1] != ninputs):
        raise ControlDimension("incorrect weighting matrix dimensions")

    if Nflag == 1:
        Ao = A - B @ np.linalg.inv(R) @ N.T
        Qo = Q - N @ np.linalg.inv(R) @ N.T
    else:
        Ao = A
        Qo = Q

    # Solve the riccati equation
    (X, L, G) = dare(Ao, B, Qo, R)
    #    X = bb_dare(Ao,B,Qo,R)

    # Now compute the return value
    Phi = np.mat(A)
    H = np.mat(B)
    K = np.linalg.inv(H.T * X * H + R) * (H.T * X * Phi + N.T)
    L = np.linalg.eigvals(Phi - H * K)
    return K, X, L
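
A hypothetical usage sketch for dlqr on a discretized double integrator; the matrices are illustrative only, and the dare helper used above is assumed to be importable.

import numpy as np

dt = 0.1
A = np.array([[1.0, dt], [0.0, 1.0]])    # discrete double integrator
B = np.array([[0.5 * dt**2], [dt]])
Q = np.eye(2)
R = np.array([[1.0]])

K, S, E = dlqr(A, B, Q, R)
print(K)                                  # state feedback gain
print(np.abs(E) < 1)                      # closed-loop eigenvalues inside the unit circle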
Code example #10
def run_varimax(data, nm='captvar', cv=50, nmode=0, mv=0.01, **kwargs):
    # Compute the PCA varimax decomposition of a 2D matrix along the second dimension
    # Parameters:
    #   data : a 2D numpy array
    #   nm (optional): how the number of modes is fixed
    #       - 'captvar' (default) percentage of the total variance captured by the sum of the modes
    #       - 'minvar' minimum fraction of variance captured by the kept modes
    #       - 'nmod'   number of modes
    #   cv: percentage of the total variance captured by the sum of the modes (if 'captvar')
    #   nmode:   number of modes (if 'nmod')
    #   mv: minimum fraction of variance (if 'minvar')
    # Returns:
    #   ru :  varimax eigen vector
    #   rpc: varimax pc
    #   rw : varimax captured variance
    #
    #
    N, M = data.shape
    # Normalization of the input
    for k in range(N):
        st = np.std(data[k, :])
        if st != 0:
            data[k, :] = (data[k, :] - np.mean(data[k, :])) / st

    # Principal component analysis
    S = np.dot(data.T, data)
    w, v = np.linalg.eig(S)
    w = np.real(w)
    v = np.real(v)
    pc = np.dot(np.transpose(v), data.T)
    # Mode selection
    if nm == 'captvar':
        s = np.cumsum(w) / sum(w)
        npc = np.nonzero(s > cv * 0.01)[0][0] + 1
    elif nm == 'minvar':
        s = w / np.sum(w)
        npc = np.count_nonzero(s >= mv)
    elif nm == 'nmod':
        npc = nmode
    w0 = np.copy(w)
    ii = np.flip(np.argsort(w))
    w = w[ii]
    pc = pc[ii, :]
    v = v[:, ii]
    w = w[:npc]
    v = v[:, :npc]
    pc = pc[:npc, :]
    # call the varimax function
    Lambda = varimax(v, q=20, tol=1e-6)
    # construction of the varimax PCs
    ru = Lambda
    rpc = np.dot(ru.T, data.T)
    rw = np.zeros(npc)
    for k in range(npc):
        reck = np.dot(np.reshape(ru[:, k], (M, 1)),
                      np.reshape(rpc[k, :], (1, N)))
        rw[k] = 1 - np.var(data.T - reck) / np.var(data.T)
    ia = np.flipud(np.argsort(rw))
    rw = rw[ia]
    ru = ru[:, ia]
    rpc = rpc[ia, :]
    for k in range(npc):
        if ru[:, k].max() < -ru[:, k].min():
            ru[:, k] = -ru[:, k]
            rpc[k, :] = -rpc[k, :]
        if v[:, k].max() < -v[:, k].min():
            v[:, k] = -v[:, k]
            pc[k, :] = -pc[k, :]
    return ru, rpc, rw
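
A hypothetical usage sketch on random data, assuming a compatible varimax(v, q, tol) rotation helper is available in the same module:

import numpy as np

rng = np.random.default_rng(0)
data = rng.standard_normal((200, 10))     # N samples x M variables
ru, rpc, rw = run_varimax(data, nm='captvar', cv=90)
print(ru.shape, rpc.shape, rw)            # modes kept to capture 90% of the variance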
Code example #11
File: lsr1tr.py  Project: aranganath/pytorchProjects
def lsr1tr_obs(g,SY, SS,YY, Sc, Yc, indS, delta, gamma):
	maxiter = 100
	tol = 1e-10
	try:
		gammaIn = gamma
		A = np.tril(SY) + np.tril(SY,-1)
		B = SS
		eABmin = min(np.real(scipy.linalg.eigvals(A, B)))  # generalized eigenvalues of (A, B); assumes scipy.linalg is imported

		if(gamma >-eABmin or gamma==1):
			if (eABmin > 0):
				gamma = max(0.5*eABmin, 1e-6)
			else:
				gamma = min(1.5*eABmin,-1e-6)
			print('gamma={}, eABmin={}, gammaNew={}\n'.format(gammaIn, eABmin, gamma))
	except:
		gamma = gammaIn

	invM = np.tril(SY) + np.transpose(np.tril(SY,-1)) - gamma*SS
	invM = (invM +np.transpose(invM))/2
	PsiPsi = YY - gamma *(SY +np.transpose(SY) + gamma**2*SS)

	R = np.linalg.cholesky(PsiPsi)
	RMR = R*(invM*np.linalg.inv(np.transpose(R)))
	RMR = (RMR +np.transpose(RMR))/2
	D, U = np.linalg.eig(RMR)   # eigenvalues first, then eigenvectors (columns)
	U = np.matrix(U)
	diag = D
	D = np.sort(diag)
	indD = np.argsort(diag)
	U = U[:,indD]
	sizeD = np.size(D)
	Lambda_one = D + gamma*np.ones(sizeD)
	Lambda = np.append(Lambda_one,gamma)
	Lambda = Lambda*(np.absolute(Lambda)>tol)
	lambda_min = np.min([Lambda[0], gamma])

	RU = R*np.linalg.inv(U)
	Psig = cellTransMatMult(Yc, indS, g) - gamma*cellTransMatMult(Sc, indS, g)
	g_parallel = np.transpose(RU)*Psig

	a_kp2 = np.sqrt(np.absolute(np.transpose(g)*g-np.transpose(g_parallel)*g_parallel))
	if(a_kp2<tol):
		a_kp2 = 0

	a_j = np.append(g_parallel,a_kp2)

	if(lambda_min>0 and np.linalg.norm(a_j/Lambda)<=delta):
		sigmaStar = 0
		pStar = ComputeSBySMW(gamma, g, Psig, Sc, Yc, indS, gamma, invM, PsiPsi)
	elif(lambda_min<=0 and phiBar_f(-lambda_min,delta, Lambda, delta,Lambda, a_j)>0):
		Psi = cell2mat(Yc[indS]) - gamma*cell2mat(Sc[indS])
		sigmaStar = -lambda_min
		P_parallel = Psi*RU
		index_pseudo = find(np.absolute(Lambda+sigmaStar)>tol)
		v = np.zeros((sizeD+1, 1))
		v[index_pseudo] = a_j[index_pseudo]/(Lambda[index_pseudo]+sigmaStar)
		if(np.absolute(gamma + sigmaStar)<tol):
			pStar = -P_parallel*v[0:sizeD-1]
		else:
			pStar = -P_parallel*v[0:sizeD-1] + (1/(gamma+sigmaStar))*(Psi*(PsiPsi*np.linalg.inv(Psig))) - (g / (gamma+sigmaStar))

		if(lambda_min<0):
			alpha = np.sqrt(delta**2-np.transpose(pStar)*pStar)
			pHatStar = pStar
		
			if(np.absolute(lambda_min-Lambda[0])<tol):
				zstar = (1/np.linalg.norm(P_parallel[:,0]))*alpha*P_parallel[:,0]
			else:
				e = np.zeros((np.size(g, 0), 1))
				found = 0
				for i in range(sizeD):
					e[i] = 1
					u_min = e-P_parallel*np.transpose(P_parallel[i,:])
					if (np.linalg.norm(u_min)>tol):
						found =1
						break
					e[i] =0
				if(found==0):
					e[m+1] = 1
					u_min = e - P_parallel*P_parallel[i,:]
				u_min = u_min/np.linalg.norm(u_min)
				zstar = alpha*u_min

			pStar = pHatStar + zstar
		else:
			if(lambda_min>0):
				sigmaStar = Newton(0,maxiter,tol,delta,Lambda,a_j)
			else:
				sigmaHat = max(a_j/delta - Lambda)
				if (sigmaHat>-lambda_min):
					sigmaStar = Newton(sigmaHat, maxiter,tol,delta,Lambda,a_j)
				else:
					sigmaStar = Newton(-lambda_min, maxiter,tol,delta,Lambda,a_j)

		pStar = ComputeSBySMW(gamma+sigmaStar,g,Psig,Sc, Yc,indS, gamma,invM, PsiPsi)

	PsipStar = cellTransMatMult(Yc, indS,pStar) - gamma*cellTransMatMult(Sc,indS,pStar)
	tmp = invM*np.linalg.inv(PsipStar)
	Psitmp = cellMatMult(Yc,indS,tmp) - gamma*cellMatMult(Sc,indS,tmp)
	BpStar = gamma*pStar + Psitmp
	if(show>1):
		opt1 = np.linalg.norm(BpStar+sigmaStar*pStar + g)
		opt2 = sigmaStar*np.linalg.norm(delta-np.linalg.norm(pStar))
		spd_check = lambda_min + sigmaStar
		if(show==2):
			print('Optimality condition #1: {}'.format(opt1))
			print('Optimality condition #2: {}'.format(opt2))
			print('lambda_min+sigma*:{}, lam:{}, sig={}'.format(spd_check, lambda_min,sigmaStar))
			print('\n')
	else:
		opt1 = []
		opt2 = []
		spd_check = []
		phiBar_check = []

	return [sigmaStar,pStar,BpStar,opt1,opt2,spd_check, gamma]