Example #1
def show_solution(A, b, bp, ls):
    # "real" solution
    x = solve(A, b)

    for l in ls:
        bc = l * bp  # current b matrix
        db = abs(bc - b)

        print("With %f: " % l)
        print("\t║𝚫b║₂ = %f" % frobenius(db))

        # solve it
        xc = solve(A, l * bp)
        dx = abs(xc - x)

        # Calculate the errors
        Ainv = inv(A)
        cond = frobenius(A) * frobenius(Ainv)
        abserr = frobenius(Ainv) * frobenius(db)
        relerr = cond * (frobenius(db) / frobenius(b))

        print("\t Expected max error %f" % abserr)
        print("\t║𝚫x║₂ = %f" % frobenius(dx))
        print("\t Expected rel error %f" % relerr)
        print("\t║𝚫x║₂/║x║₂ = %f" % (frobenius(dx) / frobenius(x)))
        print("")
Example #2
	def rols(self, keep=True):
		"""Return: array(T-ncoefs by ncoefs)

		Compute "recursive OLS" parameter estimates.

		:todo: add standard errors
		"""
		if self._rols_coefs is not None:
			return self._rols_coefs
		from numpy.linalg import solve
		Y, X = np.asmatrix(self.Y), np.asmatrix(self.X)
		nobs, ncoefs = X.shape
		X0 = X[:ncoefs]  #square (ncoefs x ncoefs) matrix
		Y0 = Y[:ncoefs]  #matching (ncoefs x 1) right-hand side
		#create array to hold parameter estimates
		coef_array = np.empty( (nobs-ncoefs+1, ncoefs) )
		#get initial parameter estimate (shortest possible data sample)
		coef_array[0] = solve(X0,Y0).A1
		xTx = X0.T * X0
		xTy = X0.T * Y0
		#iteratively update parameter estimates
		for i in range(ncoefs,nobs):
			xTx += X[i].T * X[i]
			xTy += X[i].T * Y[i]
			coef_array[i-ncoefs+1] = solve(xTx,xTy).A1
		if keep:
			self._rols_coefs = coef_array
		return coef_array
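# Standalone sketch of the same recursive-OLS update (illustrative data and
# names, not the class API above): start from the shortest identified sample
# and add one observation at a time, re-solving the normal equations.
import numpy as np
from numpy.linalg import solve

rng = np.random.default_rng(0)
nobs, ncoefs = 50, 3
X = rng.normal(size=(nobs, ncoefs))
Y = X @ np.array([1.0, -2.0, 0.5]) + 0.1 * rng.normal(size=nobs)

xTx = X[:ncoefs].T @ X[:ncoefs]
xTy = X[:ncoefs].T @ Y[:ncoefs]
coefs = [solve(xTx, xTy)]
for i in range(ncoefs, nobs):
    xTx += np.outer(X[i], X[i])
    xTy += X[i] * Y[i]
    coefs.append(solve(xTx, xTy))
coef_array = np.vstack(coefs)   # shape (nobs - ncoefs + 1, ncoefs)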
Example #3
def ridge_regression(X, Y, c1=0.0, c2=0.0, offset=None):
    """
    Also known as Tikhonov regularization. This solves the minimization problem:

    min_{beta} ||(beta X - Y)||^2 + c1||beta||^2 + c2||beta - offset||^2

    One can find more information here: http://en.wikipedia.org/wiki/Tikhonov_regularization

    Parameters:
        X: a (n,d) numpy array
        Y: a (n,) numpy array
        c1: a scalar
        c2: a scalar
        offset: a (d,) numpy array.

    Returns:
        beta_hat: the solution to the minimization problem.
        V = (X*X^T + (c1+c2)I)^{-1} X^T

    """
    n, d = X.shape
    X = X.astype(float)
    penalizer_matrix = (c1 + c2) * np.eye(d)

    if offset is None:
        offset = np.zeros((d,))

    A = (np.dot(X.T, X) + penalizer_matrix)
    b = (np.dot(X.T, Y) + c2 * offset)

    # rather than explicitly computing the inverse, just solve the system of equations
    return (solve(A, b), solve(A, X.T))
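# Usage sketch (assumed setup, with solve imported from numpy.linalg):
# recover known coefficients and confirm that beta_hat equals V @ Y.
import numpy as np

rng = np.random.default_rng(1)
X = rng.normal(size=(100, 4))
Y = X @ np.array([2.0, 0.0, -1.0, 0.5]) + 0.05 * rng.normal(size=100)

beta_hat, V = ridge_regression(X, Y, c1=0.1)
print(beta_hat)                        # close to [2, 0, -1, 0.5]
print(np.allclose(beta_hat, V @ Y))    # True by construction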
Example #4
    def als_step(self,
                 latent_vectors,
                 fixed_vecs,
                 ratings,
                 _lambda,
                 type='user'):
        """
        One of the two ALS steps. Solve for the latent vectors
        specified by type.
        """
        if type == 'user':
            # Precompute
            YTY = fixed_vecs.T.dot(fixed_vecs)
            lambdaI = np.eye(YTY.shape[0]) * _lambda

            for u in range(latent_vectors.shape[0]):
                latent_vectors[u, :] = solve((YTY + lambdaI), 
                                             ratings[u, :].dot(fixed_vecs))
        elif type == 'item':
            # Precompute
            XTX = fixed_vecs.T.dot(fixed_vecs)
            lambdaI = np.eye(XTX.shape[0]) * _lambda
            
            for i in range(latent_vectors.shape[0]):
                latent_vectors[i, :] = solve((XTX + lambdaI), 
                                             ratings[:, i].T.dot(fixed_vecs))
        return latent_vectors
Example #5
def qzswitch(i, A2, B2, Q, Z):
    #print i, A2, B2, Q, Z
    Aout = A2.copy(); Bout = B2.copy(); Qout = Q.copy(); Zout = Z.copy()
    ix = i-1    # from 1-based to 0-based indexing...
    # use all 1x1-matrices for convenient conjugate-transpose even if real:
    a = mat(A2[ix, ix]); d = mat(B2[ix, ix]); b = mat(A2[ix, ix+1]);
    e = mat(B2[ix, ix+1]); c = mat(A2[ix+1, ix+1]); f = mat(B2[ix+1, ix+1])
    wz = c_[c*e - f*b, (c*d - f*a).H]
    xy = c_[(b*d - e*a).H, (c*d - f*a).H]
    n = sqrt(wz*wz.H)
    m = sqrt(xy*xy.H)
    if n[0,0] == 0: return (Aout, Bout, Qout, Zout)
    wz = solve(n, wz)
    xy = solve(m, xy)
    wz = r_[ wz, \
            c_[-wz[:,1].H, wz[:,0].H]]
    xy = r_[ xy, \
         c_[-xy[:,1].H, xy[:,0].H]]
    Aout[ix:ix+2, :] = xy * Aout[ix:ix+2, :]
    Bout[ix:ix+2, :] = xy * Bout[ix:ix+2, :]
    Aout[:, ix:ix+2] = Aout[:, ix:ix+2] * wz
    Bout[:, ix:ix+2] = Bout[:, ix:ix+2] * wz
    Zout[:, ix:ix+2] = Zout[:, ix:ix+2] * wz
    Qout[ix:ix+2, :] = xy * Qout[ix:ix+2, :]
    return (Aout, Bout, Qout, Zout)
Example #6
def row_3_step(f, Jf, yi, h):
    r"""Rosenbrock-Wanner Methode der Ordnung 3

    Input:
    f:   Die rhs Funktion f(x): R^(nx1) -> R^(nx1)
    Jf:  Jacobi Matrix J(x) der Funktion: R^(nx1) -> R^(nxn)
    yi:  Aktueller Wert y_i zur Zeit ti
    h:   Schrittweite

    Output:
    yip1:  Zeitpropagierter Wert y(t+h): R^(nx1)
    """
    yi = atleast_2d(yi)
    n = yi.shape[0]
    yip1 = zeros_like(yi)

    ####################################################
    #                                                  #
    # TODO: Implement the ROW-3 method here.           #
    #                                                  #
    ####################################################

    a = 1/(2+sqrt(2))
    d31 = -(4+sqrt(2))/(2+sqrt(2))
    d32 = (6+sqrt(2))/(2+sqrt(2))
    J = Jf(yi)
    I = identity(n)
    
    k1 = solve((I - a*h*J), f(yi))
    k2 = solve((I - a*h*J), f(yi+h*0.5*k1) - a*h*J.dot(k1))
    k3 = solve((I - a*h*J), f(yi+h*k2) - d31*h*J.dot(k1) - d32*h*J.dot(k2))
    yip1 = yi + h/6*(k1+4*k2+k3)

    return yip1
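# Usage sketch (assumed imports matching the snippet's module-level names):
# integrate y' = -y from y(0) = 1; the exact solution is exp(-t).
import numpy as np
from numpy import atleast_2d, identity, sqrt, zeros_like
from numpy.linalg import solve

f = lambda y: -y
Jf = lambda y: -identity(y.shape[0])
y, h = atleast_2d([1.0]), 0.1
for _ in range(10):
    y = row_3_step(f, Jf, y, h)
print(y, np.exp(-1.0))   # should agree to a few decimal places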
Example #7
    def setCoefs(self):
        self.aquiferParent = self.modelParent.aq.findAquiferData(self.xw,self.yw)
        self.rwsq = self.rw**2
        self.pylayers = self.layers-1
        self.NscreenedLayers = len(self.layers)
        if self.NscreenedLayers == 1:  # Screened in only one layer, no unknown parameters
            self.parameters = array([[self.discharge]])
        else:
            self.parameters = zeros((self.NscreenedLayers,1),'d')
        self.coef = ones((self.NscreenedLayers,self.aquiferParent.Naquifers),'d')
        if self.aquiferParent.Naquifers > 1:   # Multiple aquifers, must compute coefficients
            if self.aquiferParent.type == self.aquiferParent.conf:
                for i in range(self.NscreenedLayers):
                    pylayer = self.pylayers[i]
                    ramat = self.aquiferParent.eigvec[:,1:]  # All eigenvectors, but not first normalized transmissivity
                    ramat = vstack(( ramat[0:pylayer,:], ramat[pylayer+1:,:] ))  # Remove row pylayer
                    rb = self.aquiferParent.eigvec[:,0] / (2.0*pi)   # Store Tn vector in rb
                    rb = hstack(( rb[0:pylayer], rb[pylayer+1:] )) # solve takes rowvector
                    self.coef[i,1:self.aquiferParent.Naquifers] = linalg.solve(ramat,rb)
            elif self.aquiferParent.type == self.aquiferParent.semi:
                for i in range(self.NscreenedLayers):
                    pylayer = self.pylayers[i]
                    ramat = self.aquiferParent.eigvec
                    rb = zeros(self.aquiferParent.Naquifers,'d')
                    rb[pylayer] = - 1.0 / (2.0 * pi)
                    self.coef[i,:] = linalg.solve(ramat,rb)
        if self.aquiferParent.Naquifers == 1 and self.aquiferParent.type == self.aquiferParent.semi:
            self.coef[0,0] = -1.0 / (2.0 * pi)
        self.paramxcoef = sum( self.parameters * self.coef, 0 )  # Parameters times coefficients
Example #8
def fJr(pars, x, y = 0, calcJ = True):
    """
    calculate f and J for reduced system (only nonlinear parameters)
    """

    F, Fd = rF(pars, x)

    #calculate linear parameters
    FtF = inner(F, F)
    Fty = inner(F, y)
    c = solve(FtF, Fty)

    #calculate residual
    r = dot(c, F) - y

    if not calcJ:
        return r, c, F

    ##calculate complete Jacobian
    cd = numpy.empty(shape = (len(pars),) + c.shape)
    Jr = numpy.empty(shape = (len(pars),) + x.shape)
    for j in range(len(pars)):
        cd[j] = solve(FtF, inner(Fd[j], r) - inner(F, dot(c, Fd[j])))
        Jr[j] = dot(c, Fd[j]) + dot(cd[j], F)

    return r, Jr
Example #9
def resolveSistema(chuteInicial,dimensao):
    erro = 0.00001
    matrizJacobiana = matrix(calculaMatrizJacobiana(dimensao,chuteInicial))
    matrizTermoFonte = matrix(calculaTermoFonte(dimensao,chuteInicial))
    try:
        matrizSolucao = linalg.solve(matrizJacobiana,matrizTermoFonte) # solve the linear system
    except LinAlgError:
        #return [0,0]
        return 'ERRO'
    deltas = []
    for linha in range(dimensao):
        deltas.append(matrizSolucao[linha,0])
    for linha in range(dimensao):
        chuteInicial[linha] = chuteInicial[linha] + deltas[linha]
    
    while (criterioParada(matrizTermoFonte,erro,dimensao) == 0):
        matrizJacobiana = matrix(calculaMatrizJacobiana(dimensao,chuteInicial))
        matrizTermoFonte = matrix(calculaTermoFonte(dimensao,chuteInicial))
        try:
            matrizSolucao = linalg.solve(matrizJacobiana,matrizTermoFonte)
        except LinAlgError:
            #return [0,0]
            return 'ERRO'
        for linha in range(dimensao):
            deltas[linha] = matrizSolucao[linha,0]
        for linha in range(dimensao):
            chuteInicial[linha] = chuteInicial[linha] + deltas[linha]
    
    return chuteInicial
Example #10
	def observables2parameters(self,features_covariance=None):

		"""
		Computes the conversion matrix M that maps a feature vector V to its best fit parameters P, in the sense P = P[fiducial] + MV

		:param features_covariance: covariance matrix of the simulated features, must be provided!
		:type features_covariance: 2 dimensional array (or 1 dimensional if diagonal)

		:returns: the (p,N) conversion matrix
		:rtype: array

		"""

		#Safety checks
		assert features_covariance is not None,"No science without the covariance matrix, you must provide one!"
		assert features_covariance.shape in [self.training_set.shape[-1:],self.training_set.shape[-1:]*2]

		#Check if derivatives are already computed 
		if not hasattr(self,"derivatives"):
			self.compute_derivatives()

		#Linear algebra manipulations (parameters = M x features)
		if features_covariance.shape == self.training_set.shape[1:] * 2:
			Y = solve(features_covariance,self.derivatives.transpose())
		else:
			Y = (1/features_covariance[:,np.newaxis]) * self.derivatives.transpose()

		XY = np.dot(self.derivatives,Y)
		
		return solve(XY,Y.transpose())
Example #11
def transform(data, canvas, replace=False):
    x = solve(data.A, data.b)
    x2 = solve(data.A2, data.b2)

    a1 = data.A[:, 0] * x[0]
    b1 = data.A[:, 1] * x[1]
    c1 = data.A[:, 2] * x[2]

    a2 = data.A2[:, 0] * x2[0]
    b2 = data.A2[:, 1] * x2[1]
    c2 = data.A2[:, 2] * x2[2]

    L1 = inv(np.array([a1, b1, c1]).transpose())
    L2 = np.array([a2, b2, c2]).transpose()
    L = np.dot(L2, L1)

    result = np.dot(L, data.chosen_point)
    result = result / result[2]

    if not replace:
        data.transformed_point = canvas.create_oval(result[0] + Const.RADIUS, result[1] + Const.RADIUS,
                                                    result[0] - Const.RADIUS, result[1] - Const.RADIUS,
                                                    fill="red")
    else:
        canvas.coords(data.transformed_point,
                      result[0] + Const.RADIUS, result[1] + Const.RADIUS,
                      result[0] - Const.RADIUS, result[1] - Const.RADIUS)
Example #12
def marginalLikelihood(kernel, X, Y, nhyper, computeGradient=True, useCholesky=True, noise=1e-3):
    """
    get the negative log marginal likelihood and its partial derivatives wrt
    each hyperparameter
    """
    NX = len(X)
    assert NX == len(Y)
    # compute covariance matrix
    K = kernel.covMatrix(X) + eye(NX)*noise
    
    if useCholesky:
        # avoid inversion by using Cholesky decomp.
        try:
            L = cholesky(K)
            alpha = solve(L.T, solve(L, Y))
        except LinAlgError as e:
            print('\n ================ error in matrix')
            print('\thyper =', kernel.hyperparams)
            print('====================================')
            logBadParams(kernel)
            pdb.set_trace()
        nlml = 0.5 * dot(Y, alpha) + sum(log(diag(L))) + 0.5 * NX * log(2.0*pi)
        if computeGradient:
            W = solve(L.T, solve(L, eye(NX))) - outer(alpha, alpha)
            dnlml = array([sum(W*kernel.derivative(X, i)) / 2.0 for i in range(nhyper)])
            # print '  loglik =', nlml, '   d loglik =', dnlml
            return nlml, dnlml
        else:
            return nlml
Example #13
 def ROBO_output(self):
     if len(self.d) == 1:
         return
     fits = len(self.d)
     ctrls = fits + 2
     knots = ctrls + 4
     self.xfit = concatenate((self.xfit, zeros((2))))    # pad with 2 endpoint constraints
     self.yfit = concatenate((self.yfit, zeros((2))))    # pad with 2 endpoint constraints
     self.d = concatenate((self.d, zeros((6))))          # pad with 3 duplicates at each end
     self.d[fits+2] = self.d[fits+1] = self.d[fits] = self.d[fits-1]
     solmatrix = zeros((ctrls,ctrls), dtype=float)
     for i in range(fits):
         solmatrix[i,i]   = get_matrix(self.d, i, i)
         solmatrix[i,i+1] = get_matrix(self.d, i, i+1)
         solmatrix[i,i+2] = get_matrix(self.d, i, i+2)
     solmatrix[fits, 0]   = self.d[2]/self.d[fits-1]     # curvature at start = 0
     solmatrix[fits, 1]   = -(self.d[1] + self.d[2])/self.d[fits-1]
     solmatrix[fits, 2]   = self.d[1]/self.d[fits-1]
     solmatrix[fits+1, fits-1] = (self.d[fits-1] - self.d[fits-2])/self.d[fits-1]   # curvature at end = 0
     solmatrix[fits+1, fits]   = (self.d[fits-3] + self.d[fits-2] - 2*self.d[fits-1])/self.d[fits-1]
     solmatrix[fits+1, fits+1] = (self.d[fits-1] - self.d[fits-3])/self.d[fits-1]
     xctrl = solve(solmatrix, self.xfit)
     yctrl = solve(solmatrix, self.yfit)
     self.handle += 1
     self.dxf_add("  0\nSPLINE\n  5\n%x\n100\nAcDbEntity\n  8\n%s\n 62\n%d\n100\nAcDbSpline\n" % (self.handle, self.layer_ROBO, self.color_ROBO))
     self.dxf_add(" 70\n0\n 71\n3\n 72\n%d\n 73\n%d\n 74\n%d\n" % (knots, ctrls, fits))
     for i in range(knots):
         self.dxf_add(" 40\n%f\n" % self.d[i-3])
     for i in range(ctrls):
         self.dxf_add(" 10\n%f\n 20\n%f\n 30\n0.0\n" % (xctrl[i],yctrl[i]))
     for i in range(fits):
         self.dxf_add(" 11\n%f\n 21\n%f\n 31\n0.0\n" % (self.xfit[i],self.yfit[i]))
Example #14
    def test_0_size(self):
        class ArraySubclass(np.ndarray):
            pass
        # Test system of 0x0 matrices
        a = np.arange(8).reshape(2, 2, 2)
        b = np.arange(6).reshape(1, 2, 3).view(ArraySubclass)

        expected = linalg.solve(a, b)[:, 0:0,:]
        result = linalg.solve(a[:, 0:0, 0:0], b[:, 0:0,:])
        assert_array_equal(result, expected)
        assert_(isinstance(result, ArraySubclass))

        # Test errors for non-square and only b's dimension being 0
        assert_raises(linalg.LinAlgError, linalg.solve, a[:, 0:0, 0:1], b)
        assert_raises(ValueError, linalg.solve, a, b[:, 0:0,:])

        # Test broadcasting error
        b = np.arange(6).reshape(1, 3, 2) # broadcasting error
        assert_raises(ValueError, linalg.solve, a, b)
        assert_raises(ValueError, linalg.solve, a[0:0], b[0:0])

        # Test zero "single equations" with 0x0 matrices.
        b = np.arange(2).reshape(1, 2).view(ArraySubclass)
        expected = linalg.solve(a, b)[:, 0:0]
        result = linalg.solve(a[:, 0:0, 0:0], b[:, 0:0])
        assert_array_equal(result, expected)
        assert_(isinstance(result, ArraySubclass))

        b = np.arange(3).reshape(1, 3)
        assert_raises(ValueError, linalg.solve, a, b)
        assert_raises(ValueError, linalg.solve, a[0:0], b[0:0])
        assert_raises(ValueError, linalg.solve, a[:, 0:0, 0:0], b)
Example #15
 def _update(self):
     """
     Calculate those terms for prediction that do not depend on predictive
     inputs.
     """
     from numpy.linalg import cholesky, solve, LinAlgError
     from numpy import transpose, eye, matrix
     import types
     self._K = self.calc_covariance(self.X)
     if not self._K.shape[0]:  # we didn't have any data
         self._L = matrix(zeros((0, 0), numpy.float64))
         self._alpha = matrix(zeros((0, 1), numpy.float64))
         self.LL = 0.
     else:
         try:
             self._L = matrix(cholesky(self._K))
         except LinAlgError as detail:
             raise RuntimeError("""Cholesky decomposition of covariance """
                                """matrix failed. Your kernel may not be positive """
                                """definite. Scipy complained: %s""" % detail)
         self._alpha = solve(self._L.T, solve(self._L, self.y))
         self.LL = (
             - self.n * math.log(2.0 * math.pi)
             - (self.y.T * self._alpha)[0, 0]
         ) / 2.0
     # print self.LL
     # import IPython; IPython.Debugger.Pdb().set_trace()
     self.LL -= log(diagonal(self._L)).sum()
Example #16
    def cal_varcov(self, θ2_vec):
        """calculate variance covariance matrix"""
        θ2, ix_θ2_T, Z, LinvW, X1 = self.θ2, self.ix_θ2_T, self.Z, self.LinvW, self.X1

        θ2.T[ix_θ2_T] = θ2_vec

        # update δ
        δ = self.cal_δ(θ2)

        jacob = self.cal_jacobian(θ2, δ)

        θ1, ξ = self.cal_θ1_and_ξ(δ)

        Zres = Z * ξ.reshape(-1, 1)
        Ω = Zres.T @ Zres  # covariance of the moment conditions

        G = (np.c_[X1, jacob].T @ Z).T  # gradient of the moment conditions

        WG = cho_solve(LinvW, G)
        WΩ = cho_solve(LinvW, Ω)

        tmp = solve(G.T @ WG, G.T @ WΩ @ WG).T  # G'WΩWG(G'WG)^(-1) part

        varcov = solve((G.T @ WG), tmp)

        return varcov
Example #17
def serial_solve(M,Y, debug=False):
    '''
    :param M: a pxpxN array
    :param Y: a pxN array
    :return X: a pxN array X such that M(:,:,i)*X(:,i) = Y(:,i)
    '''

    ## surprisingly it is slower than inverting M and doing a serial multiplication!

    import numpy
    from numpy.linalg import solve


    p = M.shape[0]
    assert(M.shape[1] == p)
    N = M.shape[2]
    assert(Y.shape == (p,N) )

    X = numpy.zeros((p,N))

    if not debug:
        for i in range(N):
            X[:,i] = solve(M[:,:,i],Y[:,i])
    else:
        for i in range(N):
            try:
                X[:,i] = solve(M[:,:,i],Y[:,i])
            except Exception as e:
                print('Derivative {}'.format(i))
                print(M[:,:,i])
                raise Exception('Error while solving point {}'.format(i))

    return X
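# Usage sketch: random batch of systems, checked against numpy's own
# broadcasting solve (which wants the stacking axis in front).
import numpy as np

rng = np.random.default_rng(2)
p, N = 3, 5
M = rng.normal(size=(p, p, N))
Y = rng.normal(size=(p, N))

X = serial_solve(M, Y)
X2 = np.linalg.solve(np.moveaxis(M, 2, 0), Y.T[..., None])[..., 0].T
print(np.allclose(X, X2))   # True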
Example #18
def unteraufgabe_c():
    f = lambda x: np.sin(10*x*np.cos(x))

    [A10,A20] = unteraufgabe_b()
    b10 = np.zeros_like(x10)
    b20 = np.zeros_like(x20)
    alpha10 = np.zeros_like(x10)
    alpha20 = np.zeros_like(x20)

    b10 = f(x10)
    b20 = f(x20)
    alpha10 = solve(A10,b10)
    alpha20 = solve(A20,b20)

    x = np.linspace(0.0, 1.0, 100)
    pi10 = np.zeros_like(x)
    pi20 = np.zeros_like(x)

    pi10 = np.polyval(alpha10,x)
    pi20 = np.polyval(alpha20,x)

    plt.figure()
    plt.plot(x,f(x),"-b",label=r"$f(x)$")
    plt.plot(x,pi10 ,"-g",label=r"$p_{10}(x)$")
    plt.plot(x10,b10,"dg")
    plt.plot(x,pi20 ,"-r",label=r"$p_{20}(x)$")
    plt.plot(x20,b20,"or")
    plt.grid(True)
    plt.xlabel(r"$x$")
    plt.ylabel(r"$y$")
    plt.legend()
    plt.savefig("interpolation.eps")
Example #19
def get_sdrgain_upd(amat, wnrm='fro', maxeps=None,
                    baseA=None, baseZ=None, baseGain=None,
                    maxfac=None):

    deltaA = amat - baseA
    # nda = npla.norm(deltaA, ord=wnrm)
    # nz = npla.norm(baseZ, ord=wnrm)
    # na = npla.norm(baseA, ord=wnrm)
    # import ipdb; ipdb.set_trace()

    epsP = spla.solve_sylvester(amat, -baseZ, -deltaA)
    # print('debugging!!!')
    # epsP = 0*amat
    eps = npla.norm(epsP, ord=wnrm)
    print('|amat - baseA|: {0} -- |E|: {1}'.
          format(npla.norm(deltaA, ord=wnrm), eps))
    if maxeps is not None:
        if eps < maxeps:
            updGaint = npla.solve(epsP+np.eye(epsP.shape[0]), baseGain.T)
            return updGaint.T, True
    elif maxfac is not None:
        if (1+eps)/(1-eps) < maxfac and eps < 1:
            updGaint = npla.solve(epsP+np.eye(epsP.shape[0]), baseGain.T)
            return updGaint.T, True

    return None, False
Example #20
    def nf2(l):

        # Find x(l)
        xl = solve((LV + (l + sigma * delta) * identity(D)),dot(-UV.T,g))


        # Calculate |xl|-delta (for newton stopping rule)
        xlmd = norm(xl) - delta

        # Calculate f(l) for p=-1
        fl = 1 / norm(xl) - 1 / delta

        # Find x'(l)
        xlp = solve((LV + (l + sigma * delta) * identity(D)),(-xl))


        # Calculate f'(l) for p=-1
        flp = -dot(xl,xlp) * (dot(xl,xl) ** (-1.5))

        # Calculate increment
        dl = fl / flp

        # Set Delta
        Delta = delta

        return xlmd, dl, Delta
Example #21
def normal_eq_comb(AtA, AtB, PassSet = None):
    num_cholesky = 0
    num_eq = 0
    if AtB.size == 0:
        Z = np.zeros([])

    elif (PassSet is None) or np.all(PassSet):
        Z = nla.solve(AtA, AtB)
        num_cholesky = 1
        num_eq = AtB.shape[1]

    else:
        Z = np.zeros(AtB.shape) #(n, k)
        if PassSet.shape[1] == 1:
            if np.any(PassSet):
                cols = np.nonzero(PassSet)[0]
                Z[cols] = nla.solve(AtA[np.ix_(cols, cols)], AtB[cols])
                num_cholesky = 1
                num_eq = 1
        else:
            groups = column_group(PassSet)

            for g in groups:
                cols = np.nonzero(PassSet[:, g[0]])[0]

                if cols.size > 0:
                    ix1 = np.ix_(cols, g)
                    ix2 = np.ix_(cols, cols)

                    Z[ix1] = nla.solve(AtA[ix2], AtB[ix1])
                    num_cholesky += 1
                    num_eq += len(g)
    return Z, num_cholesky, num_eq
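# Usage sketch for the unconstrained path (PassSet=None); the grouped path
# additionally needs the column_group helper, which is not shown here.
import numpy as np
import numpy.linalg as nla

rng = np.random.default_rng(3)
A = rng.normal(size=(20, 4))
B = rng.normal(size=(20, 2))

Z, num_cholesky, num_eq = normal_eq_comb(A.T @ A, A.T @ B)
print(np.allclose(Z, nla.lstsq(A, B, rcond=None)[0]))   # True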
Example #22
def lu_inv(L):
    from numpy.linalg import solve
    from numpy import eye

    n = L.shape[0]
    K_inv = solve(L.T, solve(L, eye(n)))
    return K_inv
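# Quick check (assumed usage): despite the name, the helper inverts
# K = L @ L.T from its Cholesky factor L via two triangular solves.
import numpy as np
from numpy.linalg import cholesky, inv

K = np.array([[4.0, 1.0], [1.0, 3.0]])
L = cholesky(K)
print(np.allclose(lu_inv(L), inv(K)))   # True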
Example #23
    def rand(self):

        m, n = self.__m, self.__n

        s = linalg.cholesky(self.__prod).transpose()
        w = self.__weight

        # Compute the parameters of the posterior distribution.
        mu = linalg.solve(s[:m, :m], s[:m, m:])
        omega = np.dot(s[:m, :m].transpose(), s[:m, :m])
        sigma = np.dot(s[m:, m:].transpose(), s[m:, m:]) / w
        eta = w

        # Simulate the marginal Wishart distribution.
        f = linalg.solve(np.diag(np.sqrt(2.0*random.gamma(
            (eta - np.arange(n))/2.0))) + np.tril(random.randn(n, n), -1),
                         np.sqrt(eta)*linalg.cholesky(sigma).transpose())
        b = np.dot(f.transpose(), f)

        # Simulate the conditional Gauss distribution.
        a = mu + linalg.solve(linalg.cholesky(omega).transpose(),
                              np.dot(random.randn(m, n),
                                     linalg.cholesky(b).transpose()))

        return a, b
Example #24
def GMRES(A, b, krylovSize=10, useQR = True):
    def MultiplyMatrix(x):
        return dot(A, x)

    arnoldi = ArnoldiIterations(A, MultiplyMatrix, krylovSize)
    arnoldi.Setup(startVector = b)
    arnoldi.ArnoldiIterations()

    #converged = False
    #while not converged:
        #arnoldi step

        #check residual

    #Solve least square problem
    x = None
    bdStep = arnoldi.BreakdownStep
    if useQR:
        Q,R = linalg.qr(arnoldi.Hessenberg[:bdStep+1,:bdStep])
        Qb = dot(transpose(arnoldi.ArnoldiVectors[:,:bdStep+1]), b)
        Qbb = dot(transpose(Q), Qb)
        y = linalg.solve(R[:bdStep+1,:bdStep], Qbb)
        x = dot(arnoldi.ArnoldiVectors[:,:bdStep], y)
    else:
        HH = dot(transpose(arnoldi.Hessenberg), arnoldi.Hessenberg)
        bb = dot(transpose(arnoldi.Hessenberg), dot(transpose(arnoldi.ArnoldiVectors), b))
        y = linalg.solve(HH, bb)
        x = dot(arnoldi.ArnoldiVectors[:,:-1], y)

    return x
Example #25
def rbf_int(tocke, rbf, z):
	"""
	rbf_int solves the interpolation problem
	   F(tocke[i]) = z[i]
	with radial basis functions of the form
		rbf(norm(x-tocke[i]))
	Inputs:
	tocke ... nxk table of n points in R^k
	rbf ... function defining the basis shape
	z ... values prescribed at the points
	Result:
	alpha ... coefficients of the RBF expansion
	"""
	# build the system matrix
	n,k = tocke.shape
	A = zeros((n,n))
	for i in range(n):
		for j in range(n):
			A[i,j] = rbf(norm(tocke[i,:]-tocke[j,:])**2)
	# Cholesky factorization
	try:
		R = cholesky(A)
		# forward substitution R*y = z
		y = solve(R,z)
		# back substitution R^T*alpha = y
		alpha = solve(R.T,y)
	except LinAlgError:
		# matrix is not positive definite, fall back to a direct solve
		alpha = solve(A,z)

	return alpha
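# Usage sketch (assumed imports): Gaussian RBF interpolation of 10 random
# points in R^2; note the rbf callable receives the *squared* distance.
import numpy as np
from numpy import zeros, exp
from numpy.linalg import norm, cholesky, solve, LinAlgError

rng = np.random.default_rng(4)
tocke = rng.uniform(size=(10, 2))
z = np.sin(tocke[:, 0] + tocke[:, 1])
rbf = lambda r2: exp(-r2)

alpha = rbf_int(tocke, rbf, z)
# The expansion reproduces the data at the interpolation points:
F0 = sum(alpha[j] * rbf(norm(tocke[0] - tocke[j])**2) for j in range(10))
print(F0, z[0])   # equal up to rounding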
Example #26
def MdcNE(A, b):
    """
    Résolution du problème des moindres carrés linéaire :
                Min_{alpha} || b - A*alpha ||^2
    par factorisation de Cholesky du système des équations normales.

    Parameters
    ----------
    A : np.array ou np.matrix
    b : np.array ou np.matrix

    Returns
    -------
    alpha : np.array (dans tous les cas)
        solution du problème des moindres carrés linéaire
    """
    S = np.matrix(A).T * np.matrix(A)

    # Check beforehand the conditioning of the system and the numerical
    # stability of the solve that follows
    c = la.cond(S)
    if c > 1e16:
        print('Warning: the condition number of the normal-equations')
        print('         matrix is very large ---> %0.5g' % c)

    # Factorization followed by the solve
    L = la.cholesky(S)           # lower triangular matrix
    m = b.size
    bvect = np.matrix( b.reshape(m,1) )
    y = A.T * bvect
    z = la.solve(L, y)
    alpha = np.array( la.solve(L.T, z) )

    return alpha
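# Usage sketch: compare the normal-equations solution with numpy's lstsq.
import numpy as np
import numpy.linalg as la

rng = np.random.default_rng(5)
A = rng.normal(size=(30, 3))
b = A @ np.array([1.0, 2.0, 3.0]) + 0.01 * rng.normal(size=30)

alpha = MdcNE(A, b)
print(np.allclose(alpha.ravel(), la.lstsq(A, b, rcond=None)[0], atol=1e-6))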
Example #27
    def inverse_chol(self, K=None, Chl=None):
        """ 
        One can use this function for calculating the inverse of K through cholesky decomposition
        
        Parameters
        ----------
        K: ndarray
            covariance K
        Chl: ndarray
            cholesky decomposition of K

        Returns
        -------
        ndarray 
            the inverse of K
        """
        if Chl is not None:
            chol = Chl
        else:
            chol = self.calc_chol(K)

        if chol is None:
            return None

        inve = 0
        error_k = 1e-25
        while(True):
            try:
                choly = chol + error_k * np.eye(chol.shape[0])
                inve = solve(choly.T, solve(choly, np.eye(choly.shape[0])))
                break
            except np.linalg.LinAlgError:
                error_k *= 10
        return inve
Example #28
def bfgs_cholesky_backtracking(fn, x0, max_iter=500, vtr=1e-6, verbose=False):
    # Wrap cost function fn so that it takes a row vector and return
    # value plus column vector.
    n = len(x0)
    x = asarray(x0)[:,None]
    def cost_func(x):
        cost_func.calls += 1
        f,g = fn(x[:,0])
        return f,asarray(g)[:,None]
    cost_func.calls = 0

    # ----- Initial step -----
    # initial Hessian matrix H is the Identity matrix
    f, g = cost_func(x)         # function value and gradient
    scale = max(f, 1)           # scale diagonals of init Hessian
    H = scale * eye(n)          # initial Hessian

    #x_star = Inf

    fv = array(zeros((max_iter, 1)))

    for k in range(1, max_iter+1):
        if verbose: print(k, x[:,0])

        # Newton step
        L, H = mh.modelhess(n, H, 1, eps)
        y = linalg.solve(L, -g)
        p = linalg.solve(L.T, y)

        Sx = ones((n,1))
        maxstep = 10
        steptol = 1e-6
        x_new, f_new, retcode = ls.linesearch(cost_func, n, x, f, g, p, Sx, maxstep, steptol)

        f_new,g_new = cost_func(x_new)

        # Gradient difference vector
        y = g_new - g

        # Hessian estimate  TODO :
        if dot(y.T,p) > sqrt(eps) * linalg.norm(y) * linalg.norm(p): # ISMET : I added this if statement based on cond in Algorithm A941
            p_rot = dot(H,p)
            H = H + outer(y, y) / dot(y.T, p) - outer(p_rot, p_rot) / dot(p.T, p_rot)

        # Update parameters
        g = g_new
        x = x_new
        f = f_new                                    # currently not used

        fv[k-1] = f

        # Check for convergence
        if linalg.norm(g) < vtr:
            print('Convergence is achieved after', k, 'iterations', cost_func.calls, 'calls')
            #x_star = x_new
            return x[:,0], fv
    else:
        print('Failed to converge after', k, 'iterations', cost_func.calls, 'calls')
        return None,None
Example #29
File: leg.py Project: qiqi/home
 def corrector(self, dt):
    r = zeros(8)
    self._lambda[0:8] = 0.0
    for i_iter in range(self._n_newton_iter):
       # calculate residual
       cos_arms = 0.5 * self._length * cos(self._a_pos)
       sin_arms = 0.5 * self._length * sin(self._a_pos)
       r[0] = self._x_pos[0] - self._x_pos[1] - sin_arms[0] - sin_arms[1]
       r[1] = self._y_pos[0] - self._y_pos[1] - cos_arms[0] - cos_arms[1]
       r[2] = self._x_pos[1] - self._x_pos[2] - sin_arms[1] - sin_arms[2]
       r[3] = self._y_pos[1] - self._y_pos[2] - cos_arms[1] - cos_arms[2]
       r[4] = self._a_vel[0] - self._a_vel[1] - self._control[0]
       r[5] = self._a_vel[1] - self._a_vel[2] - self._control[1]
       r[0:4] /= 0.5 * dt**2
       r[4:6] /= dt
       self.calc_C_matrix(dt)
       J = asmatrix(self._C[0:6,:]) * asmatrix(self._M_inv) * \
           asmatrix(self._F[:,0:6])
       self._lambda[0:6] -= solve(J, r[0:6])
       _logger.debug('Air Newton iter %d norm = %f' % (i_iter, norm(r[0:6])))
       self._V[0:9] = self._V0 + dt * dot(self._M_inv, self._G + \
                      dot(self._F[:,0:6], self._lambda[0:6]))
       self._X[0:9] = self._X0 + dt * (self._V0 + self._V) / 2.0
    # determine if foot is in air or on ground
    if self._y_pos[2] - cos_arms[2] < 0:
       # calculate foot x position
       x1, y1 = self._x_pos[2] - sin_arms[2], self._y_pos[2] - cos_arms[2]
       x0 = self._X0[6] - 0.5 * self._length[2] * sin(self._X0[8])
       y0 = self._X0[7] - 0.5 * self._length[2] * cos(self._X0[8])
       if y1 - y0 != 0:
           foot_pos = x0 - (x1 - x0) / (y1 - y0) * y0
       else:
           foot_pos = x0
       for i_iter in range(self._n_newton_iter):
          cos_arms = 0.5 * self._length * cos(self._a_pos)
          sin_arms = 0.5 * self._length * sin(self._a_pos)
          r[0] = self._x_pos[0] - self._x_pos[1] - sin_arms[0] - sin_arms[1]
          r[1] = self._y_pos[0] - self._y_pos[1] - cos_arms[0] - cos_arms[1]
          r[2] = self._x_pos[1] - self._x_pos[2] - sin_arms[1] - sin_arms[2]
          r[3] = self._y_pos[1] - self._y_pos[2] - cos_arms[1] - cos_arms[2]
          r[4] = self._a_vel[0] - self._a_vel[1] - self._control[0]
          r[5] = self._a_vel[1] - self._a_vel[2] - self._control[1]
          r[6] = self._x_vel[2] - cos_arms[2] * self._a_vel[2]
          r[7] = self._y_vel[2] + sin_arms[2] * self._a_vel[2]
          r[0:4] /= 0.5 * dt**2
          r[4:8] /= dt
          _logger.debug('Land Newton iter %d norm = %f' % \
                        (i_iter, norm(r[0:8])))
          self.calc_C_matrix(dt)
          J = asmatrix(self._C) * asmatrix(self._M_inv) * asmatrix(self._F)
          self._lambda -= solve(J, r[0:8])
          self._V[0:9] = self._V0 + dt * dot(self._M_inv, self._G + \
                         dot(self._F, self._lambda))
          self._X[0:9] = self._X0 + dt * (self._V0 + self._V) / 2.0
       # put it on the ground
       cos_arms = 0.5 * self._length * cos(self._a_pos)
       sin_arms = 0.5 * self._length * sin(self._a_pos)
       self._x_pos[0:3] += foot_pos - self._x_pos[2] + sin_arms[2]
       self._y_pos[0:3] -= self._y_pos[2] - cos_arms[2]
Example #30
def dare(A, B, R, Q, tolerance=1e-10, max_iter=150):
    """
    Solves the discrete-time algebraic Riccati equation 
    
        X = A'XA - A'XB(B'XB + R)^{-1}B'XA + Q  

    via the doubling algorithm.  An explanation of the algorithm can be found
    in "Optimal Filtering" by B.D.O. Anderson and J.B. Moore (Dover
    Publications, 2005, p. 159).

    Parameters
    ============
    All arguments should be NumPy ndarrays.

        * A is k x k
        * B is k x n
        * Q is k x k, symmetric and nonnegative definite
        * R is n x n, symmetric and positive definite

    Returns
    ========
    X : a  k x k numpy.ndarray representing the approximate solution

    """
    # == Set up == #
    error = tolerance + 1
    fail_msg = "Convergence failed after {} iterations."
    # == Make sure that all arrays are two-dimensional == #
    A, B, Q, R = map(np.atleast_2d, (A, B, Q, R))
    k = Q.shape[0]
    I = np.identity(k)

    # == Initial conditions == #
    a0 = A
    b0 = dot(B, solve(R, B.T))
    g0 = Q
    i = 1

    # == Main loop == #
    while error > tolerance:

        if i > max_iter:
            raise ValueError(fail_msg.format(i))

        else:

            a1 = dot(a0, solve(I + dot(b0, g0), a0))
            b1 = b0 + dot(a0, solve(I + dot(b0, g0), dot(b0, a0.T)))
            g1 = g0 + dot(dot(a0.T, g0), solve(I + dot(b0, g0), a0))

            error = np.max(np.abs(g1 - g0))

            a0 = a1
            b0 = b1
            g0 = g1

            i += 1

    return g1  # Return X
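# Usage sketch (assuming dot and solve are imported from numpy as in the
# snippet): solve a small DARE and check that the Riccati residual is ~0.
import numpy as np

A = np.array([[0.9, 0.1], [0.0, 0.8]])
B = np.array([[0.0], [1.0]])
Q = np.eye(2)
R = np.array([[1.0]])

X = dare(A, B, R, Q)
res = (A.T @ X @ A
       - A.T @ X @ B @ np.linalg.solve(B.T @ X @ B + R, B.T @ X @ A)
       + Q - X)
print(np.abs(res).max())   # tiny (below the 1e-10 tolerance)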
Example #31
def mlrsl(train_data, params, m=0, max_iter=100, tol=1e-4):
    Xtrain = train_data['Xtrain']
    Ytrain = train_data['Ytrain']
    Ls = train_data['Ls']
    R0 = train_data['R0']

    mu = params['mu']
    lam = params['lam']
    beta = params['beta']
    alpha = params['alpha']
    gam = params['gam']

    if m == 0:
        m = len(Xtrain)
    n, l = Ytrain.shape
    d = [Xtrain[i].shape[1] for i in range(m)]
    G = [zeros((d[i], l)) for i in range(m)]
    R = np.random.uniform(size=(l, l))
    theta = ones(m) / m
    t1 = 1
    t2 = 1
    check_step = 1
    total_loss_old = 1

    for ii in range(0, max_iter):
        Gold = [G[i].copy() for i in range(m)]

        Q = Ytrain
        L = zeros((n, n))
        for i in range(m):
            Q = Q + mu * Xtrain[i] @ G[i]
            L = L + theta[i]**gam * Ls[i]
        P = (1 + m * mu) * eye(n) + L
        F = solve(P, Q)

        Rn = 0
        Rd = 0
        for i in range(m):
            GG = G[i].T @ G[i]
            GG_pos = (abs(GG) + GG) / 2
            GG_neg = (abs(GG) - GG) / 2
            Rn += GG_pos
            GG_diag = diag(diag(GG))
            Rd += GG_neg + GG_diag @ ones((l, l)) + ones(
                (l, l)) @ GG_diag - GG_diag
        Rn = lam / 2 * Rn + 2 * alpha * R0
        Rd = lam / 2 * Rd + 2 * alpha * R
        R[Rd != 0] = R[Rd != 0] * sqrt(Rn[Rd != 0] / Rd[Rd != 0])
        LR = diag(R.sum(1)) - R

        for i in range(m):
            Gpk = G[i] + (t1 - 1) / t2 * (G[i] - Gold[i])
            A = mu * Xtrain[i].T @ Xtrain[i] - mu**2 * Xtrain[i].T @ solve(
                P.T, Xtrain[i])
            dfGpk = A @ Gpk - mu * Xtrain[i].T @ solve(P,
                                                       Ytrain) + lam * Gpk @ LR
            for j in range(m):
                if i == j:
                    continue
                else:
                    dfGpk = dfGpk - mu**2 * Xtrain[i].T @ solve(
                        P.T, Xtrain[j]) @ Gold[j]
            Lf = sqrt(2 * norm(A, 2)**2 + 2 * norm(lam * LR, 2)**2)
            Zk = Gpk - 1 / Lf * dfGpk
            G[i] = soft_threshold(Zk, beta / Lf)

        t1, t2 = t2, (1 + sqrt(4 * t1**2 + 1)) / 2

        for i in range(m):
            theta[i] = (1 / trace(F.T @ Ls[i] @ F))**(1 / (gam - 1))
        theta = theta / theta.sum()

        for i in range(m):
            L = L + theta[i]**gam * Ls[i]

        predict_loss = 1 / 2 * norm(F - Ytrain, 'fro')**2 + alpha * norm(
            R - R0, 'fro')**2
        correlation = 1 / 2 * trace(F.T @ L @ F)
        sparse = 0
        for i in range(m):
            predict_loss += mu / 2 * norm(Xtrain[i] @ G[i] - F, 'fro')**2
            correlation += lam / 2 * trace(LR @ G[i].T @ G[i])
            sparse += beta * sum(sum(abs(G[i])))
        total_loss = predict_loss + correlation + sparse

        loss_perc = abs((total_loss_old - total_loss) / total_loss_old)
        if ii % check_step == 0:
            print("ii = %i, loss = %.4f, loss perc = %.4f" %
                  (ii, total_loss, loss_perc))

        if loss_perc < tol:
            break
        else:
            total_loss_old = total_loss
    return G, F, theta, ii
Example #32
def find_theta(sg, f_on_grid):
    """
    Given a SmolyakGrid object and the value of the function on the
    points of the grid, this function will return the coefficients theta
    """
    return la.solve(sg.B_U, la.solve(sg.B_L, f_on_grid))
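# The nested solves are forward/back substitution against an LU factorization.
# Standalone sketch with scipy (illustrative names, not the SmolyakGrid API):
import numpy as np
import numpy.linalg as la
from scipy.linalg import lu

rng = np.random.default_rng(6)
B = rng.normal(size=(5, 5))
f_on_grid = rng.normal(size=5)

P, B_L, B_U = lu(B)   # B = P @ B_L @ B_U
theta = la.solve(B_U, la.solve(B_L, P.T @ f_on_grid))
print(np.allclose(B @ theta, f_on_grid))   # True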
Example #33
    def propagate_wfs(self, sourceC_nM, targetC_nM, S_MM, H_MM, dt):
        self.timer.start('Linear solve')

        if self.blacs:
            # XXX, Preallocate
            target_blockC_nm = self.Cnm_block_descriptor.empty(dtype=complex)
            temp_blockC_nm = self.Cnm_block_descriptor.empty(dtype=complex)
            temp_block_mm = self.mm_block_descriptor.empty(dtype=complex)
            if self.density.gd.comm.rank != 0:
                # XXX Fake BLACS nbands, nao, nbands, nao grid because some
                # weird asserts
                # (these are 0,x or x,0 arrays)
                sourceC_nM = self.CnM_unique_descriptor.zeros(dtype=complex)

            # 1. target = (S+0.5j*H*dt) * source
            # Wave functions to target
            self.CnM2nm.redistribute(sourceC_nM, temp_blockC_nm)

            # XXX It can't be this f'n hard to symmetrize a matrix (tri2full)
            # Remove upper diagonal
            scalapack_zero(self.mm_block_descriptor, H_MM, 'U')
            # Lower diagonal matrix:
            temp_block_mm[:] = S_MM - (0.5j * dt) * H_MM
            scalapack_set(self.mm_block_descriptor, temp_block_mm, 0, 0, 'U')
            # Note: it is now a strictly lower triangular matrix
            # Add transpose of H
            pblas_tran(-0.5j * dt, H_MM, 1.0, temp_block_mm,
                       self.mm_block_descriptor, self.mm_block_descriptor)
            # Add transpose of S
            pblas_tran(1.0, S_MM, 1.0, temp_block_mm,
                       self.mm_block_descriptor, self.mm_block_descriptor)

            pblas_simple_gemm(self.Cnm_block_descriptor,
                              self.mm_block_descriptor,
                              self.Cnm_block_descriptor,
                              temp_blockC_nm,
                              temp_block_mm,
                              target_blockC_nm)
            # 2. target = (S-0.5j*H*dt)^-1 * target
            # temp_block_mm[:] = S_MM + (0.5j*dt) * H_MM
            # XXX It can't be this f'n hard to symmetrize a matrix (tri2full)
            # Lower diagonal matrix:
            temp_block_mm[:] = S_MM + (0.5j * dt) * H_MM
            # Now it's a strictly lower triangular matrix:
            scalapack_set(self.mm_block_descriptor, temp_block_mm, 0, 0, 'U')
            # Add transpose of H:
            pblas_tran(+0.5j * dt, H_MM, 1.0, temp_block_mm,
                       self.mm_block_descriptor, self.mm_block_descriptor)
            # Add transpose of S
            pblas_tran(1.0, S_MM, 1.0, temp_block_mm,
                       self.mm_block_descriptor, self.mm_block_descriptor)

            scalapack_solve(self.mm_block_descriptor,
                            self.Cnm_block_descriptor,
                            temp_block_mm,
                            target_blockC_nm)

            if self.density.gd.comm.rank != 0:  # XXX is this correct?
                # XXX Fake BLACS nbands, nao, nbands, nao grid because some
                # weird asserts
                # (these are 0,x or x,0 arrays)
                target = self.CnM_unique_descriptor.zeros(dtype=complex)
            else:
                target = targetC_nM
            self.Cnm2nM.redistribute(target_blockC_nm, target)
            self.density.gd.comm.broadcast(targetC_nM, 0)  # Is this required?
        else:
            # Note: The full equation is conjugated (therefore -+, not +-)
            targetC_nM[:] = \
                solve(S_MM - 0.5j * H_MM * dt,
                      np.dot(S_MM + 0.5j * H_MM * dt,
                             sourceC_nM.T.conjugate())).T.conjugate()

        self.timer.stop('Linear solve')
Example #34
                p.append(True)
        if False in p:
            positiva = False
            m += 1
            print(m)
    return A, b


def grad_conj(A, b, tol):
    n = A.shape[0]
    xk = np.squeeze(np.asarray(np.zeros((1, n), dtype=float)))
    rk = np.squeeze(np.asarray(-b))
    betak = 0
    pk = np.squeeze(np.asarray(b))
    b = np.squeeze(np.asarray(b))
    while la.norm(rk, 2) >= tol:
        Apk = np.dot(A, pk)
        tauk = -(np.inner(pk, rk) / (np.inner(pk, Apk)))
        xk = xk + tauk * pk
        rk = np.squeeze(np.asarray(np.dot(A, xk))) - b
        betak = np.inner(rk, Apk) / np.inner(pk, Apk)
        pk = -rk - betak * pk
    return xk


A, b = matrizes()
print(A)
print(b)
print(grad_conj(A, b, 1.e-5))
print("exact: " + str(la.solve(A, b)))
Example #35
def ctrl_traj(x, y, th, ctrl_prev, x_d, y_d, xd_d, yd_d, xdd_d, ydd_d, x_g,
              y_g, th_g):
    '''
    This function computes the closed-loop control law.
    Inputs:
        (x,y,th): current state
        ctrl_prev: previous control input (V,om)
        (x_d, y_d): desired position
        (xd_d, yd_d): desired velocity
        (xdd_d, ydd_d): desired acceleration
        (x_g,y_g,th_g): desired final state
    Outputs:
        (V, om): a numpy array np.array([V, om]) containing the desired control inputs
    '''

    # Timestep
    dt = 0.005

    ########## Code starts here ##########

    kpx = 1
    kpy = 1
    kdx = 2
    kdy = 2

    dist_from_goal = np.sqrt((x_g - x)**2 + (y_g - y)**2)

    if (dist_from_goal < 0.5):
        # switch control laws because we're close to the goal
        [V, om] = ctrl_pose(x, y, th, x_g, y_g, th_g)
    else:

        # Set up and solve for a, w
        V_prev = ctrl_prev[0]
        w_prev = ctrl_prev[1]

        xdot = V_prev * np.cos(th)
        ydot = V_prev * np.sin(th)

        u1 = xdd_d + kpx * (x_d - x) + kdx * (xd_d - xdot)
        u2 = ydd_d + kpy * (y_d - y) + kdy * (yd_d - ydot)

        J = np.array([[np.cos(th), -V_prev * np.sin(th)],
                      [np.sin(th), V_prev * np.cos(th)]])

        u = np.array([u1, u2])

        aw = linalg.solve(J, u)

        a = aw[0]
        om = aw[1]

        # Integrate to obtain V
        V = V_prev + a * dt

        # reset V if it becomes 0 to avoid singularity
        if (abs(V) < 0.01):
            V = np.sqrt((xd_d - xdot)**2 + (yd_d - ydot)**2)

        # Apply saturation limits
        V = np.sign(V) * min(0.5, abs(V))
        om = np.sign(om) * min(1.0, abs(om))

    ########## Code ends here ##########

    return np.array([V, om])
Example #36
def Jf(xvec):
    x, y = xvec
    return np.array([[1, 2], [2 * x, 8 * y]])


# Pick an initial guess.

# In[5]:

x = np.array([1, 2])

# Now implement Newton's method.

# In[6]:

x = x - la.solve(Jf(x), f(x))
print(x)

# Check if that's really a solution:

# In[7]:

f(x)

# * What's the cost of one iteration?
# * Is there still something like quadratic convergence?

# --------------------
# Let's keep an error history and check.

# In[8]:
Example #37
data = np.loadtxt(open("YearPredictionMSD.txt", "rb"), delimiter=",")

labels = np.mat(data[:, 0]).T
values = np.mat(data[:, 1:])

training_labels = labels[:TRAINING_SIZE]
training_values = values[:TRAINING_SIZE]

test_labels = labels[TRAINING_SIZE:]
test_values = values[TRAINING_SIZE:]

# Train beta
# X.T * X * beta = X.T Y
X = training_values.T * training_values
Y = training_values.T * training_labels
beta = linalg.solve(X, Y)

# Test
calc_labels = test_values * beta
RES = calc_labels - test_labels
# Matrix only has one value
RSS = np.asarray(0.5 * (RES.T * RES))[0][0]

print "RSS:" + str(RSS)
print "Max:" + str(max(calc_labels))
print "Min:" + str(min(calc_labels))

#pdb.set_trace()

#plt.stem(np.asarray(beta).T[0])
#plt.show()
Example #38
def ncpsolve(f, a, b, x, tol=None, infos=False, verbose=False, serial=False):
    '''
    don't ask what ncpsolve can do for you...
    :param f:
    :param a:
    :param b:
    :param x:
    :param tol:
    :param serial:
    :return:
    '''

    maxit = 100

    if tol is None:
        tol = sqrt(finfo(float64).eps)

    maxsteps = 10
    showiters = True

    it = 0
    if verbose:
        headline = '|{0:^5} | {1:^12} | {2:^12} |'.format(
            'k', ' backsteps', '||f(x)||')
        stars = '-' * len(headline)
        print(stars)
        print(headline)
        print(stars)

    while it < maxit:

        it += 1

        [fval, fjac] = f(x)
        [ftmp, fjac] = smooth(x, a, b, fval, fjac, serial=serial)

        fnorm = norm(ftmp, ord=inf)

        if fnorm < tol:
            if verbose:
                print(stars)
            if infos:
                return [x, it]
            else:
                return x

        if serial:
            from dolo.numeric.serial_operations import serial_solve
            dx = -serial_solve(fjac, ftmp)
        else:
            dx = -solve(fjac, ftmp)

        fnormold = inf

        for backsteps in range(maxsteps):

            xnew = x + dx
            fnew = f(xnew)[0]  # TODO: don't ask for derivatives
            fnew = smooth(xnew, a, b, fnew, serial=serial)
            fnormnew = norm(fnew, ord=inf)

            if fnormnew < fnorm:
                break
            if fnormold < fnormnew:
                dx = 2 * dx
                break

            fnormold = fnormnew
            dx = dx / 2

        x = x + dx

        if verbose:
            print('|{0:5} | {1:12} | {2:12.3e} |'.format(
                it, backsteps, fnormnew))

    if verbose:
        print(stars)

    warnings.warn('Failure to converge in ncpsolve')

    fval = f(x)

    return [x, fval]
Example #39
 def do(self, a, b):
     x = linalg.solve(a, b)
     assert_almost_equal(b, dot_generalized(a, x))
     assert_(imply(isinstance(b, matrix), isinstance(x, matrix)))
Example #40
 def check(dtype):
     x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype)
     assert_equal(linalg.solve(x, x).dtype, dtype)
Example #41

# combination (nCr) calculation
def COM(n, r):
    fac = [1, 1]
    finv = [1, 1]
    inv = [0, 1]
    for i in range(2, MAX):
        fac.append(fac[i - 1] * i % MOD)
        inv.append(-inv[MOD % i] * (MOD // i))
        finv.append(finv[i - 1] * inv[i] % MOD)
    if n < r:
        return 0
    if n < 0 or r < 0:
        return 0
    return fac[n] * (finv[r] * finv[n - r] % MOD) % MOD


x, y = map(int, input().split())

if (x + y) % 3 != 0:
    print(0)
    exit()

l = solve([[2, 1], [1, 2]], [x, y])
if l[0] < 0 or l[1] < 0:  # 0 when n, m < 0
    print(0)
    exit()
# Of the n+m moves, choose which n are of one kind: compute C(n+m, n)
print(COM(int(l[0] + l[1]), int(l[0])))
Example #42
def uLSIF(x, y, sigma_range, lambda_range, kernel_num=100, verbose=True):
    """Estimate Density Ratio p(x)/q(y) by uLSIF
                                (unconstrained Least-Square Importance Fitting)

    Args:
        x (numpy.matrix): sample from p(x).
        y (numpy.matrix): sample from q(y).
        sigma_range (list<float>): search range of Gaussian kernel bandwidth.
        lambda_range (list<float>): search range of regularization parameter.

    Kwargs:
        kernel_num (int): number of kernels. (default 100)
        verbose (bool): indicator to print messages (default True)

    Returns:
        densratio.DensityRatio object which has `compute_density_ratio()`.
    """
    nx = x.shape[0]
    ny = y.shape[0]
    kernel_num = min(kernel_num, nx)
    centers = x[randint(nx, size=kernel_num)]

    if verbose:
        print("################## Start uLSIF ##################")

    if sigma_range.size == 1 and lambda_range.size == 1:
        sigma = sigma_range[0]
        lambda_ = lambda_range[0]
    else:
        if verbose:
            print("Searching optimal sigma and lambda...")
        opt_params = search_sigma_and_lambda(x, y, centers, sigma_range,
                                             lambda_range, verbose)
        sigma = opt_params["sigma"]
        lambda_ = opt_params["lambda"]
        if verbose:
            print("Found optimal sigma = %.3f, lambda = %.3f." %
                  (sigma, lambda_))

    if verbose:
        print("Optimizing alpha...")
    phi_x = compute_kernel_Gaussian(x, centers, sigma)
    phi_y = compute_kernel_Gaussian(y, centers, sigma)
    H = phi_y.T.dot(phi_y) / ny
    h = phi_x.mean(axis=0).T
    alpha = solve(H + diag(array(lambda_).repeat(kernel_num)), h).A1
    alpha[alpha < 0] = 0
    if verbose:
        print("End.")

    kernel_info = KernelInfo(kernel_type="Gaussian RBF",
                             kernel_num=kernel_num,
                             sigma=sigma,
                             centers=centers)

    def compute_density_ratio(x):
        x = to_numpy_matrix(x)
        phi_x = compute_kernel_Gaussian(x, centers, sigma)
        density_ratio = phi_x.dot(matrix(alpha).T).A1
        return density_ratio

    result = DensityRatio(method="uLSIF",
                          alpha=alpha,
                          lambda_=lambda_,
                          kernel_info=kernel_info,
                          compute_density_ratio=compute_density_ratio)

    if verbose:
        print("################## Finished uLSIF ###############")

    return result
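# Usage sketch (assumed setup; uLSIF also relies on the project's helpers
# such as compute_kernel_Gaussian and search_sigma_and_lambda, plus randint
# from numpy.random): estimate the ratio between two shifted Gaussians.
import numpy as np

rng = np.random.default_rng(7)
x = np.matrix(rng.normal(0.0, 1.0, size=(500, 1)))   # samples from p
y = np.matrix(rng.normal(0.5, 1.0, size=(500, 1)))   # samples from q

result = uLSIF(x, y,
               sigma_range=np.array([0.5, 1.0, 2.0]),
               lambda_range=np.array([0.01, 0.1, 1.0]))
w = result.compute_density_ratio(x)   # estimated p/q at the x samples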
Example #43
# image file:

# ### Newton

# In[5]:

# Initialize the method

iterates = [np.array([2, 2. / 5])]

# In[6]:

# Evaluate this cell many times in-place

x = iterates[-1]
s = la.solve(ddf(x), -df(x))
next_iterate = x + s
print(f(next_iterate))

iterates.append(next_iterate)

# plot function and iterates
pt.axis("equal")
pt.contour(xmesh, ymesh, fmesh, 50)
it_array = np.array(iterates)
pt.plot(it_array.T[0], it_array.T[1], "x-")

# Out[6]:

#     0.0
#
Example #44
def _rss_generate(states, inputs, outputs, type):
    """Generate a random state space.

    This does the actual random state space generation expected from rss and
    drss.  type is 'c' for continuous systems and 'd' for discrete systems.

    """

    # Probability of repeating a previous root.
    pRepeat = 0.05
    # Probability of choosing a real root.  Note that when choosing a complex
    # root, the conjugate gets chosen as well.  So the expected proportion of
    # real roots is pReal / (pReal + 2 * (1 - pReal)).
    pReal = 0.6
    # Probability that an element in B or C will not be masked out.
    pBCmask = 0.8
    # Probability that an element in D will not be masked out.
    pDmask = 0.3
    # Probability that D = 0.
    pDzero = 0.5

    # Check for valid input arguments.
    if states < 1 or states % 1:
        raise ValueError("states must be a positive integer.  states = %g." %
                         states)
    if inputs < 1 or inputs % 1:
        raise ValueError("inputs must be a positive integer.  inputs = %g." %
                         inputs)
    if outputs < 1 or outputs % 1:
        raise ValueError("outputs must be a positive integer.  outputs = %g." %
                         outputs)

    # Make some poles for A.  Preallocate a complex array.
    poles = zeros(states) + zeros(states) * 0.j
    i = 0

    while i < states:
        if rand() < pRepeat and i != 0 and i != states - 1:
            # Small chance of copying poles, if we're not at the first or last
            # element.
            if poles[i - 1].imag == 0:
                # Copy previous real pole.
                poles[i] = poles[i - 1]
                i += 1
            else:
                # Copy previous complex conjugate pair of poles.
                poles[i:i + 2] = poles[i - 2:i]
                i += 2
        elif rand() < pReal or i == states - 1:
            # No-oscillation pole.
            if type == 'c':
                poles[i] = -exp(randn()) + 0.j
            elif type == 'd':
                poles[i] = 2. * rand() - 1.
            i += 1
        else:
            # Complex conjugate pair of oscillating poles.
            if type == 'c':
                poles[i] = complex(-exp(randn()), 3. * exp(randn()))
            elif type == 'd':
                mag = rand()
                phase = 2. * math.pi * rand()
                poles[i] = complex(mag * cos(phase), mag * sin(phase))
            poles[i + 1] = complex(poles[i].real, -poles[i].imag)
            i += 2

    # Now put the poles in A as real blocks on the diagonal.
    A = zeros((states, states))
    i = 0
    while i < states:
        if poles[i].imag == 0:
            A[i, i] = poles[i].real
            i += 1
        else:
            A[i, i] = A[i + 1, i + 1] = poles[i].real
            A[i, i + 1] = poles[i].imag
            A[i + 1, i] = -poles[i].imag
            i += 2
    # Finally, apply a transformation so that A is not block-diagonal.
    while True:
        T = randn(states, states)
        try:
            A = dot(solve(T, A), T)  # A = T \ A * T
            break
        except LinAlgError:
            # In the unlikely event that T is rank-deficient, iterate again.
            pass

    # Make the remaining matrices.
    B = randn(states, inputs)
    C = randn(outputs, states)
    D = randn(outputs, inputs)

    # Make masks to zero out some of the elements.
    while True:
        Bmask = rand(states, inputs) < pBCmask
        if any(Bmask):  # Retry if we get all zeros.
            break
    while True:
        Cmask = rand(outputs, states) < pBCmask
        if any(Cmask):  # Retry if we get all zeros.
            break
    if rand() < pDzero:
        Dmask = zeros((outputs, inputs))
    else:
        Dmask = rand(outputs, inputs) < pDmask

    # Apply masks.
    B = B * Bmask
    C = C * Cmask
    D = D * Dmask

    return StateSpace(A, B, C, D)
Example #45
    def ensure_gram_matrix(self):
        if self._C is not None:
            return

        self._C = self._covf.compute_gram_matrix(self._train_x)
        self._Cinvt = solve(self._C, self._train_t)
Example #46
              ]]
    known_values = [
        my,
        find_known(1),
        find_known(2),
        find_known(3),
        find_known(4),
        find_known(5),
        find_known(6),
        find_known(7),
        find_known(8),
        find_known(9),
        find_known(10)
    ]

    beta = solve(values, known_values)
    print("\nРівняння регресії:")
    print(
        "{:.3f} + {:.3f} * X1 + {:.3f} * X2 + {:.3f} * X3 + {:.3f} * Х1X2 + {:.3f} * Х1X3 + {:.3f} * Х2X3 + {:.3f} * Х1Х2X3 + {:.3f} * X11^2 + {:.3f} * X22^2 + {:.3f} * X33^2 = ŷ\n\nПеревірка"
        .format(beta[0], beta[1], beta[2], beta[3], beta[4], beta[5], beta[6],
                beta[7], beta[8], beta[9], beta[10]))
    for i in range(N):
        print("ŷ{} = {:.3f} ≈ {:.3f}".format((i + 1), check_result(beta, i),
                                             middle_y[i]))

    while not homogeneity:
        print(
            "\n" + " " * 65 + "Design matrix" + " " * 65 +
            "      X1           X2           X3          X1X2        X1X3         X2X3         X1X2X3       X1X1         X2X2         X3X3          Yi ..."
        )
        for row in range(N):
Example #47
#!/usr/bin/python
# -*- coding:utf-8 -*-
import numpy as np
from numpy import linalg

# (c)
A = np.mat('100,100;100,100.01')
b = np.mat('2;2.0001')
x = linalg.solve(A, b)
print(x)
'''
 When b is corrected, x turns out to be [0.01; 0.01], which is the right
 answer. The small error introduced in A is amplified and shows up in b.
'''

# (d)
det_A = linalg.det(A)
con = max(linalg.svd(A)[1]) / min(linalg.svd(A)[1])
print('Determinant={0} Condition number={1}'.format(det_A, con))
'''
explanation:
Since the condition number of A is so large, any error in b is
amplified many times in x, so the solution looks quite different.
'''
Example #48
        F[i*band + j] += f[j]
        for l in range(enodes):
            K[i*band + j,i*band + l] += k[j,l]
    xi +=he

# forcing Dirichlet boundaries    
u0 = 0
u1 = 0
K[0,:] = numpy.eye(size)[0,:]
# K[:,0] = numpy.eye(size)[:,0]
K[-1,:] = numpy.eye(size)[-1,:]
# K[:,-1] = numpy.eye(size)[:,-1]
F[0] = u0
F[-1] = u1

# solving system
u = solve(K,F)
# print(k,f)
# print('\n',K)
# print(F)
# print(K.shape)
# print(u)

# plot solutions
x = numpy.linspace(0,1,size)
xv = numpy.linspace(0,1,100)
xh, uh, du = plot_sol(u,x,band)
# print(uh)
plt.subplot(1,2,2)
plt.plot(xh,du,label='approx du')
plt.plot(xv,dv(xv),'m--',label='exact')
plt.legend()
plt.subplot(1,2,1)
plt.plot(xh,uh,label='approx solution')
plt.plot(xv,v(xv),'m--',label='exact')
plt.legend()
plt.show()
def main():
    inv_dx = 20
    dx = 1 / inv_dx
    w = 0.8

    #The calculation only needs to be done for the inner points, since we have
    #Dirichlet conditions on the boundaries of the middle room. -> (2n-1)x(n-1)

    #For the small rooms we need to include the points with the Neumann
    #conditions as well. -> (n-1)x(n-1+1)
    if rank == 0:  #rank 0 handles the middle room and room 3
        #boundary vectors for the middle room:
        left2 = np.ones(inv_dx * 2 - 1) * 15
        top2 = np.ones(inv_dx - 1) * 40
        bot2 = np.ones(inv_dx - 1) * 5
        right2 = np.ones(inv_dx * 2 - 1) * 15
        top13 = np.ones(inv_dx) * 15
        bot13 = np.ones(inv_dx) * 15
        right3 = np.ones(inv_dx - 1) * 40
        u2L = laplaceMidMatrix((inv_dx * 2 + 1, inv_dx + 1)) / (
            dx**2)  #the size is + 1 for the points
        u3L = laplaceSideMatrix(inv_dx + 1, 3) / (dx**2)

    #boundary for the small rooms:
    if rank == 1:  #rank 1 handles room 1
        left1 = np.ones(inv_dx - 1) * 40
        top13 = np.ones(inv_dx) * 15
        bot13 = np.ones(inv_dx) * 15
        u1L = laplaceSideMatrix(inv_dx + 1, 1) / (dx**2)

    #the Laplace matrices for the rooms were built above

    for i in range(100):
        #start by solving the middle room; the Dirichlet data sits in the
        #boundary vectors, initially set to 15
        if rank == 0:
            b2 = give_b_vector(left2, top2, right2, bot2) / (dx**2)
            b2 = np.hstack(b2)
            u2new = nl.solve(u2L, -b2)

            if i == 0:
                u2 = u2new
            else:
                u2 = w * u2 + (1 - w) * u2new

            #the Neumann condition for the left room:
            tempneu1 = u2.reshape((2 * inv_dx - 1, inv_dx - 1))[-inv_dx + 1:,
                                                                0]
            neu1 = gradfunc(left2[-inv_dx + 1:], tempneu1, dx)

            comm.send(neu1, dest=1, tag=11)
            #Neumann condition for the right room:
            tempneu3 = u2.reshape((2 * inv_dx - 1, inv_dx - 1))[:inv_dx - 1,
                                                                -1]
            neu3 = gradfunc(tempneu3, right2[:inv_dx - 1], dx)

            #solve right
            b3 = give_b_vector(-dx * neu3, top13, right3, bot13) / (dx**2)
            b3 = np.hstack(b3)
            u3new = nl.solve(u3L, -b3)
            if i == 0:
                u3 = u3new
            else:
                u3 = w * u3 + (1 - w) * u3new
            u1send = comm.recv(source=1, tag=12)
            #putting the solution from room 1 and 3 to the boundary vectors
            left2[-inv_dx + 1:] = u1send
            right2[:inv_dx - 1] = u3.reshape((inv_dx - 1, inv_dx))[:, 0]

        if rank == 1:
            #solve the left room with the Neumann data received from rank 0
            neu1 = comm.recv(source=0, tag=11)
            b1 = give_b_vector(left1, top13, neu1 * dx, bot13) / (dx**2)
            b1 = np.hstack(b1)

            u1new = nl.solve(u1L, -b1)
            if i == 0:
                u1 = u1new
            else:
                u1 = w * u1 + (1 - w) * u1new
            u1send = u1.reshape((inv_dx - 1, inv_dx))[:, -1]
            comm.send(u1send, dest=0, tag=12)

    if rank == 1:
        u1 = u1.reshape((inv_dx - 1, inv_dx))
        comm.send(u1, dest=0, tag=20)
    if rank == 0:
        u3 = u3.reshape((inv_dx - 1, inv_dx))
        u2 = u2.reshape((2 * inv_dx - 1, inv_dx - 1))
        u1 = comm.recv(source=1, tag=20)

        mid_h, mid_w = u2.shape
        left_h, left_w = u1.shape
        right_h, right_w = u3.shape

        plot_matrix_width = mid_w + left_w + right_w + 2
        plot_matrix_height = mid_h + 2
        plot_matrix = np.zeros((plot_matrix_height, plot_matrix_width))

        plot_matrix[-inv_dx:-1, 1:inv_dx + 1] = u1
        plot_matrix[1:-1, inv_dx + 1:2 * inv_dx] = u2
        plot_matrix[1:inv_dx, 2 * inv_dx:-1] = u3
        plot_matrix[-1, 1:inv_dx + 1] = np.ones(inv_dx) * 15
        plot_matrix[-1, inv_dx + 1:2 * inv_dx] = np.ones(inv_dx - 1) * 5
        plot_matrix[-inv_dx - 1:, 0] = np.ones(inv_dx + 1) * 40
        plot_matrix[-inv_dx - 1, 1:inv_dx + 1] = np.ones(inv_dx) * 15
        plot_matrix[:inv_dx, inv_dx] = np.ones(inv_dx) * 15
        plot_matrix[0, inv_dx + 1:2 * inv_dx] = np.ones(mid_w) * 40
        plot_matrix[:inv_dx + 1, -1] = np.ones(inv_dx + 1) * 40
        plot_matrix[inv_dx + 1:, 2 * inv_dx] = np.ones(inv_dx) * 15
        plot_matrix[inv_dx, 2 * inv_dx:-1] = np.ones(inv_dx) * 15
        plot_matrix[0, 2 * inv_dx:-1] = np.ones(right_w) * 15

        heatplot = plt.imshow(plot_matrix)
        heatplot.set_cmap('hot')
        plt.colorbar()
        plt.show()
Beispiel #50
0
def norm_cop_pdf(u, mu, sigma2):
    """For details, see here.

    Parameters
    ----------
        u :  array, shape (n_,)
        mu : array, shape (n_,)
        sigma2 :  array, shape (n_, n_)

    Returns
    -------
        pdf_u : scalar

    """

    # Step 1: Compute the inverse marginal cdf's

    svec = np.sqrt(np.diag(sigma2))
    x = stats.norm.ppf(u.flatten(), mu.flatten(), svec).reshape(-1, 1)

    # Step 2: Compute the joint pdf

    n_ = len(u)
    pdf_x = ((2 * np.pi) ** (-n_ / 2) * det(sigma2) ** (-.5)
             * np.exp(-0.5 * (x - mu.reshape(-1, 1)).T
                      @ solve(sigma2, x - mu.reshape(-1, 1))))

    # Step 3: Compute the marginal pdf's

    pdf_xn = stats.norm.pdf(x.flatten(), mu.flatten(), svec)

    # Compute the pdf of the copula
    pdf_u = np.squeeze(pdf_x / np.prod(pdf_xn))

    return pdf_u
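# Added sanity check (not part of the original; assumes this module's imports
# -- np, stats, det, solve -- are in scope): the hand-rolled joint density in
# Step 2 should match scipy.stats.multivariate_normal, so the copula pdf can
# be cross-checked as the joint pdf divided by the product of the marginals.
import numpy as np
from scipy import stats

mu_demo = np.array([0.0, 1.0])
sigma2_demo = np.array([[1.0, 0.3], [0.3, 2.0]])
u_demo = np.array([0.4, 0.7])

svec_demo = np.sqrt(np.diag(sigma2_demo))
x_demo = stats.norm.ppf(u_demo, mu_demo, svec_demo)
check = (stats.multivariate_normal.pdf(x_demo, mean=mu_demo, cov=sigma2_demo)
         / np.prod(stats.norm.pdf(x_demo, mu_demo, svec_demo)))
print(np.isclose(norm_cop_pdf(u_demo, mu_demo, sigma2_demo), check))  # True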
Beispiel #51
0
 def norm(self, x, u):
     # Norm induced by the inner product: ||u||_x = sqrt(<u, u>_x).
     return np.sqrt(self.inner(x, u, u))
Beispiel #52
0
#Least-squares method begin
x = np.array([random.uniform(-1, 1) for i in range(NODES)])
Q = np.full((NODES, NODES), 0, dtype=float)
for i in range(0, NODES):
    for j in range(0, DEGREE + 1):
        Q[i][j] = x[i]**j
H = Q.T.dot(Q)
y = np.fromiter(map(func, x), float)
b = Q.T.dot(y)
#Fill the remaining entries of H to avoid a singular system
for i in range(0, NODES):
    for j in range(DEGREE + 1, NODES):
        H[i][j] = i

solve = LA.solve(H, b)  # polynomial coefficients; note this name shadows numpy's solve
#end

#Legendre method
#Define the coefficients of the target polynomial
cn = np.array([
    1 / 3, (np.sin(1) - np.cos(1)) * 3, 2 / 3,
    (28 * np.cos(1) - 18 * np.sin(1)) / (2 / 7)
])
#end

#Initial data
a = -1
b = 1
m = 100
Beispiel #53
0
 def inner(self, x, u, v):
     return (np.tensordot(la.solve(x, u),
                          multitransp(la.solve(x, v)),
                          axes=x.ndim))
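# Added note: with 2-D x, the full contraction above equals
# tr(x^{-1} u x^{-1} v), the affine-invariant inner product on symmetric
# positive-definite matrices. A quick standalone check:
import numpy as np
from numpy.linalg import solve

x_spd = np.array([[2.0, 0.5], [0.5, 1.0]])  # an SPD base point
u_tan = np.array([[0.1, 0.2], [0.2, 0.3]])  # symmetric tangent vectors
v_tan = np.array([[0.4, 0.0], [0.0, 0.5]])

via_tensordot = np.tensordot(solve(x_spd, u_tan), solve(x_spd, v_tan).T,
                             axes=x_spd.ndim)
via_trace = np.trace(solve(x_spd, u_tan) @ solve(x_spd, v_tan))
print(np.isclose(via_tensordot, via_trace))  # True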
Beispiel #54
0
def ols(y,
        X,
        get_cov=True,
        cov_est='hc1',
        get_t=True,
        get_p=True,
        clustvar=None):
    # Inputs
    # y: [n,1] vector, LHS variables
    # X: [n,k] matrix, RHS variables
    # get_cov: boolean, if true, the function returns an estimate of the
    #          variance/covariance matrix, in addition to the OLS coefficients
    # cov_est: string, specifies which variance/covariance matrix estimator to
    #          use. Must be either hmsd (for the homoskedastic estimator), hc1
    #          (for the Eicker-Huber-White HC1 estimator), or cluster (for the
    #          cluster-robust estimator, which requires clustvar)
    # get_t: boolean, if true, the function returns t-statistics for the simple
    #        null of beta[i] = 0, for each element of the coefficient vector
    #        separately
    # get_p: boolean, if true, calculate the p-values for a two-sided test of
    #        beta[i] = 0, for each element of the coefficient vector separately
    # clustvar: [n,1] array or None, cluster identifiers for the
    #           cluster-robust estimator
    #
    # Outputs:
    # beta_hat: [k,1] vector, coefficient estimates
    # V_hat: [k,k] matrix, estimate of the variance/covariance matrix
    # t: [k,1] vector, t-statistics
    # p: [k,1] vector, p-values

    # If p-values are necessary, then t-statistics will be needed
    if get_p and not get_t:
        get_t = True

    # If t-statistics are necessary, then the covariance has to be estimated
    if get_t and not get_cov:
        get_cov = True

    # Get number of observations n and number of coefficients k
    n, k = X.shape[0], X.shape[1]

    # Calculate OLS coefficients
    XXinv = solve(X.transpose() @ X, np.eye(k))  # Calculate (X'X)^(-1)
    beta_hat = XXinv @ (X.transpose() @ y)

    # Check whether covariance is needed
    if get_cov:
        # Get residuals
        U_hat = y - X @ beta_hat

        # Check which covariance estimator to use
        if cov_est == 'hmsd':
            # For the homoskedastic estimator, just calculate the standard
            # variance
            V_hat = (1 / (n - k)) * XXinv * (U_hat.transpose() @ U_hat)
        elif cov_est == 'hc1':
            # Calculate component of middle part of EHW sandwich,
            # S_i = X_i u_i, which makes it very easy to calculate
            # sum_i X_i X_i' u_i^2 = S'S
            S = (U_hat @ np.ones(shape=(1, k))) * X

            # Calculate EHW variance/covariance matrix
            V_hat = (n / (n - k)) * XXinv @ (S.transpose() @ S) @ XXinv
        elif cov_est == 'cluster':
            # Calculate number of clusters
            J = len(np.unique(clustvar))

            # Same thing as S above, but needs to be a DataFrame, because pandas
            # has the groupby method, which is needed in the next step
            S = pd.DataFrame((U_hat @ np.ones(shape=(1, k))) * X)

            # Sum all covariates within clusters
            S = S.groupby(clustvar[:, 0], axis=0).sum().values

            # Calculate cluster-robust variance estimator
            V_hat = ((n / (n - k)) * (J / (J - 1)) *
                     XXinv @ (S.transpose() @ S) @ XXinv)
        else:
            # Print an error message
            print('Error in ',
                  ols.__name__, '(): The specified covariance '
                  'method could not be recognized. Please specify another ',
                  'method.',
                  sep='')

            # Exit the program
            return

        # Replace NaNs with zeros (these occur if a division by zero happens)
        V_hat[np.isnan(V_hat)] = 0

        # Check whether to get t-statistics
        if get_t:
            # Calculate t-statistics (I like having them as a column vector, but
            # to get that, I have to convert the square root of the diagonal
            # elements of V_hat into a proper column vector first)
            t = beta_hat / larry(np.sqrt(np.diag(V_hat)))

            # Check whether to calculate p-values
            if get_p:
                # Calculate p-values
                p = 2 * (1 - norm.cdf(np.abs(t)))

                # Return coefficients, variance/covariance matrix, t-statistics,
                # and p-values
                return beta_hat, V_hat, t, p
            else:
                # Return coefficients, variance/covariance matrix, and
                # t-statistics
                return beta_hat, V_hat, t
        else:
            # Return coefficients and variance/covariance matrix
            return beta_hat, V_hat
    else:
        # Otherwise, just return coefficients
        return beta_hat
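# Hypothetical usage sketch (assumes the module-level imports this function
# relies on -- numpy as np, scipy.stats.norm, pandas as pd, and the
# labeled-array helper larry -- are in scope; the data below is synthetic):
import numpy as np

rng = np.random.default_rng(0)
n_obs = 500
X_demo = np.hstack([np.ones((n_obs, 1)), rng.normal(size=(n_obs, 2))])
beta_true = np.array([[1.0], [2.0], [-0.5]])
y_demo = X_demo @ beta_true + rng.normal(size=(n_obs, 1))

# HC1 covariance only; skip t-stats/p-values (those need the larry helper).
beta_hat, V_hat = ols(y_demo, X_demo, get_cov=True, get_t=False, get_p=False)
print(beta_hat.ravel())         # close to [1, 2, -0.5]
print(np.sqrt(np.diag(V_hat)))  # heteroskedasticity-robust standard errors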
Beispiel #55
0
    def interpolate(self,
                    pts,
                    interp=True,
                    deriv=False,
                    deriv_th=False,
                    deriv_X=False):
        """
        Basic Lagrange interpolation, with optional first derivatives
        (gradient)

        Parameters
        ----------
        pts : array (float, ndim=2)
            A 2d array of points on which to evaluate the function. Each
            row is assumed to be a new d-dimensional point. Therefore, pts
            must have the same number of columns as ``si.SGrid.d``

        interp : bool, optional(default=false)
            Whether or not to compute the actual interpolation values at pts

        deriv : bool, optional(default=false)
            Whether or not to compute the gradient of the function at each
            of the points. This will have the same dimensions as pts, where
            each column represents the partial derivative with respect to
            a new dimension.

        deriv_th : bool, optional(default=false)
            Whether or not to compute the derivative with respect to the
            Smolyak polynomial coefficients (theta)

        deriv_X : bool, optional(default=false)
            Whether or not to compute the derivative with respect to the
            function values at the grid points


        Returns
        -------
        rets : list (array(float))
            A list of arrays containing the objects asked for. There are 4
            possible objects that can be computed in this function. They will,
            if they are called for, always be in the following order:

            1. Interpolation values at pts
            2. Gradient at pts
            3. Derivative with respect to the coefficients at pts
            4. Derivative with respect to the grid values at pts

            If the user only asks for one of these objects, it is returned
            directly as an array and not in a list.


        Notes
        -----
        This is a stripped down port of ``dolo.SmolyakBasic.interpolate``

        TODO: There may be a better way to do this

        TODO: finish the docstring for the 2nd and 3rd type of derivatives

        """
        d = pts.shape[1]
        sg = self.sg

        theta = self.theta
        trans_points = sg.dom2cube(pts)  # Move points to correct domain

        rets = []

        if deriv:
            new_B, der_B = build_B(d, sg.mu, trans_points, sg.pinds, True)
            vals = new_B.dot(theta)
            d_vals = np.tensordot(theta, der_B, (0, 0)).T

            if interp:
                rets.append(vals)
            rets.append(sg.dom2cube(d_vals))

        elif not deriv and interp:  # No derivs in build_B. Just do vals
            new_B = build_B(d, sg.mu, trans_points, sg.pinds)
            vals = new_B.dot(theta)
            rets.append(vals)

        if deriv_th:  # The derivative wrt the coeffs is just new_B
            if not interp and not deriv:  # we haven't computed new_B yet
                new_B = build_B(d, sg.mu, trans_points, sg.pinds)
            rets.append(new_B)

        if deriv_X:
            if not interp and not deriv and not deriv_th:
                new_B = build_B(d, sg.mu, trans_points, sg.pinds)
            d_X = la.solve(sg.B_U, la.solve(sg.B_L, new_B.T))
            rets.append(d_X)

        if len(rets) == 1:
            rets = rets[0]

        return rets
Beispiel #56
0
def main(n):
    # variant 101
    x1min = -40
    x1max = 20
    x2min = 10
    x2max = 60
    x3min = -20
    x3max = 20

    # maximum and minimum response values
    y_max = 200 + (x1max + x2max + x3max) / 3
    y_min = 200 + (x1min + x2min + x3min) / 3

    # full factorial experiment design matrix (normalized factors)
    xn = [[1, 1, 1, 1, 1, 1, 1, 1], [-1, -1, 1, 1, -1, -1, 1, 1],
          [-1, 1, -1, 1, -1, 1, -1, 1], [-1, 1, 1, -1, 1, -1, -1, 1]]

    x1x2_norm, x1x3_norm, x2x3_norm, x1x2x3_norm = [0] * 8, [0] * 8, [0] * 8, [0] * 8

    for i in range(n):
        x1x2_norm[i] = xn[1][i] * xn[2][i]
        x1x3_norm[i] = xn[1][i] * xn[3][i]
        x2x3_norm[i] = xn[2][i] * xn[3][i]
        x1x2x3_norm[i] = xn[1][i] * xn[2][i] * xn[3][i]

    # fill in y (randomly generated)
    y1 = [random.randint(int(y_min), int(y_max)) for i in range(8)]
    y2 = [random.randint(int(y_min), int(y_max)) for i in range(8)]
    y3 = [random.randint(int(y_min), int(y_max)) for i in range(8)]

    # design matrix
    y_matrix = [[y1[0], y2[0], y3[0]], [y1[1], y2[1], y3[1]],
                [y1[2], y2[2], y3[2]], [y1[3], y2[3], y3[3]],
                [y1[4], y2[4], y3[4]], [y1[5], y2[5], y3[5]],
                [y1[6], y2[6], y3[6]], [y1[7], y2[7], y3[7]]]

    # print the data in a loop
    print("Design matrix y : \n")
    for i in range(n):
        print(y_matrix[i])

    x0 = [1, 1, 1, 1, 1, 1, 1, 1]

    # replace -1 with x1_min, 1 with x1_max
    x1 = [-40, -40, 20, 20, -40, -40, 20, 20]

    # replace -1 with x2_min, 1 with x2_max
    x2 = [10, 60, 10, 60, 10, 60, 10, 60]

    # replace -1 with x3_min, 1 with x3_max
    x3 = [-20, 20, 20, -20, 20, -20, -20, 20]

    # initialize x1x2, x1x3, x2x3, x1x2x3 with zeros
    x1x2, x1x3, x2x3, x1x2x3 = [0] * 8, [0] * 8, [0] * 8, [0] * 8
    # fill x1x2, x1x3, x2x3, x1x2x3 with the products
    for i in range(n):
        x1x2[i] = x1[i] * x2[i]
        x1x3[i] = x1[i] * x3[i]
        x2x3[i] = x2[i] * x3[i]
        x1x2x3[i] = x1[i] * x2[i] * x3[i]
    # mean y values
    Y_average = []
    for i in range(len(y_matrix)):
        Y_average.append(np.mean(y_matrix[i], axis=0))

    # build the lists for b and a
    list_for_b = [
        xn[0], xn[1], xn[2], xn[3], x1x2_norm, x1x3_norm, x2x3_norm,
        x1x2x3_norm
    ]
    list_for_a = list(zip(x0, x1, x2, x3, x1x2, x1x3, x2x3, x1x2x3))

    # print the design matrix X
    print("Design matrix X:")
    for i in range(n):
        print(list_for_a[i])
    # coefficients b_i for the normalized factors
    bi = []
    for k in range(n):
        S = 0
        for i in range(n):
            S += (list_for_b[k][i] * Y_average[i]) / n
        bi.append(round(S, 3))
    # compute a_i by solving the system of equations with solve; print the regression equation
    ai = [round(i, 3) for i in solve(list_for_a, Y_average)]
    print(
        "Regression equation: \n"
        "y = {} + {}*x1 + {}*x2 + {}*x3 + {}*x1x2 + {}*x1x3 + {}*x2x3 + {}*x1x2x3"
        .format(ai[0], ai[1], ai[2], ai[3], ai[4], ai[5], ai[6], ai[7]))
    # print the results
    print("Regression equation for the normalized factors: \n"
          "y = {} + {}*x1 + {}*x2 + {}*x3 + {}*x1x2 + {}*x1x3 +"
          " {}*x2x3 + {}*x1x2x3".format(bi[0], bi[1], bi[2], bi[3], bi[4],
                                        bi[5], bi[6], bi[7]))

    print("Перевірка за критерієм Кохрена")
    print("Середні значення відгуку за рядками:", "\n", +Y_average[0],
          Y_average[1], Y_average[2], Y_average[3], Y_average[4], Y_average[5],
          Y_average[6], Y_average[7])
    # compute the variance of each row
    dispersions = []
    for i in range(len(y_matrix)):
        a = 0
        for k in y_matrix[i]:
            a += (k - np.mean(y_matrix[i], axis=0))**2
        dispersions.append(a / len(y_matrix[i]))
    # experimental value
    Gp = max(dispersions) / sum(dispersions)
    # theoretical (critical) value
    Gt = 0.5157
    # check the homogeneity of the variances

    if Gp < Gt:
        print("The variances are homogeneous")
    else:
        print(
            "\nThe variances are not homogeneous; the experiment must be restarted\n"
        )
        main(n + 1)
        return

    # Student's t-test
    print("Checking the significance of the coefficients via Student's t-test")
    sb = sum(dispersions) / len(dispersions)
    sbs = (sb / (8 * 3))**0.5

    t_list = [abs(bi[i]) / sbs for i in range(0, 8)]

    d = 0
    res = [0] * 8
    coef_1 = []
    coef_2 = []
    # number of replications of each combination
    m = 3
    F3 = (m - 1) * n
    # check the significance of each coefficient
    for i in range(n):
        if t_list[i] < t.ppf(q=0.975, df=F3):
            coef_2.append(bi[i])
            res[i] = 0
        else:
            coef_1.append(bi[i])
            res[i] = bi[i]
            d += 1

    # output
    print("Significant regression coefficients:", coef_1)
    print("Insignificant regression coefficients:", coef_2)

    # y values computed with the significant regression coefficients
    y_st = []
    for i in range(n):
        y_st.append(res[0] + res[1] * xn[1][i] + res[2] * xn[2][i] + res[3] * xn[3][i] + res[4] * x1x2_norm[i] \
                    + res[5] * x1x3_norm[i] + res[6] * x2x3_norm[i] + res[7] * x1x2x3_norm[i])
    print("Значення з отриманими коефіцієнтами:\n", y_st)

    # Fisher's test
    print("\nChecking the adequacy of the model via Fisher's test\n")
    Sad = m * sum([(y_st[i] - Y_average[i])**2 for i in range(8)]) / (n - d)
    Fp = Sad / sb
    F4 = n - d
    if Fp < f.ppf(q=0.95, dfn=F4, dfd=F3):
        print("The regression equation is adequate at the 0.05 significance level")
    else:
        print("The regression equation is not adequate at the 0.05 significance level")
Beispiel #57
0
 def inverted(self):
     return AffineMap(la.inv(self.matrix),
                      -la.solve(self.matrix, self.offset))
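# Added note: this uses the identity (x -> M x + b)^{-1} : y -> M^{-1} y - M^{-1} b.
# A standalone round-trip check of the formula:
import numpy as np
from numpy import linalg as la

M = np.array([[2.0, 1.0], [0.0, 3.0]])
offs = np.array([1.0, -2.0])
p = np.array([0.5, 4.0])

q = M @ p + offs                               # forward map
p_back = la.inv(M) @ q + (-la.solve(M, offs))  # inverted map, as constructed above
print(np.allclose(p, p_back))                  # True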
Beispiel #58
0
def _solve(k, A_k, X, Y, f, lam, lam_y, mu):
    '''Update one single factor'''
    s_u, i_u = get_row(Y, k)
    a = np.dot(s_u * A_k[i_u], X[i_u])
    B = X.T.dot(A_k[:, np.newaxis] * X) + lam * np.eye(f)
    return LA.solve(B, a)
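# Added self-contained sketch of the same normal-equations update, with the
# sparse bookkeeping (get_row, Y) and the unused lam_y/mu arguments elided;
# all names below are illustrative, not the original's API:
import numpy as np
from numpy import linalg as LA

def solve_factor_dense(weights, targets, X, lam):
    # Ridge-regularized weighted least squares for one latent factor:
    # (X^T diag(weights) X + lam I) beta = X^T (weights * targets)
    f = X.shape[1]
    a = X.T @ (weights * targets)
    B = X.T @ (weights[:, None] * X) + lam * np.eye(f)
    return LA.solve(B, a)

rng = np.random.default_rng(0)
X_lat = rng.normal(size=(50, 8))
w = rng.random(50)
t_obs = rng.normal(size=50)
print(solve_factor_dense(w, t_obs, X_lat, lam=0.1).shape)  # (8,)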
Beispiel #59
0
def getv(n, k):
    a = np.mat([[1, 1], [1, -1]])  # coefficient matrix
    b = np.mat([n, k]).T  # column vector of constants
    x = solve(a, b)  # solution of the linear system
    return x
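# Added usage example (assuming solve is numpy.linalg.solve, imported earlier
# in the source file): getv solves x + y = n, x - y = k.
print(getv(10, 2))  # [[6.], [4.]] since 6 + 4 = 10 and 6 - 4 = 2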
Beispiel #60
0
# ### Observe Spacing

# In[6]:


pt.plot(nodes, 0*nodes, "o")


# ## Part III: Chebyshev Interpolation

# In[9]:


# nodes, n, and i are defined in earlier cells (presumably i = np.arange(n))
V = np.cos(i*np.arccos(nodes.reshape(-1, 1)))  # Chebyshev Vandermonde, T_k(x) = cos(k arccos x)
data = np.random.randn(n)
coeffs = la.solve(V, data)


# In[10]:


x = np.linspace(-1, 1, 1000)
Vfull = np.cos(i*np.arccos(x.reshape(-1, 1)))
pt.plot(x, np.dot(Vfull, coeffs))
pt.plot(nodes, data, "o")


# ## Part IV: Conditioning

# In[13]: