def dot(a, b):
    """Matrix-vector, vector-matrix or matrix-matrix product."""
    import vector  # local import of the companion vector module
    if isinstance(a, sparse) and vector.isVector(b):
        # matrix-vector: new[i] = sum_j a[i, j] * b[j], over stored entries only
        new = vector.zeros(a.size()[0])
        for ij in a.keys():
            new[ij[0]] += a[ij] * b[ij[1]]
        return new
    elif vector.isVector(a) and isinstance(b, sparse):
        # vector-matrix: new[j] = sum_i a[i] * b[i, j], over stored entries only
        new = vector.zeros(b.size()[1])
        for ij in b.keys():
            new[ij[1]] += a[ij[0]] * b[ij]
        return new
    elif isinstance(a, sparse) and isinstance(b, sparse):
        if a.size()[1] != b.size()[0]:
            print('**Warning shapes do not match in dot(sparse, sparse)')
        # matrix-matrix: dense triple loop over index ranges, storing only nonzeros
        new = sparse({})
        n = min(a.size()[1], b.size()[0])
        for i in range(a.size()[0]):
            for j in range(b.size()[1]):
                total = 0.0  # renamed from `sum` so the built-in is not shadowed
                for k in range(n):
                    total += a.get((i, k), 0.0) * b.get((k, j), 0.0)
                if total != 0.0:
                    new[(i, j)] = total
        return new
    else:
        raise TypeError('in dot')
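For orientation, here is a standalone sketch of the matrix-vector branch above, with a plain dict standing in for the sparse class and plain lists for vectors (illustrative stand-ins only, not the module's API):

A = {(0, 0): 2.0, (0, 2): 1.0, (1, 1): 3.0}   # 2x3 matrix with 3 nonzeros
x = [1.0, 1.0, 1.0]
y = [0.0, 0.0]
for (i, j), aij in A.items():
    y[i] += aij * x[j]                        # only stored entries contribute
print(y)                                      # [3.0, 3.0]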
def dotDot(y, a, x):
    """Double dot product y^+ * A * x."""
    import vector  # local import of the companion vector module
    if vector.isVector(y) and isinstance(a, sparse) and vector.isVector(x):
        res = 0.0
        for ij in a.keys():
            i, j = ij
            res += y[i] * a[ij] * x[j]
        return res
    else:
        raise TypeError('dotDot takes vector, sparse, vector as args')
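The same stand-ins illustrate dotDot: the bilinear form y^T A x is accumulated over the stored nonzeros alone, so the cost is proportional to the number of entries, not to n^2:

A = {(0, 0): 2.0, (1, 1): 3.0}
y = [1.0, 2.0]
x = [4.0, 5.0]
res = sum(aij * y[i] * x[j] for (i, j), aij in A.items())
print(res)                                    # 2*1*4 + 3*2*5 = 38.0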
    def biCGsolve(self, x0, b, tol=1.0e-10, nmax=1000):
        """
        Solve self*x = b and return x, using the bi-conjugate gradient method.
        """
        # Relies on module-level imports of math and vector.
        try:
            if not vector.isVector(b):
                raise TypeError(str(self.__class__) + ' in solve')
            if self.size()[0] != len(b) or self.size()[1] != len(b):
                print('**Incompatible sizes in solve')
                print('**size()=', self.size()[0], self.size()[1])
                print('**len=', len(b))
            else:
                kvec = diag(self)  # Jacobi (diagonal) preconditioner
                n = len(b)
                x = x0  # initial guess
                r = b - dot(self, x)
                rbar = r
                w = r / kvec
                wbar = rbar / kvec
                p = vector.zeros(n)
                pbar = vector.zeros(n)
                beta = 0.0
                rho = vector.dot(rbar, w)
                err = vector.norm(dot(self, x) - b)
                k = 0
                print(' bi-conjugate gradient convergence (log error)')
                while abs(err) > tol and k < nmax:
                    p = w + beta * p
                    pbar = wbar + beta * pbar
                    z = dot(self, p)
                    alpha = rho / vector.dot(pbar, z)
                    r = r - alpha * z
                    rbar = rbar - alpha * dot(pbar, self)  # shadow residual: vector-matrix product applies A^T
                    w = r / kvec
                    wbar = rbar / kvec
                    rhoold = rho
                    rho = vector.dot(rbar, w)
                    x = x + alpha * p
                    beta = rho / rhoold
                    err = vector.norm(dot(self, x) - b)
                    print(k, ' %5.1f ' % math.log10(err))
                    k = k + 1
                return x
        except Exception:
            print('ERROR ', self.__class__, '::biCGsolve')
    def CGsolve(self, x0, b, tol=1.0e-10, nmax=1000, verbose=1):
        """
        Solve self*x = b and return x, using the conjugate gradient method.
        """
        # Relies on module-level imports of math and vector.
        if not vector.isVector(b):
            raise TypeError(str(self.__class__) + ' in solve')
        if self.size()[0] != len(b) or self.size()[1] != len(b):
            print('**Incompatible sizes in solve')
            print('**size()=', self.size()[0], self.size()[1])
            print('**len=', len(b))
        else:
            kvec = diag(self)  # Jacobi (diagonal) preconditioner
            n = len(b)
            x = x0  # initial guess
            r = b - dot(self, x)
            try:
                w = r / kvec
            except Exception:
                print('***singular kvec')
            p = vector.zeros(n)
            beta = 0.0
            rho = vector.dot(r, w)
            err = vector.norm(dot(self, x) - b)
            k = 0
            if verbose:
                print(' conjugate gradient convergence (log error)')
            while abs(err) > tol and k < nmax:
                p = w + beta * p
                z = dot(self, p)
                alpha = rho / vector.dot(p, z)
                r = r - alpha * z
                w = r / kvec
                rhoold = rho
                rho = vector.dot(r, w)
                x = x + alpha * p
                beta = rho / rhoold
                err = vector.norm(dot(self, x) - b)
                if verbose:
                    print(k, ' %5.1f ' % math.log10(err))
                k = k + 1
            return x
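To make the loop concrete, here is a minimal self-contained sketch of the same Jacobi-preconditioned conjugate gradient iteration on a 2x2 SPD system, with plain lists standing in for the module's sparse and vector classes (hypothetical stand-ins):

import math

A = [[4.0, 1.0], [1.0, 3.0]]                 # small SPD test matrix
b = [1.0, 2.0]
kvec = [A[i][i] for i in range(2)]           # diagonal preconditioner, as diag(self)

def matvec(M, v):
    return [sum(M[i][j] * v[j] for j in range(2)) for i in range(2)]

x = [0.0, 0.0]                               # initial guess
Ax = matvec(A, x)
r = [b[i] - Ax[i] for i in range(2)]
w = [r[i] / kvec[i] for i in range(2)]
p = [0.0, 0.0]
beta = 0.0
rho = sum(r[i] * w[i] for i in range(2))
for k in range(100):
    p = [w[i] + beta * p[i] for i in range(2)]
    z = matvec(A, p)
    alpha = rho / sum(p[i] * z[i] for i in range(2))
    x = [x[i] + alpha * p[i] for i in range(2)]
    r = [r[i] - alpha * z[i] for i in range(2)]
    if math.sqrt(sum(ri * ri for ri in r)) < 1.0e-10:
        break                                # residual small enough: converged
    w = [r[i] / kvec[i] for i in range(2)]
    rho, rhoold = sum(r[i] * w[i] for i in range(2)), rho
    beta = rho / rhoold

print(x)                                     # ~ [1/11, 7/11] = [0.0909, 0.6364]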