def indx_3dto1d(idx, sz):
    """
    Translate 3D matrix coordinates to 1D vector coordinates for a 3D
    matrix of size sz.

    Parameters
    ----------
    idx : array
        A 3D numpy array of matrix coordinates.
    sz : array
        Shape of 3D matrix idx.

    Returns
    -------
    idx1 : array
        A 1D numpy coordinate vector.

    References
    ----------
    .. Adapted from PyClusterROI
    """
    from scipy import prod

    # ndim (not linear-algebra rank) distinguishes a single coordinate
    # triple from an N x 3 array of coordinates; np.linalg.matrix_rank
    # would misclassify e.g. collinear coordinate sets.
    if idx.ndim == 1:
        idx1 = idx[0] * prod(sz[1:3]) + idx[1] * sz[2] + idx[2]
    else:
        idx1 = idx[:, 0] * prod(sz[1:3]) + idx[:, 1] * sz[2] + idx[:, 2]
    return idx1
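# A minimal usage sketch for indx_3dto1d (assumes numpy imported as np and
# a SciPy version that still exposes scipy.prod): map voxel coordinates in
# a (4, 5, 6) volume to flat C-order indices.
import numpy as np

sz = np.array([4, 5, 6])
single = np.array([1, 2, 3])              # one (x, y, z) triple
many = np.array([[0, 0, 0], [3, 4, 5]])   # an N x 3 stack of triples

print(indx_3dto1d(single, sz))  # 1*30 + 2*6 + 3 = 45
print(indx_3dto1d(many, sz))    # [0, 119]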
def _expandg(g):
    """Expand transition function to a matrix."""
    # Row i carries a single 1 in column g.flatten(order='F')[i]; the old
    # flatten(1) call relied on the long-removed fortran=True flag.
    P = sparse.coo_matrix((sp.ones(sp.prod(g.shape)),
                           (sp.r_[0:sp.prod(g.shape)],
                            g.flatten(order='F'))))
    return P.tocsr()
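# A hypothetical demo of _expandg (assumes scipy imported as sp and
# scipy.sparse as sparse; the interpretation of g as "g[i, j] is the
# successor state of state i under action j" is illustrative, not taken
# from the source): every (state, action) row becomes one-hot.
import scipy as sp
from scipy import sparse

g = sp.array([[1, 2],
              [0, 1],
              [2, 0]])   # 3 states x 2 actions, made up for illustration
P = _expandg(g)
assert P.shape == (6, 3)
assert (P.sum(axis=1) == 1).all()  # one successor per (state, action) row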
def spre(A):
    """Superoperator formed from pre-multiplication by operator A.

    Parameters
    ----------
    A : qobj
        Quantum operator for pre-multiplication.

    Returns
    -------
    super : qobj
        Superoperator formed from input quantum object.
    """
    if not isoper(A):
        raise TypeError('Input is not a quantum object')
    d = A.dims[1]
    S = Qobj()
    S.dims = [[A.dims[0][:], d[:]], [A.dims[1][:], d[:]]]
    S.shape = [prod(S.dims[0][0]) * prod(S.dims[0][1]),
               prod(S.dims[1][0]) * prod(S.dims[1][1])]
    S.data = sp.kron(sp.identity(prod(d)), A.data, format='csr')
    return S
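# Why kron(identity, A) works: with column-stacking vec(X) = X.flatten('F'),
# vec(A @ X) == (I kron A) @ vec(X). A minimal numpy check of that identity,
# independent of the Qobj machinery above:
import numpy as np

n = 3
A = np.random.rand(n, n)
rho = np.random.rand(n, n)
lhs = (A @ rho).flatten('F')
rhs = np.kron(np.eye(n), A) @ rho.flatten('F')
assert np.allclose(lhs, rhs)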
def indx_3dto1d(idx, sz):
    from scipy import prod

    # scipy.rank (removed upstream) was an alias for ndim, not matrix rank.
    if idx.ndim == 1:
        idx1 = idx[0]*prod(sz[1:3]) + idx[1]*sz[2] + idx[2]
    else:
        idx1 = idx[:, 0]*prod(sz[1:3]) + idx[:, 1]*sz[2] + idx[:, 2]
    return idx1
def indx_1dto3d(idx, sz):
    """
    Translate 1D vector coordinates to 3D matrix coordinates for a 3D
    matrix of size sz.

    Parameters
    ----------
    idx : array
        A 1D numpy coordinate vector.
    sz : array
        Shape of 3D matrix idx.

    Returns
    -------
    x : int
        x-coordinate of 3D matrix coordinates.
    y : int
        y-coordinate of 3D matrix coordinates.
    z : int
        z-coordinate of 3D matrix coordinates.

    References
    ----------
    .. Adapted from PyClusterROI
    """
    from scipy import prod

    # Floor division keeps the coordinates integral; scipy.divide is true
    # division under Python 3 and would return floats here.
    x = idx // prod(sz[1:3])
    y = (idx - x * prod(sz[1:3])) // sz[2]
    z = idx - x * prod(sz[1:3]) - y * sz[2]
    return x, y, z
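# A quick round-trip sanity check pairing the two helpers (assumes numpy
# imported as np and a scipy.prod-era SciPy):
import numpy as np

sz = np.array([4, 5, 6])
for flat in (0, 45, 119):
    x, y, z = indx_1dto3d(flat, sz)
    assert indx_3dto1d(np.array([x, y, z]), sz) == flat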
def expectation_E_EXP(P, E_or):
    """
    Given a set of binomial random variables parameterized by a vector P,
    conditioned on E[at least one success] = E_or, return a vector E with
    the expectation of each random variable.

    This is an alternate, equivalent implementation kept for error
    checking; its drawback is that it runs in exponential time.
    """
    P = asarray(P)
    P1 = 1 - P
    N = P.shape[0]
    E = zeros(N)
    import itertools
    # Iterate over all binary vectors of length N.
    for S in itertools.product(*tuple([[1, 0]] * N)):
        S = array(S)
        # Compute the probability of this outcome vector according to P.
        p = prod(where(S, P, P1))
        # Accumulate the probability-weighted average.
        E += p * S
    # Divide by the probability of getting at least 1 success and
    # multiply times E_or.
    E = E * E_or / (1 - prod(P1))
    return E
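# Tiny illustration (assumes the star-import style of the source module,
# i.e. asarray/zeros/array/prod/where from numpy in scope): with a single
# variable, conditioning on at least one success forces E == E_or.
print(expectation_E_EXP([0.3], 1.0))       # [1.0]
print(expectation_E_EXP([0.5, 0.5], 1.0))  # [0.667, 0.667], by symmetry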
def step(self, x, y):
    SIZE = self.SIZE
    J = self.J
    h = self.h
    factor = y % 2 * 2 - 1
    neighbours = scipy.array([
        self.m[x][(y + 1) % SIZE],
        self.m[x][(y - 1) % SIZE],
        self.m[(x + 1) % SIZE][y],
        self.m[(x - 1) % SIZE][y],
        self.m[(x - factor) % SIZE][(y - 1) % SIZE],
        self.m[(x - factor) % SIZE][(y + 1) % SIZE]
    ])
    jump = random.randint(1, int(35 * self.cutoff) + 1) * 5 * random.choice([1, -1])
    diff0 = (neighbours - self.m[x][y]) % 360
    diff1 = (neighbours - self.m[x][y] - jump) % 360
    p = (scipy.prod([self.be[i] for i in diff1])
         / scipy.prod([self.be[i] for i in diff0]))
    self.sts += 1
    if random.random() < p:
        self.change += 1
        self.m[x][y] = (self.m[x][y] + jump) % 360
        self.E = self.E + (sum([self.cos[i] for i in diff1]) * J * (-1)
                           - sum([self.cos[i] for i in diff0]) * J * (-1))
def test_insert_shape_corner_overwrite(self):
    im = sp.ones([10, 10])
    shape = sp.ones([3, 3])
    im = ps.generators.insert_shape(im, element=shape, corner=[5, 5],
                                    value=1.0, mode='overlay')
    assert sp.sum(im) == (sp.prod(im.shape) + sp.prod(shape.shape))
    assert im[5, 5] == 2
    assert im[4, 5] == 1 and im[5, 4] == 1
def make_dfact(n, primes):
    dfact = {}
    r = 1
    while r <= len(primes) and sp.prod(primes[:r]) <= n:
        for c in Primes.combProdLessThan(primes, r, n):
            d = int(round(sp.prod(c)) + 0.01)
            dfact[d] = c
        r += 1
    return dfact
def genLine(grid, numElements):
    for seg in genNegSlopeDiagonal(grid, numElements):
        yield (scipy.prod(seg), seg, 'negative slope')
    for seg in genPosSlopeDiagonal(grid, numElements):
        yield (scipy.prod(seg), seg, 'positive slope')
    for seg in genHoriz(grid, numElements):
        yield (scipy.prod(seg), seg, 'horizontal')
    for seg in genVert(grid, numElements):
        yield (scipy.prod(seg), seg, 'vertical')
def cube_grid(dims):
    """
    Return a regular nD-cube mesh with given shape.

    Eg. cube_grid_nd((2,2))   -> 2x2   - 2d mesh (x,y)
        cube_grid_nd((4,3,2)) -> 4x3x2 - 3d mesh (x,y,z)

    Eg.
      v, i = cube_grid_nd((2,1))

      v = array([[ 0.,  0.],
                 [ 1.,  0.],
                 [ 2.,  0.],
                 [ 0.,  1.],
                 [ 1.,  1.],
                 [ 2.,  1.]])

      i = array([[[0, 3],
                  [1, 4]],
                 [[1, 4],
                  [2, 5]]])
    """
    dims = tuple(dims)
    vert_dims = tuple(x + 1 for x in dims)
    N = len(dims)

    vertices = zeros((prod(vert_dims), N))
    grid = mgrid[tuple(slice(0, x, None) for x in reversed(vert_dims))]
    for i in range(N):
        vertices[:, i] = ravel(grid[N - i - 1])

    # construct one cube to be tiled
    cube = zeros((2,) * N, dtype='i')
    cycle = array([1] + list(cumprod(vert_dims)[:-1]), dtype='i')
    for i in ndindex(*((2,) * N)):
        cube[i] = sum(array(i) * cycle)
    cycle = array([1] + list(cumprod(vert_dims)[:-1]), dtype='i')

    # indices of all vertices which are the lower corner of a cube
    interior_indices = arange(prod(vert_dims)).reshape(
        tuple(reversed(vert_dims))).T
    interior_indices = interior_indices[tuple(slice(0, x, None) for x in dims)]

    indices = tile(cube, (prod(dims),) + (1,) * N) + \
        interior_indices.reshape((prod(dims),) + (1,) * N)

    return (vertices, indices)
def createRules(self, cages):
    """
    Create a random set of rules for the latin square given by
    self.solution under the cage structure defined in cages
    """
    for cage in cages:
        if len(cage) == 1:
            operation = EQUALS
            value = self.solution[cage[0]]
        elif len(cage) == 2:
            operation = random.randint(1, 4)
            values = sorted([self.solution[index] for index in cage])
            if operation == PLUS:
                value = scipy.sum(values)
            elif operation == MINUS:
                value = values[1] - values[0]
            elif operation == TIMES:
                value = scipy.prod(values)
            elif operation == DIVIDE:
                vMin, vMax = values
                if vMax % vMin == 0:
                    value = vMax // vMin
                else:
                    operation = MINUS
                    value = vMax - vMin
            else:
                raise ValueError("Operation -- I'm the doctor for you")
        else:
            operation = random.randint(1, 2)
            values = sorted([self.solution[index] for index in cage])
            if operation == PLUS:
                value = scipy.sum(values)
            elif operation == TIMES:
                value = scipy.prod(values)
            else:
                raise ValueError("Operation not possible for this list, dog")
        self.ruleList.append((cage, operation, value))
def _steadystate_power(L, maxiter=10, tol=1e-6, itertol=1e-5,
                       use_umfpack=True, verbose=False):
    """
    Inverse power method for steady state solving.
    """
    if verbose:
        print('Starting iterative power method Solver...')
    use_solver(assumeSortedIndices=True, useUmfpack=use_umfpack)
    rhoss = Qobj()
    sflag = issuper(L)
    if sflag:
        rhoss.dims = L.dims[0]
        rhoss.shape = [prod(rhoss.dims[0]), prod(rhoss.dims[1])]
    else:
        rhoss.dims = [L.dims[0], 1]
        rhoss.shape = [prod(rhoss.dims[0]), 1]
    n = prod(rhoss.shape)
    L = L.data.tocsc() - (tol**2) * sp.eye(n, n, format='csc')
    L.sort_indices()
    v = mat2vec(rand_dm(rhoss.shape[0], 0.5 / rhoss.shape[0] + 0.5).full())
    if verbose:
        start_time = time.time()
    it = 0
    while (la.norm(L * v, np.inf) > tol) and (it < maxiter):
        v = spsolve(L, v, use_umfpack=use_umfpack)
        v = v / la.norm(v, np.inf)
        it += 1
    if it >= maxiter:
        raise Exception('Failed to find steady state after ' +
                        str(maxiter) + ' iterations')
    # normalise according to type of problem
    if sflag:
        trow = sp.eye(rhoss.shape[0], rhoss.shape[0], format='coo')
        trow = sp_reshape(trow, (1, n))
        data = v / sum(trow.dot(v))
    else:
        data = v / la.norm(v)  # was `data / la.norm(v)`: used before assignment
    data = sp.csr_matrix(vec2mat(data))
    rhoss.data = 0.5 * (data + data.conj().T)
    rhoss.isherm = True
    if verbose:
        print('Power solver time: ', time.time() - start_time)
    if qset.auto_tidyup:
        return rhoss.tidyup()
    else:
        return rhoss
def diffmat(dims, order='C'):
    """
    This function will return a tuple of difference matrices for data
    from an Nd array that has been rasterized. The order parameter
    determines whether the array was rasterized in a C style (Python)
    or FORTRAN style (MATLAB).

    Inputs:
        dims  - A list of the size of the x,y,z.. dimensions.
        order - Specifies the vectorization of the matrix.
    Outputs:
        dx,dy,dy... - The finite difference operators for a vectorized
            array. If these are to be stacked together as one big
            operator then sp.sparse.vstack should be used.
    """
    # flip the dimensions around
    dims = [int(i) for i in dims]
    xdim = dims[0]
    ydim = dims[1]
    dims[0] = ydim
    dims[1] = xdim

    if order.lower() == 'c':
        dims = dims[::-1]

    outD = []
    for idimn, idim in enumerate(dims):
        if idim == 0:
            outD.append(sp.array([]))
            continue
        e = sp.ones(idim)
        dthing = sp.vstack((-e, e))
        D = sp.sparse.spdiags(dthing, [0, 1], idim - 1, idim).toarray()
        D = sp.vstack((D, D[-1]))
        if idim > 0:
            E = sp.sparse.eye(sp.prod(dims[:idimn]))
            D = sp.sparse.kron(D, E)
        if idimn < len(dims) - 1:
            E = sp.sparse.eye(sp.prod(dims[idimn + 1:]))
            D = sp.sparse.kron(E, D)
        outD.append(sp.sparse.csc_matrix(D))

    if order.lower() == 'c':
        outD = outD[::-1]

    Dy = outD[0]
    Dx = outD[1]
    outD[0] = Dx
    outD[1] = Dy
    return tuple(outD)
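# A usage sketch under the assumption that `sp` is scipy with scipy.sparse
# imported (matching the function body): build first-difference operators
# for a 4x3 grid vectorized in C order; each operator acts on the
# 12-element raveled array.
import scipy as sp
import scipy.sparse

Dx, Dy = diffmat([4, 3], order='C')
assert Dx.shape == (12, 12) and Dy.shape == (12, 12)
# Stacking them gives a single gradient operator, as the docstring suggests:
G = sp.sparse.vstack((Dx, Dy))
assert G.shape == (24, 12)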
def isket(Q):
    """
    Determines if given quantum object is a ket-vector.

    Parameters
    ----------
    Q : qobj
        Quantum object

    Returns
    -------
    isket : bool
        True if qobj is ket-vector, False otherwise.

    Examples
    --------
    >>> psi = basis(5, 2)
    >>> isket(psi)
    True
    """
    result = isinstance(Q.dims[0], list)
    if result:
        result = result and prod(Q.dims[1]) == 1
    return result
def calcExhaustiveSearchResults(self, xMin, xMax, xStp, xShp):
    resultList = []
    numSearchPts = sp.prod(xShp)
    if self.distributedMetricEvaluation:
        if (self.comm is None) or (self.comm.Get_rank() == self.root):
            for i in range(0, numSearchPts):
                x = xMin + np.unravel_index(i, xShp) * xStp
                resultList.append([self.metric(x), x])
                rootLogger.debug(resultList[-1])
            self.metric.rootTerminate()
        else:
            self.metric.waitForEvaluate()
        if self.comm is not None:
            resultList = self.comm.bcast(resultList, self.root)
    else:
        commSz = 1
        commRk = 0
        if self.comm is not None:
            commSz = self.comm.Get_size()
            commRk = self.comm.Get_rank()
        for i in range(commRk, numSearchPts, commSz):
            x = xMin + np.unravel_index(i, xShp) * xStp
            resultList.append([self.metric(x), x])
        if self.comm is not None:
            rListList = self.comm.allgather(resultList)
            resultList = []
            for rList in rListList:
                resultList += rList
    return resultList
def fitsurface(errfunc, paramlists, inputs):
    """This function will create a fit surface using an error function
    given by the user and an N length list of parameter value lists.
    The output will be an N-dimensional array where each dimension is
    the size of the array given for each of the parameters. Arrays of
    one element are not represented in the returned fit surface array.

    Inputs:
        errfunc - The function used to determine the error between the
            given data and the theoretical function.
        paramlists - An N length list of arrays for each of the
            parameters.
        inputs - A tuple of the rest of the inputs for the error
            function."""
    paramsizlist = sp.array([len(i) for i in paramlists])
    outsize = sp.where(paramsizlist != 1)[0]
    # make the fit surface and flatten it
    fit_surface = sp.zeros(paramsizlist[outsize])
    fit_surface = fit_surface.flatten()

    for inum in range(sp.prod(paramsizlist)):
        numcopy = inum
        curnum = sp.zeros_like(paramsizlist)
        # TODO: Replace with sp.unravel_index
        # determine current parameters
        for i, iparam in enumerate(reversed(paramsizlist)):
            curnum[i] = sp.mod(numcopy, iparam)
            numcopy = sp.floor(numcopy / iparam)
        curnum = curnum[::-1]
        cur_x = sp.array([ip[curnum[num_p]]
                          for num_p, ip in enumerate(paramlists)])
        diffthing = errfunc(cur_x, *inputs)
        fit_surface[inum] = (sp.absolute(diffthing)**2).sum()
    # return the fit surface after it has been de-flattened
    return fit_surface.reshape(paramsizlist[outsize]).copy()
def fit(self):
    self.initialiseMetric()
    xMin, xMax, xStp, xShp = self.calcExhaustiveSearchGrid()
    numSearchPts = sp.prod(xShp)
    rootLogger.info("Grid search:")
    rootLogger.info("parameter x min = %s" % (xMin,))
    rootLogger.info("parameter x max = %s" % (xMax,))
    rootLogger.info("parameter x step = %s" % (xStp,))
    rootLogger.info("parameter x shape = %s, %s metric evaluations"
                    % (xShp, numSearchPts))
    rootLogger.info("Exhaustive search...")
    cylList = []
    for cylIdx in range(0, self.numcyl):
        self.maskGradientImageCylinders(cylList)
        resultList = self.calcExhaustiveSearchResults(xMin, xMax, xStp, xShp)
        resultList = self.eliminatePoorResults(resultList)
        rootLogger.info("Done exhaustive search.")
        resultList = self.calcBestRefinements(resultList)
        cylList.append(resultList[0])
    # Convert the parameter-vectors into 3 element centre-point,
    # 3-element axis, etc.
    cylList = [
        [resultPair[0], self.calcFullCylinderParameters(resultPair[1])]
        for resultPair in cylList
    ]
    return cylList
def european_option_delta(self):
    numerator = sp.add(
        sp.log(sp.divide(self.spot_price, self.strike_price)),
        sp.multiply(
            (self.interest_rate - self.dividend_yield
             + 0.5 * sp.power(self.sigma, 2)),
            self.time_to_maturity))
    d1 = sp.divide(
        numerator,
        sp.prod([self.sigma, sp.sqrt(self.time_to_maturity)], axis=0))
    call_delta = self.bls_erf_value(d1)
    put_delta = call_delta - 1
    return call_delta, put_delta
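# The method above computes the Black-Scholes d1 and maps it to call/put
# deltas. A self-contained sketch of the same formula using
# scipy.stats.norm.cdf for the bls_erf_value step (the standalone function
# and its argument names are illustrative, not the class's API):
from scipy.stats import norm
import numpy as np

def bs_delta(spot, strike, rate, div_yield, sigma, tau):
    d1 = (np.log(spot / strike)
          + (rate - div_yield + 0.5 * sigma**2) * tau) / (sigma * np.sqrt(tau))
    call_delta = norm.cdf(d1)
    return call_delta, call_delta - 1

print(bs_delta(100.0, 100.0, 0.05, 0.0, 0.2, 1.0))  # ~(0.637, -0.363)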
def set_angular_selection(self, costheta, angular, interpol='lin', copy=False):
    if scipy.isscalar(costheta[0]):
        costheta = [costheta]
    shapeangular = angular.shape
    self._shape_angular = scipy.array(shapeangular, dtype=ctypes.c_size_t)
    self._costheta = [new(x_, dtype=self.C_TYPE, copy=copy) for x_ in costheta]
    self.__costheta = scipy.concatenate(self._costheta)
    shape = tuple(map(len, self._costheta))
    self._angular = new(angular, dtype=self.C_TYPE, copy=copy)
    typecostheta = ctypeslib.ndpointer(dtype=self.C_TYPE,
                                       shape=scipy.sum(shape))
    typeangular = ctypeslib.ndpointer(dtype=self.C_TYPE,
                                      shape=scipy.prod(shape))
    self.anacorr.set_angular_selection.argtypes = (
        typecostheta, typeangular,
        ctypeslib.ndpointer(dtype=ctypes.c_size_t, shape=len(shape)),
        ctypes.c_size_t, ctypes.c_char_p)
    self.anacorr.set_angular_selection(self.__costheta, self._angular,
                                       self._shape_angular, len(shapeangular),
                                       interpol.encode('utf-8'))
    self._angular.shape = shapeangular
def set_window(self, x, nells=None):
    if scipy.isscalar(x[0]):
        x = [x]
    self.x = [new(x_, dtype=self.C_TYPE) for x_ in x]
    self._x = scipy.concatenate(self.x)
    shapex = tuple(map(len, self.x))
    self._shape_window = scipy.array(shapex, dtype=ctypes.c_size_t)
    if nells is None:
        nells = tuple(len(self._ells[num + 1]) for num in range(len(shapex)))
    shapey = shapex + nells
    self.y = scipy.zeros(shapey, dtype=self.C_TYPE).flatten()
    typex = ctypeslib.ndpointer(dtype=self.C_TYPE, shape=scipy.sum(shapex))
    typey = ctypeslib.ndpointer(dtype=self.C_TYPE, shape=scipy.prod(shapey))
    self.anacorr.set_window.argtypes = (
        typex, typey,
        ctypeslib.ndpointer(dtype=ctypes.c_size_t, shape=len(shapex)),
        ctypes.c_size_t)
    self.anacorr.set_window(self._x, self.y, self._shape_window, len(shapex))
    self.y.shape = shapey
def _generate_pores(self):
    r"""
    Generate the pores (coordinates, numbering and types)
    """
    self._logger.info("generate_pores: Create specified number of pores")

    # Find non-zero elements in image
    template = self._template
    Np = np.sum(template > 0)
    # Add pores to data and info
    pind = np.arange(0, Np)
    self.set_pore_info(label='all', locations=pind)
    self.set_pore_data(prop='numbering', data=pind)  # Remove eventually

    img_ind = np.ravel_multi_index(sp.nonzero(template),
                                   dims=sp.shape(template), order='F')
    self.set_pore_data(prop='voxel_index', data=img_ind)

    # This voxel_to_pore map is messy but works
    temp = sp.prod(sp.shape(template)) * sp.ones(
        np.prod(sp.shape(template),), dtype=sp.int32)
    temp[img_ind] = pind
    self._voxel_to_pore_map = temp

    coords = self._Lc * (0.5 + np.transpose(np.nonzero(template)))
    self.set_pore_data(prop='coords', data=coords)
    self._logger.debug("generate_pores: End of method")
def _steadystate_lu(L, use_rcm=True, use_umfpack=False):
    """
    Find the steady state(s) of an open quantum system by computing the
    LU decomposition of the underlying matrix.
    """
    if settings.debug:
        print('Starting LU solver...')
    dims = L.dims[0]
    weight = np.abs(L.data.max())
    n = prod(L.dims[0][0])
    b = np.zeros(n**2, dtype=complex)
    b[0] = weight
    L = L.data.tocsc() + sp.csc_matrix(
        (weight * np.ones(n),
         (np.zeros(n), [nn * (n + 1) for nn in range(n)])),
        shape=(n**2, n**2))
    L.sort_indices()
    use_solver(assumeSortedIndices=True, useUmfpack=use_umfpack)
    if use_rcm:
        perm = symrcm(L)
        L = sparse_permute(L, perm, perm)
        b = b[np.ix_(perm,)]
    solve = factorized(L)
    v = solve(b)
    if use_rcm:
        rev_perm = np.argsort(perm)
        v = v[np.ix_(rev_perm,)]
    data = vec2mat(v)
    data = 0.5 * (data + data.conj().T)
    return Qobj(data, dims=dims, isherm=True)
def _steadystate_eigen(L, ss_args):
    """
    Internal function for solving the steady state problem by
    finding the eigenvector corresponding to the zero eigenvalue
    of the Liouvillian using ARPACK.
    """
    if settings.debug:
        print('Starting Eigen solver...')
    dims = L.dims[0]
    shape = prod(dims[0])
    L = L.data.tocsc()
    if ss_args['use_rcm']:
        if settings.debug:
            old_band = sp_bandwidth(L)[0]
            print('Original bandwidth:', old_band)
        perm = reverse_cuthill_mckee(L)
        rev_perm = np.argsort(perm)
        L = sp_permute(L, perm, perm, 'csc')
        if settings.debug:
            rcm_band = sp_bandwidth(L)[0]
            print('RCM bandwidth:', rcm_band)
            print('Bandwidth reduction factor:', round(old_band / rcm_band, 1))
    eigval, eigvec = eigs(L, k=1, sigma=1e-15, tol=ss_args['tol'],
                          which='LM', maxiter=ss_args['maxiter'])
    if ss_args['use_rcm']:
        eigvec = eigvec[np.ix_(rev_perm,)]
    data = vec2mat(eigvec)
    data = 0.5 * (data + data.conj().T)
    out = Qobj(data, dims=dims, isherm=True)
    return out / out.tr()
def _steadystate_lu(L, verbose=False):
    """
    Find the steady state(s) of an open quantum system by computing the
    LU decomposition of the underlying matrix.
    """
    use_solver(assumeSortedIndices=True)
    if verbose:
        print('Starting LU solver...')
        start_time = time.time()
    n = prod(L.dims[0][0])
    b = np.zeros(n**2)
    b[0] = 1.0
    A = L.data.tocsc() + sp.csc_matrix(
        (np.ones(n), (np.zeros(n), [nn * (n + 1) for nn in range(n)])),
        shape=(n**2, n**2))
    A.sort_indices()
    solve = factorized(A)
    v = solve(b)
    if verbose:
        print('LU solver time: ', time.time() - start_time)
    data = vec2mat(v)
    data = 0.5 * (data + data.conj().T)
    return Qobj(data, dims=L.dims[0], isherm=True)
def _steadystate_direct_sparse(L, use_umfpack=True, verbose=False):
    """
    Direct solver that uses scipy sparse matrices
    """
    if verbose:
        print('Starting direct solver...')
    n = prod(L.dims[0][0])
    b = sp.csr_matrix(([1.0], ([0], [0])), shape=(n**2, 1))
    M = L.data + sp.csr_matrix(
        (np.ones(n), (np.zeros(n), [nn * (n + 1) for nn in range(n)])),
        shape=(n**2, n**2))
    use_solver(assumeSortedIndices=True, useUmfpack=use_umfpack)
    M.sort_indices()
    if verbose:
        start_time = time.time()
    v = spsolve(M, b, use_umfpack=use_umfpack)
    if verbose:
        print('Direct solver time: ', time.time() - start_time)
    data = vec2mat(v)
    data = 0.5 * (data + data.conj().T)
    return Qobj(data, dims=L.dims[0], isherm=True)
def steadystate_iterative(H, c_ops, use_precond=True):
    """
    .. note:: Experimental.
    """
    L = liouvillian_fast(H, c_ops)
    n = prod(L.dims[0][0])
    b = np.zeros(n ** 2)
    b[0] = 1.0
    A = L.data + sp.csr_matrix(
        (np.ones(n), (np.zeros(n), [nn * (n + 1) for nn in range(n)])),
        shape=(n ** 2, n ** 2))

    if use_precond:
        try:
            P = spilu(A, permc_spec='MMD_AT_PLUS_A')
            P_x = lambda x: P.solve(x)
            M = LinearOperator((n ** 2, n ** 2), matvec=P_x)
        except Exception:
            warnings.warn("Preconditioning failed. Continuing without.",
                          UserWarning)
            M = None
    else:
        M = None

    v, check = bicgstab(A, b, tol=1e-5, M=M)

    return Qobj(vec2mat(v), dims=L.dims[0], isherm=True)
def density(self, x):
    assert x.shape == self.dim, "Problem with the dimensionalities"
    assert x.dtype == int, "x has to be an integer array"

    theta = self.params['theta'].flatten()
    x = x.flatten()

    # return s.prod(stats.poisson.pmf(x, theta))
    return s.prod(s.divide(theta**x * s.exp(-theta), s.misc.factorial(x)))
def _steadystate_direct_sparse(L, use_rcm=True, use_umfpack=False):
    """
    Direct solver that uses scipy sparse matrices
    """
    if settings.debug:
        print('Starting direct solver...')
    dims = L.dims[0]
    weight = np.abs(L.data.max())
    n = prod(L.dims[0][0])
    b = np.zeros((n**2, 1), dtype=complex)
    b[0, 0] = weight
    L = L.data + sp.csr_matrix(
        (weight * np.ones(n),
         (np.zeros(n), [nn * (n + 1) for nn in range(n)])),
        shape=(n**2, n**2))
    L.sort_indices()
    use_solver(assumeSortedIndices=True, useUmfpack=use_umfpack)
    if use_rcm:
        perm = symrcm(L)
        L = sparse_permute(L, perm, perm)
        b = b[np.ix_(perm,)]
    v = spsolve(L, b)
    if use_rcm:
        rev_perm = np.argsort(perm)
        v = v[np.ix_(rev_perm,)]
    data = vec2mat(v)
    data = 0.5 * (data + data.conj().T)
    return Qobj(data, dims=dims, isherm=True)
def _steadystate_direct_dense(L, verbose=False):
    """
    Direct solver that uses numpy dense matrices. Suitable for small
    systems with a few states.
    """
    if verbose:
        print('Starting direct dense solver...')
    n = prod(L.dims[0][0])
    b = np.zeros(n**2)
    b[0] = 1.0
    M = L.data.todense()
    M[0, :] = np.diag(np.ones(n)).reshape((1, n**2))
    if verbose:
        start_time = time.time()
    v = np.linalg.solve(M, b)
    if verbose:
        print('Direct dense solver time: ', time.time() - start_time)
    data = vec2mat(v)
    data = 0.5 * (data + data.conj().T)
    return Qobj(data, dims=L.dims[0], isherm=True)
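# All the steady-state solvers in this collection share one trick: L @ v = 0
# is singular, so one equation is replaced (or augmented) by the trace
# condition Tr(rho) = 1; the columns nn * (n + 1) are exactly the diagonal
# entries of the vectorized density matrix. A self-contained numpy sketch
# for a decaying qubit (an illustrative model, not the library's code),
# using column-stacking vec(rho) = rho.flatten('F'):
import numpy as np

gamma = 1.0
C = np.sqrt(gamma) * np.array([[0., 1.], [0., 0.]])  # decay |1> -> |0>
n = C.shape[0]
I = np.eye(n)
CdC = C.conj().T @ C
# Lindblad dissipator as a superoperator (Hamiltonian omitted for brevity):
Liou = (np.kron(C.conj(), C)
        - 0.5 * (np.kron(I, CdC) + np.kron(CdC.T, I)))

b = np.zeros(n**2)
b[0] = 1.0
Liou[0, :] = np.eye(n).reshape(n**2)  # overwrite row 0 with the trace row

rho = np.linalg.solve(Liou, b).reshape((n, n), order='F')
print(rho)  # [[1, 0], [0, 0]]: all population relaxes to the ground state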
def state_number_index(dims, state):
    """
    Return the index of a quantum state corresponding to state,
    given a system with dimensions given by dims.

    Example:

        >>> state_number_index([2, 2, 2], [1, 1, 0])
        6

    Parameters
    ----------
    dims : list or array
        The quantum state dimensions array, as it would appear in a Qobj.

    state : list
        State number array.

    Returns
    -------
    idx : int
        The index of the state given by `state` in standard enumeration
        ordering.
    """
    return int(sum([state[i] * prod(dims[i + 1:])
                    for i, d in enumerate(dims)]))
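# This is mixed-radix positional arithmetic, equivalent to numpy's C-order
# ravel of a multi-index; a quick cross-check (assumes a prod function in
# scope for state_number_index, as in its source module):
import numpy as np

assert state_number_index([2, 2, 2], [1, 1, 0]) == 6
assert state_number_index([2, 2, 2], [1, 1, 0]) == \
    np.ravel_multi_index((1, 1, 0), (2, 2, 2))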
def get_num_blocks(self, return_block_shape=False,
                   return_n_axes_diag=False):
    """Get the number of blocks in a block diagonal matrix."""
    shape = self.mat_shape()
    # Current algorithm assumes specific format.
    self.assert_axes_ordered()

    diag_axes = [ii for ii in range(self.ndim)
                 if ii in self.rows and ii in self.cols]
    num_blocks = sp.prod([self.shape[ii] for ii in diag_axes])
    # Integer division keeps the block shape integral (the original
    # relied on Python 2 integer `/`).
    if return_block_shape and return_n_axes_diag:
        return num_blocks, \
            (shape[0] // num_blocks, shape[1] // num_blocks), \
            len(diag_axes)
    elif return_block_shape:
        return num_blocks, (shape[0] // num_blocks, shape[1] // num_blocks)
    elif return_n_axes_diag:
        return num_blocks, len(diag_axes)
    else:
        return num_blocks
def normalize(vectors, grid_spacing=1.0):
    """
    Normalizes vectors stored as columns of a 2D numpy array
    """
    K = vectors.shape[1]  # number of vectors
    G = vectors.shape[0]  # length of each vector

    # Set volume element h. This takes a little consideration
    if isinstance(grid_spacing, NUMBER):
        h = grid_spacing
    elif isinstance(grid_spacing, ARRAY):
        grid_spacing = sp.array(grid_spacing)
        h = sp.prod(grid_spacing)
    else:
        # The original printed and then used a bare `raise`, which fails
        # outside an active exception; raise a proper error instead.
        raise TypeError('ERROR: what kind of thing is grid_spacing? %s'
                        % type(grid_spacing))
    assert (h > 0)

    norm_vectors = sp.zeros([G, K])
    for i in range(K):
        # Extract v from list of vectors
        v = vectors[:, i]
        # Flip sign of v so that last element is nonnegative
        if (v[-1] < 0):
            v = -v
        # Normalize v and save in norm_vectors
        norm_vectors[:, i] = v / norm(v)

    # Return array with normalized vectors along the columns
    return norm_vectors
def compactRepresentation(self, photo):
    arr = scipy.ones((self.compareSize[0], self.compareSize[1], 3)) * scipy.NaN
    if len(photo.shape) == 3:
        # an MxNx3 array of ANY size:
        arr = photo
    elif photo.size == scipy.prod(self.fullSize) * 3:
        # must be NxNx3, with N as in self.fullSize
        arr = photo.reshape((self.fullSize[0], self.fullSize[1], 3))
    return scipy.misc.imresize(arr, self.compareSize).reshape(
        (1, self.totalSize * 3))
def steady_direct_sparse(L, use_umfpack=True):
    """
    Direct solver that uses scipy sparse matrices

    .. note:: Experimental.
    """
    n = prod(L.dims[0][0])
    b = sp.csr_matrix(([1.0], ([0], [0])), shape=(n ** 2, 1))
    M = L.data + sp.csr_matrix(
        (np.ones(n), (np.zeros(n), [nn * (n + 1) for nn in range(n)])),
        shape=(n ** 2, n ** 2))
    # b = sp.csr_matrix(([1.0], ([n**2-1], [0])), shape=(n ** 2, 1))
    # b = np.zeros(n ** 2)
    # b[n**2-1] = 1.0
    # M = L.data + sp.csr_matrix((np.ones(n), (np.ones(n) * (n ** 2 - 1),
    #     [nn * (n + 1) for nn in range(n)])), shape=(n ** 2, n ** 2))

    use_solver(assumeSortedIndices=True, useUmfpack=use_umfpack)
    M.sort_indices()

    v = spsolve(M, b, permc_spec="MMD_AT_PLUS_A", use_umfpack=use_umfpack)

    return Qobj(vec2mat(v), dims=L.dims[0], isherm=True)
def run(self, catalogue1, catalogue2, catalogue3=None, position='Position',
        weight='Weight', nthreads=8):
    pos = [catalogue1[position], catalogue2[position]]
    w = [catalogue1[weight], catalogue2[weight]]
    if (self.ells[-1] != self.ells[0]) and (catalogue3 is None):
        catalogue3 = catalogue2
    if catalogue3 is not None:
        pos.append(catalogue3[position])
        w.append(catalogue3[weight])
    self.set_catalogues(pos, w)
    self.run_3pcf_multi_double_los(nthreads=nthreads)
    # counts is (ells, elld, s, d)
    self.counts = scipy.transpose(self.counts, axes=(2, 3, 0, 1))
    self.weight = {key: self._weight[key].sum() for key in self._weight}
    self.weight_tot = scipy.prod([self.weight[key] for key in self.weight])
    return self
def _steadystate_direct_sparse(L, verbose=False):
    """
    Direct solver that uses scipy sparse matrices
    """
    if verbose:
        print('Starting direct solver...')

    n = prod(L.dims[0][0])
    b = sp.csr_matrix(([1.0], ([0], [0])), shape=(n ** 2, 1), dtype=complex)
    M = L.data + sp.csr_matrix(
        (np.ones(n), (np.zeros(n), [nn * (n + 1) for nn in range(n)])),
        shape=(n ** 2, n ** 2))

    use_solver(assumeSortedIndices=True, useUmfpack=False)
    M.sort_indices()

    if verbose:
        start_time = time.time()

    # Do the actual solving here
    v = spsolve(M, b)

    if verbose:
        print('Direct solver time: ', time.time() - start_time)

    data = vec2mat(v)
    data = 0.5 * (data + data.conj().T)

    return Qobj(data, dims=L.dims[0], isherm=True)
def __init__(self, BoxSize=1., BoxCenter=0., size=None, nbar=None,
             Position='Position', rng=None, seed=None, **attrs):
    self.BoxSize = scipy.empty((3), dtype=scipy.float64)
    self.BoxSize[:] = BoxSize
    self._boxcenter = scipy.empty((3), dtype=scipy.float64)
    self._boxcenter[:] = BoxCenter
    if rng is None:
        rng = scipy.random.RandomState(seed=seed)
    self.rng = rng
    if size is None:
        size = rng.poisson(nbar * scipy.prod(self.BoxSize))
    position = scipy.array([
        rng.uniform(-self.BoxSize[i] / 2. + self._boxcenter[i],
                    self.BoxSize[i] / 2. + self._boxcenter[i],
                    size=size) for i in range(3)
    ]).T
    super(RandomCatalogue, self).__init__(
        columns={Position: position}, BoxSize=BoxSize, BoxCenter=BoxCenter,
        Position=Position, seed=seed, size=size, nbar=nbar, **attrs)
def density(self, x):
    assert x.shape == self.dim, "Problem with the dimensionalities"
    assert x.dtype == int, "x has to be an integer array"

    # return s.prod(stats.binom.pmf(x, self.params["N"], self.theta))
    return s.prod(
        special.binom(self.params["N"], x)
        * self.params["theta"]**x
        * (1 - self.params["theta"])**(self.params["N"] - x))
def normalize(vectors, grid_spacing=1.0):
    """
    Normalizes vectors stored as columns of a 2D numpy array
    """
    G = vectors.shape[0]  # length of each vector
    K = vectors.shape[1]  # number of vectors

    # Set volume element h. This takes a little consideration
    if isinstance(grid_spacing, NUMBER):
        h = grid_spacing
    elif isinstance(grid_spacing, ARRAY):
        grid_spacing = sp.array(grid_spacing)
        h = sp.prod(grid_spacing)
    else:
        # `h` is undefined on this branch, so report grid_spacing instead.
        raise ControlledError('/normalize/ Cannot recognize grid_spacing: %s'
                              % grid_spacing)

    if not (h > 0):
        raise ControlledError('/normalize/ h is not positive: h = %s' % h)

    norm_vectors = sp.zeros([G, K])
    for i in range(K):
        # Extract v from list of vectors
        v = vectors[:, i]
        # Flip sign of v so that last element is non-negative
        if (v[-1] < 0):
            v = -v
        # Normalize v and save in norm_vectors
        norm_vectors[:, i] = v / norm(v)

    # Return array with normalized vectors along the columns
    return norm_vectors
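# A minimal usage sketch (assumes sp = scipy, norm = scipy.linalg.norm, and
# NUMBER/ARRAY covering scalars/arrays, as in the source module):
import scipy as sp

vecs = sp.array([[3.0, 0.0],
                 [4.0, -1.0]])
unit = normalize(vecs)
# Columns have unit length and a non-negative last entry:
# column 0 -> [0.6, 0.8]; column 1 -> [0.0, 1.0] (sign flipped)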
def hist_flatened(im, nbr_bins=10):
    """
    @param im: the (gray-scale) image as numpy/scipy array
    @param nbr_bins: the number of bins
    @return: the bins of the flattened histogram of the image
    """
    # get image histogram
    imhist, bins = histogram(im.flatten(), 1000)

    # only take bins with content into account
    nz = imhist.nonzero()
    imhist = imhist[nz]
    bins = bins[nz]

    # prepare iteration
    bins_final = [bins[0]]  # set initial bin delimiter
    bins_content = scipy.prod(im.shape) / float(nbr_bins)
    tmp_content = 0
    for i in range(len(imhist) - 1):
        tmp_content += imhist[i]
        if tmp_content >= bins_content:  # bin full
            # bins_final.append(bins[i+1])  # add new bin delimiter
            # tmp_content = 0
            # what i got / what i want
            div = float(imhist[i]) / (bins_content - (tmp_content - imhist[i]))
            # append a partial bin border, assuming that the distribution
            # inside the bin is equal
            bins_final.append(bins[i] + (bins[i + 1] - bins[i]) / div)
            tmp_content = imhist[i] - (bins_content - (tmp_content - imhist[i]))

    bins_final.append(im.max() + 1)  # one added to work against rounding errors
    return bins_final
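# A usage sketch (assumes `from numpy import histogram` and `import scipy`,
# matching the names used in the function body): build roughly
# equal-population bin edges for a random image, then label pixels.
import numpy as np
from numpy import histogram
import scipy

im = np.random.rand(64, 64)
edges = hist_flatened(im, nbr_bins=8)
labels = np.digitize(im.flatten(), edges)
# Each label should hold roughly 64*64/8 = 512 pixels.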
def __voxel_4conectedness(self, shape):
    """
    Returns the number of edges for the supplied image shape assuming
    4-connectedness.
    """
    shape = list(shape)
    while 1 in shape:
        shape.remove(1)
    return int(round(sum([(dim - 1) / float(dim) for dim in shape])
                     * scipy.prod(shape)))
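# Why the closed form works: a D-dimensional grid with shape (n1, ..., nD)
# has sum_d (n_d - 1) * prod_{e != d} n_e axis-aligned edges, which equals
# prod(shape) * sum_d (n_d - 1) / n_d. A quick check for a 3x3 image, which
# has 2*3 horizontal plus 3*2 vertical edges:
assert 12 == int(round((2 / 3 + 2 / 3) * 9))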
def scale(network, scale_factor=[1, 1, 1], preserve_vol=False,
          linear_scaling=[False, False, False]):
    r"""
    A method for scaling the coordinates and vertices to create
    anisotropic networks. The original domain volume can be preserved by
    setting preserve_vol = True

    Example
    -------
    >>> import OpenPNM
    >>> import OpenPNM.Utilities.vertexops as vo
    >>> import numpy as np
    >>> pn = OpenPNM.Network.Delaunay(num_pores=100, domain_size=[3,2,1])
    >>> pn.add_boundaries()
    >>> B1 = pn.pores("left_boundary")
    >>> B2 = pn.pores("right_boundary")
    >>> Vol = vo.vertex_dimension(pn,B1,B2)
    >>> vo.scale(network=pn,scale_factor=[2,1,1],preserve_vol=True)
    >>> Vol2 = vo.vertex_dimension(pn,B1,B2)
    >>> np.around(Vol-Vol2,5) == 0.0
    True
    >>> vo.scale(network=pn,scale_factor=[2,1,1],preserve_vol=False)
    >>> Vol3 = vo.vertex_dimension(pn,B1,B2)
    >>> np.around(Vol3/Vol,5) == 2.0
    True
    """
    from scipy.special import cbrt
    import scipy as sp
    minmax = np.around(vertex_dimension(network=network,
                                        face1=network.pores(),
                                        parm='minmax'), 10)
    scale_factor = np.asarray(scale_factor)
    if preserve_vol is True:
        scale_factor = scale_factor / (cbrt(sp.prod(scale_factor)))
    lin_scale = _linear_scale_factor(network["pore.coords"], minmax,
                                     scale_factor, linear_scaling)
    network["pore.coords"] = network["pore.coords"] * lin_scale
    # Cycle through all vertices of all pores updating vertex values
    for pore in network.pores():
        for i, vert in network['pore.vert_index'][pore].items():
            vert_scale = _linear_scale_factor(vert, minmax, scale_factor,
                                              linear_scaling)
            network["pore.vert_index"][pore][i] = vert * vert_scale
    # Cycle through all vertices of all throats updating vertex values
    for throat in network.throats():
        for i, vert in network['throat.vert_index'][throat].items():
            vert_scale = _linear_scale_factor(vert, minmax, scale_factor,
                                              linear_scaling)
            network["throat.vert_index"][throat][i] = vert * vert_scale
    # Scale the vertices on the voronoi diagram stored on the network.
    # These are used for adding boundaries on the Delaunay network class
    vert = network._vor.vertices
    vert_scale = _linear_scale_factor(vert, minmax, scale_factor,
                                      linear_scaling)
    network._vor.vertices = vert * vert_scale
def to_window(self, **params):
    window = WindowFunction2D(**params)
    ells = self.result.ells
    sedges = self.result.sedges
    counts = self.result.counts
    window.poles = [(ell1, ell2) for ell1 in ells[0] for ell2 in ells[1]]
    window.los = self.result.los
    window.s = map(edges_to_mid, sedges)
    window.window = counts.reshape((-1,) + counts.shape[2:])
    if window.zero in window:
        window.error = window.window[window.index(window.zero)]**(1. / 4.)
    volume = (4. * constants.pi)**2 * scipy.prod(
        scipy.meshgrid(*map(radial_volume, sedges), sparse=False,
                       indexing='ij'), axis=0)
    for ill, (ell1, ell2) in enumerate(window):
        window.window[ill] *= (2 * ell1 + 1) * (2 * ell2 + 1) / volume
    window.window /= self.normalization
    if hasattr(window, 'error'):
        window.error /= volume * self.normalization
    window.norm = self.normref
    window.pad_zero()
    return window
def _steadystate_direct_dense(L, ss_args):
    """
    Direct solver that uses numpy dense matrices. Suitable for small
    systems with a few states.
    """
    if settings.debug:
        logger.debug('Starting direct dense solver.')

    dims = L.dims[0]
    n = prod(L.dims[0][0])
    b = np.zeros(n**2)
    b[0] = ss_args['weight']

    L = L.data.todense()
    L[0, :] = np.diag(ss_args['weight'] * np.ones(n)).reshape((1, n**2))
    _dense_start = time.time()
    v = np.linalg.solve(L, b)
    _dense_end = time.time()
    ss_args['info']['solution_time'] = _dense_end - _dense_start
    if ss_args['return_info']:
        ss_args['info']['residual_norm'] = la.norm(b - L * v, np.inf)
    data = vec2mat(v)
    data = 0.5 * (data + data.conj().T)
    return Qobj(data, dims=dims, isherm=True)
def _steadystate_power(L, maxiter=10, tol=1e-6, itertol=1e-5, verbose=False):
    """
    Inverse power method for steady state solving.
    """
    if verbose:
        print('Starting iterative power method Solver...')
    use_solver(assumeSortedIndices=True)
    rhoss = Qobj()
    sflag = issuper(L)
    if sflag:
        rhoss.dims = L.dims[0]
        rhoss.shape = [prod(rhoss.dims[0]), prod(rhoss.dims[1])]
    else:
        rhoss.dims = [L.dims[0], 1]
        rhoss.shape = [prod(rhoss.dims[0]), 1]
    n = prod(rhoss.shape)
    L = L.data.tocsc() - (tol ** 2) * sp.eye(n, n, format='csc')
    L.sort_indices()
    v = mat2vec(rand_dm(rhoss.shape[0], 0.5 / rhoss.shape[0] + 0.5).full())
    if verbose:
        start_time = time.time()
    it = 0
    while (la.norm(L * v, np.inf) > tol) and (it < maxiter):
        v = spsolve(L, v)
        v = v / la.norm(v, np.inf)
        it += 1
    if it >= maxiter:
        raise Exception('Failed to find steady state after ' +
                        str(maxiter) + ' iterations')
    # normalise according to type of problem
    if sflag:
        trow = sp.eye(rhoss.shape[0], rhoss.shape[0], format='coo')
        trow = sp_reshape(trow, (1, n))
        data = v / sum(trow.dot(v))
    else:
        data = v / la.norm(v)  # was `data / la.norm(v)`: used before assignment
    data = sp.csr_matrix(vec2mat(data))
    rhoss.data = 0.5 * (data + data.conj().T)
    rhoss.isherm = True
    if verbose:
        print('Power solver time: ', time.time() - start_time)
    if qset.auto_tidyup:
        return rhoss.tidyup()
    else:
        return rhoss
def threej(j1, j2, j3, m1, m2, m3):
    """
    Calculate the Wigner three-j symbol of three angular momenta
    """

    def bad_values(j1, j2, j3, m1, m2, m3):
        """
        Check validity of supplied values
        """
        if (j1 < abs(j2 - j3) or j1 > (j2 + j3)):
            # Breaking the triangular rule
            return 1
        if (abs(m1) > j1 or abs(m2) > j2 or abs(m3) > j3):
            # Breaking the |m| <= j rule
            return 1
        if m1 + m2 + m3 != 0:
            # Breaking the sum rule
            return 1
        return 0

    if bad_values(j1, j2, j3, m1, m2, m3):
        return 0

    jphase = (-1)**(j1 - j2 - m3)
    # dtype=object keeps Python's arbitrary-precision integers
    # (the original used the Python 2 `long` type here).
    fac = zeros(10, dtype=object)
    fac[0] = factorial(j1 + j2 - j3)
    fac[1] = factorial(j1 - j2 + j3)
    fac[2] = factorial(-j1 + j2 + j3)
    fac[3] = factorial(j1 + m1)
    fac[4] = factorial(j1 - m1)
    fac[5] = factorial(j2 + m2)
    fac[6] = factorial(j2 - m2)
    fac[7] = factorial(j3 + m3)
    fac[8] = factorial(j3 - m3)
    fac[9] = factorial(j1 + j2 + j3 + 1)
    jprodfac = sqrt(prod(fac[0:9]) / fac[9])

    kmax = int(min([(j1 + j2 - j3), (j1 - m1), (j2 + m2)]))
    kmin = int(max([0, -(j3 - j2 + m1), -(j3 - j1 - m2)]))

    jsum = 0
    for k in range(kmin, kmax + 1):
        jsfac = zeros(6, dtype=object)
        jsfac[0] = factorial(k)
        jsfac[1] = factorial(j1 + j2 - j3 - k)
        jsfac[2] = factorial(j1 - m1 - k)
        jsfac[3] = factorial(j2 + m2 - k)
        jsfac[4] = factorial(j3 - j2 + m1 + k)
        jsfac[5] = factorial(j3 - j1 - m2 + k)
        jsum += (-1)**k / prod(jsfac[:])

    return jphase * jprodfac * jsum
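# A spot-check against a tabulated value (assumes zeros/sqrt/prod and a
# factorial function in scope, as in the source module): the symbol
# (1 1 0; 0 0 0) equals -1/sqrt(3).
import numpy as np

assert np.isclose(threej(1, 1, 0, 0, 0, 0), -1.0 / np.sqrt(3.0))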
def __call__(self, Xi, Xj, ni, nj, hyper_deriv=None, symmetric=False):
    """Evaluate the covariance between points `Xi` and `Xj` with
    derivative order `ni`, `nj`.

    Parameters
    ----------
    Xi : :py:class:`Matrix` or other Array-like, (`M`, `N`)
        `M` inputs with dimension `N`.
    Xj : :py:class:`Matrix` or other Array-like, (`M`, `N`)
        `M` inputs with dimension `N`.
    ni : :py:class:`Matrix` or other Array-like, (`M`, `N`)
        `M` derivative orders for set `i`.
    nj : :py:class:`Matrix` or other Array-like, (`M`, `N`)
        `M` derivative orders for set `j`.
    hyper_deriv : Non-negative int or None, optional
        The index of the hyperparameter to compute the first derivative
        with respect to. If None, no derivatives are taken. Default is
        None (no hyperparameter derivatives). Hyperparameter derivatives
        are not supported for `n` > 0 at this time.
    symmetric : bool, optional
        Whether or not the input `Xi`, `Xj` are from a symmetric matrix.
        Default is False.

    Returns
    -------
    Kij : :py:class:`Array`, (`M`,)
        Covariances for each of the `M` `Xi`, `Xj` pairs.

    Raises
    ------
    NotImplementedError
        If hyper_deriv is not None and `n` > 0.
    """
    only_first_order = ((scipy.asarray(ni, dtype=int) == 0).all() and
                        (scipy.asarray(nj, dtype=int) == 0).all())
    if hyper_deriv is not None and not only_first_order:
        raise NotImplementedError("Hyperparameter derivatives with n > 0 "
                                  "have not been implemented!")
    tau = scipy.asarray(Xi - Xj, dtype=float)
    r2l2, l_mat = self._compute_r2l2(tau, return_l=True)
    k = self.params[0]**2 * scipy.exp(-r2l2 / 2.0)
    # Account for derivatives:
    # Get total number of differentiations:
    n_tot_j = scipy.asarray(scipy.sum(nj, axis=1), dtype=int).flatten()
    n_combined = scipy.asarray(ni + nj, dtype=int)
    # Compute factor from the dtau_d/dx_d_j terms in the chain rule:
    j_chain_factors = (-1.0)**(n_tot_j)
    # Compute Hermite polynomial factor:
    hermite_factors = scipy.prod(
        (-1.0 / (scipy.sqrt(2.0) * l_mat))**(n_combined) *
        scipy.special.eval_hermite(n_combined,
                                   tau / (scipy.sqrt(2.0) * l_mat)),
        axis=1)
    k = j_chain_factors * hermite_factors * k
    # Take care of hyperparameter derivatives:
    if hyper_deriv is None:
        return k
    elif hyper_deriv == 0:
        return 2 * k / self.params[0]
    else:
        return ((tau[:, hyper_deriv - 1])**2 /
                (self.params[hyper_deriv - 1])**3 * k)
def delta(a, b, c):
    """
    Calculate delta
    """
    # dtype=object keeps arbitrary-precision integers (was Python 2 `long`)
    fac = zeros(4, dtype=object)
    fac[0] = factorial(a + b - c)
    fac[1] = factorial(a - b + c)
    fac[2] = factorial(-a + b + c)
    fac[3] = factorial(a + b + c + 1)

    return sqrt(prod(fac[0:3]) / fac[3])
def _steadystate_iterative_bicg(L, tol=1e-5, use_precond=True, use_rcm=True,
                                M=None, maxiter=1000, drop_tol=1e-3,
                                diag_pivot_thresh=None, fill_factor=12,
                                verbose=False):
    """
    Iterative steady state solver using the BICG algorithm
    and a sparse incomplete LU preconditioner.
    """
    if verbose:
        print('Starting BICG solver...')
    use_solver(assumeSortedIndices=True, useUmfpack=False)
    dims = L.dims[0]
    n = prod(L.dims[0][0])
    b = np.zeros(n ** 2)
    b[0] = 1.0
    L = L.data.tocsc() + sp.csc_matrix(
        (np.ones(n), (np.zeros(n), [nn * (n + 1) for nn in range(n)])),
        shape=(n ** 2, n ** 2))
    L.sort_indices()
    if use_rcm:
        if verbose:
            print('Original bandwidth ', sparse_bandwidth(L))
        perm = symrcm(L)
        rev_perm = np.argsort(perm)
        L = sparse_permute(L, perm, perm, 'csc')
        b = b[np.ix_(perm,)]
        if verbose:
            print('RCM bandwidth ', sparse_bandwidth(L))

    if M is None and use_precond:
        M = _iterative_precondition(L, n, drop_tol, diag_pivot_thresh,
                                    fill_factor, verbose)

    if verbose:
        start_time = time.time()

    v, check = bicgstab(L, b, tol=tol, M=M)

    if use_rcm:
        v = v[np.ix_(rev_perm,)]

    if check > 0:
        raise Exception("Steadystate solver did not reach tolerance after " +
                        str(check) + " steps.")
    elif check < 0:
        raise Exception("Steadystate solver failed with fatal error: " +
                        str(check) + ".")

    if verbose:
        print('BICG solver time: ', time.time() - start_time)

    data = vec2mat(v)
    data = 0.5 * (data + data.conj().T)
    return Qobj(data, dims=dims, isherm=True)
def variances(self, variances):
    if variances is None:
        self._invvars = None
    else:
        self._invvars = [1.0 / variance for variance in variances]
        self._gconsts = [
            len(variance) * scipy.log(2 * scipy.pi) / -2  # normalizing constant
            - scipy.log(scipy.prod(variance)) / 2  # determinant of covariance
            for variance in variances]
    self._set_matrix()
def compactRepresentation(self, photo):
    arr = scipy.ones((self.compareSize[0], self.compareSize[1], 3)) * scipy.NaN
    if len(photo.shape) == 3:
        # an MxNx3 array of ANY size:
        arr = photo
    elif photo.size == scipy.prod(self.fullSize) * 3:
        # must be NxNx3, with N as in self.fullSize
        arr = photo.reshape((self.fullSize[0], self.fullSize[1], 3))
    greyed = scipy.misc.imresize(arr, self.compareSize) / 3
    greyed = scipy.array(greyed.sum(axis=2), dtype=scipy.uint8)
    return greyed.reshape((1, self.totalSize))