def lqmn(m, n, z):
    """Associated Legendre functions of the second kind, Qmn(z) and its
    derivative, ``Qmn'(z)`` of order m and degree n.  Returns two arrays of
    size ``(m+1, n+1)`` containing ``Qmn(z)`` and ``Qmn'(z)`` for all orders
    from ``0..m`` and degrees from ``0..n``.

    z can be complex.
    """
    if not isscalar(m) or (m < 0):
        raise ValueError("m must be a non-negative integer.")
    if not isscalar(n) or (n < 0):
        raise ValueError("n must be a non-negative integer.")
    if not isscalar(z):
        raise ValueError("z must be scalar.")
    m = int(m)
    n = int(n)

    # Ensure neither m nor n == 0
    mm = max(1, m)
    nn = max(1, n)

    if iscomplex(z):
        q, qd = specfun.clqmn(mm, nn, z)
    else:
        q, qd = specfun.lqmn(mm, nn, z)
    return q[:(m + 1), :(n + 1)], qd[:(m + 1), :(n + 1)]
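# A minimal usage sketch for the lqmn above, assuming it is the SciPy
# implementation (scipy.special.lqmn): for real z > 1 the closed form
# Q_0^0(z) = 0.5*log((z + 1)/(z - 1)) gives an independent check of the
# (0, 0) entry of the returned array.
import numpy as np
from scipy.special import lqmn

q, qd = lqmn(2, 2, 1.5)          # Qmn(1.5) and Qmn'(1.5) for m, n in 0..2
assert q.shape == (3, 3) and qd.shape == (3, 3)
assert np.isclose(q[0, 0], 0.5 * np.log(2.5 / 0.5))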
def __init__(self, wpoints, gsphere, wggmat, inord="C"):
    """
    Args:
        gsphere: |GSphere| with G-vectors and k-point object.
        wpoints: Complex frequency points in Hartree.
        wggmat: [nw, ng, ng] complex array.
        inord: storage order of ``wggmat``. If inord == "F", ``wggmat`` is in
            Fortran column-major order. Default: "C" i.e. C row-major order.
    """
    self.wpoints = np.array(wpoints, dtype=complex)
    self.gsphere = gsphere
    self.wggmat = np.reshape(wggmat, (self.nw, self.ng, self.ng))

    if inord.lower() == "f":
        # Fortran to C.
        for iw, _ in enumerate(wpoints):
            self.wggmat[iw] = self.wggmat[iw].T.copy()

    for i in (1, 2):
        assert len(gsphere) == wggmat.shape[-i]
    assert len(self.wpoints) == len(self.wggmat)

    # Find number of real/imaginary frequencies.
    self.nrew = self.nw
    self.nimw = 0
    for i, w in enumerate(self.wpoints):
        if np.iscomplex(w):
            self.nrew = i
            break

    self.nimw = self.nw - self.nrew
    if self.nimw and not np.all(np.iscomplex(self.wpoints[self.nrew+1:])):
        raise ValueError("wpoints should contain real points packed in the first positions\n"
                         "followed by imaginary points but got: %s" % str(self.wpoints))
def average_structure(X):
    """
    Calculate an average structure from an ensemble of structures
    (i.e. X is a rank-3 tensor: X[i] is a (N,3) configuration matrix).

    @param X: m x n x 3 input vector
    @type X: numpy array

    @return: average structure
    @rtype: (n,3) numpy.array
    """
    from numpy.linalg import eigh

    B = csb.numeric.gower_matrix(X)
    v, U = eigh(B)
    if numpy.iscomplex(v).any():
        v = v.real
    if numpy.iscomplex(U).any():
        U = U.real

    indices = numpy.argsort(v)[-3:]
    v = numpy.take(v, indices, 0)
    U = numpy.take(U, indices, 1)

    x = U * numpy.sqrt(v)
    i = 0
    while is_mirror_image(x, X[0]) and i < 2:
        x[:, i] *= -1
        i += 1
    return x
def __init__(self, qpoint, wpts, gsphere, wggmat, inord="C"):
    """
    Args:
        qpoint: Q-point object.
        wpts: Frequency points in Ha.
        wggmat: numpy array of shape [nw, ng, ng].
        inord: storage order of wggmat. If inord == "F", wggmat is in
            Fortran column-major order. Default: "C" i.e. C row-major order.
    """
    self.qpoint = qpoint
    self.wpts = wpts
    self.gsphere = gsphere
    self.wggmat = np.reshape(wggmat, (self.nw, self.ng, self.ng))

    if inord == "F":
        # Fortran to C.
        for iw in range(len(wpts)):
            self.wggmat[iw] = self.wggmat[iw].T

    for i in (1, 2):
        assert len(gsphere) == wggmat.shape[-i]
    assert len(self.wpts) == len(self.wggmat)

    # Find number of real/imaginary frequencies.
    self.nrew = self.nw
    self.nimw = 0
    for i, w in enumerate(self.wpts):
        if np.iscomplex(w):
            self.nrew = i
            break

    self.nimw = self.nw - self.nrew
    if self.nimw and not np.all(np.iscomplex(self.wpts[self.nrew+1:])):
        raise ValueError("wpts should contain real points packed in the first positions\n"
                         "followed by imaginary points but got: %s" % str(self.wpts))
def test_random_like(self):
    """Test that the random_like function produces sensible data."""
    # Try for floats and complex data
    for dtype in [np.float32, np.float64, np.complex64, np.complex128]:
        # Test random array creation with same
        # shape and type as existing array
        shape = (np.random.randint(1, 50), np.random.randint(1, 50))
        ary = np.empty(shape=shape, dtype=dtype)
        random_ary = mbu.random_like(ary)

        # Test that the shape and type is correct
        self.assertTrue(random_ary.shape == ary.shape)
        self.assertTrue(random_ary.dtype == dtype)

        # Test that we're getting complex data out
        if np.issubdtype(dtype, np.complexfloating):
            proportion_cplx = np.sum(np.iscomplex(random_ary)) / random_ary.size
            self.assertTrue(proportion_cplx > 0.9)

        # Test random array creation with supplied shape and type
        shape = (np.random.randint(1, 50), np.random.randint(1, 50))
        random_ary = mbu.random_like(shape=shape, dtype=dtype)

        # Test that the shape and type is correct
        self.assertTrue(random_ary.shape == shape)
        self.assertTrue(random_ary.dtype == dtype)

        # Test that we're getting complex data out
        if np.issubdtype(dtype, np.complexfloating):
            proportion_cplx = np.sum(np.iscomplex(random_ary)) / random_ary.size
            self.assertTrue(proportion_cplx > 0.9)
def min(X, Y=[], axis=1):
    # MATLAB-style min: 1-based axis argument; complex values are compared
    # by magnitude, as MATLAB does.
    axis -= 1
    tX, tY = X, Y
    if _N.iscomplex(X.flat[0]):
        tX = abs(X)
    if len(tY) > 0:
        if _N.iscomplex(Y.flat[0]):
            tY = abs(Y)
        return _N.minimum(tX, tY)
    else:
        nargout = _get_nargout()
        if nargout == 1:
            return _N.min(tX, axis)
        elif nargout == 2:
            # slow
            i = _N.argmin(tX, axis)
            return _N.min(tX, axis), i
            # i = _N.argmin(tX, axis)
            # sh = X.shape
            # index = [slice(0, x, 1) for x in sh]
            # if axis == 0:
            #     index[1] = range(sh[1])
            # else:
            #     index[0] = range(sh[0])
            # index[axis] = i
            # return _N.ndarray.__getslice__(index)
        else:
            raise Exception('too many output vals')
def trapz2d(x_gpu, dx=1.0, dy=1.0, handle=None): """ 2D trapezoidal integration. Parameters ---------- x_gpu : pycuda.gpuarray.GPUArray Input matrix to integrate. dx : float X-axis spacing. dy : float Y-axis spacing handle : int CUBLAS context. If no context is specified, the default handle from `skcuda.misc._global_cublas_handle` is used. Returns ------- result : float Definite double integral as approximated by the trapezoidal rule. Examples -------- >>> import pycuda.autoinit >>> import pycuda.gpuarray >>> import numpy as np >>> import integrate >>> integrate.init() >>> x = np.asarray(np.random.rand(10, 10), np.float32) >>> x_gpu = gpuarray.to_gpu(x) >>> z = integrate.trapz2d(x_gpu) >>> np.allclose(np.trapz(np.trapz(x)), z) True """ if handle is None: handle = misc._global_cublas_handle if len(x_gpu.shape) != 2: raise ValueError('input array must be 2D') if np.iscomplex(dx) or np.iscomplex(dy): raise ValueError('dx and dy must be real') float_type = x_gpu.dtype.type if float_type == np.complex64: cublas_func = cublas.cublasCdotu elif float_type == np.float32: cublas_func = cublas.cublasSdot elif float_type == np.complex128: cublas_func = cublas.cublasZdotu elif float_type == np.float64: cublas_func = cublas.cublasDdot else: raise ValueError('unsupported input type') trapz_mult_gpu = gen_trapz2d_mult(x_gpu.shape, float_type) result = cublas_func(handle, x_gpu.size, x_gpu.gpudata, 1, trapz_mult_gpu.gpudata, 1) return float_type(dx)*float_type(dy)*result
def is_spd(M, decimal=15):
    """Check whether the input matrix is real symmetric positive definite.

    M must be symmetric down to the specified number of decimal places and
    contain no complex entry. Positive definiteness is verified by checking
    that all eigenvalues are positive.

    Parameters
    ==========
    M: numpy.ndarray
        matrix.

    Returns
    =======
    answer: boolean
        True if matrix is symmetric real positive definite, False otherwise.
    """
    if not np.allclose(M, M.T, atol=0.1 ** decimal):
        print("matrix not symmetric to {0} decimals".format(decimal))
        return False

    if np.any(np.iscomplex(M)):
        print("matrix has a non real value {0}".format(M[np.iscomplex(M)][0]))
        return False

    eigvalsh = np.linalg.eigvalsh(M)
    ispd = eigvalsh.min() > 0
    if not ispd:
        print("matrix has a negative eigenvalue: %.3f" % eigvalsh.min())
    return ispd
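# Hedged usage sketch for is_spd above: a Gram matrix B @ B.T plus a small
# ridge is symmetric positive definite by construction, so the check should
# pass; its negation should fail on the eigenvalue test.
import numpy as np

rng = np.random.RandomState(0)
B = rng.randn(5, 5)
M = B @ B.T + 1e-6 * np.eye(5)
M = 0.5 * (M + M.T)     # guard against last-bit rounding asymmetry
assert is_spd(M)
assert not is_spd(-M)   # all eigenvalues negative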
def fitToData(self, data):
    '''
    param data: numpy array where [:, 0] is x and [:, 1] is y
    '''
    x = data[:, 0][:, np.newaxis]
    y = data[:, 1][:, np.newaxis]
    D = np.hstack((x*x, x*y, y*y, x, y, np.ones_like(x)))
    S = np.dot(D.T, D)
    C = np.zeros([6, 6])
    C[0, 2] = C[2, 0] = 2
    C[1, 1] = -1
    E, V = eig(np.dot(inv(S), C))
    n = np.argmax(np.abs(E))
    self.parameters = V[:, n]

    axes = self.ellipse_axis_length()
    self.a = axes[0]
    self.b = axes[1]
    self.angle = self.ellipse_angle_of_rotation()

    if not self.a or not self.b or self.parameters is None or np.iscomplexobj(self.parameters) or \
            math.isnan(self.a) or math.isnan(self.b) or math.isnan(self.ellipse_center()[0]) or \
            np.iscomplex(self.ellipse_center()[0]) or np.iscomplex(self.a) or np.iscomplex(self.b) or \
            np.iscomplexobj(self.angle):
        self.a = 0
        self.b = 0
        self.parameters = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
        self.angle = 0
        self.error = True
def solution_not_acceptable(P=-5e8, T=293.15):
    """
    Returns True if the newly calculated values of P or T are problematic
    (either complex, positive, or not calculable).
    """
    a = np.any(np.isnan(P)) or np.any(np.iscomplex(P)) or np.any(P > 0)
    b = np.any(np.isnan(T)) or np.any(np.iscomplex(T))
    return a or b
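# Quick sanity check of solution_not_acceptable, using the defaults above:
# a negative real pressure and finite temperature pass, while a positive
# pressure or a NaN temperature trips the flag.
import numpy as np

assert not solution_not_acceptable(P=-5e8, T=293.15)
assert solution_not_acceptable(P=np.array([1e5]), T=293.15)   # positive P
assert solution_not_acceptable(P=-5e8, T=np.nan)              # NaN T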
def _check(self, res, ref): if hasattr(res, "get_x"): x = res.get_x() for k in list(res.keys()): if np.all(res[k] == x): continue elif np.any(np.iscomplex(res[k])) or np.any(np.iscomplex(ref[k])): # Interpolate Re and Im of the results to compare. x = x.reshape((-1,)) refx = ref[ref.x].reshape((-1,)) d1 = InterpolatedUnivariateSpline(x, np.real(res[k]).reshape((-1,))) d2 = InterpolatedUnivariateSpline(refx, np.real(ref[k]).reshape((-1,))) ok(d1(x), d2(x), rtol=self.er, atol=self.ea, msg=("Test %s FAILED (Re)" % self.test_id)) d1 = InterpolatedUnivariateSpline(x, np.imag(res[k]).reshape((-1,))) d2 = InterpolatedUnivariateSpline(refx, np.imag(ref[k]).reshape((-1,))) ok(d1(x), d2(x), rtol=self.er, atol=self.ea, msg=("Test %s FAILED (Im)" % self.test_id)) else: # Interpolate the results to compare. x = x.reshape((-1,)) refx = ref[ref.x].reshape((-1,)) d1 = InterpolatedUnivariateSpline(x, np.real_if_close(res[k]).reshape((-1,))) d2 = InterpolatedUnivariateSpline(refx, np.real_if_close(ref[k]).reshape((-1,))) ok(d1(x), d2(x), rtol=self.er, atol=self.ea, msg=("Test %s FAILED" % self.test_id)) elif isinstance(res, results.op_solution): for k in list(res.keys()): assert k in ref ok(res[k], ref[k], rtol=self.er, atol=self.ea, msg=("Test %s FAILED" % self.test_id)) elif isinstance(res, results.pz_solution): # recover the reference signularities from Re/Im data ref_sing_keys = list(ref.keys())[:] ref_sing_keys.sort() assert len(ref_sing_keys) % 2 == 0 ref_sing = [ ref[ref_sing_keys[int(len(ref_sing_keys) / 2) + k]] + ref[ref_sing_keys[k]] * 1j for k in range(int(len(ref_sing_keys) / 2)) ] ref_poles_num = len([k for k in ref.keys() if k[:4] == "Re(p"]) poles_ref, zeros_ref = ref_sing[:ref_poles_num], ref_sing[ref_poles_num:] assert len(poles_ref) == len(res.poles) pz._check_singularities(res.poles, poles_ref) assert len(zeros_ref) == len(res.zeros) pz._check_singularities(res.zeros, zeros_ref) else: if isinstance(res, list) or isinstance(res, tuple): for i, j in zip(res, ref): self._check(i, j) elif res is not None: for k in list(res.keys()): assert k in ref if isinstance(res[k], dict): # hence ref[k] will be a dict too self._check(res[k], ref[k]) elif isinstance(ref[k], sympy.Basic) and isinstance(res[k], sympy.Basic): # get rid of assumptions. Evaluate only expression rf = parse_expr(str(ref[k])) rs = parse_expr(str(res[k])) assert (rs == rf) or (sympy.simplify(rf / rs) == 1) else: assert res[k] == ref[k]
def test_simple(self):
    a = [[8, 12, 3], [2, 9, 3], [10, 3, 6]]
    t, z = schur(a)
    assert_array_almost_equal(dot(dot(z, t), transp(conj(z))), a)
    tc, zc = schur(a, 'complex')
    assert_(any(ravel(iscomplex(zc))) and any(ravel(iscomplex(tc))))
    assert_array_almost_equal(dot(dot(zc, tc), transp(conj(zc))), a)
    tc2, zc2 = rsf2csf(tc, zc)
    assert_array_almost_equal(dot(dot(zc2, tc2), transp(conj(zc2))), a)
def sph_yn(n, z):
    idx = np.isreal(z)
    out = _sph_yn_bessel(n, z)
    if np.any(idx):
        # Ascending recurrence is more accurate for real z
        out[idx] = _sph_yn_a_recur(n[idx], z[idx])
    if np.any(np.iscomplex(out)):
        out[np.logical_and(np.isnan(out), np.iscomplex(out))] = np.inf*(1+1j)
    return out
def allsortedclose(a, b, atol=1e-3, rtol=1e-3):
    if np.iscomplex(a).any():
        a = np.sort_complex(a)
    else:
        a = np.sort(a)
    if np.iscomplex(b).any():
        b = np.sort_complex(b)
    else:
        b = np.sort(b)
    return np.allclose(a, b, rtol=rtol, atol=atol)
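# allsortedclose compares two value sets regardless of ordering (useful for
# eigenvalues); a short example with permuted inputs:
import numpy as np

assert allsortedclose(np.array([3.0, 1.0, 2.0]), np.array([1.0, 2.0, 3.0]))
# complex inputs are routed through np.sort_complex instead of np.sort
assert allsortedclose(np.array([1j, 1.0]), np.array([1.0, 1j]))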
def _iter_initialize(self): """ Perform any necessary pre-processing operations. Returns ------- float Initial relative error in the user-specified residuals. float Initial absolute error in the user-specified residuals. """ system = self._system if self.options['debug_print']: self._err_cache['inputs'] = self._system._inputs._copy_views() self._err_cache['outputs'] = self._system._outputs._copy_views() # Convert local storage if we are under complex step. if system.under_complex_step: if np.iscomplex(self.xm[0]): self.Gm = self.Gm.astype(np.complex) self.xm = self.xm.astype(np.complex) self.fxm = self.fxm.astype(np.complex) elif np.iscomplex(self.xm[0]): self.Gm = self.Gm.real self.xm = self.xm.real self.fxm = self.fxm.real self._converge_failures = 0 self._computed_jacobians = 0 # Execute guess_nonlinear if specified. system._guess_nonlinear() # When under a complex step from higher in the hierarchy, sometimes the step is too small # to trigger reconvergence, so nudge the outputs slightly so that we always get at least # one iteration of Broyden. if system.under_complex_step and self.options['cs_reconverge']: system._outputs._data += np.linalg.norm(self._system._outputs._data) * 1e-10 # Start with initial states. self.xm = self.get_states() with Recording('Broyden', 0, self): self._solver_info.append_solver() # should call the subsystems solve before computing the first residual self._gs_iter() self._solver_info.pop() self._run_apply() norm = self._iter_get_norm() norm0 = norm if norm != 0.0 else 1.0 return norm0, norm
def time_correlations_direct(P, pi, obs1, obs2=None, times=[1]):
    r"""Compute time-correlations of obs1, or time-cross-correlation with obs2.

    The time-correlation at time=k is computed by the matrix-vector expression:
    cor(k) = obs1' diag(pi) P^k obs2

    Parameters
    ----------
    P : ndarray, shape=(n, n) or scipy.sparse matrix
        Transition matrix
    obs1 : ndarray, shape=(n)
        Vector representing observable 1 on discrete states
    obs2 : ndarray, shape=(n)
        Vector representing observable 2 on discrete states. If not given,
        the autocorrelation of obs1 will be computed
    pi : ndarray, shape=(n)
        stationary distribution vector. Will be computed if not given
    times : array-like, shape(n_t)
        Vector of time points at which the (auto)correlation will be evaluated

    Returns
    -------
    correlations : ndarray, shape(n_t)
    """
    n_t = len(times)
    times = np.sort(times)  # sort it to use caching of previously computed correlations
    f = np.zeros(n_t)

    # maximum time > number of rows?
    use_diagonalization = times[-1] > P.shape[0]
    if use_diagonalization:
        R, D, L = rdl_decomposition(P)
        # discard imaginary part, if all elements i=0
        if not np.any(np.iscomplex(R)):
            R = np.real(R)
        if not np.any(np.iscomplex(D)):
            D = np.real(D)
        if not np.any(np.iscomplex(L)):
            L = np.real(L)
        rdl = (R, D, L)
        for i in xrange(n_t):
            f[i] = time_correlation_by_diagonalization(P, pi, obs1, obs2,
                                                       times[i], rdl)
    else:
        start_values = None
        for i in xrange(n_t):
            f[i], start_values = \
                time_correlation_direct_by_mtx_vec_prod(P, pi, obs1, obs2,
                                                        times[i], start_values, True)
    return f
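# The docstring formula cor(k) = obs1' diag(pi) P^k obs2 can be checked
# directly with dense numpy on a tiny chain; this is a restatement of the
# matrix-vector expression, not the library's optimized code path.
import numpy as np

P = np.array([[0.9, 0.1],
              [0.2, 0.8]])
pi = np.array([2.0 / 3.0, 1.0 / 3.0])   # stationary: pi @ P == pi
obs = np.array([1.0, -1.0])
k = 3
cor_k = obs @ np.diag(pi) @ np.linalg.matrix_power(P, k) @ obs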
def __dot__(self, other):
    r1 = self.r
    r2 = other.r
    d = self.d
    if np.iscomplex(self.core).any() or np.iscomplex(other.core).any():
        dt = np.zeros(r1[0]*r2[0]*r1[d]*r2[d], dtype=complex)
        dt = tt_f90.tt_f90.ztt_dotprod(self.n, r1, r2, self.ps, other.ps,
                                       self.core+0j, other.core+0j, dt.size)
    else:
        dt = np.zeros(r1[0]*r2[0]*r1[d]*r2[d])
        dt = tt_f90.tt_f90.dtt_dotprod(self.n, r1, r2, self.ps, other.ps,
                                       np.real(self.core), np.real(other.core), dt.size)
    if dt.size == 1:
        dt = dt[0]
    return dt
def testConnectLapEig(g):
    begin = time.time()
    print ''
    print '-----'
    print 'Computing eigenvalues of L..'
    n = len(g.nodes())
    L = np.zeros((n, n))
    for x, i in g.edges():
        L[x, i] = -1
        L[i, x] = -1
    for x in g.nodes():
        if (x, x) in g.edges():
            L[x, x] = g.degree(x) - 2
        else:
            L[x, x] = g.degree(x)

    w, v = LA.eig(L)
    w = sorted(list(w))

    print ''
    print 'elapsed time:', time.time() - begin, ' s'
    print ''
    print 'the eigenvalues of L are:'
    c = 0
    for x in w:
        if np.iscomplex(x):
            print 'Complex eigenvalue:', x
        else:
            print float(np.where(x < 1e-10, 0, x))
        c = c + 1
        if c == 4:
            print 'and more..'
            break

    if np.iscomplex(w[1]):
        print ''
        print 'the second smallest eigenvalue is complex:', w[1]
        print ''
        secondSmallestEig = np.real(w[1])
    else:
        secondSmallestEig = float(np.where(w[1] < 1e-10, 0, w[1]))
    print ''
    print 'the second smallest eigenvalue is:', secondSmallestEig
    print ''
    if secondSmallestEig > 0:
        print 'which is positive: the graph is connected'
        print '-----'
        return True
    else:
        print 'which is not positive: the graph is disconnected'
        print '-----'
        return False
def get_projected_coordinates(self):
    self.compute_polynomials()
    # returns projected x, y coordinates based on the calculated parabolas
    print(self.z_x_poly.r)
    print(self.z_y_poly.r)
    x_coord = self.z_x_poly.r[0]
    y_coord = self.z_y_poly.r[0]
    if np.iscomplex(x_coord) or np.iscomplex(y_coord):
        return (None, None)
    return (x_coord, y_coord)
def time_relaxations_direct(P, p0, obs, times=[1]):
    r"""Compute time-relaxations of obs with respect to a given initial distribution.

    relaxation(k) = p0 P^k obs

    Parameters
    ----------
    P : ndarray, shape=(n, n) or scipy.sparse matrix
        Transition matrix
    p0 : ndarray, shape=(n)
        initial distribution
    obs : ndarray, shape=(n)
        Vector representing observable on discrete states.
    times : array-like, shape(n_t)
        Vector of time points at which the (auto)correlation will be evaluated

    Returns
    -------
    relaxations : ndarray, shape(n_t)
    """
    n_t = len(times)
    times = np.sort(times)

    # maximum time > number of rows?
    use_diagonalization = times[-1] > P.shape[0]
    if use_diagonalization:
        R, D, L = rdl_decomposition(P)
        # discard imaginary part, if all elements i=0
        if not np.any(np.iscomplex(R)):
            R = np.real(R)
        if not np.any(np.iscomplex(D)):
            D = np.real(D)
        if not np.any(np.iscomplex(L)):
            L = np.real(L)
        rdl = (R, D, L)
        f = np.empty(n_t, dtype=D.dtype)
        for i in xrange(n_t):
            f[i] = time_relaxation_direct_by_diagonalization(
                P, p0, obs, times[i], rdl)
    else:
        f = np.empty(n_t)
        start_values = None
        for i in xrange(n_t):
            f[i], start_values = time_relaxation_direct_by_mtx_vec_prod(
                P, p0, obs, times[i], start_values, True)
    return f
def test_SparseValsOnly():
    """
    Sparse eigvals only Hermitian.
    """
    H = rand_herm(10)
    spvals = H.eigenenergies(sparse=True)
    assert_equal(len(spvals), 10)
    # check that sorting is lowest eigval first
    assert_equal(spvals[0] <= spvals[-1], True)
    # check that spvals equal expect vals
    for k in range(10):
        # check that output is real for Hermitian operator
        assert_equal(isreal(spvals[k]), True)
    spvals = H.eigenenergies(sparse=True, sort='high')
    # check that sorting is highest eigval first
    assert_equal(spvals[0] >= spvals[-1], True)
    spvals = H.eigenenergies(sparse=True, sort='high', eigvals=4)
    assert_equal(len(spvals), 4)

    U = rand_unitary(10)
    spvals = U.eigenenergies(sparse=True)
    assert_equal(len(spvals), 10)
    # check that sorting is lowest eigval first
    assert_equal(spvals[0] <= spvals[-1], True)
    # check that spvals equal expect vals
    for k in range(10):
        # check that output is complex for a unitary (non-Hermitian) operator
        assert_equal(iscomplex(spvals[k]), True)
    spvals = U.eigenenergies(sparse=True, sort='high')
    # check that sorting is highest eigval first
    assert_equal(spvals[0] >= spvals[-1], True)
    spvals = U.eigenenergies(sparse=True, sort='high', eigvals=4)
    assert_equal(len(spvals), 4)
def __init__(self, data, info, verbose=None): dtype = np.complex128 if np.any(np.iscomplex(data)) else np.float64 data = np.asanyarray(data, dtype=dtype) if data.ndim != 2: raise ValueError('data must be a 2D array') logger.info('Creating RawArray with %s data, n_channels=%s, n_times=%s' % (dtype.__name__, data.shape[0], data.shape[1])) if len(data) != len(info['ch_names']): raise ValueError('len(data) does not match len(info["ch_names"])') assert len(info['ch_names']) == info['nchan'] cals = np.zeros(info['nchan']) for k in range(info['nchan']): cals[k] = info['chs'][k]['range'] * info['chs'][k]['cal'] self.verbose = verbose self.cals = cals self.rawdir = None self.proj = None self.comp = None self._filenames = list() self._preloaded = True self.info = info self._data = data self.first_samp, self.last_samp = 0, self._data.shape[1] - 1 self._times = np.arange(self.first_samp, self.last_samp + 1) / info['sfreq'] self._projectors = list() logger.info(' Range : %d ... %d = %9.3f ... %9.3f secs' % ( self.first_samp, self.last_samp, float(self.first_samp) / info['sfreq'], float(self.last_samp) / info['sfreq'])) logger.info('Ready.')
def round(self,eps): """Applies TT rounding procedure to the TT-tensor and **returns rounded tensor**. :param eps: Rounding accuracy. :type eps: float :returns: tensor -- rounded TT-tensor. Usage example: >>> a = tt.ones(2, 10) >>> b = a + a >>> print b.r array([1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1], dtype=int32) >>> b = b.round(1E-14) >>> print b.r array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], dtype=int32) """ c=tensor() c.n=self.n c.d=self.d c.r=self.r.copy() c.ps=self.ps.copy() if ( np.iscomplex(self.core).any() ): tt_f90.tt_f90.ztt_compr2(c.n,c.r,c.ps,self.core,eps) c.core = tt_f90.tt_f90.zcore.copy() else: tt_f90.tt_f90.dtt_compr2(c.n,c.r,c.ps,self.core,eps) c.core=tt_f90.tt_f90.core.copy() tt_f90.tt_f90.tt_dealloc() return c
def __init__(self, data, info, tmin, comment='', nave=1, kind='average',
             verbose=None):
    dtype = np.complex128 if np.any(np.iscomplex(data)) else np.float64
    data = np.asanyarray(data, dtype=dtype)

    if data.ndim != 2:
        raise ValueError('Data must be a 2D array of shape (n_channels, '
                         'n_samples)')

    if len(info['ch_names']) != np.shape(data)[0]:
        raise ValueError('Info and data must have same number of '
                         'channels.')

    self.data = data

    # XXX: this should use round and be tested
    self.first = int(tmin * info['sfreq'])
    self.last = self.first + np.shape(data)[-1] - 1
    self.times = np.arange(self.first, self.last + 1, dtype=float)
    self.times /= info['sfreq']

    self.info = info
    self.nave = nave
    self.kind = kind
    self.comment = comment
    self.proj = None
    self.picks = None
    self.verbose = verbose
    self._projector = None

    if self.kind == 'average':
        self._aspect_kind = aspect_dict['average']
    else:
        self._aspect_kind = aspect_dict['standard_error']
def __init__(self, data, info, first_samp=0, copy='auto', verbose=None): # noqa: D102 _validate_type(info, "info") _check_option('copy', copy, ('data', 'info', 'both', 'auto', None)) dtype = np.complex128 if np.any(np.iscomplex(data)) else np.float64 orig_data = data data = np.asanyarray(orig_data, dtype=dtype) if data.ndim != 2: raise ValueError('Data must be a 2D array of shape (n_channels, ' 'n_samples), got shape %s' % (data.shape,)) if len(data) != len(info['ch_names']): raise ValueError('len(data) (%s) does not match ' 'len(info["ch_names"]) (%s)' % (len(data), len(info['ch_names']))) assert len(info['ch_names']) == info['nchan'] if copy in ('auto', 'info', 'both'): info = info.copy() if copy in ('data', 'both'): if data is orig_data: data = data.copy() elif copy != 'auto' and data is not orig_data: raise ValueError('data copying was not requested by copy=%r but ' 'it was required to get to double floating point ' 'precision' % (copy,)) logger.info('Creating RawArray with %s data, n_channels=%s, n_times=%s' % (dtype.__name__, data.shape[0], data.shape[1])) super(RawArray, self).__init__(info, data, first_samps=(int(first_samp),), dtype=dtype, verbose=verbose) logger.info(' Range : %d ... %d = %9.3f ... %9.3f secs' % ( self.first_samp, self.last_samp, float(self.first_samp) / info['sfreq'], float(self.last_samp) / info['sfreq'])) logger.info('Ready.')
def specificvibrations(adsorbate, surface):
    """Reads in the vibrations for an adsorbate on a specific surface."""
    if surface is None:
        raise RuntimeError('A surface must be specified for specific '
                           'vibrations.')
    filename = '_'.join([adsorbate, surface])
    filename = os.path.join(datapath, 'specific-vibrations', filename)
    f = open(filename)
    d = pickle.load(f)
    f.close()
    if 'remark' in d:
        if d['remark']:
            print('Message from pickle %s:\n %s' % (filename, d['remark']))
    if 'vibrations' in d:
        realvibs = []
        for vib in d['vibrations']:
            if np.real(vib) > 0:
                realvibs.append(float(np.real(vib)))
        vib_energies = np.array(realvibs)
        #vib_energies = d['vibrations']
    else:
        raise RuntimeError('No vibrations for %s on %s.' %
                           (adsorbate, surface))
    # Check to see if any frequencies are imaginary, and report to user.
    if sum(np.iscomplex(vib_energies)) > 0:
        print('WARNING: Imaginary frequencies encountered for %s on %s.' %
              (adsorbate, surface))
    return vib_energies
def find_extrema(self, xmin=None, xmax=None):
    '''Find the position and values of the maxima/minima.  Returns a tuple:
       (roots, vals, ypps) where roots are the x-values where the extrema
       occur, vals are the y-values at these points, and ypps are the 2nd
       derivatives.  Optionally, restrict roots to between xmin and xmax.'''
    if self.realization is not None:
        poly = self.realization
    else:
        poly = self.poly
    if xmin is None:
        xmin = self.poly.domain[0]
    if xmax is None:
        xmax = self.poly.domain[1]
    if not self.setup:
        self._setup()

    d1 = poly.deriv(m=1)
    d2 = poly.deriv(m=2)
    roots = d1.roots()
    # Roots can be complex.  Want only the real ones
    gids = num.iscomplex(roots)
    roots = num.real(roots[~gids])
    gids = num.greater_equal(roots, xmin)*num.less_equal(roots, xmax)
    roots = roots[gids]
    if len(roots) == 0:
        return num.array([]), num.array([]), num.array([])
    vals = self.__call__(roots)
    curvs = d2(roots)
    curvs = num.where(curvs < 0, -1, curvs)
    curvs = num.where(curvs > 0, 1, curvs)
    return roots, vals[0], curvs
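# A standalone sketch of the extrema logic above, using numpy.polynomial
# directly: roots of the first derivative, complex roots discarded via
# np.iscomplex, and the sign of the second derivative classifying each point.
import numpy as np

poly = np.polynomial.Polynomial([0.0, 0.0, -1.0, 0.0, 0.25])  # 0.25*x**4 - x**2
d1, d2 = poly.deriv(1), poly.deriv(2)
roots = d1.roots()                                 # 0, +/-sqrt(2)
real_roots = np.real(roots[~np.iscomplex(roots)])  # keep only real critical points
curvatures = np.sign(d2(real_roots))               # -1 -> maximum, +1 -> minimum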
def __init__(self, data, info, first_samp=0, verbose=None): if not isinstance(info, Info): raise TypeError('info must be an instance of Info, got %s' % type(info)) dtype = np.complex128 if np.any(np.iscomplex(data)) else np.float64 data = np.asanyarray(data, dtype=dtype) if data.ndim != 2: raise ValueError('Data must be a 2D array of shape (n_channels, ' 'n_samples') logger.info('Creating RawArray with %s data, n_channels=%s, n_times=%s' % (dtype.__name__, data.shape[0], data.shape[1])) if len(data) != len(info['ch_names']): raise ValueError('len(data) does not match len(info["ch_names"])') assert len(info['ch_names']) == info['nchan'] if info.get('buffer_size_sec', None) is None: info['buffer_size_sec'] = 1. # reasonable default super(RawArray, self).__init__(info, data, first_samps=(int(first_samp),), verbose=verbose) logger.info(' Range : %d ... %d = %9.3f ... %9.3f secs' % ( self.first_samp, self.last_samp, float(self.first_samp) / info['sfreq'], float(self.last_samp) / info['sfreq'])) logger.info('Ready.')
def __init__(self, a=None, eps=1e-14, rmax=100000):
    if a is None:
        self.core = _np.array([0.0])
        self.d = 0
        self.n = _np.array([0])
        self.r = _np.array([1], dtype=int)
        self.ps = _np.array([0], dtype=int)
        return
    self.d = a.ndim
    self.n = _np.array(a.shape, dtype=_np.int32)
    r = _np.zeros((self.d + 1,), dtype=_np.int32)
    ps = _np.zeros((self.d + 1,), dtype=_np.int32)
    if _np.iscomplex(a).any():
        if rmax is not None:
            self.r, self.ps = _tt_f90.tt_f90.zfull_to_tt(
                a.flatten('F'), self.n, self.d, eps, rmax)
        else:
            self.r, self.ps = _tt_f90.tt_f90.zfull_to_tt(
                a.flatten('F'), self.n, self.d, eps)
        self.core = _tt_f90.tt_f90.zcore.copy()
    else:
        if rmax is not None:
            self.r, self.ps = _tt_f90.tt_f90.dfull_to_tt(
                _np.real(a).flatten('F'), self.n, self.d, eps, rmax)
        else:
            self.r, self.ps = _tt_f90.tt_f90.dfull_to_tt(
                _np.real(a).flatten('F'), self.n, self.d, eps)
        self.core = _tt_f90.tt_f90.core.copy()
    _tt_f90.tt_f90.tt_dealloc()
def __init__(self, vib_energies, geometry, electronicenergy=None, atoms=None,
             symmetrynumber=None, spin=None, natoms=None):
    if electronicenergy is None:
        self.electronicenergy = 0.
    else:
        self.electronicenergy = electronicenergy
    self.geometry = geometry
    self.atoms = atoms
    self.sigma = symmetrynumber
    self.spin = spin
    if natoms is None:
        if atoms:
            natoms = len(atoms)
    # Cut the vibrations to those needed from the geometry.
    if natoms:
        if geometry == 'nonlinear':
            self.vib_energies = vib_energies[-(3 * natoms - 6):]
        elif geometry == 'linear':
            self.vib_energies = vib_energies[-(3 * natoms - 5):]
        elif geometry == 'monatomic':
            self.vib_energies = []
    else:
        self.vib_energies = vib_energies
    # Make sure no imaginary frequencies remain.
    if sum(np.iscomplex(self.vib_energies)):
        raise ValueError('Imaginary frequencies are present.')
    else:
        self.vib_energies = np.real(self.vib_energies)  # clear +0.j
    self.referencepressure = 101325.  # Pa
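# The mode-counting rule used above, shown in isolation: a nonlinear molecule
# with N atoms has 3N - 6 vibrational modes, a linear one 3N - 5, so the
# constructor keeps only the trailing entries of the sorted frequency list.
import numpy as np

natoms = 3                               # e.g. a bent triatomic such as H2O
all_modes = np.arange(3 * natoms)        # stand-in for 3N sorted frequencies
vib_nonlinear = all_modes[-(3 * natoms - 6):]   # 3 modes
vib_linear = all_modes[-(3 * natoms - 5):]      # 4 modes (e.g. CO2)
assert len(vib_nonlinear) == 3 and len(vib_linear) == 4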
# [[4 5 6]   from the second row to the last row, all columns
#  [7 8 9]]
print a[1, :]    # [4 5 6]  second row, all columns
print a[0:2]     # [[1 2 3]  rows 1 up to (but not including) row 3
                 #  [4 5 6]]
print a[..., 1]  # [2 5 8]  all rows, second column
print a[:, 1]    # [2 5 8]  the ellipsis and the colon behave the same here
print a[:, 1:]   # [[2 3]
                 #  [5 6]
                 #  [8 9]]  all rows, from the second column to the last

x = np.array([[1, 2], [3, 4], [5, 6]])
print x[[0, 1, 2], [0, 1, 0]]
# [1 4 5]
# equivalent to looking up the values at (0,0), (1,1), (2,0)

x = np.array([[1, 2, 3], [3, 4, 5], [6, 7, 8], [8, 9, 10]])
print x[[[0, 0], [2, 3]], [[0, 2], [1, 2]]]
# [[ 1  3]
#  [ 7 10]]
# equivalent to looking up (0,0), (0,2), (2,1), (3,2)

# Boolean filtering
x = np.array([[1, 2, 3], [4, 6, 6], [7, 8, 9], [10, 11, 12]])
print x[x > 5]  # [ 6  6  7  8  9 10 11 12]
x = np.array([np.nan, 1, 2, np.nan, 3, 4, 5])
print x[np.isnan(x) == False]  # [ 1.  2.  3.  4.  5.]
x = np.array([1, 2 + 6j, 5, 3.5 + 5j])
print x[np.iscomplex(x)]  # [ 2.0+6.j  3.5+5.j]
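# Worth noting next to the filter example above: np.iscomplex is elementwise
# and looks at the imaginary *value*, while np.iscomplexobj looks at the dtype.
import numpy as np

z = np.array([1 + 0j, 2 + 6j])
print(np.iscomplex(z))       # [False  True] -- 1+0j has zero imaginary part
print(np.iscomplexobj(z))    # True -- the array dtype is complex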
def interpolate_smoothly(X, Y, dx=None, fixed_x=None, window_size=None, fit_window=None, poly_order=None, expand_over_gap=None, min_control_point_spacing=None, return_cubic_spline=None, fast=None): if fast is None: fast = fast_mode XI = np.argsort(X) #print "sorting table:",XI #print "len = ",len(XI) X = shuffle_array(np.asarray(X), XI) Y = shuffle_array(np.asarray(Y), XI) minX = X[0] #min(X) maxX = X[-1] #max(X) if fixed_x is None: fixed_x = 0 if dx is None: # Default point density is equal to original point density dx = (maxX - minX) / (len(X) - 1) if window_size is None: # Default window size is twice the point density window_size = dx * 2 if fit_window is None: # Hamming window weights fit_window = lambda x: 0.54 + 0.46 * np.cos(2 * np.pi * x) if poly_order is None: poly_order = 2 # linear if expand_over_gap is None: expand_over_gap = True # default minimum spacing is min(dx, min(X spacing))/2 if min_control_point_spacing is None: xprev = X[0] min_control_point_spacing = dx for x in X[1:]: min_control_point_spacing = min(abs(x - xprev), min_control_point_spacing) min_control_point_spacing /= 2 if return_cubic_spline is None: return_cubic_spline = False # fast mode for preview if fast: csx = list(X) csy = list(Y) # main routine for real data else: # Pick a set of control points control_points = values_at_interval(X, dx, fixed_x) fit_domains = [(x - window_size / 2.0, x + window_size / 2.0) for x in control_points] lost_control_points = [] # starts empty. may end up being unused fit_sets_X = [] fit_sets_Y = [] fit_sets_W = [] exact_cs_points = set( ) # put points we wish to include in cubic spline explicitly xy_points_fitted = set() #print "Chose control points and fit domains:",zip(control_points,fit_domains) # speed up bracket_interval by keeping track of approximate operating position within data array rough_data_index = 0 # do it in reversed order so we can remove points from control_points if we can't use them. for i in reversed(range(len(control_points))): #print "" #print "control_points[",i,"] = ",control_points[i] # need a try block here if control point could possibly be outside the range of X # count data points covered by window #print "Fit domain is",(fit_domains[i][0], fit_domains[i][1]) (a, b) = bracket_interval(X, fit_domains[i][0], fit_domains[i][1], start=rough_data_index, inclusive=False, allow_all=True) points = b - a + 1 #print "Bracket interval is",(a,b),"; points=",points # if not enough: if poly_order >= points: # expand_over_gap is now misnamed. this might become default behavior with no need for # a variable name if expand_over_gap: # expand window to get enough control points # We need a sane way to symmetrically (except near endpoints) expand the window until we have # enough data to perform the fit. # # Here's a possible strategy: # Call bracket_interval() again, but with inclusive=True. That will get us at least one more index, # but we can allow_all so nothing overflows an endpoint. Since we want to expand symmetrically, # take the largest distance from control_points[i] as half_interval, and call bracket_interval() # yet again with inclusive=False and a=control_points[i]-half_interval, # b=control_points[i]+half_interval and allow_all=True. Repeat until poly_order +1 > points. # # # Include data points explicitly. Duplicates will be removed later. 
#print "not enough points to fit" for x, y in zip(X[a:b + 1], Y[a:b + 1]): exact_cs_points.add((x, y)) #print "Adding to exact_cs_points: ", (x,y) #print "Deleting control point",control_points[i] lost_control_points += [control_points[i]] del (control_points[i]) # It would be nice to mark points we have already used for fitting and not use them in # exact_cs_points continue else: print "Not enough points to fit. Don't use expand_over_gap=False" # add to lost_control_points and skip the rest of the loop. Can we actually do anything # with the lost control points? There are gaps in the data here. Might have to discard. lost_control_points += [control_points[i]] del (control_points[i]) continue # Add subarray on interval (a,b) (inclusive) to lists of x and y arrays for fitting. #print "adding to fit set:",(X[a:b+1]-control_points[i],Y[a:b+1],fit_window((X[a:b+1]-control_points[i])/(fit_domains[i][1]- fit_domains[i][0]))) fit_sets_X.append(X[a:b + 1] - control_points[i]) fit_sets_Y.append(Y[a:b + 1]) fit_sets_W.append( fit_window((X[a:b + 1] - control_points[i]) / (fit_domains[i][1] - fit_domains[i][0]))) #print "Computing fit window:" #print "X[a:b+1]-control_points[i]=",X[a:b+1]-control_points[i] #print "(fit_domains[i][1]- fit_domains[i][0])=",(fit_domains[i][1]- fit_domains[i][0]) # now add the data points we include in the fit into the set xy_points_fitted: for xy in zip(X[a:b + 1], Y[a:b + 1]): xy_points_fitted.add(xy) #print "xy_points_fitted now contains:",xy_points_fitted #print "" #print "Do the fitting." #print "control_points=",control_points csx = reversed(control_points) csy = [] for x, fitX, fitY, fitW in zip(reversed(control_points), fit_sets_X, fit_sets_Y, fit_sets_W): #print "Fit point:",x #print "X:",fitX,"Y:",fitY,"W:",fitW # currently not using this bit of kludge code if False: # if we need an exact solution, force the fitter to find it by adding an irrelevant point to the fit. if poly_order == len(fitX): fitX = np.append(fitX, 0.0) fitY = np.append(fitY, sum(fitY) / float(len(fitY))) fitW = np.append(fitW, min(fitW) * 1e-6) #print "Adjusted fit vectors for an exact solution: fitX=",fitX,"fitY=",fitY,"fitW=",fitW # do the weighted poly fit on its domain p = np.polyfit(fitX, fitY, 1, w=fitW) csy.append(np.polyval(p, 0.0)) # add to list of cubic splines #print " result:",np.polyval(p,0.0) # build the control point arrays csx = control_points csy = list(reversed(csy)) # zip lists so we can check for duplicates cs = zip(csx, csy) #print "exact_cs_points=",exact_cs_points #print "xy_points_fitted=",xy_points_fitted #print "set(cs)=",set(cs) # For the purposes of excluding fitted points from being used as exact_cs_points, explicit endpoints # are always allowed if the fit failed there. xy_points_fitted.discard((X[0], Y[0])) xy_points_fitted.discard((X[-1], Y[-1])) # Include exact spline handles (except for those that have been used as fit points). Remove # duplicate entries by converting to a set, then unzipping. Note that zip(*x) is inverse of zip. points = zip(*list(set(cs).union(exact_cs_points - xy_points_fitted))) #print "Duplicates removed. 
    # zip(exact_cs_points)=",points
    csx = list(points[0])
    csy = list(points[1])
    # we now have points out of order, so sort
    csxi = np.argsort(csx)
    csx = shuffle_list(csx, csxi)
    csy = shuffle_list(csy, csxi)

# fast mode and high quality mode rejoin here
if min_control_point_spacing > 0.0:
    # Ideal method:
    # find spacings between each pair of control points
    # throw out the point spaced closest to its peers
    # adjust lists and repeat process until no pair is closer than min_control_point_spacing
    #
    # Implemented method: delete from end to beginning if spacings are too close.  Average in a
    # way that becomes crude if we end up deleting more points.
    for i in reversed(range(0, len(csx) - 1)):
        if abs(csx[i] - csx[i + 1]) < min_control_point_spacing:
            csy[i + 1] = (csy[i + 1] + csy[i]) / 2
            del (csx[i])
            del (csy[i])

csx = np.asarray(csx)
csy = np.asarray(csy)

#print ""
#print "doing spline on csx and csy:"
#print "csx=",csx
#print "csy=",csy
#print "dtype of csy=",csy.dtype

if np.any(np.iscomplex(csy)):
    # do cubic spline; default smoothing s is not zero, so must pass explicitly
    iscomplex = True
    cs_real = scipy.interpolate.splrep(csx, np.real(csy), s=0)
    cs_imag = scipy.interpolate.splrep(csx, np.imag(csy), s=0)
else:
    iscomplex = False
    try:
        cs = scipy.interpolate.splrep(csx, csy, s=0)
    except TypeError:
        f_interp = scipy.interpolate.interp1d(csx, csy, kind='linear')
        print 'Falling back to linear interpolation.'

if return_cubic_spline:
    if iscomplex:
        return cs_real, cs_imag
    else:
        return cs
else:
    # figure out the domain: start at fixed_x and proceed backward or forward by dx steps
    # until we find the minimum and maximum possible values.
    # find largest domain of fixed_x + i*dx that is supported by data
    xnew = np.asarray(values_at_interval(csx, dx, fixed_x))
    #print "interpolate_smoothly() csx: {} {}".format(min(csx),max(csx))
    #print "interpolate_smoothly() from values_at_interval: {} {}".format(min(xnew),max(xnew))
    # if cs doesn't exist, try the complex version
    if iscomplex:
        ynew_real = scipy.interpolate.splev(xnew, cs_real, der=0)
        ynew_imag = scipy.interpolate.splev(xnew, cs_imag, der=0)
        ynew = ynew_real + 1.j * ynew_imag
    else:
        try:
            ynew = scipy.interpolate.splev(xnew, cs, der=0)
        except:
            ynew = f_interp(xnew)
    # return result
    return xnew, ynew
def __pow__(self, b): """Returns a Scalar object representing the operation x ** b, where x is the current Scalar object and b is either another Scalar object or a numeric value. Calculations of new Scalar's value and derivations follow rules for exponents and power rule of differentiation respectively. INPUTS ======= b: int or float or Scalar The constant/Scalar we raise the current Scalar to the power of RETURNS ======== Scalar The new Scalar resulting from raising the base (self) to the power of b NOTES ===== PRE: g - b is an int or float or Scalar POST: - self is not changed by the function - b is not changed by the function - returns a scalar object, resulting from raising self to the power of b EXAMPLES ========= >>> x = Scalar('x', 2) >>> y = x ** 2 >>> y._val 4.0 >>> y._deriv {'x': 4.0} """ try: new_val = self._val**b._val #check that a negative number is not being raised to a decimal. Python returns a complex number if this occurs. if np.iscomplex(new_val): raise ValueError( "Cannot raise a negative number ({0}) to a decimal {1}". format(self._val, b._val)) powered = Scalar(None, new_val) #create new Scalar with updated value powered._deriv.pop(None, None) #check if both self and b are zero values because derivative for all variables is just 0 if self._val == 0 and b._val == 0: for variable in (set(self._deriv.keys()) | set(b._deriv.keys())): powered._deriv[variable] = 0 return powered #if b != 0 for variable in (set(self._deriv.keys()) | set(b._deriv.keys())): # _derivative of x^y with respect to y (exponential rule) if variable not in self._deriv.keys(): powered._deriv[variable] = (self._val**b._val) * np.log( self._val) * b._deriv[variable] # _derivative of x^y with respect to x (power rule) elif variable not in b._deriv.keys(): powered._deriv[variable] = b._val * (self._val**( b._val - 1)) * self._deriv[variable] # y = x ^ x # Credits to http://mathcentral.uregina.ca/QQ/database/QQ.09.03/cher1.html for formula else: powered._deriv[variable] = (self._val**b._val) * ( np.log(self._val) + b._val / (self._val)) * self._deriv[variable] except AttributeError: #b is just a integer or float value new_val = self._val**b #check that a negative number is not being raised to a decimal. Python returns a complex number if this occurs. if np.iscomplex(new_val): raise ValueError( "Cannot raise a negative number ({0}) to a decimal {1}". format(self._val, b)) powered = Scalar(None, self._val**b) powered._deriv.pop(None, None) #check if both self and b are zero values because derivative for all variables is just 0 if self._val == 0 and b == 0: for variable in set(self._deriv.keys()): powered._deriv[variable] = 0 return powered #b != 0 for variable in self._deriv.keys(): powered._deriv[variable] = b * (self._val**( b - 1)) * self._deriv[variable] return powered
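# A numeric spot-check of the y = x**x branch above: the closed form used by
# __pow__ is d/dx x**x = x**x * (ln x + 1), compared here against a central
# finite difference.
import numpy as np

x, h = 2.0, 1e-6
analytic = x**x * (np.log(x) + 1.0)
numeric = ((x + h)**(x + h) - (x - h)**(x - h)) / (2.0 * h)
assert np.isclose(analytic, numeric)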
def minimize(f, X, length, args=(), reduction=None, verbose=True, concise=False):
    """
    Minimize a differentiable multivariate function.

    Parameters
    ----------
    f : function to minimize. The function must return the value
        of the function (float) and a numpy array of partial
        derivatives of shape (D,) with respect to X, where D is
        the dimensionality of the function.

    X : numpy array - Shape : (D, 1)
        initial guess.

    length : int
        The length of the run. If positive, length gives the maximum
        number of line searches, if negative its absolute value gives
        the maximum number of function evaluations.

    args : tuple
        Tuple of parameters to be passed to the function f.

    reduction : float
        The expected reduction in the function value in the first
        linesearch (if None, defaults to 1.0)

    verbose : bool
        If True - prints the progress of minimize. (default is True)

    concise : bool
        If True - returns concise convergence info, only the minimum
        function value (necessary when optimizing a large number of
        parameters)

    Return
    ------
    Xs : numpy array - Shape : (D, 1)
        The found solution.

    convergence : numpy array - Shape : (i, D+1)
        Convergence information. The first column is the function values
        returned by the function being minimized. The next D columns are
        the guesses of X during the minimization process.

    i : int
        Number of line searches or function evaluations depending on which
        was selected.

    The function returns when either its length is up, or if no further
    progress can be made (ie, we are at a (local) minimum, or so close that
    due to numerical problems, we cannot get any closer)

    Copyright (C) 2001 - 2006 by Carl Edward Rasmussen (2006-09-08).
    Converted to python by David Lines (2019-23-08)
    """
    INT = 0.1    # don't reevaluate within 0.1 of the limit of the current bracket
    EXT = 3.0    # extrapolate maximum 3 times the current step size
    MAX = 20     # max 20 function evaluations per line search
    RATIO = 10   # maximum allowed slope ratio
    SIG = 0.1
    RHO = SIG / 2
    # SIG and RHO control the Wolfe-Powell conditions.
    # SIG is the maximum allowed absolute ratio between previous and new
    # slopes (derivatives in the search direction); setting SIG to a low
    # (positive) value forces higher precision in the line-searches.
    # RHO is the minimum allowed fraction of the expected slope (from the
    # slope at the initial point in the linesearch). Constants must satisfy
    # 0 < RHO < SIG < 1. Tuning SIG (depending on the nature of the function
    # to be optimized) may speed up the minimization; it is probably not
    # worth playing much with RHO.

    print("Minimizing %s ..."
% f) if reduction is None: red = 1.0 else: red = reduction S = 'Linesearch' if length > 0 else 'Function evaluation' i = 0 # run length counter ls_failed = 0 # no previous line search has failed f0, df0 = eval('f')(X, *list(args)) # get initial function value and gradient df0 = df0.reshape(-1, 1) fX = [] fX.append(f0) Xd = [] Xd.append(X) i += (length < 0) # count epochs s = -df0 # get column vec d0 = -s.T @ s # initial search direction (steepest) and slope x3 = red / (1 - d0) # initial step is red/(|s|+1) while i < abs(length): # while not finished i += (length > 0) # count iterations X0 = X F0 = f0 dF0 = df0 # copy current vals M = MAX if length > 0 else min(MAX, -length - i) while 1: # extrapolate as long as necessary x2 = 0 f2 = f0 d2 = d0 f3 = f0 df3 = df0 success = False while not success and M > 0: try: M -= 1 i += (length < 0) # count epochs f3, df3 = eval('f')(X + x3 * s, *list(args)) df3 = df3.reshape(-1, 1) if np.isnan(f3) or np.isinf(f3) or any( np.isnan(df3) + np.isinf(df3)): raise Exception( 'Either nan or inf in function eval or gradients') success = True except: # catch any error occuring in f x3 = (x2 + x3) / 2 # bisect and try again if f3 < F0: X0 = X + x3 * s F0 = f3 dF0 = df3 # keep best values d3 = df3.T @ s # new slope if d3 > SIG * d0 or f3 > f0 + x3 * RHO * d0 or M == 0: break # finished extrapolating x1 = x2 f1 = f2 d1 = d2 # move point 2 to point 1 x2 = x3 f2 = f3 d2 = d3 # move point 3 to point 2 A = 6 * (f1 - f2) + 3 * (d2 + d1) * (x2 - x1 ) # make cubic extrapolation B = 3 * (f2 - f1) - (2 * d1 + d2) * (x2 - x1) x3 = x1 - d1 * (x2 - x1)**2 / (B + np.sqrt(B * B - A * d1 * (x2 - x1)) ) # num. error possible, ok! if np.iscomplex(x3) or np.isnan(x3) or np.isinf( x3) or x3 < 0: # num prob | wrong sign x3 = x2 * EXT elif x3 > x2 * EXT: x3 = x2 * EXT elif x3 < x2 + INT * (x2 - x1): x3 = x2 + INT * (x2 - x1) while (abs(d3) > -SIG * d0 or f3 > f0 + x3 * RHO * d0) and M > 0: # keep interpolating if d3 > 0 or f3 > f0 + x3 * RHO * d0: # choose subinterval x4 = x3 f4 = f3 d4 = d3 # move point 3 to point 4 else: x2 = x3 f2 = f3 d2 = d3 # move point 3 to point 2 if f4 > f0: x3 = x2 - (0.5 * d2 * (x4 - x2)**2) / ( f4 - f2 - d2 * (x4 - x2)) # quadratic interpolation else: A = 6 * (f2 - f4) / (x4 - x2) + 3 * (d4 + d2 ) # cubic interpolation B = 3 * (f4 - f2) - (2 * d2 + d4) * (x4 - x2) x3 = x2 + (np.sqrt(B * B - A * d2 * (x4 - x2)**2) - B) / A # num. error possible, ok! if np.isnan(x3) or np.isinf(x3): x3 = (x2 + x4) / 2 # if we had a numerical problem then bisect x3 = max(min(x3, x4 - INT * (x4 - x2)), x2 + INT * (x4 - x2)) # don't accept too close f3, df3 = eval('f')(X + x3 * s, *list(args)) df3 = df3.reshape(-1, 1) if f3 < F0: X0 = X + x3 * s F0 = f3 dF0 = df3 # keep best values M -= 1 i += (length < 0) # count epochs?! 
d3 = df3.T @ s # new slope if abs( d3 ) < -SIG * d0 and f3 < f0 + x3 * RHO * d0: # if line search succeeded X = X + x3 * s f0 = f3 fX.append(f0) Xd.append(X) # update variables if verbose: print('%s %6i; Value %4.6e\r' % (S, i, f0)) s = (df3.T @ df3 - df0.T @ df3) / ( df0.T @ df0) * s - df3 # Polack-Ribiere CG direction df0 = df3 # swap derivatives d3 = d0 d0 = df0.T @ s if d0 > 0: # new slope must be negative s = -df0.reshape(-1, 1) d0 = -s.T @ s # otherwise use steepest direction x3 = x3 * min( RATIO, d3 / (d0 - np.finfo(np.double).tiny)) # slope ratio but max RATIO ls_failed = False # this line search did not fail else: X = X0 f0 = F0 df0 = dF0 # restore best point so far if ls_failed or i > abs( length): # line search failed twice in a row break # or we ran out of time, so we give up s = -df0.reshape(-1, 1) d0 = -s.T @ s # try steepest x3 = 1 / (1 - d0) ls_failed = True # this line search failed if concise: convergence = fX[-1] # return only the minimum function value else: convergence = np.hstack((np.array(fX).reshape(-1, 1), np.array(Xd)[:, :, 0])) # bundle convergence info Xs = X # solution return Xs, convergence, i
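# Hedged usage sketch for the minimize() above: the objective must return
# (value, gradient); a simple quadratic should converge to the origin within
# a handful of line searches. The tolerance below is a loose assumption.
import numpy as np

def quadratic(X):
    """f(X) = sum(X**2), with gradient 2*X."""
    return float(np.sum(X**2)), 2.0 * X

X0 = np.array([[3.0], [-4.0]])
Xs, convergence, n_iters = minimize(quadratic, X0, length=50, verbose=False)
assert np.allclose(Xs, 0.0, atol=1e-4)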
def _hessian_states(self, state_op: StateFn, meas_op: Optional[OperatorBase] = None, target_params: Optional[Union[Tuple[ParameterExpression, ParameterExpression], List[Tuple[ParameterExpression, ParameterExpression]]]] = None ) -> OperatorBase: """Generate the operator states whose evaluation returns the Hessian (items). Args: state_op: The operator representing the quantum state for which we compute the Hessian. meas_op: The operator representing the observable for which we compute the gradient. target_params: The parameters we are computing the Hessian wrt: ω Returns: Operators which give the Hessian. If a parameter appears multiple times, one circuit is created per parameterized gates to compute the product rule. Raises: AquaError: If one of the circuits could not be constructed. TypeError: If ``operator`` is of unsupported type. """ state_qc = deepcopy(state_op.primitive) if isinstance(target_params, list) and isinstance(target_params[0], tuple): tuples_list = deepcopy(target_params) target_params = [] for tuples in tuples_list: if all(param in state_qc._parameter_table.get_keys() for param in tuples): for param in tuples: if param not in target_params: target_params.append(param) elif isinstance(target_params, tuple): tuples_list = deepcopy([target_params]) target_params = [] for tuples in tuples_list: if all(param in state_qc._parameter_table.get_keys() for param in tuples): for param in tuples: if param not in target_params: target_params.append(param) else: raise TypeError( 'Please define in the parameters for which the Hessian is evaluated either ' 'as parameter tuple or a list of parameter tuples') qr_add0 = QuantumRegister(1, 'work_qubit0') work_q0 = qr_add0[0] qr_add1 = QuantumRegister(1, 'work_qubit1') work_q1 = qr_add1[0] # create a copy of the original circuit with an additional working qubit register circuit = state_qc.copy() circuit.add_register(qr_add0, qr_add1) # Get the circuits needed to compute the Hessian hessian_ops = None for param_a, param_b in tuples_list: if param_a not in state_qc._parameter_table.get_keys() or param_b \ not in state_qc._parameter_table.get_keys(): hessian_op = ~Zero @ One else: param_gates_a = state_qc._parameter_table[param_a] param_gates_b = state_qc._parameter_table[param_b] for i, param_occurence_a in enumerate(param_gates_a): coeffs_a, gates_a = self._gate_gradient_dict(param_occurence_a[0])[ param_occurence_a[1]] # apply Hadamard on working qubit self.insert_gate(circuit, param_occurence_a[0], HGate(), qubits=[work_q0]) self.insert_gate(circuit, param_occurence_a[0], HGate(), qubits=[work_q1]) for j, gate_to_insert_a in enumerate(gates_a): coeff_a = coeffs_a[j] hessian_circuit_temp = QuantumCircuit(*circuit.qregs) hessian_circuit_temp.data = circuit.data # Fix working qubit 0 phase sign = np.sign(coeff_a) is_complex = np.iscomplex(coeff_a) if sign == -1: if is_complex: self.insert_gate(hessian_circuit_temp, param_occurence_a[0], SdgGate(), qubits=[work_q0]) else: self.insert_gate(hessian_circuit_temp, param_occurence_a[0], ZGate(), qubits=[work_q0]) else: if is_complex: self.insert_gate(hessian_circuit_temp, param_occurence_a[0], SGate(), qubits=[work_q0]) # Insert controlled, intercepting gate - controlled by |1> if isinstance(param_occurence_a[0], UGate): if param_occurence_a[1] == 0: self.insert_gate(hessian_circuit_temp, param_occurence_a[0], RZGate(param_occurence_a[0].params[2])) self.insert_gate(hessian_circuit_temp, param_occurence_a[0], RXGate(np.pi / 2)) self.insert_gate(hessian_circuit_temp, param_occurence_a[0], 
gate_to_insert_a, additional_qubits=([work_q0], [])) self.insert_gate(hessian_circuit_temp, param_occurence_a[0], RXGate(-np.pi / 2)) self.insert_gate(hessian_circuit_temp, param_occurence_a[0], RZGate(-param_occurence_a[0].params[2])) elif param_occurence_a[1] == 1: self.insert_gate(hessian_circuit_temp, param_occurence_a[0], gate_to_insert_a, after=True, additional_qubits=([work_q0], [])) else: self.insert_gate(hessian_circuit_temp, param_occurence_a[0], gate_to_insert_a, additional_qubits=([work_q0], [])) else: self.insert_gate(hessian_circuit_temp, param_occurence_a[0], gate_to_insert_a, additional_qubits=([work_q0], [])) for m, param_occurence_b in enumerate(param_gates_b): coeffs_b, gates_b = self._gate_gradient_dict(param_occurence_b[0])[ param_occurence_b[1]] for n, gate_to_insert_b in enumerate(gates_b): coeff_b = coeffs_b[n] # create a copy of the original circuit with the same registers hessian_circuit = QuantumCircuit(*hessian_circuit_temp.qregs) hessian_circuit.data = hessian_circuit_temp.data # Fix working qubit 1 phase sign = np.sign(coeff_b) is_complex = np.iscomplex(coeff_b) if sign == -1: if is_complex: self.insert_gate(hessian_circuit, param_occurence_b[0], SdgGate(), qubits=[work_q1]) else: self.insert_gate(hessian_circuit, param_occurence_b[0], ZGate(), qubits=[work_q1]) else: if is_complex: self.insert_gate(hessian_circuit, param_occurence_b[0], SGate(), qubits=[work_q1]) # Insert controlled, intercepting gate - controlled by |1> if isinstance(param_occurence_b[0], UGate): if param_occurence_b[1] == 0: self.insert_gate(hessian_circuit, param_occurence_b[0], RZGate(param_occurence_b[0].params[2])) self.insert_gate(hessian_circuit, param_occurence_b[0], RXGate(np.pi / 2)) self.insert_gate(hessian_circuit, param_occurence_b[0], gate_to_insert_b, additional_qubits=([work_q1], [])) self.insert_gate(hessian_circuit, param_occurence_b[0], RXGate(-np.pi / 2)) self.insert_gate(hessian_circuit, param_occurence_b[0], RZGate(-param_occurence_b[0].params[2])) elif param_occurence_b[1] == 1: self.insert_gate(hessian_circuit, param_occurence_b[0], gate_to_insert_b, after=True, additional_qubits=([work_q1], [])) else: self.insert_gate(hessian_circuit, param_occurence_b[0], gate_to_insert_b, additional_qubits=([work_q1], [])) else: self.insert_gate(hessian_circuit, param_occurence_b[0], gate_to_insert_b, additional_qubits=([work_q1], [])) hessian_circuit.h(work_q0) hessian_circuit.cz(work_q1, work_q0) hessian_circuit.h(work_q1) term = state_op.coeff * np.sqrt(np.abs(coeff_a) * np.abs(coeff_b)) \ * CircuitStateFn(hessian_circuit) # Chain Rule Parameter Expression gate_param_a = param_occurence_a[0].params[param_occurence_a[1]] gate_param_b = param_occurence_b[0].params[param_occurence_b[1]] if meas_op: meas = deepcopy(meas_op) if isinstance(gate_param_a, ParameterExpression): expr_grad = DerivativeBase.parameter_expression_grad( gate_param_a, param_a) meas *= expr_grad if isinstance(gate_param_b, ParameterExpression): expr_grad = DerivativeBase.parameter_expression_grad( gate_param_a, param_a) meas *= expr_grad term = meas @ term else: term = ListOp([term], combo_fn=partial(self._hess_combo_fn, state_op=state_op)) if isinstance(gate_param_a, ParameterExpression): expr_grad = DerivativeBase.parameter_expression_grad( gate_param_a, param_a) term *= expr_grad if isinstance(gate_param_b, ParameterExpression): expr_grad = DerivativeBase.parameter_expression_grad( gate_param_a, param_a) term *= expr_grad if i == 0 and j == 0 and m == 0 and n == 0: hessian_op = term else: # Product Rule 
hessian_op += term # Create a list of Hessian elements w.r.t. the given parameter tuples if len(tuples_list) == 1: return hessian_op else: if not hessian_ops: hessian_ops = [hessian_op] else: hessian_ops += [hessian_op] return ListOp(hessian_ops)
print('\nOriginal Array:')
print(x)
print('\n')

rows = np.array([[0, 0], [3, 3]])
cols = np.array([[0, 2], [0, 2]])
y = x[rows, cols]
print('Four corner points:')
print(y)
print('\n')

a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
b = a[1:3, 1:3]
c = a[1:3, [1, 2]]
d = a[..., 1:]
print(a)
print(b)
print(c)
print(d)

print(a[a > 5])

a = np.array([np.nan, 1, 2, np.nan, 3, 4, 5])
print(a[~np.isnan(a)])

a = np.array([1, 2 + 6j, 5, 3.5 + 5j])
print(a[np.iscomplex(a)])

x = np.arange(32).reshape((8, 4))
print(x)
print(x[[4, 2, 1, 7]])
print(x[[-4, -2, -1, -7]])
print(x[np.ix_([1, 5, 7, 2], [0, 3, 1, 2])])
def ksl(A, y0, tau, verb=1, scheme='symm', space=8, rmax=2000):
    """ Dynamical tensor-train approximation based on projector splitting

    This function performs one step of dynamical tensor-train approximation
    for the equation

    .. math ::
        \\frac{dy}{dt} = A y, \\quad y(0) = y_0

    and outputs approximation for :math:`y(\\tau)`

    :References:

        1. Christian Lubich, Ivan Oseledets, and Bart Vandereycken.
           Time integration of tensor trains. arXiv preprint 1407.2042, 2014.
           http://arxiv.org/abs/1407.2042

        2. Christian Lubich and Ivan V. Oseledets. A projector-splitting
           integrator for dynamical low-rank approximation. BIT, 54(1):171-188, 2014.
           http://dx.doi.org/10.1007/s10543-013-0454-0

    :param A: Matrix in the TT-format
    :type A: matrix
    :param y0: Initial condition in the TT-format
    :type y0: tensor
    :param tau: Timestep
    :type tau: float
    :param scheme: The integration scheme, possible values: 'symm' -- second order, 'first' -- first order
    :type scheme: str
    :param space: Maximal dimension of the Krylov space for the local EXPOKIT solver.
    :type space: int
    :rtype: tensor

    :Example:

        >>> import tt
        >>> import tt.ksl
        >>> import numpy as np
        >>> d = 8
        >>> a = tt.qlaplace_dd([d, d, d])
        >>> y0, ev = tt.eigb.eigb(a, tt.rand(2, 24, 2), 1e-6, verb=0)
        Solving a block eigenvalue problem
        Looking for 1 eigenvalues with accuracy 1E-06
        swp: 1 er = 1.1408 rmax:2
        swp: 2 er = 190.01 rmax:2
        swp: 3 er = 2.72582E-08 rmax:2
        Total number of matvecs: 0
        >>> y1 = tt.ksl.ksl(a, y0, 1e-2)
        Solving a real-valued dynamical problem with tau=1E-02
        >>> print tt.dot(y1, y0) / (y1.norm() * y0.norm()) - 1  # Eigenvectors should not change
        0.0
    """
    ry = y0.r.copy()
    if scheme == 'symm':
        tp = 2
    else:
        tp = 1
    # Check for dtype
    y = tensor()
    if np.iscomplex(A.tt.core).any() or np.iscomplex(y0.core).any():
        dyn_tt.dyn_tt.ztt_ksl(y0.d, A.n, A.m, A.tt.r, A.tt.core + 0j,
                              y0.core + 0j, ry, tau, rmax, 0, 10, verb, tp, space)
        y.core = dyn_tt.dyn_tt.zresult_core.copy()
    else:
        A.tt.core = np.real(A.tt.core)
        y0.core = np.real(y0.core)
        dyn_tt.dyn_tt.tt_ksl(y0.d, A.n, A.m, A.tt.r, A.tt.core,
                             y0.core, ry, tau, rmax, 0, 10, verb)
        y.core = dyn_tt.dyn_tt.result_core.copy()
    dyn_tt.dyn_tt.deallocate_result()
    y.d = y0.d
    y.n = A.n.copy()
    y.r = ry
    y.get_ps()
    return y
def lpmn(m, n, z): """Associated Legendre function of the first kind, Pmn(z) Computes the associated Legendre function of the first kind of order m and degree n,:: Pmn(z) = P_n^m(z) and its derivative, ``Pmn'(z)``. Returns two arrays of size ``(m+1, n+1)`` containing ``Pmn(z)`` and ``Pmn'(z)`` for all orders from ``0..m`` and degrees from ``0..n``. This function takes a real argument ``z``. For complex arguments ``z`` use clpmn instead. Parameters ---------- m : int ``|m| <= n``; the order of the Legendre function. n : int where ``n >= 0``; the degree of the Legendre function. Often called ``l`` (lower case L) in descriptions of the associated Legendre function z : float Input value. Returns ------- Pmn_z : (m+1, n+1) array Values for all orders 0..m and degrees 0..n Pmn_d_z : (m+1, n+1) array Derivatives for all orders 0..m and degrees 0..n See Also -------- clpmn: associated Legendre functions of the first kind for complex z Notes ----- In the interval (-1, 1), Ferrer's function of the first kind is returned. The phase convention used for the intervals (1, inf) and (-inf, -1) is such that the result is always real. References ---------- .. [1] NIST Digital Library of Mathematical Functions http://dlmf.nist.gov/14.3 """ if not isscalar(m) or (abs(m) > n): raise ValueError("m must be <= n.") if not isscalar(n) or (n < 0): raise ValueError("n must be a non-negative integer.") if not isscalar(z): raise ValueError("z must be scalar.") if iscomplex(z): raise ValueError("Argument must be real. Use clpmn instead.") if (m < 0): mp = -m mf, nf = mgrid[0:mp + 1, 0:n + 1] sv = errprint(0) if abs(z) < 1: # Ferrer function; DLMF 14.9.3 fixarr = where(mf > nf, 0.0, (-1)**mf * gamma(nf - mf + 1) / gamma(nf + mf + 1)) else: # Match to clpmn; DLMF 14.9.13 fixarr = where(mf > nf, 0.0, gamma(nf - mf + 1) / gamma(nf + mf + 1)) sv = errprint(sv) else: mp = m p, pd = specfun.lpmn(mp, n, z) if (m < 0): p = p * fixarr pd = pd * fixarr return p, pd
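A quick sanity check of the array layout lpmn returns (orders along the first axis, degrees along the second); P_1^0 is the Legendre polynomial P_1, so the value at 0.5 is 0.5:

from scipy.special import lpmn

p, pd = lpmn(2, 2, 0.5)
print(p.shape)   # (3, 3): orders 0..2 by degrees 0..2
print(p[0, 1])   # P_1^0(0.5) = 0.5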
def test_iscomplex(self): self.check(np.iscomplex) o = np.iscomplex(Masked([1. + 1j], mask=False)) assert o.unmasked and not o.mask o = np.iscomplex(Masked([1. + 1j], mask=True)) assert o.unmasked and o.mask
def _isComplex(x): if hasattr(x, 'dtype'): return (x.dtype.str[1] == 'c') else: return np.iscomplex(x)
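The dtype test above is deliberately stricter than np.iscomplex: np.iscomplex works elementwise on values and reports False for complex numbers whose imaginary part happens to be zero, whereas the dtype check reports the storage type. A short illustration:

import numpy as np

a = np.array([1 + 0j, 2 + 0j])
print(np.iscomplex(a))        # [False False]: the imaginary parts are zero
print(a.dtype.str[1] == 'c')  # True: the array is nonetheless stored as complex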
def problematic_hessian(hessian, check_pd=True, check_sing=True, verbose=False): """ Checks if the Hessian (covariance matrix) is problematic. Computes the eigenvalues and checks for negative ones. Also checks if the matrix is singular. Args: hessian: a matrix to be checked. check_pd: if True, flag matrices with negative eigenvalues. check_sing: if True, flag singular matrices (very small eigenvalues). verbose: if True, print a warning describing the problem found. Returns: True if the Hessian is problematic and False otherwise. """ if hessian is None: return True # Check for NaN elements if np.any(np.isnan(hessian)): if verbose: print("Warning: some Hessian elements are NaN...") return True # Check for inf elements if not np.all(np.isfinite(hessian)): if verbose: print("Warning: some Hessian elements are not finite...") return True # Check for complex elements if np.any(np.iscomplex(hessian)): if verbose: print("Warning: some Hessian elements are complex...") return True # Check for large elements if np.any(np.diag(hessian) > 1e20): if verbose: print("Warning: some Hessian elements are too large...") return True # Singular matrix (too small eigenvalues) # Negative eigenvalues flag = False try: eig_values = eigh(hessian, lower=True, check_finite=True)[0] eps = _eigvalsh_to_eps(eig_values, None, None) if check_sing and (np.abs(eig_values) < eps).any(): flag = True if verbose: print( "Warning: Hessian has very small eigenvalues: {}.".format( np.min(eig_values))) large_eig_values = eig_values[np.abs(eig_values) > eps] if check_sing and len(large_eig_values) < len(eig_values): if verbose: print("Warning: Hessian is ill-conditioned with eigenvalues:") print(eig_values) flag = True if check_pd and np.min(eig_values) < 0.0: if verbose: neg_eig_values = eig_values[eig_values < 0.0] print("Warning: Hessian has negative eigenvalues: {}".format( neg_eig_values)) flag = True except Exception as e: print(e) print("Warning: numerical issues in eigenvalue computations, rejecting.") flag = True return flag
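A usage sketch, assuming the module's imports are in scope (numpy as np, scipy.linalg.eigh, and the private _eigvalsh_to_eps tolerance helper):

import numpy as np

good = np.eye(2)                          # a valid covariance matrix
bad = np.array([[1.0, 2.0], [2.0, 1.0]])  # eigenvalues 3 and -1
print(problematic_hessian(good))               # False
print(problematic_hessian(bad, verbose=True))  # True: negative eigenvalue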
def save_image(filename, im, scaling='auto', depth=8): """Save an ndarray or image as a tiff. Parameters ---------- im : ndarray or :class:`holopy.image.Image` image to save. filename : basestring filename in which to save image. If im is an image the function should default to the image's name field if no filename is specified scaling : 'auto', None, or (None|Int, None|Int) How the image should be scaled for saving. Ignored for float output. It defaults to auto, use the full range of the output format. Other options are None, meaning no scaling, or a pair of integers specifying the values which should be set to the maximum and minimum values of the image format. depth : 8, 16 or 'float' What type of image to save. Options other than 8bit may not be supported for many image types. You probably don't want to save 8bit images without some kind of scaling. """ # if we don't have an extension, default to tif if os.path.splitext(filename)[1] == '': filename += '.tif' if im.ndim > 2 + hasattr(im, illumination) + hasattr(im, 'z'): raise BadImage("Cannot interpret multidimensional image") else: im = im.copy() if isinstance(im, xr.DataArray): if illumination in im.dims and len(im.illumination) == 2: im = pad_channel(im) elif illumination in im.dims and len(im.illumination) > 3: raise BadImage("Too many illumination channels") if 'z' in im.dims: im = im.isel(z=0) if im.ndim == 3: im = im.transpose('x', 'y', 'illumination') metadat = False if os.path.splitext(filename)[1] in tiflist and isinstance(im, xr.DataArray): if im.name is None: im.name = os.path.splitext(os.path.split(filename)[-1])[0] metadat = pack_attrs(im, do_spacing = True, scaling=scaling) from PIL.TiffImagePlugin import ImageFileDirectory_v2 as ifd2 #hiding this import here since it doesn't play nice in some scenarios tiffinfo = ifd2() tiffinfo[270] = yaml.dump(metadat) #This edits the 'imagedescription' field of the tiff metadata if np.iscomplex(im).any(): raise BadImage("Cannot interpret image with complex values") if isinstance(im, xr.DataArray): im = im.values if scaling is not None: if scaling == 'auto': min = im.min() max = im.max() elif len(scaling) == 2: min, max = scaling im = np.minimum(im, max) im = np.maximum(im, min) else: raise Error("Invalid image scaling") if min is not None: im = im - min if max is not None: im = im / (max-min) if depth != 'float': if depth == 8: typestr = 'uint8' elif depth in (16, 32): typestr = 'int' + str(depth) depth = depth - 1 else: raise Error("Unknown image depth") if im.max() <= 1: im = im * ((2**depth)-1) + .499999 im = im.astype(typestr) if metadat: pilimage.fromarray(im).save(filename, tiffinfo=tiffinfo) else: pilimage.fromarray(im).save(filename)
def _compute_beamformer(method, G, Cm, reg, n_orient, weight_norm, pick_ori, reduce_rank, rank, is_free_ori, inversion=None): """Compute a spatial filter (LCMV or DICS).""" # Tikhonov regularization using reg parameter d to control for # trade-off between spatial resolution and noise sensitivity if method == 'lcmv': Cm_inv, d = _reg_pinv(Cm.copy(), reg) elif method == 'dics': Cm_inv, _ = _reg_pinv(Cm, reg, rcond='auto') if weight_norm is not None and inversion != 'single': # Compute square of Cm_inv used for weight normalization Cm_inv_sq = np.dot(Cm_inv, Cm_inv) if weight_norm == 'nai': # estimate noise level based on covariance matrix, taking the # smallest eigenvalue that is not zero noise, _ = linalg.eigh(Cm) if rank is not None: rank_Cm = rank else: rank_Cm = estimate_rank(Cm, tol='auto', norm=False, return_singular=False) noise = noise[len(noise) - rank_Cm] # use either noise floor or regularization parameter d noise = max(noise, d) # compute spatial filter W = np.dot(G.T, Cm_inv) n_sources = G.shape[1] // n_orient for k in range(n_sources): Wk = W[n_orient * k: n_orient * k + n_orient] Gk = G[:, n_orient * k: n_orient * k + n_orient] if method == 'lcmv' and np.all(Gk == 0.): continue Ck = np.dot(Wk, Gk) if method == 'dics': # Normalize the spatial filters: if Wk.ndim == 2 and len(Wk) > 1: # Free source orientation if inversion == 'single': # Invert for each dipole separately using plain division Wk /= np.diag(Ck)[:, np.newaxis] elif inversion == 'matrix': # Invert for all dipoles simultaneously using matrix # inversion. Wk[:] = np.dot(linalg.pinv(Ck, 0.1), Wk) else: # Fixed source orientation Wk /= Ck # compute scalar beamformer by finding the source orientation # which maximizes output source power if pick_ori == 'max-power': if weight_norm is not None and inversion != 'single': # finding optimal orientation for NAI and unit-noise-gain # based on [2]_, Eq. 4.47 tmp = np.dot(Gk.T, np.dot(Cm_inv_sq, Gk)) if reduce_rank: # use pseudo inverse computation setting smallest component # to zero if the leadfield is not full rank tmp_inv = _eig_inv(tmp, tmp.shape[0] - 1) else: # use straight inverse with full rank leadfield try: tmp_inv = linalg.inv(tmp) except np.linalg.linalg.LinAlgError: raise ValueError('Singular matrix detected when ' 'estimating spatial filters. ' 'Consider reducing the rank of the ' 'leadfield by using ' 'reduce_rank=True.') power = np.dot(tmp_inv, np.dot(Wk, Gk)) elif weight_norm is not None and inversion == 'single': # First make the filters unit gain, then apply them to the # CSD matrix to compute power. norm = 1 / np.sqrt(np.sum(Wk ** 2, axis=1)) Wk_norm = Wk * norm[:, np.newaxis] power = Wk_norm.dot(Cm).dot(Wk_norm.T) else: if method == 'dics': # Compute spectral power by applying the spatial filters to # the CSD matrix. power = Wk.dot(Cm).dot(Wk.T) elif method == 'lcmv': # no weight-normalization and max-power is not implemented # yet for lcmv beamformer: raise NotImplementedError('The max-power orientation ' 'selection is not yet ' 'implemented with weight_norm ' 'set to None.') # compute the orientation: if method == 'lcmv': eig_vals, eig_vecs = linalg.eig(power) if np.iscomplex(eig_vecs).any(): raise ValueError('The eigenspectrum of the leadfield ' 'at this voxel is complex. 
Consider ' 'reducing the rank of the leadfield ' 'by using reduce_rank=True.') idx_max = eig_vals.argmax() max_ori = eig_vecs[:, idx_max] Wk[:] = np.dot(max_ori, Wk) Gk = np.dot(Gk, max_ori) # compute spatial filter for NAI or unit-noise-gain tmp = np.dot(Gk.T, np.dot(Cm_inv_sq, Gk)) denom = np.sqrt(tmp) Wk /= denom if weight_norm == 'nai': Wk /= np.sqrt(noise) is_free_ori = False elif method == 'dics': # Compute the direction of max power u, s, _ = np.linalg.svd(power.real) max_ori = u[:, 0] Wk[:] = np.dot(max_ori, Wk) else: # do vector beamformer if method == 'lcmv': # compute the filters: if is_free_ori: # Free source orientation Wk[:] = np.dot(linalg.pinv(Ck, 0.1), Wk) else: # Fixed source orientation Wk /= Ck # handle noise normalization with free/normal source # orientation: if weight_norm == 'nai': raise NotImplementedError('Weight normalization with ' 'neural activity index is not ' 'implemented yet with free or ' 'fixed orientation.') elif weight_norm == 'unit-noise-gain': noise_norm = np.sum(Wk ** 2, axis=1) if is_free_ori: noise_norm = np.sum(noise_norm) noise_norm = np.sqrt(noise_norm) if noise_norm == 0.: noise_norm_inv = 0. # avoid division by 0 else: noise_norm_inv = 1. / noise_norm Wk[:] *= noise_norm_inv # picking source orientation maximizing output source power if pick_ori == 'max-power': W = W[0::3] elif pick_ori == 'normal': W = W[2::3] is_free_ori = False if method == 'dics': if weight_norm == 'unit-noise-gain': # Scale weights so that W @ I @ W.T == I if pick_ori is None and n_orient > 1: # Compute the norm for each set of 3 dipoles W = W.reshape(-1, 3, W.shape[1]) norm = np.sqrt(np.sum(W ** 2, axis=(1, 2))) W /= norm[:, np.newaxis, np.newaxis] W = W.reshape(-1, W.shape[2]) else: # Compute the norm for each dipole norm = np.sqrt(np.sum(W ** 2, axis=1)) W /= norm[:, np.newaxis] return W, is_free_ori
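The unit-noise-gain branch above rescales each filter row by its Euclidean norm. A small numpy check of what that buys, detached from the MNE internals:

import numpy as np

rng = np.random.default_rng(0)
W = rng.standard_normal((5, 32))  # 5 spatial filters over 32 channels
W_ung = W / np.sqrt(np.sum(W ** 2, axis=1))[:, np.newaxis]
print(np.allclose(np.diag(W_ung @ W_ung.T), 1.0))  # True: unit noise gain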
def _arr_to_complex(A): if np.iscomplex(A).any(): return Complex(re=A.real, im=A.imag) else: return Complex(re=A, im=np.zeros_like(A))
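A minimal demo of the split; the namedtuple here is a stand-in for whatever Complex container the surrounding module actually defines:

import numpy as np
from collections import namedtuple

Complex = namedtuple('Complex', ['re', 'im'])  # stand-in container

def _arr_to_complex(A):
    if np.iscomplex(A).any():
        return Complex(re=A.real, im=A.imag)
    else:
        return Complex(re=A, im=np.zeros_like(A))

print(_arr_to_complex(np.array([1.0, 2.0])).im)        # [0. 0.]
print(_arr_to_complex(np.array([1 + 2j, 3 - 4j])).re)  # [1. 3.]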
def convert( self, operator: CircuitStateFn, params: Optional[Union[ParameterExpression, ParameterVector, List[ParameterExpression]]] = None, ) -> ListOp: r""" Args: operator: The operator corresponding to the quantum state :math:`|\psi(\omega)\rangle` for which we compute the QFI. params: The parameters :math:`\omega` with respect to which we are computing the QFI. Returns: A ``ListOp[ListOp]`` where the operator at position ``[k][l]`` corresponds to the matrix element :math:`k, l` of the QFI. Raises: AquaError: If one of the circuits could not be constructed. TypeError: If ``operator`` is an unsupported type. """ # QFI & phase fix observable qfi_observable = ~StateFn(4 * Z ^ (I ^ operator.num_qubits)) phase_fix_observable = ~StateFn((X + 1j * Y) ^ (I ^ operator.num_qubits)) # see https://arxiv.org/pdf/quant-ph/0108146.pdf if not isinstance(operator, CircuitStateFn): raise TypeError( 'LinCombFull is only compatible with states that are given as CircuitStateFn' ) if not isinstance(params, (list, np.ndarray)): params = [params] state_qc = operator.primitive # First, the operators are computed which can compensate for a potential phase-mismatch # between target and trained state, i.e.〈ψ|∂lψ〉 phase_fix_states = None qr_work = QuantumRegister(1, 'work_qubit') work_q = qr_work[0] additional_qubits: Tuple[List[Qubit], List[Qubit]] = ([work_q], []) # create a copy of the original state with an additional work_q register for param in params: param_gates = state_qc._parameter_table[param] for m, param_occurence in enumerate(param_gates): coeffs_i, gates_i = LinComb._gate_gradient_dict( param_occurence[0])[param_occurence[1]] for k, gate_to_insert_i in enumerate(gates_i): grad_state = state_qc.copy() grad_state.add_register(qr_work) # apply Hadamard on work_q LinComb.insert_gate(grad_state, param_occurence[0], HGate(), qubits=[work_q]) # Fix work_q phase coeff_i = coeffs_i[k] sign = np.sign(coeff_i) is_complex = np.iscomplex(coeff_i) if sign == -1: if is_complex: LinComb.insert_gate(grad_state, param_occurence[0], SdgGate(), qubits=[work_q]) else: LinComb.insert_gate(grad_state, param_occurence[0], ZGate(), qubits=[work_q]) else: if is_complex: LinComb.insert_gate(grad_state, param_occurence[0], SGate(), qubits=[work_q]) # Insert controlled, intercepting gate - controlled by |0> if isinstance(param_occurence[0], UGate): if param_occurence[1] == 0: LinComb.insert_gate( grad_state, param_occurence[0], RZGate(param_occurence[0].params[2])) LinComb.insert_gate(grad_state, param_occurence[0], RXGate(np.pi / 2)) LinComb.insert_gate( grad_state, param_occurence[0], gate_to_insert_i, additional_qubits=additional_qubits) LinComb.insert_gate(grad_state, param_occurence[0], RXGate(-np.pi / 2)) LinComb.insert_gate( grad_state, param_occurence[0], RZGate(-param_occurence[0].params[2])) elif param_occurence[1] == 1: LinComb.insert_gate( grad_state, param_occurence[0], gate_to_insert_i, after=True, additional_qubits=additional_qubits) else: LinComb.insert_gate( grad_state, param_occurence[0], gate_to_insert_i, additional_qubits=additional_qubits) else: LinComb.insert_gate( grad_state, param_occurence[0], gate_to_insert_i, additional_qubits=additional_qubits) grad_state = self.trim_circuit(grad_state, param_occurence[0]) grad_state.h(work_q) state = np.sqrt(np.abs( coeff_i)) * operator.coeff * CircuitStateFn(grad_state) # Chain Rule parameter expressions gate_param = param_occurence[0].params[param_occurence[1]] if gate_param == param: state = phase_fix_observable @ state else: if isinstance(gate_param, 
ParameterExpression): expr_grad = DerivativeBase.parameter_expression_grad( gate_param, param) state = (expr_grad * phase_fix_observable) @ state else: state *= 0 if m == 0 and k == 0: phase_fix_state = state else: phase_fix_state += state if not phase_fix_states: phase_fix_states = [phase_fix_state] else: phase_fix_states += [phase_fix_state] # Get 4 * Re[〈∂kψ|∂lψ] qfi_operators = [] qr_work_qubit = QuantumRegister(1, 'work_qubit') work_qubit = qr_work_qubit[0] additional_qubits = ([work_qubit], []) # create a copy of the original circuit with an additional work_qubit register circuit = state_qc.copy() circuit.add_register(qr_work_qubit) LinComb.insert_gate(circuit, state_qc._parameter_table[params[0]][0][0], HGate(), qubits=[work_qubit]) # Get the circuits needed to compute〈∂iψ|∂jψ〉 for i, param_i in enumerate(params): # loop over parameters qfi_ops = None for j, param_j in enumerate(params): # Construct the circuits param_gates_i = state_qc._parameter_table[param_i] for m_i, param_occurence_i in enumerate(param_gates_i): coeffs_i, gates_i = LinComb._gate_gradient_dict( param_occurence_i[0])[param_occurence_i[1]] # apply Hadamard on work_qubit for k_i, gate_to_insert_i in enumerate(gates_i): coeff_i = coeffs_i[k_i] param_gates_j = state_qc._parameter_table[param_j] for m_j, param_occurence_j in enumerate(param_gates_j): coeffs_j, gates_j = \ LinComb._gate_gradient_dict(param_occurence_j[0])[ param_occurence_j[1]] for k_j, gate_to_insert_j in enumerate(gates_j): coeff_j = coeffs_j[k_j] # create a copy of the original circuit with the same registers qfi_circuit = QuantumCircuit(*circuit.qregs) qfi_circuit.data = circuit.data # Fix work_qubit phase sign = np.sign(np.conj(coeff_i) * coeff_j) is_complex = np.iscomplex( np.conj(coeff_i) * coeff_j) if sign == -1: if is_complex: LinComb.insert_gate( qfi_circuit, param_occurence_i[0], SdgGate(), qubits=[work_qubit]) else: LinComb.insert_gate( qfi_circuit, param_occurence_i[0], ZGate(), qubits=[work_qubit]) else: if is_complex: LinComb.insert_gate( qfi_circuit, param_occurence_i[0], SGate(), qubits=[work_qubit]) LinComb.insert_gate(qfi_circuit, param_occurence_i[0], XGate(), qubits=[work_qubit]) # Insert controlled, intercepting gate - controlled by |1> if isinstance(param_occurence_i[0], UGate): if param_occurence_i[1] == 0: LinComb.insert_gate( qfi_circuit, param_occurence_i[0], RZGate(param_occurence_i[0]. params[2])) LinComb.insert_gate( qfi_circuit, param_occurence_i[0], RXGate(np.pi / 2)) LinComb.insert_gate( qfi_circuit, param_occurence_i[0], gate_to_insert_i, additional_qubits=additional_qubits ) LinComb.insert_gate( qfi_circuit, param_occurence_i[0], RXGate(-np.pi / 2)) LinComb.insert_gate( qfi_circuit, param_occurence_i[0], RZGate(-param_occurence_i[0]. params[2])) elif param_occurence_i[1] == 1: LinComb.insert_gate( qfi_circuit, param_occurence_i[0], gate_to_insert_i, after=True, additional_qubits=additional_qubits ) else: LinComb.insert_gate( qfi_circuit, param_occurence_i[0], gate_to_insert_i, additional_qubits=additional_qubits ) else: LinComb.insert_gate( qfi_circuit, param_occurence_i[0], gate_to_insert_i, additional_qubits=additional_qubits) LinComb.insert_gate(qfi_circuit, gate_to_insert_i, XGate(), qubits=[work_qubit], after=True) # Insert controlled, intercepting gate - controlled by |0> if isinstance(param_occurence_j[0], UGate): if param_occurence_j[1] == 0: LinComb.insert_gate( qfi_circuit, param_occurence_j[0], RZGate(param_occurence_j[0]. 
params[2])) LinComb.insert_gate( qfi_circuit, param_occurence_j[0], RXGate(np.pi / 2)) LinComb.insert_gate( qfi_circuit, param_occurence_j[0], gate_to_insert_j, additional_qubits=additional_qubits ) LinComb.insert_gate( qfi_circuit, param_occurence_j[0], RXGate(-np.pi / 2)) LinComb.insert_gate( qfi_circuit, param_occurence_j[0], RZGate(-param_occurence_j[0]. params[2])) elif param_occurence_j[1] == 1: LinComb.insert_gate( qfi_circuit, param_occurence_j[0], gate_to_insert_j, after=True, additional_qubits=additional_qubits ) else: LinComb.insert_gate( qfi_circuit, param_occurence_j[0], gate_to_insert_j, additional_qubits=additional_qubits ) else: LinComb.insert_gate( qfi_circuit, param_occurence_j[0], gate_to_insert_j, additional_qubits=additional_qubits) # Remove redundant gates if j <= i: qfi_circuit = self.trim_circuit( qfi_circuit, param_occurence_i[0]) else: qfi_circuit = self.trim_circuit( qfi_circuit, param_occurence_j[0]) qfi_circuit.h(work_qubit) # Convert the quantum circuit into a CircuitStateFn term = np.sqrt( np.abs(coeff_i) * np.abs(coeff_j)) * operator.coeff term = term * CircuitStateFn(qfi_circuit) # Chain Rule Parameter Expression gate_param_i = param_occurence_i[0].params[ param_occurence_i[1]] gate_param_j = param_occurence_j[0].params[ param_occurence_j[1]] meas = deepcopy(qfi_observable) if isinstance(gate_param_i, ParameterExpression): expr_grad = DerivativeBase.parameter_expression_grad( gate_param_i, param_i) meas *= expr_grad if isinstance(gate_param_j, ParameterExpression): expr_grad = DerivativeBase.parameter_expression_grad( gate_param_j, param_j) meas *= expr_grad term = meas @ term if m_i == 0 and k_i == 0 and m_j == 0 and k_j == 0: qfi_op = term else: # Product Rule qfi_op += term # Compute −4 * Re(〈∂kψ|ψ〉〈ψ|∂lψ〉) def phase_fix_combo_fn(x): return 4 * (-0.5) * (x[0] * np.conjugate(x[1]) + x[1] * np.conjugate(x[0])) phase_fix = ListOp([phase_fix_states[i], phase_fix_states[j]], combo_fn=phase_fix_combo_fn) # Add the phase fix quantities to the entries of the QFI # Get 4 * Re[〈∂kψ|∂lψ〉−〈∂kψ|ψ〉〈ψ|∂lψ〉] if not qfi_ops: qfi_ops = [qfi_op + phase_fix] else: qfi_ops += [qfi_op + phase_fix] qfi_operators.append(ListOp(qfi_ops)) # Return the full QFI return ListOp(qfi_operators)
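The phase_fix_combo_fn above is just -4 times the real part of the overlap product; a quick numerical check:

import numpy as np

x0, x1 = 0.3 + 0.4j, 0.1 - 0.2j
print(4 * (-0.5) * (x0 * np.conjugate(x1) + x1 * np.conjugate(x0)))  # (0.2+0j)
print(-4 * np.real(x0 * np.conjugate(x1)))                           # 0.2, same value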
def _plot(ys, yfits, yuqs, axis=None, xlabel=None): # =========================================================================================== """ Plot method for the FitResult object ==================================== Plots the input dataset(s), their fits, and uncertainty bands. """ complexy = np.any([np.iscomplex(y).any() for y in ys]) nSignals = len(ys) ncols = 2 if complexy else 1 fig, axs = plt.subplots(nSignals, ncols, figsize=[7 * ncols, 4 * nSignals]) axs = np.atleast_1d(axs).flatten() if axis is None: axis = [np.arange(len(y)) for y in ys] if not isinstance(axis, list): axis = [axis] axis = [np.real(ax) for ax in axis] if xlabel is None: xlabel = 'Array elements' n = 0 for i, (y, yfit, yuq) in enumerate(zip(ys, yfits, yuqs)): # Plot the experimental signal and fit axs[n].plot(axis[i], y.real, '.', color='grey') axs[n].plot(axis[i], yfit.real, color='#4550e6') if yuq.type != 'void': axs[n].fill_between(axis[i], yuq.ci(95)[:, 0].real, yuq.ci(95)[:, 1].real, alpha=0.4, linewidth=0, color='#4550e6') axs[n].set_xlabel(xlabel) axs[n].set_ylabel(f'Dataset #{i+1}') axs[n].legend(('Data (real)', 'Fit', '95%-CI'), loc='best', frameon=False) n += 1 if complexy: axs[n].plot(axis[i], y.imag, '.', color='grey') axs[n].plot(axis[i], yfit.imag, color='tab:orange') if yuq.type != 'void': axs[n].fill_between(axis[i], yuq.ci(95)[:, 0].imag, yuq.ci(95)[:, 1].imag, alpha=0.4, color='tab:orange', linewidth=0) axs[n].set_xlabel(xlabel) axs[n].set_ylabel(f'Dataset #{i+1}') axs[n].legend(('Data (imag)', 'Fit', '95%-CI'), loc='best', frameon=False) n += 1 plt.tight_layout() plt.autoscale(enable=True, axis='both', tight=True) return fig
def get_connectivity(self, measure_name, plot=False): """ Calculate spectral connectivity measure. Parameters ---------- measure_name : str Name of the connectivity measure to calculate. See :class:`Connectivity` for supported measures. plot : {False, None, Figure object}, optional Whether and where to plot the connectivity. If set to **False**, nothing is plotted. Otherwise set to the Figure object. If set to **None**, a new figure is created. Returns ------- measure : array, shape = [n_channels, n_channels, nfft] Values of the connectivity measure. fig : Figure object Instance of the figure in which was plotted. This is only returned if `plot` is not **False**. Raises ------ RuntimeError If the :class:`Workspace` instance does not contain a fitted VAR model. """ if self.connectivity_ is None: raise RuntimeError( "Connectivity requires a VAR model (run do_mvarica or fit_var first)" ) cm = getattr(self.connectivity_, measure_name)() cm = np.abs(cm) if np.any(np.iscomplex(cm)) else cm if plot is None or plot: fig = plot if self.plot_diagonal == 'fill': diagonal = 0 elif self.plot_diagonal == 'S': diagonal = -1 sm = np.abs(self.connectivity_.S()) sm /= np.max( sm ) # scale to 1 since components are scaled arbitrarily anyway fig = self.plotting.plot_connectivity_spectrum( sm, fs=self.fs_, freq_range=self.plot_f_range, diagonal=1, border=self.plot_outside_topo, fig=fig) else: diagonal = -1 fig = self.plotting.plot_connectivity_spectrum( cm, fs=self.fs_, freq_range=self.plot_f_range, diagonal=diagonal, border=self.plot_outside_topo, fig=fig) return cm, fig return cm
def c_abs(z): if np.all(np.iscomplex(z)): return np.where(np.real(z) >= 0, z, -z) return np.abs(z)
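Note that for all-complex input this is not a modulus; it only flips the sign of entries whose real part is negative. Calling c_abs as defined above:

import numpy as np

print(c_abs(np.array([-1 + 2j, 3 - 4j])))  # [1.-2.j 3.-4.j], signs flipped
print(c_abs(np.array([-1.0, 3.0])))        # [1. 3.]: plain np.abs for real input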
def __ge__(self, other): if type(other) == ut: return self.t_ep16 >= other.t_ep16 elif np.iscomplex(other): return self.t_ep16 >= other else: return self.ut() >= other
def _gradient_states(self, state_op: StateFn, meas_op: Optional[OperatorBase] = None, target_params: Optional[ Union[ParameterExpression, ParameterVector, List[ParameterExpression]]] = None ) -> ListOp: """Generate the gradient states. Args: state_op: The operator representing the quantum state for which we compute the gradient. meas_op: The operator representing the observable for which we compute the gradient. target_params: The parameters we are taking the gradient wrt: ω Returns: ListOp of StateFns as quantum circuits which are the states w.r.t. which we compute the gradient. If a parameter appears multiple times, one circuit is created per parameterized gate to compute the product rule. Raises: AquaError: If one of the circuits could not be constructed. TypeError: If the operator is of an unsupported type. """ state_qc = deepcopy(state_op.primitive) # Define the working qubit to realize the linear combination of unitaries qr_work = QuantumRegister(1, 'work_qubit_lin_comb_grad') work_q = qr_work[0] if not isinstance(target_params, (list, np.ndarray)): target_params = [target_params] if len(target_params) > 1: states = None additional_qubits: Tuple[List[Qubit], List[Qubit]] = ([work_q], []) for param in target_params: if param not in state_qc._parameter_table.get_keys(): op = ~Zero @ One else: param_gates = state_qc._parameter_table[param] for m, param_occurence in enumerate(param_gates): coeffs, gates = self._gate_gradient_dict(param_occurence[0])[param_occurence[1]] # construct the states for k, gate_to_insert in enumerate(gates): grad_state = QuantumCircuit(*state_qc.qregs, qr_work) grad_state.compose(state_qc, inplace=True) # apply Hadamard on work_q self.insert_gate(grad_state, param_occurence[0], HGate(), qubits=[work_q]) # Fix work_q phase coeff_i = coeffs[k] sign = np.sign(coeff_i) is_complex = np.iscomplex(coeff_i) if sign == -1: if is_complex: self.insert_gate(grad_state, param_occurence[0], SdgGate(), qubits=[work_q]) else: self.insert_gate(grad_state, param_occurence[0], ZGate(), qubits=[work_q]) else: if is_complex: self.insert_gate(grad_state, param_occurence[0], SGate(), qubits=[work_q]) # Insert controlled, intercepting gate - controlled by |0> if isinstance(param_occurence[0], UGate): if param_occurence[1] == 0: self.insert_gate(grad_state, param_occurence[0], RZGate(param_occurence[0].params[2])) self.insert_gate(grad_state, param_occurence[0], RXGate(np.pi / 2)) self.insert_gate(grad_state, param_occurence[0], gate_to_insert, additional_qubits=additional_qubits) self.insert_gate(grad_state, param_occurence[0], RXGate(-np.pi / 2)) self.insert_gate(grad_state, param_occurence[0], RZGate(-param_occurence[0].params[2])) elif param_occurence[1] == 1: self.insert_gate(grad_state, param_occurence[0], gate_to_insert, after=True, additional_qubits=additional_qubits) else: self.insert_gate(grad_state, param_occurence[0], gate_to_insert, additional_qubits=additional_qubits) else: self.insert_gate(grad_state, param_occurence[0], gate_to_insert, additional_qubits=additional_qubits) grad_state.h(work_q) state = np.sqrt(np.abs(coeff_i)) * state_op.coeff * CircuitStateFn( grad_state) # Chain Rule parameter expressions gate_param = param_occurence[0].params[param_occurence[1]] if meas_op: if gate_param == param: state = meas_op @ state else: if isinstance(gate_param, ParameterExpression): expr_grad = DerivativeBase.parameter_expression_grad(gate_param, param) state = (expr_grad * meas_op) @ state else: state = ~Zero @ One else: if gate_param == param: state = ListOp([state],
combo_fn=partial(self._grad_combo_fn, state_op=state_op)) else: if isinstance(gate_param, ParameterExpression): expr_grad = DerivativeBase.parameter_expression_grad(gate_param, param) state = expr_grad * ListOp( [state], combo_fn=partial(self._grad_combo_fn, state_op=state_op)) else: state = ~Zero @ One if m == 0 and k == 0: op = state else: # Product Rule op += state if len(target_params) > 1: if not states: states = [op] else: states += [op] else: return op if len(target_params) > 1: return ListOp(states) else: return op
def drawPlot(): """Function to set up the lines, calculate the eigenvalues and SVD """ global tlr, line1old, line2old, line3old, line4old global text1old, text2old, text3old, text4old, oldModeText global egv1, egv2, singv1, singv2, bSVD global egv1txt, egv2txt, indTxt, svd1txt, svd2txt global A, fig, ax, root, w # (w is made a global as it is used in resetAxes()) # update the figure text to indicate current mode (eigen/svd) if bSVD: currmode = 'SVD' else: currmode = 'Eigen' # Calculate the eigenvalues and set the axis limits accordingly w, v = np.linalg.eig(A) # w contains the eigen values, v contains the eigen vectors resetAxes() detA = np.linalg.det(A) # determinant rankA = np.linalg.matrix_rank(A) # rank traceA = np.trace(A) # trace # complexity test complexEigenVecs = np.iscomplex(v).any() complexEigenVals = np.iscomplex(w).any() # fixed text to show the array arrtext = "Matrix A = \n[[%1.3f, %1.3f],\n[%1.3f, %1.3f]]\n\ndet(A) \ = \n%1.3f\n\ntrace(A) = \n%1.3f\n\nrank(A) = \n%d" \ % (A[0, 0], A[0, 1], A[1, 0], A[1, 1], detA, traceA, rankA) fig.text(0.013, 0.20, arrtext, fontsize='medium', color='b', \ bbox=dict(facecolor='white', edgecolor='white', alpha=1.0), zorder=0) # fixed text to indicate mode (eigen mode/svd mode) oldModeText.set_visible(False) modeText = fig.text(0.04, 0.8, currmode, fontsize='xx-large', \ fontweight='semibold', color='#FF8000') oldModeText = modeText # starting lines/vectors xstart = np.matrix([1, 0]).T ystart = np.matrix([0, 1]).T # for svd mode Axstart = np.dot(A, xstart) Aystart = np.dot(A, ystart) # for svd mode # Plot the columns of the matrix A col1, = ax.plot([0.0, A[0, 0]], [0.0, A[1, 0]], 'k--', alpha=0.6, lw='3', \ label='$col_1(A)$') col2, = ax.plot([0.0, A[0, 1]], [0.0, A[1, 1]], 'k--', alpha=0.4, lw='2', \ label='$col_2 (A)$') # plot the eigen vectors (it will not be seen initially as the visibility is false) # w, v = np.linalg.eig(A) # moved up egv1, = ax.plot([0.0, v[0, 0]], [0.0, v[1, 0]], 'b', lw='2', alpha=0.5, \ aa=True, label='$eigvec_1$', visible=False) egv2, = ax.plot([0.0, v[0, 1]], [0.0, v[1, 1]], 'r', lw='2', alpha=0.5, \ aa=True, label='$eigvec_2$', visible=False) egv1str = "e0=%1.2f, v0=[%1.3f,%1.3f]'" % (w[0], v[0, 0], v[1, 0]) egv2str = "e1=%1.2f, v1=[%1.3f,%1.3f]'" % (w[1], v[0, 1], v[1, 1]) egv1txt = ax.text(0.01, 0.06, egv1str, ha='left', color='r', \ bbox=dict(facecolor='white', edgecolor='white', alpha=1.0), \ visible=False, transform=ax.transAxes) egv2txt = ax.text(0.01, 0.02, egv2str, ha='left', color='r', \ bbox=dict(facecolor='white', edgecolor='white', alpha=1.0), \ visible=False, transform=ax.transAxes) # calculate the svd U, S, V = np.linalg.svd(A) # complexity test complexSingVecs = np.iscomplex(U).any() complexSingVals = np.iscomplex(S).any() # plot the svd (it will not be seen initially as the visibility is false) singv1, = ax.plot([0.0, U[0, 0]], [0.0, U[1, 0]], 'g--', lw='2', alpha=0.5, \ aa=True, label='$singvec_1$', visible=False) singv2, = ax.plot([0.0, U[0, 1]], [0.0, U[1, 1]], 'm--', lw='2', alpha=0.5, \ aa=True, label='$singvec_2$', visible=False) svd1str = "s0=%1.2f, u0=[%1.3f,%1.3f]'" % (S[0], U[0, 0], U[1, 0]) svd2str = "s1=%1.2f, u1=[%1.3f,%1.3f]'" % (S[1], U[0, 1], U[1, 1]) svd1txt = ax.text(0.01, 0.06, svd1str, ha='left', color='r', \ bbox=dict(facecolor='white', edgecolor='white', alpha=1.0), visible=False, \ transform=ax.transAxes)
svd2txt = ax.text(0.01, 0.02, svd2str, ha='left', color='r', \ bbox=dict(facecolor='white', edgecolor='white', alpha=1.0), visible=False, \ transform=ax.transAxes) # lines related to just the eigen vectors line1, = ax.plot([0.0, xstart[0, 0]], [0.0, xstart[1, 0]], aa=True, c='r') line2, = ax.plot([0.0, Axstart[0, 0]], [0.0, Axstart[1, 0]], aa=True, c='b') text1 = ax.text((0.0 + 0.8 * tlr * xstart[0, 0]), (0.0 + 0.8 * tlr * xstart[1, 0]), \ '$x$', fontsize=15, color='r', bbox=dict(facecolor='white', edgecolor='white', \ alpha=0.5)) text2 = ax.text((0.0 + tlr * Axstart[0, 0]), (0.0 + tlr * Axstart[1, 0]), '$Ax$', \ fontsize=15, color='b', bbox=dict(facecolor='white', edgecolor='white', alpha=0.5)) line1old, line2old = line1, line2 text1old, text2old = text1, text2 # lines related to just the svd vectors (depending on the svdVis) line3, = ax.plot([0.0, ystart[0, 0]], [0.0, ystart[1, 0]], aa=True, c='m', \ visible=svdVis) line4, = ax.plot([0.0, Aystart[0, 0]], [0.0, Aystart[1, 0]], aa=True, c='g', \ visible=svdVis) text3 = ax.text((0.0 + 0.8 * tlr * ystart[0, 0]), (0.0 + 0.8 * tlr * ystart[1, 0]), \ '$y$', fontsize=15, color='m', bbox=dict(facecolor='white', edgecolor='white', \ alpha=0.5), visible=svdVis) text4 = ax.text((0.0 + tlr * Aystart[0, 0]), (0.0 + tlr * Aystart[1, 0]), '$Ay$', \ fontsize=15, color='g', bbox=dict(facecolor='white', edgecolor='white', \ alpha=0.5), visible=svdVis) line3old, line4old = line3, line4 text3old, text4old = text3, text4 # Text to indicate complex/real nature of vectors and values if complexEigenVals and not bSVD: comValTxt = ax.text(0 - 0.5 * ax.get_xlim()[1], 0.5 * ax.get_ylim()[1], \ "Complex eigen values", ha='left', \ color='y', fontsize='large', fontweight='bold', alpha=0.4) if complexEigenVecs and not bSVD: comVecTxt = ax.text(0 - 0.5 * ax.get_xlim()[1], 0.4 * ax.get_ylim()[1], \ "Complex eigen vectors", ha='left', \ color='y', fontsize='large', fontweight='bold', alpha=0.4) if complexSingVals and bSVD: comValTxt = ax.text(0 - 0.5 * ax.get_xlim()[1], 0.5 * ax.get_ylim()[1], \ "Complex singular values", ha='left', \ color='y', fontsize='large', fontweight='bold', alpha=0.4) if complexSingVecs and bSVD: comVecTxt = ax.text(0 - 0.5 * ax.get_xlim()[1], 0.4 * ax.get_ylim()[1], \ "Complex singular vectors", ha='left', \ color='y', fontsize='large', fontweight='bold', alpha=0.4) # Text to indicate goal if not bSVD: indStr = "Make A*x parallel to x ." # don't change space else: indStr = "Make A*x perpendicular to A*y ." # don't change space indTxt = fig.text(0.3, 0.03, indStr, ha='left', color='g', fontsize='large', \ fontweight='bold', bbox=dict(facecolor='white', edgecolor='white', alpha=1.0), \ visible=True)
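The complexity tests above fire on matrices like rotations, whose eigenpairs are complex while their SVD stays real; a compact check:

import numpy as np

A = np.array([[0.0, -1.0], [1.0, 0.0]])  # 90-degree rotation
w, v = np.linalg.eig(A)
print(np.iscomplex(w).any())  # True: eigenvalues are +i and -i
U, S, V = np.linalg.svd(A)
print(np.iscomplex(U).any())  # False: singular vectors of a real matrix are real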
def _find_and_push_spots(spots, i_frame, c_probs, scales=None, c_ratio=0.5, p_thresh=0.3, r_min=0, r_max=1e6, crop_box=None): n_dims = len(c_probs.shape) assert n_dims == 2 or n_dims == 3, ( f'n_dims: len(c_probs.shape) should be 2 or 3 but got {n_dims}' ) origins = crop_box[:n_dims] if crop_box is not None else (0,) * n_dims labels = skimage.measure.label(p_thresh < c_probs) regions = skimage.measure.regionprops(labels, c_probs) if scales is None: scales = (1.,) * n_dims # https://forum.image.sc/t/regionprops-inertia-tensor-eigvals/23559/2 if n_dims == 2: idx = ((2, 1, 1, 0), (0, 1, 1, 2)) min_area = math.pi * (r_min * c_ratio) ** 2 / reduce(mul, scales) elif n_dims == 3: idx = ((2, 1, 1, 1, 0, 0, 1, 0, 0), (0, 1, 0, 1, 2, 1, 0, 1, 0), (0, 0, 1, 0, 0, 1, 1, 1, 2)) min_area = 4 / 3 * math.pi * \ (r_min * c_ratio) ** 3 / reduce(mul, scales) for i, region in enumerate(regions): if region.area < min_area: # print('skip a spot with volume {} below threshold {}' # .format(region.area, min_area)) continue centroid = [(o + c) * s for c, s, o in zip(region.centroid, scales, origins)] bbox_shape = [region.bbox[i + n_dims] - region.bbox[i] for i in range(n_dims)] # correction for floor effect during label generation c_scales = [scales[i] * (bbox_shape[i] + 1.) / bbox_shape[i] for i in range(n_dims)] moments_central = scaled_moments_central( region.image, c_scales, order=2) cov = moments_central[idx].reshape((n_dims, n_dims)) if not cov.any(): # if all zeros continue cov /= moments_central[(0,) * n_dims] eigvals, eigvecs = np.linalg.eigh(cov) if ((eigvals < 0).any() or np.iscomplex(eigvals).any() or np.iscomplex(eigvecs).any()): print(f'skip a spot with invalid eigen value(s): {eigvals}') continue # https://stackoverflow.com/questions/22146383/covariance-matrix-of-an-ellipse # https://github.com/scikit-image/scikit-image/blob/master/skimage/measure/_regionprops.py#L288 radii = 2 * np.sqrt(eigvals) radii /= (c_ratio / region.mean_intensity) radii = np.array([radii[i] if 0 < radii[i] else max( r_min, scales[i]) for i in range(len(radii))]) if (radii < r_min).any(): # print(f'skip a spot with radii {radii} below threshold {r_min}') continue radii = np.minimum(r_max, radii) cov = eigvecs.dot(np.diag(radii ** 2)).dot(eigvecs.T) def flatten(o): return [item for sublist in o for item in sublist] spot = { 't': i_frame, 'pos': centroid[::-1], 'covariance': flatten(cov)[::-1] } spots.append(spot)
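The eigen-decomposition above converts each region's second central moments into ellipsoid semi-axes; the core of that geometry in isolation (a sketch outside the skimage pipeline, using a toy covariance):

import numpy as np

cov = np.array([[4.0, 0.0], [0.0, 1.0]])  # toy 2D covariance matrix
eigvals, eigvecs = np.linalg.eigh(cov)
print(2 * np.sqrt(eigvals))  # [2. 4.]: semi-axes at roughly the 2-sigma contour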
def huge_error(x, factor=100.): if np.iscomplex(x): return (1+1j)*factor*abs(x) else: return abs(x)*factor
def evolve(self, evolve_dt=None, nsteps=None, evolve_time=None): if evolve_dt is not None: assert np.iscomplex(evolve_dt) and evolve_dt.imag < 0 if evolve_time is not None: assert np.iscomplex(evolve_time) and evolve_time.imag < 0 super().evolve(evolve_dt, nsteps, evolve_time)
(x.shape[0], x.shape[1], x.shape[2] / 2, x.shape[2] / 2)), lambda x: abs(x), lambda x: x > 0.5, lambda x: x.rechunk((4, 4, 4)), lambda x: x.rechunk((2, 2, 1)), pytest.param(lambda x: da.einsum("ijk,ijk", x, x), ), lambda x: np.isneginf(x), lambda x: np.isposinf(x), pytest.param( lambda x: np.isreal(x), marks=pytest.mark.skipif( not IS_NEP18_ACTIVE, reason="NEP-18 support is not available in NumPy"), ), pytest.param( lambda x: np.iscomplex(x), marks=pytest.mark.skipif( not IS_NEP18_ACTIVE, reason="NEP-18 support is not available in NumPy"), ), pytest.param( lambda x: np.real(x), marks=pytest.mark.skipif( not IS_NEP18_ACTIVE, reason="NEP-18 support is not available in NumPy"), ), pytest.param( lambda x: np.imag(x), marks=pytest.mark.skipif( not IS_NEP18_ACTIVE, reason="NEP-18 support is not available in NumPy"),
def PNG(X, log=True, verbosity = 1): # Assuming X consists of N points on Gr(p, n), p < n # options: # log: print projection info # return an N x p(n-p) score array N, n, p = X.shape cpx = np.iscomplex(X).any() # true if X is complex-valued scores = np.zeros((N,int(p*(n-p))), dtype = X.dtype) scores[:] = np.NaN X_old = X.copy() # Gr(p, n) -> Gr(p, n-1) -> ... -> Gr(p, p+1) for i in range(n-1, p, -1): if log: print(f'Gr({p}, {i+1}) -> Gr({p}, {i})') #X_new, A, B = NG_dr(X_old, i, verbosity) X_new, A, A_perp, b = NG_dr1(X_old, verbosity) #A_perp = ortho_complement(A)[:,0] AAT = np.matmul(A, A.conj().T) #IAATB = np.matmul(np.eye(X_old.shape[1]) - AAT, B) A_perpBT = np.matmul(A_perp, b.T) #X_new_embedded = np.array([np.linalg.qr(np.matmul(A, X_new[i]) + IAATB)[0] for i in range(N)]) X_new_embedded = np.array([np.linalg.qr(np.matmul(A, X_new[i]) + A_perpBT)[0] for i in range(N)]) # compute scores if cpx: gr = ComplexGrassmann(X_old.shape[1], X_old.shape[2]) else: gr = Grassmann(X_old.shape[1], X_old.shape[2]) for j in range(N): scores[j,((n-i-1)*p):(n-i)*p] = gr.dist(X_old[j], X_new_embedded[j]) * \ np.matmul(X_old[j].conj().transpose(), A_perp)[:,0] X_old = X_new if p > 1: # Gr(p, p+1) -> Gr(1, p+1) X_new = np.zeros((N, p+1, 1), dtype=X_old.dtype) if log: print(f'Gr({p}, {p+1}) -> Gr(1, {p+1})') for i in range(N): X_new[i] = ortho_complement(X_old[i]) X_old = X_new # Gr(1, p+1) -> ... -> Gr(1,2) for i in range(p, 1, -1): if log: print(f'Gr(1, {i+1}) -> Gr(1, {i})') #X_new, A, B = NG_dr(X_old, i, verbosity) X_new, A, A_perp, b = NG_dr1(X_old, verbosity) #A_perp = ortho_complement(A)[:,0] AAT = np.matmul(A, A.conj().T) #IAATB = np.matmul(np.eye(X_old.shape[1]) - AAT, B) A_perpBT = np.matmul(A_perp, b.T) #X_new_embedded = np.array([np.linalg.qr(np.matmul(A, X_new[i]) + IAATB)[0] for i in range(N)]) X_new_embedded = np.array([np.linalg.qr(np.matmul(A, X_new[i]) + A_perpBT)[0] for i in range(N)]) # compute scores if cpx: gr = ComplexGrassmann(X_old.shape[1], X_old.shape[2]) else: gr = Grassmann(X_old.shape[1], X_old.shape[2]) for j in range(N): scores[j,(n-p)*p-i] = gr.dist(X_old[j], X_new_embedded[j]) * \ np.matmul(X_old[j].conj().transpose(), A_perp) X_old = X_new # Gr(1,2) -> NGM if log: print('Gr(1, 2) -> NGM') if cpx: gr = ComplexGrassmann(2,1) else: gr = Grassmann(2,1) NGM = compute_centroid(gr, X_new) v_0 = gr.log(NGM, X_new[0]) v_0 = v_0/np.linalg.norm(v_0) for j in range(N): v = gr.log(NGM, X_new[j])/v_0 scores[j, (n-p)*p-1] = v[0] # signed distance return scores[:,::-1]
def main(argv=None): #%% Check argv if argv == None: argv = sys.argv start = time.time() ver='1.1.2'; date=20210209; author="Y. Morishita" print("\n{} ver{} {} {}".format(os.path.basename(argv[0]), ver, date, author), flush=True) print("{} {}".format(os.path.basename(argv[0]), ' '.join(argv[1:])), flush=True) #%% Set default infiletxt = [] out_prefix = '' resampleAlg = 'bilinear' #'cubicspline'# 'near' # 'cubic' out_stats_flag = False compress_option = ['COMPRESS=DEFLATE', 'PREDICTOR=3'] compress_option_uint = ['COMPRESS=DEFLATE', 'PREDICTOR=1'] d9 = Decimal('1E-9') ## ~0.1mm in deg #%% Read options try: try: opts, args = getopt.getopt(argv[1:], "hf:o:r:", ["help", "out_stats"]) except getopt.error as msg: raise Usage(msg) for o, a in opts: if o == '-h' or o == '--help': print(__doc__) return 0 elif o == '-f': infiletxt = a elif o == '-o': out_prefix = a elif o == '-r': resampleAlg = a elif o == '--out_stats': out_stats_flag = True if not infiletxt: raise Usage('No input text file given, -f is not optional!') if not os.path.exists(infiletxt): raise Usage('No {} exists!'.format(infiletxt)) except Usage as err: print("\nERROR:", file=sys.stderr, end='') print(" "+str(err.msg), file=sys.stderr) print("\nFor help, use -h or --help.\n", file=sys.stderr) return 2 #%% Set input GeoTIFF files data_tifs = [] LOSe_tifs = [] LOSn_tifs = [] with open(infiletxt) as f: line = f.readline().split() while line: data_tifs.append(line[0]) LOSe_tifs.append(line[1]) LOSn_tifs.append(line[2]) line = f.readline().split() n_data = len(data_tifs) print('\nNumber of input LOS data: {}'.format(n_data)) #%% Identify area with at least 1 each from E and W ### All latlon values in this script are in pixel registration print('\nRead area of each GeoTIFF... ') lon_w_E = lon_w_W = lat_s_E = lat_s_W = np.inf lon_e_E = lon_e_W = lat_n_E = lat_n_W = -np.inf dlon = dlat = 0.0 for i in range(n_data): LOSe1 = gdal.Open(LOSe_tifs[i]) width1 = LOSe1.RasterXSize length1 = LOSe1.RasterYSize lon_w1, dlon1, _, lat_n1, _, dlat1 = LOSe1.GetGeoTransform() lon_w1 = Decimal(lon_w1).quantize(d9) lat_n1 = Decimal(lat_n1).quantize(d9) dlat1 = Decimal(dlat1).quantize(d9) dlon1 = Decimal(dlon1).quantize(d9) lon_e1 = lon_w1 + dlon1*width1 lat_s1 = lat_n1 + dlat1*length1 ### Identify whether from E or W from LOSe data if np.nanmedian(LOSe1.ReadAsArray()) > 0: ## LOSe > 0 -> From East EW = 'East' else: EW = 'West' print('\nLOS{}: {}'.format(i+1, data_tifs[i])) print(' Observed from {}'.format(EW)) print(' Area : {}/{}/{}/{} deg'.format(lon_w1, lon_e1, lat_s1, lat_n1)) print(' Resolution: {}/{} deg'.format(dlon1, dlat1)) print(' Size : {} x {}'.format(width1, length1)) ### Set max area for each direction and max resolution if EW == 'East': if lon_w1 < lon_w_E: lon_w_E = lon_w1 if lon_e1 > lon_e_E: lon_e_E = lon_e1 if lat_s1 < lat_s_E: lat_s_E = lat_s1 if lat_n1 > lat_n_E: lat_n_E = lat_n1 elif EW == 'West': if lon_w1 < lon_w_W: lon_w_W = lon_w1 if lon_e1 > lon_e_W: lon_e_W = lon_e1 if lat_s1 < lat_s_W: lat_s_W = lat_s1 if lat_n1 > lat_n_W: lat_n_W = lat_n1 if np.abs(dlon1) > np.abs(dlon): dlon = dlon1 if np.abs(dlat1) > np.abs(dlat): dlat = dlat1 ### Check if both from E and W used if lon_w_E == np.inf: print('\nERROR: No LOS data from East!\n', file=sys.stderr) return 2 elif lon_w_W == np.inf: print('\nERROR: No LOS data from West!\n', file=sys.stderr) return 2 ### Set common area between E and W lon_w = lon_w_E if lon_w_E > lon_w_W else lon_w_W lon_e = lon_e_E if lon_e_E < lon_e_W else lon_e_W lat_s = lat_s_E if lat_s_E > lat_s_W else lat_s_W 
lat_n = lat_n_E if lat_n_E < lat_n_W else lat_n_W width = int((lon_e-lon_w)/dlon) lon_e = lon_w + dlon*width length = int((lat_s-lat_n)/dlat) lat_s = lat_n + dlat*length print('\nCommon area: {}/{}/{}/{}'.format(lon_w, lon_e, lat_s, lat_n)) print('Resolution : {}/{} deg'.format(dlon, dlat)) print('Size : {} x {}\n'.format(width, length)) #%% Resample the input data using gdalwarp data_list = [] LOSe_list = [] LOSu_list = [] for i in range(n_data): print('Read and resample {}...'.format(data_tifs[i])) data_list.append(gdal.Warp("", data_tifs[i], format='MEM', outputBounds=(lon_w, lat_s, lon_e, lat_n), width=width, height=length, resampleAlg=resampleAlg, srcNodata=np.nan).ReadAsArray()) LOSe_list.append(gdal.Warp("", LOSe_tifs[i], format='MEM', outputBounds=(lon_w, lat_s, lon_e, lat_n), width=width, height=length, resampleAlg=resampleAlg, srcNodata=np.nan).ReadAsArray()) _LOSn = gdal.Warp("", LOSn_tifs[i], format='MEM', outputBounds=(lon_w, lat_s, lon_e, lat_n), width=width, height=length, resampleAlg=resampleAlg, srcNodata=np.nan).ReadAsArray() _LOSu = np.sqrt(1-_LOSn**2-LOSe_list[i]**2) _LOSu[np.iscomplex(_LOSu)] = 0 LOSu_list.append(_LOSu) del _LOSn, _LOSu print('') #%% Extract valid pixels with at least 1 each from E and W directions n_data_fromE = n_data_fromW = np.uint8(np.zeros_like(data_list[0])) for i in range(n_data): if np.nanmedian(LOSe_list[i]) >= 0: ## From East n_data_fromE = n_data_fromE + ~np.isnan(data_list[i]) if np.nanmedian(LOSe_list[i]) < 0: ## From West n_data_fromW = n_data_fromW + ~np.isnan(data_list[i]) n_data_total = n_data_fromE + n_data_fromW bool_valid = np.bool8(n_data_fromE) & np.bool8(n_data_fromW) print('\nNumber of valid pixels: {}'.format(bool_valid.sum())) data_part_list = [] LOSe_part_list = [] LOSu_part_list = [] for i in range(n_data): data_part_list.append(data_list[i][bool_valid]) data_part_list[i][np.isnan(data_part_list[i])] = 0 LOSe_part_list.append(LOSe_list[i][bool_valid]) LOSe_part_list[i][np.isnan(LOSe_part_list[i])] = 0 LOSu_part_list.append(LOSu_list[i][bool_valid]) LOSu_part_list[i][np.isnan(LOSu_part_list[i])] = 0 #%% Decompose ## Assuming no NS displacement, ## [dlon1, ..., dlosn].T = [e1, n1, u1; ...; en, nn, un][de, dn, du].T ## = [e1, u1; ...; en, un][de, du].T ## b=A*x -> x=(A.T*A)^(-1)*A.T*b ## [de, du].T = [a11, a12; a12, a22]^(-1)*[be, bu].T ## = 1/det*[a22, -a12; -a12, a11][be, bu].T ## where a11=e1^2+...+en^2, a12=e1*u1+...+en*un, a22=u1^2+...+un^2, ## det=a11*a22-a12^2, be=e1*los1+...+en*losn, bu=u1*los1+...+un*losn print('\nDecompose {} LOS displacements...'.format(n_data)) a11 = a12 = a22 = be = bu = 0 for i in range(n_data): a11 = a11+LOSe_part_list[i]**2 a12 = a12+LOSe_part_list[i]*LOSu_part_list[i] a22 = a22+LOSu_part_list[i]**2 be = be+LOSe_part_list[i]*data_part_list[i] bu = bu+LOSu_part_list[i]*data_part_list[i] det = (a11*a22-a12**2) det[det==0] = np.nan ## To avoid zero division detinv = 1/det ew_part = detinv*(a22*be-a12*bu) ud_part = detinv*(-a12*be+a11*bu) ew = np.zeros_like(bool_valid, dtype=np.float32)*np.nan ew[bool_valid] = ew_part ud = np.zeros_like(bool_valid, dtype=np.float32)*np.nan ud[bool_valid] = ud_part #%% Save geotiff outfileEW = out_prefix + 'EW.geo.tif' outfileUD = out_prefix + 'UD.geo.tif' io_lib.make_geotiff(ew, lat_n, lon_w, dlat, dlon, outfileEW, compress_option, np.nan) io_lib.make_geotiff(ud, lat_n, lon_w, dlat, dlon, outfileUD, compress_option, np.nan) #%% Stats if out_stats_flag: if n_data >= 3: for i in range(n_data): outfile_resid = out_prefix + 'resid_LOS{}.geo.tif'.format(i+1) 
data_part_list[i][data_part_list[i]==0] = np.nan resid_los_part = data_part_list[i] - \ (LOSe_part_list[i]*ew_part + LOSu_part_list[i]*ud_part) resid_los = np.zeros_like(bool_valid, dtype=np.float32)*np.nan resid_los[bool_valid] = resid_los_part io_lib.make_geotiff(resid_los, lat_n, lon_w, dlat, dlon, outfile_resid, compress_option, np.nan) ### n_data outfile_n_data = out_prefix + 'n_data_fromE.geo.tif' io_lib.make_geotiff(n_data_fromE, lat_n, lon_w, dlat, dlon, outfile_n_data, compress_option_uint) outfile_n_data = out_prefix + 'n_data_fromW.geo.tif' io_lib.make_geotiff(n_data_fromW, lat_n, lon_w, dlat, dlon, outfile_n_data, compress_option_uint) outfile_n_data = out_prefix + 'n_data_total.geo.tif' io_lib.make_geotiff(n_data_total, lat_n, lon_w, dlat, dlon, outfile_n_data, compress_option_uint) #%% Finish elapsed_time = time.time()-start hour = int(elapsed_time/3600) minute = int(np.mod((elapsed_time/60),60)) sec = int(np.mod(elapsed_time,60)) print("\nElapsed time: {0:02}h {1:02}m {2:02}s".format(hour,minute,sec)) print('\n{} Successfully finished!!\n'.format(os.path.basename(argv[0]))) print('Output: {}'.format(outfileEW), flush=True) print(' {}'.format(outfileUD), flush=True) print('')
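A toy check of the closed-form 2x2 normal equations in the Decompose block above, with synthetic values (one LOS from East, one from West; the numbers are illustrative only):

import numpy as np

e = np.array([0.6, -0.6])        # east components of the two LOS vectors
u = np.array([0.8, 0.8])         # up components
de_true, du_true = 0.02, -0.01
los = e * de_true + u * du_true  # synthetic LOS observations
a11, a12, a22 = (e**2).sum(), (e*u).sum(), (u**2).sum()
be, bu = (e*los).sum(), (u*los).sum()
det = a11 * a22 - a12**2
print((a22*be - a12*bu) / det, (-a12*be + a11*bu) / det)  # 0.02 -0.01 recovered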
# find index of closest value for E{z(t+1)} m = np.argmin(np.abs(zvec - zvec[i] * phi)) for j in range(0, nptsk): # for all values of k(t) maxval = -10000000000 for l in range(0, nptsk): # search over all values of k(t+1) if theta == 1: temp = np.log(kvec[j]**alpha * np.exp( (1 - alpha) * zvec[i]) + (1 - delta) * kvec[j] - kvec[l] * (1 + g) * (1 + n)) + beta * value[m, l] else: temp = (((kvec[j]**alpha*np.exp((1-alpha)*zvec[i]) +(1-delta)*kvec[j]-kvec[l]*(1+g)*(1+n))**(1-theta)-1)) \ / (1-theta) + beta*value[m,l] # print i, j, temp if np.iscomplex(temp): temp = -100000000 if np.isnan(temp): temp = -100000000 if temp > maxval: maxval = temp newval[i, j] = temp trans[i, j] = kvec[l] # print newval distance = np.mean(np.abs(value / newval - 1.0)) print(count, distance) for i in range(0, nptsz): for j in range(0, nptsk): value[i, j] = newval[i, j] # fit a polynomial
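For context, a self-contained value-iteration sketch of the theta == 1 (log utility) case of the Bellman update above, with the shock fixed at z = 0 and illustrative parameter values (alpha, beta, delta and the grid are assumptions; the growth terms g and n are dropped):

import numpy as np

alpha, beta, delta = 0.35, 0.98, 0.05
kvec = np.linspace(0.5, 20.0, 200)
value = np.zeros_like(kvec)
for _ in range(2000):
    # consumption implied by every (k, k') pair; infeasible pairs get -1e10
    c = kvec[:, None]**alpha + (1 - delta) * kvec[:, None] - kvec[None, :]
    util = np.where(c > 0, np.log(np.maximum(c, 1e-12)), -1e10)
    newval = (util + beta * value[None, :]).max(axis=1)
    if np.abs(newval - value).max() < 1e-8:
        break
    value = newval
print(value[:3])  # converged value function on the first grid points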
print(np.random.normal(0,1)) 14th que=GENERATE 15 RANDOM NOS import numpy as np print(np.random.normal(0,1,15)) 15th que=CREATE VECTOR FROM 15-56 AND PRINT ALL NOS EXCEPT FIRST AND LAST import numpy as np a=np.arange(15,56) print(a[1:-1]) 2nd que=CHECK ELEMENTS ARE REAL,COMPLEX or SCALAR import numpy as np a=np.array([1,2,3,4+5j,10,2+3j,6+8j]) print(np.isreal(a)) print(np.iscomplex(a)) print(np.isscalar(a)) ''' ***DAY 11*** 1st que=CREATE ARRAY OF 3*4 AND ITERATE IT import numpy as np ar=np.arange(1,13) ar=ar.reshape(3,4) print(ar) for i in np.nditer(ar): print(i) 2nd que=CREATE NUMPY ARRAY OF SIZE 10 AND EVENLY DISTRIBUTED BETWEEN 5-50 import numpy as np a=np.linspace(5,50,10) print(a)
def plot_transform(transform, scales=None, multiview=False): """ Display the different bands on the requested scales. Parameters ---------- transform: WaveletTransformBase derived instance a wavelet decomposition. scales: list of int, default None the desired scales, if None compute at all scales. multiview: bool, default False if True use a slider to select a specific band. """ # Set default scales scales = scales or range(transform.nb_scale) # Create application and tab widget app = pyqtgraph.mkQApp() tabs = QtGui.QTabWidget() tabs.setWindowTitle("Wavelet Transform") # Go through each scale pen = pyqtgraph.intColor(2) for scale in scales: # Create the plots for this scales with scrolling possibilities scroller = QtGui.QScrollArea() tabs.addTab(scroller, "Scale {0}".format(scale)) # Go through each band of the current scale # > using multiview # TODO: update this code if multiview: raise NotImplementedError( "Multiview transform view not yet implemented.") window = pyqtgraph.image(numpy.asarray(transform[scale])) scroller.setWidget(window) # > using mosaic else: window = pyqtgraph.GraphicsWindow() scroller.setWidget(window) scale_data = transform[scale] if not isinstance(scale_data, list): scale_data = [scale_data] for subband_data in scale_data: # Deal with complex data if numpy.iscomplex(subband_data).any(): subband_data = numpy.abs(subband_data) subband_data = numpy.lib.pad( subband_data, 1, "constant", constant_values=subband_data.max()) # Select viewer if subband_data.ndim == 1: ax = window.addPlot() ax.plot(subband_data, pen=pen) elif subband_data.ndim == 2: box = window.addViewBox() box.setAspectLocked(True) image = pyqtgraph.ImageItem(subband_data) box.addItem(image) else: raise ValueError("This function currently supports only " "1D or 2D data.") window.nextRow() # Display the tab tabs.show() # Run application app.exec_()