def fftconv(a, b, axes=(0, 1)):
    """Multi-dimensional (circular) convolution via the DFT.

    Parameters
    ----------
    a, b : array_like
        Input arrays.
    axes : sequence of ints, optional (default (0, 1))
        Axes on which to perform the convolution.

    Returns
    -------
    ab : ndarray
        Convolution of `a` and `b` along the specified axes.
    """
    # Two real inputs allow the cheaper real-to-complex transform pair.
    both_real = np.isrealobj(a) and np.isrealobj(b)
    fwd, inv = (rfftn, irfftn) if both_real else (fftn, ifftn)
    # Transform length per axis: the larger of the two input extents.
    dims = np.maximum([a.shape[i] for i in axes], [b.shape[i] for i in axes])
    return inv(fwd(a, dims, axes) * fwd(b, dims, axes), dims, axes)
def coherence_spec(fxy, fxx, fyy):
    r"""Compute the coherence between the spectra of two time series.

    All parameters are given in the frequency domain.

    Parameters
    ----------
    fxy : array
        The cross-spectrum of the time series
    fyy, fxx : array
        The spectra of the signals

    Returns
    -------
    float : a frequency-band-dependent measure of the linear association
        between the two time series

    See also
    --------
    :func:`coherence`
    """
    # Auto-spectra are real in theory; drop numerical imaginary residue.
    fxx = fxx if np.isrealobj(fxx) else np.real(fxx)
    fyy = fyy if np.isrealobj(fyy) else np.real(fyy)
    return np.abs(fxy) ** 2 / (fxx * fyy)
def _coherence_bavg(fxy, fxx, fyy):
    r"""Compute the band-averaged coherency between two spectra.

    Input to this function is in the frequency domain.

    Parameters
    ----------
    fxy : float array
        The cross-spectrum of the time series
    fyy, fxx : float array
        The spectra of the signals

    Returns
    -------
    float : the band-averaged coherence
    """
    # Spectra should be real-valued; strip any imaginary residue first.
    fxx = fxx if np.isrealobj(fxx) else np.real(fxx)
    fyy = fyy if np.isrealobj(fyy) else np.real(fyy)
    numerator = np.abs(fxy.sum()) ** 2
    denominator = fxx.sum() * fyy.sum()
    return numerator / denominator
def same_upto_phase_factor(self, *others, **kwargs):
    """Return True iff every tensor in `others` matches `self` up to a sign.

    A tensor counts as "the same" when either `other - self` or
    `other + self` is elementwise within `cutoff` (keyword argument,
    defaulting to ``Tensor.same_tensor_cutoff``).  Only real tensors are
    supported.
    """
    if sanity_checking_enabled:
        has_complex = not np.isrealobj(self) or any(not np.isrealobj(o) for o in others)
        if has_complex:
            raise NotImplementedError("phase factor detection complex Tensors is not yet implemented")
    cutoff = kwargs.pop('cutoff', Tensor.same_tensor_cutoff)
    for other in others:
        matches_plus = not (abs(other - self) > cutoff).any()
        matches_minus = not (abs(other + self) > cutoff).any()
        if not (matches_plus or matches_minus):
            return False
    return True
def evaluate(self, ind, **kwargs):
    """
    Note that math functions used in the solutions are imported from either
    utilities.fitness.math_functions or called from numpy.

    :param ind: An individual to be evaluated.
    :param kwargs: An optional parameter for problems with training/test
    data. Specifies the distribution (i.e. training or test) upon which
    evaluation is to be performed.
    :return: The fitness of the evaluated individual.
    """
    dist = kwargs.get('dist', 'training')
    if dist == "training":
        # Set training datasets.
        x = self.training_in
        y = self.training_exp
    elif dist == "test":
        # Set test datasets.
        x = self.test_in
        y = self.test_exp
    else:
        raise ValueError("Unknown dist: " + dist)
    if params['OPTIMIZE_CONSTANTS']:
        # if we are training, then optimize the constants by
        # gradient descent and save the resulting phenotype
        # string as ind.phenotype_with_c0123 (eg x[0] +
        # c[0] * x[1]**c[1]) and values for constants as
        # ind.opt_consts (eg (0.5, 0.7). Later, when testing,
        # use the saved string and constants to evaluate.
        if dist == "training":
            return optimize_constants(x, y, ind)
        else:
            # this string has been created during training
            phen = ind.phenotype_consec_consts
            c = ind.opt_consts
            # phen will refer to x (ie test_in), and possibly to c
            # NOTE(review): eval() executes arbitrary code; this is safe
            # only because phenotypes come from the GE grammar, never
            # from untrusted input -- confirm.
            yhat = eval(phen)
            # Sanity check: predictions must be real-valued.  (Stripped
            # under `python -O`, like all asserts.)
            assert np.isrealobj(yhat)
            # let's always call the error function with the
            # true values first, the estimate second
            return params['ERROR_METRIC'](y, yhat)
    else:
        # phenotype won't refer to C
        yhat = eval(ind.phenotype)
        assert np.isrealobj(yhat)
        # let's always call the error function with the true
        # values first, the estimate second
        return params['ERROR_METRIC'](y, yhat)
def eigenConcat(omega, Q, AB, BB, k): """ Find the eigen update of a matrix [A, B]'[A B] where A'A = V diag(s) V* and AB = A*B, BB = B*B. Q is the set of eigenvectors of A*A and s is the vector of eigenvalues. """ #logging.debug("< eigenConcat >") Parameter.checkInt(k, 0, omega.shape[0]) if not numpy.isrealobj(omega) or not numpy.isrealobj(Q): raise ValueError("Eigenvalues and eigenvectors must be real") if not numpy.isrealobj(AB) or not numpy.isrealobj(BB): raise ValueError("AB and BB must be real") if omega.ndim != 1: raise ValueError("omega must be 1-d array") if omega.shape[0] != Q.shape[1]: raise ValueError("Must have same number of eigenvalues and eigenvectors") if Q.shape[0] != AB.shape[0]: raise ValueError("Q must have the same number of rows as AB") if AB.shape[1] != BB.shape[0] or BB.shape[0]!=BB.shape[1]: raise ValueError("AB must have the same number of cols/rows as BB") #Check Q is orthogonal if __debug__: Parameter.checkOrthogonal(Q, tol=EigenUpdater.tol, softCheck=True, arrayInfo = "input Q in eigenConcat()") m = Q.shape[0] p = BB.shape[0] inds = numpy.flipud(numpy.argsort(numpy.abs(omega))) Q = Q[:, inds[0:k]] omega = omega[inds[0:k]] Omega = numpy.diag(omega) QAB = Q.conj().T.dot(AB) F = numpy.c_[numpy.r_[Omega, QAB.conj().T], numpy.r_[QAB, BB]] D = numpy.c_[numpy.r_[Q, numpy.zeros((p, Q.shape[1]))], numpy.r_[numpy.zeros((m, p)), numpy.eye(p)]] pi, H = scipy.linalg.eigh(F) inds = numpy.flipud(numpy.argsort(numpy.abs(pi))) inds = inds[numpy.abs(pi)>EigenUpdater.tol] H = H[:, inds[0:k]] pi = pi[inds[0:k]] V = numpy.dot(D, H) #logging.debug("</ eigenConcat >") return pi, V
def testRealEven(x): """ Inputs: x (numpy array)= input signal of length M (M is odd) Output: The function should return a tuple (isRealEven, dftbuffer, X) isRealEven (boolean) = True if the input x is real and even, and False otherwise dftbuffer (numpy array, possibly complex) = The M point zero phase windowed version of x X (numpy array, possibly complex) = The M point DFT of dftbuffer """ N = len(x) hM1 = np.floor((N+1)/2) hM2 = np.floor(N/2) dftbuffer = np.zeros(N) dftbuffer[:hM1] = x[hM2:] dftbuffer[-hM2:] = x[:hM2] Mx = fft(dftbuffer) Mx = Mx[: len(Mx)/2 + 1] xfh = x[: np.floor(len(x)/2)] # First half # add np.float() otherwise len(x)/ is int so floor xsh = x[np.ceil(np.float(len(x))/2):] # second half xsh = xsh[::-1] if np.isrealobj(x): if all(xfh == xsh): return (True, dftbuffer, Mx) else: return (False, dftbuffer, Mx) else: return (False, dftbuffer, Mx)
def dctii(x):
    """Compute a Discrete Cosine Transform, type II (orthonormal scaling).

    The DCT type II is defined as

        dct(u) = a(u) * sum_{i=0}^{N-1} f(i) cos((i + 0.5) pi u / N)

    where a(0) = sqrt(1/(4N)) and a(u) = sqrt(1/(2N)) for u > 0.

    Parameters
    ==========
    x : array-like
        input signal (real-valued)

    Returns
    =======
    y : array-like
        DCT-II of `x`

    Note
    ====
    Computed via a length-4N FFT of a zero-interleaved, mirrored copy of x.
    """
    if not np.isrealobj(x):
        raise ValueError("Complex input not supported")
    n = x.size
    # Place x in the odd slots of the first half of a 4n buffer and its
    # mirror image in the odd slots of the second half; the real part of
    # the FFT of this buffer yields the DCT cosine sums.
    buf = np.zeros(n * 4, x.dtype)
    buf[1:2 * n:2] = x
    buf[2 * n + 1::2] = x[-1::-1]
    y = np.real(fft(buf))[:n]
    # Orthonormal scaling factors a(u).
    y[0] *= np.sqrt(.25 / n)
    y[1:] *= np.sqrt(.5 / n)
    return y
def acorr(x, axis=-1, onesided=False, scale='none'):
    """Compute autocorrelation of x along given axis.

    Parameters
    ----------
    x : array-like
        signal to correlate.
    axis : int
        axis along which autocorrelation is computed.
    onesided : bool, optional
        if True, only returns the right side of the autocorrelation.
    scale : {'none', 'coeff'}
        scaling mode. If 'coeff', the correlation is normalized so the
        0-lag value equals 1.

    Notes
    -----
    Uses the FFT, which is more efficient than direct computation for
    relatively large n.
    """
    if not np.isrealobj(x):
        raise ValueError("Complex input not supported yet")
    if scale not in ['none', 'coeff']:
        raise ValueError("scale mode %s not understood" % scale)

    maxlag = x.shape[axis]
    # FFT size: next power of two that can hold the full linear correlation.
    nfft = 2 ** nextpow2(2 * maxlag - 1)

    # The helper works on the last axis; move the target axis there and back.
    work = x if axis == -1 else np.swapaxes(x, -1, axis)
    out = _acorr_last_axis(work, nfft, maxlag, onesided, scale)
    return out if axis == -1 else np.swapaxes(out, -1, axis)
def irfft(
    x,
    n=None,
    axis=-1,
    overwrite_x=False,
    planner_effort="FFTW_MEASURE",
    threads=1,
    auto_align_input=True,
    auto_contiguous=True,
):
    """Perform a 1D real inverse FFT.

    The first three arguments are as per :func:`scipy.fftpack.irfft`;
    the rest of the arguments are documented
    in the :ref:`additional argument docs<interfaces_additional_args>`.
    """
    # scipy.fftpack.irfft only accepts the packed-real spectrum format.
    if not numpy.isrealobj(x):
        raise TypeError("Input array must be real to maintain "
                        "compatibility with scipy.fftpack.irfft.")

    x = numpy.asanyarray(x)
    length = n if n is not None else x.shape[axis]

    # Repack scipy's real format into the complex half-spectrum that
    # numpy-style irfft expects, then delegate.
    half_spectrum = _irfft_input_to_complex(x, axis)
    return numpy_fft.irfft(half_spectrum, length, axis, overwrite_x,
                           planner_effort, threads, auto_align_input,
                           auto_contiguous)
def __init__(
    self,
    shape=None,
    dtype=None,
    data=None,
    shape_out=None,
    axes=None,
    normalize="rescale",
    stream=None,
):
    """CUDA-backed FFT plan (cufft via pycuda/scikit-cuda).

    Parameters mirror the base FFT class; `stream` is an optional CUDA
    stream the transforms are enqueued on.
    """
    # BUG FIX: the guard previously read
    # "not(__have_cufft__) or not(__have_cufft__)" -- the same flag tested
    # twice.  The second operand was presumably a pycuda availability flag
    # (the message names both packages); collapsed to a single test.
    # NOTE(review): confirm whether a separate __have_pycuda__ flag exists
    # upstream and should be tested here as well.
    if not __have_cufft__:
        raise ImportError("Please install pycuda and scikit-cuda to use the CUDA back-end")

    super(CUFFT, self).__init__(
        shape=shape,
        dtype=dtype,
        data=data,
        shape_out=shape_out,
        axes=axes,
        normalize=normalize,
    )
    self.cufft_stream = stream
    self.backend = "cufft"

    self.configure_batched_transform()
    self.allocate_arrays()
    # A real-valued input array selects the R2C/C2R transform pair.
    self.real_transform = np.isrealobj(self.data_in)
    self.compute_forward_plan()
    self.compute_inverse_plan()
    # Keep references to the device buffers so they outlive the plans.
    self.refs = {
        "data_in": self.data_in,
        "data_out": self.data_out,
    }
    self.configure_normalization()
def pressureResponse(sim, forcex, forcez):
    """Solve for the pressure response to a force field.

    Computes the pressure `pres` whose gradient absorbs the divergent part
    of (forcex, forcez), and returns the corrected (divergence-free) force
    components along with the pressure.

    Parameters
    ----------
    sim : object
        Simulation context providing spectral operators `dkx`, `dkz`,
        `makeSpect`, and the `assertDkx`/`assertDkz` sanity checks.
    forcex, forcez : ndarray
        Force components.  Real-valued inputs are transformed to spectral
        space on entry and the results transformed back before returning.

    Returns
    -------
    [solx, solz, pres] : list of ndarray
        Corrected force components and the pressure field, in the same
        (real or spectral) space as the inputs.
    """
    # FIX: removed the unused nz/nx locals.
    transform = False
    if np.isrealobj(forcex):
        forcex = sim.makeSpect(forcex)
        forcez = sim.makeSpect(forcez)
        transform = True
    sim.assertDkx(forcex)
    sim.assertDkz(forcex)

    div = forcex * sim.dkx + forcez * sim.dkz
    # FIX: the zero-wavenumber denominator is 0, which used to emit a
    # divide-by-zero/invalid warning; silence it -- the affected entry is
    # overwritten immediately below.
    with np.errstate(divide="ignore", invalid="ignore"):
        pres = div / (sim.dkz**2 + sim.dkx**2)
    pres[0, 0, 0] = 0  # mean pressure is arbitrary; pin it to zero

    solx = -pres * sim.dkx
    solz = -pres * sim.dkz

    if transform:
        solx = np.fft.irfftn(solx)
        solz = np.fft.irfftn(solz)
        pres = np.fft.irfftn(pres)
    return [solx, solz, pres]
def _maybe_real(A, B, tol=None):
    """
    Return either B or the real part of B, depending on properties of A and B.

    B has been computed as a complicated function of A and may carry
    negligible imaginary components that are numerical artifacts.  If A is
    real and B is complex with small imaginary parts, a real copy of B is
    returned instead.

    Parameters
    ----------
    A : ndarray
        Input array whose type is to be checked as real vs. complex.
    B : ndarray
        Array to be returned, possibly without its imaginary part.
    tol : float
        Absolute tolerance.

    Returns
    -------
    out : real or complex array
        Either the input array B or only the real part of the input array B.
    """
    # Booleans and integers register as real here.
    if not (np.isrealobj(A) and np.iscomplexobj(B)):
        return B
    if tol is None:
        # Precision-dependent default tolerance.
        tol = {0: feps * 1e3, 1: eps * 1e6}[_array_precision[B.dtype.char]]
    return B.real if np.allclose(B.imag, 0.0, atol=tol) else B
def is_real_array(array):
    '''
    Check if a value is a list or array of real scalars by aggregating
    several numpy checks.

    Parameters
    ----------
    array: any type
        The parameter to check

    Returns
    ------
    check : bool
        True if ```array``` is a list or array of real scalars,
        False otherwise.
    '''
    # Exact-type check (not isinstance) to mirror the accepted containers.
    if type(array) is not list and type(array) is not np.ndarray:
        return False
    elements_real = np.all([np.isreal(x) for x in array])
    if not elements_real or not np.isrealobj(array):
        return False
    if not np.asarray(list(map(np.isscalar, array))).all():
        return False
    return True
def app_luinv_to_spmat(alu_solve, Z):
    """compute A.-1*Z  where A comes factored
    and with a solve routine for possibly complex Z

    Parameters
    ----------
    alu_solve : callable f(v)
        returning a matrix inverse applied to `v`; the underlying
        factorization is real, so complex columns are split into real and
        imaginary parts below
    Z : (N,K) sparse matrix, real or complex
        the inverse is to be applied to

    Returns
    -------
    , : (N,K) ndarray
        matrix inverse applied to ndarray
    """
    # BUG FIX: `Z.tocsc()` returns a *new* matrix; the previous bare call
    # discarded it, making the conversion a no-op.  Bind the result so the
    # per-column slicing below operates on the efficient CSC layout.
    Z = Z.tocsc()
    ainvzl = []  # collect solved columns (list allows complex values)

    for ccol in range(Z.shape[1]):
        zcol = Z[:, ccol].toarray().flatten()
        if np.isrealobj(zcol):
            ainvzl.append(alu_solve(zcol))
        else:
            # Real factorization: solve real and imaginary parts separately.
            ainvzl.append(alu_solve(zcol.real) + 1j * alu_solve(zcol.imag))

    return np.asarray(ainvzl).T
def test_numpy_fft(self):
    """
    Test the numpy backend against native fft.

    Results should be exactly the same.
    """
    trinfos = self.param["transform_infos"]
    trdim = self.param["trdim"]
    ndim = len(self.param["size"])
    input_data = self.param["test_data"].data_refs[ndim].astype(
        trinfos.modes[self.param["mode"]]
    )
    # Pick the matching reference pair (real vs complex transforms).
    np_fft, np_ifft = self.transforms[trdim][np.isrealobj(input_data)]

    F = FFT(
        template=input_data,
        axes=trinfos.axes[trdim],
        backend="numpy"
    )
    # Forward transform must agree with numpy's reference implementation.
    forward = F.fft(input_data)
    expected = np_fft(input_data)
    self.assertTrue(np.allclose(forward, expected))
    # Round trip: the inverse of the forward result must agree as well.
    inverse = F.ifft(forward)
    expected_inverse = np_ifft(expected)
    self.assertTrue(np.allclose(inverse, expected_inverse))
def __setitem__(self, name, value):
    """Store `value` under `name` in the MATLAB workspace.

    Non-array values are converted first; complex arrays must be shipped
    as separate real and imaginary parts via PutFullMatrix.
    """
    if not isinstance(value, ndarray):
        value = array(value)
    if isrealobj(value):
        self.__matlab_object.handle.PutWorkspaceData(name, self.__name_space, value)
    else:
        # Complex data: the COM interface takes real/imag separately.
        self.__matlab_object.handle.PutFullMatrix(name, self.__name_space, value.real, value.imag)
def irfft(x, n=None, axis=-1, overwrite_x=0):
    """ irfft(x, n=None, axis=-1, overwrite_x=0) -> y

    Return inverse discrete Fourier transform of real sequence x.
    The contents of x is interpreted as the output of rfft(..)
    function.

    The returned real array contains
      [y(0),y(1),...,y(n-1)]
    where for n is even
      y(j) = 1/n (sum[k=1..n/2-1] (x[2*k-1]+sqrt(-1)*x[2*k])
                                   * exp(sqrt(-1)*j*k* 2*pi/n)
                  + c.c. + x[0] + (-1)**(j) x[n-1])
    and for n is odd
      y(j) = 1/n (sum[k=1..(n-1)/2] (x[2*k-1]+sqrt(-1)*x[2*k])
                                   * exp(sqrt(-1)*j*k* 2*pi/n)
                  + c.c. + x[0])
    c.c. denotes complex conjugate of preceeding expression.

    Optional input: see rfft.__doc__
    """
    tmp = asarray(x)
    if not numpy.isrealobj(tmp):
        # FIX: modernized from the Python-2-only `raise TypeError, "..."`
        # statement form, which is a syntax error on Python 3.
        raise TypeError("1st argument must be real sequence")
    # Single precision has a dedicated routine; everything else goes
    # through the double-precision transform.
    if istype(tmp, numpy.float32):
        work_function = fftpack.rfft
    else:
        work_function = fftpack.drfft
    return _raw_fft(tmp, n, axis, -1, overwrite_x, work_function)
def rfft(x, n=None, axis=-1, overwrite_x=0):
    """ rfft(x, n=None, axis=-1, overwrite_x=0) -> y

    Return discrete Fourier transform of real sequence x.

    The returned real arrays contains
      [y(0),Re(y(1)),Im(y(1)),...,Re(y(n/2))]              if n is even
      [y(0),Re(y(1)),Im(y(1)),...,Re(y(n/2)),Im(y(n/2))]   if n is odd
    where
      y(j) = sum[k=0..n-1] x[k] * exp(-sqrt(-1)*j*k* 2*pi/n)
      j = 0..n-1
    Note that y(-j) = y(n-j).

    Optional input:
      n
        Defines the length of the Fourier transform. If n is not
        specified then n=x.shape[axis] is set. If n<x.shape[axis],
        x is truncated. If n>x.shape[axis], x is zero-padded.
      axis
        The transform is applied along the given axis of the input
        array (or the newly constructed array if n argument was used).
      overwrite_x
        If set to true, the contents of x can be destroyed.

    Notes:
      y == rfft(irfft(y)) within numerical accuracy.
    """
    tmp = asarray(x)
    if not numpy.isrealobj(tmp):
        # FIX: modernized from the Python-2-only `raise TypeError, "..."`
        # statement form, which is a syntax error on Python 3.
        raise TypeError("1st argument must be real sequence")
    work_function = fftpack.drfft
    return _raw_fft(tmp, n, axis, 1, overwrite_x, work_function)
def rfft(x, n=None, axis=-1, overwrite_x=False,
         planner_effort=None, threads=None,
         auto_align_input=True, auto_contiguous=True):
    '''Perform a 1D real FFT.

    The first three arguments are as per :func:`scipy.fftpack.rfft`;
    the rest of the arguments are documented
    in the :ref:`additional argument docs<interfaces_additional_args>`.
    '''
    # scipy.fftpack.rfft only operates on real input.
    if not numpy.isrealobj(x):
        raise TypeError('Input array must be real to maintain '
                        'compatibility with scipy.fftpack.rfft.')

    x = numpy.asanyarray(x)
    # Resolve deferred planner defaults.
    effort = _default_effort(planner_effort)
    nthreads = _default_threads(threads)

    # Compute a numpy-style complex half-spectrum, then repack it into
    # scipy's interleaved real output format.
    spectrum = numpy_fft.rfft(x, n, axis, None, overwrite_x,
                              effort, nthreads, auto_align_input,
                              auto_contiguous)
    out_shape = list(x.shape)
    if n is not None:
        out_shape[axis] = n
    return _complex_to_rfft_output(spectrum, out_shape, axis)
def __init__(self, x):
    """Compute Givens rotation for provided vector x.

    Computes Givens rotation
    :math:`G=\\begin{bmatrix}c&s\\\\-\\overline{s}&c\\end{bmatrix}`
    such that
    :math:`Gx=\\begin{bmatrix}r\\\\0\\end{bmatrix}`.
    """
    # make sure that x is a vector ;)
    if x.shape != (2, 1):
        raise ValueError('x is not a vector of shape (2,1)')

    # FIX: numpy.asscalar was deprecated and removed in numpy >= 1.23;
    # ndarray.item() is the supported replacement.
    a = x[0].item()
    b = x[1].item()

    if numpy.isrealobj(x):
        # real vector: real BLAS rotation
        c, s = blas.drotg(a, b)
    else:
        # complex vector: complex BLAS rotation
        c, s = blas.zrotg(a, b)

    self.c = c
    self.s = s
    self.r = c * a + s * b
    self.G = numpy.array([[c, s], [-numpy.conj(s), c]])
def __init__(
    self,
    shape=None,
    dtype=None,
    data=None,
    shape_out=None,
    axes=None,
    normalize="rescale",
):
    """Numpy-backend FFT plan; parameters mirror the base class."""
    super(NPFFT, self).__init__(
        shape=shape,
        dtype=dtype,
        data=data,
        shape_out=shape_out,
        axes=axes,
        normalize=normalize,
    )
    self.backend = "numpy"
    # A real-valued template array selects the real-to-complex transforms.
    self.real_transform = bool(data is not None and np.isrealobj(data))
    # numpy's own inverse already rescales by 1/N, so only "ortho" needs
    # an explicit norm argument passed through.
    # TODO Issue warning if user wants ifft(fft(data)) = N*data ?
    if normalize != "ortho":
        self.normalize = None
    self.set_fft_functions()
    #~ self.allocate_arrays() # not needed for this backend
    self.compute_plans()
def extend_dofs(self, dofs, fill_value=None):
    """
    Extend DOFs to the whole domain using the `fill_value`, or the
    smallest value in `dofs` if `fill_value` is None.
    """
    if fill_value is None:
        if nm.isrealobj(dofs):
            fill_value = get_min_value(dofs)
        else:
            # Complex values - treat real and imaginary parts separately.
            fill_value = (get_min_value(dofs.real)
                          + 1j * get_min_value(dofs.imag))

    if self.approx_order == 0:
        new_dofs = extend_cell_data(dofs, self.domain, self.region,
                                    val=fill_value)
    else:
        vertex_indices = self.get_vertices()
        total_nodes = self.domain.shape.n_nod
        # Start from a fill-value canvas, then write the known DOFs in.
        new_dofs = nm.empty((total_nodes, dofs.shape[1]), dtype=self.dtype)
        new_dofs.fill(fill_value)
        new_dofs[vertex_indices] = dofs[:vertex_indices.size]

    return new_dofs
def irfft(x, n=None, axis=-1, overwrite_x=0):
    """ irfft(x, n=None, axis=-1, overwrite_x=0) -> y

    Return inverse discrete Fourier transform of real sequence x.
    The contents of x is interpreted as the output of rfft(..)
    function.

    The returned real array contains
      [y(0),y(1),...,y(n-1)]
    where for n is even
      y(j) = 1/n (sum[k=1..n/2-1] (x[2*k-1]+sqrt(-1)*x[2*k])
                                   * exp(sqrt(-1)*j*k* 2*pi/n)
                  + c.c. + x[0] + (-1)**(j) x[n-1])
    and for n is odd
      y(j) = 1/n (sum[k=1..(n-1)/2] (x[2*k-1]+sqrt(-1)*x[2*k])
                                   * exp(sqrt(-1)*j*k* 2*pi/n)
                  + c.c. + x[0])
    c.c. denotes complex conjugate of preceeding expression.

    Optional input: see rfft.__doc__
    """
    tmp = _asfarray(x)
    if not numpy.isrealobj(tmp):
        # FIX: modernized from the Python-2-only `raise TypeError, "..."`
        # statement form, which is a syntax error on Python 3.
        raise TypeError("1st argument must be real sequence")
    try:
        # Dispatch on dtype to the matching precision work function.
        work_function = _DTYPE_TO_RFFT[tmp.dtype]
    except KeyError:
        raise ValueError("type %s is not supported" % tmp.dtype)
    return _raw_fft(tmp, n, axis, -1, overwrite_x, work_function)
def sift(input_signal, threshold):
    """Suppress FFT coefficients whose magnitude falls below `threshold`.

    The signal is transformed, small spectral components are zeroed, and
    the result is transformed back and cast to the input's dtype.
    """
    spectrum = numpy.fft.fft(input_signal)
    spectrum[numpy.absolute(spectrum) < threshold] = 0.0
    filtered = numpy.fft.ifft(spectrum)
    # applying SIFT to real data should also return real data, but casting
    # to a real type raises a ComplexWarning unless we drop the
    # (numerically zero) imaginary part first
    if numpy.isrealobj(input_signal):
        filtered = filtered.real
    return filtered.astype(input_signal.dtype)
def H(self):
    """Return the adjoint operator (conjugate transpose of the matrix).

    Source and range ids are swapped, and the solver options for
    'inverse' / 'inverse_adjoint' are exchanged accordingly.
    """
    if self.solver_options:
        # The adjoint's inverse is the original's inverse_adjoint and
        # vice versa.
        options = {'inverse': self.solver_options.get('inverse_adjoint'),
                   'inverse_adjoint': self.solver_options.get('inverse')}
    else:
        options = None

    if self.sparse:
        adjoint_matrix = self.matrix.transpose(copy=False).conj(copy=False)
    else:
        # Real dense matrices need no conjugation, just the transpose.
        adjoint_matrix = (self.matrix.T if np.isrealobj(self.matrix)
                          else self.matrix.T.conj())

    return self.with_(matrix=adjoint_matrix, source_id=self.range_id,
                      range_id=self.source_id, solver_options=options,
                      name=self.name + '_adjoint')
def rfft(x, n=None, axis=-1, overwrite_x=0):
    """
    Discrete Fourier transform of a real sequence.

    The returned real arrays contains::

      [y(0),Re(y(1)),Im(y(1)),...,Re(y(n/2))]              if n is even
      [y(0),Re(y(1)),Im(y(1)),...,Re(y(n/2)),Im(y(n/2))]   if n is odd

    where::

      y(j) = sum[k=0..n-1] x[k] * exp(-sqrt(-1)*j*k*2*pi/n)
      j = 0..n-1

    Note that ``y(-j) == y(n-j).conjugate()``.

    Parameters
    ----------
    x : array_like, real-valued
        The data to transform.
    n : int, optional
        Length of the Fourier transform; `x` is truncated or zero-padded
        to this length.  Defaults to ``x.shape[axis]``.
    axis : int, optional
        Axis along which the transform is applied (default: last axis).
    overwrite_x : bool, optional
        If set to true, the contents of `x` can be overwritten.
        Default is False.

    See Also
    --------
    fft, irfft, scipy.fftpack.basic

    Notes
    -----
    Within numerical accuracy, ``y == rfft(irfft(y))``.
    """
    data = _asfarray(x)
    if not numpy.isrealobj(data):
        raise TypeError("1st argument must be real sequence")
    try:
        # Precision-specific backend routine keyed by dtype.
        work_function = _DTYPE_TO_RFFT[data.dtype]
    except KeyError:
        raise ValueError("type %s is not supported" % data.dtype)
    # If _asfarray made a copy, overwriting the temporary is always safe.
    overwrite_x = overwrite_x or _datacopied(data, x)
    return _raw_fft(data, n, axis, 1, overwrite_x, work_function)
def checkArray(array, softCheck = False, arrayInfo = ""):
    """
    Check that an array contains no nan or inf values and is real-valued.

    Returns True on success; otherwise defers to Parameter.whatToDo, which
    either raises or soft-reports depending on `softCheck`.
    """
    # Checks are ordered and lazy: later tests run only if earlier pass.
    problem = None
    if numpy.isinf(array).any():
        problem = " contains a 'inf' value"
    elif numpy.isnan(array).any():
        problem = " contains a 'NaN' value"
    elif not numpy.isrealobj(array):
        problem = " has an imaginary part"
    if problem is None:
        return True
    return Parameter.whatToDo("The array " + arrayInfo + problem, softCheck)
def lazyEigenConcatAsUpdate(omega, Q, AB, BB, k, debug= False): """ Find the eigen update of a matrix [A, B]'[A B] where A'A = Q diag(omega) Q* and AB = A*B, BB = B*B. Q is the set of eigenvectors of A*A and omega is the vector of eigenvalues. Simply expand Q, and update the eigen decomposition using EigenAdd2. Computation could be upgraded a bit because of the particular update type (Y1Bar = Y1 = [0,I]', Y2Bar = [(I-QQ')A'B, 0]'). """ #logging.debug("< lazyEigenConcatAsUpdate >") Parameter.checkClass(omega, numpy.ndarray) Parameter.checkClass(Q, numpy.ndarray) Parameter.checkClass(AB, numpy.ndarray) Parameter.checkClass(BB, numpy.ndarray) Parameter.checkInt(k, 0, AB.shape[0] + BB.shape[0]) if not numpy.isrealobj(omega) or not numpy.isrealobj(Q): logging.info("Eigenvalues or eigenvectors are not real") if not numpy.isrealobj(AB) or not numpy.isrealobj(BB): logging.info("AB or BB are not real") if omega.ndim != 1: raise ValueError("omega must be 1-d array") if omega.shape[0] != Q.shape[1]: raise ValueError("Must have same number of eigenvalues and eigenvectors") if Q.shape[0] != AB.shape[0]: raise ValueError("Q must have the same number of rows as AB") if AB.shape[1] != BB.shape[0] or BB.shape[0]!=BB.shape[1]: raise ValueError("AB must have the same number of cols/rows as BB") if __debug__: if not Parameter.checkOrthogonal(Q, tol=EigenUpdater.tol, softCheck=True, investigate=True, arrayInfo="input Q in lazyEigenConcatAsUpdate()"): print("omega:\n", omega) m = Q.shape[0] p = BB.shape[0] Q = numpy.r_[Q, numpy.zeros((p, Q.shape[1]))] Y1 = numpy.r_[numpy.zeros((m,p)), numpy.eye(p)] Y2 = numpy.r_[AB, 0.5*BB] return EigenUpdater.eigenAdd2(omega, Q, Y1, Y2, k, debug=debug)
def __test(x, ref_power, amin, top_db):
    # Convert amplitude to decibels and sanity-check the output.
    db = librosa.logamplitude(x, ref_power=ref_power, amin=amin,
                              top_db=top_db)
    # dB values must be real and the shape preserved.
    assert np.isrealobj(db)
    eq_(db.shape, x.shape)
    if top_db is not None:
        # Dynamic range is clipped to at most `top_db` below the peak.
        assert db.min() >= db.max() - top_db
def conj(self):
    """Return the complex conjugate of this vector array.

    Real-valued data is its own conjugate, so a plain copy is returned
    in that case.
    """
    if np.isrealobj(self.to_numpy()):
        return self.copy()
    conjugated = np.conj(self.to_numpy())
    return NumpyVectorArray(conjugated, self.space)
def sign(a):
    """Sign of an array, which works for both real and complex array.

    For real input this is ``np.sign``; for complex input it is the
    unit-modulus phase factor exp(1j * angle(a)).
    """
    return np.sign(a) if np.isrealobj(a) else np.exp(1.0J * np.angle(a))
def ladlassopath(yx, Xx, intcpt=True, eps=10**-3, L=120, reltol=1e-6, printitn=0):
    """Compute the LAD-lasso regularization path.

    Fits `ladlasso` over a geometric grid of L+1 penalty values (from
    `lam0` down to `eps*lam0`), warm-starting each fit from the previous
    solution.

    :param yx: response vector (N,)
    :param Xx: design matrix (N, p)
    :param intcpt: if True, an (unpenalized) intercept is prepended
    :param eps: ratio of smallest to largest penalty on the grid
    :param L: number of grid intervals (L+1 penalty values)
    :param reltol: convergence tolerance passed to `ladlasso`
    :param printitn: verbosity passed to `ladlasso`
    :return: (B, stats) -- coefficient matrix (p, L+1) and a dict with
        'DF', 'MeAD', 'gBIC' and 'Lambda' entries.
    """
    if type(intcpt) != bool:
        raise TypeError('intcpt should be a boolean instead of ' + str(intcpt))
    if hasattr(eps, "__iter__") or eps < 0 or not np.isfinite(eps) or \
            not np.isrealobj(eps):
        raise ValueError('eps should be a real, positive, scalar')
    if not np.isrealobj(L) or L < 0 or not np.isfinite(L) or \
            hasattr(L, "__iter__"):
        raise ValueError('L should be a real, positive, scalar')
    y = np.array(yx)  #np.copy(np.asarray(yx))
    y = y if not len( y.shape) == 2 else y.flatten()  # ensure that y is Nx1 and not just N
    X = np.array(Xx)  #np.copy(np.asarray(Xx))
    n, p = X.shape
    if intcpt:
        p = p + 1
        # Robust centring: median for real data, spatial median otherwise.
        medy = np.median(y) if np.isrealobj(y) else spatmed(y)
        yc = y - medy
        lam0 = np.max(X.T @ np.sign(yc))  # max of a column vector
    else:
        # NOTE(review): this branch discards its result and references
        # `yc`, which is only bound in the `if` branch above -- it would
        # raise NameError if reached.  Likely intended:
        # lam0 = np.max(np.abs(X.T @ np.sign(y))).  Kept as-is pending
        # confirmation against the upstream (robustsp) implementation.
        np.max(X.T @ np.sign(yc))
    lamgrid = eps**(np.arange(0, L + 1, 1) / L) * lam0  # grid of penalty values
    B = np.zeros([p, L + 1])  # initial regression vector
    binit = np.concatenate(
        (np.asarray([medy]), np.zeros(p - 1))) if intcpt else np.zeros(p)
    for jj in range(L + 1):
        # Warm start: each fit begins from the previous grid point's solution.
        B[:, jj] = ladlasso(y, X, lamgrid[jj], binit, intcpt, reltol,
                            printitn)[0].flatten()
        binit = B[:, jj]
    stats = {}  # slightly different than Matlab
    if intcpt:
        # Threshold tiny coefficients (the intercept row is exempt).
        B[np.vstack((np.zeros((1, L + 1), dtype=bool),
                     np.abs(B[1:, :]) < 1e-7))] = 0
        stats['DF'] = np.sum(np.abs(B[1:, :]) != 0, axis=0)
        # Mean absolute deviation of residuals, scaled to estimate sigma.
        stats['MeAD'] = np.sqrt(np.pi/2) * \
            np.mean( \
                np.abs(np.repeat(y[:,np.newaxis],L+1,axis=1)
                       -np.hstack((np.ones((n,1)),X)) @B),axis=0)
        const = np.sqrt(n / (n - stats['DF'] - 1))
    else:
        B[np.abs(B) < 1e-7] = 0
        stats['DF'] = np.sum(np.abs(B) != 0, axis=0)
        stats['MeAD'] = np.sqrt(np.pi/2) * \
            np.mean( \
                np.abs(np.repeat(y[:,np.newaxis],L+1,axis=1)
                       -X@B),axis=0)
        const = np.sqrt(n / (n - stats['DF']))
    stats['MeAD'] = stats['MeAD'] * const
    stats['gBIC'] = 2 * n * np.log(stats['MeAD']) + stats['DF'] * np.log(
        n)  # BIC values
    stats['Lambda'] = lamgrid
    return B, stats
def scattering(self, input, local=False):
    """Compute the 2D scattering transform of `input`.

    :param input: NumPy array with at least two dimensions; the last two
        are the spatial dimensions and must match (M, N) -- or
        (M_padded, N_padded) when `self.pre_pad` is set.
    :param local: forwarded to `scattering2d`; also controls how the
        output is reshaped below.
    :return: scattering coefficients, either as an array or as a list of
        dicts with a 'coef' entry, depending on `self.out_type`.
    """
    if not type(input) is np.ndarray:
        raise TypeError('The input should be a NumPy array.')
    # Complex input flips the loc_cplx flag passed to the backend.
    if np.isrealobj(input):
        loc_cplx = False
    else:
        loc_cplx = True
    if len(input.shape) < 2:
        raise RuntimeError('Input array must have at least two dimensions.')
    if (input.shape[-1] != self.N or input.shape[-2] != self.M) and not self.pre_pad:
        raise RuntimeError('NumPy array must be of spatial size (%i,%i).' % (self.M, self.N))
    if (input.shape[-1] != self.N_padded or input.shape[-2] != self.M_padded) and self.pre_pad:
        raise RuntimeError('Padded array must be of spatial size (%i,%i).' % (self.M_padded, self.N_padded))
    if not self.out_type in ('array', 'list'):
        raise RuntimeError("The out_type must be one of 'array' or 'list'.")

    # Collapse all leading (batch) dimensions into one for the backend.
    batch_shape = input.shape[:-2]
    signal_shape = input.shape[-2:]
    input = input.reshape((-1, ) + signal_shape)

    S = scattering2d(input, self.pad, self.unpad, self.backend, self.J,
                     self.L, self.OS, self.phi, self.psi, self.max_order,
                     self.out_type, local=local, loc_cplx=loc_cplx)

    if self.out_type == 'array':
        # Restore the original batch dimensions around the coefficients.
        if local:
            scattering_shape = S.shape[-3:]
        else:
            scattering_shape = S.shape[-1:]
        new_shape = batch_shape + scattering_shape
        S = S.reshape(new_shape)
    else:
        # List output: reshape each coefficient entry in place.
        if local:
            scattering_shape = S[0]['coef'].shape[-2:]
        else:
            scattering_shape = ()
        new_shape = batch_shape + scattering_shape
        for x in S:
            x['coef'] = x['coef'].reshape(new_shape)

    return S
def update_image(self):
    """Refresh the displayed 2D slice from the underlying nd-array.

    Extracts the current slice, reorders axes, applies the display mode
    (magnitude/phase/real/imag/log), and draws or updates the matplotlib
    image artist.
    """
    # Extract slice: full (possibly flipped) ranges on the displayed
    # axes, fixed indices everywhere else.
    idx = []
    for i in range(self.ndim):
        if i in [self.x, self.y, self.z, self.c]:
            idx.append(slice(None, None, self.flips[i]))
        else:
            idx.append(self.slices[i])
    idx = tuple(idx)
    imv = sp.to_device(self.im[idx])

    # Transpose to have [z, y, x, c].
    imv_dims = [self.y, self.x]
    if self.z is not None:
        imv_dims = [self.z] + imv_dims
    if self.c is not None:
        imv_dims = imv_dims + [self.c]
    # argsort(argsort(...)) yields the permutation ranks for transpose.
    imv = np.transpose(imv, np.argsort(np.argsort(imv_dims)))
    imv = array_to_image(imv, color=self.c is not None)

    # Default display mode: real part for real data, magnitude otherwise.
    if self.mode is None:
        if np.isrealobj(imv):
            self.mode = 'r'
        else:
            self.mode = 'm'

    if self.mode == 'm':
        imv = np.abs(imv)
    elif self.mode == 'p':
        imv = np.angle(imv)
    elif self.mode == 'r':
        imv = np.real(imv)
    elif self.mode == 'i':
        imv = np.imag(imv)
    elif self.mode == 'l':
        imv = np.abs(imv)
        # log display: zeros map to -31 instead of -inf.
        imv = np.log(imv, out=np.ones_like(imv) * -31, where=imv != 0)

    # Auto window/level from the current slice on first draw.
    if self.vmin is None:
        self.vmin = imv.min()
    if self.vmax is None:
        self.vmax = imv.max()

    if self.axim is None:
        # First draw: create the image artist.
        self.axim = self.ax.imshow(
            imv,
            vmin=self.vmin,
            vmax=self.vmax,
            cmap='gray',
            origin='lower',
            interpolation=self.interpolation,
            aspect=1.0,
            extent=[
                0, imv.shape[1], 0, imv.shape[0]])
    else:
        # Subsequent draws: update data, extent and color limits in place.
        self.axim.set_data(imv)
        self.axim.set_extent([0, imv.shape[1], 0, imv.shape[0]])
        self.axim.set_clim(self.vmin, self.vmax)

    if self.help_text is None:
        # Lazily create the centred help overlay.
        bbox_props = dict(boxstyle="round", pad=1, fc="white", alpha=0.95,
                          lw=0)
        l, b, w, h = self.ax.get_position().bounds
        self.help_text = self.ax.text(imv.shape[0] / 2, imv.shape[1] / 2,
                                      image_plot_help_str,
                                      ha='center', va='center',
                                      linespacing=1.5, ma='left', size=8,
                                      bbox=bbox_props)
    self.help_text.set_visible(self.show_help)
def sqrtm(A, disp=True, blocksize=64):
    """
    Matrix square root.

    Parameters
    ----------
    A : (N, N) array_like
        Matrix whose square root to evaluate
    disp : bool, optional
        Print warning if error in the result is estimated large
        instead of returning estimated error. (Default: True)
    blocksize : integer, optional
        If the blocksize is not degenerate with respect to the
        size of the input array, then use a blocked algorithm. (Default: 64)

    Returns
    -------
    sqrtm : (N, N) ndarray
        Value of the sqrt function at `A`
    errest : float
        (if disp == False)
        Frobenius norm of the estimated error, ||err||_F / ||A||_F

    References
    ----------
    .. [1] Edvin Deadman, Nicholas J. Higham, Rui Ralha (2013)
           "Blocked Schur Algorithms for Computing the Matrix Square Root,
           Lecture Notes in Computer Science, 7782. pp. 171-182.

    Examples
    --------
    >>> from scipy.linalg import sqrtm
    >>> a = np.array([[1.0, 3.0], [1.0, 4.0]])
    >>> r = sqrtm(a)
    >>> r
    array([[ 0.75592895,  1.13389342],
           [ 0.37796447,  1.88982237]])
    >>> r.dot(r)
    array([[ 1.,  3.],
           [ 1.,  4.]])

    """
    A = _asarray_validated(A, check_finite=True, as_inexact=True)
    if len(A.shape) != 2:
        raise ValueError("Non-matrix input to matrix function.")
    if blocksize < 1:
        raise ValueError("The blocksize should be at least 1.")
    keep_it_real = np.isrealobj(A)
    if keep_it_real:
        # Real input: try the real Schur form first; fall back to the
        # complex form if the quasi-triangular factor has 2x2 blocks.
        T, Z = schur(A)
        if not np.array_equal(T, np.triu(T)):
            T, Z = rsf2csf(T, Z)
    else:
        T, Z = schur(A, output='complex')
    failflag = False
    try:
        # Square root of the (quasi-)triangular factor, then undo the
        # Schur similarity transform: X = Z R Z^H.
        R = _sqrtm_triu(T, blocksize=blocksize)
        ZH = np.conjugate(Z).T
        X = Z.dot(R).dot(ZH)
    except SqrtmError:
        # No square root found: return an all-NaN matrix of A's shape.
        failflag = True
        X = np.empty_like(A)
        X.fill(np.nan)

    if disp:
        if failflag:
            print("Failed to find a square root.")
        return X
    else:
        try:
            # Residual-based error estimate relative to ||A||_F.
            arg2 = norm(X.dot(X) - A, 'fro')**2 / norm(A, 'fro')
        except ValueError:
            # NaNs in matrix
            arg2 = np.inf

        return X, arg2
def _sqrtm_triu(T, blocksize=64):
    """
    Matrix square root of an upper triangular matrix.

    This is a helper function for `sqrtm` and `logm`.

    Parameters
    ----------
    T : (N, N) array_like upper triangular
        Matrix whose square root to evaluate
    blocksize : integer, optional
        If the blocksize is not degenerate with respect to the
        size of the input array, then use a blocked algorithm. (Default: 64)

    Returns
    -------
    sqrtm : (N, N) ndarray
        Value of the sqrt function at `T`

    References
    ----------
    .. [1] Edvin Deadman, Nicholas J. Higham, Rui Ralha (2013)
           "Blocked Schur Algorithms for Computing the Matrix Square Root,
           Lecture Notes in Computer Science, 7782. pp. 171-182.

    """
    T_diag = np.diag(T)
    # Stay in real arithmetic only if T is real with a nonnegative diagonal.
    keep_it_real = np.isrealobj(T) and np.min(T_diag) >= 0
    if not keep_it_real:
        T_diag = T_diag.astype(complex)
    # Diagonal of the square root: elementwise sqrt of T's diagonal.
    R = np.diag(np.sqrt(T_diag))

    # Compute the number of blocks to use; use at least one block.
    n, n = T.shape
    nblocks = max(n // blocksize, 1)

    # Compute the smaller of the two sizes of blocks that
    # we will actually use, and compute the number of large blocks.
    bsmall, nlarge = divmod(n, nblocks)
    blarge = bsmall + 1
    nsmall = nblocks - nlarge
    if nsmall * bsmall + nlarge * blarge != n:
        raise Exception('internal inconsistency')

    # Define the index range covered by each block.
    start_stop_pairs = []
    start = 0
    for count, size in ((nsmall, bsmall), (nlarge, blarge)):
        for i in range(count):
            start_stop_pairs.append((start, start + size))
            start += size

    # Within-block interactions: solve the scalar recurrence
    # R[i,j] = (T[i,j] - sum_k R[i,k] R[k,j]) / (R[i,i] + R[j,j]).
    for start, stop in start_stop_pairs:
        for j in range(start, stop):
            for i in range(j - 1, start - 1, -1):
                s = 0
                if j - i > 1:
                    s = R[i, i + 1:j].dot(R[i + 1:j, j])
                denom = R[i, i] + R[j, j]
                if not denom:
                    # Zero denominator: the square root does not exist.
                    raise SqrtmError('failed to find the matrix square root')
                R[i, j] = (T[i, j] - s) / denom

    # Between-block interactions: each off-diagonal block solves a
    # Sylvester equation Rii*X + X*Rjj = S.
    for j in range(nblocks):
        jstart, jstop = start_stop_pairs[j]
        for i in range(j - 1, -1, -1):
            istart, istop = start_stop_pairs[i]
            S = T[istart:istop, jstart:jstop]
            if j - i > 1:
                S = S - R[istart:istop, istop:jstart].dot(
                    R[istop:jstart, jstart:jstop])
            # Invoke LAPACK.
            # For more details, see the solve_sylvester implemention
            # and the fortran dtrsyl and ztrsyl docs.
            Rii = R[istart:istop, istart:istop]
            Rjj = R[jstart:jstop, jstart:jstop]
            if keep_it_real:
                x, scale, info = dtrsyl(Rii, Rjj, S)
            else:
                x, scale, info = ztrsyl(Rii, Rjj, S)
            R[istart:istop, jstart:jstop] = x * scale

    # Return the matrix square root.
    return R
def rfft(x, n=None, axis=-1, overwrite_x=False):
    """
    Discrete Fourier transform of a real sequence.

    Parameters
    ----------
    x : array_like, real-valued
        The data to transform.
    n : int, optional
        Length of the Fourier transform.  If not given, ``n = x.shape[axis]``;
        shorter input is zero-padded, longer input is truncated.
    axis : int, optional
        The axis along which the transform is applied (default: last axis).
    overwrite_x : bool, optional
        If True, the contents of `x` can be overwritten. Default is False.

    Returns
    -------
    z : real ndarray
        Packed half-spectrum::

            [y(0),Re(y(1)),Im(y(1)),...,Re(y(n/2))]              if n is even
            [y(0),Re(y(1)),Im(y(1)),...,Re(y(n/2)),Im(y(n/2))]   if n is odd

        where ``y(j) = sum[k=0..n-1] x[k] * exp(-sqrt(-1)*j*k*2*pi/n)``.

    See Also
    --------
    fft, irfft, numpy.fft.rfft

    Notes
    -----
    Within numerical accuracy, ``y == rfft(irfft(y))``.  Half precision
    inputs are converted to single precision; non floating-point inputs
    to double precision.  Long-double precision is not supported.  For a
    complex-dtype output, see `numpy.fft.rfft`.
    """
    data = _asfarray(x)

    # Only real-valued input is meaningful for the packed real transform.
    if not numpy.isrealobj(data):
        raise TypeError("1st argument must be real sequence")

    work_function = _DTYPE_TO_RFFT.get(data.dtype)
    if work_function is None:
        raise ValueError("type %s is not supported" % data.dtype)

    # If _asfarray already copied the input, overwriting is always safe.
    can_overwrite = overwrite_x or _datacopied(data, x)
    return _raw_fft(data, n, axis, 1, can_overwrite, work_function)
def _check_kbins(self, att, val): if not np.isrealobj(val): raise TypeError("k_bins must be real numbers") if not len(val): raise ValueError("k_bins must have at least one element.")
def hessian_band_pass(data, scale=1): """ Hessian, Gaussian 2nd order partial derivatives filter in the fourier domain """ # Gausian 2nd derivative in each direction # g = G(s) - G(s+1) # from one scale to the next r**2 -> 4*r**2 # (i*x)*(i*y)*g, etc # Pad to the bigger scale pd = _pad(data, scale + 1) # Get the scaled coordinate system if data.ndim == 2: x, y = _scale_coordinates(pd.shape, scale) rsq = x ** 2 + y ** 2 g = np.exp(-0.5 * rsq) - np.exp(-0.5 * 4 * rsq) temp = -1.0 * g * fftshift(fftn(pd)) dxx = ifftn(ifftshift(x * x * temp)) dxy = ifftn(ifftshift(x * y * temp)) dyy = ifftn(ifftshift(y * y * temp)) # Crop dxx = _crop(dxx, scale + 1) dxy = _crop(dxy, scale + 1) dyy = _crop(dyy, scale + 1) # Ensure that real functions stay real if np.isrealobj(data): dxx = np.real(dxx) dxy = np.real(dxy) dyy = np.real(dyy) return [dxx, dxy, dyy] elif data.ndim == 3: x, y, z = _scale_coordinates(pd.shape, scale) rsq = x ** 2 + y ** 2 + z ** 2 g = np.exp(-0.5 * rsq) - np.exp(-0.5 * 4 * rsq) temp = -1.0 * g * fftshift(fftn(pd)) dxx = ifftn(ifftshift(x * x * temp)) dxy = ifftn(ifftshift(x * y * temp)) dxz = ifftn(ifftshift(x * z * temp)) dyy = ifftn(ifftshift(y * y * temp)) dyz = ifftn(ifftshift(y * z * temp)) dzz = ifftn(ifftshift(z * z * temp)) # Crop dxx = _crop(dxx, scale + 1) dxy = _crop(dxy, scale + 1) dxz = _crop(dxz, scale + 1) dyy = _crop(dyy, scale + 1) dyz = _crop(dyz, scale + 1) dzz = _crop(dzz, scale + 1) # Ensure that real functions stay real if np.isrealobj(data): dxx = np.real(dxx) dxy = np.real(dxy) dxz = np.real(dxz) dyy = np.real(dyy) dyz = np.real(dyz) dzz = np.real(dzz) return [dxx, dxy, dxz, dyy, dyz, dzz] else: raise RuntimeError( "Unsupported number of dimensions {}. We only supports 2 or 3D arrays.".format( data.ndim ) )
def pmtm(x, eigenvalues, tapers, n_fft=None, method='adapt'):
    """Multitapering spectral estimation.

    :param array x: the data (1-D, length ``N``)
    :param eigenvalues: the window concentrations (eigenvalues), one per taper
    :param tapers: the matrix containing the tapering windows,
        shape ``(N, n_tapers)``
    :param n_fft: FFT length; defaults to ``max(256, 2**ceil(log2(N)))``.
        If ``n_fft < N`` the data is truncated, otherwise zero-padded.
    :param str method: set how the eigenvalues are used. Must be in
        ['unity', 'adapt', 'eigen']
    :return: ``(Sk_complex, Sk, weights)``: the complex tapered spectra,
        the averaged PSD estimate (one-sided and doubled for real input),
        and the weights that were applied.

    In spectral estimation, tapering windows reduce bias and averaging
    several spectra reduces variance.  Segment-based methods (Daniell,
    Welch, ...) trade resolution for that averaging; the multitaper method
    instead computes one periodogram per (orthogonal) taper over the whole
    record, keeping full resolution while still averaging.

    Example::

        from spectrum import data_cosine, dpss
        data = data_cosine(N=2048, A=0.1, sampling=1024, freq=200)
        [tapers, eigen] = dpss(2048, 2.5, 4)
        Sk_complex, Sk, weights = pmtm(data, eigenvalues=eigen, tapers=tapers)

    .. versionchanged:: 0.6.2a
       Returns the PSD instead of only plotting it; redundant
       functionality removed.
    """
    assert method in ['adapt', 'eigen', 'unity']

    N = len(x)

    if eigenvalues is not None and tapers is not None:
        eig = eigenvalues[:]
        tapers = tapers[:]
    else:
        raise ValueError(
            "if eigenvalues provided, v must be provided as well and viceversa."
        )
    nwin = len(eig)  # length of the eigen values vector to be used later

    if n_fft is None:
        n_fft = max(256, 2**np.ceil(np.log2(N)).astype('int'))

    # One tapered spectrum per window; if nfft < N, cut otherwise add zero.
    Sk_complex = np.fft.fft(tapers.transpose() * x, n_fft)
    Sk = (Sk_complex * Sk_complex.conj()).real  # abs() ** 2

    if method in ['eigen', 'unity']:
        if method == 'unity':
            weights = np.ones((nwin, 1))
        elif method == 'eigen':
            # The S_k spectrum can be weighted by the eigenvalues, as in
            # Park et al.
            weights = np.array(
                [_x / float(i + 1) for i, _x in enumerate(eig)])
            weights = weights.reshape(nwin, 1)
        Sk = np.mean(Sk * weights, axis=0)
    elif method == 'adapt':
        # This version uses the equations from [2] (P&W pp 368-370).
        Sk = Sk.transpose()  # (n_fft, nwin)
        # BUGFIX: average the first two eigenspectra per frequency bin.
        # The previous plain ``.mean()`` collapsed to a scalar, which made
        # the reshape below raise ValueError.
        S = Sk[:, :2].mean(axis=1)  # Initial spectrum estimate
        # Set tolerance for acceptance of spectral estimate:
        sig2 = np.dot(x, x) / float(N)
        tol = 0.0005 * sig2 / float(n_fft)
        a = sig2 * (1 - eig)

        S = S.reshape(n_fft, 1)
        for i in range(100):  # converges very quickly, bounded for safety
            # calculate weights
            b1 = np.multiply(S, np.ones((1, nwin)))
            b2 = np.multiply(S, eig.transpose()) + np.ones(
                (n_fft, 1)) * a.transpose()
            b = b1 / b2
            # calculate new spectral estimate
            weights = (b**2) * (np.ones((n_fft, 1)) * eig.transpose())
            S1 = ((weights * Sk).sum(axis=1, keepdims=True) /
                  weights.sum(axis=1, keepdims=True))
            S, S1 = S1, S  # S now holds the new estimate, S1 the old one
            if np.abs(S - S1).sum() / n_fft < tol:
                break
        Sk = (weights * Sk).mean(axis=1)

    if np.isrealobj(x):
        # Double to account for the energy in the negative frequencies.
        # BUGFIX: this previously read ``prm['n_fft']`` -- an undefined
        # name that raised NameError for every real-valued input.
        if n_fft % 2 == 0:
            Sk = 2 * Sk[:int(n_fft / 2 + 1)]
        else:
            Sk = 2 * Sk[:int((n_fft + 1) / 2)]

    return Sk_complex, Sk, weights
def welch(x, fs=1.0, window='hanning', nperseg=256, noverlap=None, nfft=None,
          detrend='constant', return_onesided=True, scaling='density',
          axis=-1):
    """
    Estimate power spectral density using Welch's method.

    Welch's method [1]_ computes an estimate of the power spectral density
    by dividing the data into overlapping segments, computing a modified
    periodogram for each segment and averaging the periodograms.

    Parameters
    ----------
    x : array_like
        Time series of measurement values
    fs : float, optional
        Sampling frequency of the `x` time series in units of Hz. Defaults
        to 1.0.
    window : str or tuple or array_like, optional
        Desired window to use. See `get_window` for a list of windows and
        required parameters. If `window` is array_like it will be used
        directly as the window and its length will be used for nperseg.
        Defaults to 'hanning'.
    nperseg : int, optional
        Length of each segment.  Defaults to 256.
    noverlap : int, optional
        Number of points to overlap between segments. If None,
        ``noverlap = nperseg / 2``.  Defaults to None.
    nfft : int, optional
        Length of the FFT used, if a zero padded FFT is desired.  If None,
        the FFT length is `nperseg`. Defaults to None.
    detrend : str or function or False, optional
        Specifies how to detrend each segment. If `detrend` is a string,
        it is passed as the ``type`` argument to `detrend`. If it is a
        function, it takes a segment and returns a detrended segment.
        If `detrend` is False, no detrending is done. Defaults to
        'constant'.
    return_onesided : bool, optional
        If True, return a one-sided spectrum for real data. If False
        return a two-sided spectrum. Note that for complex data, a
        two-sided spectrum is always returned.
    scaling : { 'density', 'spectrum' }, optional
        Selects between computing the power spectral density ('density')
        where Pxx has units of V**2/Hz if x is measured in V and computing
        the power spectrum ('spectrum') where Pxx has units of V**2 if x
        is measured in V. Defaults to 'density'.
    axis : int, optional
        Axis along which the periodogram is computed; the default is over
        the last axis (i.e. ``axis=-1``).

    Returns
    -------
    f : ndarray
        Array of sample frequencies.
    Pxx : ndarray
        Power spectral density or power spectrum of x.

    See Also
    --------
    periodogram: Simple, optionally modified periodogram
    lombscargle: Lomb-Scargle periodogram for unevenly sampled data

    Notes
    -----
    An appropriate amount of overlap will depend on the choice of window
    and on your requirements.  For the default 'hanning' window an overlap
    of 50% is a reasonable trade off between accurately estimating the
    signal power, while not over counting any of the data.  Narrower
    windows may require a larger overlap. If `noverlap` is 0, this method
    is equivalent to Bartlett's method [2]_.

    .. versionadded:: 0.12.0

    References
    ----------
    .. [1] P. Welch, "The use of the fast Fourier transform for the
           estimation of power spectra: A method based on time averaging
           over short, modified periodograms", IEEE Trans. Audio
           Electroacoust. vol. 15, pp. 70-73, 1967.
    .. [2] M.S. Bartlett, "Periodogram Analysis and Continuous Spectra",
           Biometrika, vol. 37, pp. 1-16, 1950.

    Examples
    --------
    >>> from scipy import signal
    >>> import matplotlib.pyplot as plt

    Generate a test signal, a 2 Vrms sine wave at 1234 Hz, corrupted by
    0.001 V**2/Hz of white noise sampled at 10 kHz.

    >>> fs = 10e3
    >>> N = 1e5
    >>> amp = 2*np.sqrt(2)
    >>> freq = 1234.0
    >>> noise_power = 0.001 * fs / 2
    >>> time = np.arange(N) / fs
    >>> x = amp*np.sin(2*np.pi*freq*time)
    >>> x += np.random.normal(scale=np.sqrt(noise_power), size=time.shape)

    Compute and plot the power spectral density.

    >>> f, Pxx_den = signal.welch(x, fs, nperseg=1024)
    >>> plt.semilogy(f, Pxx_den)
    >>> plt.ylim([0.5e-3, 1])
    >>> plt.xlabel('frequency [Hz]')
    >>> plt.ylabel('PSD [V**2/Hz]')
    >>> plt.show()

    If we average the last half of the spectral density, to exclude the
    peak, we can recover the noise power on the signal.

    >>> np.mean(Pxx_den[256:])
    0.0009924865443739191

    Now compute and plot the power spectrum.

    >>> f, Pxx_spec = signal.welch(x, fs, 'flattop', 1024,
    ...                            scaling='spectrum')
    >>> plt.figure()
    >>> plt.semilogy(f, np.sqrt(Pxx_spec))
    >>> plt.xlabel('frequency [Hz]')
    >>> plt.ylabel('Linear spectrum [V RMS]')
    >>> plt.show()

    The peak height in the power spectrum is an estimate of the RMS
    amplitude.

    >>> np.sqrt(Pxx_spec.max())
    2.0077340678640727
    """
    x = np.asarray(x)
    # Empty input: return empty frequency and PSD arrays of matching shape.
    if x.size == 0:
        return np.empty(x.shape), np.empty(x.shape)

    # Work along the last axis internally; roll back before returning.
    if axis != -1:
        x = np.rollaxis(x, axis, len(x.shape))

    # Shrink nperseg (with a warning) rather than fail on short input.
    if x.shape[-1] < nperseg:
        warnings.warn('nperseg = %d, is greater than x.shape[%d] = %d, using '
                      'nperseg = x.shape[%d]'
                      % (nperseg, axis, x.shape[axis], axis))
        nperseg = x.shape[-1]

    if isinstance(window, string_types) or type(window) is tuple:
        win = get_window(window, nperseg)
    else:
        # An explicit window array overrides nperseg with its own length.
        win = np.asarray(window)
        if len(win.shape) != 1:
            raise ValueError('window must be 1-D')
        if win.shape[0] > x.shape[-1]:
            raise ValueError('window is longer than x.')
        nperseg = win.shape[0]

    # numpy 1.5.1 doesn't have result_type.
    # 'f' * x dtype -> at-least-single float/complex; .lower() drops any
    # complex dtype down to its real counterpart for the Pxx output.
    outdtype = (np.array([x[0]]) * np.array([1], 'f')).dtype.char.lower()
    if win.dtype != outdtype:
        win = win.astype(outdtype)

    if scaling == 'density':
        scale = 1.0 / (fs * (win*win).sum())
    elif scaling == 'spectrum':
        scale = 1.0 / win.sum()**2
    else:
        raise ValueError('Unknown scaling: %r' % scaling)

    if noverlap is None:
        noverlap = nperseg // 2
    elif noverlap >= nperseg:
        raise ValueError('noverlap must be less than nperseg.')

    if nfft is None:
        nfft = nperseg
    elif nfft < nperseg:
        raise ValueError('nfft must be greater than or equal to nperseg.')

    if not detrend:
        detrend_func = lambda seg: seg
    elif not hasattr(detrend, '__call__'):
        detrend_func = lambda seg: signaltools.detrend(seg, type=detrend)
    elif axis != -1:
        # Wrap this function so that it receives a shape that it could
        # reasonably expect to receive.
        def detrend_func(seg):
            seg = np.rollaxis(seg, -1, axis)
            seg = detrend(seg)
            return np.rollaxis(seg, axis, len(seg.shape))
    else:
        detrend_func = detrend

    # Starting index of every segment.
    step = nperseg - noverlap
    indices = np.arange(0, x.shape[-1]-nperseg+1, step)

    if np.isrealobj(x) and return_onesided:
        outshape = list(x.shape)
        if nfft % 2 == 0:  # even
            outshape[-1] = nfft // 2 + 1
            Pxx = np.empty(outshape, outdtype)
            for k, ind in enumerate(indices):
                x_dt = detrend_func(x[..., ind:ind+nperseg])
                xft = fftpack.rfft(x_dt*win, nfft)
                # fftpack.rfft returns the positive frequency part of the
                # fft as real values, packed r r i r i r i ...
                # this indexing is to extract the matching real and
                # imaginary parts, while also handling the pure real zero
                # and nyquist frequencies.
                if k == 0:
                    Pxx[..., (0,-1)] = xft[..., (0,-1)]**2
                    Pxx[..., 1:-1] = xft[..., 1:-1:2]**2 + xft[..., 2::2]**2
                else:
                    # Running mean over segments: old*k/(k+1) + new/(k+1).
                    Pxx *= k/(k+1.0)
                    Pxx[..., (0,-1)] += xft[..., (0,-1)]**2 / (k+1.0)
                    Pxx[..., 1:-1] += (xft[..., 1:-1:2]**2 +
                                       xft[..., 2::2]**2) / (k+1.0)
        else:  # odd
            outshape[-1] = (nfft+1) // 2
            Pxx = np.empty(outshape, outdtype)
            for k, ind in enumerate(indices):
                x_dt = detrend_func(x[..., ind:ind+nperseg])
                xft = fftpack.rfft(x_dt*win, nfft)
                if k == 0:
                    Pxx[..., 0] = xft[..., 0]**2
                    Pxx[..., 1:] = xft[..., 1::2]**2 + xft[..., 2::2]**2
                else:
                    Pxx *= k/(k+1.0)
                    Pxx[..., 0] += xft[..., 0]**2 / (k+1)
                    Pxx[..., 1:] += (xft[..., 1::2]**2 +
                                     xft[..., 2::2]**2) / (k+1.0)

        # One-sided spectrum: interior bins carry the energy of both the
        # positive and negative frequency, hence the factor 2.
        Pxx[..., 1:-1] *= 2*scale
        Pxx[..., (0,-1)] *= scale
        f = np.arange(Pxx.shape[-1]) * (fs/nfft)
    else:
        # Complex input or two-sided request: plain complex FFT per segment.
        for k, ind in enumerate(indices):
            x_dt = detrend_func(x[..., ind:ind+nperseg])
            xft = fftpack.fft(x_dt*win, nfft)
            if k == 0:
                Pxx = (xft * xft.conj()).real
            else:
                Pxx *= k/(k+1.0)
                Pxx += (xft * xft.conj()).real / (k+1.0)
        Pxx *= scale
        f = fftpack.fftfreq(nfft, 1.0/fs)

    if axis != -1:
        Pxx = np.rollaxis(Pxx, -1, axis)

    return f, Pxx
def _logm_triu(T):
    """
    Compute matrix logarithm of an upper triangular matrix.

    The matrix logarithm is the inverse of
    expm: expm(logm(`T`)) == `T`

    Parameters
    ----------
    T : (N, N) array_like
        Upper triangular matrix whose logarithm to evaluate

    Returns
    -------
    logm : (N, N) ndarray
        Matrix logarithm of `T`

    Raises
    ------
    ValueError
        If `T` is not a square 2-D array.

    References
    ----------
    .. [1] Awad H. Al-Mohy and Nicholas J. Higham (2012)
           "Improved Inverse Scaling and Squaring Algorithms
           for the Matrix Logarithm."
           SIAM Journal on Scientific Computing, 34 (4). C152-C169.
           ISSN 1095-7197

    .. [2] Nicholas J. Higham (2008)
           "Functions of Matrices: Theory and Computation"
           ISBN 978-0-898716-46-7

    .. [3] Nicholas J. Higham and Lijing lin (2011)
           "A Schur-Pade Algorithm for Fractional Powers of a Matrix."
           SIAM Journal on Matrix Analysis and Applications,
           32 (3). pp. 1056-1078. ISSN 0895-4798
    """
    T = np.asarray(T)
    if len(T.shape) != 2 or T.shape[0] != T.shape[1]:
        raise ValueError('expected an upper triangular square matrix')
    n, n = T.shape

    # Construct T0 with the appropriate type,
    # depending on the dtype and the spectrum of T.
    # Real arithmetic is kept only when all eigenvalues are non-negative.
    T_diag = np.diag(T)
    keep_it_real = np.isrealobj(T) and np.min(T_diag) >= 0
    if keep_it_real:
        T0 = T
    else:
        T0 = T.astype(complex)

    # Define bounds given in Table (2.1).
    # theta[m] bounds the approximation error of the degree-m Pade scheme;
    # index 0 is unused.
    theta = (None,
             1.59e-5, 2.31e-3, 1.94e-2, 6.21e-2,
             1.28e-1, 2.06e-1, 2.88e-1, 3.67e-1,
             4.39e-1, 5.03e-1, 5.60e-1, 6.09e-1,
             6.52e-1, 6.89e-1, 7.21e-1, 7.49e-1)

    # R = T0**(1/2**s) - I, with s square roots taken and m the Pade degree.
    R, s, m = _inverse_squaring_helper(T0, theta)

    # Evaluate U = 2**s r_m(T - I) using the partial fraction expansion (1.1).
    # This requires the nodes and weights
    # corresponding to degree-m Gauss-Legendre quadrature.
    # These quadrature arrays need to be transformed from the [-1, 1]
    # interval to the [0, 1] interval.
    nodes, weights = scipy.special.p_roots(m)
    nodes = nodes.real
    if nodes.shape != (m,) or weights.shape != (m,):
        raise Exception('internal error')
    nodes = 0.5 + 0.5 * nodes
    weights = 0.5 * weights
    ident = np.identity(n)
    U = np.zeros_like(R)
    for alpha, beta in zip(weights, nodes):
        U += solve_triangular(ident + beta*R, alpha*R)
    # Undo the repeated square roots: logm(A) = 2**s * logm(A**(1/2**s)).
    U *= np.exp2(s)

    # Skip this step if the principal branch
    # does not exist at T0; this happens when a diagonal entry of T0
    # is negative with imaginary part 0.
    has_principal_branch = all(x.real > 0 or x.imag != 0
                               for x in np.diag(T0))
    if has_principal_branch:

        # Recompute diagonal entries of U.
        # The exact scalar log of each eigenvalue is cheaper and more
        # accurate than the quadrature result.
        U[np.diag_indices(n)] = np.log(np.diag(T0))

        # Recompute superdiagonal entries of U.
        # This indexing of this code should be renovated
        # when newer np.diagonal() becomes available.
        for i in range(n-1):
            l1 = T0[i, i]
            l2 = T0[i+1, i+1]
            t12 = T0[i, i+1]
            U[i, i+1] = _logm_superdiag_entry(l1, l2, t12)

    # Return the logm of the upper triangular matrix.
    if not np.array_equal(U, np.triu(U)):
        raise Exception('U is not upper triangular')

    return U
def _remainder_matrix_power(A, t): """ Compute the fractional power of a matrix, for fractions -1 < t < 1. This uses algorithm (3.1) of [1]_. The Pade approximation itself uses algorithm (4.1) of [2]_. Parameters ---------- A : (N, N) array_like Matrix whose fractional power to evaluate. t : float Fractional power between -1 and 1 exclusive. Returns ------- X : (N, N) array_like The fractional power of the matrix. References ---------- .. [1] Nicholas J. Higham and Lijing Lin (2013) "An Improved Schur-Pade Algorithm for Fractional Powers of a Matrix and their Frechet Derivatives." .. [2] Nicholas J. Higham and Lijing lin (2011) "A Schur-Pade Algorithm for Fractional Powers of a Matrix." SIAM Journal on Matrix Analysis and Applications, 32 (3). pp. 1056-1078. ISSN 0895-4798 """ # This code block is copied from numpy.matrix_power(). A = np.asarray(A) if len(A.shape) != 2 or A.shape[0] != A.shape[1]: raise ValueError('input must be a square array') # Get the number of rows and columns. n, n = A.shape # Triangularize the matrix if necessary, # attempting to preserve dtype if possible. if np.array_equal(A, np.triu(A)): Z = None T = A else: if np.isrealobj(A): T, Z = schur(A) if not np.array_equal(T, np.triu(T)): T, Z = rsf2csf(T, Z) else: T, Z = schur(A, output='complex') # Zeros on the diagonal of the triangular matrix are forbidden, # because the inverse scaling and squaring cannot deal with it. T_diag = np.diag(T) if np.count_nonzero(T_diag) != n: raise FractionalMatrixPowerError( 'cannot use inverse scaling and squaring to find ' 'the fractional matrix power of a singular matrix') # If the triangular matrix is real and has a negative # entry on the diagonal, then force the matrix to be complex. if np.isrealobj(T) and np.min(T_diag) < 0: T = T.astype(complex) # Get the fractional power of the triangular matrix, # and de-triangularize it if necessary. U = _remainder_matrix_power_triu(T, t) if Z is not None: ZH = np.conjugate(Z).T return Z.dot(U).dot(ZH) else: return U
def LEVINSON(r, order=None, allow_singularity=False):
    r"""Levinson-Durbin recursion.

    Find the coefficients of a length(r)-1 order autoregressive linear
    process.

    :param r: autocorrelation sequence of length N + 1 (first element
        being the zero-lag autocorrelation)
    :param order: requested order of the autoregressive coefficients.
        default is N.
    :param allow_singularity: false by default. Other implementations
        may be True (e.g., octave)
    :return:
        * the `N+1` autoregressive coefficients :math:`A=(1, a_1...a_N)`
          (the leading 1 is implicit; only :math:`a_1...a_N` are returned)
        * the prediction errors
        * the `N` reflections coefficients values

    This algorithm solves the set of complex linear simultaneous equations

    .. math:: \bold{T}_M \left( \begin{array}{c} 1 \\ \bold{a}_M \end{array}
        \right) = \left( \begin{array}{c} \rho_M \\
        \bold{0}_M \end{array} \right)

    where :math:`\bold{T}_M` is a Hermitian Toeplitz matrix with elements
    :math:`T_0, T_1, \dots, T_M`, i.e. the symmetric Toeplitz system built
    from the input autocorrelation vector
    :math:`r = (r_1 \dots r_{N+1})`, where :math:`r_i^*` denotes the
    complex conjugate of :math:`r_i`.

    .. note:: Solving these equations by Gaussian elimination would
        require :math:`M^3` operations, whereas the Levinson algorithm
        requires :math:`M^2+M` additions and :math:`M^2+M` multiplications.

    .. doctest::

        >>> import numpy; from spectrum import LEVINSON
        >>> T = numpy.array([3., -2+0.5j, .7-1j])
        >>> a, e, k = LEVINSON(T)

    :raises ValueError: if the Toeplitz matrix is singular
        (prediction-error power drops to <= 0) and `allow_singularity`
        is False.
    """
    T0 = numpy.real(r[0])
    T = r[1:]
    if order is None:
        M = len(T)
    else:
        assert order <= len(T), 'order must be less than size of the input data'
        M = order

    # Real input keeps the recursion entirely in real arithmetic.
    realdata = numpy.isrealobj(r)
    dtype = float if realdata else complex
    A = numpy.zeros(M, dtype=dtype)
    ref = numpy.zeros(M, dtype=dtype)

    P = T0  # prediction-error power, updated each order
    for k in range(M):
        # Innovation term: T[k] + sum_j A[j] * T[k-j-1].
        save = T[k]
        for j in range(k):
            save = save + A[j] * T[k - j - 1]
        # Reflection coefficient for order k+1.
        temp = -save / P
        # Error-power update: P *= (1 - |temp|**2).
        if realdata:
            P = P * (1. - temp**2.)
        else:
            P = P * (1. - (temp.real**2 + temp.imag**2))
        if P <= 0 and not allow_singularity:
            raise ValueError("singular matrix")
        A[k] = temp
        ref[k] = temp  # save reflection coeff at each step
        if k == 0:
            continue

        # In-place symmetric update of the first k coefficients.
        khalf = (k + 1) // 2
        for j in range(khalf):
            kj = k - j - 1
            save = A[j]
            if realdata:
                A[j] = save + temp * A[kj]
                if j != kj:
                    A[kj] += temp * save
            else:
                A[j] = save + temp * A[kj].conjugate()
                if j != kj:
                    A[kj] = A[kj] + temp * save.conjugate()

    return A, P, ref
def fit(
        self,
        observation,
        embedding,
        initialization=None,
        num_classes=None,
        iterations=100,
        saliency=None,
        hermitize=True,
        covariance_norm='eigenvalue',
        eigenvalue_floor=1e-10,
        covariance_type="spherical",
        fixed_covariance=None,
        affiliation_eps=1e-10,
        weight_constant_axis=(-1,),
        spatial_weight=1.,
        spectral_weight=1.
) -> GCACGMM:
    """Fit the model with EM iterations on observations and embeddings.

    Args:
        observation: Complex-valued observations, shape (F, T, D) with
            D > 1; normalized to unit norm along the last axis before use.
        embedding: Real-valued embeddings, shape (F, T, E).
        initialization: Affiliations between 0 and 1, shape (F, K, T).
            Exactly one of `initialization` and `num_classes` must be
            given (enforced by the xor assertion below).
        num_classes: Scalar > 0; number of classes K used to draw a
            random initialization when `initialization` is None.
        iterations: Scalar > 0; number of EM iterations.
        saliency: Importance weighting for each observation, shape (F, T).
            Defaults to all-ones.
        hermitize: Forwarded to the M-step.
        covariance_norm: Forwarded to the M-step.
        eigenvalue_floor: Forwarded to the M-step.
        covariance_type: Either 'full', 'diagonal', or 'spherical'.
        fixed_covariance: Learned, if None. If fixed, you need to provide
            a covariance matrix with the correct shape.
        affiliation_eps: Used in M-step to clip affiliations.
        weight_constant_axis: Axis, along which weight is constant. The
            axis indices are based on the affiliation shape. Consequently:
            (-3, -2, -1) == constant, (-3, -1) == 'k',
            (-1,) == vanilla == 'fk', (-3,) == 'kt'.
        spatial_weight: Weight of the spatial model.
        spectral_weight: Weight of the spectral (embedding) model.

    Returns:
        The model produced by the final M-step.
    """
    assert xor(initialization is None, num_classes is None), (
        "Incompatible input combination. "
        "Exactly one of the two inputs has to be None: "
        f"{initialization is None} xor {num_classes is None}"
    )
    assert np.iscomplexobj(observation), observation.dtype
    assert np.isrealobj(embedding), embedding.dtype
    assert observation.shape[-1] > 1
    # Project each observation onto the unit sphere; `tiny` guards the
    # division against zero-norm vectors.
    observation = observation / np.maximum(
        np.linalg.norm(observation, axis=-1, keepdims=True),
        np.finfo(observation.dtype).tiny,
    )

    F, T, D = observation.shape
    _, _, E = embedding.shape

    if initialization is None and num_classes is not None:
        # Random affiliations, normalized so classes sum to one per frame.
        affiliation_shape = (F, num_classes, T)
        initialization = np.random.uniform(size=affiliation_shape)
        initialization /= np.einsum("...kt->...t", initialization)[
            ..., None, :
        ]

    if saliency is None:
        saliency = np.ones_like(initialization[..., 0, :])

    quadratic_form = np.ones_like(initialization)
    affiliation = initialization
    for iteration in range(iterations):
        # M-step builds the model from the (clipped) affiliations.
        model = self._m_step(
            observation,
            embedding,
            quadratic_form,
            affiliation=np.clip(
                affiliation, affiliation_eps, 1 - affiliation_eps
            ),
            saliency=saliency,
            hermitize=hermitize,
            covariance_norm=covariance_norm,
            eigenvalue_floor=eigenvalue_floor,
            covariance_type=covariance_type,
            fixed_covariance=fixed_covariance,
            weight_constant_axis=weight_constant_axis,
            spatial_weight=spatial_weight,
            spectral_weight=spectral_weight
        )

        # E-step is skipped after the final M-step.
        if iteration < iterations - 1:
            affiliation, quadratic_form = model._predict(
                observation=observation, embedding=embedding
            )

    return model
def _sqrtm_triu(T, blocksize=64):
    """
    Matrix square root of an upper triangular matrix.

    This is a helper function for `sqrtm` and `logm`.

    Parameters
    ----------
    T : (N, N) array_like upper triangular
        Matrix whose square root to evaluate
    blocksize : int, optional
        If the blocksize is not degenerate with respect to the size
        of the input array, then use a blocked algorithm. (Default: 64)

    Returns
    -------
    sqrtm : (N, N) ndarray
        Value of the sqrt function at `T`

    Raises
    ------
    SqrtmError
        Propagated from the within-block loop when no square root exists.

    References
    ----------
    .. [1] Edvin Deadman, Nicholas J. Higham, Rui Ralha (2013)
           "Blocked Schur Algorithms for Computing the Matrix Square Root,
           Lecture Notes in Computer Science, 7782. pp. 171-182.
    """
    # Real arithmetic is only safe when T is real with a non-negative
    # diagonal; otherwise eigenvalue square roots are complex.
    T_diag = np.diag(T)
    keep_it_real = np.isrealobj(T) and np.min(T_diag) >= 0

    # Cast to complex as necessary + ensure double precision
    if not keep_it_real:
        T = np.asarray(T, dtype=np.complex128, order="C")
        T_diag = np.asarray(T_diag, dtype=np.complex128)
    else:
        T = np.asarray(T, dtype=np.float64, order="C")
        T_diag = np.asarray(T_diag, dtype=np.float64)

    # R starts as the square roots of the eigenvalues on the diagonal.
    R = np.diag(np.sqrt(T_diag))

    # Compute the number of blocks to use; use at least one block.
    n, n = T.shape
    nblocks = max(n // blocksize, 1)

    # Compute the smaller of the two sizes of blocks that
    # we will actually use, and compute the number of large blocks.
    bsmall, nlarge = divmod(n, nblocks)
    blarge = bsmall + 1
    nsmall = nblocks - nlarge
    if nsmall * bsmall + nlarge * blarge != n:
        raise Exception('internal inconsistency')

    # Define the index range covered by each block.
    start_stop_pairs = []
    start = 0
    for count, size in ((nsmall, bsmall), (nlarge, blarge)):
        for i in range(count):
            start_stop_pairs.append((start, start + size))
            start += size

    # Within-block interactions (Cythonized)
    try:
        within_block_loop(R, T, start_stop_pairs, nblocks)
    except RuntimeError as e:
        # Translate the Cython helper's failure into the public error type.
        raise SqrtmError(*e.args) from e

    # Between-block interactions (Cython would give no significant speedup)
    # Each off-diagonal block of R solves a Sylvester equation
    # Rii*X + X*Rjj = S.
    for j in range(nblocks):
        jstart, jstop = start_stop_pairs[j]
        for i in range(j - 1, -1, -1):
            istart, istop = start_stop_pairs[i]
            S = T[istart:istop, jstart:jstop]
            if j - i > 1:
                S = S - R[istart:istop, istop:jstart].dot(
                    R[istop:jstart, jstart:jstop])

            # Invoke LAPACK.
            # For more details, see the solve_sylvester implemention
            # and the fortran dtrsyl and ztrsyl docs.
            Rii = R[istart:istop, istart:istop]
            Rjj = R[jstart:jstop, jstart:jstop]
            if keep_it_real:
                x, scale, info = dtrsyl(Rii, Rjj, S)
            else:
                x, scale, info = ztrsyl(Rii, Rjj, S)
            # NOTE(review): `info` from *trsyl is ignored here -- a
            # perturbed solve (info == 1) passes silently; confirm intended.
            R[istart:istop, jstart:jstop] = x * scale

    # Return the matrix square root.
    return R
def spectral_rolloff(y=None, sr=22050, S=None, n_fft=2048, hop_length=512,
                     freq=None, roll_percent=0.85):
    '''Compute the roll-off frequency for each spectrogram frame.

    The roll-off frequency is the lowest bin center frequency at which
    the cumulative spectral energy reaches ``roll_percent`` of the
    frame's total energy.

    Parameters
    ----------
    y : np.ndarray [shape=(n,)] or None
        audio time series

    sr : number > 0 [scalar]
        audio sampling rate of `y`

    S : np.ndarray [shape=(d, t)] or None (optional)
        spectrogram magnitude

    n_fft : int > 0 [scalar]
        FFT window size

    hop_length : int > 0 [scalar]
        hop length for STFT. See `librosa.core.stft` for details.

    freq : None or np.ndarray [shape=(d,) or shape=(d, t)]
        Center frequencies for spectrogram bins.
        If `None`, then FFT bin center frequencies are used.
        Otherwise, it can be a single array of `d` center frequencies.

        .. note:: `freq` is assumed to be sorted in increasing order

    roll_percent : float [0 < roll_percent < 1]
        Roll-off percentage.

    Returns
    -------
    rolloff : np.ndarray [shape=(1, t)]
        roll-off frequency for each frame

    Examples
    --------
    >>> y, sr = librosa.load(librosa.util.example_audio_file())
    >>> librosa.feature.spectral_rolloff(y=y, sr=sr)
    array([[ 8376.416,   968.994, ...,  8925.513,  9108.545]])
    '''
    if not 0.0 < roll_percent < 1.0:
        raise ParameterError('roll_percent must lie in the range (0, 1)')

    S, n_fft = _spectrogram(y=y, S=S, n_fft=n_fft, hop_length=hop_length)

    if not np.isrealobj(S):
        raise ParameterError('Spectral rolloff is only defined '
                             'with real-valued input')
    if np.any(S < 0):
        raise ParameterError('Spectral rolloff is only defined '
                             'with non-negative energies')

    # Default to the FFT bin center frequencies when no grid is supplied.
    bin_freqs = fft_frequencies(sr=sr, n_fft=n_fft) if freq is None else freq

    # Promote a 1-D grid to a column vector so it broadcasts over frames.
    if bin_freqs.ndim == 1:
        bin_freqs = bin_freqs.reshape((-1, 1))

    # Per-frame cumulative energy and the roll-off threshold.
    cumulative = np.cumsum(S, axis=0)
    cutoff = roll_percent * cumulative[-1]

    # NaN out all bins below the threshold, then take the lowest survivor.
    survivors = np.where(cumulative < cutoff, np.nan, 1)
    return np.nanmin(survivors * bin_freqs, axis=0, keepdims=True)
def periodogram(x, nfft=None, fs=1):
    """Compute the periodogram of the given signal, with the given fft size.

    Parameters
    ----------
    x : array-like
        input signal
    nfft : int
        size of the fft to compute the periodogram. If None (default), the
        length of the signal is used. if nfft > n, the signal is 0 padded.
    fs : float
        Sampling rate. By default, is 1 (normalized frequency. e.g. 0.5 is
        the Nyquist limit).

    Returns
    -------
    pxx : array-like
        The psd estimate.
    fgrid : array-like
        Frequency grid over which the periodogram was estimated.

    Examples
    --------
    Generate a signal with two sinusoids, and compute its periodogram:

    >>> fs = 1000
    >>> x = np.sin(2 * np.pi  * 0.1 * fs * np.linspace(0, 0.5, 0.5*fs))
    >>> x += np.sin(2 * np.pi  * 0.2 * fs * np.linspace(0, 0.5, 0.5*fs))
    >>> px, fx = periodogram(x, 512, fs)

    Notes
    -----
    Only real signals supported for now.

    Returns the one-sided version of the periodogram.

    Discrepancy with matlab: matlab compute the psd in unit of power /
    radian / sample, and we compute the psd in unit of power / sample: to
    get the same result as matlab, just multiply the result from talkbox
    by 2pi"""
    # TODO: this is basic to the point of being useless:
    #   - support Daniel smoothing
    #   - support windowing
    #   - trend/mean handling
    #   - one-sided vs two-sided
    #   - plot
    #   - support complex input
    x = np.atleast_1d(x)
    n = x.size

    if x.ndim > 1:
        raise ValueError("Only rank 1 input supported for now.")
    if not np.isrealobj(x):
        raise ValueError("Only real input supported for now.")
    if not nfft:
        nfft = n
    if nfft < n:
        raise ValueError("nfft < signal size not supported yet")

    pxx = np.abs(fft(x, nfft))**2
    # Number of one-sided bins.
    # BUGFIX: use floor division -- true division yields a float under
    # Python 3, which breaks both np.linspace(..., pn) and the slice below.
    if nfft % 2 == 0:
        pn = nfft // 2 + 1
    else:
        pn = (nfft + 1) // 2

    fgrid = np.linspace(0, fs * 0.5, pn)
    # Normalize by signal length and sampling rate (power / sample).
    return pxx[:pn] / (n * fs), fgrid
def spectral_centroid(y=None, sr=22050, S=None, n_fft=2048, hop_length=512,
                      freq=None):
    '''Compute the spectral centroid of each frame.

    Every frame of the magnitude spectrogram is normalized and treated
    as a distribution over frequency bins, and the mean frequency
    (centroid) of that distribution is extracted per frame.

    Parameters
    ----------
    y : np.ndarray [shape=(n,)] or None
        audio time series

    sr : number > 0 [scalar]
        audio sampling rate of `y`

    S : np.ndarray [shape=(d, t)] or None (optional)
        spectrogram magnitude

    n_fft : int > 0 [scalar]
        FFT window size

    hop_length : int > 0 [scalar]
        hop length for STFT. See `librosa.core.stft` for details.

    freq : None or np.ndarray [shape=(d,) or shape=(d, t)]
        Center frequencies for spectrogram bins.  If `None`, then FFT
        bin center frequencies are used.  Otherwise, it can be a single
        array of `d` center frequencies, or a matrix of center
        frequencies as constructed by `librosa.core.ifgram`.

    Returns
    -------
    centroid : np.ndarray [shape=(1, t)]
        centroid frequencies

    Raises
    ------
    ParameterError
        If the spectrogram is complex-valued or contains negative
        energies.

    See Also
    --------
    librosa.core.stft
        Short-time Fourier Transform
    librosa.core.ifgram
        Instantaneous-frequency spectrogram
    '''
    S, n_fft = _spectrogram(y=y, S=S, n_fft=n_fft, hop_length=hop_length)

    # The centroid is only meaningful for non-negative real magnitudes.
    if not np.isrealobj(S):
        raise ParameterError('Spectral centroid is only defined '
                             'with real-valued input')
    if np.any(S < 0):
        raise ParameterError('Spectral centroid is only defined '
                             'with non-negative energies')

    if freq is None:
        freq = fft_frequencies(sr=sr, n_fft=n_fft)

    # Promote a 1-d frequency vector to a column for broadcasting.
    if freq.ndim == 1:
        freq = freq.reshape((-1, 1))

    # Column-normalize S so each frame sums to one, then take the
    # frequency-weighted mean per frame.
    distribution = util.normalize(S, norm=1, axis=0)
    return np.sum(freq * distribution, axis=0, keepdims=True)
def sqrtm(A, disp=True, blocksize=64):
    """
    Matrix square root.

    Parameters
    ----------
    A : (N, N) array_like
        Matrix whose square root to evaluate
    disp : bool, optional
        Print warning if error in the result is estimated large
        instead of returning estimated error. (Default: True)
    blocksize : integer, optional
        If the blocksize is not degenerate with respect to the
        size of the input array, then use a blocked algorithm.
        (Default: 64)

    Returns
    -------
    sqrtm : (N, N) ndarray
        Value of the sqrt function at `A`

    errest : float
        (if disp == False)

        Frobenius norm of the estimated error, ||err||_F / ||A||_F

    References
    ----------
    .. [1] Edvin Deadman, Nicholas J. Higham, Rui Ralha (2013)
           "Blocked Schur Algorithms for Computing the Matrix
           Square Root, Lecture Notes in Computer Science, 7782.
           pp. 171-182.
    """
    A = np.asarray(A)
    if A.ndim != 2:
        raise ValueError("Non-matrix input to matrix function.")
    if blocksize < 1:
        raise ValueError("The blocksize should be at least 1.")

    # Prefer the real Schur form for real input; fall back to the
    # complex form when the real factor is not upper triangular
    # (i.e. has 2x2 blocks from complex eigenvalue pairs).
    if np.isrealobj(A):
        T, Z = schur(A)
        if not np.array_equal(T, np.triu(T)):
            T, Z = rsf2csf(T, Z)
    else:
        T, Z = schur(A, output='complex')

    failed = False
    try:
        # Square root of the triangular Schur factor, transformed
        # back: X = Z R Z^H.
        R = _sqrtm_triu(T, blocksize=blocksize)
        X = Z.dot(R).dot(np.conjugate(Z).T)
    except SqrtmError:
        failed = True
        X = np.empty_like(A)
        X.fill(np.nan)

    if disp:
        if np.any(np.diag(T) == 0):
            print("Matrix is singular and may not have a square root.")
        elif failed:
            print("Failed to find a square root.")
        return X

    try:
        # NOTE(review): the squared norm in the numerator does not match
        # the docstring's ||err||_F / ||A||_F; preserved as-is to match
        # existing behavior — confirm before changing.
        errest = norm(X.dot(X) - A, 'fro')**2 / norm(A, 'fro')
    except ValueError:
        # NaNs in matrix
        errest = np.inf
    return X, errest
def spectral_bandwidth(y=None, sr=22050, S=None, n_fft=2048, hop_length=512,
                       freq=None, centroid=None, norm=True, p=2):
    '''Compute the p'th-order spectral bandwidth:

        (sum_k S[k] * (freq[k] - centroid)**p)**(1/p)

    Parameters
    ----------
    y : np.ndarray [shape=(n,)] or None
        audio time series

    sr : number > 0 [scalar]
        audio sampling rate of `y`

    S : np.ndarray [shape=(d, t)] or None (optional)
        spectrogram magnitude

    n_fft : int > 0 [scalar]
        FFT window size

    hop_length : int > 0 [scalar]
        hop length for STFT. See `librosa.core.stft` for details.

    freq : None or np.ndarray [shape=(d,) or shape=(d, t)]
        Center frequencies for spectrogram bins.  If `None`, then FFT
        bin center frequencies are used.  Otherwise, it can be a single
        array of `d` center frequencies, or a matrix of center
        frequencies as constructed by `librosa.core.ifgram`.

    centroid : None or np.ndarray [shape=(1, t)]
        pre-computed centroid frequencies

    norm : bool
        Normalize per-frame spectral energy (sum to one)

    p : float > 0
        Power to raise deviation from spectral centroid.

    Returns
    -------
    bandwidth : np.ndarray [shape=(1, t)]
        frequency bandwidth for each frame

    Raises
    ------
    ParameterError
        If the spectrogram is complex-valued or contains negative
        energies.
    '''
    S, n_fft = _spectrogram(y=y, S=S, n_fft=n_fft, hop_length=hop_length)

    # Bandwidth is only meaningful for non-negative real magnitudes.
    if not np.isrealobj(S):
        raise ParameterError('Spectral bandwidth is only defined '
                             'with real-valued input')
    if np.any(S < 0):
        raise ParameterError('Spectral bandwidth is only defined '
                             'with non-negative energies')

    if centroid is None:
        centroid = spectral_centroid(y=y, sr=sr, S=S, n_fft=n_fft,
                                     hop_length=hop_length, freq=freq)

    if freq is None:
        freq = fft_frequencies(sr=sr, n_fft=n_fft)

    # Distance of every bin from the per-frame centroid.
    if freq.ndim == 1:
        spread = np.abs(np.subtract.outer(freq, np.squeeze(centroid)))
    else:
        spread = np.abs(freq - np.squeeze(centroid))

    # Column-normalize S so each frame is a distribution over bins.
    if norm:
        S = util.normalize(S, norm=1, axis=0)

    return np.sum(S * spread**p, axis=0, keepdims=True)**(1. / p)
def _dst(x, type, n=None, axis=-1, overwrite_x=False, normalize=None):
    """
    Return Discrete Sine Transform of arbitrary type sequence x.

    Parameters
    ----------
    x : array-like
        input array.
    type : {1, 2, 3}
        DST type.
    n : int, optional
        Length of the transform (padding/truncating not implemented).
    axis : int, optional
        Axis along which the dst is computed. (default=-1)
    overwrite_x : bool, optional
        If True the contents of x can be destroyed. (default=False)
    normalize : {None, 'ortho'}, optional
        Normalization mode.

    Returns
    -------
    z : real ndarray
    """
    tmp = np.asarray(x)
    if not np.isrealobj(tmp):
        raise TypeError("1st argument must be real sequence")

    if n is None:
        n = tmp.shape[axis]
    else:
        raise NotImplementedError("Padding/truncating not yet implemented")

    # Select the fftpack worker matching the precision of the input.
    if tmp.dtype == np.double:
        workers = {1: _fftpack.ddst1, 2: _fftpack.ddst2, 3: _fftpack.ddst3}
    elif tmp.dtype == np.float32:
        workers = {1: _fftpack.dst1, 2: _fftpack.dst2, 3: _fftpack.dst3}
    else:
        raise ValueError("dtype %s not supported" % tmp.dtype)

    if type not in workers:
        raise ValueError("Type %d not understood" % type)
    f = workers[type]

    # Translate the normalization mode to the integer flag the
    # fftpack workers expect.
    if not normalize:
        nm = 0
    elif normalize == "ortho":
        nm = 1
    else:
        raise ValueError("Unknown normalize mode %s" % normalize)

    if type == 1 and n < 2:
        raise ValueError("DST-I is not defined for size < 2")

    overwrite_x = overwrite_x or _datacopied(tmp, x)

    if axis in (-1, tmp.ndim - 1):
        return f(tmp, n, nm, overwrite_x)

    # Move the target axis last, transform, and move it back.
    tmp = np.swapaxes(tmp, axis, -1)
    tmp = f(tmp, n, nm, overwrite_x)
    return np.swapaxes(tmp, axis, -1)
def double_fermi_surface_average(q, e, g2=None, kT=0.025,
        occupations=occupations.fermi_dirac, comm=comm):
    r"""Calculate double Fermi-surface average.

    Please note that not the average itself is returned!

    .. math::

        \langle g^2 \rangle = \frac
            {\sum_{\vec q \nu \vec k m n} |g_{\vec q \nu \vec k m n}|^2
                \delta(\epsilon_{\vec k n})
                \delta(\epsilon_{\vec k + \vec q m})}
            {\sum_{\vec q \vec k m n}
                \delta(\epsilon_{\vec k n})
                \delta(\epsilon_{\vec k + \vec q m})}

    Parameters
    ----------
    q : list of tuple
        List of q points in crystal coordinates :math:`q_i \in [0, 2 \pi)`.
    e : ndarray
        Electron dispersion on uniform mesh. The Fermi level must be at zero.
    g2 : ndarray
        Quantity to be averaged, typically squared electron-phonon coupling.
    kT : float
        Smearing temperature.
    occupations : function
        Particle distribution as a function of energy divided by `kT`.

    Returns
    -------
    ndarray
        Enumerator of double Fermi-surface average before
        :math:`\vec q \nu` summation.
    ndarray
        Denominator of double Fermi-surface average before
        :math:`\vec q` summation.
    """
    nQ = len(q)

    # Pad the q points to three components; missing components are zero.
    q_orig = q
    q = np.zeros((nQ, 3))
    q[:, :len(q_orig[0])] = q_orig

    # Reshape the dispersion onto an explicit 3-d k mesh; missing mesh
    # dimensions are treated as having size 1.
    nk_orig = e.shape[:-1]
    nk = np.ones(3, dtype=int)
    nk[:len(nk_orig)] = nk_orig
    nbnd = e.shape[-1]
    e = np.reshape(e, (nk[0], nk[1], nk[2], nbnd))

    if g2 is None:
        # No weights given: average a constant over a single
        # fictitious mode.
        g2 = np.ones((nQ, 1))
    else:
        g2 = np.reshape(g2, (nQ, -1, nk[0], nk[1], nk[2], nbnd, nbnd))
    nmodes = g2.shape[1]

    # Smeared delta function of the band energies (Fermi level at zero).
    d = occupations.delta(e / kT) / kT

    # Duplicate the mesh along each k dimension so that k + q indexing
    # below can be done with plain slices instead of modular arithmetic.
    e = np.tile(e, (2, 2, 2, 1))
    d = np.tile(d, (2, 2, 2, 1))

    # Conversion factor from crystal coordinates in [0, 2 pi) to
    # integer mesh indices.
    scale = nk / (2 * np.pi)

    # Distribute the q points over MPI ranks.
    sizes, bounds = MPI.distribute(nQ, bounds=True, comm=comm)

    my_enum = np.empty((sizes[comm.rank], nmodes),
        dtype=float if np.isrealobj(g2) else complex)
    my_deno = np.empty(sizes[comm.rank])

    # Product of delta functions at k and k + q for all band pairs.
    d2 = np.empty((nk[0], nk[1], nk[2], nbnd, nbnd))

    k1 = slice(0, nk[0])
    k2 = slice(0, nk[1])
    k3 = slice(0, nk[2])

    for my_iq, iq in enumerate(range(*bounds[comm.rank:comm.rank + 2])):
        # Integer shift of the k mesh corresponding to this q point.
        q1, q2, q3 = np.round(q[iq] * scale).astype(int) % nk

        kq1 = slice(q1, q1 + nk[0])
        kq2 = slice(q2, q2 + nk[1])
        kq3 = slice(q3, q3 + nk[2])

        for m in range(nbnd):
            for n in range(nbnd):
                d2[..., m, n] = d[kq1, kq2, kq3, m] * d[k1, k2, k3, n]

        for nu in range(nmodes):
            my_enum[my_iq, nu] = (g2[iq, nu] * d2).sum()

        my_deno[my_iq] = d2.sum()

    # Collect the per-rank results on all ranks.
    enum = np.empty((nQ, nmodes), dtype=my_enum.dtype)
    deno = np.empty(nQ)

    comm.Allgatherv(my_enum, (enum, sizes * nmodes))
    comm.Allgatherv(my_deno, (deno, sizes))

    return enum, deno
def startpy():
    """Demonstrate np.isrealobj on a small real-valued list."""
    values = [1, 3, 5, 4]
    print("Input array : ", values)

    is_real = np.isrealobj(values)
    print("\nIs real : ", is_real)
def phonon_self_energy(q, e, g2=None, kT=0.025, eps=1e-15, omega=0.0,
        occupations=occupations.fermi_dirac, fluctuations=False,
        Delta=None, Delta_diff=False,
        Delta_occupations=occupations.gauss, Delta_kT=0.025, comm=comm):
    r"""Calculate phonon self-energy.

    .. math::

        \Pi_{\vec q \nu}(\omega) = \frac 2 N \sum_{\vec k m n}
            |g_{\vec q \nu \vec k m n}|^2 \frac
                {f(\epsilon_{\vec k n}) - f(\epsilon_{\vec k + \vec q m})}
                {\epsilon_{\vec k n} - \epsilon_{\vec k + \vec q m} + \omega}

    Parameters
    ----------
    q : list of tuple
        List of q points in crystal coordinates :math:`q_i \in [0, 2 \pi)`.
    e : ndarray
        Electron dispersion on uniform mesh. The Fermi level must be at zero.
    g2 : ndarray
        Squared electron-phonon coupling.
    kT : float
        Smearing temperature.
    eps : float
        Smallest allowed absolute value of divisor.
    omega : float
        Nonadiabatic frequency argument; shall include small imaginary
        regulator if nonzero.
    occupations : function
        Particle distribution as a function of energy divided by `kT`.
    fluctuations : bool
        Return integrand too (for fluctuation analysis)?
    Delta : float
        Half the width of energy window around Fermi level to be excluded.
    Delta_diff : bool
        Calculate derivative of phonon self-energy w.r.t. `Delta`?
    Delta_occupations : function
        Smoothened Heaviside function to realize excluded energy window.
    Delta_kT : float
        Temperature to smoothen Heaviside function.

    Returns
    -------
    ndarray
        Phonon self-energy.
    ndarray, optional
        Corresponding integrand (only if `fluctuations` is true).
    """
    nQ = len(q)

    # Pad the q points to three components; missing components are zero.
    q_orig = q
    q = np.zeros((nQ, 3))
    q[:, :len(q_orig[0])] = q_orig

    # Reshape the dispersion onto an explicit 3-d k mesh; missing mesh
    # dimensions are treated as having size 1.
    nk_orig = e.shape[:-1]
    nk = np.ones(3, dtype=int)
    nk[:len(nk_orig)] = nk_orig
    nbnd = e.shape[-1]
    e = np.reshape(e, (nk[0], nk[1], nk[2], nbnd))

    if g2 is None:
        # No coupling given: use unit weights over a single
        # fictitious mode.
        g2 = np.ones((nQ, 1))
    else:
        g2 = np.reshape(g2, (nQ, -1, nk[0], nk[1], nk[2], nbnd, nbnd))
    nmodes = g2.shape[1]

    # Occupation function f on the mesh; d is used below as the
    # de -> 0 limit of the difference quotient.
    x = e / kT
    f = occupations(x)
    d = occupations.delta(x) / (-kT)

    if Delta is not None:
        # Smooth envelope excluding an energy window of half width
        # `Delta` around the Fermi level.
        x1 = (e - Delta) / Delta_kT
        x2 = (-e - Delta) / Delta_kT

        Theta = 2 - Delta_occupations(x1) - Delta_occupations(x2)

        if Delta_diff:
            # Derivative of the envelope w.r.t. `Delta`.
            delta = Delta_occupations.delta(x1) + Delta_occupations.delta(x2)
            delta /= -Delta_kT

    # Duplicate the mesh along each k dimension so that k + q indexing
    # below can be done with plain slices instead of modular arithmetic.
    e = np.tile(e, (2, 2, 2, 1))
    f = np.tile(f, (2, 2, 2, 1))

    if Delta is not None:
        Theta = np.tile(Theta, (2, 2, 2, 1))

        if Delta_diff:
            delta = np.tile(delta, (2, 2, 2, 1))

    # Conversion factor from crystal coordinates in [0, 2 pi) to
    # integer mesh indices.
    scale = nk / (2 * np.pi)
    # Prefactor 2 / N from the defining formula above.
    prefactor = 2.0 / nk.prod()

    # Distribute the q points over MPI ranks.
    sizes, bounds = MPI.distribute(nQ, bounds=True, comm=comm)

    my_Pi = np.empty((sizes[comm.rank], nmodes),
        dtype=float if np.isrealobj(g2) and np.isrealobj(omega) else complex)

    if fluctuations:
        my_Pi_k = np.empty((sizes[comm.rank], nmodes,
            nk[0], nk[1], nk[2], nbnd, nbnd), dtype=my_Pi.dtype)

    # Difference quotient [f(e_k,n) - f(e_k+q,m)] / [e_k,n - e_k+q,m + omega].
    dfde = np.empty((nk[0], nk[1], nk[2], nbnd, nbnd),
        dtype=float if np.isrealobj(omega) else complex)

    k1 = slice(0, nk[0])
    k2 = slice(0, nk[1])
    k3 = slice(0, nk[2])

    for my_iq, iq in enumerate(range(*bounds[comm.rank:comm.rank + 2])):
        # Integer shift of the k mesh corresponding to this q point.
        q1, q2, q3 = np.round(q[iq] * scale).astype(int) % nk

        kq1 = slice(q1, q1 + nk[0])
        kq2 = slice(q2, q2 + nk[1])
        kq3 = slice(q3, q3 + nk[2])

        for m in range(nbnd):
            for n in range(nbnd):
                df = f[k1, k2, k3, n] - f[kq1, kq2, kq3, m]
                de = e[k1, k2, k3, n] - e[kq1, kq2, kq3, m]

                if omega:
                    dfde[..., m, n] = df / (de + omega)
                else:
                    # Static limit: where the energy difference is
                    # (nearly) zero, replace the 0/0 quotient by d.
                    ok = abs(de) > eps

                    dfde[..., m, n][ok] = df[ok] / de[ok]
                    dfde[..., m, n][~ok] = d[..., n][~ok]

                if Delta is not None:
                    if Delta_diff:
                        # Product rule applied to the two envelopes.
                        envelope = (Theta[kq1, kq2, kq3, m]
                            * delta[k1, k2, k3, n]
                            + delta[kq1, kq2, kq3, m]
                            * Theta[k1, k2, k3, n])
                    else:
                        envelope = (Theta[kq1, kq2, kq3, m]
                            * Theta[k1, k2, k3, n])

                    dfde[..., m, n] *= envelope

        for nu in range(nmodes):
            Pi_k = g2[iq, nu] * dfde

            my_Pi[my_iq, nu] = prefactor * Pi_k.sum()

            if fluctuations:
                my_Pi_k[my_iq, nu] = 2 * Pi_k

    # Collect the per-rank results on all ranks.
    Pi = np.empty((nQ, nmodes), dtype=my_Pi.dtype)

    comm.Allgatherv(my_Pi, (Pi, sizes * nmodes))

    if fluctuations:
        Pi_k = np.empty((nQ, nmodes) + nk_orig + (nbnd, nbnd),
            dtype=my_Pi_k.dtype)

        comm.Allgatherv(my_Pi_k,
            (Pi_k, sizes * nmodes * nk.prod() * nbnd * nbnd))

        return Pi, Pi_k
    else:
        return Pi
def spectral_flatness(y=None, S=None, n_fft=2048, hop_length=512,
                      amin=1e-10, power=2.0):
    '''Compute the spectral flatness of each frame.

    Spectral flatness (or tonality coefficient) quantifies how much
    noise-like a sound is, as opposed to being tone-like [1]_.  A high
    spectral flatness (closer to 1.0) indicates the spectrum is similar
    to white noise.  It is often converted to decibel.

    .. [1] Dubnov, Shlomo  "Generalization of spectral flatness
           measure for non-gaussian linear processes"
           IEEE Signal Processing Letters, 2004, Vol. 11.

    Parameters
    ----------
    y : np.ndarray [shape=(n,)] or None
        audio time series

    S : np.ndarray [shape=(d, t)] or None (optional)
        pre-computed spectrogram magnitude

    n_fft : int > 0 [scalar]
        FFT window size

    hop_length : int > 0 [scalar]
        hop length for STFT. See `librosa.core.stft` for details.

    amin : float > 0 [scalar]
        minimum threshold for `S` (=added noise floor for numerical
        stability)

    power : float > 0 [scalar]
        Exponent for the magnitude spectrogram.
        e.g., 1 for energy, 2 for power, etc.
        Power spectrogram is usually used for computing spectral
        flatness.

    Returns
    -------
    flatness : np.ndarray [shape=(1, t)]
        spectral flatness for each frame.  The returned value is in
        [0, 1] and often converted to dB scale.

    Raises
    ------
    ParameterError
        If `amin` is not strictly positive, or the spectrogram is
        complex-valued or contains negative energies.
    '''
    if amin <= 0:
        raise ParameterError('amin must be strictly positive')

    S, n_fft = _spectrogram(y=y, S=S, n_fft=n_fft,
                            hop_length=hop_length, power=1.)

    # Flatness is only meaningful for non-negative real magnitudes.
    if not np.isrealobj(S):
        raise ParameterError('Spectral flatness is only defined '
                             'with real-valued input')
    if np.any(S < 0):
        raise ParameterError('Spectral flatness is only defined '
                             'with non-negative energies')

    # Floor the power-scaled magnitudes so the log stays finite.
    S_power = np.maximum(amin, S**power)

    # Flatness = geometric mean / arithmetic mean, per frame.
    geometric = np.exp(np.mean(np.log(S_power), axis=0, keepdims=True))
    arithmetic = np.mean(S_power, axis=0, keepdims=True)

    return geometric / arithmetic
def rlevinson(a, efinal):
    """Compute the autocorrelation coefficients R from the prediction
    polynomial `a` and the final prediction error `efinal`, using the
    stepdown algorithm.  Works for real or complex data.

    :param a: prediction polynomial; A should be a minimum phase
        polynomial and A(1) is assumed to be unity.
    :param efinal: final prediction error

    :return:
        * R, the autocorrelation
        * U  prediction coefficient
        * kr reflection coefficients
        * e  errors

    U is a (P+1) by (P+1) upper triangular matrix that holds the i'th
    order prediction polynomials Ai, i=1:P, where P is the order of
    the input polynomial, A.

         [ 1  a1(1)*  a2(2)* ..... aP(P)  * ]
         [ 0  1       a2(1)* ..... aP(P-1)* ]
    U =  [ .................................]
         [ 0  0       0      ..... 1        ]

    from which the i'th order prediction polynomial can be extracted
    using Ai=U(i+1:-1:1,i+1)'.  The first row of U contains the
    conjugates of the reflection coefficients, and the K's may be
    extracted using, K=conj(U(1,2:end)).

    .. todo:: remove the conjugate when data is real data, clean up
        the code, test and doc.
    """
    a = numpy.array(a)
    realdata = numpy.isrealobj(a)

    assert a[
        0] == 1, 'First coefficient of the prediction polynomial must be unity'

    p = len(a)
    if p < 2:
        raise ValueError('Polynomial should have at least two coefficients')

    # U holds the prediction polynomials of orders 1:p, one per column.
    if realdata:
        U = numpy.zeros((p, p))
    else:
        U = numpy.zeros((p, p), dtype=complex)

    # Prediction coefficients of order p go into the last column.
    U[:, p - 1] = numpy.conj(a[::-1])

    p -= 1

    # Prediction errors of orders 1:p; start from the given final error
    # and step down to lower orders.
    e = numpy.zeros(p)
    e[-1] = efinal

    for k in range(p - 1, 0, -1):
        a, e[k - 1] = levdown(a, e[k])
        U[:, k] = numpy.concatenate(
            (numpy.conj(a[::-1].transpose()), [0] * (p - k)))

    e0 = e[0] / (1. - abs(a[1]**2))  # Because a[1]=1 (true polynomial)
    U[0, 0] = 1  # Prediction coefficient of zeroth order

    # The reflection coefficients, as a column vector.
    kr = numpy.conj(U[0, 1:]).transpose()

    # With U and the prediction errors at all orders in hand, recover
    # the autocorrelation coefficients.
    R = numpy.zeros(1, dtype=complex)

    R0 = e0  # To take care of the zero indexing problem
    R[0] = -numpy.conj(U[0, 1]) * R0  # R[1]=-a1[1]*R[0]

    # Actual recursion
    for k in range(1, p):
        r = -sum(numpy.conj(U[k - 1::-1, k]) * R[::-1]) - kr[k] * e[k - 1]
        R = numpy.insert(R, len(R), r)

    # Include R(0) and make it a column vector.
    R = numpy.insert(R, 0, e0)

    return R, U, kr, e
def from_data(cls, m, title="Default title", key="0", mxtype=None, fmt=None):
    """Create a HBInfo instance from an existing sparse matrix.

    Parameters
    ----------
    m : sparse matrix
        the HBInfo instance will derive its parameters from m
    title : str
        Title to put in the HB header
    key : str
        Key
    mxtype : HBMatrixType
        type of the input matrix
    fmt : dict
        not implemented

    Returns
    -------
    hb_info : HBInfo instance
    """
    pointer = m.indptr
    indices = m.indices
    values = m.data

    nrows, ncols = m.shape
    nnon_zeros = m.nnz

    if fmt is not None:
        raise NotImplementedError("fmt argument not supported yet.")

    # +1 because HB uses one-based indexing (Fortran), and the
    # indices / pointer will be written as such.
    pointer_fmt = IntFormat.from_number(np.max(pointer + 1))
    indices_fmt = IntFormat.from_number(np.max(indices + 1))

    if values.dtype.kind in np.typecodes["AllFloat"]:
        values_fmt = ExpFormat.from_number(-np.max(np.abs(values)))
    elif values.dtype.kind in np.typecodes["AllInteger"]:
        values_fmt = IntFormat.from_number(-np.max(np.abs(values)))
    else:
        raise NotImplementedError("type %s not implemented yet"
                                  % values.dtype.kind)

    if mxtype is not None:
        raise ValueError("mxtype argument not handled yet.")

    if not np.isrealobj(values):
        raise ValueError("Complex values not supported yet")
    if values.dtype.kind in np.typecodes["AllInteger"]:
        tp = "integer"
    elif values.dtype.kind in np.typecodes["AllFloat"]:
        tp = "real"
    else:
        raise NotImplementedError("type %s for values not implemented"
                                  % values.dtype)
    mxtype = HBMatrixType(tp, "unsymmetric", "assembled")

    def _nlines(fmt, size):
        # Number of text lines needed to hold `size` entries at
        # `fmt.repeat` entries per line.
        nlines, remainder = divmod(size, fmt.repeat)
        if remainder:
            nlines += 1
        return nlines

    pointer_nlines = _nlines(pointer_fmt, pointer.size)
    indices_nlines = _nlines(indices_fmt, indices.size)
    values_nlines = _nlines(values_fmt, values.size)

    total_nlines = pointer_nlines + indices_nlines + values_nlines

    return cls(title, key,
               total_nlines, pointer_nlines, indices_nlines, values_nlines,
               mxtype, nrows, ncols, nnon_zeros,
               pointer_fmt.fortran_format, indices_fmt.fortran_format,
               values_fmt.fortran_format)
def irfft(x, n=None, axis=-1, overwrite_x=False):
    """
    Return inverse discrete Fourier transform of real sequence x.

    The contents of `x` are interpreted as the output of the `rfft`
    function.

    Parameters
    ----------
    x : array_like
        Transformed data to invert.
    n : int, optional
        Length of the inverse Fourier transform.
        If n < x.shape[axis], x is truncated.
        If n > x.shape[axis], x is zero-padded.
        The default results in n = x.shape[axis].
    axis : int, optional
        Axis along which the ifft's are computed; the default is over
        the last axis (i.e., axis=-1).
    overwrite_x : bool, optional
        If True, the contents of `x` can be destroyed; the default is
        False.

    Returns
    -------
    irfft : ndarray of floats
        The inverse discrete Fourier transform.

    See Also
    --------
    rfft, ifft, numpy.fft.irfft

    Notes
    -----
    The returned real array contains::

        [y(0),y(1),...,y(n-1)]

    where for n is even::

        y(j) = 1/n (sum[k=1..n/2-1]
                       (x[2*k-1]+sqrt(-1)*x[2*k])
                       * exp(sqrt(-1)*j*k* 2*pi/n)
                    + c.c. + x[0] + (-1)**(j) x[n-1])

    and for n is odd::

        y(j) = 1/n (sum[k=1..(n-1)/2]
                       (x[2*k-1]+sqrt(-1)*x[2*k])
                       * exp(sqrt(-1)*j*k* 2*pi/n)
                    + c.c. + x[0])

    c.c. denotes complex conjugate of preceding expression.

    For details on input parameters, see `rfft`.

    To process (conjugate-symmetric) frequency-domain data with a
    complex datatype, consider using the related function
    `numpy.fft.irfft`.
    """
    tmp = _asfarray(x)

    # rfft output is packed as interleaved real/imaginary parts of a
    # real-input transform, so the input must itself be real.
    if not numpy.isrealobj(tmp):
        raise TypeError("1st argument must be real sequence")

    # Look up the precision-specific C worker for this dtype.
    try:
        worker = _DTYPE_TO_RFFT[tmp.dtype]
    except KeyError:
        raise ValueError("type %s is not supported" % tmp.dtype)

    overwrite_x = overwrite_x or _datacopied(tmp, x)

    return _raw_fft(tmp, n, axis, -1, overwrite_x, worker)