def write_memory_to_file(A, filename, mode='w', title='test'):
    """
    Write memory to an h5 file.
    The h5 file contains root.real and root.imag (if A is complex);
    best for transferring data to and from MATLAB.

    A: an ndarray, GPUArray or PitchArray
    filename: name of file to store
    mode: 'w' to start a new file
          'a' to append; the leading dimension of A must match the existing file
    The file can be read by read_file, or in MATLAB using h5read.m
    """
    h5file = tables.openFile(filename, mode, title)

    if (A.dtype == np.float32) or (A.dtype == np.complex64):
        tb = tables.Float32Atom
    elif (A.dtype == np.float64) or (A.dtype == np.complex128):
        tb = tables.Float64Atom
    elif A.dtype == np.int32:
        tb = tables.Int32Atom
    elif A.dtype == np.int64:
        tb = tables.Int64Atom
    else:
        raise TypeError("Write file error: unknown input dtype")

    if PYCUDA:
        if A.__class__.__name__ in ["GPUArray", "PitchArray"]:
            B = A.get()
        elif A.__class__.__name__ == "ndarray":
            B = A
        else:
            raise TypeError("Write file error: unknown input")
    else:
        if A.__class__.__name__ == "ndarray":
            B = A
        else:
            raise TypeError("Write file error: unknown input")

    shape = list(B.shape)
    shape[0] = 0

    if mode == 'w':
        if np.iscomplexobj(B):
            h5file.createEArray("/", "real", tb(), tuple(shape))
            h5file.createEArray("/", "imag", tb(), tuple(shape))
        else:
            h5file.createEArray("/", "real", tb(), tuple(shape))

    if np.iscomplexobj(B):
        h5file.root.real.append(B.real)
        h5file.root.imag.append(B.imag)
    else:
        h5file.root.real.append(B)

    h5file.close()
    if mode == 'w':
        print("file %s created" % filename)
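# Hedged usage sketch for write_memory_to_file above: assumes PyTables is
# installed and that the module-level names the function relies on (`tables`,
# `np`, `PYCUDA`) are defined. The file name and array shapes are illustrative
# only, not part of the original code.
import numpy as np

def _demo_write_memory_to_file():
    data = (np.random.randn(4, 8) + 1j * np.random.randn(4, 8)).astype(np.complex64)
    # Start a new file; root.real and root.imag are created because the data is complex.
    write_memory_to_file(data, 'demo_block.h5', mode='w')
    # Append more rows; the trailing dimensions must match the existing arrays.
    more = (np.random.randn(2, 8) + 1j * np.random.randn(2, 8)).astype(np.complex64)
    write_memory_to_file(more, 'demo_block.h5', mode='a')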
def fitToData(self, data):
    '''
    param data: numpy array where [:, 0] is x and [:, 1] is y
    '''
    x = data[:, 0][:, np.newaxis]
    y = data[:, 1][:, np.newaxis]
    D = np.hstack((x*x, x*y, y*y, x, y, np.ones_like(x)))
    S = np.dot(D.T, D)
    C = np.zeros([6, 6])
    C[0, 2] = C[2, 0] = 2
    C[1, 1] = -1
    E, V = eig(np.dot(inv(S), C))
    n = np.argmax(np.abs(E))
    self.parameters = V[:, n]
    axes = self.ellipse_axis_length()
    self.a = axes[0]
    self.b = axes[1]
    self.angle = self.ellipse_angle_of_rotation()
    if not self.a or not self.b or self.parameters is None or np.iscomplexobj(self.parameters) or \
            math.isnan(self.a) or math.isnan(self.b) or math.isnan(self.ellipse_center()[0]) or \
            np.iscomplex(self.ellipse_center()[0]) or np.iscomplex(self.a) or np.iscomplex(self.b) or \
            np.iscomplexobj(self.angle):
        self.a = 0
        self.b = 0
        self.parameters = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
        self.angle = 0
        self.error = True
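# A self-contained sketch of the same direct least-squares ellipse fit used by
# fitToData above (the classic Fitzgibbon constraint matrix), run on synthetic
# points so it works without the surrounding class. The helper name and the
# test ellipse are illustrative, not part of the original code.
import numpy as np
from numpy.linalg import eig, inv

def _fit_ellipse_coefficients(x, y):
    x = x[:, np.newaxis]
    y = y[:, np.newaxis]
    D = np.hstack((x * x, x * y, y * y, x, y, np.ones_like(x)))
    S = np.dot(D.T, D)
    C = np.zeros((6, 6))
    C[0, 2] = C[2, 0] = 2
    C[1, 1] = -1
    E, V = eig(np.dot(inv(S), C))
    return V[:, np.argmax(np.abs(E))]   # conic coefficients [A, B, C, D, E, F]

def _demo_fit_ellipse():
    t = np.linspace(0, 2 * np.pi, 200)
    x = 1.0 + 3.0 * np.cos(t) + 0.01 * np.random.randn(t.size)
    y = -2.0 + 1.5 * np.sin(t) + 0.01 * np.random.randn(t.size)
    coeffs = _fit_ellipse_coefficients(x, y)
    return coeffs / coeffs[0]   # normalized conic coefficients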
def test_c2c(comm):
    # this test requires pfft-python 0.1.16.
    pm = ParticleMesh(BoxSize=8.0, Nmesh=[8, 8], comm=comm, dtype='complex128')
    numpy.random.seed(1234)
    if comm.rank == 0:
        Npar = 100
    else:
        Npar = 0

    pos = 1.0 * (numpy.arange(Npar * len(pm.Nmesh))).reshape(-1, len(pm.Nmesh)) * (7, 7)
    pos %= (pm.Nmesh + 1)
    layout = pm.decompose(pos)
    npos = layout.exchange(pos)
    real = pm.paint(npos)

    complex = real.r2c()
    real2 = complex.c2r()

    assert numpy.iscomplexobj(real)
    assert numpy.iscomplexobj(real2)
    assert numpy.iscomplexobj(complex)

    assert_array_equal(complex.cshape, pm.Nmesh)
    assert_array_equal(real2.cshape, pm.Nmesh)
    assert_array_equal(real.cshape, pm.Nmesh)

    real.readout(npos)
    assert_almost_equal(numpy.asarray(real), numpy.asarray(real2), decimal=7)
def __init__(self, f, data, model=default_model, guess=default_guess, functions=default_functions, mask=None, errors=None, weight_by_errors=True): """ Instantiate a resonator using our current best model. Parameter model is a function S_21(params, f) that returns the modeled values of S_21. Parameter guess is a function guess(f, data) that returns a good-enough initial guess at all of the fit parameters. Parameter functions is a dictionary that maps keys that are valid Python variables to functions that take a Parameters object as their only argument. Parameter mask is a boolean array of the same length as f and data; only points f[mask] and data[mask] are used to fit the data. The default is to use all data. Use this to exclude glitches or resonances other than the desired one. """ if not np.iscomplexobj(data): raise TypeError("Resonator data should always be complex, but got real values") if errors is not None: if not np.iscomplexobj(errors): errors = errors*(1+1j) # ensure errors is complex super(Resonator,self).__init__(f,data,model=model,guess=guess,functions=functions,mask=mask, errors=errors,weight_by_errors=weight_by_errors) if self.x_data.max() < 1e6: self.freq_units_MHz = True else: self.freq_units_MHz = False self.freq_data = self.x_data self.s21_data = self.y_data
def dot(self, coords_a, coords_b, frac_coords=False):
    """
    Compute the scalar product of vector(s).

    Args:
        coords_a, coords_b: Array-like objects with the coordinates.
        frac_coords (bool): Boolean stating whether the vectors correspond
            to fractional or cartesian coordinates.

    Returns:
        one-dimensional `numpy` array.
    """
    coords_a, coords_b = np.reshape(coords_a, (-1, 3)), \
                         np.reshape(coords_b, (-1, 3))

    if len(coords_a) != len(coords_b):
        raise ValueError("coords_a and coords_b must contain the same number of vectors.")

    if np.iscomplexobj(coords_a) or np.iscomplexobj(coords_b):
        raise TypeError("Complex array!")

    if not frac_coords:
        cart_a, cart_b = coords_a, coords_b
    else:
        cart_a = np.reshape([self.get_cartesian_coords(vec) for vec in coords_a], (-1, 3))
        cart_b = np.reshape([self.get_cartesian_coords(vec) for vec in coords_b], (-1, 3))

    return np.array([np.dot(a, b) for a, b in zip(cart_a, cart_b)])
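# A minimal, standalone sketch of the frac_coords branch of dot() above,
# assuming the lattice is represented by a 3x3 matrix whose rows are the
# lattice vectors (so cartesian = fractional @ matrix). The lattice values
# and the helper name are illustrative only.
import numpy as np

def _demo_lattice_dot():
    matrix = np.array([[3.0, 0.0, 0.0],
                       [0.0, 3.0, 0.0],
                       [0.0, 0.0, 5.0]])          # rows are lattice vectors
    frac_a = np.array([[0.5, 0.0, 0.0]])
    frac_b = np.array([[0.5, 0.5, 0.0]])
    cart_a = frac_a.dot(matrix)
    cart_b = frac_b.dot(matrix)
    return np.array([np.dot(a, b) for a, b in zip(cart_a, cart_b)])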
def filter(self, array, *args, **kwargs):
    # Processed bandwidth in percentages
    #------------------------------------------------------------------------
    sys = kwargs["meta"]
    bw_proc_az = 1.33 * sys['v0'] / sys['res_az']
    bw_proc_rg = 1.33 * (sys['c0'] / 2.) / sys['res_rg']
    percentage_az = 100. * bw_proc_az / (sys['prf'] / sys['pre_az'])
    percentage_rg = 100. * bw_proc_rg / sys['rsf']
    bw = [percentage_az, percentage_rg]
    print(" bw=[" + str(bw[0]) + "," + str(bw[1]) + "] ... ")
    #------------------------------------------------------------------------
    if array.ndim == 2 and np.iscomplexobj(array):
        return self.unweight2d(array, self.ov, bw)
    if array.ndim == 3 and np.iscomplexobj(array):
        p = array.shape
        for k in range(0, p[0]):
            array_temp = self.unweight2d(array[k, :, :], self.ov, bw)
            if k == 0:
                s = array_temp.shape
                array_new = np.empty((p[0], s[0], s[1]), dtype='complex64')
            array_new[k, :, :] = array_temp
        return array_new
    else:
        print(" ERROR: Bad input.")
        return None
def test_dtype_agreement():
    dtypes = [np.complex64, np.complex128, np.float32, np.float64]
    for dtype1 in dtypes:
        for dtype2 in dtypes:
            print(dtype1, dtype2)
            y_data = np.random.randn(10) + 1j * np.random.randn(10)
            errors = np.random.randn(10) + 1j * np.random.randn(10)
            with warnings.catch_warnings():
                warnings.simplefilter('ignore', np.ComplexWarning)
                y_data = y_data.astype(dtype1)
                errors = errors.astype(dtype2)
            x_data = np.linspace(100, 110, 10)
            if np.iscomplexobj(y_data) and np.iscomplexobj(errors):
                kid_readout.analysis.fitter.Fitter(x_data=x_data, y_data=y_data, errors=errors,
                                                   model=complex_dummy_model, guess=complex_dummy_guess)
            elif (not np.iscomplexobj(y_data)) and (not np.iscomplexobj(errors)):
                kid_readout.analysis.fitter.Fitter(x_data=x_data, y_data=y_data, errors=errors,
                                                   model=kid_readout.analysis.fitter.line_model,
                                                   guess=kid_readout.analysis.fitter.line_guess)
            else:
                try:
                    kid_readout.analysis.fitter.Fitter(x_data=x_data, y_data=y_data, errors=errors,
                                                       model=complex_dummy_model, guess=complex_dummy_guess)
                except TypeError:
                    pass
def get_jk_incore(self, cell=None, dm=None, hermi=1, verbose=logger.DEBUG, kpt=None): '''Get Coulomb (J) and exchange (K) following :func:`scf.hf.RHF.get_jk_`. *Incore* version of Coulomb and exchange build only. Currently RHF always uses PBC AO integrals (unlike RKS), since exchange is currently computed by building PBC AO integrals. ''' if cell is None: cell = self.cell if dm is None: dm = self.make_rdm1() if kpt is None: kpt = self.kpt log = logger.Logger if isinstance(verbose, logger.Logger): log = verbose else: log = logger.Logger(cell.stdout, verbose) log.debug('JK PBC build: incore only with PBC integrals') if self._eri is None: log.debug('Building PBC AO integrals') if kpt is not None and pyscf.lib.norm(kpt) > 1.e-15: raise RuntimeError("Non-zero k points not implemented for exchange") self._eri = ao2mo.get_ao_eri(cell) if np.iscomplexobj(dm) or np.iscomplexobj(self._eri): vj, vk = dot_eri_dm_complex(self._eri, dm, hermi) else: vj, vk = pyscf.scf.hf.dot_eri_dm(self._eri, dm, hermi) return vj, vk
def map_coordinates(input, coordinates, output_type=None, output=None,
                    order=3, mode='constant', cval=0.0, prefilter=True):
    """Apply an arbitrary coordinate transformation.

    The array of coordinates is used to find, for each point in the output,
    the corresponding coordinates in the input. The value of the input at
    those coordinates is determined by spline interpolation of the requested
    order.

    The shape of the output is derived from that of the coordinate array by
    dropping the first axis. The values of the array along the first axis are
    the coordinates in the input array at which the output value is found.
    For example, if the input has dimensions (100, 200, 3), then the shape of
    coordinates will be (3, 100, 200, 3), where coordinates[:, 1, 2, 3]
    specify the input coordinate at which output[1, 2, 3] is found.

    Points outside the boundaries of the input are filled according to the
    given mode ('constant', 'nearest', 'reflect' or 'wrap'). The parameter
    prefilter determines if the input is pre-filtered before interpolation
    (necessary for spline interpolation of order > 1). If False it is
    assumed that the input is already filtered.

    Example usage:
      >>> a = arange(12.).reshape((4, 3))
      >>> print(a)
      [[  0.   1.   2.]
       [  3.   4.   5.]
       [  6.   7.   8.]
       [  9.  10.  11.]]
      >>> output = map_coordinates(a, [[0.5, 2], [0.5, 1]], order=1)
      >>> print(output)
      [ 2.  7.]

    Here, the interpolated value of a[0.5, 0.5] gives output[0], while
    a[2, 1] is output[1].
    """
    if order < 0 or order > 5:
        raise RuntimeError('spline order not supported')
    input = numpy.asarray(input)
    if numpy.iscomplexobj(input):
        raise TypeError('Complex type not supported')
    coordinates = numpy.asarray(coordinates)
    if numpy.iscomplexobj(coordinates):
        raise TypeError('Complex type not supported')
    output_shape = coordinates.shape[1:]
    if input.ndim < 1 or len(output_shape) < 1:
        raise RuntimeError('input and output rank must be > 0')
    if coordinates.shape[0] != input.ndim:
        raise RuntimeError('invalid shape for coordinate array')
    mode = _extend_mode_to_code(mode)
    if prefilter and order > 1:
        filtered = spline_filter(input, order, output=numpy.float64)
    else:
        filtered = input
    output, return_value = _ni_support._get_output(output, input, output_type,
                                                   shape=output_shape)
    _nd_image.geometric_transform(filtered, None, coordinates, None, None,
                                  output, order, mode, cval, None, None)
    return return_value
def RCCSD(mf, frozen=0, mo_coeff=None, mo_occ=None): __doc__ = ccsd.CCSD.__doc__ import numpy from pyscf import lib from pyscf import scf from pyscf.soscf import newton_ah from pyscf.cc import dfccsd if isinstance(mf, scf.uhf.UHF): raise RuntimeError('RCCSD cannot be used with UHF method.') elif isinstance(mf, scf.rohf.ROHF): lib.logger.warn(mf, 'RCCSD method does not support ROHF method. ROHF object ' 'is converted to UHF object and UCCSD method is called.') return UCCSD(mf, frozen, mo_coeff, mo_occ) if isinstance(mf, newton_ah._CIAH_SOSCF) or not isinstance(mf, scf.hf.RHF): mf = scf.addons.convert_to_rhf(mf) if getattr(mf, 'with_df', None): return dfccsd.RCCSD(mf, frozen, mo_coeff, mo_occ) elif numpy.iscomplexobj(mo_coeff) or numpy.iscomplexobj(mf.mo_coeff): return rccsd.RCCSD(mf, frozen, mo_coeff, mo_occ) else: return ccsd.CCSD(mf, frozen, mo_coeff, mo_occ)
def _matvec(self, x): from bempp.api.utils.data_types import combined_type if x.ndim == 1: x_new = _np.expand_dims(x, 1) return self.matvec(x_new).ravel() if not self._fill_complete(): raise ValueError("Not all rows or columns contain operators.") row_dim = 0 res = _np.zeros((self.shape[0], x.shape[1]), dtype=combined_type(self.dtype, x.dtype)) for i in range(self._m): col_dim = 0 local_res = res[row_dim:row_dim + self._rows[i], :] for j in range(self._n): local_x = x[col_dim:col_dim + self._cols[j], :] if self._operators[i, j] is not None: op_is_complex = _np.iscomplexobj(self._operators[i, j].dtype.type(1)) if _np.iscomplexobj(x) and not op_is_complex: local_res[:] += (self._operators[i, j] * _np.real(local_x) + 1j * self._operators[i, j] * _np.imag(local_x)) else: local_res[:] += self._operators[i, j].dot(local_x) col_dim += self._cols[j] row_dim += self._rows[i] return res
def imshow(ax, x, y, z, *args, **kwargs): if (np.iscomplexobj(x)): x = np.asfarray(x.real) else: x = np.asfarray(x) if (np.iscomplexobj(y)): y = np.asfarray(y.real) else: y = np.asfarray(y) assert(len(x) == z.shape[1]) assert(len(y) == z.shape[0]) dx = x[1] - x[0] dy = y[1] - y[0] if (np.iscomplexobj(z)): zabs = abs(z) else: zabs = z zabs = np.asfarray(zabs) # Use this to center pixel around (x,y) values extent = (x[0]-dx/2.0, x[-1]+dx/2.0, y[0]-dy/2.0, y[-1]+dy/2.0) # Use this to let (x,y) be the lower-left pixel location (upper-left when origin = 'lower' is not used) #extent = (x[0], x[-1], y[0], y[-1]) im = ax.imshow(zabs, extent = extent, *args, **kwargs) imshow_show_z(ax, z, x, y) ax.set_xlim((x[0], x[-1])) ax.set_ylim((y[0], y[-1])) return im
def __init__(self, frequency, s21, errors, **kwargs): """ General resonator fitting class. Parameters ---------- frequency: array of floats Frequencies at which data was measured s21: array of complex measured S21 data errors: None or array of complex errors on the real and imaginary parts of the s21 data. None means use no errors kwargs: passed on to model.fit """ if not np.iscomplexobj(s21): raise TypeError("S21 must be complex.") if errors is not None and not np.iscomplexobj(errors): raise TypeError("S21 errors must be complex.") if errors is None: weights = None else: weights = 1/errors.real + 1j/errors.imag # kwargs get passed from Fitter to Model.fit directly. Signature is: # def fit(self, data, params=None, weights=None, method='leastsq', # iter_cb=None, scale_covar=True, verbose=False, fit_kws=None, **kwargs): super(GeneralCable, self).__init__(data=s21, f=frequency, model=lmfit_models.GeneralCableModel, weights=weights, **kwargs) self.frequency = frequency self.errors = errors self.weights = weights self.fit()
def subsample(args): """ Rebin / Congrid variant """ arr, shape, mode = args # unpack arguments if mode == 'phase' and np.iscomplexobj(arr): arr = np.angle(arr) if np.iscomplexobj(arr): arr = np.abs(arr) if arr.shape == shape: return arr oshap = arr.shape for d in range(arr.ndim): n1 = shape[d] n2 = oshap[d] if n1 < n2: s = list(arr.shape) s.insert(d + 1, n2 // n1) s[d] = n1 if mode == 'phase': arr = np.angle(np.exp(1j * arr.reshape(s)).mean(d + 1)) elif mode == 'lables': arr = np.take(arr.reshape(s), 0, d + 1) else: arr = arr.reshape(s).mean(d + 1) else: arr = arr.repeat(n1 // n2, axis=d) return arr
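# Hedged usage sketch for subsample above: downsample a 4x4 array to 2x2 by
# block averaging, then repeat a 2x2 array back up to 4x4. Assumes the target
# shape divides (or is divided by) the original shape evenly, as the
# reshape/repeat logic requires; 'amplitude' is just a placeholder mode name
# that falls through to the block-mean branch.
import numpy as np

def _demo_subsample():
    arr = np.arange(16.0).reshape(4, 4)
    small = subsample((arr, (2, 2), 'amplitude'))   # block means, shape (2, 2)
    big = subsample((small, (4, 4), 'amplitude'))   # repeat back up, shape (4, 4)
    return small, big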
def assert_array_almost_equal_nulp(x, y, nulp=1): """Compare two arrays relatively to their spacing. It is a relatively robust method to compare two arrays whose amplitude is variable. Note ---- An assertion is raised if the following condition is not met: abs(x - y) <= nulps * spacing(max(abs(x), abs(y))) Parameters ---------- x: array_like first input array y: array_like second input array nulp: int max number of unit in the last place for tolerance (see Note) """ import numpy as np ax = np.abs(x) ay = np.abs(y) ref = nulp * np.spacing(np.where(ax > ay, ax, ay)) if not np.all(np.abs(x-y) <= ref): if np.iscomplexobj(x) or np.iscomplexobj(y): msg = "X and Y are not equal to %d ULP" % nulp else: max_nulp = np.max(nulp_diff(x, y)) msg = "X and Y are not equal to %d ULP (max is %g)" % (nulp, max_nulp) raise AssertionError(msg)
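# Hedged usage sketch for assert_array_almost_equal_nulp above: arrays that
# differ by a single spacing pass at nulp=1, while a larger perturbation
# raises. The failure branch builds its message with the companion nulp_diff
# helper, which is assumed to be available in the same module.
import numpy as np

def _demo_nulp_assertion():
    x = np.linspace(1.0, 10.0, 5)
    assert_array_almost_equal_nulp(x, x + np.spacing(x), nulp=1)        # passes
    try:
        assert_array_almost_equal_nulp(x, x + 10 * np.spacing(x), nulp=1)
    except AssertionError:
        pass  # expected: the arrays differ by roughly 10 ULP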
def __init__(self, freq, s21, model=default_model, guess=default_guess, functions=default_functions, mask=None, errors=None): """ Fit a resonator using the given model. f: the frequencies used in a sweep. s21: the complex S_21 data taken at the given frequencies. model: a function S_21(params, f) that returns the modeled values of S_21. guess: a function guess(f, s21) that returns a good-enough initial guess at all of the fit parameters. functions: a dictionary that maps keys that are valid Python variables to functions that take a Parameters object as their only argument. mask: a boolean array of the same length as f and s21; only points f[mask] and s21[mask] are used to fit the data and the default is to use all data; use this to exclude glitches or resonances other than the desired one. """ if not np.iscomplexobj(s21): raise TypeError("Resonator s21 must be complex.") if errors is not None and not np.iscomplexobj(errors): raise TypeError("Resonator s21 errors must be complex.") super(Resonator, self).__init__(freq, s21, model=model, guess=guess, functions=functions, mask=mask, errors=errors) self.freq_data = self.x_data self.s21_data = self.y_data self.freq_units_MHz = self.freq_data.max() < 1e6
def __init__(self, x_data, y_data, model=line_model, guess=line_guess, functions=default_functions, mask=None, errors=None, method='leastsq', **minimize_keywords): """ Arguments: model: a function y(params, x) that returns the modeled values. guess: a function guess(x_data, y_data) that returns a Parameters object containing an initial guess at the fit parameters. functions: a dictionary that maps keys that are valid Python variables to functions that take a Parameters object as their only argument. mask: a boolean array of the same length as f and data; only points x_data[mask] and y_data[mask] are used to fit the data, and the default is to use all data. errors: an array of the same size and data type as y_data with the corresponding error values; method: a string representing the fitting method for lmfit.minimize to use. minimize_keywords: keyword arguments that are passed directly to lmfit.minimize. Returns: A new Fitter using the given data and model. """ self.x_data = x_data if np.iscomplexobj(y_data): y_data = y_data.astype('complex') # promote data to complex128 if needed if errors is not None: if not np.iscomplexobj(errors): raise TypeError( "y_data and errors must both be complex or real, but got complex data with real errors.") errors = errors.astype('complex') else: # data is real y_data = y_data.astype('float') # promote data to float64 if needed if errors is not None: if np.iscomplexobj(errors): raise TypeError( "y_data and errors must both be complex or real, but got real data with complex errors.") errors = errors.astype('float') self.y_data = y_data self._model = model self._functions = functions self.method = method self.minimize_keywords = minimize_keywords if mask is None: self.mask = np.ones(x_data.shape, dtype=np.bool) else: self.mask = mask self.errors = errors if errors is None: self.residual = self._residual_without_errors else: self.residual = self._residual_with_errors self.fit(guess(x_data[self.mask], y_data[self.mask]))
def olafilt(b, x, zi=None): """ Filter a one-dimensional array with an FIR filter Filter a data sequence, `x`, using a FIR filter given in `b`. Filtering uses the overlap-add method converting both `x` and `b` into frequency domain first. The FFT size is determined as the next higher power of 2 of twice the length of `b`. Parameters ---------- b : one-dimensional numpy array The impulse response of the filter x : one-dimensional numpy array Signal to be filtered zi : one-dimensional numpy array, optional Initial condition of the filter, but in reality just the runout of the previous computation. If `zi` is None or not given, then zero initial state is assumed. Returns ------- y : array The output of the digital filter. zf : array, optional If `zi` is None, this is not returned, otherwise, `zf` holds the final filter delay values. """ L_I = b.shape[0] # Find power of 2 larger that 2*L_I (from abarnert on Stackoverflow) L_F = 2<<(L_I-1).bit_length() L_S = L_F - L_I + 1 L_sig = x.shape[0] offsets = range(0, L_sig, L_S) # handle complex or real input if np.iscomplexobj(b) or np.iscomplexobj(x): fft_func = np.fft.fft ifft_func = np.fft.ifft res = np.zeros(L_sig+L_F, dtype=np.complex128) else: fft_func = np.fft.rfft ifft_func = np.fft.irfft res = np.zeros(L_sig+L_F) FDir = fft_func(b, n=L_F) # overlap and add for n in offsets: res[n:n+L_F] += ifft_func(fft_func(x[n:n+L_S], n=L_F)*FDir) if zi is not None: res[:zi.shape[0]] = res[:zi.shape[0]] + zi return res[:L_sig], res[L_sig:] else: return res[:L_sig]
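# Hedged usage sketch for olafilt above: with no initial state, the
# overlap-add result should match direct convolution truncated to the signal
# length, for real taps and a complex signal. Sizes are illustrative.
import numpy as np

def _demo_olafilt():
    rng = np.random.default_rng(0)
    b = rng.standard_normal(32)                        # FIR taps
    x = rng.standard_normal(1000) + 1j * rng.standard_normal(1000)
    y = olafilt(b, x)
    y_ref = np.convolve(x, b)[:len(x)]                 # direct convolution, same length
    assert np.allclose(y, y_ref)
    return y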
def compute(self): # make a copy for changes data = self.getData('in').copy() # convert complex to mag if np.iscomplexobj(data): data = np.abs(data) # get rid of negative numbers if data.min() < 0.: data -= data.min() # normalize the data data_min = data.min() data_max = data.max() data_range = data_max - data_min val = self.getAttr('L W F C:', 'val') new_min = data_range * val['floor'] / 100.0 + data_min new_max = data_range * val['ceiling'] / 100.0 + data_min data[data < new_min] = new_min data[data > new_max] = new_max data = data - math.fabs(new_min) data = data / (new_max - math.fabs(new_min)) * 255 image = numpy2qimage(data.astype(np.uint8)) if image.isNull(): self.log.warn("Image Viewer: cannot load image") else: self.setAttr('Viewport:', val=image) line = self.getAttr('Viewport:', 'line') if line: a = self.getData('in') if np.iscomplexobj(a): a = np.abs(a) # Make a line with "l" points x0, y0 = line[0] x1, y1 = line[1] l = int(np.hypot(x1 - x0, y1 - y0)) x, y = np.linspace(x0, x1, l), np.linspace(y0, y1, l) # Extract the values along the line, using cubic interpolation arr = ndimage.map_coordinates(a, np.vstack((x, y)), order=3) self.setAttr('Cross Section', val=[arr]) self.setAttr('Cross Section', visible=True) else: self.setAttr('Cross Section', visible=False) return(0)
def plot(self, *args, **kwargs): if "projection" in kwargs: projection = kwargs.pop("projection") else: projection = self.name vars = args[:2] args = args[2:] if len(vars) == 2 and isinstance(vars[1], (str, unicode)): args = (vars[1],) + args vars = vars[:1] if ((len(vars) == 1 and isinstance(vars[0], hfarray) and len(vars[0].dims) >= 1)): y = vars[0] x = hfarray(y.dims[0]) vars = (x, y) if self.HFTOOLS_default_x_name is None: self.HFTOOLS_default_x_name = y.dims[0].name fmt = self.axes.xaxis.get_major_formatter() if hasattr(fmt, "update_template"): fmt.default_label = self.HFTOOLS_default_x_name fmt.update_template() if len(vars) == 1: y = vars[0] if projection in _projfun: x, y = _projfun[projection](None, y) return Axes.plot(self, y, *args, **kwargs) elif np.iscomplexobj(y): return Axes.plot(self, y.real, y.imag, *args, **kwargs) else: return Axes.plot(self, y, *args, **kwargs) elif len(vars) == 2: x = vars[0] y = vars[1] xunit = getattr(x, "unit", None) yunit = getattr(y, "unit", None) if projection in _projfun: x, y = _projfun[projection](x, y) lines = self._plot_helper(x, y, *args, **kwargs) elif np.iscomplexobj(y): xunit = yunit lines = self._plot_helper(y.real, y.imag, *args, **kwargs) else: lines = self._plot_helper(x, y, *args, **kwargs) if xunit: self.set_xlabel_unit(xunit) if yunit: self.set_ylabel_unit(yunit) return lines else: raise Exception("Missing plot data")
def it(Z, c, max_iteration):
    """
    This function takes a complex matrix and returns a matrix filled with the
    number of iterations taken by every complex number to escape (|z| > 2).
    """
    if not numpy.iscomplexobj(c):
        raise NotComplexError("c parameter must be complex")
    if not numpy.iscomplexobj(Z):
        raise NotComplexError("The Z matrix (complex plane) must be complex")
    if max_iteration == 0:
        raise OutOfRangeError("Number of iterations should be a positive integer and not 0")
    if max_iteration < 0:
        raise OutOfRangeError("Number of iterations should be a positive integer and not negative")
    if type(max_iteration) is not int:
        raise NotIntegerError("Number of iterations should be a positive integer and not a float")

    # just to keep the dimensions
    I = Z.copy()
    C = Z.copy()
    C.fill(c)
    W = Z.getA()

    i = 0
    zn = 0 + 0j
    for z in numpy.nditer(I, op_flags=['readwrite']):
        i = max_iteration
        zn = z[()]
        for j in range(-1, max_iteration):
            if abs(zn) > 2:
                i = j + 1
                break
            #zn = zn**3 + c
            #print(zn.real)
            zn = cmath.exp(zn**3) + c
        #print(z[...])
        # do the color(i) transform here
        z[...] = i * 10
    return (I.real).astype(int)
def __test(y, truth, max_size, axis):
    ac = librosa.autocorrelate(y, max_size=max_size, axis=axis)

    my_slice = [slice(None)] * truth.ndim
    if max_size is not None and max_size <= y.shape[axis]:
        my_slice[axis] = slice(min(max_size, y.shape[axis]))

    if not np.iscomplexobj(y):
        assert not np.iscomplexobj(ac)

    assert np.allclose(ac, truth[tuple(my_slice)])
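# Hedged usage sketch of the property the test above checks, assuming librosa
# is installed: truncating with max_size should agree with slicing the full
# autocorrelation. The signal length and lag count are illustrative.
import numpy as np
import librosa

def _demo_autocorrelate():
    y = np.random.randn(64)
    full = librosa.autocorrelate(y)                  # all lags
    short = librosa.autocorrelate(y, max_size=16)    # first 16 lags only
    assert np.allclose(short, full[:16])
    return short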
def is_correct_scalar_dd(a_sub, a_tru, digits=2):
    # If answers are complex, check the real and imaginary parts separately
    if np.iscomplexobj(a_sub) or np.iscomplexobj(a_tru):
        return (is_correct_scalar_dd(a_sub.real, a_tru.real, digits=digits)
                and is_correct_scalar_dd(a_sub.imag, a_tru.imag, digits=digits))

    # Get bounds on the submitted answer
    eps = 0.51 * (10**-digits)
    lower_bound = a_tru - eps
    upper_bound = a_tru + eps

    # Check if the submitted answer is in bounds
    return (a_sub > lower_bound) & (a_sub < upper_bound)
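# Hedged usage sketch for is_correct_scalar_dd above: with digits=2 the
# tolerance is +/- 0.0051, and complex values are checked component-wise.
import numpy as np

def _demo_is_correct_scalar_dd():
    assert is_correct_scalar_dd(3.14, np.pi, digits=2)        # within 0.0051 of pi
    assert not is_correct_scalar_dd(3.15, np.pi, digits=2)    # off by ~0.0084
    assert is_correct_scalar_dd(1.0 + 2.0j, 1.001 + 2.001j, digits=2)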
def cpsd(a, b, nfft, fs, window='hann'):
    """
    Compute the cross power spectral density (CPSD) of the signals *a* and *b*.

    This performs:
    fft(a) * conj(fft(b))

    Note that this is consistent with *np.correlate*'s definition of
    correlation. (The conjugate of D.B. Chelton's definition of correlation.)

    The two signals should be the same length, and should both be real.

    See also: psd, cohere

    The units of the spectra are the product of the units of *a* and *b*,
    divided by the units of fs.
    """
    if np.iscomplexobj(a) or np.iscomplexobj(b):
        raise Exception("cpsd expects real-valued inputs.")
    auto_psd = False
    if a is b:
        auto_psd = True
    max_ind = len(a)
    nfft = min([nfft, max_ind])
    repeats = np.fix(2. * max_ind / nfft)
    fs = np.float64(fs)
    if max_ind == nfft:
        repeats = 1
    if window == 'hann':
        wind = np.hanning(nfft)
    elif window is None or window == 1:
        wind = np.ones(nfft)
    fft_inds = slice(1, int(np.floor(nfft / 2. + 1)))
    wght = 2. / (wind ** 2).sum()
    s1 = fft(detrend(a[0:nfft]) * wind)[fft_inds]
    if auto_psd:
        pwr = np.abs(s1) ** 2
    else:
        pwr = s1 * np.conj(fft(detrend(b[0:nfft]) * wind)[fft_inds])
    if repeats - 1:
        step = int(np.fix((max_ind - nfft) / (repeats - 1)))
        for i in range(step, max_ind - nfft + 1, step):
            s1 = fft(detrend(a[i:(i + nfft)]) * wind)[fft_inds]
            if auto_psd:
                pwr += np.abs(s1) ** 2
            else:
                pwr += s1 * \
                    np.conj(fft(detrend(b[i:(i + nfft)]) * wind)[fft_inds])
    pwr *= wght / repeats / fs
    if auto_psd:
        # No need to take the abs again.
        return pwr
    return np.abs(pwr)
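# Hedged usage sketch for cpsd above: the auto-spectrum of a pure tone should
# peak at the tone frequency. Assumes the module-level fft and detrend helpers
# that cpsd relies on are available (e.g. an FFT from numpy.fft and a
# one-argument detrender); the sample rate, tone and nfft are illustrative.
import numpy as np

def _demo_cpsd():
    fs = 100.0                                 # Hz
    t = np.arange(0, 60, 1.0 / fs)
    a = np.sin(2 * np.pi * 5.0 * t)            # 5 Hz tone
    nfft = 512
    pwr = cpsd(a, a, nfft, fs, window='hann')
    freqs = np.arange(1, len(pwr) + 1) * fs / nfft
    return freqs[np.argmax(pwr)]               # expect a value near 5 Hz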
def nulp_diff(x, y, dtype=None): """For each item in x and y, return the number of representable floating points between them. Parameters ---------- x : array_like first input array y : array_like second input array Returns ------- nulp: array_like number of representable floating point numbers between each item in x and y. Examples -------- # By definition, epsilon is the smallest number such as 1 + eps != 1, so # there should be exactly one ULP between 1 and 1 + eps >>> nulp_diff(1, 1 + np.finfo(x.dtype).eps) 1.0 """ import numpy as np if dtype: x = np.array(x, dtype=dtype) y = np.array(y, dtype=dtype) else: x = np.array(x) y = np.array(y) t = np.common_type(x, y) if np.iscomplexobj(x) or np.iscomplexobj(y): raise NotImplementedError("_nulp not implemented for complex array") x = np.array(x, dtype=t) y = np.array(y, dtype=t) if not x.shape == y.shape: raise ValueError("x and y do not have the same shape: %s - %s" % \ (x.shape, y.shape)) def _diff(rx, ry, vdt): diff = np.array(rx-ry, dtype=vdt) return np.abs(diff) rx = integer_repr(x) ry = integer_repr(y) return _diff(rx, ry, t)
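# Hedged usage sketch for nulp_diff above, assuming the companion integer_repr
# helper it calls is available: adjacent doubles near 1.0 are spaced by eps,
# so the ULP distance counts those steps directly.
import numpy as np

def _demo_nulp_diff():
    one = np.float64(1.0)
    eps = np.finfo(np.float64).eps
    assert nulp_diff(one, one + eps) == 1.0
    assert nulp_diff(one, one + 3 * eps) == 3.0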
def _hdm(a, b):
    c = tensor()
    c.d = a.d
    c.n = a.n
    c.r = np.zeros((a.d + 1, 1), dtype=np.int32)
    c.ps = np.zeros((a.d + 1, 1), dtype=np.int32)
    if np.iscomplexobj(a.core) or np.iscomplexobj(b.core):
        c.r, c.ps = tt_f90.tt_f90.ztt_hdm(a.n, a.r, b.r, a.ps, b.ps, a.core, b.core)
        c.core = tt_f90.tt_f90.zcore.copy()
    else:
        c.r, c.ps = tt_f90.tt_f90.dtt_hdm(a.n, a.r, b.r, a.ps, b.ps, a.core, b.core)
        c.core = tt_f90.tt_f90.core.copy()
    tt_f90.tt_f90.tt_dealloc()
    return c
def validate(self):
    data1 = self.getData('inLeft')
    data2 = self.getData('inRight')

    data1_shape = list(data1.shape)
    data2_shape = list(data2.shape)
    if data1_shape != data2_shape:
        self.log.warn("Two data sets must be the same size")
        return(1)

    if np.iscomplexobj(data1) != np.iscomplexobj(data2):
        self.log.warn("Two data sets must both be complex or both be real")
        return(1)

    return(0)
def numpy_to_matlab_sf(A, ndigits=2): """numpy_to_matlab(A, ndigits=2) This function assumes that A is one of these things: - a number (float or complex) - a 2D ndarray (float or complex) It returns A as a MATLAB-formatted string in which each number has ndigits significant digits. """ if np.isscalar(A): if np.iscomplexobj(A): A_str = _string_from_complex_sigfig(A, ndigits) else: A_str = to_precision.to_precision(A, ndigits) return A_str elif A.ndim == 1: s = A.shape m = s[0] A_str = '[' for i in range(0, m): if np.iscomplexobj(A[i]): A_str += _string_from_complex_sigfig(A[i], ndigits) else: A_str += to_precision.to_precision(A[i], ndigits) if i < m - 1: A_str += ', ' A_str += ']' return A_str else: s = A.shape m = s[0] n = s[1] A_str = '[' for i in range(0, m): for j in range(0, n): if np.iscomplexobj(A[i, j]): A_str += _string_from_complex_sigfig(A[i, j], ndigits) else: A_str += to_precision.to_precision(A[i, j], ndigits) if j == n - 1: if i == m - 1: A_str += ']' else: A_str += '; ' else: A_str += ' ' return A_str
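# Hedged usage sketch for numpy_to_matlab_sf above, assuming the to_precision
# module and _string_from_complex_sigfig helper it relies on are importable.
# The input values are illustrative; the exact rendered strings depend on
# to_precision's formatting.
import numpy as np

def _demo_numpy_to_matlab_sf():
    A = np.array([[1.23456, 2.0], [3.14159, 4.0]])
    print(numpy_to_matlab_sf(A, ndigits=3))     # e.g. "[1.23 2.00; 3.14 4.00]"
    print(numpy_to_matlab_sf(np.pi, ndigits=2))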
def __init__(self, x, h, uprate=1, downrate=1, xdim=-1, hdim=-1): """ Construct the ResamplerBank object. Parameters ---------- x : array-like Input signal array. May be multi-dimensional (ND). The signals will be operated on along the "xdim" dimension of x. This is needed to determine how many Resamplers need to be created, since each one needs to retain state. h : array-like FIR (finite-impulse response) filter coefficients array. May be ND. The filters are along the "hdim" dimension of h. uprate : int, optional Upsampling rate. (default=1) downrate : int, optional Downsampling rate. (default=1) xdim : int, optional Dimension for "x" input signal array. (default=-1) hdim : int, optional Dimension for "h" coefficient array. (default=-1) """ x = np.atleast_1d(x) h = np.atleast_1d(h) klass = klass_lookup(x, h) x = dim2back(x, xdim) h = dim2back(h, hdim) xi = full_index(x) xi[-1] = xi[-1][0:1] # xx is ignored xx, hh = np.broadcast_arrays(x[xi], h) self.hh = hh bank = np.zeros(self.hh.shape[:-1], dtype=object) for idx, hi in enumdims(self.hh, (-1,), complement=True): bank[idx] = klass(uprate, downrate, hi) self.bank = bank self.r0 = self.bank.flat[0] self.coefs_per_phase = (h.shape[-1] + uprate - 1) // uprate self.xdim = xdim if np.iscomplexobj(x) or np.iscomplexobj(h): self.output_type = complex else: self.output_type = float
def iscomplexobj(self):
    """
    Check whether `values` is an array of complex numbers.

    The type of the input is checked, not the value. Even if the input has an
    imaginary part equal to zero, `iscomplexobj` evaluates to True.
    """
    return np.iscomplexobj(self.values)
def eigss(A, delcc): r"""Solve complex eigen problem for state-space formulation. Parameters ---------- A : 2d ndarray The state-space matrix (which doesn't have to be defined as below) delcc : bool If True, delete one of each complex-conjugate pair and put the appropriate factor of 2. in ur output (see below). This is handy for real time-domain solutions, but not for frequency domain solutions. See note below. Returns ------- A SimpleNamespace with the members: lam : 1d ndarray; complex The vector of complex eigenvalues ur : 2d ndarray; complex Normalized complex right eigenvectors ur_inv : 2d ndarray; complex Inverse of right eigenvectors dups : 1d ndarray Index partition vector for repeated roots; it will be empty (`np.array([])`) if there are no repeated roots. For example, if only the second and third roots are duplicates of each other, `dups` will be `np.array([1, 2])`. wn : 1d ndarray; real Vector of natural frequencies (rad/sec) in the same order as `lam` (see :func:`get_freq_damping`) zeta : 1d ndarray; real Vector of critical damping ratios (see :func:`get_freq_damping`) eig_success : bool True if routine is successful. False if the eigenvectors form a singular matrix or they do not diagonalize `A`; in that case, ODE solution (if computed) is most likely wrong. Notes ----- The typical 2nd order ODE is: .. math:: M \ddot{q} + B \dot{q} + K q = F The 2nd order ODE set of equations are transformed into the 1st order ODE (see :func:`make_A`): .. math:: \left\{ \begin{array}{c} \ddot{q} \\ \dot{q} \end{array} \right\} - \left[ \begin{array}{cc} -M^{-1} B & -M^{-1} K \\ I & 0 \end{array} \right] \left\{ \begin{array}{c} \dot{q} \\ q \end{array} \right\} = \left\{ \begin{array}{c} M^{-1} F \\ 0 \end{array} \right\} or: .. math:: \dot{y} - A y = w When the `M`, `B` and `K` are assembled into the `A` matrix, they must not contain any rigid-body modes since the inverse of `ur` may not exist, causing the method to fail. If you seen any warning messages about a matrix being singular or near singular, the method has likely failed. Duplicate roots can also cause trouble, so if there are duplicates, check to see if ``ur_inv @ ur`` and ``ur_inv @ A @ ur`` are diagonal matrices (if ``not delcc``, these would be identity and the eigenvalues, but if `delcc` is True, the factor of 2.0 discussed next has the chance to modify that). If method fails, see :class:`SolveExp1` or :class:`SolveExp2`. For underdamped modes, the complex eigenvalues and modes come in complex conjugate pairs. Each mode of a pair yields the same solution for real time-domain problems. This routine takes advantage of this fact (if `delcc` is True) by chopping out one of the pair and multiplying the other one by 2.0 (in `ur`). Then, if all modes are underdamped: ``len(lam) = M.shape[0]`` and if no modes are underdamped: ``len(lam) = 2*M.shape[0]``. See also -------- :func:`make_A`, :class:`SolveUnc`, :func:`get_freq_damping`. """ warn1 = ("in :func:`eigss`, the eigenvectors for the state-" "space formulation are poorly conditioned (cond={:.3e}).\n") warn2 = ("Repeated roots detected and equations do not appear " "to be diagonalized. Generally, this is a failure " "condition.\n" "\tMax off-diag / on-diag of `inv(ur) @ A @ ur` = {} / {} = {}\n") note = ("Solution will likely be inaccurate. 
" "Possible causes/solutions:\n" "\tThe partition vector for the rigid-body modes is " "incorrect or not set\n" "\tThe equations are not in modal space, and the " "rigid-body modes cannot be detected -- use the " "`pre_eig` option\n" "\tUse :class:`SolveExp2` instead for time domain, or\n" "\tUse :class:`FreqDirect` instead for frequency domain\n\n" "\tSetting `eig_success` attribute to False\n") lam, ur = la.eig(A) c = np.linalg.cond(ur) if c > 1 / np.finfo(float).eps: warnings.warn(warn1.format(c) + note, RuntimeWarning) eig_success = False else: eig_success = True ur_inv = la.inv(ur) lam, i, dups = _eigc_dups(lam) ur = ur[:, i] ur_inv = ur_inv[i] if dups.size: uau = ur_inv @ A @ ur d = np.diag(uau) max_off = abs(np.diag(d) - uau).max() max_on = abs(d).max() if max_off > 1e-8 * max_on: warnings.warn( warn2.format(max_off, max_on, max_off / max_on) + note, RuntimeWarning) eig_success = False wn, zeta = get_freq_damping(lam, np.iscomplexobj(A)) if delcc: lam, ur, ur_inv, dups = delconj(lam, ur, ur_inv, dups) return SimpleNamespace( lam=lam, ur=ur, ur_inv=ur_inv, dups=dups, wn=wn, zeta=zeta, eig_success=eig_success, )
def __init__(self, points, values, method="slinear", bounds_error=True, fill_value=np.nan, spline_dim_error=True): """ Initialize instance of interpolation class. Parameters ---------- points : tuple of ndarray of float, with shapes (m1, ), ..., (mn, ) The points defining the regular grid in n dimensions. values : array_like, shape (m1, ..., mn, ...) The data on the regular grid in n dimensions. method : str, optional The method of interpolation to perform. Supported are 'slinear', 'cubic', and 'quintic'. This parameter will become the default for the object's ``__call__`` method. Default is "linear". bounds_error : bool, optional If True, when interpolated values are requested outside of the domain of the input data, a ValueError is raised. If False, then `fill_value` is used. Default is True (raise an exception). fill_value : number, optional If provided, the value to use for points outside of the interpolation domain. If None, values outside the domain are extrapolated. Note that gradient values will always be extrapolated rather than set to the fill_value if bounds_error=False for any points outside of the interpolation domain. Default is `np.nan`. spline_dim_error : bool, optional If spline_dim_error=True and an order `k` spline interpolation method is used, then if any dimension has fewer points than `k` + 1, an error will be raised. If spline_dim_error=False, then the spline interpolant order will be reduced as needed on a per-dimension basis. Default is True (raise an exception). """ if not make_interp_spline: msg = "'MetaModelStructuredComp' requires scipy>=0.19, but the currently" \ " installed version is %s." % scipy_version warnings.warn(msg) configs = _RegularGridInterp._interp_methods() self._all_methods, self._interp_config = configs if method not in self._all_methods: all_m = ', '.join(['"' + m + '"' for m in self._all_methods]) raise ValueError('Method "%s" is not defined. Valid methods are ' '%s.' % (method, all_m)) self.method = method self.bounds_error = bounds_error if not hasattr(values, 'ndim'): # allow reasonable duck-typed values values = np.asarray(values) if len(points) > values.ndim: raise ValueError("There are %d point arrays, but values has %d " "dimensions" % (len(points), values.ndim)) if hasattr(values, 'dtype') and hasattr(values, 'astype'): if not np.issubdtype(values.dtype, np.inexact): values = values.astype(float) if np.iscomplexobj(values[:]): raise ValueError("method '%s' does not support complex values." % method) self.fill_value = fill_value if fill_value is not None: fill_value_dtype = np.asarray(fill_value).dtype if (hasattr(values, 'dtype') and not np.can_cast( fill_value_dtype, values.dtype, casting='same_kind')): raise ValueError("fill_value must be either 'None' or " "of a type compatible with values") k = self._interp_config[method] self._ki = [] for i, p in enumerate(points): n_p = len(p) if not np.all(np.diff(p) > 0.): raise ValueError("The points in dimension %d must be strictly " "ascending" % i) if not np.asarray(p).ndim == 1: raise ValueError("The points in dimension %d must be " "1-dimensional" % i) if not values.shape[i] == n_p: raise ValueError("There are %d points and %d values in " "dimension %d" % (len(p), values.shape[i], i)) self._ki.append(k) if n_p <= k: if not spline_dim_error: self._ki[-1] = n_p - 1 else: raise ValueError("There are %d points in dimension %d," " but method %s requires at least %d " "points per " "dimension." 
"" % (n_p, i, method, k + 1)) self.grid = tuple([np.asarray(p) for p in points]) self.values = values self._xi = None self._all_gradients = None self._spline_dim_error = spline_dim_error self._gmethod = None
def make_A(M, B, K): r""" Setup the state-space matrix from mass, damping and stiffness. Parameters ---------- M : 1d or 2d ndarray or None Mass; vector (of diagonal), or full; if None, mass is assumed identity B : 1d or 2d ndarray Damping; vector (of diagonal), or full K : 1d or 2d ndarray Stiffness; vector (of diagonal), or full Returns ------- A : 2d ndarray The state-space matrix defined as shown below Notes ----- The typical 2nd order ODE is: .. math:: M \ddot{q} + B \dot{q} + K q = F The 2nd order ODE set of equations are transformed into the 1st order ODE: .. math:: \left\{ \begin{array}{c} \ddot{q} \\ \dot{q} \end{array} \right\} - \left[ \begin{array}{cc} -M^{-1} B & -M^{-1} K \\ I & 0 \end{array} \right] \left\{ \begin{array}{c} \dot{q} \\ q \end{array} \right\} = \left\{ \begin{array}{c} M^{-1} F \\ 0 \end{array} \right\} or: .. math:: \dot{y} - A y = w When the `M`, `B` and `K` are assembled into the `A` matrix, they must not contain any rigid-body modes. See :func:`eigss`. See also -------- :func:`eigss`, :class:`SolveUnc`, :class:`SolveExp2` """ Atype = float if M is None: B, K = np.atleast_1d(B, K) if np.iscomplexobj(B) or np.iscomplexobj(K): Atype = complex else: M, B, K = np.atleast_1d(M, B, K) if np.iscomplexobj(M) or np.iscomplexobj(B) or np.iscomplexobj(K): Atype = complex n = K.shape[0] A = np.zeros((2 * n, 2 * n), Atype) v1 = range(n) v2 = range(n, 2 * n) if B.ndim == 2: A[:n, :n] = -B else: A[v1, v1] = -B if K.ndim == 2: A[:n, n:] = -K else: A[v1, v2] = -K A[v2, v1] = 1.0 if M is not None: if M.ndim == 1: A[:n] = (1.0 / M).reshape(-1, 1) * A[:n] else: A[:n] = la.solve(M, A[:n]) return A
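# Hedged usage sketch for make_A above: a 2-DOF modal system with diagonal
# mass, damping and stiffness supplied as 1-D vectors, which exercises the
# vector branches of the function. The numeric values are illustrative only.
import numpy as np

def _demo_make_A():
    M = np.array([1.0, 1.0])
    B = np.array([0.1, 0.2])            # 2 * zeta * wn per mode
    K = np.array([100.0, 400.0])        # wn**2 per mode
    A = make_A(M, B, K)                 # shape (4, 4)
    # Top block rows hold [-M^-1 B, -M^-1 K]; the bottom block is [I, 0].
    return A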
def _spectral_helper(x, y=None, NFFT=None, Fs=None, detrend_func=None, window=None, noverlap=None, pad_to=None, sides=None, scale_by_freq=None, mode=None): """ Private helper implementing the common parts between the psd, csd, spectrogram and complex, magnitude, angle, and phase spectrums. """ if y is None: # if y is None use x for y same_data = True else: # The checks for if y is x are so that we can use the same function to # implement the core of psd(), csd(), and spectrogram() without doing # extra calculations. We return the unaveraged Pxy, freqs, and t. same_data = y is x if Fs is None: Fs = 2 if noverlap is None: noverlap = 0 if detrend_func is None: detrend_func = detrend_none if window is None: window = window_hanning # if NFFT is set to None use the whole signal if NFFT is None: NFFT = 256 if mode is None or mode == 'default': mode = 'psd' _api.check_in_list( ['default', 'psd', 'complex', 'magnitude', 'angle', 'phase'], mode=mode) if not same_data and mode != 'psd': raise ValueError("x and y must be equal if mode is not 'psd'") # Make sure we're dealing with a numpy array. If y and x were the same # object to start with, keep them that way x = np.asarray(x) if not same_data: y = np.asarray(y) if sides is None or sides == 'default': if np.iscomplexobj(x): sides = 'twosided' else: sides = 'onesided' _api.check_in_list(['default', 'onesided', 'twosided'], sides=sides) # zero pad x and y up to NFFT if they are shorter than NFFT if len(x) < NFFT: n = len(x) x = np.resize(x, NFFT) x[n:] = 0 if not same_data and len(y) < NFFT: n = len(y) y = np.resize(y, NFFT) y[n:] = 0 if pad_to is None: pad_to = NFFT if mode != 'psd': scale_by_freq = False elif scale_by_freq is None: scale_by_freq = True # For real x, ignore the negative frequencies unless told otherwise if sides == 'twosided': numFreqs = pad_to if pad_to % 2: freqcenter = (pad_to - 1)//2 + 1 else: freqcenter = pad_to//2 scaling_factor = 1. elif sides == 'onesided': if pad_to % 2: numFreqs = (pad_to + 1)//2 else: numFreqs = pad_to//2 + 1 scaling_factor = 2. if not np.iterable(window): window = window(np.ones(NFFT, x.dtype)) if len(window) != NFFT: raise ValueError( "The window length must match the data's first dimension") result = stride_windows(x, NFFT, noverlap, axis=0) result = detrend(result, detrend_func, axis=0) result = result * window.reshape((-1, 1)) result = np.fft.fft(result, n=pad_to, axis=0)[:numFreqs, :] freqs = np.fft.fftfreq(pad_to, 1/Fs)[:numFreqs] if not same_data: # if same_data is False, mode must be 'psd' resultY = stride_windows(y, NFFT, noverlap) resultY = detrend(resultY, detrend_func, axis=0) resultY = resultY * window.reshape((-1, 1)) resultY = np.fft.fft(resultY, n=pad_to, axis=0)[:numFreqs, :] result = np.conj(result) * resultY elif mode == 'psd': result = np.conj(result) * result elif mode == 'magnitude': result = np.abs(result) / np.abs(window).sum() elif mode == 'angle' or mode == 'phase': # we unwrap the phase later to handle the onesided vs. twosided case result = np.angle(result) elif mode == 'complex': result /= np.abs(window).sum() if mode == 'psd': # Also include scaling factors for one-sided densities and dividing by # the sampling frequency, if desired. 
Scale everything, except the DC # component and the NFFT/2 component: # if we have a even number of frequencies, don't scale NFFT/2 if not NFFT % 2: slc = slice(1, -1, None) # if we have an odd number, just don't scale DC else: slc = slice(1, None, None) result[slc] *= scaling_factor # MATLAB divides by the sampling frequency so that density function # has units of dB/Hz and can be integrated by the plotted frequency # values. Perform the same scaling here. if scale_by_freq: result /= Fs # Scale the spectrum by the norm of the window to compensate for # windowing loss; see Bendat & Piersol Sec 11.5.2. result /= (np.abs(window)**2).sum() else: # In this case, preserve power in the segment, not amplitude result /= np.abs(window).sum()**2 t = np.arange(NFFT/2, len(x) - NFFT/2 + 1, NFFT - noverlap)/Fs if sides == 'twosided': # center the frequency range at zero freqs = np.roll(freqs, -freqcenter, axis=0) result = np.roll(result, -freqcenter, axis=0) elif not pad_to % 2: # get the last value correctly, it is negative otherwise freqs[-1] *= -1 # we unwrap the phase here to handle the onesided vs. twosided case if mode == 'phase': result = np.unwrap(result, axis=0) return result, freqs, t
def toimage(arr, high=255, low=0, cmin=None, cmax=None, pal=None, mode=None, channel_axis=None): """Takes a numpy array and returns a PIL image. This function is only available if Python Imaging Library (PIL) is installed. The mode of the PIL image depends on the array shape and the `pal` and `mode` keywords. For 2-D arrays, if `pal` is a valid (N,3) byte-array giving the RGB values (from 0 to 255) then ``mode='P'``, otherwise ``mode='L'``, unless mode is given as 'F' or 'I' in which case a float and/or integer array is made. .. warning:: This function uses `bytescale` under the hood to rescale images to use the full (0, 255) range if ``mode`` is one of ``None, 'L', 'P', 'l'``. It will also cast data for 2-D images to ``uint32`` for ``mode=None`` (which is the default). Notes ----- For 3-D arrays, the `channel_axis` argument tells which dimension of the array holds the channel data. For 3-D arrays if one of the dimensions is 3, the mode is 'RGB' by default or 'YCbCr' if selected. The numpy array must be either 2 dimensional or 3 dimensional. """ data = np.asarray(arr) if np.iscomplexobj(data): raise ValueError("Cannot convert a complex-valued array.") shape = list(data.shape) valid = len(shape) == 2 or ((len(shape) == 3) and ((3 in shape) or (4 in shape))) if not valid: raise ValueError("'arr' does not have a suitable array shape for " "any mode.") if len(shape) == 2: shape = (shape[1], shape[0]) # columns show up first if mode == 'F': data32 = data.astype(np.float32) image = Image.frombytes(mode, shape, data32.tostring()) return image if mode in [None, 'L', 'P']: bytedata = bytescale(data, high=high, low=low, cmin=cmin, cmax=cmax) image = Image.frombytes('L', shape, bytedata.tostring()) if pal is not None: image.putpalette(np.asarray(pal, dtype=np.uint8).tostring()) # Becomes a mode='P' automagically. elif mode == 'P': # default gray-scale pal = (np.arange(0, 256, 1, dtype=np.uint8)[:, np.newaxis] * np.ones((3,), dtype=np.uint8)[np.newaxis, :]) image.putpalette(np.asarray(pal, dtype=np.uint8).tostring()) return image if mode == '1': # high input gives threshold for 1 bytedata = (data > high) image = Image.frombytes('1', shape, bytedata.tostring()) return image if cmin is None: cmin = np.amin(np.ravel(data)) if cmax is None: cmax = np.amax(np.ravel(data)) data = (data*1.0 - cmin)*(high - low)/(cmax - cmin) + low if mode == 'I': data32 = data.astype(np.uint32) image = Image.frombytes(mode, shape, data32.tostring()) else: raise ValueError(_errstr) return image # if here then 3-d array with a 3 or a 4 in the shape length. 
# Check for 3 in datacube shape --- 'RGB' or 'YCbCr' if channel_axis is None: if (3 in shape): ca = np.flatnonzero(np.asarray(shape) == 3)[0] else: ca = np.flatnonzero(np.asarray(shape) == 4) if len(ca): ca = ca[0] else: raise ValueError("Could not find channel dimension.") else: ca = channel_axis numch = shape[ca] if numch not in [3, 4]: raise ValueError("Channel axis dimension is not valid.") bytedata = bytescale(data, high=high, low=low, cmin=cmin, cmax=cmax) if ca == 2: strdata = bytedata.tostring() shape = (shape[1], shape[0]) elif ca == 1: strdata = np.transpose(bytedata, (0, 2, 1)).tostring() shape = (shape[2], shape[0]) elif ca == 0: strdata = np.transpose(bytedata, (1, 2, 0)).tostring() shape = (shape[2], shape[1]) if mode is None: if numch == 3: mode = 'RGB' else: mode = 'RGBA' if mode not in ['RGB', 'RGBA', 'YCbCr', 'CMYK']: raise ValueError(_errstr) if mode in ['RGB', 'YCbCr']: if numch != 3: raise ValueError("Invalid array shape for mode.") if mode in ['RGBA', 'CMYK']: if numch != 4: raise ValueError("Invalid array shape for mode.") # Here we know data and mode is correct image = Image.frombytes(mode, shape, strdata) return image
def savetxt_nice(fname, X, fmt='%.18e', delimiter=' ', newline='\n', header='', footer='', comments='# '): """ Reimplmenentation of numpy's savetxt that doesn't complain about bytes when saving to unicode files in Python 3. Save an array to a text file. Parameters ---------- fname : filename or file handle If the filename ends in ``.gz``, the file is automatically saved in compressed gzip format. `loadtxt` understands gzipped files transparently. X : array_like Data to be saved to a text file. fmt : str or sequence of strs, optional A single format (%10.5f), a sequence of formats, or a multi-format string, e.g. 'Iteration %d -- %10.5f', in which case `delimiter` is ignored. For complex `X`, the legal options for `fmt` are: a) a single specifier, `fmt='%.4e'`, resulting in numbers formatted like `' (%s+%sj)' % (fmt, fmt)` b) a full string specifying every real and imaginary part, e.g. `' %.4e %+.4j %.4e %+.4j %.4e %+.4j'` for 3 columns c) a list of specifiers, one per column - in this case, the real and imaginary part must have separate specifiers, e.g. `['%.3e + %.3ej', '(%.15e%+.15ej)']` for 2 columns delimiter : str, optional String or character separating columns. newline : str, optional String or character separating lines. .. versionadded:: 1.5.0 header : str, optional String that will be written at the beginning of the file. .. versionadded:: 1.7.0 footer : str, optional String that will be written at the end of the file. .. versionadded:: 1.7.0 comments : str, optional String that will be prepended to the ``header`` and ``footer`` strings, to mark them as comments. Default: '# ', as expected by e.g. ``numpy.loadtxt``. .. versionadded:: 1.7.0 See Also -------- save : Save an array to a binary file in NumPy ``.npy`` format savez : Save several arrays into an uncompressed ``.npz`` archive savez_compressed : Save several arrays into a compressed ``.npz`` archive Notes ----- Further explanation of the `fmt` parameter (``%[flag]width[.precision]specifier``): flags: ``-`` : left justify ``+`` : Forces to precede result with + or -. ``0`` : Left pad the number with zeros instead of space (see width). width: Minimum number of characters to be printed. The value is not truncated if it has more characters. precision: - For integer specifiers (eg. ``d,i,o,x``), the minimum number of digits. - For ``e, E`` and ``f`` specifiers, the number of digits to print after the decimal point. - For ``g`` and ``G``, the maximum number of significant digits. - For ``s``, the maximum number of characters. specifiers: ``c`` : character ``d`` or ``i`` : signed decimal integer ``e`` or ``E`` : scientific notation with ``e`` or ``E``. ``f`` : decimal floating point ``g,G`` : use the shorter of ``e,E`` or ``f`` ``o`` : signed octal ``s`` : string of characters ``u`` : unsigned decimal integer ``x,X`` : unsigned hexadecimal integer This explanation of ``fmt`` is not complete, for an exhaustive specification see [1]_. References ---------- .. [1] `Format Specification Mini-Language <http://docs.python.org/library/string.html# format-specification-mini-language>`_, Python Documentation. 
Examples -------- >>> x = y = z = np.arange(0.0,5.0,1.0) >>> np.savetxt('test.out', x, delimiter=',') # X is an array >>> np.savetxt('test.out', (x,y,z)) # x,y,z equal sized 1D arrays >>> np.savetxt('test.out', x, fmt='%1.4e') # use exponential notation """ # Py3 conversions first if isinstance(fmt, bytes): fmt = asstr(fmt) delimiter = asstr(delimiter) own_fh = False if _is_string_like(fname): own_fh = True if fname.endswith('.gz'): import gzip fh = gzip.open(fname, 'wb') else: if sys.version_info[0] >= 3: fh = open(fname, 'wb') else: fh = open(fname, 'w') elif hasattr(fname, 'write'): fh = fname else: raise ValueError('fname must be a string or file handle') try: X = np.asarray(X) # Handle 1-dimensional arrays if X.ndim == 1: # Common case -- 1d array of numbers if X.dtype.names is None: X = np.atleast_2d(X).T ncol = 1 # Complex dtype -- each field indicates a separate column else: ncol = len(X.dtype.descr) else: ncol = X.shape[1] iscomplex_X = np.iscomplexobj(X) # `fmt` can be a string with multiple insertion points or a # list of formats. E.g. '%10.5f\t%10d' or ('%10.5f', '$10d') print("type(fmt) = %s" % type(fmt)) if type(fmt) in (list, tuple): if len(fmt) != ncol: raise AttributeError('fmt has wrong shape. %s' % str(fmt)) format = asstr(delimiter).join(map(asstr, fmt)) elif isinstance(fmt, str): n_fmt_chars = fmt.count('%') error = ValueError('fmt has wrong number of %% formats: %s' % fmt) if n_fmt_chars == 1: if iscomplex_X: fmt = [ ' (%s+%sj)' % (fmt, fmt), ] * ncol else: fmt = [ fmt, ] * ncol format = delimiter.join(fmt) elif iscomplex_X and n_fmt_chars != (2 * ncol): raise error elif ((not iscomplex_X) and n_fmt_chars != ncol): raise error else: format = fmt else: raise ValueError('invalid fmt: %r' % (fmt, )) if len(header) > 0: header = header.replace('\n', '\n' + comments) fh.write(asbytes(comments + header + newline)) if iscomplex_X: for row in X: row2 = [] for number in row: row2.append(number.real) row2.append(number.imag) fh.write(asbytes(format % tuple(row2) + newline)) else: for row in X: try: print('format = %r' % format, type(format)) fh.write(asbytes(format % tuple(row) + newline)) except TypeError: raise TypeError("Mismatch between array dtype ('%s') and " "format specifier ('%s')" % (str(X.dtype), format)) if len(footer) > 0: footer = footer.replace('\n', '\n' + comments) fh.write(asbytes(comments + footer + newline)) finally: if own_fh: fh.close()
def __init__(self, dtype, m, n, compute_u=True, compute_v=True): """ Parameters ---------- dtype : type type of matrix to decompose m : int number of rows of matrix to decompose n : int number of columns of matrix to decompose compute_u : bool, optional whether to compute matrix u, default is True compute_v : bool, optional whether to compute matrix v, default is True """ self.dtype = dtype.type if isinstance(dtype, np.dtype) else dtype self.dtypereal = self.dtype(0).real.dtype.type self.complex = np.iscomplexobj(self.dtype(0)) from .lapack import _gesvd self.gesvd = _gesvd[self.dtype] self.m = m self.n = n mn = min(m, n) self.compute_u = compute_u self.compute_v = compute_v if not compute_u: self.jobu = "n" self.ldu = 0 # never referenced self.u_ptr = ctypes.c_void_p(0) self.write_u_into_a = False elif compute_v and m < n: self.jobu = "s" self.u = np.empty((m, mn), self.dtype, "F") self.ldu = self.u.strides[1] // self.u.itemsize self.u_ptr = self.u.ctypes.data_as(ctypes.c_void_p) self.write_u_into_a = False else: self.jobu = "o" self.ldu = m self.write_u_into_a = True if not compute_v: self.jobv = "n" self.ldv = 0 # never referenced self.v_ptr = ctypes.c_void_p(0) self.write_v_into_a = False elif compute_u and m >= n: self.jobv = "s" self.v = np.empty((mn, n), self.dtype, "F") self.ldv = self.v.strides[1] // self.v.itemsize self.v_ptr = self.v.ctypes.data_as(ctypes.c_void_p) self.write_v_into_a = False else: self.jobv = "o" self.ldv = mn self.write_v_into_a = True self.jobu = ctypes.byref(ctypes.c_char(self.jobu)) self.jobv = ctypes.byref(ctypes.c_char(self.jobv)) self.lda = m self.s = np.empty(mn, self.dtypereal) self.info = ctypes.c_int() work = np.empty(1, self.dtype) if self.complex: self.rwork = np.empty(5 * mn, self.dtypereal) self.gesvd(self.jobu, self.jobv, ctypes.byref(ctypes.c_int(self.m)), ctypes.byref(ctypes.c_int(self.n)), ctypes.c_void_p(0), ctypes.byref(ctypes.c_int(self.lda)), self.s.ctypes.data_as(ctypes.c_void_p), ctypes.c_void_p(0), ctypes.byref(ctypes.c_int(self.ldu)), ctypes.c_void_p(0), ctypes.byref(ctypes.c_int(self.ldv)), work.ctypes.data_as(ctypes.c_void_p), ctypes.byref(ctypes.c_int(-1)), self.rwork.ctypes.data_as(ctypes.c_void_p), ctypes.byref(self.info)) else: self.gesvd(self.jobu, self.jobv, ctypes.byref(ctypes.c_int(self.m)), ctypes.byref(ctypes.c_int(self.n)), ctypes.c_void_p(0), ctypes.byref(ctypes.c_int(self.lda)), self.s.ctypes.data_as(ctypes.c_void_p), ctypes.c_void_p(0), ctypes.byref(ctypes.c_int(self.ldu)), ctypes.c_void_p(0), ctypes.byref(ctypes.c_int(self.ldv)), work.ctypes.data_as(ctypes.c_void_p), ctypes.byref(ctypes.c_int(-1)), ctypes.byref(self.info)) assert self.info.value == 0 self.lwork = int(work[0].real) self.work = np.empty(self.lwork, self.dtype)
def geometric_transform(input, mapping, output_shape=None, output=None, order=3, mode='constant', cval=0.0, prefilter=True, extra_arguments=(), extra_keywords={}): """ Apply an arbitrary geometric transform. The given mapping function is used to find, for each point in the output, the corresponding coordinates in the input. The value of the input at those coordinates is determined by spline interpolation of the requested order. Parameters ---------- %(input)s mapping : {callable, scipy.LowLevelCallable} A callable object that accepts a tuple of length equal to the output array rank, and returns the corresponding input coordinates as a tuple of length equal to the input array rank. output_shape : tuple of ints, optional Shape tuple. %(output)s order : int, optional The order of the spline interpolation, default is 3. The order has to be in the range 0-5. %(mode_interp_constant)s %(cval)s %(prefilter)s extra_arguments : tuple, optional Extra arguments passed to `mapping`. extra_keywords : dict, optional Extra keywords passed to `mapping`. Returns ------- output : ndarray The filtered input. See Also -------- map_coordinates, affine_transform, spline_filter1d Notes ----- This function also accepts low-level callback functions with one the following signatures and wrapped in `scipy.LowLevelCallable`: .. code:: c int mapping(npy_intp *output_coordinates, double *input_coordinates, int output_rank, int input_rank, void *user_data) int mapping(intptr_t *output_coordinates, double *input_coordinates, int output_rank, int input_rank, void *user_data) The calling function iterates over the elements of the output array, calling the callback function at each element. The coordinates of the current output element are passed through ``output_coordinates``. The callback function must return the coordinates at which the input must be interpolated in ``input_coordinates``. The rank of the input and output arrays are given by ``input_rank`` and ``output_rank`` respectively. ``user_data`` is the area_data pointer provided to `scipy.LowLevelCallable` as-is. The callback function must return an integer error status that is zero if something went wrong and one otherwise. If an error occurs, you should normally set the Python error status with an informative message before returning, otherwise a default error message is set by the calling function. In addition, some other low-level function pointer specifications are accepted, but these are for backward compatibility only and should not be used in new code. For complex-valued `input`, this function transforms the real and imaginary components independently. .. versionadded:: 1.6.0 Complex-valued support added. Examples -------- >>> import numpy as np >>> from scipy.ndimage import geometric_transform >>> a = np.arange(12.).reshape((4, 3)) >>> def shift_func(output_coords): ... return (output_coords[0] - 0.5, output_coords[1] - 0.5) ... >>> geometric_transform(a, shift_func) array([[ 0. , 0. , 0. ], [ 0. , 1.362, 2.738], [ 0. , 4.812, 6.187], [ 0. , 8.263, 9.637]]) >>> b = [1, 2, 3, 4, 5] >>> def shift_func(output_coords): ... return (output_coords[0] - 3,) ... 
>>> geometric_transform(b, shift_func, mode='constant') array([0, 0, 0, 1, 2]) >>> geometric_transform(b, shift_func, mode='nearest') array([1, 1, 1, 1, 2]) >>> geometric_transform(b, shift_func, mode='reflect') array([3, 2, 1, 1, 2]) >>> geometric_transform(b, shift_func, mode='wrap') array([2, 3, 4, 1, 2]) """ if order < 0 or order > 5: raise RuntimeError('spline order not supported') input = numpy.asarray(input) if output_shape is None: output_shape = input.shape if input.ndim < 1 or len(output_shape) < 1: raise RuntimeError('input and output rank must be > 0') complex_output = numpy.iscomplexobj(input) output = _ni_support._get_output(output, input, shape=output_shape, complex_output=complex_output) if complex_output: kwargs = dict(order=order, mode=mode, prefilter=prefilter, output_shape=output_shape, extra_arguments=extra_arguments, extra_keywords=extra_keywords) geometric_transform(input.real, mapping, output=output.real, cval=numpy.real(cval), **kwargs) geometric_transform(input.imag, mapping, output=output.imag, cval=numpy.imag(cval), **kwargs) return output if prefilter and order > 1: padded, npad = _prepad_for_spline_filter(input, mode, cval) filtered = spline_filter(padded, order, output=numpy.float64, mode=mode) else: npad = 0 filtered = input mode = _ni_support._extend_mode_to_code(mode) _nd_image.geometric_transform(filtered, mapping, None, None, None, output, order, mode, cval, npad, extra_arguments, extra_keywords) return output
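# Hedged usage sketch (requires scipy >= 1.6 for the complex branch above): a complex
# input is split into real and imaginary parts, each transformed by its own
# real-valued call, and reassembled into a complex output.
import numpy as np
from scipy.ndimage import geometric_transform

a = np.arange(12.).reshape(4, 3) * (1.0 + 2.0j)

def shift_func(output_coords):
    return (output_coords[0] - 0.5, output_coords[1] - 0.5)

out = geometric_transform(a, shift_func, order=1)
print(out.dtype)                         # complex128
print(out.imag[1, 1] / out.real[1, 1])   # 2.0 -- both parts shifted identically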
def _apply_dics(data, info, tmin, forward, noise_csd, data_csd, reg, label=None, picks=None, pick_ori=None, verbose=None): """Dynamic Imaging of Coherent Sources (DICS). Calculate the DICS spatial filter based on a given cross-spectral density object and return estimates of source activity based on given data. Parameters ---------- data : array or list / iterable Sensor space data. If data.ndim == 2 a single observation is assumed and a single stc is returned. If data.ndim == 3 or if data is a list / iterable, a list of stc's is returned. info : dict Measurement info. tmin : float Time of first sample. forward : dict Forward operator. noise_csd : instance of CrossSpectralDensity The noise cross-spectral density. data_csd : instance of CrossSpectralDensity The data cross-spectral density. reg : float The regularization for the cross-spectral density. label : Label | None Restricts the solution to a given label. picks : array-like of int | None Indices (in info) of data channels. If None, MEG and EEG data channels (without bad channels) will be used. pick_ori : None | 'normal' If 'normal', rather than pooling the orientations by taking the norm, only the radial component is kept. verbose : bool, str, int, or None If not None, override default verbose level (see mne.verbose). Returns ------- stc : SourceEstimate (or list of SourceEstimate) Source time courses. """ is_free_ori, picks, _, proj, vertno, G =\ _prepare_beamformer_input(info, forward, label, picks, pick_ori) Cm = data_csd.data # Calculating regularized inverse, equivalent to an inverse operation after # regularization: Cm += reg * np.trace(Cm) / len(Cm) * np.eye(len(Cm)) Cm_inv = linalg.pinv(Cm, reg) # Compute spatial filters W = np.dot(G.T, Cm_inv) n_orient = 3 if is_free_ori else 1 n_sources = G.shape[1] // n_orient for k in range(n_sources): Wk = W[n_orient * k: n_orient * k + n_orient] Gk = G[:, n_orient * k: n_orient * k + n_orient] Ck = np.dot(Wk, Gk) # TODO: max-power is not implemented yet, however DICS does employ # orientation picking when one eigen value is much larger than the # other if is_free_ori: # Free source orientation Wk[:] = np.dot(linalg.pinv(Ck, 0.1), Wk) else: # Fixed source orientation Wk /= Ck # Noise normalization noise_norm = np.dot(np.dot(Wk.conj(), noise_csd.data), Wk.T) noise_norm = np.abs(noise_norm).trace() Wk /= np.sqrt(noise_norm) # Pick source orientation normal to cortical surface if pick_ori == 'normal': W = W[2::3] is_free_ori = False if isinstance(data, np.ndarray) and data.ndim == 2: data = [data] return_single = True else: return_single = False subject = _subject_from_forward(forward) for i, M in enumerate(data): if len(M) != len(picks): raise ValueError('data and picks must have the same length') if not return_single: logger.info("Processing epoch : %d" % (i + 1)) # Apply SSPs if info['projs']: M = np.dot(proj, M) # project to source space using beamformer weights if is_free_ori: sol = np.dot(W, M) logger.info('combining the current components...') sol = combine_xyz(sol) else: # Linear inverse: do not delay compuation due to non-linear abs sol = np.dot(W, M) tstep = 1.0 / info['sfreq'] if np.iscomplexobj(sol): sol = np.abs(sol) # XXX : STC cannot contain (yet?) complex values yield SourceEstimate(sol, vertices=vertno, tmin=tmin, tstep=tstep, subject=subject) logger.info('[done]')
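# Standalone sketch (an assumption about what mne's combine_xyz does, written here
# for illustration only) of pooling a free-orientation beamformer solution: the three
# orientation components of each source are collapsed to a single amplitude by the
# vector norm across orientations.
import numpy as np

def combine_xyz_sketch(sol):
    # sol: shape (3 * n_sources, n_times), x/y/z components stacked per source
    n_sources = sol.shape[0] // 3
    sol = sol.reshape(n_sources, 3, -1)
    return np.sqrt((np.abs(sol) ** 2).sum(axis=1))

sol = np.random.randn(6, 10) + 1j * np.random.randn(6, 10)
print(combine_xyz_sketch(sol).shape)  # (2, 10), real-valued amplitudes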
def affine_transform(input, matrix, offset=0.0, output_shape=None, output=None, order=3, mode='constant', cval=0.0, prefilter=True): """ Apply an affine transformation. Given an output image pixel index vector ``o``, the pixel value is determined from the input image at position ``np.dot(matrix, o) + offset``. This does 'pull' (or 'backward') resampling, transforming the output space to the input to locate area_data. Affine transformations are often described in the 'push' (or 'forward') direction, transforming input to output. If you have a matrix for the 'push' transformation, use its inverse (:func:`numpy.linalg.inv`) in this function. Parameters ---------- %(input)s matrix : ndarray The inverse coordinate transformation matrix, mapping output coordinates to input coordinates. If ``ndim`` is the number of dimensions of ``input``, the given matrix must have one of the following shapes: - ``(ndim, ndim)``: the linear transformation matrix for each output coordinate. - ``(ndim,)``: assume that the 2-D transformation matrix is diagonal, with the diagonal specified by the given value. A more efficient algorithm is then used that exploits the separability of the problem. - ``(ndim + 1, ndim + 1)``: assume that the transformation is specified using homogeneous coordinates [1]_. In this case, any value passed to ``offset`` is ignored. - ``(ndim, ndim + 1)``: as above, but the bottom row of a homogeneous transformation matrix is always ``[0, 0, ..., 1]``, and may be omitted. offset : float or sequence, optional The offset into the array where the transform is applied. If a float, `offset` is the same for each axis. If a sequence, `offset` should contain one value for each axis. output_shape : tuple of ints, optional Shape tuple. %(output)s order : int, optional The order of the spline interpolation, default is 3. The order has to be in the range 0-5. %(mode_interp_constant)s %(cval)s %(prefilter)s Returns ------- affine_transform : ndarray The transformed input. Notes ----- The given matrix and offset are used to find for each point in the output the corresponding coordinates in the input by an affine transformation. The value of the input at those coordinates is determined by spline interpolation of the requested order. Points outside the boundaries of the input are filled according to the given mode. .. versionchanged:: 0.18.0 Previously, the exact interpretation of the affine transformation depended on whether the matrix was supplied as a 1-D or a 2-D array. If a 1-D array was supplied to the matrix parameter, the output pixel value at index ``o`` was determined from the input image at position ``matrix * (o + offset)``. For complex-valued `input`, this function transforms the real and imaginary components independently. .. versionadded:: 1.6.0 Complex-valued support added. References ---------- .. 
[1] https://en.wikipedia.org/wiki/Homogeneous_coordinates """ if order < 0 or order > 5: raise RuntimeError('spline order not supported') input = numpy.asarray(input) if output_shape is None: output_shape = input.shape if input.ndim < 1 or len(output_shape) < 1: raise RuntimeError('input and output rank must be > 0') complex_output = numpy.iscomplexobj(input) output = _ni_support._get_output(output, input, shape=output_shape, complex_output=complex_output) if complex_output: kwargs = dict(offset=offset, output_shape=output_shape, order=order, mode=mode, prefilter=prefilter) affine_transform(input.real, matrix, output=output.real, cval=numpy.real(cval), **kwargs) affine_transform(input.imag, matrix, output=output.imag, cval=numpy.imag(cval), **kwargs) return output if prefilter and order > 1: padded, npad = _prepad_for_spline_filter(input, mode, cval) filtered = spline_filter(padded, order, output=numpy.float64, mode=mode) else: npad = 0 filtered = input mode = _ni_support._extend_mode_to_code(mode) matrix = numpy.asarray(matrix, dtype=numpy.float64) if matrix.ndim not in [1, 2] or matrix.shape[0] < 1: raise RuntimeError('no proper affine matrix provided') if (matrix.ndim == 2 and matrix.shape[1] == input.ndim + 1 and (matrix.shape[0] in [input.ndim, input.ndim + 1])): if matrix.shape[0] == input.ndim + 1: exptd = [0] * input.ndim + [1] if not numpy.all(matrix[input.ndim] == exptd): msg = ('Expected homogeneous transformation matrix with ' 'shape %s for image shape %s, but bottom row was ' 'not equal to %s' % (matrix.shape, input.shape, exptd)) raise ValueError(msg) # assume input is homogeneous coordinate transformation matrix offset = matrix[:input.ndim, input.ndim] matrix = matrix[:input.ndim, :input.ndim] if matrix.shape[0] != input.ndim: raise RuntimeError('affine matrix has wrong number of rows') if matrix.ndim == 2 and matrix.shape[1] != output.ndim: raise RuntimeError('affine matrix has wrong number of columns') if not matrix.flags.contiguous: matrix = matrix.copy() offset = _ni_support._normalize_sequence(offset, input.ndim) offset = numpy.asarray(offset, dtype=numpy.float64) if offset.ndim != 1 or offset.shape[0] < 1: raise RuntimeError('no proper offset provided') if not offset.flags.contiguous: offset = offset.copy() if matrix.ndim == 1: warnings.warn( "The behavior of affine_transform with a 1-D " "array supplied for the matrix parameter has changed in " "SciPy 0.18.0." ) _nd_image.zoom_shift(filtered, matrix, offset/matrix, output, order, mode, cval, npad, False) else: _nd_image.geometric_transform(filtered, None, None, matrix, offset, output, order, mode, cval, npad, None, None) return output
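# Hedged usage sketch of the homogeneous-coordinate branch handled above: with an
# (ndim + 1, ndim + 1) matrix the ``offset`` argument is ignored and the translation
# is read from the last column, so output index ``o`` samples the input at ``o + (1, 2)``.
import numpy as np
from scipy.ndimage import affine_transform

img = np.arange(25.).reshape(5, 5)
homogeneous = np.array([[1.0, 0.0, 1.0],
                        [0.0, 1.0, 2.0],
                        [0.0, 0.0, 1.0]])
shifted = affine_transform(img, homogeneous, order=1)
print(shifted[0, 0], img[1, 2])  # 7.0 7.0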
def map_coordinates(input, coordinates, output=None, order=3, mode='constant', cval=0.0, prefilter=True): """ Map the input array to new coordinates by interpolation. The array of coordinates is used to find, for each point in the output, the corresponding coordinates in the input. The value of the input at those coordinates is determined by spline interpolation of the requested order. The shape of the output is derived from that of the coordinate array by dropping the first axis. The values of the array along the first axis are the coordinates in the input array at which the output value is found. Parameters ---------- %(input)s coordinates : array_like The coordinates at which `input` is evaluated. %(output)s order : int, optional The order of the spline interpolation, default is 3. The order has to be in the range 0-5. %(mode_interp_constant)s %(cval)s %(prefilter)s Returns ------- map_coordinates : ndarray The result of transforming the input. The shape of the output is derived from that of `coordinates` by dropping the first axis. See Also -------- spline_filter, geometric_transform, scipy.interpolate Notes ----- For complex-valued `input`, this function maps the real and imaginary components independently. .. versionadded:: 1.6.0 Complex-valued support added. Examples -------- >>> from scipy import ndimage >>> a = np.arange(12.).reshape((4, 3)) >>> a array([[ 0., 1., 2.], [ 3., 4., 5.], [ 6., 7., 8.], [ 9., 10., 11.]]) >>> ndimage.map_coordinates(a, [[0.5, 2], [0.5, 1]], order=1) array([ 2., 7.]) Above, the interpolated value of a[0.5, 0.5] gives output[0], while a[2, 1] is output[1]. >>> inds = np.array([[0.5, 2], [0.5, 4]]) >>> ndimage.map_coordinates(a, inds, order=1, cval=-33.3) array([ 2. , -33.3]) >>> ndimage.map_coordinates(a, inds, order=1, mode='nearest') array([ 2., 8.]) >>> ndimage.map_coordinates(a, inds, order=1, cval=0, output=bool) array([ True, False], dtype=bool) """ if order < 0 or order > 5: raise RuntimeError('spline order not supported') input = numpy.asarray(input) coordinates = numpy.asarray(coordinates) if numpy.iscomplexobj(coordinates): raise TypeError('Complex type not supported') output_shape = coordinates.shape[1:] if input.ndim < 1 or len(output_shape) < 1: raise RuntimeError('input and output rank must be > 0') if coordinates.shape[0] != input.ndim: raise RuntimeError('invalid shape for coordinate array') complex_output = numpy.iscomplexobj(input) output = _ni_support._get_output(output, input, shape=output_shape, complex_output=complex_output) if complex_output: kwargs = dict(order=order, mode=mode, prefilter=prefilter) map_coordinates(input.real, coordinates, output=output.real, cval=numpy.real(cval), **kwargs) map_coordinates(input.imag, coordinates, output=output.imag, cval=numpy.imag(cval), **kwargs) return output if prefilter and order > 1: padded, npad = _prepad_for_spline_filter(input, mode, cval) filtered = spline_filter(padded, order, output=numpy.float64, mode=mode) else: npad = 0 filtered = input mode = _ni_support._extend_mode_to_code(mode) _nd_image.geometric_transform(filtered, None, coordinates, None, None, output, order, mode, cval, npad, None, None) return output
def spline_filter1d(input, order=3, axis=-1, output=numpy.float64, mode='mirror'): """ Calculate a 1-D spline filter along the given axis. The lines of the array along the given axis are filtered by a spline filter. The order of the spline must be >= 2 and <= 5. Parameters ---------- %(input)s order : int, optional The order of the spline, default is 3. axis : int, optional The axis along which the spline filter is applied. Default is the last axis. output : ndarray or dtype, optional The array in which to place the output, or the dtype of the returned array. Default is ``numpy.float64``. %(mode_interp_mirror)s Returns ------- spline_filter1d : ndarray The filtered input. Notes ----- All functions in `ndimage.interpolation` do spline interpolation of the input image. If using B-splines of `order > 1`, the input image values have to be converted to B-spline coefficients first, which is done by applying this 1-D filter sequentially along all axes of the input. All functions that require B-spline coefficients will automatically filter their inputs, a behavior controllable with the `prefilter` keyword argument. For functions that accept a `mode` parameter, the result will only be correct if it matches the `mode` used when filtering. For complex-valued `input`, this function processes the real and imaginary components independently. .. versionadded:: 1.6.0 Complex-valued support added. See Also -------- spline_filter : Multidimensional spline filter. Examples -------- We can filter an image using 1-D spline along the given axis: >>> from scipy.ndimage import spline_filter1d >>> import matplotlib.pyplot as plt >>> orig_img = np.eye(20) # create an image >>> orig_img[10, :] = 1.0 >>> sp_filter_axis_0 = spline_filter1d(orig_img, axis=0) >>> sp_filter_axis_1 = spline_filter1d(orig_img, axis=1) >>> f, ax = plt.subplots(1, 3, sharex=True) >>> for ind, area_data in enumerate([[orig_img, "original image"], ... [sp_filter_axis_0, "spline filter (axis=0)"], ... [sp_filter_axis_1, "spline filter (axis=1)"]]): ... ax[ind].imshow(area_data[0], cmap='gray_r') ... ax[ind].set_title(area_data[1]) >>> plt.tight_layout() >>> plt.show() """ if order < 0 or order > 5: raise RuntimeError('spline order not supported') input = numpy.asarray(input) complex_output = numpy.iscomplexobj(input) output = _ni_support._get_output(output, input, complex_output=complex_output) if complex_output: spline_filter1d(input.real, order, axis, output.real, mode) spline_filter1d(input.imag, order, axis, output.imag, mode) return output if order in [0, 1]: output[...] = numpy.array(input) else: mode = _ni_support._extend_mode_to_code(mode) axis = normalize_axis_index(axis, input.ndim) _nd_image.spline_filter1d(input, order, axis, output, mode) return output
def calc_generator_fid(model, data_loader, args, dims=2048): eps = 1e-6 incept = InceptionV3([InceptionV3.BLOCK_INDEX_BY_DIM[dims]]) model.eval() incept.eval() valiter = iter(data_loader) tot_iter = len(valiter) pred_fake = np.empty((args.val_batch * tot_iter, dims)) pred_real = np.empty((args.val_batch * tot_iter, dims)) if not next(model.parameters()).device == torch.device('cpu'): incept = incept.cuda(args.gpu) for i in tqdm(range(tot_iter)): x_real, _ = next(valiter) z_in = torch.randn(args.val_batch, args.latent_size) if not next(model.parameters()).device == torch.device('cpu'): x_real = x_real.cuda(args.gpu, non_blocking=True) z_in = z_in.cuda(args.gpu, non_blocking=True) out = model(z_in) x_fake = out[0] x_fake = (x_fake + 1.0) / 2.0 x_real = (x_real + 1.0) / 2.0 tmp_fake = incept(x_fake)[0] tmp_real = incept(x_real)[0] if tmp_fake.shape[2] != 1 or tmp_fake.shape[3] != 1: tmp_fake = adaptive_avg_pool2d(tmp_fake, output_size=(1, 1)) tmp_real = adaptive_avg_pool2d(tmp_real, output_size=(1, 1)) pred_fake[i * args.val_batch: (i + 1) * args.val_batch] = tmp_fake.cpu().data.numpy().reshape(args.val_batch, -1) pred_real[i * args.val_batch: (i + 1) * args.val_batch] = tmp_real.cpu().data.numpy().reshape(args.val_batch, -1) mu_fake = np.atleast_1d(np.mean(pred_fake, axis=0)) std_fake = np.atleast_2d(np.cov(pred_fake, rowvar=False)) mu_real = np.atleast_1d(np.mean(pred_real, axis=0)) std_real = np.atleast_2d(np.cov(pred_real, rowvar=False)) assert mu_fake.shape == mu_real.shape assert std_fake.shape == std_real.shape mu_diff = mu_fake - mu_real covmean, _ = linalg.sqrtm(std_fake.dot(std_real), disp=False) if not np.isfinite(covmean).all(): msg = ('fid calculation produces singular product; ' 'adding %s to diagonal of cov estimates') % eps print(msg) offset = np.eye(std_fake.shape[0]) * eps covmean = linalg.sqrtm((std_fake + offset).dot(std_real + offset)) # Numerical error might give slight imaginary component if np.iscomplexobj(covmean): if not np.allclose(np.diagonal(covmean).imag, 0, atol=1e-3): m = np.max(np.abs(covmean.imag)) raise ValueError('Imaginary component {}'.format(m)) covmean = covmean.real tr_covmean = np.trace(covmean) return mu_diff.dot(mu_diff) + np.trace(std_fake) + np.trace(std_real) - 2 * tr_covmean
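# Compact standalone restatement of what the function above computes once the
# Inception features have been collected. This is the standard Frechet distance
# formula FID = |mu1 - mu2|^2 + Tr(C1 + C2 - 2*sqrt(C1 @ C2)), written as a sketch,
# not this repo's API; names like frechet_distance are illustrative.
import numpy as np
from scipy import linalg

def frechet_distance(feat1, feat2, eps=1e-6):
    mu1, mu2 = feat1.mean(axis=0), feat2.mean(axis=0)
    c1 = np.cov(feat1, rowvar=False)
    c2 = np.cov(feat2, rowvar=False)
    covmean, _ = linalg.sqrtm(c1.dot(c2), disp=False)
    if not np.isfinite(covmean).all():
        # regularize when the product of covariances is (near) singular
        offset = np.eye(c1.shape[0]) * eps
        covmean, _ = linalg.sqrtm((c1 + offset).dot(c2 + offset), disp=False)
    if np.iscomplexobj(covmean):
        # numerical noise can leave a tiny imaginary component; drop it
        covmean = covmean.real
    diff = mu1 - mu2
    return diff.dot(diff) + np.trace(c1) + np.trace(c2) - 2.0 * np.trace(covmean)

feat1 = np.random.randn(512, 8)
feat2 = np.random.randn(512, 8) + 0.1
print(frechet_distance(feat1, feat2))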
def process_data(self, data): # Append any data carried from the last run if self.carry.size > 0: data = np.concatenate((self.carry, data)) # This is the largest number of records we can handle num_records = data.size // self.record_length # This is the carryover that we'll store until next round. # If nothing is left then reset the carryover. remaining_points = data.size % self.record_length if remaining_points > 0: if num_records > 0: self.carry = data[-remaining_points:] data = data[:-remaining_points] else: self.carry = data else: self.carry = np.zeros(0, dtype=self.source.descriptor.dtype) if num_records > 0: # The records are processed in parallel after being reshaped here reshaped_data = np.reshape(data, (num_records, self.record_length), order="C") # Update demodulation frequency if necessary if self.follow_axis.value != "": freq = self.demod_freqs[(self.idx % self.pts_before_freq_reset) // self.pts_before_freq_update] if freq != self.current_freq: self.update_references(freq) self.current_freq = freq self.idx += data.size # first stage decimating filter if self.filters[0] is None: filtered = reshaped_data else: stacked_coeffs = np.concatenate(self.filters[0]) # filter if np.iscomplexobj(reshaped_data): # TODO: compile complex versions of the IPP functions filtered_r = np.empty_like(reshaped_data, dtype=np.float32) filtered_i = np.empty_like(reshaped_data, dtype=np.float32) libipp.filter_records_iir( stacked_coeffs, self.filters[0][0].size - 1, np.ascontiguousarray( reshaped_data.real.astype(np.float32)), self.record_length, num_records, filtered_r) libipp.filter_records_iir( stacked_coeffs, self.filters[0][0].size - 1, np.ascontiguousarray( reshaped_data.imag.astype(np.float32)), self.record_length, num_records, filtered_i) filtered = filtered_r + 1j * filtered_i # decimate if self.decim_factors[0] > 1: filtered = filtered[:, ::self.decim_factors[0]] else: filtered = np.empty_like(reshaped_data, dtype=np.float32) libipp.filter_records_iir( stacked_coeffs, self.filters[0][0].size - 1, np.ascontiguousarray( reshaped_data.real.astype(np.float32)), self.record_length, num_records, filtered) # decimate if self.decim_factors[0] > 1: filtered = filtered[:, ::self.decim_factors[0]] # mix with reference # keep real and imaginary separate for filtering below if np.iscomplexobj(reshaped_data): filtered *= self.reference filtered_r = filtered.real filtered_i = filtered.imag else: filtered_r = self.reference_r * filtered filtered_i = self.reference_i * filtered # channel selection filters for ct in [1, 2]: if self.filters[ct] is None: continue stacked_coeffs = np.concatenate(self.filters[ct]) out_r = np.empty_like(filtered_r).astype(np.float32) out_i = np.empty_like(filtered_i).astype(np.float32) libipp.filter_records_iir( stacked_coeffs, self.filters[ct][0].size - 1, np.ascontiguousarray(filtered_r.astype(np.float32)), filtered_r.shape[-1], num_records, out_r) libipp.filter_records_iir( stacked_coeffs, self.filters[ct][0].size - 1, np.ascontiguousarray(filtered_i.astype(np.float32)), filtered_i.shape[-1], num_records, out_i) # decimate if self.decim_factors[ct] > 1: filtered_r = np.copy(out_r[:, ::self.decim_factors[ct]], order="C") filtered_i = np.copy(out_i[:, ::self.decim_factors[ct]], order="C") else: filtered_r = out_r filtered_i = out_i filtered = filtered_r + 1j * filtered_i # recover gain from selecting single sideband filtered *= 2 # push to output connectors for os in self.source.output_streams: os.push(filtered)
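# Conceptual sketch of the demodulate -> filter -> decimate pipeline implemented
# above. The function name, parameters, and the boxcar low-pass are illustrative
# stand-ins (assumptions), not this class's API or the IPP IIR filters it calls.
import numpy as np

def demodulate_records(records, demod_freq, sample_rate, decim=4):
    n_records, record_length = records.shape
    t = np.arange(record_length) / sample_rate
    reference = np.exp(-2j * np.pi * demod_freq * t)
    mixed = records * reference                    # shift the band of interest to DC
    kernel = np.ones(decim) / decim                # crude boxcar stands in for the IIR filters
    filtered = np.apply_along_axis(np.convolve, 1, mixed, kernel, mode="same")
    return 2.0 * filtered[:, ::decim]              # factor 2 recovers single-sideband gain

records = np.random.randn(8, 1024)
iq = demodulate_records(records, demod_freq=10e6, sample_rate=500e6)
print(iq.shape, iq.dtype)  # (8, 256) complex128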
def reference_evolution_soc(A, B, epsilon=4.0, k=2, proj_type="l2"): """ All python reference implementation for Evolution Strength of Connection --> If doing imaginary test, both A and B should be imaginary type upon entry --> This does the "unsymmetrized" version of the ode measure """ # number of PDEs per point is defined implicitly by block size csrflag = isspmatrix_csr(A) if csrflag: numPDEs = 1 else: numPDEs = A.blocksize[0] A = A.tocsr() # Preliminaries near_zero = finfo(float).eps sqrt_near_zero = sqrt(sqrt(near_zero)) Bmat = mat(B) A.eliminate_zeros() A.sort_indices() dimen = A.shape[1] NullDim = Bmat.shape[1] # Get spectral radius of Dinv*A, this is the time step size for the ODE D = A.diagonal() Dinv = numpy.zeros_like(D) mask = (D != 0.0) Dinv[mask] = 1.0 / D[mask] Dinv[D == 0] = 1.0 Dinv_A = scale_rows(A, Dinv, copy=True) rho_DinvA = approximate_spectral_radius(Dinv_A) # Calculate (Atilde^k) naively S = (scipy.sparse.eye(dimen, dimen, format="csr") - (1.0 / rho_DinvA) * Dinv_A) Atilde = scipy.sparse.eye(dimen, dimen, format="csr") for i in range(k): Atilde = S * Atilde # Strength Info should be row-based, so transpose Atilde Atilde = Atilde.T.tocsr() # Construct and apply a sparsity mask for Atilde that restricts Atilde^T to # the nonzero pattern of A, with the added constraint that row i of # Atilde^T retains only the nonzeros that are also in the same PDE as i. mask = A.copy() # Only consider strength at dofs from your PDE. Use mask to enforce this # by zeroing out all entries in Atilde that aren't from your PDE. if numPDEs > 1: row_length = diff(mask.indptr) my_pde = mod(range(dimen), numPDEs) my_pde = repeat(my_pde, row_length) mask.data[mod(mask.indices, numPDEs) != my_pde] = 0.0 del row_length, my_pde mask.eliminate_zeros() # Apply mask to Atilde, zeros in mask have already been eliminated at start # of routine. mask.data[:] = 1.0 Atilde = Atilde.multiply(mask) Atilde.eliminate_zeros() Atilde.sort_indices() del mask # Calculate strength based on constrained min problem of LHS = mat(zeros((NullDim + 1, NullDim + 1)), dtype=A.dtype) RHS = mat(zeros((NullDim + 1, 1)), dtype=A.dtype) # Choose tolerance for dropping "numerically zero" values later t = Atilde.dtype.char eps = numpy.finfo(numpy.float).eps feps = numpy.finfo(numpy.single).eps geps = numpy.finfo(numpy.longfloat).eps _array_precision = {'f': 0, 'd': 1, 'g': 2, 'F': 0, 'D': 1, 'G': 2} tol = {0: feps * 1e3, 1: eps * 1e6, 2: geps * 1e6}[_array_precision[t]] for i in range(dimen): # Get rowptrs and col indices from Atilde rowstart = Atilde.indptr[i] rowend = Atilde.indptr[i + 1] length = rowend - rowstart colindx = Atilde.indices[rowstart:rowend] # Local diagonal of A is used for scale invariant min problem D_A = mat(eye(length, dtype=A.dtype)) if proj_type == "D_A": for j in range(length): D_A[j, j] = D[colindx[j]] # Find row i's position in colindx, matrix must have sorted column # indices. iInRow = colindx.searchsorted(i) if length <= NullDim: # Do nothing, because the number of nullspace vectors will # be able to perfectly approximate this row of Atilde. Atilde.data[rowstart:rowend] = 1.0 else: # Grab out what we want from Atilde and B. 
Put into zi, Bi zi = mat(Atilde.data[rowstart:rowend]).T Bi = Bmat[colindx, :] # Construct constrained min problem LHS[0:NullDim, 0:NullDim] = 2.0 * Bi.H * D_A * Bi LHS[0:NullDim, NullDim] = D_A[iInRow, iInRow] * Bi[iInRow, :].H LHS[NullDim, 0:NullDim] = Bi[iInRow, :] RHS[0:NullDim, 0] = 2.0 * Bi.H * D_A * zi RHS[NullDim, 0] = zi[iInRow, 0] # Calc Soln to Min Problem x = mat(pinv(LHS)) * RHS # Calculate best constrained approximation to zi with span(Bi), and # filter out "numerically" zero values. This is important because # we look only at the sign of values below when calculating angle. zihat = Bi * x[:-1] tol_i = max(abs(zihat)) * tol zihat.real[abs(zihat.real) < tol_i] = 0.0 if numpy.iscomplexobj(zihat): zihat.imag[abs(zihat.imag) < tol_i] = 0.0 # if angle in the complex plane between individual entries is # greater than 90 degrees, then weak. We can just look at the dot # product to determine if angle is greater than 90 degrees. angle = real(ravel(zihat))*real(ravel(zi)) +\ imag(ravel(zihat))*imag(ravel(zi)) angle = angle < 0.0 angle = array(angle, dtype=bool) # Calculate approximation ratio zi = zihat / zi # If the ratio is small, then weak connection zi[abs(zi) <= 1e-4] = 1e100 # If angle is greater than 90 degrees, then weak connection zi[angle] = 1e100 # Calculate Relative Approximation Error zi = abs(1.0 - zi) # important to make "perfect" connections explicitly nonzero zi[zi < sqrt_near_zero] = 1e-4 # Calculate and apply drop-tol. Ignore diagonal by making it very # large zi[iInRow] = 1e5 drop_tol = min(zi) * epsilon zi[zi > drop_tol] = 0.0 Atilde.data[rowstart:rowend] = ravel(zi) # Clean up, and return Atilde Atilde.eliminate_zeros() Atilde.data = array(real(Atilde.data), dtype=float) # Set diagonal to 1.0, as each point is strongly connected to itself. I = scipy.sparse.eye(dimen, dimen, format="csr") I.data -= Atilde.diagonal() Atilde = Atilde + I # If converted BSR to CSR we return amalgamated matrix with the minimum # nonzero for each block making up the nonzeros of Atilde if not csrflag: Atilde = Atilde.tobsr(blocksize=(numPDEs, numPDEs)) # Atilde = csr_matrix((data, row, col), shape=(*,*)) At = [] for i in range(Atilde.indices.shape[0]): Atmin = Atilde.data[i, :, :][Atilde.data[i, :, :].nonzero()] At.append(Atmin.min()) Atilde = csr_matrix( (array(At), Atilde.indices, Atilde.indptr), shape=(Atilde.shape[0] / numPDEs, Atilde.shape[1] / numPDEs)) # Standardized strength values require small values be weak and large # values be strong. So, we invert the algebraic distances computed here Atilde.data = 1.0 / Atilde.data # Scale Atilde by the largest magnitude entry in each row largest_row_entry = numpy.zeros((Atilde.shape[0], ), dtype=Atilde.dtype) for i in range(Atilde.shape[0]): for j in range(Atilde.indptr[i], Atilde.indptr[i + 1]): val = numpy.abs(Atilde.data[j]) if val > largest_row_entry[i]: largest_row_entry[i] = val largest_row_entry[largest_row_entry != 0] =\ 1.0 / largest_row_entry[largest_row_entry != 0] Atilde = Atilde.tocsr() Atilde = scale_rows(Atilde, largest_row_entry, copy=True) return Atilde
def check(self, data=None, dtype=None): """Check the special function against the data.""" if self.knownfailure: pytest.xfail(reason=self.knownfailure) if data is None: data = self.data if dtype is None: dtype = data.dtype else: data = data.astype(dtype) rtol, atol = self.get_tolerances(dtype) # Apply given filter functions if self.param_filter: param_mask = np.ones((data.shape[0], ), np.bool_) for j, filter in zip(self.param_columns, self.param_filter): if filter: param_mask &= list(filter(data[:, j])) data = data[param_mask] # Pick parameters from the correct columns params = [] for j in self.param_columns: if np.iscomplexobj(j): j = int(j.imag) params.append(data[:, j].astype(complex)) else: params.append(data[:, j]) # Helper for evaluating results def eval_func_at_params(func, skip_mask=None): if self.vectorized: got = func(*params) else: got = [] for j in range(len(params[0])): if skip_mask is not None and skip_mask[j]: got.append(np.nan) continue got.append( func(*tuple([params[i][j] for i in range(len(params))]))) got = np.asarray(got) if not isinstance(got, tuple): got = (got, ) return got # Evaluate function to be tested got = eval_func_at_params(self.func) # Grab the correct results if self.result_columns is not None: # Correct results passed in with the data wanted = tuple([data[:, icol] for icol in self.result_columns]) else: # Function producing correct results passed in skip_mask = None if self.nan_ok and len(got) == 1: # Don't spend time evaluating what doesn't need to be evaluated skip_mask = np.isnan(got[0]) wanted = eval_func_at_params(self.result_func, skip_mask=skip_mask) # Check the validity of each output returned assert_(len(got) == len(wanted)) for output_num, (x, y) in enumerate(zip(got, wanted)): if np.issubdtype(x.dtype, np.complexfloating) or self.ignore_inf_sign: pinf_x = np.isinf(x) pinf_y = np.isinf(y) minf_x = np.isinf(x) minf_y = np.isinf(y) else: pinf_x = np.isposinf(x) pinf_y = np.isposinf(y) minf_x = np.isneginf(x) minf_y = np.isneginf(y) nan_x = np.isnan(x) nan_y = np.isnan(y) olderr = np.seterr(all='ignore') try: abs_y = np.absolute(y) abs_y[~np.isfinite(abs_y)] = 0 diff = np.absolute(x - y) diff[~np.isfinite(diff)] = 0 rdiff = diff / np.absolute(y) rdiff[~np.isfinite(rdiff)] = 0 finally: np.seterr(**olderr) tol_mask = (diff <= atol + rtol * abs_y) pinf_mask = (pinf_x == pinf_y) minf_mask = (minf_x == minf_y) nan_mask = (nan_x == nan_y) bad_j = ~(tol_mask & pinf_mask & minf_mask & nan_mask) point_count = bad_j.size if self.nan_ok: bad_j &= ~nan_x bad_j &= ~nan_y point_count -= (nan_x | nan_y).sum() if not self.distinguish_nan_and_inf and not self.nan_ok: # If nan's are okay we've already covered all these cases inf_x = np.isinf(x) inf_y = np.isinf(y) both_nonfinite = (inf_x & nan_y) | (nan_x & inf_y) bad_j &= ~both_nonfinite point_count -= both_nonfinite.sum() if np.any(bad_j): # Some bad results: inform what, where, and how bad msg = [""] msg.append("Max |adiff|: %g" % diff.max()) msg.append("Max |rdiff|: %g" % rdiff.max()) msg.append( "Bad #1lab_results (%d out of %d) for the following points (in output %d):" % ( np.sum(bad_j), point_count, output_num, )) for j in np.where(bad_j)[0]: j = int(j) fmt = lambda x: "%30s" % np.array2string(x[j], precision=18) a = " ".join(map(fmt, params)) b = " ".join(map(fmt, got)) c = " ".join(map(fmt, wanted)) d = fmt(rdiff) msg.append("%s => %s != %s (rdiff %s)" % (a, b, c, d)) assert_(False, "\n".join(msg))
def _normalized_weights(Wk, Gk, Cm_inv_sq, reduce_rank, nn, sk): """Compute the normalized weights in max-power orientation. Uses Eq. 4.47 from [1]_. Operates in place on Wk. Parameters ---------- Wk : ndarray, shape (n_sources, 3, n_channels) The set of un-normalized filters at a single source point. Gk : ndarray, shape (n_sources, n_channels, 3) The leadfield at a single source point. Cm_inv_sq : ndarray, shape (n_channels, n_channels) The squared inverse covariance matrix. reduce_rank : bool Whether to reduce the rank of the filter by one. nn : ndarray, shape (n_sources, 3) The source normal. sk : ndarray, shape (n_sources, 3) The source prior. References ---------- .. [1] Sekihara & Nagarajan. Adaptive spatial filters for electromagnetic brain imaging (2008) Springer Science & Business Media """ # np.dot Gk with Cm_inv_sq on left and right norm_inv = np.matmul(Gk.transpose(0, 2, 1), np.matmul(Cm_inv_sq[np.newaxis], Gk)) # invert this using an eigenvalue decomposition norm = _sym_inv(norm_inv, reduce_rank) # Reapply source covariance after inversion norm *= sk[:, :, np.newaxis] norm *= sk[:, np.newaxis, :] power = np.matmul(norm, np.matmul(Wk, Gk)) # np.dot for each source # Determine orientation of max power assert power.dtype in (np.float64, np.complex128) # LCMV, DICS eig_vals, eig_vecs = np.linalg.eig(power) if not np.iscomplexobj(power) and np.iscomplexobj(eig_vecs): raise ValueError('The eigenspectrum of the leadfield is ' 'complex. Consider reducing the rank of the ' 'leadfield by using reduce_rank=True.') idx_max = np.argmax(eig_vals, axis=1) max_power_ori = eig_vecs[np.arange(eig_vecs.shape[0]), :, idx_max] # set the (otherwise arbitrary) sign to match the normal sign = np.sign(np.sum(max_power_ori * nn, axis=1, keepdims=True)) sign[sign == 0] = 1 max_power_ori *= sign # Compute the filter in the orientation of max power Wk_max = np.matmul(max_power_ori[:, np.newaxis], Wk)[:, 0] Gk_max = np.matmul(Gk, max_power_ori[:, :, np.newaxis]) denom = np.matmul(Gk_max.transpose(0, 2, 1), np.matmul(Cm_inv_sq[np.newaxis], Gk_max))[:, 0] np.sqrt(denom, out=denom) Wk_max /= denom # All three entries get the same value from this operation Wk[:] = Wk_max[:, np.newaxis]
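# Standalone sketch of the orientation selection above, restricted to the real
# symmetric case (the function above uses np.linalg.eig because ``power`` can be
# complex): the dominant eigenvector of the 3x3 power matrix gives the max-power
# source orientation, sign-aligned with the surface normal ``nn``.
import numpy as np

def max_power_orientation(power, nn):
    eig_vals, eig_vecs = np.linalg.eigh(power)
    ori = eig_vecs[:, np.argmax(eig_vals)]
    sign = np.sign(ori @ nn)
    return ori * (sign if sign != 0 else 1.0)

power = np.array([[2.0, 0.3, 0.1],
                  [0.3, 1.0, 0.0],
                  [0.1, 0.0, 0.5]])
nn = np.array([0.0, 0.0, 1.0])
print(max_power_orientation(power, nn))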
def encode(obj): """ Data encoder """ tobj = type(obj) if isinstance(obj, Index): if isinstance(obj, PeriodIndex): return { 'typ': 'period_index', 'klass': obj.__class__.__name__, 'name': getattr(obj, 'name', None), 'freq': getattr(obj, 'freqstr', None), 'dtype': obj.dtype.num, 'data': convert(obj.asi8) } elif isinstance(obj, DatetimeIndex): tz = getattr(obj, 'tz', None) # store tz info and data as UTC if tz is not None: tz = tz.zone obj = obj.tz_convert('UTC') return { 'typ': 'datetime_index', 'klass': obj.__class__.__name__, 'name': getattr(obj, 'name', None), 'dtype': obj.dtype.num, 'data': convert(obj.asi8), 'freq': getattr(obj, 'freqstr', None), 'tz': tz } elif isinstance(obj, MultiIndex): return { 'typ': 'multi_index', 'klass': obj.__class__.__name__, 'names': getattr(obj, 'names', None), 'dtype': obj.dtype.num, 'data': convert(obj.values) } else: return { 'typ': 'index', 'klass': obj.__class__.__name__, 'name': getattr(obj, 'name', None), 'dtype': obj.dtype.num, 'data': convert(obj.values) } elif isinstance(obj, Series): if isinstance(obj, SparseSeries): raise NotImplementedError( 'msgpack sparse series is not implemented') #d = {'typ': 'sparse_series', # 'klass': obj.__class__.__name__, # 'dtype': obj.dtype.num, # 'index': obj.index, # 'sp_index': obj.sp_index, # 'sp_values': convert(obj.sp_values), # 'compress': compressor} #for f in ['name', 'fill_value', 'kind']: # d[f] = getattr(obj, f, None) #return d else: return { 'typ': 'series', 'klass': obj.__class__.__name__, 'name': getattr(obj, 'name', None), 'index': obj.index, 'dtype': obj.dtype.num, 'data': convert(obj.values), 'compress': compressor } elif issubclass(tobj, NDFrame): if isinstance(obj, SparseDataFrame): raise NotImplementedError( 'msgpack sparse frame is not implemented') #d = {'typ': 'sparse_dataframe', # 'klass': obj.__class__.__name__, # 'columns': obj.columns} #for f in ['default_fill_value', 'default_kind']: # d[f] = getattr(obj, f, None) #d['data'] = dict([(name, ss) # for name, ss in compat.iteritems(obj)]) #return d elif isinstance(obj, SparsePanel): raise NotImplementedError( 'msgpack sparse frame is not implemented') #d = {'typ': 'sparse_panel', # 'klass': obj.__class__.__name__, # 'items': obj.items} #for f in ['default_fill_value', 'default_kind']: # d[f] = getattr(obj, f, None) #d['data'] = dict([(name, df) # for name, df in compat.iteritems(obj)]) #return d else: data = obj._data if not data.is_consolidated(): data = data.consolidate() # the block manager return { 'typ': 'block_manager', 'klass': obj.__class__.__name__, 'axes': data.axes, 'blocks': [{ 'items': b.items, 'values': convert(b.values), 'shape': b.values.shape, 'dtype': b.dtype.num, 'klass': b.__class__.__name__, 'compress': compressor } for b in data.blocks] } elif isinstance( obj, (datetime, date, np.datetime64, timedelta, np.timedelta64)): if isinstance(obj, Timestamp): tz = obj.tzinfo if tz is not None: tz = tz.zone offset = obj.offset if offset is not None: offset = offset.freqstr return { 'typ': 'timestamp', 'value': obj.value, 'offset': offset, 'tz': tz } elif isinstance(obj, np.timedelta64): return {'typ': 'timedelta64', 'data': obj.view('i8')} elif isinstance(obj, timedelta): return { 'typ': 'timedelta', 'data': (obj.days, obj.seconds, obj.microseconds) } elif isinstance(obj, np.datetime64): return {'typ': 'datetime64', 'data': str(obj)} elif isinstance(obj, datetime): return {'typ': 'datetime', 'data': obj.isoformat()} elif isinstance(obj, date): return {'typ': 'date', 'data': obj.isoformat()} raise Exception("cannot encode this 
datetimelike object: %s" % obj) elif isinstance(obj, Period): return {'typ': 'period', 'ordinal': obj.ordinal, 'freq': obj.freq} elif isinstance(obj, BlockIndex): return { 'typ': 'block_index', 'klass': obj.__class__.__name__, 'blocs': obj.blocs, 'blengths': obj.blengths, 'length': obj.length } elif isinstance(obj, IntIndex): return { 'typ': 'int_index', 'klass': obj.__class__.__name__, 'indices': obj.indices, 'length': obj.length } elif isinstance(obj, np.ndarray): return { 'typ': 'ndarray', 'shape': obj.shape, 'ndim': obj.ndim, 'dtype': obj.dtype.num, 'data': convert(obj), 'compress': compressor } elif isinstance(obj, np.number): if np.iscomplexobj(obj): return { 'typ': 'np_scalar', 'sub_typ': 'np_complex', 'dtype': obj.dtype.name, 'real': obj.real.__repr__(), 'imag': obj.imag.__repr__() } else: return { 'typ': 'np_scalar', 'dtype': obj.dtype.name, 'data': obj.__repr__() } elif isinstance(obj, complex): return { 'typ': 'np_complex', 'real': obj.real.__repr__(), 'imag': obj.imag.__repr__() } return obj
def to_tensor(data): """Convert a numpy array to a torch tensor; a complex-valued input is first split into real and imaginary parts stacked along a new trailing axis.""" if np.iscomplexobj(data): data = np.stack((data.real, data.imag), axis=-1) return torch.from_numpy(data)
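# Quick usage check for the helper above (assumes torch is importable and that
# to_tensor is the function defined directly above): a complex array becomes a
# real tensor with the real/imag pair stacked on a trailing axis of length 2.
import numpy as np

arr = np.array([[1 + 2j, 3 - 4j]])
t = to_tensor(arr)
print(t.shape)   # torch.Size([1, 2, 2])
print(t[0, 1])   # tensor([ 3., -4.], dtype=torch.float64)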
def savetxt_nice(fname, X, fmt='%.18e', delimiter=' ', newline='\n', header='', footer='', comments='# ', encoding=None): """ Save an array to a text file. This is 95% a backport of numpy 1.15.1's savetxt. It does not support: - DataSource's URL - pathlib fnames - gz files Parameters ---------- fname : filename or file handle If the filename ends in ``.gz``, the file is automatically saved in compressed gzip format. `loadtxt` understands gzipped files transparently. X : 1D or 2D array_like Data to be saved to a text file. fmt : str or sequence of strs, optional A single format (%10.5f), a sequence of formats, or a multi-format string, e.g. 'Iteration %d -- %10.5f', in which case `delimiter` is ignored. For complex `X`, the legal options for `fmt` are: * a single specifier, `fmt='%.4e'`, resulting in numbers formatted like `' (%s+%sj)' % (fmt, fmt)` * a full string specifying every real and imaginary part, e.g. `' %.4e %+.4ej %.4e %+.4ej %.4e %+.4ej'` for 3 columns * a list of specifiers, one per column - in this case, the real and imaginary part must have separate specifiers, e.g. `['%.3e + %.3ej', '(%.15e%+.15ej)']` for 2 columns delimiter : str, optional String or character separating columns. newline : str, optional String or character separating lines. .. versionadded:: 1.5.0 header : str, optional String that will be written at the beginning of the file. .. versionadded:: 1.7.0 footer : str, optional String that will be written at the end of the file. .. versionadded:: 1.7.0 comments : str, optional String that will be prepended to the ``header`` and ``footer`` strings, to mark them as comments. Default: '# ', as expected by e.g. ``numpy.loadtxt``. .. versionadded:: 1.7.0 encoding : {None, str}, optional Encoding used to encode the outputfile. Does not apply to output streams. If the encoding is something other than 'bytes' or 'latin1' you will not be able to load the file in NumPy versions < 1.14. Default is 'latin1'. .. versionadded:: 1.14.0 See Also -------- save : Save an array to a binary file in NumPy ``.npy`` format savez : Save several arrays into an uncompressed ``.npz`` archive savez_compressed : Save several arrays into a compressed ``.npz`` archive Notes ----- Further explanation of the `fmt` parameter (``%[flag]width[.precision]specifier``): flags: ``-`` : left justify ``+`` : Forces to precede result with + or -. ``0`` : Left pad the number with zeros instead of space (see width). width: Minimum number of characters to be printed. The value is not truncated if it has more characters. precision: - For integer specifiers (eg. ``d,i,o,x``), the minimum number of digits. - For ``e, E`` and ``f`` specifiers, the number of digits to print after the decimal point. - For ``g`` and ``G``, the maximum number of significant digits. - For ``s``, the maximum number of characters. specifiers: ``c`` : character ``d`` or ``i`` : signed decimal integer ``e`` or ``E`` : scientific notation with ``e`` or ``E``. ``f`` : decimal floating point ``g,G`` : use the shorter of ``e,E`` or ``f`` ``o`` : signed octal ``s`` : string of characters ``u`` : unsigned decimal integer ``x,X`` : unsigned hexadecimal integer This explanation of ``fmt`` is not complete, for an exhaustive specification see [1]_. References ---------- .. [1] `Format Specification Mini-Language <http://docs.python.org/library/string.html# format-specification-mini-language>`_, Python Documentation. 
Examples -------- >>> x = y = z = np.arange(0.0,5.0,1.0) >>> np.savetxt('test.out', x, delimiter=',') # X is an array >>> np.savetxt('test.out', (x,y,z)) # x,y,z equal sized 1D arrays >>> np.savetxt('test.out', x, fmt='%1.4e') # use exponential notation """ # Py3 conversions first if isinstance(fmt, bytes): fmt = asstr(fmt) delimiter = asstr(delimiter) class WriteWrap(object): """Convert to unicode in py2 or to bytes on bytestream inputs.""" def __init__(self, fh, encoding): self.fh = fh self.encoding = encoding self.do_write = self.first_write def close(self): self.fh.close() def write(self, v): self.do_write(v) def write_bytes(self, v): if isinstance(v, bytes): self.fh.write(v) else: self.fh.write(v.encode(self.encoding)) def write_normal(self, v): self.fh.write(asunicode(v)) def first_write(self, v): try: self.write_normal(v) self.write = self.write_normal except TypeError: # input is probably a bytestream self.write_bytes(v) self.write = self.write_bytes own_fh = False #if is_pathlib_path(fname): #fname = str(fname) if _is_string_like(fname): # datasource doesn't support creating a new file ... open(fname, 'wt').close() fh = open(fname, 'wt', encoding=encoding) own_fh = True # need to convert str to unicode for text io output if sys.version_info[0] == 2: fh = WriteWrap(fh, encoding or 'latin1') elif hasattr(fname, 'write'): # wrap to handle byte output streams fh = WriteWrap(fname, encoding or 'latin1') #if _is_string_like(fname): #own_fh = True #if fname.endswith('.gz'): #import gzip #fh = gzip.open(fname, 'wb') #else: #if sys.version_info[0] >= 3: #fh = open(fname, 'wb') #else: #fh = open(fname, 'w') #elif hasattr(fname, 'write'): #fh = fname else: raise ValueError('fname must be a string or file handle') try: X = np.asarray(X) # Handle 1-dimensional arrays if X.ndim == 0 or X.ndim > 2: raise ValueError("Expected 1D or 2D array, got %dD array instead" % X.ndim) elif X.ndim == 1: # Common case -- 1d array of numbers if X.dtype.names is None: X = np.atleast_2d(X).T ncol = 1 # Complex dtype -- each field indicates a separate column else: ncol = len(X.dtype.descr) else: ncol = X.shape[1] iscomplex_X = np.iscomplexobj(X) # `fmt` can be a string with multiple insertion points or a # list of formats. E.g. '%10.5f\t%10d' or ('%10.5f', '$10d') if isinstance(fmt, (list, tuple)): if len(fmt) != ncol: raise AttributeError('fmt has wrong shape. %s' % str(fmt)) txt_format = asstr(delimiter).join(map(asstr, fmt)) elif isinstance(fmt, str): n_fmt_chars = fmt.count('%') error = ValueError('fmt has wrong number of %% formats: %s' % fmt) if n_fmt_chars == 1: if iscomplex_X: fmt = [ ' (%s+%sj)' % (fmt, fmt), ] * ncol else: fmt = [ fmt, ] * ncol txt_format = delimiter.join(fmt) elif iscomplex_X and n_fmt_chars != (2 * ncol): raise error elif ((not iscomplex_X) and n_fmt_chars != ncol): raise error else: txt_format = fmt else: raise ValueError('invalid fmt: %r' % (fmt, )) if len(header) > 0: header = header.replace('\n', '\n' + comments) fh.write(comments + header + newline) if iscomplex_X: for row in X: row2 = [] for number in row: row2.append(number.real) row2.append(number.imag) s = txt_format % tuple(row2) + newline fh.write(s.replace('+-', '-')) else: for row in X: try: v = txt_format % tuple(row) + newline except TypeError: raise TypeError("Mismatch between array dtype ('%s') and " "format specifier ('%s')" % (str(X.dtype), txt_format)) fh.write(v) if len(footer) > 0: footer = footer.replace('\n', '\n' + comments) fh.write(comments + footer + newline) finally: if own_fh: fh.close()
reveal_type(np.imag(AR_c16))  # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
reveal_type(np.imag(AR_LIKE_f))  # E: numpy.ndarray[Any, numpy.dtype[Any]]
reveal_type(np.iscomplex(f8))  # E: numpy.bool_
reveal_type(np.iscomplex(AR_f8))  # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]]
reveal_type(np.iscomplex(AR_LIKE_f))  # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]]
reveal_type(np.isreal(f8))  # E: numpy.bool_
reveal_type(np.isreal(AR_f8))  # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]]
reveal_type(np.isreal(AR_LIKE_f))  # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]]
reveal_type(np.iscomplexobj(f8))  # E: bool
reveal_type(np.isrealobj(f8))  # E: bool
reveal_type(np.nan_to_num(f8))  # E: {float64}
reveal_type(np.nan_to_num(f, copy=True))  # E: Any
reveal_type(np.nan_to_num(AR_f8, nan=1.5))  # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
reveal_type(np.nan_to_num(AR_LIKE_f, posinf=9999))  # E: numpy.ndarray[Any, numpy.dtype[Any]]
reveal_type(np.real_if_close(AR_f8))  # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
reveal_type(np.real_if_close(AR_c16))  # E: Union[numpy.ndarray[Any, numpy.dtype[{float64}]], numpy.ndarray[Any, numpy.dtype[{complex128}]]]
reveal_type(
def init_guess_by_chkfile(mol, chkfile_name, project=None): '''Read SCF chkfile and make the density matrix for UHF initial guess. Kwargs: project : None or bool Whether to project chkfile's orbitals to the new basis. Note when the geometry of the chkfile and the given molecule are very different, this projection can produce very poor initial guess. In PES scanning, it is recommended to swith off project. If project is set to None, the projection is only applied when the basis sets of the chkfile's molecule are different to the basis sets of the given molecule (regardless whether the geometry of the two molecules are different). Note the basis sets are considered to be different if the two molecules are derived from the same molecule with different ordering of atoms. ''' from pyscf.scf import addons chk_mol, scf_rec = chkfile.load_scf(chkfile_name) if project is None: project = not gto.same_basis_set(chk_mol, mol) # Check whether the two molecules are similar def inertia_momentum(mol): im = gto.inertia_momentum(mol._atom, mol.atom_charges(), mol.atom_coords()) return scipy.linalg.eigh(im)[0] if abs(inertia_momentum(mol) - inertia_momentum(chk_mol)).sum() > 0.5: logger.warn( mol, "Large deviations found between the input " "molecule and the molecule from chkfile\n" "Initial guess density matrix may have large error.") if project: s = hf.get_ovlp(mol) def fproj(mo): if project: mo = addons.project_mo_nr2nr(chk_mol, mo, mol) norm = numpy.einsum('pi,pi->i', mo.conj(), s.dot(mo)) mo /= numpy.sqrt(norm) return mo mo = scf_rec['mo_coeff'] mo_occ = scf_rec['mo_occ'] if getattr(mo[0], 'ndim', None) == 1: # RHF if numpy.iscomplexobj(mo): raise NotImplementedError( 'TODO: project DHF orbital to UHF orbital') mo_coeff = fproj(mo) mo_occa = (mo_occ > 1e-8).astype(numpy.double) mo_occb = mo_occ - mo_occa dm = make_rdm1([mo_coeff, mo_coeff], [mo_occa, mo_occb]) else: #UHF if getattr(mo[0][0], 'ndim', None) == 2: # KUHF logger.warn( mol, 'k-point UHF results are found. Density matrix ' 'at Gamma point is used for the molecular SCF initial guess') mo = mo[0] dm = make_rdm1([fproj(mo[0]), fproj(mo[1])], mo_occ) return dm
def zoom(input, zoom, output=None, order=3, mode='constant', cval=0.0, prefilter=True): """ Zoom an array. The array is zoomed using spline interpolation of the requested order. Parameters ---------- %(input)s zoom : float or sequence The zoom factor along the axes. If a float, `zoom` is the same for each axis. If a sequence, `zoom` should contain one value for each axis. %(output)s order : int, optional The order of the spline interpolation, default is 3. The order has to be in the range 0-5. %(mode)s %(cval)s %(prefilter)s Returns ------- zoom : ndarray The zoomed input. Examples -------- >>> from scipy import ndimage, misc >>> import matplotlib.pyplot as plt >>> fig = plt.figure() >>> ax1 = fig.add_subplot(121) # left side >>> ax2 = fig.add_subplot(122) # right side >>> ascent = misc.ascent() >>> result = ndimage.zoom(ascent, 3.0) >>> ax1.imshow(ascent) >>> ax2.imshow(result) >>> plt.show() >>> print(ascent.shape) (512, 512) >>> print(result.shape) (1536, 1536) """ if order < 0 or order > 5: raise RuntimeError('spline order not supported') input = numpy.asarray(input) if numpy.iscomplexobj(input): raise TypeError('Complex type not supported') if input.ndim < 1: raise RuntimeError('input and output rank must be > 0') mode = _ni_support._extend_mode_to_code(mode) if prefilter and order > 1: filtered = spline_filter(input, order, output=numpy.float64) else: filtered = input zoom = _ni_support._normalize_sequence(zoom, input.ndim) output_shape = tuple( [int(round(ii * jj)) for ii, jj in zip(input.shape, zoom)]) output_shape_old = tuple( [int(ii * jj) for ii, jj in zip(input.shape, zoom)]) if output_shape != output_shape_old: warnings.warn( "From scipy 0.13.0, the output shape of zoom() is calculated " "with round() instead of int() - for these inputs the size of " "the returned array has changed.", UserWarning) zoom_div = numpy.array(output_shape, float) - 1 # Zooming to infinite values is unpredictable, so just choose # zoom factor 1 instead zoom = numpy.divide(numpy.array(input.shape) - 1, zoom_div, out=numpy.ones_like(input.shape, dtype=numpy.float64), where=zoom_div != 0) output = _ni_support._get_output(output, input, shape=output_shape) zoom = numpy.ascontiguousarray(zoom) _nd_image.zoom_shift(filtered, zoom, None, output, order, mode, cval) return output
def _copy_arr_with_correct_type(self, arr): """Return ``arr`` cast to the class's complex type if it is complex-valued, otherwise cast to the real type; with ``copy=False`` no copy is made when the dtype already matches.""" return arr.astype(Bpm3d._complex_type, copy=False) if np.iscomplexobj(arr) else arr.astype(Bpm3d._real_type, copy=False)
def is_complex(self) -> bool: """Is the Leaf complex valued? """ return np.iscomplexobj(self.value)
def shift(input, shift, output=None, order=3, mode='constant', cval=0.0, prefilter=True): """ Shift an array. The array is shifted using spline interpolation of the requested order. Points outside the boundaries of the input are filled according to the given mode. Parameters ---------- %(input)s shift : float or sequence The shift along the axes. If a float, `shift` is the same for each axis. If a sequence, `shift` should contain one value for each axis. %(output)s order : int, optional The order of the spline interpolation, default is 3. The order has to be in the range 0-5. %(mode_interp_constant)s %(cval)s %(prefilter)s Returns ------- shift : ndarray The shifted input. Notes ----- For complex-valued `input`, this function shifts the real and imaginary components independently. .. versionadded:: 1.6.0 Complex-valued support added. """ if order < 0 or order > 5: raise RuntimeError('spline order not supported') input = numpy.asarray(input) if input.ndim < 1: raise RuntimeError('input and output rank must be > 0') complex_output = numpy.iscomplexobj(input) output = _ni_support._get_output(output, input, complex_output=complex_output) if complex_output: # import under different name to avoid confusion with shift parameter from scipy.ndimage.interpolation import shift as _shift kwargs = dict(order=order, mode=mode, prefilter=prefilter) _shift(input.real, shift, output=output.real, cval=numpy.real(cval), **kwargs) _shift(input.imag, shift, output=output.imag, cval=numpy.imag(cval), **kwargs) return output if prefilter and order > 1: padded, npad = _prepad_for_spline_filter(input, mode, cval) filtered = spline_filter(padded, order, output=numpy.float64, mode=mode) else: npad = 0 filtered = input mode = _ni_support._extend_mode_to_code(mode) shift = _ni_support._normalize_sequence(shift, input.ndim) shift = [-ii for ii in shift] shift = numpy.asarray(shift, dtype=numpy.float64) if not shift.flags.contiguous: shift = shift.copy() _nd_image.zoom_shift(filtered, None, shift, output, order, mode, cval, npad, False) return output
def rotate(input, angle, axes=(1, 0), reshape=True, output=None, order=3, mode='constant', cval=0.0, prefilter=True): """ Rotate an array. The array is rotated in the plane defined by the two axes given by the `axes` parameter using spline interpolation of the requested order. Parameters ---------- %(input)s angle : float The rotation angle in degrees. axes : tuple of 2 ints, optional The two axes that define the plane of rotation. Default is the first two axes. reshape : bool, optional If `reshape` is true, the output shape is adapted so that the input array is contained completely in the output. Default is True. %(output)s order : int, optional The order of the spline interpolation, default is 3. The order has to be in the range 0-5. %(mode_interp_constant)s %(cval)s %(prefilter)s Returns ------- rotate : ndarray The rotated input. Notes ----- For complex-valued `input`, this function rotates the real and imaginary components independently. .. versionadded:: 1.6.0 Complex-valued support added. Examples -------- >>> from scipy import ndimage, misc >>> import matplotlib.pyplot as plt >>> fig = plt.figure(figsize=(10, 3)) >>> ax1, ax2, ax3 = fig.subplots(1, 3) >>> img = misc.ascent() >>> img_45 = ndimage.rotate(img, 45, reshape=False) >>> full_img_45 = ndimage.rotate(img, 45, reshape=True) >>> ax1.imshow(img, cmap='gray') >>> ax1.set_axis_off() >>> ax2.imshow(img_45, cmap='gray') >>> ax2.set_axis_off() >>> ax3.imshow(full_img_45, cmap='gray') >>> ax3.set_axis_off() >>> fig.set_tight_layout(True) >>> plt.show() >>> print(img.shape) (512, 512) >>> print(img_45.shape) (512, 512) >>> print(full_img_45.shape) (724, 724) """ input_arr = numpy.asarray(input) ndim = input_arr.ndim if ndim < 2: raise ValueError('input array should be at least 2D') axes = list(axes) if len(axes) != 2: raise ValueError('axes should contain exactly two values') if not all([float(ax).is_integer() for ax in axes]): raise ValueError('axes should contain only integer values') if axes[0] < 0: axes[0] += ndim if axes[1] < 0: axes[1] += ndim if axes[0] < 0 or axes[1] < 0 or axes[0] >= ndim or axes[1] >= ndim: raise ValueError('invalid rotation plane specified') axes.sort() c, s = special.cosdg(angle), special.sindg(angle) rot_matrix = numpy.array([[c, s], [-s, c]]) img_shape = numpy.asarray(input_arr.shape) in_plane_shape = img_shape[axes] if reshape: # Compute transformed input bounds iy, ix = in_plane_shape out_bounds = rot_matrix @ [[0, 0, iy, iy], [0, ix, 0, ix]] # Compute the shape of the transformed input plane out_plane_shape = (out_bounds.ptp(axis=1) + 0.5).astype(int) else: out_plane_shape = img_shape[axes] out_center = rot_matrix @ ((out_plane_shape - 1) / 2) in_center = (in_plane_shape - 1) / 2 offset = in_center - out_center output_shape = img_shape output_shape[axes] = out_plane_shape output_shape = tuple(output_shape) complex_output = numpy.iscomplexobj(input_arr) output = _ni_support._get_output(output, input_arr, shape=output_shape, complex_output=complex_output) if ndim <= 2: affine_transform(input_arr, rot_matrix, offset, output_shape, output, order, mode, cval, prefilter) else: # If ndim > 2, the rotation is applied over all the planes # parallel to axes planes_coord = itertools.product( *[[slice(None)] if ax in axes else range(img_shape[ax]) for ax in range(ndim)]) out_plane_shape = tuple(out_plane_shape) for coordinates in planes_coord: ia = input_arr[coordinates] oa = output[coordinates] affine_transform(ia, rot_matrix, offset, out_plane_shape, oa, order, mode, cval, prefilter) return output
def odeintw(func, y0, t, **kwargs):
    """
    An odeint-like function for complex array-valued differential equations.

    The function `scipy.integrate.odeint` is a wrapper of the LSODA function
    for solving ordinary differential equations.  It is designed to handle
    a system of first order differential equations expressed as a vector
    function.  `odeint` does not handle equations with complex dependent
    variables, or array equations.

    `odeintw` is a wrapper of `odeint` that adds the ability to handle
    complex and array differential equations.  See the docstring of odeint
    for an explanation of its arguments.

    Unlike odeint, all arguments after the first three positional arguments
    `func` (the system definition function), `y0` (the initial condition)
    and `t` (the time values) must be given as keyword arguments.

    Initial conditions
    ------------------
    The initial condition `y0` given to `odeintw` determines the type of the
    solution that is generated.  The data type and shape of the value
    returned by `func` must match those of the initial condition.

    If the solution is to be complex, the initial condition must be complex.
    To solve a complex differential equation with real initial conditions,
    give complex initial conditions with zero imaginary parts.

    Similarly, the shape of the solution of a matrix differential equation
    is determined by the shape of the initial condition.  For example, if
    the initial condition has shape (2,3), then `func` must return a numpy
    array (or something array-like) that has shape (2,3).

    Special handling of Jacobian arguments
    --------------------------------------
    The argument `Dfun` may be used with array equations.  If `shp` is the
    shape of the array, then the shape of the Jacobian array returned by
    `Dfun` must be ``shp + shp``.  For example, if the dependent variable
    ``a`` is a 2-d array and ``F = func(a, t)``, then ``jac[m, n, i, j]``
    holds ``dF[m, n]/da[i, j]``.

    `Dfun` may also be used with complex equations.  However, if the
    functions computed by `func` are not complex differentiable, the
    Jacobian function should not be used.  To use the Jacobian argument in
    this case, the equations should be rewritten as a system of real
    equations for the real and imaginary parts of the solution.  For
    example, the conjugation operation is not complex differentiable, so to
    use an explicit Jacobian for the complex scalar equation
        dz/dt = z.conj(),
    the equation must be written as
        dx/dt = x
        dy/dt = -y
    Then the Jacobian of the real system is [[1, 0], [0, -1]].  If `Dfun`
    is not given as an argument, the system may be left as a complex
    differential equation.

    In the case of arrays with dimension 2 or more, the odeint arguments
    `col_deriv`, `ml` and `mu` cannot be used.

    Complex array equations are handled, but to use `Dfun`, the same
    requirement on the complex differentiability of the components holds.
    """
    full_output = kwargs.get('full_output', False)
    Dfun = kwargs.pop('Dfun', None)

    y0 = np.atleast_1d(y0)
    shape = y0.shape

    if y0.ndim == 1:
        func1 = func
        jacfunc1 = Dfun
    else:
        # y0 has dimension greater than 1.
        _check_args(kwargs)

        # Flatten y0, and create a wrapper for func that can be used
        # by odeint.
        y0 = y0.ravel()

        def vecfunc(y, t, *args):
            a = y.reshape(shape)
            dadt = func(a, t, *args)
            return np.asarray(dadt).ravel()

        func1 = vecfunc

        if Dfun is not None:

            def jacfunc(y, t, *args):
                a = y.reshape(shape)
                jac = Dfun(a, t, *args)
                return np.asarray(jac).reshape(y0.size, y0.size)

            jacfunc1 = jacfunc
        else:
            jacfunc1 = None

    if not np.iscomplexobj(y0):
        y0 = y0.astype(np.float64, **_astype_kwargs)
        func2 = func1
        jacfunc2 = jacfunc1
    else:
        # y0 is complex.
        col_deriv = kwargs.pop('col_deriv', False)
        ml = kwargs.pop('ml', None)
        mu = kwargs.pop('mu', None)
        kwargs['ml'] = None if ml is None else 1 + 2 * ml
        kwargs['mu'] = None if mu is None else 1 + 2 * mu

        # Cast y0 to np.complex128.
        y0 = y0.astype(np.complex128, **_astype_kwargs)

        # realfunc is a wrapper of the user's function that can be
        # used by odeint.
        def realfunc(y, t, *args):
            z = y.view(np.complex128)
            dzdt = func1(z, t, *args)
            # func might return a python list, so convert its return
            # value to an array with type np.complex128, and then return
            # a np.float64 view of that array.
            dydt = np.asarray(dzdt, dtype=np.complex128).view(np.float64)
            return dydt

        func2 = realfunc

        if jacfunc1 is not None:

            def jacfuncz(y, t, *args):
                z = y.view(np.complex128)
                jac = jacfunc1(z, t, *args)
                if col_deriv:
                    # If col_deriv is True, transpose the result returned
                    # by jacfunc1, and continue as if col_deriv was False.
                    jac = jac.T
                # Convert jac to real_jac, an array in which each complex
                # value a + i*b in jac is expanded to the 2x2 array
                # [[a, -b], [b, a]].
                real_jac = _complex_to_real_jac(jac)
                if ml is not None or mu is not None:
                    # Banded; shift every other column up one.
                    real_jac = _transform_banded_jac(real_jac)
                return real_jac

            jacfunc2 = jacfuncz
        else:
            jacfunc2 = None

    # Call scipy.integrate.odeint with our wrapper.
    result = odeint(func2, y0.view(np.float64), t, Dfun=jacfunc2, **kwargs)

    if full_output:
        sol, infodict = result
    else:
        sol = result

    if np.iscomplexobj(y0):
        # Restore the complex view.
        sol = sol.view(np.complex128)

    # Restore the shape.
    sol = sol.reshape(-1, *shape)

    if full_output:
        result = (sol, infodict)
    else:
        result = sol

    return result
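# Minimal usage sketch for odeintw above. The import path is an assumption
# (it matches the standalone odeintw package layout); the damped complex
# oscillator is purely illustrative. Note that, per the docstring, extra
# odeint options such as args must be passed as keyword arguments.
import numpy as np
from odeintw import odeintw  # assumed import path for the wrapper above

def damped_oscillator(z, t, omega, gamma):
    # dz/dt = (i*omega - gamma) * z
    return (1j * omega - gamma) * z

t = np.linspace(0.0, 10.0, 201)
z0 = 1.0 + 0.0j  # complex initial condition -> complex solution
sol = odeintw(damped_oscillator, z0, t, args=(2.0 * np.pi, 0.1))

print(sol.shape, sol.dtype)  # (201, 1) complex128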
def zoom(input, zoom, output=None, order=3, mode='constant', cval=0.0,
         prefilter=True, *, grid_mode=False):
    """
    Zoom an array.

    The array is zoomed using spline interpolation of the requested order.

    Parameters
    ----------
    %(input)s
    zoom : float or sequence
        The zoom factor along the axes. If a float, `zoom` is the same for
        each axis. If a sequence, `zoom` should contain one value for each
        axis.
    %(output)s
    order : int, optional
        The order of the spline interpolation, default is 3.
        The order has to be in the range 0-5.
    %(mode_interp_constant)s
    %(cval)s
    %(prefilter)s
    grid_mode : bool, optional
        If False, the distance from the pixel centers is zoomed. Otherwise,
        the distance including the full pixel extent is used. For example,
        a 1d signal of length 5 is considered to have length 4 when
        `grid_mode` is False, but length 5 when `grid_mode` is True. See
        the following visual illustration:

        .. code-block:: text

                | pixel 1 | pixel 2 | pixel 3 | pixel 4 | pixel 5 |
                     |<-------------------------------------->|
                                        vs.
                |<----------------------------------------------->|

        The starting point of the arrow in the diagram above corresponds to
        coordinate location 0 in each mode.

    Returns
    -------
    zoom : ndarray
        The zoomed input.

    Notes
    -----
    For complex-valued `input`, this function zooms the real and imaginary
    components independently.

    .. versionadded:: 1.6.0
        Complex-valued support added.

    Examples
    --------
    >>> from scipy import ndimage, misc
    >>> import matplotlib.pyplot as plt

    >>> fig = plt.figure()
    >>> ax1 = fig.add_subplot(121)  # left side
    >>> ax2 = fig.add_subplot(122)  # right side
    >>> ascent = misc.ascent()
    >>> result = ndimage.zoom(ascent, 3.0)
    >>> ax1.imshow(ascent, vmin=0, vmax=255)
    >>> ax2.imshow(result, vmin=0, vmax=255)
    >>> plt.show()

    >>> print(ascent.shape)
    (512, 512)

    >>> print(result.shape)
    (1536, 1536)
    """
    if order < 0 or order > 5:
        raise RuntimeError('spline order not supported')
    input = numpy.asarray(input)
    if input.ndim < 1:
        raise RuntimeError('input and output rank must be > 0')
    zoom = _ni_support._normalize_sequence(zoom, input.ndim)
    output_shape = tuple(
        [int(round(ii * jj)) for ii, jj in zip(input.shape, zoom)])
    complex_output = numpy.iscomplexobj(input)
    output = _ni_support._get_output(output, input, shape=output_shape,
                                     complex_output=complex_output)
    if complex_output:
        # import under different name to avoid confusion with zoom parameter
        from scipy.ndimage.interpolation import zoom as _zoom

        kwargs = dict(order=order, mode=mode, prefilter=prefilter)
        _zoom(input.real, zoom, output=output.real, cval=numpy.real(cval),
              **kwargs)
        _zoom(input.imag, zoom, output=output.imag, cval=numpy.imag(cval),
              **kwargs)
        return output
    if prefilter and order > 1:
        padded, npad = _prepad_for_spline_filter(input, mode, cval)
        filtered = spline_filter(padded, order, output=numpy.float64,
                                 mode=mode)
    else:
        npad = 0
        filtered = input
    if grid_mode:
        # warn about modes that may have surprising behavior
        suggest_mode = None
        if mode == 'constant':
            suggest_mode = 'grid-constant'
        elif mode == 'wrap':
            suggest_mode = 'grid-wrap'
        if suggest_mode is not None:
            warnings.warn(
                ("It is recommended to use mode = {} instead of {} when "
                 "grid_mode is True.").format(suggest_mode, mode)
            )
    mode = _ni_support._extend_mode_to_code(mode)

    zoom_div = numpy.array(output_shape)
    zoom_nominator = numpy.array(input.shape)
    if not grid_mode:
        zoom_div -= 1
        zoom_nominator -= 1

    # Zooming to infinite values is unpredictable, so just choose
    # zoom factor 1 instead
    zoom = numpy.divide(zoom_nominator, zoom_div,
                        out=numpy.ones_like(input.shape,
                                            dtype=numpy.float64),
                        where=zoom_div != 0)
    zoom = numpy.ascontiguousarray(zoom)
    _nd_image.zoom_shift(filtered, zoom, None, output, order, mode, cval,
                         npad, grid_mode)
    return output
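# Small illustration of the grid_mode distinction described in the zoom
# docstring above. Assumes the public scipy.ndimage entry point with the
# grid_mode keyword (SciPy >= 1.6); array contents and zoom factor are
# arbitrary.
import numpy as np
from scipy import ndimage

x = np.arange(5, dtype=float)

# grid_mode=False (default): the span between the first and last pixel
# centers (length 4 for a 5-sample signal) is stretched onto the output.
a = ndimage.zoom(x, 2, grid_mode=False)

# grid_mode=True: the full pixel extent (length 5) is resampled;
# mode='grid-constant' avoids the warning emitted for mode='constant'.
b = ndimage.zoom(x, 2, grid_mode=True, mode='grid-constant')

print(a.shape, b.shape)  # (10,) (10,)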
def _validate(self, array_type, test_shape, dtype, s, kwargs):

    input_array = self.munge_input_array(
        array_type(test_shape, dtype), kwargs)

    orig_input_array = copy.copy(input_array)

    np_input_array = numpy.asarray(input_array)

    if np_input_array.dtype == 'clongdouble':
        np_input_array = numpy.complex128(input_array)
    elif np_input_array.dtype == 'longdouble':
        np_input_array = numpy.float64(input_array)

    with warnings.catch_warnings(record=True) as w:
        # We catch the warnings so as to pick up on when
        # a complex array is turned into a real array

        if 'axes' in kwargs:
            axes = {'axes': kwargs['axes']}
        elif 'axis' in kwargs:
            axes = {'axis': kwargs['axis']}
        else:
            axes = {}

        try:
            test_out_array = getattr(self.validator_module, self.func)(
                copy.copy(np_input_array), s, **axes)
        except Exception as e:
            interface_exception = None
            try:
                getattr(self.test_interface, self.func)(
                    copy.copy(input_array), s, **kwargs)
            except Exception as _interface_exception:
                # It's necessary to assign the exception to the
                # already defined variable in Python 3.
                # See http://www.python.org/dev/peps/pep-3110/#semantic-changes
                interface_exception = _interface_exception

            # If the test interface raised, so must this.
            self.assertEqual(type(interface_exception), type(e),
                             msg='Interface exception raised. '
                                 'Testing for: ' + repr(e))
            return

        output_array = getattr(self.test_interface, self.func)(
            copy.copy(input_array), s, **kwargs)

        if functions[self.func] == 'r2c':
            if numpy.iscomplexobj(input_array):
                if len(w) > 0:
                    # Make sure a warning is raised
                    self.assertIs(w[-1].category, numpy.ComplexWarning)

    self.assertTrue(
        numpy.allclose(output_array, test_out_array,
                       rtol=1e-2, atol=1e-4))

    input_precision_dtype = numpy.asanyarray(input_array).real.dtype
    self.assertEqual(input_precision_dtype, output_array.real.dtype)

    if (self.overwrite_input_flag not in kwargs or
            not kwargs[self.overwrite_input_flag]):
        self.assertTrue(numpy.allclose(input_array, orig_input_array))

    return output_array
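# For orientation, the comparison at the heart of _validate can be sketched
# standalone. The tolerances mirror those used above; both "interfaces"
# here are numpy.fft purely for illustration, whereas the real test compares
# self.test_interface against self.validator_module.
import numpy as np

x = np.random.RandomState(0).randn(32)

reference = np.fft.fft(x)          # plays the role of validator_module
candidate = np.fft.fft(x.copy())   # plays the role of test_interface

assert np.allclose(candidate, reference, rtol=1e-2, atol=1e-4)
# The input must be left untouched when overwrite_input is not requested.
assert np.allclose(x, np.random.RandomState(0).randn(32))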