def feedfwd(model, data_source, embedict, bptt, eval_batch_size, dictionary, emsize, criterion):
    # Turn on evaluation mode, which disables dropout.
    model.eval()
    ntokens = len(dictionary)
    hidden = model.init_hidden(eval_batch_size)
    total_loss = 0
    acc_token = cp.float64(0)
    acc10_token = cp.float64(0)
    mrr_token = cp.float64(0)
    ln = cp.int64(0)
    metrics = [acc_token, mrr_token, acc10_token]
    for i in range(0, data_source.size(0) - 1, bptt):
        data, targets = get_batch(data_source, i, bptt, evaluation=True)
        data = index2embed(data, embedict, dictionary, emsize)
        output, hidden = model(data, hidden)
        output_flat = output.view(-1, ntokens)
        pred = output_flat.data.cpu().numpy()
        refers = targets.data.cpu().numpy()
        # Convert to CuPy arrays before accumulating the performance metrics.
        pred = cp.array(pred)
        refers = cp.array(refers)
        ln += len(refers)
        metrics = stats(pred, refers, metrics)
        hidden = repackage_hidden(hidden)
    return metrics[0] / ln * 100, metrics[1] / ln, metrics[2] / ln * 100
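# A minimal sketch of a `stats`-style accumulator consistent with the metric
# list used above (top-1 accuracy, MRR, top-10 accuracy). The real helper is
# not shown in this section, so the ranking details here are assumptions.
import cupy as cp

def stats(pred, refers, metrics):
    """Accumulate top-1 hits, reciprocal ranks and top-10 hits.

    pred: (N, ntokens) array of scores; refers: (N,) array of target ids.
    """
    acc, mrr, acc10 = metrics
    # Rank of each reference token: one plus the number of tokens that
    # scored strictly higher than it.
    true_scores = pred[cp.arange(len(refers)), refers]
    ranks = (pred > true_scores[:, None]).sum(axis=1) + 1
    acc += (ranks == 1).sum()
    mrr += (1.0 / ranks).sum()
    acc10 += (ranks <= 10).sum()
    return [acc, mrr, acc10]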
def update_positions(self, other_populations):
    """Intermediate function that gets everything in order for the optimized method."""
    angle = self.angles
    theta_sense = np.float64(self.theta_sense)
    horizon_sense = np.float64(self.horizon_sense)
    theta_walk = np.float64(self.theta_walk)
    horizon_walk = np.float64(self.horizon_walk)
    population = self.population
    adapted_trail = self.trail_map + (self.template * self.template_strength)

    # Include the other species' patterns in this population's feeding behaviour.
    for this_pop in other_populations:
        adapted_trail = adapted_trail + (this_pop * self.social_behaviour)

    new_population, new_angles = Physarum.optimized_update_positions(
        population, angle, theta_sense, horizon_sense, theta_walk,
        horizon_walk, trace_array=adapted_trail)
    self.population = new_population
    self.angles = new_angles
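# A hedged sketch of what the sense-and-steer step behind
# Physarum.optimized_update_positions might look like; the real static method
# is not shown here, so the (N, 2) float population layout, the toroidal
# wrapping, and the steering rule below are all assumptions for illustration.
import numpy as np

def sense(trace, population, angles, offset, horizon):
    """Sample the trail field at a sensor offset from each agent's heading."""
    h, w = trace.shape
    xs = (population[:, 0] + horizon * np.cos(angles + offset)).astype(int) % w
    ys = (population[:, 1] + horizon * np.sin(angles + offset)).astype(int) % h
    return trace[ys, xs]

def optimized_update_positions(population, angles, theta_sense, horizon_sense,
                               theta_walk, horizon_walk, trace_array):
    left = sense(trace_array, population, angles, theta_sense, horizon_sense)
    ahead = sense(trace_array, population, angles, 0.0, horizon_sense)
    right = sense(trace_array, population, angles, -theta_sense, horizon_sense)
    # Keep heading while the forward sensor wins, otherwise turn toward the
    # stronger of the two side sensors.
    turn = np.where(left > right, theta_walk, -theta_walk)
    angles = np.where(ahead >= np.maximum(left, right), angles, angles + turn)
    h, w = trace_array.shape
    population[:, 0] = (population[:, 0] + horizon_walk * np.cos(angles)) % w
    population[:, 1] = (population[:, 1] + horizon_walk * np.sin(angles)) % h
    return population, angles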
def updateGeometry(self):
    # GPU variables
    self._psi = cp.zeros(self.shape, dtype=cp.complex64)
    self._phi = cp.zeros(self.shape, dtype=cp.uint8)
    self._theta = cp.zeros(self.shape, dtype=cp.float32)
    self._rho = cp.zeros(self.shape, dtype=cp.float32)
    alpha = cp.cos(cp.radians(self.phis, dtype=cp.float32))
    x = alpha * (cp.arange(self.width, dtype=cp.float32) - cp.float32(self.xs))
    y = cp.arange(self.height, dtype=cp.float32) - cp.float32(self.ys)
    qx = self.qprp * x
    qy = self.qprp * y
    self._iqx = (1j * qx).astype(cp.complex64)
    self._iqy = (1j * qy).astype(cp.complex64)
    self._iqxz = (1j * self.qpar * x * x).astype(cp.complex64)
    self._iqyz = (1j * self.qpar * y * y).astype(cp.complex64)
    self.outeratan2f(y, x, self._theta)
    self.outerhypot(qy, qx, self._rho)
    # CPU variables
    self.phi = self._phi.get()
    self.iqx = self._iqx.get()
    self.iqy = self._iqy.get()
    self.theta = self._theta.get().astype(cp.float64)
    self.qr = self._rho.get().astype(cp.float64)
    self.sigUpdateGeometry.emit()
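# The outeratan2f/outerhypot calls above fill preallocated 2-D arrays with
# pairwise (outer-product style) results. A minimal sketch of such helpers,
# assuming that interpretation, could be:
import cupy as cp

def outeratan2f(y, x, out):
    # out[i, j] = atan2(y[i], x[j])
    cp.arctan2(y[:, None], x[None, :], out=out)

def outerhypot(qy, qx, out):
    # out[i, j] = hypot(qy[i], qx[j])
    cp.hypot(qy[:, None], qx[None, :], out=out)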
def get_phshift(beamdir: float, freq: float, antenna: int, num_antennas: int,
                antenna_spacing: float, centre_offset: float = 0.0) -> float:
    """
    Find the phase shift for a given antenna and beam direction.

    Form the beam given the beam direction (degrees off boresight), the tx
    frequency, the antenna number, the number of antennas in the array, and
    the spacing between antennas.

    Parameters
    ----------
    beamdir: float
        The azimuthal direction of the beam off boresight, in degrees,
        positive beamdir being to the right of the boresight (looking along
        boresight from ground). This is for this antenna.
    freq: float
        Transmit frequency in kHz
    antenna: int
        Antenna number, INDEXED FROM ZERO, zero being the leftmost antenna if
        looking down the boresight and positive beamdir right of boresight
    num_antennas: int
        Number of antennas in this array
    antenna_spacing: float
        Distance between antennas in this array, in meters
    centre_offset: float
        The phase reference for the midpoint of the array. Default = 0.0, in
        metres. Important if there is a shift in centre point between arrays
        in the direction along the array. Positive is shifted to the right
        when looking along boresight (from the ground).

    Returns
    -------
    phshift: float
        A phase shift for the samples for this antenna number, in radians.
    """
    freq = freq * 1000.0  # convert to Hz.

    # Convert to radians
    beamrad = xp.pi * xp.float64(beamdir) / 180.0

    # Pointing to right of boresight, use point in middle (hypothetically
    # antenna 7.5) as phshift=0
    phshift = 2 * xp.pi * freq * (((num_antennas - 1) / 2.0 - antenna) *
                                  antenna_spacing + centre_offset) * \
        xp.cos(xp.pi / 2.0 - beamrad) / speed_of_light

    # Bring into range (-2*pi, 2*pi)
    phshift = xp.fmod(phshift, 2 * xp.pi)

    return phshift
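# A minimal usage sketch; `xp` is assumed to alias numpy (or cupy) and
# `speed_of_light` to be a module-level constant in m/s, as used above. The
# 16-antenna array and 15.24 m spacing are assumptions for illustration.
import numpy as xp
speed_of_light = 299792458.0

# Phase shift per antenna when steering 10 degrees right of boresight at
# 10500 kHz (10.5 MHz):
shifts = [get_phshift(10.0, 10500.0, ant, 16, 15.24) for ant in range(16)]
print(shifts[0], shifts[-1])  # antisymmetric about the array midpoint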
def cov(a, y=None, rowvar=True, bias=False, ddof=None):
    """Returns the covariance matrix of an array.

    This function currently does not support ``fweights`` and ``aweights``
    options.

    Args:
        a (cupy.ndarray): Array to compute covariance matrix.
        y (cupy.ndarray): An additional set of variables and observations.
        rowvar (bool): If ``True``, then each row represents a variable,
            with observations in the columns. Otherwise, the relationship
            is transposed.
        bias (bool): If ``False``, normalization is by ``(N - 1)``, where N
            is the number of observations given (unbiased estimate). If
            ``True``, then normalization is by ``N``.
        ddof (int): If not ``None`` the default value implied by bias is
            overridden. Note that ``ddof=1`` will return the unbiased
            estimate and ``ddof=0`` will return the simple average.

    Returns:
        cupy.ndarray: The covariance matrix of the input array.

    .. seealso:: :func:`numpy.cov`
    """
    if ddof is not None and ddof != int(ddof):
        raise ValueError('ddof must be integer')

    if a.ndim > 2:
        raise ValueError('Input must be <= 2-d')

    if y is None:
        dtype = numpy.promote_types(a.dtype, numpy.float64)
    else:
        if y.ndim > 2:
            raise ValueError('y must be <= 2-d')
        dtype = functools.reduce(numpy.promote_types,
                                 (a.dtype, y.dtype, numpy.float64))

    X = cupy.array(a, ndmin=2, dtype=dtype)
    if not rowvar and X.shape[0] != 1:
        X = X.T
    if X.shape[0] == 0:
        return cupy.array([]).reshape(0, 0)
    if y is not None:
        y = cupy.array(y, copy=False, ndmin=2, dtype=dtype)
        if not rowvar and y.shape[0] != 1:
            y = y.T
        X = core.concatenate_method((X, y), axis=0)

    if ddof is None:
        ddof = 0 if bias else 1

    fact = X.shape[1] - ddof
    if fact <= 0:
        warnings.warn('Degrees of freedom <= 0 for slice',
                      RuntimeWarning, stacklevel=2)
        fact = 0.0

    X -= X.mean(axis=1)[:, None]
    out = X.dot(X.T.conj()) * (1 / cupy.float64(fact))

    return out.squeeze()
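# A minimal usage sketch checking against numpy.cov; the data values are
# assumptions for illustration.
import cupy
import numpy

x = cupy.array([[0.0, 1.0, 2.0], [2.0, 1.0, 0.0]])
print(cov(x))                      # variances 1.0, covariance -1.0
print(numpy.cov(cupy.asnumpy(x)))  # NumPy reference result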
                    help='Enable GPU processing [required:cupy] (Default:False)')
args = parser.parse_args()

if args.gpu:
    print('GPU processing mode is enabled')
    import cupy as np
else:
    import numpy as np

fpanties = os.listdir('./converted/Fourier/')
os.makedirs('./converted/Fourier_inv/', exist_ok=True)
# fpanties = fpanties[len(os.listdir('./converted/Fourier_inv/')):]
for fname in fpanties:
    strs = fname.split('_')
    pantie = strs[0] + '.png'
    fmin = np.float64(strs[1])
    fmax = np.float64(strs[2][:-4])
    print('Process: ' + pantie)
    fpantie = Image.open('./converted/Fourier/' + fname)
    fimg_origin = np.fft.ifftshift(
        np.exp(
            np.array(fpantie, dtype=np.float64) / np.max(np.array(fpantie)) * fmax + fmin))
    mask = (np.array(Image.open('./dream/' + pantie))[:, :, 3] > 0).astype(
        np.float32)[:, :, None]
    [r, c, d] = fimg_origin.shape
    img = np.fft.ifft2(fimg_origin * np.exp(2j * np.pi * np.random.rand(r, c, d)),
                       axes=(0, 1))  # Initialize the phase
    if args.hio:
def phasesymmono(img, nscale=5, minWaveLength=3, mult=2.1, sigmaOnf=0.55,
                 k=2., polarity=0, noiseMethod=-1):
    """
    This function calculates the phase symmetry of points in an image. This
    is a contrast invariant measure of symmetry. This function can be used
    as a line and blob detector. The greyscale 'polarity' of the lines that
    you want to find can be specified.

    Arguments:
    -----------
    <Name>          <Default>   <Description>
    img             N/A         The input image
    nscale          5           Number of wavelet scales, try values 3-6
    minWaveLength   3           Wavelength of smallest scale filter.
    mult            2.1         Scaling factor between successive filters.
    sigmaOnf        0.55        Ratio of the standard deviation of the
                                Gaussian describing the log Gabor filter's
                                transfer function in the frequency domain
                                to the filter center frequency.
    k               2.0         No. of standard deviations of the noise
                                energy beyond the mean at which we set the
                                noise threshold point. You may want to vary
                                this up to a value of 10 or 20 for noisy
                                images.
    polarity        0           Controls 'polarity' of symmetry features to
                                find.
                                 1 only return 'bright' features
                                -1 only return 'dark' features
                                 0 return both 'bright' and 'dark' features
    noiseMethod     -1          Parameter specifies method used to
                                determine noise statistics.
                                -1 use median of smallest scale filter
                                   responses
                                -2 use mode of smallest scale filter
                                   responses
                                >=0 use this value as the fixed noise
                                   threshold

    Returns:
    ---------
    phaseSym        Phase symmetry image (values between 0 and 1).
    totalEnergy     Un-normalised raw symmetry energy which may be more to
                    your liking.
    T               Calculated noise threshold (can be useful for
                    diagnosing noise characteristics of images). Once you
                    know this you can then specify fixed thresholds and
                    save some computation time.

    The convolutions are done via the FFT. Many of the parameters relate to
    the specification of the filters in the frequency plane. The values do
    not seem to be very critical and the defaults are usually fine. You may
    want to experiment with the values of 'nscale' and 'k', the noise
    compensation factor.

    Notes on filter settings to obtain even coverage of the spectrum
    sigmaOnf    .85    mult 1.3
    sigmaOnf    .75    mult 1.6   (filter bandwidth ~1 octave)
    sigmaOnf    .65    mult 2.1
    sigmaOnf    .55    mult 3     (filter bandwidth ~2 octaves)

    For maximum speed the input image should have dimensions that
    correspond to powers of 2, but the code will operate on images of
    arbitrary size.

    See also:   phasesym, which uses oriented filters and is therefore
                slower, but also returns an orientation map of the image

    References:
    ------------
    Peter Kovesi, "Symmetry and Asymmetry From Local Phase" AI'97, Tenth
    Australian Joint Conference on Artificial Intelligence. 2 - 4 December
    1997. http://www.cs.uwa.edu.au/pub/robvis/papers/pk/ai97.ps.gz.

    Peter Kovesi, "Image Features From Phase Congruency". Videre: A Journal
    of Computer Vision Research. MIT Press. Volume 1, Number 3, Summer 1999
    http://mitpress.mit.edu/e-journals/Videre/001/v13.html

    Michael Felsberg and Gerald Sommer, "A New Extension of Linear Signal
    Processing for Estimating Local Properties and Detecting Features".
    DAGM Symposium 2000, Kiel

    Michael Felsberg and Gerald Sommer. "The Monogenic Signal" IEEE
    Transactions on Signal Processing, 49(12):3136-3144, December 2001
    """
    if img.dtype not in ['float32', 'float64']:
        img = np.float64(img)
        imgdtype = 'float64'
    else:
        imgdtype = img.dtype

    if img.ndim == 3:
        img = img.mean(2)

    rows, cols = img.shape

    epsilon = 1E-4  # used to prevent /0.
    IM = fft2(img)  # Fourier transformed image

    zeromat = np.zeros((rows, cols), dtype=imgdtype)

    # Matrix for accumulating weighted phase congruency values (energy).
    totalEnergy = zeromat.copy()

    # Matrix for accumulating filter response amplitude values.
    sumAn = zeromat.copy()

    radius, u1, u2 = filtergrid(rows, cols)

    # Get rid of the 0 radius value at the 0 frequency point (at top-left
    # corner after fftshift) so that taking the log of the radius will not
    # cause trouble.
    radius[0, 0] = 1.

    # Construct the monogenic filters in the frequency domain. The two
    # filters would normally be constructed as follows
    #    H1 = i*u1./radius
    #    H2 = i*u2./radius
    # However the two filters can be packed together as a complex valued
    # matrix, one in the real part and one in the imaginary part. Do this
    # by multiplying H2 by i and then adding it to H1 (note the subtraction
    # because i*i = -1). When the convolution is performed via the fft the
    # real part of the result will correspond to the convolution with H1
    # and the imaginary part with H2. This allows the two convolutions to
    # be done as one in the frequency domain, saving time and memory.
    H = (1j * u1 - u2) / radius

    # The two monogenic filters H1 and H2 are not selective in terms of the
    # magnitudes of the frequencies. The code below generates bandpass
    # log-Gabor filters which are point-wise multiplied by IM to produce
    # different bandpass versions of the image before being convolved with
    # H1 and H2.
    #
    # First construct a low-pass filter that is as large as possible, yet
    # falls away to zero at the boundaries. All filters are multiplied by
    # this to ensure no extra frequencies at the 'corners' of the FFT are
    # incorporated as this can upset the normalisation process when
    # calculating phase congruency.
    lp = _lowpassfilter([rows, cols], .4, 10)  # Radius .4, 'sharpness' 10
    logGaborDenom = 2. * np.log(sigmaOnf) ** 2.

    for ss in range(nscale):
        wavelength = minWaveLength * mult ** ss
        fo = 1. / wavelength  # Centre frequency of filter
        logRadOverFo = np.log(radius / fo)
        logGabor = np.exp(-(logRadOverFo * logRadOverFo) / logGaborDenom)
        logGabor *= lp          # Apply the low-pass filter
        logGabor[0, 0] = 0.     # Undo the radius fudge

        IMF = IM * logGabor      # Frequency bandpassed image
        f = np.real(ifft2(IMF))  # Spatially bandpassed image

        # Bandpassed monogenic filtering, real part of h contains
        # convolution result with h1, imaginary part contains convolution
        # result with h2.
        h = ifft2(IMF * H)

        # Squared amplitude of the h1 and h2 filters
        hAmp2 = h.real * h.real + h.imag * h.imag

        # Magnitude of energy
        sumAn += np.sqrt(f * f + hAmp2)

        # At the smallest scale estimate noise characteristics from the
        # distribution of the filter amplitude responses stored in sumAn.
        # tau is the Rayleigh parameter that is used to describe the
        # distribution.
        if ss == 0:
            # Use median to estimate noise statistics
            if noiseMethod == -1:
                tau = np.median(sumAn.flatten()) / np.sqrt(np.log(4))
            # Use the mode to estimate noise statistics
            elif noiseMethod == -2:
                tau = _rayleighmode(sumAn.flatten())

        # Calculate the phase symmetry measure

        # look for 'white' and 'black' spots
        if polarity == 0:
            totalEnergy += np.abs(f) - np.sqrt(hAmp2)
        # just look for 'white' spots
        elif polarity == 1:
            totalEnergy += f - np.sqrt(hAmp2)
        # just look for 'black' spots
        elif polarity == -1:
            totalEnergy += -f - np.sqrt(hAmp2)

    # Automatically determine noise threshold
    #
    # Assuming the noise is Gaussian, the response of the filters to noise
    # will form a Rayleigh distribution. We use the filter responses at the
    # smallest scale as a guide to the underlying noise level because the
    # smallest scale filters spend most of their time responding to noise,
    # and only occasionally responding to features. Either the median, or
    # the mode, of the distribution of filter responses can be used as a
    # robust statistic to estimate the distribution mean and standard
    # deviation as these are related to the median or mode by fixed
    # constants. The response of the larger scale filters to noise can then
    # be estimated from the smallest scale filter response according to
    # their relative bandwidths.
    #
    # This code assumes that the expected response to noise on the phase
    # congruency calculation is simply the sum of the expected noise
    # responses of each of the filters. This is a simplistic overestimate,
    # however these two quantities should be related by some constant that
    # will depend on the filter bank being used. Appropriate tuning of the
    # parameter 'k' will allow you to produce the desired output.

    # fixed noise threshold
    if noiseMethod >= 0:
        T = noiseMethod

    # Estimate the effect of noise on the sum of the filter responses as
    # the sum of estimated individual responses (this is a simplistic
    # overestimate). As the estimated noise response at successive scales
    # is scaled inversely proportional to bandwidth we have a simple
    # geometric sum.
    else:
        totalTau = tau * (1. - (1. / mult) ** nscale) / (1. - (1. / mult))

        # Calculate mean and std dev from tau using fixed relationship
        # between these parameters and tau. See
        # <http://mathworld.wolfram.com/RayleighDistribution.html>
        EstNoiseEnergyMean = totalTau * np.sqrt(np.pi / 2.)
        EstNoiseEnergySigma = totalTau * np.sqrt((4 - np.pi) / 2.)

        # Noise threshold, must be >= epsilon
        T = np.maximum(EstNoiseEnergyMean + k * EstNoiseEnergySigma, epsilon)

    # Apply noise threshold - effectively wavelet denoising soft
    # thresholding - and normalize symmetryEnergy by the sumAn to obtain
    # phase symmetry. Note the flooring operation is not necessary if you
    # are after speed, it is just 'tidy' not having -ve symmetry values.
    phaseSym = np.maximum(totalEnergy - T, 0)
    phaseSym /= sumAn + epsilon

    return phaseSym, totalEnergy, T
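# A minimal usage sketch of phasesymmono; the synthetic blob image is an
# assumption for illustration.
import numpy as np

img = np.zeros((128, 128))
img[60:68, 60:68] = 1.0  # one bright square 'blob'

phaseSym, totalEnergy, T = phasesymmono(img, nscale=4, polarity=1)
print(phaseSym.max(), T)  # symmetry peaks at the blob; T is the threshold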
def perfft2(im, compute_P=True, compute_spatial=False):
    """
    Moisan's Periodic plus Smooth Image Decomposition.

    The image is decomposed into two parts:

        im = s + p

    where 's' is the 'smooth' component with mean 0, and 'p' is the
    'periodic' component which has no sharp discontinuities when one moves
    cyclically across the image boundaries.

    usage: S, [P, s, p] = perfft2(im)

    where:  im is the image
            S is the FFT of the smooth component
            P is the FFT of the periodic component, returned if compute_P
              (default)
            s & p are the smooth and periodic components in the spatial
              domain, returned if compute_spatial

    By default this function returns `P` and `S`, the FFTs of the periodic
    and smooth components respectively. If `compute_spatial=True`, the
    spatial domain components 'p' and 's' are also computed.

    This code is adapted from Lionel Moisan's Scilab function
    'perdecomp.sci' "Periodic plus Smooth Image Decomposition" 07/2012
    available at: <http://www.mi.parisdescartes.fr/~moisan/p+s>
    """
    if im.dtype not in ['float32', 'float64']:
        im = np.float64(im)

    rows, cols = im.shape

    # Compute the boundary image which is equal to the image discontinuity
    # values across the boundaries at the edges and is 0 elsewhere
    s = np.zeros_like(im)
    s[0, :] = im[0, :] - im[-1, :]
    s[-1, :] = -s[0, :]
    s[:, 0] = s[:, 0] + im[:, 0] - im[:, -1]
    s[:, -1] = s[:, -1] - im[:, 0] + im[:, -1]

    # Generate grid upon which to compute the filter for the boundary image
    # in the frequency domain. Note that cos is cyclic hence the grid
    # values can range from 0 .. 2*pi rather than 0 .. pi and then pi .. 0
    x, y = (2 * np.pi * np.arange(0, v) / float(v) for v in (cols, rows))
    cx, cy = np.meshgrid(x, y)

    denom = (2. * (2. - np.cos(cx) - np.cos(cy)))
    denom[0, 0] = 1.  # avoid / 0

    S = fft2(s) / denom
    S[0, 0] = 0  # enforce zero mean

    if compute_P or compute_spatial:
        P = fft2(im) - S
        if compute_spatial:
            s = ifft2(S).real
            p = im - s
            return S, P, s, p
        else:
            return S, P
    else:
        return S
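# A minimal usage sketch verifying im = s + p and the zero-mean property of
# the smooth component; the random test image is an assumption.
import numpy as np

im = np.random.rand(64, 64)
S, P, s, p = perfft2(im, compute_spatial=True)
print(np.allclose(im, s + p))  # True: the decomposition is exact
print(abs(s.mean()) < 1e-9)    # True: 's' has (near-)zero mean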
def test_relu_type_error(self):
    f = cupy.vectorize(lambda x: x if x > 0.0 else cupy.float64(0.0))
    a = cupy.array([0.4, -0.2, 1.8, -1.2], dtype=cupy.float32)
    with pytest.raises(TypeError):
        return f(a)
def cov(a, y=None, rowvar=True, bias=False, ddof=None, fweights=None,
        aweights=None, *, dtype=None):
    """Returns the covariance matrix of an array.

    Args:
        a (cupy.ndarray): Array to compute covariance matrix.
        y (cupy.ndarray): An additional set of variables and observations.
        rowvar (bool): If ``True``, then each row represents a variable,
            with observations in the columns. Otherwise, the relationship
            is transposed.
        bias (bool): If ``False``, normalization is by ``(N - 1)``, where N
            is the number of observations given (unbiased estimate). If
            ``True``, then normalization is by ``N``.
        ddof (int): If not ``None`` the default value implied by bias is
            overridden. Note that ``ddof=1`` will return the unbiased
            estimate and ``ddof=0`` will return the simple average.
        fweights (cupy.ndarray, int): 1-D array of integer frequency
            weights, the number of times each observation vector should be
            repeated. It is required that ``fweights >= 0``. However, the
            function will not error when ``fweights < 0`` for performance
            reasons.
        aweights (cupy.ndarray): 1-D array of observation vector weights.
            These relative weights are typically large for observations
            considered "important" and smaller for observations considered
            less "important". If ``ddof=0`` the array of weights can be
            used to assign probabilities to observation vectors. It is
            required that ``aweights >= 0``. However, the function will not
            error when ``aweights < 0`` for performance reasons.
        dtype: Data type specifier. By default, the return data-type will
            have at least `numpy.float64` precision.

    Returns:
        cupy.ndarray: The covariance matrix of the input array.

    .. seealso:: :func:`numpy.cov`
    """
    if ddof is not None and ddof != int(ddof):
        raise ValueError('ddof must be integer')

    if a.ndim > 2:
        raise ValueError('Input must be <= 2-d')

    if dtype is None:
        if y is None:
            dtype = numpy.promote_types(a.dtype, numpy.float64)
        else:
            if y.ndim > 2:
                raise ValueError('y must be <= 2-d')
            dtype = functools.reduce(numpy.promote_types,
                                     (a.dtype, y.dtype, numpy.float64))

    X = cupy.array(a, ndmin=2, dtype=dtype)
    if not rowvar and X.shape[0] != 1:
        X = X.T
    if X.shape[0] == 0:
        return cupy.array([]).reshape(0, 0)
    if y is not None:
        y = cupy.array(y, copy=False, ndmin=2, dtype=dtype)
        if not rowvar and y.shape[0] != 1:
            y = y.T
        X = _core.concatenate_method((X, y), axis=0)

    if ddof is None:
        ddof = 0 if bias else 1

    w = None
    if fweights is not None:
        if not isinstance(fweights, cupy.ndarray):
            raise TypeError("fweights must be a cupy.ndarray")
        if fweights.dtype.char not in 'bBhHiIlLqQ':
            raise TypeError("fweights must be integer")
        fweights = fweights.astype(dtype=float)
        if fweights.ndim > 1:
            raise RuntimeError("cannot handle multidimensional fweights")
        if fweights.shape[0] != X.shape[1]:
            raise RuntimeError("incompatible numbers of samples and fweights")
        w = fweights

    if aweights is not None:
        if not isinstance(aweights, cupy.ndarray):
            raise TypeError("aweights must be a cupy.ndarray")
        aweights = aweights.astype(dtype=float)
        if aweights.ndim > 1:
            raise RuntimeError("cannot handle multidimensional aweights")
        if aweights.shape[0] != X.shape[1]:
            raise RuntimeError("incompatible numbers of samples and aweights")
        if w is None:
            w = aweights
        else:
            w *= aweights

    avg, w_sum = cupy.average(X, axis=1, weights=w, returned=True)
    w_sum = w_sum[0]

    # Determine the normalization
    if w is None:
        fact = X.shape[1] - ddof
    elif ddof == 0:
        fact = w_sum
    elif aweights is None:
        fact = w_sum - ddof
    else:
        fact = w_sum - ddof * sum(w * aweights) / w_sum

    if fact <= 0:
        warnings.warn('Degrees of freedom <= 0 for slice',
                      RuntimeWarning, stacklevel=2)
        fact = 0.0

    # Center on the (weighted) average, not the unweighted mean.
    X -= avg[:, None]
    if w is None:
        X_T = X.T
    else:
        X_T = (X * w).T
    out = X.dot(X_T.conj()) * (1 / cupy.float64(fact))

    return out.squeeze()
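# A minimal usage sketch of the weighted path; an fweights entry of 2 should
# match duplicating that observation. Data values are assumptions.
import cupy
import numpy

x = cupy.array([[0.0, 1.0, 2.0], [2.0, 1.0, 0.0]])
fw = cupy.array([1, 2, 1])

print(cov(x, fweights=fw))
print(numpy.cov(cupy.asnumpy(x), fweights=[1, 2, 1]))  # NumPy reference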