def advection(v, f, *varargs, **kwargs):
    '''
    Return the advection of an N-dimensional vector field f (or a scalar
    field f) along another N-dimensional vector field v.

    v is an array-like (N+1)-dimensional object containing a vector field
    (the first index specifies the component of the vector field).
    f is either a vector field with the same shape as v, or a scalar field
    (i.e. an N-dimensional array with the same shape as each component of v).

    Uses the numpy.gradient function to compute partial derivatives and
    inherits the same optional arguments.
    '''
    if len(shape(v)) == 0:
        raise TypeError('Invalid vector field v')
    N = shape(v)[0]
    shp = shape(v[0])
    if len(shp) != N:
        raise TypeError('Invalid vector field v')
    D = []
    for d in v:
        if shape(d) != shp:
            raise TypeError('Invalid vector field v')
    if len(shape(f)) == N:
        # Scalar field: (v . grad) f
        D = gradient(f, *varargs, **kwargs)
        return asum(asarray(v) * D, axis=0)
    elif len(shape(f)) == N + 1:
        # Vector field: apply (v . grad) to each component of f
        for d in f:
            D += [gradient(d, *varargs, **kwargs)]
    else:
        raise TypeError('Invalid scalar/vector field f')
    return [asum(asarray(v) * D[i], axis=0) for i in range(N)]
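# Usage sketch (illustrative, not part of the source): advect a Gaussian
# scalar blob along a uniform flow. Assumes the module-level numpy imports
# that advection expects (shape, gradient, asarray, sum as asum).
def _demo_advection():
    import numpy as np
    ny, nx = 64, 64
    yy, xx = np.mgrid[0:ny, 0:nx]
    v = np.array([np.ones((ny, nx)), np.zeros((ny, nx))])  # unit flow along axis 0
    f = np.exp(-((xx - nx / 2.)**2 + (yy - ny / 2.)**2) / 50.)
    return advection(v, f)  # for this v, equal to df/d(axis 0)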
def b_matrix(self, shg, shgbar=None, lcoords=None, coords=None, det=None,
             disp=0, **kwargs):
    """Assemble and return the B matrix"""
    B = zeros((self.ndi + self.nshr, self.num_node * self.num_dof_per_node))
    B[0, 0::2] = shg[0, :]
    B[1, 1::2] = shg[1, :]
    B[3, 0::2] = shg[1, :]
    B[3, 1::2] = shg[0, :]
    if not disp:
        return B

    # Algorithm in
    # The Finite Element Method: Its Basis and Fundamentals
    # by Olek C. Zienkiewicz, Robert L. Taylor, J. Z. Zhu

    # Jacobian at element centroid
    dNdxi = self.shape.grad([0., 0.])
    dxdxi = dot(dNdxi, coords)
    J0 = inv(dxdxi)
    dt0 = determinant(dxdxi)

    xi, eta = lcoords
    dNdxi = array([[-2. * xi, 0.], [0., -2. * eta]])
    dNdx = dt0 / det * dot(J0, dNdxi)
    G1 = array([[dNdx[0, 0], 0],
                [0, dNdx[0, 1]],
                [0, 0],
                [dNdx[0, 1], dNdx[0, 0]]])
    G2 = array([[dNdx[1, 0], 0],
                [0, dNdx[1, 1]],
                [0, 0],
                [dNdx[1, 1], dNdx[1, 0]]])
    G = concatenate((G1, G2), axis=1)
    return B, G

    # NOTE: the alternative algorithm below is unreachable as written;
    # it is kept for reference.
    # Algorithm in Taylor's original paper and
    # The Finite Element Method: Linear Static and Dynamic
    # Finite Element Analysis
    # by Thomas J. R. Hughes
    xi = self.gauss_coords
    n = self.num_gauss
    dxdxi = asum([coords[i, 0] * xi[i, 0] for i in range(n)])
    dxdeta = asum([coords[i, 0] * xi[i, 1] for i in range(n)])
    dydxi = asum([coords[i, 1] * xi[i, 0] for i in range(n)])
    dydeta = asum([coords[i, 1] * xi[i, 1] for i in range(n)])
    xi, eta = lcoords
    G1 = array([[-xi * dydeta, 0],
                [0, xi * dxdeta],
                [0, 0],
                [xi * dxdeta, -xi * dydeta]])
    G2 = array([[-eta * dydxi, 0.],
                [0, eta * dxdxi],
                [0, 0],
                [eta * dxdxi, -eta * dydxi]])
    G = 2. / det * concatenate((G1, G2), axis=1)
    return B, G
def center_of_mass(data):
    """Takes an input 2D data array (image) and returns the coordinates
    (xc, yc) of its center of mass"""
    Y, X = indices(data.shape)
    M00 = asum(data)       # integrated mass
    M10 = asum(X * data)   # first-order raw moments
    M01 = asum(Y * data)
    xc, yc = M10 / M00, M01 / M00
    return xc, yc
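# Usage sketch (illustrative, not part of the source). Assumes numpy's
# indices and sum (aliased as asum) are imported at module level, as
# center_of_mass expects.
def _demo_center_of_mass():
    import numpy as np
    img = np.zeros((50, 50))
    img[20, 30] = 1.0               # single bright pixel at row 20, column 30
    xc, yc = center_of_mass(img)    # -> (30.0, 20.0): xc is the column index
    return xc, yc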
def mass(self, X, lumped_mass=0):
    shg = self.grad(X)
    mel = zeros((self.num_dof_per_node * self.num_node,
                 self.num_dof_per_node * self.num_node))
    w = self.integration.mass_weights
    rho = self.density
    # Loop over the integration points
    for (npt, xi) in enumerate(self.integration.mass_points):
        # Compute shape functions and derivatives wrt local coords
        N = self.shape.eval(xi)
        dNdxi = self.shape.grad(xi)
        dxdxi = dot(dNdxi, X)
        jac = det(dxdxi)
        for a in range(self.num_node):
            for b in range(self.num_node):
                for i in range(self.num_dof_per_node):
                    row = self.num_dof_per_node * a + i
                    col = self.num_dof_per_node * b + i
                    mel[col, row] += rho * N[b] * N[a] * w[npt] * jac
    # Evaluate a lumped mass matrix using the row sum method
    if lumped_mass:
        for a in range(self.num_node * self.num_dof_per_node):
            m = asum(mel[a, :])
            mel[a, :] = 0
            mel[a, a] = m
    return mel
def detect_current(cloud, showplots=False):
    """Detects whether there is a vortex-like signature of persistent
    current in the center of a TOF image of an expanded ring BEC"""
    OD = cloud.get_OD()
    peak_coord = cloud.results['peak coordinates']
    center_region = ROI(center=peak_coord, size=(40, 40)).slices
    cloud_center = ndi.median_filter(OD[center_region], size=2)
    minOD = cloud_center.min()
    maxOD = cloud_center.max()
    cloud_median = ndi.median_filter(cloud_center, size=10)
    belowthresh = where(cloud_center < cloud_median * 0.75, 1, 0)
    opened = ndi.binary_opening(belowthresh, iterations=1)
    closed = ndi.binary_closing(opened, iterations=3)
    current_found = ndi.label(closed)[1]
    cloud.results['current_found'] = current_found
    if showplots:
        fig = plt.figure(1999)
        fig.add_subplot(221, xticks=[], yticks=[])
        plt.imshow(cloud_center, interpolation='nearest',
                   vmin=minOD, vmax=maxOD)
        fig.add_subplot(222, xticks=[], yticks=[])
        plt.imshow(cloud_median, interpolation='nearest',
                   vmin=minOD, vmax=maxOD)
        fig.add_subplot(223, xticks=[], yticks=[])
        plt.imshow(closed, interpolation='nearest',
                   cmap=plt.cm.get_cmap('binary'))
        fig.add_subplot(224, xticks=[], yticks=[])
        plt.imshow(belowthresh, interpolation='nearest',
                   cmap=plt.cm.get_cmap('binary'))
    return current_found, asum(closed)
def robust_parameter(clusters, stats, elems):
    '''
    Parameter to measure the robustness of a G-mode test.

    The parameter is a weighted average plus a normality estimator:
    P1 = SUM( N * var ) / SUM( N )
    P2 = SUM( N^-1 * var ) / SUM( N^-1 )
    P3 = median of an inverted Shapiro-Wilk W statistic per cluster
    P = (P1/w1 + P2/w1 + P3/w3) / (2/w1 + 1/w3)

    (P1 and P2 share the weight w1 in this implementation.)
    '''
    from scipy.stats import shapiro
    from math import sqrt
    from itertools import izip

    shap, N, var = deque(), deque(), deque()
    for members, cl in izip(clusters, stats):
        # cluster size array
        N.append(len(members))
        # cluster variance array
        var.append(asum(cl[1]**2))
        # Shapiro-Wilk test:
        W_vec = array([shapiro(elems[members][n])[0]**2
                       for n in xrange(len(elems[0]))])
        # inverted Shapiro-Wilk W statistic
        shap.append(sqrt(asum(1e0 / W_vec)))

    shap, N, var = array(shap), array(N), array(var)

    w1 = sqrt(asum(mad(var, median(var))**2))
    w3 = mad(shap, median(shap))

    p1 = asum(N * var) / asum(N)
    p2 = asum(var / N) / asum(1e0 / N)
    p3 = median(shap)

    return (p1 / w1 + p2 / w1 + p3 / w3) / (2e0 / w1 + 1e0 / w3)
def merge(rows):
    # x0 (the trailing window radius) is taken from the enclosing scope
    if x0 < 0:
        for row in rows:
            yield row
    else:
        for i in range(x0, len(rows)):
            a = i - x0
            b = i + 1
            row = asum(array(rows[a:b]), 0)
            yield row
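# Usage sketch (illustrative, not part of the source): merge reads x0 from
# its enclosing scope, so a hypothetical driver might set it at module level.
def _demo_merge():
    import numpy as np
    global x0
    x0 = 2  # hypothetical: defined where merge can see it
    rows = [np.ones(4) * k for k in range(6)]
    # each yielded row is the sum of a trailing window of x0 + 1 input rows
    return list(merge(rows))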
def peakguess1D(data, filter_rad=5, bgclip=0.2):
    """Takes an input data list (1D), median-filters and background-clips
    it, then returns an initial peak guess [xc, sigma, amp, offset] built
    from the raw and central moments up to second order"""
    fdata = medfilt(data, kernel_size=filter_rad)
    cfdata = where(fdata < bgclip, 0, fdata)
    X = arange(len(cfdata))
    # zeroth-order raw moment
    M0 = asum(cfdata)
    # first-order raw moment
    M1 = asum(X * cfdata)
    # position of the center of mass
    xc = M1 / M0
    # second-order raw moment
    M2 = asum(X ** 2 * cfdata)
    # second normalized central moment (variance)
    u2 = M2 / M0 - xc ** 2
    sigma = 2 * sqrt(abs(u2))
    amp = cfdata.max()
    return [xc, sigma, amp, 0]
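# Usage sketch (illustrative, not part of the source): initial fit guess for
# a noiseless Gaussian peak. Assumes scipy.signal.medfilt and the numpy names
# used by peakguess1D are imported at module level.
def _demo_peakguess1D():
    import numpy as np
    x = np.arange(200)
    sig = np.exp(-(x - 80.)**2 / (2 * 12.**2))  # Gaussian centered at 80
    xc, sigma, amp, offset = peakguess1D(sig)
    # xc ~ 80 and amp ~ 1; sigma is roughly twice the std of the clipped peak
    return xc, sigma, amp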
def moments2D(data):
    """Takes an input 2D data array (image), calculates and returns the
    position of the centroid and the normalized central image moments up
    to second order"""
    Y, X = indices(data.shape)
    M = zeros((3, 3))
    u = zeros((3, 3))
    # perform matrix calculations
    A10 = X * data
    A01 = Y * data
    A11 = A10 * Y
    A20 = A10 * X
    A02 = A01 * Y
    # zeroth-order raw moment
    M[0][0] = asum(data)
    # first-order raw moments
    M[1][0] = asum(A10)
    M[0][1] = asum(A01)
    # position of the centroid
    xc, yc = M[1][0] / M[0][0], M[0][1] / M[0][0]
    # second-order raw moments
    M[1][1] = asum(A11)
    M[2][0] = asum(A20)
    M[0][2] = asum(A02)
    # normalized central moments up to second order
    u[0][0] = 1
    u[1][1] = M[1][1] / M[0][0] - xc * yc
    u[2][0] = M[2][0] / M[0][0] - xc ** 2
    u[0][2] = M[0][2] / M[0][0] - yc ** 2
    return xc, yc, u
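# Usage sketch (illustrative, not part of the source): centroid and second
# moments of an anisotropic Gaussian spot; the central moments also give the
# standard orientation angle of the principal axis.
def _demo_moments2D():
    import numpy as np
    yy, xx = np.indices((64, 64))
    img = np.exp(-((xx - 40.)**2 / 18. + (yy - 25.)**2 / 50.))
    xc, yc, u = moments2D(img)  # xc ~ 40, yc ~ 25
    theta = 0.5 * np.arctan2(2 * u[1][1], u[2][0] - u[0][2])
    return xc, yc, theta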
def average(self, point, fdata, v=None):
    """Inverse distance weighted average of integration point data at point"""
    if v is None:
        v = range(len(fdata[0].data))
    nx = len(v)
    ave = zeros(nx)
    w = []
    for (i, xi) in enumerate(self.integration.points):
        d = max(dist(point, xi), 1E-06)
        w.append(1. / d)
        ave[:] += [fdata[i][v][x] * w[-1] for x in range(nx)]
    ave /= asum(w)
    return ave
def G(N, f, X, ct, iS):
    '''G parameter --> Transforms a X2 estimator into a Gaussian estimator'''
    #z2 = iR * asum(((X - ct) / (iS + TINY))**2)  # Z2 estimator

    # Mahalanobis distance estimator:
    X = X - ct
    z2 = asum(fabs(X * ravel(dot(iS, X))))

    # G transformation:
    if aall(N * f > 100e0):
        return sqrt(2e0 * z2) - sqrt(2e0 * f - 1e0)
    elif aall(N * f >= 30e0) and aall(N * f <= 100e0):
        # Wilson-Hilferty cube-root approximation: (z2/f)**(1/3) is
        # approximately normal with mean 1 - 2/(9f) and variance 2/(9f)
        return (((z2 / f)**(1e0 / 3) - (1e0 - 2e0 / (9e0 * f)))
                / sqrt(2e0 / (9e0 * f)))
    elif aall(N * f < 30e0):
        return 9e9
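# Usage sketch (illustrative, not part of the source): evaluate G for a
# hypothetical 3-variable cluster with identity covariance. Assumes the
# module-level numpy imports G relies on (asum, fabs, ravel, dot, aall, sqrt).
def _demo_G():
    import numpy as np
    iS = np.eye(3)    # inverse covariance matrix
    ct = np.zeros(3)  # cluster central tendency
    X = np.array([1., 0., 2.])
    # N * f = 600 > 100, so the large-sample branch applies:
    return G(N=200, f=3.0, X=X, ct=ct, iS=iS)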
def RegisterImages(self, pts=None):
    from skimage.feature import register_translation
    from numpy import sum as asum
    if pts is None:
        # Let the user pick the corners of a reference sub-region
        imshow(self.AStack)
        ind = ginput(n=0, timeout=0)
        close()
        Isub = [I[int(round(ind[0][1])):int(round(ind[1][1])),
                  int(round(ind[0][0])):int(round(ind[1][0]))]
                for I in self.Images]
        Isubavg = asum(dstack(Isub), axis=2) / len(Isub)
        # Shift each image by its offset from the sub-region average
        II = [ShiftImage(self.Images[i],
                         register_translation(Isubavg, Isub[i])[0])
              for i in range(len(Isub))]
    else:
        II = [ShiftImage(self.Images[i], pts[i]) for i in range(len(pts))]
    self.Images = II
    self.AxialStack()
    self.HorizontalStack()
def AxialStack(self, stcktype='max', stdthresh=0.):
    from numpy import amax, mean, std
    from numpy import sum as asum
    if stcktype == 'max':
        self.AStack = amax(dstack(self.Images), axis=2)
    elif stcktype == 'sumabovemean':
        A = moveaxis(dstack(self.Images), 2, 0)
        # Sum only the pixels that exceed the per-pixel mean by
        # stdthresh standard deviations
        A = asum(A * (A > mean(A, axis=0) + std(A, axis=0) * stdthresh),
                 axis=0)
        self.AStack = A
def statistics(a):
    from numpy import sum as asum
    r = ("Min, Max: {},{}\n".format(amin(a), amax(a)) +
         "Range   : {}\n".format(ptp(a)) +
         "Average : {}\n".format(average(a)) +
         "Mean    : {}\n".format(mean(a)) +
         "Median  : {}\n".format(median(a)) +
         "StdDev  : {}\n".format(std(a)) +
         "Sum     : {}\n".format(asum(a)))
    try:
        # Try the method for masked arrays. The other method will not
        # respect the mask
        r += "Histogram:{}".format(
            histogram(a.compressed())[0].ravel().tolist())
    except AttributeError:
        r += "Histogram: {}".format(histogram(a)[0].ravel().tolist())
    return r
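# Usage sketch (illustrative, not part of the source): the masked-array
# branch keeps masked values out of the histogram. Assumes the module-level
# numpy imports statistics relies on (amin, amax, ptp, average, mean,
# median, std, histogram).
def _demo_statistics():
    import numpy as np
    a = np.ma.masked_less(np.arange(12, dtype=float).reshape(3, 4), 3.0)
    return statistics(a)  # histogram is computed on a.compressed()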
def ImgToArray(I):
    from numpy import sum as asum
    from numpy import array
    from numpy.linalg import norm
    # Pack the three 8-bit color channels into a single 24-bit value
    bintodec = array([256**2, 256, 1])
    II = asum(bintodec * I[:, :, 0:3], axis=-1)
    # Normalize by the L2 norm of the flattened image
    II = II / norm(II.ravel())
    return II.astype(float)
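# Usage sketch (illustrative, not part of the source): pack a random 8-bit
# RGB image into normalized 24-bit values.
def _demo_ImgToArray():
    import numpy as np
    rgb = (np.random.rand(32, 32, 3) * 255).astype(int)
    return ImgToArray(rgb)  # float array with unit L2 norm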
def analyze(self):
    """
    Analyzes basic properties of the atomic cloud and stores the results
    in the cloud.results data structure.

    ---------------------
    Values stored in Cloud.results:
    ---------------------
    sumOD : float
        cloud "mass" - sum of all OD values in the cloud ROI
    relative COM : (yc, xc)
        position of the center of mass with respect to the cloud ROI
    absolute COM : (yc, xc)
        position of the center of mass in the (full) frame ROI
    maxOD : float
        maximum value of the OD in the cloud ROI
    raw atoms : integer
        calculated number of atoms given pixel size and scattering
        cross section
    atoms : integer
        calculated number of atoms, including a multiplicative
        correction factor
    """
    OD = self.get_OD()
    self.results['sumOD'] = asum(OD)
    xc, yc = center_of_mass(OD)
    self.results['relative COM'] = (yc, xc)
    top, left = self.roi.tblr[0], self.roi.tblr[2]
    self.results['absolute COM'] = (top + yc, left + xc)
    self.results['maxOD'] = ndi.median_filter(OD, size=(4, 4)).max()
    self.calc_atoms()
    print("%s: Analyzing Cloud" % self.cloud_id)
def distance(all_clusters, cluster_stats, elems):

    Nc = len(all_clusters)   # number of clusters
    N = len(elems)           # sample size
    M = len(elems[0])        # number of variables
    TINY = 1e-9

    # Modules:
    from numpy import zeros, array, float32, sqrt, dot, ravel, fabs
    from numpy import sum as asum
    from collections import deque
    from gmode_module import stats, Invert

    # Output:
    d2 = zeros((M, Nc, Nc), dtype=float32)
    Gc = zeros((M, Nc, Nc), dtype=float32)
    D2 = zeros((Nc, Nc), dtype=float32)

    # Hash the elements of each cluster into an array:
    elems_cluster = deque()
    for a in xrange(Nc):
        elems_cluster.append(elems[all_clusters[a]])
    elems_cluster = array(elems_cluster)

    # Calculate matrix terms:
    for a in range(0, Nc - 1):
        Na = len(all_clusters[a])

        # Statistics of cluster a:
        ct_a, dev_a, S_a, R_a = cluster_stats[a]
        #iR_a = Invert(R_a)
        iS_a = Invert(S_a)

        for b in xrange(a + 1, Nc):
            Nb = len(all_clusters[b])

            # Statistics of cluster b:
            ct_b, dev_b, S_b, R_b = cluster_stats[b]
            #iR_b = Invert(R_b)
            iS_b = Invert(S_b)

            # Degrees of freedom:
            fab = (Nb - 1e0) * (M**2) / asum(R_a)
            fba = (Na - 1e0) * (M**2) / asum(R_b)

            # Calculating Z²i(a,b) and Z²i(b,a):
            #Z2iab = asum(((elems_cluster[b] - ct_a) / (dev_a + TINY))**2, axis=0)
            Z2iab = asum(fabs((elems_cluster[b] - ct_a).T *
                              dot(iS_a, (elems_cluster[b] - ct_a).T).T), axis=1)
            #Z2iba = asum(((elems_cluster[a] - ct_b) / (dev_b + TINY))**2, axis=0)
            Z2iba = asum(fabs((elems_cluster[a] - ct_b).T *
                              dot(iS_b, (elems_cluster[a] - ct_b).T).T), axis=1)

            # Calculating Z²(a,b) and Z²(b,a):
            #Z2ab = asum(dot(iR_a, Z2iab))
            #Z2ba = asum(dot(iR_b, Z2iba))
            Z2ab = asum(Z2iab)
            Z2ba = asum(Z2iba)

            for i in xrange(M):
                # Calculating d² and Gc:
                d2[i][a][b] = (Z2iab[i] + Z2iba[i]) / (Na + Nb - 1e0)
                Gc[i][a][b] = (sqrt(2e0 * (Z2iab[i] + Z2iba[i])) -
                               sqrt(2e0 * (Na + Nb) - 1e0))

            D2[a][b] = (Z2ab + Z2ba) / (fab + fba - 1e0)
            D2[b][a] = D2[a][b]

    return d2, Gc, D2
def airyPSF(x, y, a, r, wl, T=None, resize=False):
    '''
    Returns the PSF of a perfect lens with a circular aperture.

    If resize is set to True, the size of the output array is the minimal
    size allowing exact convolution (no boundary effects) with an image,
    so with len(x) = Nx, len(y) = Ny we obtain
    shape(psf) = (2*Nx - 1, 2*Ny - 1).

    Parameters
    ==========
    x, y : array_like
        Coordinate arrays of the image
    a : scalar (float)
        Aperture
    r : scalar (float)
        Distance from the optics to the object
    wl : scalar (float) / array_like
        Wavelength(s)
    T : scalar (float), required when wl is array_like
        Black body temperature
    '''
    if not hasattr(wl, '__len__'):
        wl = [wl]
        multiple_wl = False
    else:
        multiple_wl = True
        if T is None:
            raise ValueError('Black body temperature must be provided')
    psf_bol = []
    N = 0.
    x = asarray(x)
    y = asarray(y)
    Nx = len(x)
    Ny = len(y)
    dx = x[1] - x[0]
    dy = y[1] - y[0]
    if any(x[1:] - x[:-1] != dx) or any(y[1:] - y[:-1] != dy):
        warn("Coordinates must be equidistant")
    if resize:
        X = arange(-(Nx - 1) * dx, Nx * dx + dx / 2., dx)
        Y = arange(-(Ny - 1) * dy, Ny * dy + dy / 2., dy)
    else:
        X = roll(arange(-floor(Nx / 2.) * dx, Nx / 2. * dx, dx),
                 -int(floor(Nx / 2.)))
        Y = roll(arange(-floor(Ny / 2.) * dy, Ny / 2. * dy, dy),
                 -int(floor(Ny / 2.)))
    R, theta, z = coordCyl(X, Y, [0], [X[0], Y[0], 0], [0, 0, 1])
    R = sqrt(R[0][:, :, 0]**2 + R[1][:, :, 0]**2)
    for l in wl:
        k = 2. * pi / l
        z = .5 * k * a * (R / r) / sqrt(1. + (R / r)**2)
        z[where(R == 0.)] = 1.  # avoid division by zero; value fixed below
        print('\tEvaluation of the Bessel function (wl = ' + str(l) + ')...')
        psf_val = (2. * asarray(j1(z), dtype=float) / z)**2
        psf_val[where(R == 0.)] = 1.  # limit of (2*j1(z)/z)**2 as z -> 0
        if multiple_wl:
            pl = planck(l, T)
        else:
            pl = 1.
        psf_bol += [pl * psf_val]
        N += pl
    psf_airy = sum(psf_bol) / N
    return psf_airy / asum(psf_airy)
def generalPSF(mask, x, y, a, r, wl, T=None, gamma=None, sigma=None):
    '''
    Returns the diffraction-limited PSF generated by a given pupil,
    convolved with a Voigt profile specified by the parameters gamma
    and sigma.

    Parameters
    ==========
    mask : array_like (2D)
        The actual pupil of the telescope
    x, y : array_like
        Coordinate arrays of the image
    a : scalar (float)
        Aperture
    r : scalar (float)
        Distance from the optics to the object
    wl : scalar (float) / array_like
        Wavelength(s)
    T : scalar (float), required when wl is array_like
        Black body temperature
    gamma : scalar (float)
        Parameter of the Lorentz profile (in arcsec)
    sigma : scalar (float)
        Parameter of the Gaussian profile (in arcsec)

    Note: The convolution of the Lorentz and Gaussian profiles leads to
    ===== the Voigt profile
    '''
    if not hasattr(wl, '__len__'):
        wl = [wl]
        multiple_wl = False
    else:
        multiple_wl = True
        if T is None:
            raise ValueError('Black body temperature must be provided')
    psf_bol = []
    N = 0.
    x = asarray(x)
    y = asarray(y)
    Nx = len(x)
    Ny = len(y)
    dx = x[1] - x[0]
    dy = y[1] - y[0]
    if any(x[1:] - x[:-1] != dx) or any(y[1:] - y[:-1] != dy):
        warn("Coordinates must be equidistant")
    if not (gamma is None and sigma is None):
        X = roll(arange(-floor(Nx / 2.) * dx, Nx / 2. * dx, dx),
                 -int(floor(Nx / 2.)))
        Y = roll(arange(-floor(Ny / 2.) * dy, Ny / 2. * dy, dy),
                 -int(floor(Ny / 2.)))
        R, theta, z = coordCyl(X, Y, [0], [X[0], Y[0], 0], [0, 0, 1])
        R = sqrt(R[0][:, :, 0]**2 + R[1][:, :, 0]**2)
        nonIdeal = ones((Nx, Ny), dtype=complex)
        if gamma == 0.:
            gamma = None
        if sigma == 0.:
            sigma = None
        if gamma is not None:
            gamma = r * arctan(gamma * 2. * pi / (360. * 3600.))
            nonIdeal *= fft2(.5 * gamma / (pi * (R**2 + .25 * gamma**2)))
        if sigma is not None:
            sigma = r * arctan(sigma * 2. * pi / (360. * 3600.))
            nonIdeal *= fft2(exp(-R**2 / (2. * sigma**2)) /
                             (sqrt(2. * pi) * sigma))
    for l in wl:
        Lx = l / (Nx * sin(dx / r))
        Ly = l / (Ny * sin(dy / r))
        s = (1 + 2 * int(floor(.5 * a / Lx)),
             1 + 2 * int(floor(.5 * a / Ly)))
        mask_resampled = resample(mask, s)
        mask_padded = real(zeroPadding(mask_resampled, (Nx, Ny)))
        psf_val = aabs(fft2(mask_padded))**2
        psf_val = psf_val / asum(psf_val)
        if multiple_wl:
            pl = planck(l, T)
        else:
            pl = 1.
        psf_bol += [pl * psf_val]
        N += pl
    if gamma is None and sigma is None:
        return sum(psf_bol) / N
    else:
        nonIdeal /= nonIdeal[0, 0]
        return real(ifft2(fft2(sum(psf_bol) / N) * nonIdeal))
def merge(rows):
    for i in range(x0, len(rows)):
        a = i - x0
        b = i + 1
        row = asum(array(rows[a:b]), 0)
        yield row
def learn_metadata(metadata_file_location):

    #
    from numpy import array

    #
    raw_metadata = smart_object(metadata_file_location)
    # shorthand
    y = raw_metadata

    #
    standard_metadata = smart_object()
    # shorthand
    x = standard_metadata

    # Creation date of metadata file
    x.date_number = getctime(metadata_file_location)

    '''
    %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
    %% Calculate derivative quantities  %%
    %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
    '''

    # Masses
    x.m1 = y.initial_mass1
    x.m2 = y.initial_mass2

    #
    J = array(y.initial_ADM_angular_momentum)
    S1 = array(y.initial_spin1)
    S2 = array(y.initial_spin2)
    S = S1 + S2
    L = J - S
    P = y.initial_ADM_linear_momentum

    # Prepare to deduce initial linear momenta
    R1 = array(y.initial_position1)
    R2 = array(y.initial_position2)
    rr = R2 - R1
    R = array([[0, rr[2], -rr[1]],
               [-rr[2], 0, rr[0]],
               [rr[1], -rr[0], 0]])
    H = L - cross(y.initial_position2, P)

    #
    rmap = abs(asum(R, axis=0)) > 1e-6
    k = next(k for k in range(len(rmap)) if rmap[k])
    P1 = zeros(P.shape)
    P1[k:] = dot(inv(R[k:, k:]), H[k:])
    P2 = array(y.initial_ADM_linear_momentum) - P1

    #
    x.note = ('The SXS metadata give only the total initial linear and '
              'angular momenta. In this code, the momenta of each BH have '
              'been deduced from basic linear algebra. However, this '
              'approach does not constrain the X COMPONENT of the total '
              'angular momentum, resulting in disagreement between the '
              'metadata value and the value resulting from the appropriate '
              'sum. Use the metadata value.')

    #
    L1 = cross(R1, P1)
    L2 = cross(R2, P2)

    #
    B = L1 + L2
    if norm(L[k:] - B[k:]) > 1e-6:
        print('>> norm( L[k:] - B[k:] ) = %f > 1e-6' % norm(L[k:] - B[k:]))
        msg = '>> Inconsistent handling of initial momenta. Please scrutinize.'
        raise ValueError(msg)

    #
    x.madm = y.initial_ADM_energy

    #
    x.P1 = P1
    x.P2 = P2
    x.S1 = S1
    x.S2 = S2

    #
    x.b = float(y.initial_separation)
    if abs(x.b - norm(R1 - R2)) > 1e-6:
        msg = '(!!) Inconsistent assignment of initial separation.'
        raise ValueError(msg)

    #
    x.R1 = R1
    x.R2 = R2

    #
    x.L1 = L1
    x.L2 = L2

    #
    x.valid = True

    #
    x.mf = y.remnant_mass

    #
    x.Sf = y.remnant_spin
    x.Xf = x.Sf / (x.mf * x.mf)
    x.xf = sign(x.Sf[-1]) * norm(x.Sf) / (x.mf**2)

    # NOTE: the units of these quantities are not guaranteed; in particular,
    # there are cases where the initial mass is not scaled to 1. This is
    # inconsistent with nrutils' conventions, so we correct for it here.
    # NOTE: the order of the lines below matters significantly.
    M = x.m1 + x.m2
    _m1 = x.m1 / M
    _m2 = x.m2 / M
    x.S1 = _m1 * _m1 * x.S1 / (x.m1 * x.m1)
    x.S2 = _m2 * _m2 * x.S2 / (x.m2 * x.m2)
    x.m1, x.m2 = _m1, _m2
    _mf = y.remnant_mass / M
    x.Sf = _mf * _mf * x.Sf / (x.mf * x.mf)
    x.mf = _mf
    # QUESTION: Should the momenta and distances also be rescaled?

    #
    return standard_metadata, raw_metadata
def gsamp_pt(array, x, y, r):
    """Gaussian-weighted sample of a 2D array around the point (x, y),
    where x is the column coordinate and y the row coordinate"""
    h, w = array.shape  # numpy shape convention: (rows, cols)
    X, Y = meshgrid(range(w), range(h))  # X, Y have shape (h, w)
    S = exp(-2 * ((X - x) ** 2 + (Y - y) ** 2) / r ** 2)
    S = S / asum(S)
    return asum(S * array)
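# Usage sketch (illustrative, not part of the source): sample an image with
# a Gaussian window centered between pixels. Assumes numpy's meshgrid, exp,
# and sum (as asum) are imported at module level.
def _demo_gsamp_pt():
    import numpy as np
    img = np.random.rand(80, 120)  # 80 rows (y), 120 columns (x)
    return gsamp_pt(img, x=30.5, y=62.0, r=4.0)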
def free(R):
    """Effective degrees of freedom of an M x M correlation matrix R:
    M**2 / sum(R) (cf. the fab, fba terms in distance above)"""
    return R.shape[1]**2 / asum(R)
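# Usage sketch (illustrative, not part of the source): an identity
# correlation matrix (M uncorrelated variables) gives f = M.
def _demo_free():
    import numpy as np
    return free(np.eye(4))  # 4**2 / 4 = 4.0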