def ks(x, alpha=1, D=1):
    # If x is a list, convert it to a 1-D array.
    if isinstance(x, list):
        x = pylab.array(x)
    # The output must contain at least one copy of the input signal,
    # i.e. D must be at least 1.
    D = max(D, 1)
    # Number of input samples.
    M = len(x)
    # Create a vector of the powers of alpha, [alpha^0, alpha^1, ...].
    size_alphaVector = D
    alphaVector = (alpha * pylab.ones(size_alphaVector)) ** range(size_alphaVector)
    # Create a matrix with M columns, each column being the vector of the
    # powers of alpha.
    alphaMatrix = pylab.tile(alphaVector, (M, 1)).T
    # Create a matrix with D rows, each row filled by the input signal x.
    xMatrix = pylab.tile(x, (D, 1))
    # Multiply the two element-wise: row k holds alpha^k * x.
    yMatrix = alphaMatrix * xMatrix
    # Read out the output row by row (C order).
    y = yMatrix.flatten()
    return y
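# Usage sketch (an assumed example, not part of the original source): each of
# the D copies of x is scaled by alpha**k before the copies are concatenated,
# so ks() produces a geometrically decaying repetition of the input.
import pylab  # ks() relies on a module-level pylab import

y_demo = ks([1.0, 2.0, 3.0], alpha=0.5, D=3)
print(y_demo)  # -> [1.  2.  3.  0.5  1.  1.5  0.25  0.5  0.75]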
def magicsq(N):
    """
    copied from internet (lost the source...)

    Creates an N x N magic square.

    **Input:** *N* -- an integer in some form; may be a float or quoted.

    **Output:** an ``'int32'`` *N* by *N* array -- the same magic square as
    the Matlab and Octave ``magic(N)`` commands produce. In particular, the
    Siamese method is used for odd *N* (but with a different implementation).
    """
    from pylab import tile, arange
    global _constant
    n = int(N)
    if n < 0 or n == 2:  # consistent with Octave
        raise TypeError("No such magic squares exist.")
    elif n % 2 == 1:
        m = n >> 1
        b = n * n + 1
        _constant = n * b >> 1
        return (tile(arange(1, b, n), n + 2)[m:-m - 1].reshape(n, n + 1)[..., 1:] +
                tile(arange(n), n + 2).reshape(n, n + 2)[..., 1:-1]).transpose()
    elif n % 4 == 0:
        b = n * n + 1
        _constant = n * b >> 1
        d = arange(1, b).reshape(n, n)
        d[0:n:4, 0:n:4] = b - d[0:n:4, 0:n:4]
        d[0:n:4, 3:n:4] = b - d[0:n:4, 3:n:4]
        d[3:n:4, 0:n:4] = b - d[3:n:4, 0:n:4]
        d[3:n:4, 3:n:4] = b - d[3:n:4, 3:n:4]
        d[1:n:4, 1:n:4] = b - d[1:n:4, 1:n:4]
        d[1:n:4, 2:n:4] = b - d[1:n:4, 2:n:4]
        d[2:n:4, 1:n:4] = b - d[2:n:4, 1:n:4]
        d[2:n:4, 2:n:4] = b - d[2:n:4, 2:n:4]
        return d
    else:
        m = n >> 1
        k = m >> 1
        b = m * m
        d = tile(magicsq(m), (2, 2))  # the recursive call changes _constant
        _constant = _constant * 8 - n - m
        d[:m, :k] += 3 * b
        d[m:, k:m] += 3 * b
        d[k, k] += 3 * b
        d[k, 0] -= 3 * b
        d[m + k, 0] += 3 * b
        d[m + k, k] -= 3 * b
        d[:m, m:n - k + 1] += b + b
        d[m:, m:n - k + 1] += b
        d[:m, n - k + 1:] += b
        d[m:, n - k + 1:] += b + b
        return d
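# Usage sketch (assumed, not from the original source): every row and column
# of magicsq(N) sums to the magic constant N*(N**2 + 1)/2, which the function
# also stores in the module-level _constant.
m5 = magicsq(5)
magic_constant = 5 * (5 ** 2 + 1) // 2  # 65
assert all(m5.sum(axis=0) == magic_constant)  # column sums
assert all(m5.sum(axis=1) == magic_constant)  # row sums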
def classify(self, x, k):
    d = self.X - tile(x.reshape(self.n, 1), self.N)
    dsq = sum(d * d, 0)
    minindex = argmin(dsq)
    temp = argsort(dsq)

    ### Custom code starting here ###
    # Tally the classes of the k nearest neighbours around the point.
    # (The loop variable must not be named `x`, or it would shadow the
    # test point passed in as an argument.)
    score = [0, 0, 0]
    for idx in range(0, k):
        if self.c[temp[idx]] == 1.0:
            score[0] += 1
        elif self.c[temp[idx]] == 2.0:
            score[1] += 1
        elif self.c[temp[idx]] == 3.0:
            score[2] += 1
    # Check to which class the point is classified.
    if score[0] > score[1] and score[0] > score[2]:
        return 1.0
    elif score[1] > score[2]:
        return 2.0
    # If all classes tie, assign the class of the nearest neighbour.
    elif score[0] == score[1] and score[0] == score[2]:
        return self.c[minindex]
    else:
        return 3.0
def ssc(signal, samplerate=16000, winlen=0.025, winstep=0.01,
        nfilt=26, nfft=512, lowfreq=0, highfreq=None, preemph=0.97):
    """Compute Spectral Subband Centroid features from an audio signal.

    :param signal: the audio signal from which to compute features. Should be an N*1 array
    :param samplerate: the samplerate of the signal we are working with.
    :param winlen: the length of the analysis window in seconds. Default is 0.025s (25 milliseconds)
    :param winstep: the step between successive windows in seconds. Default is 0.01s (10 milliseconds)
    :param nfilt: the number of filters in the filterbank, default 26.
    :param nfft: the FFT size. Default is 512.
    :param lowfreq: lowest band edge of mel filters. In Hz, default is 0.
    :param highfreq: highest band edge of mel filters. In Hz, default is samplerate/2
    :param preemph: apply preemphasis filter with preemph as coefficient. 0 is no filter. Default is 0.97.
    :returns: A numpy array of size (NUMFRAMES by nfilt) containing features. Each row holds 1 feature vector.
    """
    highfreq = highfreq or samplerate / 2
    signal = sigproc.preemphasis(signal, preemph)
    frames = sigproc.framesig(signal, winlen * samplerate, winstep * samplerate)
    pspec = sigproc.powspec(frames, nfft)
    # if things are all zeros we get problems
    pspec = pylab.where(pspec == 0, pylab.finfo(float).eps, pspec)

    fb = get_filterbanks(nfilt, nfft, samplerate, lowfreq, highfreq)
    feat = pylab.dot(pspec, fb.T)  # compute the filterbank energies
    R = pylab.tile(pylab.linspace(1, samplerate / 2, pylab.size(pspec, 1)),
                   (pylab.size(pspec, 0), 1))
    return pylab.dot(pspec * R, fb.T) / feat
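# Hypothetical usage (assumes the sigproc module and get_filterbanks() called
# above are importable from this package, e.g. python_speech_features):
#
#   sig = pylab.randn(16000)           # one second of noise at 16 kHz
#   feat = ssc(sig, samplerate=16000)  # -> (NUMFRAMES, 26) array of centroids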
def _grad_theta(psi, phitheta):
    """
    Compute the 1D gradient of a scalar field in the theta direction on a
    unit-radius spherical shell. Assumes psi is a 2D array with theta
    changing along axis 1. We use central differences for interior points,
    one-sided differences for exterior points, and address simple periodic
    boundaries.
    """
    (phi, theta) = phitheta
    dphi = p.diff(phi, axis=0)
    dtheta = p.diff(theta, axis=1)

    # pre-allocate output grid
    dpsidtheta = p.zeros(theta.shape)

    # use weighted central differences to compute theta gradient on the interior
    dpsidtheta[:, 1:-1] = ((p.diff(psi[:, :-1], axis=1) / dtheta[:, :-1]**2 +
                            p.diff(psi[:, 1:], axis=1) / dtheta[:, 1:]**2) /
                           (1 / dtheta[:, :-1] + 1 / dtheta[:, 1:]))

    # compute theta gradients at exterior points
    if p.mod(theta[0, 0], 2 * p.pi) == p.mod(theta[0, -1], 2 * p.pi):
        # use weighted central differences to compute gradient if periodic
        # boundary; the 0:1 and -1: slices keep the column dimension so the
        # division broadcasts per row
        dpsidtheta[:, [0, -1]] = p.tile((p.diff(psi[:, :2], axis=1) / dtheta[:, 0:1]**2 +
                                         p.diff(psi[:, -2:], axis=1) / dtheta[:, -1:]**2) /
                                        (1 / dtheta[:, 0:1] + 1 / dtheta[:, -1:]),
                                        (1, 2))
    else:
        # use one-sided differences to compute gradient if not a periodic boundary
        dpsidtheta[:, -1] = (p.diff(psi[:, -2:], axis=1).T / dtheta[:, -1])
        dpsidtheta[:, 0] = (p.diff(psi[:, :2], axis=1).T / dtheta[:, 0])

    return dpsidtheta
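# Usage sketch (assumed): on a (phi, theta) mesh with theta varying along
# axis 1, the theta-gradient of psi = theta**2 should match 2*theta at the
# interior points, since central differences are exact for quadratics.
import pylab as p  # _grad_theta() relies on a module-level `import pylab as p`

theta2d, phi2d = p.meshgrid(p.linspace(0.1, p.pi - 0.1, 18),
                            p.linspace(0.0, 2.0 * p.pi, 36))
grad_theta_demo = _grad_theta(theta2d ** 2, (phi2d, theta2d))
assert p.allclose(grad_theta_demo[:, 1:-1], 2.0 * theta2d[:, 1:-1])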
def classify(self, x):
    d = self.X - tile(x.reshape(self.n, 1), self.N)
    dsq = sum(d * d, 0)
    # Get the indices of the k nearest neighbours
    minindex = np.argsort(dsq)[0:self.k]
    # Majority vote: group the neighbours' labels by value
    return Counter(self.c[minindex]).most_common()[0][0]
def flica_reorder(output_dir, nmod):
    data_dir = os.path.join(output_dir, '')
    X = []
    for i in range(0, nmod):
        X.append(np.load(data_dir + '/flica_X' + str(i + 1) + '.npy'))
    M = np.load(data_dir + '/flica_result.npz')
    K = len(X)
    R = M['H'].shape[1]
    for k in range(0, K):
        # Matlab original:
        # M.X{k} * diag(M.W{k}.*sqrt(M.H.^2 * makesize(M.lambda{k},[R 1]) * M.DD(k))') %#ok<AGROW>
        if np.matrix(M['lambda1'][k]).shape[0] == R:
            tmp = np.dot(np.square(M['H']), M['lambda1'][k])
        else:
            tmp = np.dot(np.square(M['H']), tile(M['lambda1'][k], [R, 1]))
        tmp2 = np.sqrt(np.dot(tmp, M['DD'][k]))
        tmp3 = np.diag(np.multiply(M['W'][k], tmp2))
        tmp4 = np.dot(X[k], np.diag(tmp3))
        if k == 0:
            Xcat = copy.deepcopy(tmp4)
        else:
            Xcat = np.concatenate((Xcat, tmp4))
    weight = np.sum(np.square(Xcat), 0)
    order = np.argsort(weight)
    order = order[::-1]
    weight = weight[order]
    np.save(data_dir + 'new_order.npy', order)
    np.save(data_dir + 'new_weight.npy', weight)
    return order
def feature_scale(M, normalize=False, dbscale=False, norm=False, bels=False):
    """
    ::

        Perform mutually-orthogonal scaling operations, otherwise return identity:
          normalize [False]
          dbscale   [False]
          norm      [False]
    """
    if not (normalize or dbscale or norm or bels):
        return M
    else:
        X = M.copy()  # don't alter the original
        if norm:
            X = X / pylab.tile(pylab.sqrt((X * X).sum(0)), (X.shape[0], 1))
        if normalize:
            X = adb.normalize(X)
        if dbscale or bels:
            X = pylab.log10(X)
            if dbscale:
                X = 20 * X
    return X
def main():
    mu = pl.array([[0], [12], [24], [36]])
    Sigma = pl.array([[3.01602775, 1.02746769, -3.60224613, -2.08792829],
                      [1.02746769, 5.65146472, -3.98616664, 0.48723704],
                      [-3.60224613, -3.98616664, 13.04508284, -1.59255406],
                      [-2.08792829, 0.48723704, -1.59255406, 8.28742469]])
    # The data matrix is created for the above mu and Sigma.
    d, U = pl.eig(Sigma)
    L = pl.diagflat(d)
    A = pl.dot(U, pl.sqrt(L))
    X = pl.randn(4, 1000)
    # Y is the data matrix of random samples.
    Y = pl.dot(A, X) + pl.tile(mu, 1000)

    pl.figure(1)
    pl.clf()
    pl.plot(X[0], Y[1], '+', color='#0000FF', label='i=0,j=1')
    pl.plot(X[0], Y[2], '+', color='#FF0000', label='i=0,j=2')
    pl.plot(X[0], Y[3], '+', color='#00FF00', label='i=0,j=3')
    pl.plot(X[1], Y[0], 'x', color='#FFFF00', label='i=1,j=0')
    pl.plot(X[1], Y[2], 'x', color='#00FFFF', label='i=1,j=2')
    pl.plot(X[1], Y[3], 'x', color='#444444', label='i=1,j=3')
    pl.plot(X[2], Y[0], '.', color='#774411', label='i=2,j=0')
    pl.plot(X[2], Y[1], '.', color='#222222', label='i=2,j=1')
    pl.plot(X[2], Y[3], '.', color='#AAAAAA', label='i=2,j=3')
    pl.plot(X[3], Y[0], '+', color='#FFAA22', label='i=3,j=0')
    pl.plot(X[3], Y[1], '+', color='#22AAFF', label='i=3,j=1')
    pl.plot(X[3], Y[2], '+', color='#FFDD00', label='i=3,j=2')
    pl.legend()
    pl.savefig('fig21.png')
def plot():
    (ml_idxs, ml_lengths, ml_lanes, or_idxs, capacity, w, ff, j, Time,
     Lengths, XLengths, Lanes, density, flow, velocity, queues, dt, n,
     m) = scen_output.load()
    rho_crit = pylab.array([fm / vff for fm, vff in zip(capacity, ff)]) * 1000.0
    rho_max = pylab.array(j) * 1000.0
    norm_density = density / pylab.tile(rho_crit, (m, 1)).transpose()
    norm_density_max = density / pylab.tile(rho_max, (m, 1)).transpose()
    norm_flow = flow / (pylab.tile(capacity, (m, 1)).transpose() * 3600.0)

    nplots = 2
    mplots = 4
    g = [0]

    def plot_grid(data, title):
        g[0] += 1
        pylab.subplot(nplots, mplots, g[0])
        pylab.pcolormesh(Time, XLengths, data)
        pylab.colorbar()
        pylab.title(title)
        pylab.xlabel("Time (hours)")
        pylab.ylabel("Offset (km)")

    plot_grid(density, "Mainline Density (veh / km-lane)")
    plot_grid(queues, "Onramps (veh)")
    plot_grid(flow, "Flow (veh / hour / lane)")
    plot_grid(velocity, "Velocity (km / hr)")
    plot_grid(norm_density, "Ratio over Critical Mainline")
    plot_grid(norm_density_max, "Ratio over Max Mainline")
    plot_grid(norm_flow, "Ratio over Capacity Flow")
    pylab.show()
def raytrace(self, uv, lam=1):
    uvMat = np.atleast_2d(uv)
    if uvMat.shape[0] == 1:
        uvMat = uvMat.T
    npts = uvMat.shape[1]
    invPx = self.pinvP.dot(homogeneous.homogenize(uvMat))
    XLambda = invPx + pl.tile(lam * homogeneous.homogenize(self.C), (npts, 1)).T
    return homogeneous.dehomogenize(XLambda)
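# Hypothetical usage (assumed names, for illustration only): given a camera
# object `cam` whose pinvP is the pseudo-inverse of its 3x4 projection matrix
# and whose C is its camera centre, back-project pixel coordinates into 3D
# points a parameter `lam` along each viewing ray:
#
#   pts3d = cam.raytrace(uv, lam=2.0)   # uv is a 2xN array of pixel coords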
def translate_back(outputs, threshold=0.7, pos=0):
    """Translate back. Thresholds on class 0, then assigns
    the maximum class to each region."""
    labels, n = measurements.label(outputs[:, 0] < threshold)
    mask = tile(labels.reshape(-1, 1), (1, outputs.shape[1]))
    maxima = measurements.maximum_position(outputs, mask,
                                           arange(1, amax(mask) + 1))
    if pos:
        return maxima
    return [c for (r, c) in maxima]
def translate_back(outputs, threshold=0.7, pos=0):
    labels, n = measurements.label(outputs[:, 0] < threshold)
    mask = tile(labels.reshape(-1, 1), (1, outputs.shape[1]))
    maxima = measurements.maximum_position(outputs, mask,
                                           arange(1, amax(mask) + 1))
    if pos == 1:
        return maxima
    if pos == 2:
        return [(c, outputs[r, c]) for (r, c) in maxima]
    return [c for (r, c) in maxima]
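# Usage sketch (assumed): `outputs` is a (time, classes) posteriogram whose
# column 0 is the "blank" class; regions where the blank dips below the
# threshold are labelled, and the argmax within each region is returned.
# Assumes numpy names (array, tile, arange, amax) and scipy.ndimage's
# measurements are imported as in the surrounding module.
#
#   outputs = array([[0.9, 0.1, 0.0],
#                    [0.2, 0.8, 0.0],   # region 1: class 1 peaks here
#                    [0.3, 0.6, 0.1],
#                    [0.9, 0.0, 0.1],
#                    [0.1, 0.0, 0.9],   # region 2: class 2 peaks here
#                    [0.9, 0.1, 0.0]])
#   translate_back(outputs)         # -> [1, 2]
#   translate_back(outputs, pos=1)  # -> [(1, 1), (4, 2)], (row, col) maxima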
def mi(x, y, bins=11):
    """Given two arrays x and y of equal length, return their mutual
    information in bits."""
    Hxy, xe, ye = pylab.histogram2d(x, y, bins=bins)
    Hx = Hxy.sum(axis=1)
    Hy = Hxy.sum(axis=0)

    Pxy = Hxy / float(x.size)
    Px = Hx / float(x.size)
    Py = Hy / float(x.size)

    pxy = Pxy.ravel()
    px = Px.repeat(Py.size)
    py = pylab.tile(Py, Px.size)

    idx = pylab.find((pxy > 0) & (px > 0) & (py > 0))
    return (pxy[idx] * pylab.log2(pxy[idx] / (px[idx] * py[idx]))).sum()
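# Usage sketch (assumed): two identical one-bit variables share one bit of
# mutual information. Note mi() depends on the legacy pylab.find helper, so
# this runs only on matplotlib versions that still provide it.
import pylab  # mi() relies on a module-level pylab import

x_demo = (pylab.randn(10000) > 0).astype(float)
print(mi(x_demo, x_demo, bins=2))  # approximately 1.0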
def classify(self, x, k):
    d = self.X - tile(x.reshape(self.n, 1), self.N)
    # Occurrence count of each class among the k nearest neighbours
    occd = {}
    for i in range(k):
        dsq = sum(d * d, 0)
        minindex = argmin(dsq)
        if self.c[minindex] in occd:
            occd[self.c[minindex]] += 1
        else:
            occd[self.c[minindex]] = 1
        # Prevent the next iteration from finding this index again
        d[:, minindex] = self.max + 1
    # Return the name of the class that occurred most
    # (items() replaces the Python 2-only iteritems())
    return max(occd.items(), key=operator.itemgetter(1))[0]
def feature_scale(M, normalize=False, dbscale=False, norm=False, bels=False):
    """
    Perform mutually-orthogonal scaling operations, otherwise return identity:
      normalize [False]
      dbscale   [False]
      norm      [False]
    """
    if not (normalize or dbscale or norm or bels):
        return M
    else:
        X = M.copy()  # don't alter the original
        if norm:
            X = X / P.tile(P.sqrt((X * X).sum(0)), (X.shape[0], 1))
        if normalize:
            X = _normalize(X)
        if dbscale or bels:
            X = P.log10(P.clip(X, 0.0001, X.max()))
            if dbscale:
                X = 20 * X
    return X
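# Usage sketch (assumed): L2-normalise the columns of a spectrogram-like
# matrix, then put it on a decibel scale; the operations compose because they
# are applied in sequence to a copy of M.
import pylab as P  # feature_scale() relies on a module-level `import pylab as P`

M_demo = P.rand(12, 4) + 0.1
X_demo = feature_scale(M_demo, dbscale=True, norm=True)
print(X_demo.shape)  # (12, 4); columns were unit-norm before the dB scaling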
def mutual_information(x, y, bins=11):
    """Given two arrays x and y of equal length, return their mutual
    information in bits

    >>> N = 10000
    >>> xi = pylab.randn(N)
    >>> xi[pylab.find(xi>0)] = 1
    >>> xi[pylab.find(xi<=0)] = 0
    >>> yi = xi
    >>> print round(mutual_information(xi, yi, 1000),2) #One bit of info
    1.0

    >>> N = 10000
    >>> xi = pylab.uniform(size=N)
    >>> yi = pylab.floor(xi*8)
    >>> print round(mutual_information(xi, yi, 1000),2) #Three bits of info
    3.0

    >>> N = 100000
    >>> xi = pylab.randn(N)
    >>> yi = pylab.randn(N)
    >>> print round(mutual_information(xi, yi),2) #Should be zero given enough data and not too sparse binning
    0.0
    """
    Hxy, xe, ye = pylab.histogram2d(x, y, bins=bins)
    Hx = Hxy.sum(axis=1)
    Hy = Hxy.sum(axis=0)

    Pxy = Hxy / float(x.size)
    Px = Hx / float(x.size)
    Py = Hy / float(x.size)

    pxy = Pxy.ravel()
    px = Px.repeat(Py.size)
    py = pylab.tile(Py, Px.size)

    idx = pylab.find((pxy > 0) & (px > 0) & (py > 0))
    mi = (pxy[idx] * pylab.log2(pxy[idx] / (px[idx] * py[idx]))).sum()
    return mi
def __init__(self, X, c):
    self.n, self.N = X.shape
    self.X = X
    self.mu = empty((3, self.n))
    self.cov = empty((3, self.n, self.n))
    self.P = empty(3)
    cond = zeros(self.N)
    for i in range(0, 3):
        cond = cond + 1.0
        indices = where(c == cond)
        # Xa holds every column of X whose class equals i + 1.0.
        Xa = [X[:, b] for b in indices]
        # where() wraps the columns in an extra array, which we don't want.
        Xa = Xa[0]
        Na = shape(Xa)[1]
        self.mu[i] = mean(Xa, axis=1)
        # tile() repeats mu across Na columns so the class mean can be
        # subtracted from Xa column by column.
        self.cov[i] = cov(Xa - tile(self.mu[i], (Na, 1)).T)
        # The prior probability of this class.
        self.P[i] = (Na * 1.0) / self.N
def _grad_phi(psi, phitheta):
    """
    Compute the 1D gradient of a scalar field in the phi direction on a
    unit-radius spherical shell. Assumes psi is a 2D array with phi
    changing along axis 0. We use central differences for interior points,
    one-sided differences for exterior points, and address simple periodic
    boundaries.
    """
    (phi, theta) = phitheta
    dphi = p.diff(phi, axis=0)
    dtheta = p.diff(theta, axis=1)

    # pre-allocate output grid
    dpsidphi = p.zeros(phi.shape)

    # use weighted central differences to compute gradient on the interior
    dpsidphi[1:-1, :] = (((p.diff(psi[:-1, :], axis=0) / dphi[:-1, :]**2 +
                           p.diff(psi[1:, :], axis=0) / dphi[1:, :]**2) /
                          (1 / dphi[:-1, :] + 1 / dphi[1:, :])) *
                         1. / p.sin(theta[1:-1, :]))

    # compute phi gradients at exterior points
    if p.mod(phi[0, 0], 2 * p.pi) == p.mod(phi[-1, 0], 2 * p.pi):
        # use weighted central differences to compute gradient if periodic boundary
        dpsidphi[[0, -1], :] = p.tile(((p.diff(psi[:2, :], axis=0) / dphi[0, :]**2 +
                                        p.diff(psi[-2:, :], axis=0) / dphi[-1, :]**2) /
                                       (1 / dphi[0, :] + 1 / dphi[-1, :])) *
                                      1. / p.sin(theta[0, :]),
                                      (2, 1))
    else:
        # use one-sided differences to compute gradient if not a periodic boundary
        dpsidphi[-1, :] = (p.diff(psi[-2:, :], axis=0) / dphi[-1, :] /
                           p.sin(theta[-1, :]))
        dpsidphi[0, :] = (p.diff(psi[:2, :], axis=0) / dphi[0, :] /
                          p.sin(theta[0, :]))

    return dpsidphi
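# Usage sketch (assumed): for psi = phi, the phi-gradient on the unit sphere
# is 1/sin(theta); the central differences are exact for a field linear in
# phi, so the interior rows match the analytic value.
import pylab as p  # _grad_phi() relies on a module-level `import pylab as p`

theta2d, phi2d = p.meshgrid(p.linspace(0.1, p.pi - 0.1, 18),
                            p.linspace(0.0, 1.5 * p.pi, 36))  # non-periodic
grad_phi_demo = _grad_phi(phi2d, (phi2d, theta2d))
assert p.allclose(grad_phi_demo[1:-1, :], 1.0 / p.sin(theta2d[1:-1, :]))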
import pylab as plt

n = 1000
mu = [[0], [0], [0], [0]]
Sigma = [[3.01602775, 1.02746769, -3.60224613, -2.08792829],
         [1.02746769, 5.65146472, -3.98616664, 0.48723704],
         [-3.60224613, -3.98616664, 13.04508284, -1.59255406],
         [-2.08792829, 0.48723704, -1.59255406, 8.28742469]]

d, U = plt.eig(Sigma)  # Sigma = U L Ut
L = plt.diagflat(d)
A = plt.dot(U, plt.sqrt(L))  # Required transform matrix.
X = plt.randn(4, n)  # 4*n matrix with each element ~ N(0,1)
# 4*n matrix; each column vector ~ N(mu,Sigma), random draws from the distribution.
Y = plt.dot(A, X) + plt.tile(mu, n)

Ybar = [[avg] for avg in plt.mean(Y, 1)]  # Mean along the 1 axis.
Yzm = Y - plt.tile(Ybar, n)  # Subtract the mean from each column.
# Unbiased estimator for the covariance matrix; the parentheses around
# (n - 1) matter, since `/ n - 1` would divide by n and then subtract 1.
S = plt.dot(Yzm, plt.transpose(Yzm)) / (n - 1)
print(Ybar, S)
def classify(self, x):
    d = self.X - tile(x.reshape(self.n, 1), self.N)
    dsq = sum(d * d, 0)
    # Labels of the k nearest neighbours (argpartition avoids a full sort)
    neighbours = self.c[argpartition(dsq, self.k)[:self.k]]
    # Majority vote over the neighbour labels
    most_common = argmax(bincount(neighbours.astype(int)))
    return most_common
def classify(self, x):
    d = self.X - tile(x.reshape(self.n, 1), self.N)
    dsq = sum(d * d, 0)
    minindex = argmin(dsq)
    return self.c[minindex]
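# Minimal harness (assumed, for illustration only): the classify() methods in
# this collection expect self.X of shape (n, N) with training points as
# columns, self.c of length N with their labels, and self.n, self.N set
# accordingly.
from pylab import argmin, array, tile


class _NearestNeighbour:
    def __init__(self, X, c):
        self.n, self.N = X.shape
        self.X = X
        self.c = c

    classify = classify  # reuse the 1-NN function defined above


nn = _NearestNeighbour(array([[0.0, 1.0], [0.0, 1.0]]), array([1.0, 2.0]))
print(nn.classify(array([0.9, 0.9])))  # -> 2.0 (nearest column is (1, 1))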
def CreateFrames(path='./', run='', t0='', t1='', hemisphere='north',
                 geoGrid=False, ignoreBinary=False, binaryType='pkl',
                 configFile=None):
    """
    Compute hemispheric grids of deltaB given LFM-MIX output files in path.

    Computes:
      pngFiles - a list of PNG filenames, each corresponding to a snapshot
                 in time of LFM-MIX ground deltaB vector fields

    NOTE: while the output is a list of filenames, the function generates
          binary pkl/mat files that can be read in by other software for
          further analysis.

    FIXME: southern hemisphere summary plots are presented from a point of
           view below the south pole, looking up. Likewise for the data
           files. This is not technically a right-handed frame of reference,
           so any analysis of southern hemisphere data must rotate fields
           180 degrees about the X axis, or the 0 longitude, 0 latitude
           line. This odd data file convention is consistent with how
           southern hemisphere data is stored in MIX files, but there is a
           strong argument to just store everything assuming the same
           (northern) POV, and rotate to a southern POV for display
           purposes only; this will require significant work.

    Requires:
      Nothing, all inputs are optional

    Optional:
      path         - path to data directory holding LFM and MIX model
                     output files (default is current directory)
      run          - output filename prefix identifying LFM-MIX run (i.e.,
                     the part of the filename prior to
                     [mhd|mix]_yyyy-mm-ddTHH-MM-SSZ.hdf)
                     (default is any mhd|mix files in path)
      t0           - datetime object specifying the earliest available
                     time step to include in the extraction
                     (default is earliest available)
      t1           - datetime object specifying the latest available
                     time step to include in the extraction
                     (default is last available)
      hemisphere   - specify 'north' or 'south' hemisphere
                     (default is 'north')
      geoGrid      - if True, assume observatory coordinates are in
                     geographic coordinates rather than solar magnetic;
                     same for outputs (default is False)
      ignoreBinary - if True, ignore any pre-computed binary files and
                     re-compute everything from scratch; NOTE: individual
                     binary files will be ignored anyway if they are
                     incompatible with specified inputs, but this option
                     avoids reading the binary file entirely
                     (default is False)
      binaryType   - binary type to generate, NOT to read in...routine
                     looks for PKL files first, then mat files, then
                     proceeds to re-compute if neither are available
                     (default is 'pkl')
      configFile   - specifies plotting config file; if None, the default
                     config file is path/figs/north|south/deltaBSum.config;
                     if this doesn't exist, a new one is created with
                     default config parameters
""" assert( (hemisphere == 'north') | (hemisphere == 'south') ) hemiSelect = {'north': 'North', 'south': 'South'}[hemisphere] # Make sure the output directory exisits if not make it dirname = os.path.join(path, 'figs', hemisphere) if not os.path.exists(dirname): os.makedirs( dirname ) print(('Rendering ' + hemiSelect + 'ern hemisphere, storing frames at ' + dirname)) #Now check to make sure the MIX files are correct dMIX = pyLTR.Models.MIX(path, run) modelVars = dMIX.getVarNames() for v in ['Grid X', 'Grid Y', 'Potential North [V]', 'Potential South [V]', 'FAC North [A/m^2]', 'FAC South [A/m^2]', 'Pedersen conductance North [S]', 'Pedersen conductance South [S]', 'Hall conductance North [S]', 'Hall conductance South [S]']: assert( v in modelVars ) timeRange = dMIX.getTimeRange() #Now check to make sure the LFM files are correct dLFM = pyLTR.Models.LFM(path, run) modelVars = dLFM.getVarNames() for v in ['X_grid', 'Y_grid', 'Z_grid', 'bx_', 'by_', 'bz_']: assert( v in modelVars ) # check that LFM output timeRanges are exactly the same as MIX output timeRanges trLFM = dLFM.getTimeRange() # _roundTime() rounds to nearest minute by default, which should suffice here # NOTE: we do NOT change the time stamps at all, just make sure they match # to a 1-minute tolerance if list(map(_roundTime, timeRange)) != list(map(_roundTime, trLFM)): raise Exception(('Mismatched MIX and LFM output files')) if len(timeRange) == 0: raise Exception(('No data files found. Are you pointing to the correct run directory?')) ## Original code defaulted to entire data set if the user-supplied time range ## fell outside of the available data, almost certainly not a desired result. ## Now if the user requests a time range that falls completely outside of the ## available data, and exception is raised. ##index0 = 0 ##if t0: ## for i,t in enumerate(timeRange): ## if t0 >= t: ## index0 = i ## ###index1 = len(timeRange)-1 ##index1 = len(timeRange) # we were skipping the last valid time step ##if t1: ## for i,t in enumerate(timeRange): ## if t1 >= t: ## #index1 = i ## index1 = i + 1 # we were skipping the last valid time step if t0: if t1 and t1 < timeRange[0]: # upper time stamp below lowest available time stamp raise Exception('Requested time range falls outside available data') if t0 > timeRange[-1]: # lower time stamp above highest available time stamp raise Exception('Requested time range falls outside available data') for i,t in enumerate(timeRange): if t0 >= t: index0 = i else: index0 = 0 if t1: if t0 and t0 > timeRange[-1]: # lower time stamp above highest available time stamp raise Exception('Requested time range falls outside available data') if t1 < timeRange[0]: # upper time stamp below lowest available time stamp raise Exception('Requested time range falls outside available data') for i,t in enumerate(timeRange): if t1 >= t: index1 = i+1 else: index1 = len(timeRange) if index1 > index0: print(( 'Extracting LFM and MIX quantities for time series over %d time steps.' % (index1-index0) )) else: raise Exception('Requested time range is invalid') # Output a status bar displaying how far along the computation is. 
    progress = pyLTR.StatusBar(0, index1 - index0)
    progress.start()

    # Pre-compute r and theta
    x = dMIX.read('Grid X', timeRange[index0])
    xdict = {'data': x * 6500e3, 'name': 'X', 'units': r'm'}
    y = dMIX.read('Grid Y', timeRange[index0])
    ydict = {'data': y * 6500e3, 'name': 'Y', 'units': r'm'}
    theta = n.arctan2(y, x)
    theta[theta < 0] = theta[theta < 0] + 2 * n.pi
    # plotting routines now rotate local noon to point up
    #theta = theta + n.pi/2 # to put noon up
    r = n.sqrt(x**2 + y**2)
    # plotting routines now expect longitude and colatitude, in radians,
    # stored in dictionaries
    longitude = {'data': theta, 'name': r'\phi', 'units': r'rad'}
    colatitude = {'data': n.arcsin(r), 'name': r'\theta', 'units': r'rad'}

    # Deal with the plot options
    if (configFile == None and
            os.path.exists(os.path.join(dirname, 'deltaBSum.config'))):
        configFile = os.path.join(dirname, 'deltaBSum.config')

    if configFile == None:
        # scalar radial magnetic perturbations
        dBradialTotOpts = {'min': -100, 'max': 100, 'colormap': 'jet'}
        dBradialIonOpts = {'min': -100, 'max': 100, 'colormap': 'jet'}
        dBradialFACOpts = {'min': -100, 'max': 100, 'colormap': 'jet'}
        dBradialMagOpts = {'min': -100, 'max': 100, 'colormap': 'jet'}

        # 2D vector horizontal perturbations
        dBhvecTotOpts = {'width': .0025, 'scale': 1e3, 'pivot': 'middle'}
        dBhvecIonOpts = {'width': .0025, 'scale': 1e3, 'pivot': 'middle'}
        dBhvecFACOpts = {'width': .0025, 'scale': 1e3, 'pivot': 'middle'}
        dBhvecMagOpts = {'width': .0025, 'scale': 1e3, 'pivot': 'middle'}

        # alternate pole marker options (same defaults as the configFile
        # branch below; must be defined before optsObject references it)
        altPoleOpts = {'poleMarker1': 'x', 'poleMarker2': 'x',
                       'poleSize1': 7, 'poleSize2': 5,
                       'poleWidth1': 3, 'poleWidth2': 1,
                       'poleColor1': 'blue', 'poleColor2': 'white'}

        # place all config dictionaries in one big dictionary
        optsObject = {'dBradialTot': dBradialTotOpts,
                      'dBradialIon': dBradialIonOpts,
                      'dBradialFAC': dBradialFACOpts,
                      'dBradialMag': dBradialMagOpts,
                      'dBhvecTot': dBhvecTotOpts,
                      'dBhvecIon': dBhvecIonOpts,
                      'dBhvecFAC': dBhvecFACOpts,
                      'dBhvecMag': dBhvecMagOpts,
                      'altPole': altPoleOpts}
        configFilename = os.path.join(dirname, 'deltaBSum.config')
        print(("Writing plot config file at " + configFilename))
        f = open(configFilename, 'w')
        f.write(pyLTR.yaml.safe_dump(optsObject, default_flow_style=False))
        f.close()
    else:
        f = open(configFile, 'r')
        optsDict = pyLTR.yaml.safe_load(f.read())
        f.close()

        if ('dBradialTot' in optsDict):
            dBradialTotOpts = optsDict['dBradialTot']
        else:
            dBradialTotOpts = {'min': -100., 'max': 100., 'colormap': 'jet'}

        if ('dBradialIon' in optsDict):
            dBradialIonOpts = optsDict['dBradialIon']
        else:
            dBradialIonOpts = {'min': -100., 'max': 100., 'colormap': 'jet'}

        if ('dBradialFAC' in optsDict):
            dBradialFACOpts = optsDict['dBradialFAC']
        else:
            dBradialFACOpts = {'min': -100., 'max': 100., 'colormap': 'jet'}

        if ('dBradialMag' in optsDict):
            dBradialMagOpts = optsDict['dBradialMag']
        else:
            dBradialMagOpts = {'min': -100., 'max': 100., 'colormap': 'jet'}

        if ('dBhvecTot' in optsDict):
            dBhvecTotOpts = optsDict['dBhvecTot']
        else:
            dBhvecTotOpts = {'min': -100., 'max': 100., 'colormap': 'jet'}

        if ('dBhvecIon' in optsDict):
            dBhvecIonOpts = optsDict['dBhvecIon']
        else:
            dBhvecIonOpts = {'min': -100., 'max': 100., 'colormap': 'jet'}

        if ('dBhvecFAC' in optsDict):
            dBhvecFACOpts = optsDict['dBhvecFAC']
        else:
            dBhvecFACOpts = {'min': -100., 'max': 100., 'colormap': 'jet'}

        if ('dBhvecMag' in optsDict):
            dBhvecMagOpts = optsDict['dBhvecMag']
        else:
            dBhvecMagOpts = {'min': -100., 'max': 100., 'colormap': 'jet'}

        if ('altPole' in optsDict):
            altPoleOpts = optsDict['altPole']
        else:
            altPoleOpts = {'poleMarker1': 'x', 'poleMarker2': 'x',
                           'poleSize1': 7, 'poleSize2': 5,
                           'poleWidth1': 3, 'poleWidth2': 1,
                           'poleColor1': 'blue', 'poleColor2': 'white'}

    # initialize output list
    pngFilenames = []
    for i, time in enumerate(timeRange[index0:index1]):
        try:
            try:
                # ignore binary file even if one exists
                if ignoreBinary:
                    raise Exception

                # look for a .pkl file that already holds all the data required for
                # subsequent plots before recalculating all the derived data...if
                # this fails, look for a .mat file; if this fails, fall through to
                # recalculate all summary data
                filePrefix = os.path.join(path, 'figs', hemisphere)
                # this is a possible race condition, but try/except just doesn't do what I want
                if os.path.exists(os.path.join(filePrefix,
                        'frame_deltaB_%04d-%02d-%02dT%02d-%02d-%02dZ.pkl' %
                        (time.year, time.month, time.day,
                         time.hour, time.minute, time.second))):
                    binFilename = os.path.join(filePrefix,
                        'frame_deltaB_%04d-%02d-%02dT%02d-%02d-%02dZ.pkl' %
                        (time.year, time.month, time.day,
                         time.hour, time.minute, time.second))
                    fh = open(binFilename, 'rb')
                    allDict = pickle.load(fh)
                    fh.close()
                elif os.path.exists(os.path.join(filePrefix,
                        'frame_deltaB_%04d-%02d-%02dT%02d-%02d-%02dZ.mat' %
                        (time.year, time.month, time.day,
                         time.hour, time.minute, time.second))):
                    binFilename = os.path.join(filePrefix,
                        'frame_deltaB_%04d-%02d-%02dT%02d-%02d-%02dZ.mat' %
                        (time.year, time.month, time.day,
                         time.hour, time.minute, time.second))
                    fh = open(binFilename, 'rb')
                    allDict = sio.loadmat(fh, squeeze_me=True)
                    fh.close()
                else:
                    print(('No binary file found, recalculating ' +
                           'frame_deltaB_%04d-%02d-%02dT%02d-%02d-%02dZ' %
                           (time.year, time.month, time.day,
                            time.hour, time.minute, time.second) +
                           '...'))
                    raise Exception

                # ignore binary file if the coordinate system is not consistent with geoGrid
                if ((geoGrid and allDict['coordinates'] != 'Geographic') or
                        (not(geoGrid) and allDict['coordinates'] != 'Solar Magnetic')):
                    raise Exception

                phi_dict, theta_dict, rho_dict = allDict['dB_obs']
                phi = phi_dict['data'] * 1.      # '*1' forces array of floats, NOT objects
                theta = theta_dict['data'] * 1.  # '*1' forces array of floats, NOT objects
                rho = rho_dict['data'] * 1.      # '*1' forces array of floats, NOT objects
                if geoGrid:
                    phi_geo = phi
                    theta_geo = theta
                    rho_geo = rho

                dBphi_ion_dict, dBtheta_ion_dict, dBrho_ion_dict = allDict['dB_ion']
                dBphi_ion = dBphi_ion_dict['data'] / 1e9      # convert to Tesla
                dBtheta_ion = dBtheta_ion_dict['data'] / 1e9  # convert to Tesla
                dBrho_ion = dBrho_ion_dict['data'] / 1e9      # convert to Tesla

                dBphi_fac_dict, dBtheta_fac_dict, dBrho_fac_dict = allDict['dB_fac']
                dBphi_fac = dBphi_fac_dict['data'] / 1e9      # convert to Tesla
                dBtheta_fac = dBtheta_fac_dict['data'] / 1e9  # convert to Tesla
                dBrho_fac = dBrho_fac_dict['data'] / 1e9      # convert to Tesla

                dBphi_mag_dict, dBtheta_mag_dict, dBrho_mag_dict = allDict['dB_mag']
                dBphi_mag = dBphi_mag_dict['data'] / 1e9      # convert to Tesla
                dBtheta_mag = dBtheta_mag_dict['data'] / 1e9  # convert to Tesla
                dBrho_mag = dBrho_mag_dict['data'] / 1e9      # convert to Tesla

            except:
                # first read the MIX data
                vals = dMIX.read('Potential ' + hemiSelect + ' [V]', time) / 1000.0
                psi_dict = {'data': vals, 'name': r'$\Phi$', 'units': r'kV'}

                vals = dMIX.read('Pedersen conductance ' + hemiSelect + ' [S]', time)
                sigmap_dict = {'data': vals, 'name': r'$\Sigma_{P}$', 'units': r'S'}

                vals = dMIX.read('Hall conductance ' + hemiSelect + ' [S]', time)
                sigmah_dict = {'data': vals, 'name': r'$\Sigma_{H}$', 'units': r'S'}

                vals = dMIX.read('FAC ' + hemiSelect + ' [A/m^2]', time)
                fac_dict = {'data': vals * 1e6, 'name': r'$J_\parallel$', 'units': r'$\mu A/m^2$'}

                # then compute the electric field vectors
                ((phi_dict, theta_dict),
                 (ephi_dict, etheta_dict)) = pyLTR.Physics.MIXCalcs.efieldDict(
                    xdict, ydict, psi_dict, ri=6500e3)

                # then compute total, Pedersen, and Hall current vectors
                if hemisphere == 'north':
                    ((Jphi_dict, Jtheta_dict),
                     (Jpedphi_dict, Jpedtheta_dict),
                     (Jhallphi_dict, Jhalltheta_dict)) = pyLTR.Physics.MIXCalcs.jphithetaDict(
                        (ephi_dict, etheta_dict), sigmap_dict, sigmah_dict,
                        colatitude['data'])
                else:
                    ((Jphi_dict, Jtheta_dict),
                     (Jpedphi_dict, Jpedtheta_dict),
                     (Jhallphi_dict, Jhalltheta_dict)) = pyLTR.Physics.MIXCalcs.jphithetaDict(
                        (ephi_dict, etheta_dict), sigmap_dict, sigmah_dict,
                        n.pi - colatitude['data'])

                # then generate the SSECS, starting with min/max bounds of
                # ionosphere segments
                phi = phi_dict['data']
                theta = theta_dict['data']

                # calculate MIX grid cell boundaries in phi and theta
                rion_min = [None] * 3  # initialize empty 3-list
                rion_min[0] = p.zeros(phi.shape)
                rion_min[0][1:, :] = phi[1:, :] - p.diff(phi, axis=0) / 2.
                rion_min[0][0, :] = phi[0, :] - p.diff(phi[0:2, :], axis=0).squeeze() / 2.

                rion_min[1] = p.zeros(theta.shape)
                rion_min[1][:, 1:] = theta[:, 1:] - p.diff(theta, axis=1) / 2.
                rion_min[1][:, 0] = theta[:, 0] - p.diff(theta[:, 0:2], axis=1).squeeze() / 2.

                rion_min[2] = p.zeros(theta.shape)
                rion_min[2][:, :] = 6500.e3

                rion_max = [None] * 3  # initialize empty 3-list
                rion_max[0] = p.zeros(phi.shape)
                rion_max[0][:-1, :] = phi[:-1, :] + p.diff(phi, axis=0) / 2.
                rion_max[0][-1, :] = phi[-1, :] + p.diff(phi[-2:, :], axis=0).squeeze() / 2.

                rion_max[1] = p.zeros(theta.shape)
                rion_max[1][:, :-1] = theta[:, :-1] + p.diff(theta, axis=1) / 2.
                rion_max[1][:, -1] = theta[:, -1] + p.diff(theta[:, -2:], axis=1).squeeze() / 2.

                rion_max[2] = p.zeros(theta.shape)
                rion_max[2][:, :] = p.Inf

                # generate SSECS for ionospheric currents only
                (rv_ion, Jv_ion, dv_ion) = pyLTR.Physics.SSECS.ssecs_sphere(
                    [rion_min[0], rion_min[1], rion_min[2]],
                    [rion_max[0], rion_max[1], rion_min[2]],
                    (Jphi_dict['data'] / 1e6, Jtheta_dict['data'] / 1e6),
                    10, False)

                # generate SSECS inside LFM inner boundary
                (rv_IBin, Jv_IBin, dv_IBin) = pyLTR.Physics.SSECS.ssecs_sphere(
                    [rion_min[0], rion_min[1], rion_min[2]],
                    [rion_max[0], rion_max[1], 2.5 * rion_min[2]],
                    (Jphi_dict['data'] / 1e6, Jtheta_dict['data'] / 1e6),
                    10, False)

                # do NOT attempt to generate SSECS for FACs alone...this isn't
                # possible with existing code; however, the deltaB from FACs is
                # the difference between deltaBs calculated from the two SSECS
                # above

                # generate SSECS outside LFM inner boundary
                # (this is serving as a proxy for magnetosphere currents for now)
                (rv_mag, Jv_mag, dv_mag) = pyLTR.Physics.SSECS.ssecs_sphere(
                    [rion_min[0], rion_min[1], 2.5 * rion_min[2]],
                    [rion_max[0], rion_max[1], rion_min[2]],
                    (Jphi_dict['data'] / 1e6, Jtheta_dict['data'] / 1e6),
                    10, False)

                # extract currents from MHD data
                # NOTE: LFM time stamps are not necessarily the same as MIX, so
                # it is necessary to use the LFM's timeRange list (i.e., trLFM)
                x = dLFM.read('X_grid', trLFM[index0:index1][i])  # this is in cm
                y = dLFM.read('Y_grid', trLFM[index0:index1][i])  # this is in cm
                z = dLFM.read('Z_grid', trLFM[index0:index1][i])  # this is in cm
                Bx = dLFM.read('bx_', trLFM[index0:index1][i])    # this is in G
                By = dLFM.read('by_', trLFM[index0:index1][i])    # this is in G
                Bz = dLFM.read('bz_', trLFM[index0:index1][i])    # this is in G
                hgrid = pyLTR.Grids.HexahedralGrid(x, y, z)
                xB, yB, zB = hgrid.cellCenters()
                hgridcc = pyLTR.Grids.HexahedralGrid(xB, yB, zB)  # B is at cell centers
                Jx, Jy, Jz = pyLTR.Physics.LFMCurrent(hgridcc, Bx, By, Bz, rion=1)
                # ...should be A/m^2 given default input units
                xJ, yJ, zJ = hgridcc.cellCenters()  # ...and J is at the centers of these cells
                xJ = xJ / 100  # ...and the coordinates should be in meters for BS.py
                yJ = yJ / 100
                zJ = zJ / 100
                ldV = hgridcc.cellVolume() / (100**3)  # ...and we need dV in m^3 for BS.py

                if hemisphere == 'south':
                    # it's easier to rotate the LFM grid than convert the MIX
                    # coordinates for southern hemisphere output
                    yJ = -yJ
                    zJ = -zJ
                    Jy = -Jy
                    Jz = -Jz

                #
                # This is a little ugly...Quad (and Oct) resolution LFM runs use
                # MIX grids that are different resolutions than Single and Double
                # runs; not surprising, but I was slow to figure this out. Anyway,
                # we need to visualize and cross-validate on similar grids, thus
                # the following kludge ('kludge' because the better answer is to
                # specify a useful grid without any reference to the MIX grid).
                #
                if phi.size == 181 * 27:
                    pass
                elif phi.size == 361 * 48:
                    phi = phi[::2, [0, 1, 2, 3, 4] + list(range(5, 48, 2))]
                    theta = theta[::2, [0, 1, 2, 3, 4] + list(range(5, 48, 2))]
                else:
                    raise Exception('Unrecognized MIX grid dimensions')

                # calculate deltaBs on a grid that decimates the MIX grid by 2/3,
                # and removes the lowest 3 colatitudes
                phi = phi[::3, 3:]
                theta = theta[::3, 3:]
                rho = p.tile(6378e3, phi.shape)

                if geoGrid:
                    phi_geo = phi
                    theta_geo = theta
                    rho_geo = rho
                    x, y, z = pyLTR.transform.SPHtoCAR(phi, theta, rho)
                    x, y, z = pyLTR.transform.GEOtoSM(x, y, z, time)
                    phi, theta, rho = pyLTR.transform.CARtoSPH(x, y, z)

                # deltaB for ionospheric currents
                (dBphi_ion, dBtheta_ion, dBrho_ion) = pyLTR.Physics.BS.bs_sphere(
                    rv_ion, Jv_ion, dv_ion, (phi, theta, rho))

                # deltaB for currents inside IB
                (dBphi_IBin, dBtheta_IBin, dBrho_IBin) = pyLTR.Physics.BS.bs_sphere(
                    rv_IBin, Jv_IBin, dv_IBin, (phi, theta, rho))

                # difference between dB*_IBin and dB*_ion is the FAC inside IB
                dBphi_fac = dBphi_IBin - dBphi_ion
                dBtheta_fac = dBtheta_IBin - dBtheta_ion
                dBrho_fac = dBrho_IBin - dBrho_ion

                # deltaB from magnetospheric currents
                # convert cartesian positions and vectors to spherical
                lphi, ltheta, lrho, lJphi, lJtheta, lJrho = pyLTR.transform.CARtoSPH(
                    xJ, yJ, zJ, Jx, Jy, Jz)
                (dBphi_mag, dBtheta_mag, dBrho_mag) = pyLTR.Physics.BS.bs_sphere(
                    (lphi, ltheta, lrho), (lJphi, lJtheta, lJrho), ldV,
                    (phi, theta, rho))

                if geoGrid:
                    # rotate ionospheric contribution from SM to GEO coordinates;
                    # leave position vectors unchanged for subsequent rotations
                    x, y, z, dx, dy, dz = pyLTR.transform.SPHtoCAR(
                        phi, theta, rho, dBphi_ion, dBtheta_ion, dBrho_ion)
                    x, y, z = pyLTR.transform.SMtoGEO(x, y, z, time)
                    dx, dy, dz = pyLTR.transform.SMtoGEO(dx, dy, dz, time)
                    _, _, _, dBphi_ion, dBtheta_ion, dBrho_ion = pyLTR.transform.CARtoSPH(
                        x, y, z, dx, dy, dz)

                    # rotate FAC contribution from SM to GEO coordinates; leave
                    # position vectors unchanged for subsequent rotations
                    x, y, z, dx, dy, dz = pyLTR.transform.SPHtoCAR(
                        phi, theta, rho, dBphi_fac, dBtheta_fac, dBrho_fac)
                    x, y, z = pyLTR.transform.SMtoGEO(x, y, z, time)
                    dx, dy, dz = pyLTR.transform.SMtoGEO(dx, dy, dz, time)
                    _, _, _, dBphi_fac, dBtheta_fac, dBrho_fac = pyLTR.transform.CARtoSPH(
                        x, y, z, dx, dy, dz)

                    # rotate magnetospheric contribution from SM to GEO coordinates
                    x, y, z, dx, dy, dz = pyLTR.transform.SPHtoCAR(
                        phi, theta, rho, dBphi_mag, dBtheta_mag, dBrho_mag)
                    x, y, z = pyLTR.transform.SMtoGEO(x, y, z, time)
                    dx, dy, dz = pyLTR.transform.SMtoGEO(dx, dy, dz, time)
                    _, _, _, dBphi_mag, dBtheta_mag, dBrho_mag = pyLTR.transform.CARtoSPH(
                        x, y, z, dx, dy, dz)

                    phi = phi_geo
                    theta = theta_geo
                    rho = rho_geo

            #
            # end of [re]processing 'except' block
            #

            # (re)create grid dictionary for subsequent plots and pickling
            toPickle = {}

            if hemisphere == 'south':
                toPickle['pov'] = 'south'
            else:
                toPickle['pov'] = 'north'

            if geoGrid:
                toPickle['coordinates'] = 'Geographic'
                # get geographic coordinates of the SM pole
                x, y, z = pyLTR.transform.SPHtoCAR(0, 0, 1)
                x, y, z = pyLTR.transform.SMtoGEO(x, y, z, time)
                poleCoords = pyLTR.transform.CARtoSPH(x, y, z)
            else:
                toPickle['coordinates'] = 'Solar Magnetic'
                ## get SM coordinates of the geographic pole:
                ## x, y, z = pyLTR.transform.SPHtoCAR(0, 0, 1)
                ## x, y, z = pyLTR.transform.GEOtoSM(x, y, z, time)
                ## poleCoords = pyLTR.transform.CARtoSPH(x, y, z)
                # now that MapPlot is being used, and until it can properly
                # plot in centered SM coordinates, we want to just plot the
                # magnetic pole
                poleCoords = (0, 0, 1)

            phi_dict = {'data': phi, 'name': r'$\phi$', 'units': r'rad'}
            theta_dict = {'data': theta, 'name': r'$\theta$', 'units': r'rad'}
            rho_dict = {'data': rho, 'name': r'$\rho$', 'units': r'm'}
            toPickle['dB_obs'] = (phi_dict, theta_dict, rho_dict)

            #################################################################
            ##
            ## FIXME: move plots from this function into the
            ##        if __name__ == '__main__' section like
            ##        deltaBTimeSeries.py; plots will then only be generated
            ##        if the user requests it, and this function may be
            ##        called as part of a module...
            ##        But note: one reason the plots are generated inside
            ##        this function is that a long time series of gridded
            ##        data becomes unmanageable memory-wise fairly
            ##        quickly...think this through carefully. -EJR
            ##
            #################################################################

            # Now onto the plots
            tt = time.timetuple()

            p.figure(1, figsize=(28, 6))
            p.figtext(0.5, 0.92,
                      'Ground ' + r'$\Delta{\mathbf{B}}$' + ' - ' + hemiSelect +
                      '\n%4d-%02d-%02d %02d:%02d:%02d' %
                      (tt.tm_year, tt.tm_mon, tt.tm_mday,
                       tt.tm_hour, tt.tm_min, tt.tm_sec),
                      fontsize=14, multialignment='center')

            # plot total deltaB
            ax = p.subplot(141)
            # temporary dictionaries
            dBphi_dict = {'data': (dBphi_ion + dBphi_fac + dBphi_mag) * 1e9,
                          'name': r'$\Delta \mathrm{B}_{\phi}$', 'units': 'nT'}
            dBtheta_dict = {'data': (dBtheta_ion + dBtheta_fac + dBtheta_mag) * 1e9,
                            'name': r'$\Delta \mathrm{B}_{\theta}$', 'units': 'nT'}
            dBrho_dict = {'data': (dBrho_ion + dBrho_fac + dBrho_mag) * 1e9,
                          'name': r'$\Delta \mathrm{B}_{\rho}$', 'units': 'nT'}
            bm = pyLTR.Graphics.MapPlot.QuiverPlotDict(
                phi_dict, theta_dict, dBrho_dict, (dBphi_dict, dBtheta_dict),
                dtUTC=time, coordSystem=toPickle['coordinates'],
                plotOpts1=dBradialTotOpts, plotOpts2=dBhvecTotOpts,
                points=[(poleCoords[0], poleCoords[1])],
                userAxes=ax, northPOV=hemisphere == 'north')
            to = p.text(.05, .95, r"$\Delta \mathbf{B}_{\mathrm{Total}}$",
                        fontsize=14, transform=ax.transAxes)

            # plot ionospheric deltaB
            ax = p.subplot(142)
            # temporary dictionaries
            dBphi_dict = {'data': (dBphi_ion) * 1e9,
                          'name': r'$\Delta \mathrm{B}_{\phi}$', 'units': 'nT'}
            dBtheta_dict = {'data': (dBtheta_ion) * 1e9,
                            'name': r'$\Delta \mathrm{B}_{\theta}$', 'units': 'nT'}
            dBrho_dict = {'data': (dBrho_ion) * 1e9,
                          'name': r'$\Delta \mathrm{B}_{\rho}$', 'units': 'nT'}
            bm = pyLTR.Graphics.MapPlot.QuiverPlotDict(
                phi_dict, theta_dict, dBrho_dict, (dBphi_dict, dBtheta_dict),
                dtUTC=time, coordSystem=toPickle['coordinates'],
                plotOpts1=dBradialIonOpts, plotOpts2=dBhvecIonOpts,
                points=[(poleCoords[0], poleCoords[1])],
                userAxes=ax, northPOV=hemisphere == 'north')
            to = p.text(.05, .95, r"$\Delta \mathbf{B}_{\mathrm{ion}}$",
                        fontsize=14, transform=ax.transAxes)
            # for subsequent pickling
            toPickle['dB_ion'] = (dBphi_dict, dBtheta_dict, dBrho_dict)

            # plot FAC deltaB
            ax = p.subplot(143)
            # temporary dictionaries
            dBphi_dict = {'data': (dBphi_fac) * 1e9,
                          'name': r'$\Delta \mathrm{B}_{\phi}$', 'units': 'nT'}
            dBtheta_dict = {'data': (dBtheta_fac) * 1e9,
                            'name': r'$\Delta \mathrm{B}_{\theta}$', 'units': 'nT'}
            dBrho_dict = {'data': (dBrho_fac) * 1e9,
                          'name': r'$\Delta \mathrm{B}_{\rho}$', 'units': 'nT'}
            bm = pyLTR.Graphics.MapPlot.QuiverPlotDict(
                phi_dict, theta_dict, dBrho_dict, (dBphi_dict, dBtheta_dict),
                dtUTC=time, coordSystem=toPickle['coordinates'],
                plotOpts1=dBradialFACOpts, plotOpts2=dBhvecFACOpts,
                points=[(poleCoords[0], poleCoords[1])],
                userAxes=ax, northPOV=hemisphere == 'north')
            to = p.text(.05, .95, r"$\Delta \mathbf{B}_{\mathrm{fac}}$",
                        fontsize=14, transform=ax.transAxes)
            # for subsequent pickling
            toPickle['dB_fac'] = (dBphi_dict, dBtheta_dict, dBrho_dict)

            # plot magnetospheric deltaB
            ax = p.subplot(144)
            # temporary dictionaries
            dBphi_dict = {'data': (dBphi_mag) * 1e9,
                          'name': r'$\Delta \mathrm{B}_{\phi}$', 'units': 'nT'}
            dBtheta_dict = {'data': (dBtheta_mag) * 1e9,
                            'name': r'$\Delta \mathrm{B}_{\theta}$', 'units': 'nT'}
            dBrho_dict = {'data': (dBrho_mag) * 1e9,
                          'name': r'$\Delta \mathrm{B}_{\rho}$', 'units': 'nT'}
            bm = pyLTR.Graphics.MapPlot.QuiverPlotDict(
                phi_dict, theta_dict, dBrho_dict, (dBphi_dict, dBtheta_dict),
                dtUTC=time, coordSystem=toPickle['coordinates'],
                plotOpts1=dBradialMagOpts, plotOpts2=dBhvecMagOpts,
                points=[(poleCoords[0], poleCoords[1])],
                userAxes=ax, northPOV=hemisphere == 'north')
            to = p.text(.05, .95, r"$\Delta \mathbf{B}_{\mathrm{mag}}$",
                        fontsize=14, transform=ax.transAxes)
            # for subsequent pickling
            toPickle['dB_mag'] = (dBphi_dict, dBtheta_dict, dBrho_dict)

            #savefigName = os.path.join(path, 'figs', hemisphere, 'frame_deltaB_%05d.png' % i)
            filePrefix = os.path.join(path, 'figs', hemisphere)
            pngFilename = os.path.join(filePrefix,
                'frame_deltaB_%04d-%02d-%02dT%02d-%02d-%02dZ.png' %
                (time.year, time.month, time.day, time.hour, time.minute, time.second))
            p.savefig(pngFilename, dpi=150)
            p.clf()

            if binaryType.lower() == 'pkl' or binaryType.lower() == '.pkl' or binaryType.lower() == 'pickle':
                # --- Dump a pickle!
                pklFilename = os.path.join(filePrefix,
                    'frame_deltaB_%04d-%02d-%02dT%02d-%02d-%02dZ.pkl' %
                    (time.year, time.month, time.day, time.hour, time.minute, time.second))
                fh = open(pklFilename, 'wb')
                pickle.dump(toPickle, fh, protocol=2)
                fh.close()
            elif binaryType.lower() == 'mat' or binaryType.lower() == '.mat' or binaryType.lower() == 'matlab':
                # --- Dump a .mat file!
                matFilename = os.path.join(filePrefix,
                    'frame_deltaB_%04d-%02d-%02dT%02d-%02d-%02dZ.mat' %
                    (time.year, time.month, time.day, time.hour, time.minute, time.second))
                sio.savemat(matFilename, toPickle)
            elif binaryType.lower() == 'none':
                pass
            else:
                print(('Unrecognized binary type ' + binaryType + ' requested'))
                raise Exception

            progress.increment()

        except KeyboardInterrupt:
            # Exit when the user hits CTRL+C.
            progress.stop()
            progress.join()
            print('Exiting.')
            import sys
            sys.exit(0)
        except:
            # Cleanup progress bar if something bad happened.
            progress.stop()
            progress.join()
            raise

        # append pngFilename to list of fully qualified filenames
        pngFilenames.append(pngFilename)

    progress.stop()
    progress.join()

    #return os.path.join(path, 'figs', hemisphere)
    return pngFilenames
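# Hypothetical invocation (assumed paths and run name, for illustration):
#
#   pngs = CreateFrames(path='/data/myRun/', run='myRun', t0='', t1='',
#                       hemisphere='north', geoGrid=False,
#                       ignoreBinary=False, binaryType='pkl')
#
# pngs then lists one PNG per time step, with matching .pkl files holding the
# gridded deltaB contributions (dB_obs, dB_ion, dB_fac, dB_mag) for reuse.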
def main():
    mu = pl.array([[2], [8], [16], [32]])
    Sigma = pl.array([[3.01602775, 1.02746769, -3.60224613, -2.08792829],
                      [1.02746769, 5.65146472, -3.98616664, 0.48723704],
                      [-3.60224613, -3.98616664, 13.04508284, -1.59255406],
                      [-2.08792829, 0.48723704, -1.59255406, 8.28742469]])
    d, U = pl.eig(Sigma)
    L = pl.diagflat(d)
    A = pl.dot(U, pl.sqrt(L))
    N = []
    mu_deviations = []
    Sigma_deviations = []
    # First part of the exercise.
    # This loop is used to get different sizes of N.
    for i in range(1, 40):
        means = pl.array([])
        covariances = pl.array([])
        N.append(50 * i)
        # From this loop, the average is taken to get an accurate measurement.
        for _ in range(1, 200):
            X = pl.randn(4, 50 * i)
            Y = pl.dot(A, X) + pl.tile(mu, 50 * i)
            mean = pl.mean(Y, axis=1)
            covariance = pl.cov(Y)
            covariance = covariance.reshape((1, 16))
            if len(means) == 0 and len(covariances) == 0:
                means = mean
                covariances = covariance
            else:
                means = pl.vstack((means, mean))
                covariances = pl.vstack((covariances, covariance))
        # Average deviation of the estimated means and covariances for this N
        # (the mean deviations come from `means`, the covariance deviations
        # from `covariances`).
        mu_deviations.append(pl.mean(pl.std(means, axis=0)))
        Sigma_deviations.append(pl.mean(pl.std(covariances, axis=0)))

    pl.figure(1)
    pl.clf()
    pl.title('The average deviation, over 200 times,\n'
             'of the mean and covariance matrix for a given N')
    pl.xlabel('N')
    pl.ylabel('average deviation')
    pl.plot(N, mu_deviations, label='average mean deviation')
    pl.plot(N, Sigma_deviations, label='average covariance deviation')
    pl.legend()
    pl.savefig('fig22.png')

    # Second part of the exercise.
    covariances = pl.array([])
    # This loop builds a data matrix of the covariances of the data matrices
    # obtained from the multivariate normal distribution. The covariance of
    # this data matrix of covariances is then shown.
    for _ in range(1, 200):
        X = pl.randn(4, 1000)
        Y = pl.dot(A, X) + pl.tile(mu, 1000)
        covariance = pl.cov(Y)
        if len(covariances) == 0:
            covariances = covariance
        else:
            covariances = pl.hstack((covariances, covariance))
    covariance_data = pl.cov(covariances)
    print(covariance_data)
import pylab as plt

n = 1000
mu = [[0], [0], [0], [0]]
Sigma = [[3.01602775, 1.02746769, -3.60224613, -2.08792829],
         [1.02746769, 5.65146472, -3.98616664, 0.48723704],
         [-3.60224613, -3.98616664, 13.04508284, -1.59255406],
         [-2.08792829, 0.48723704, -1.59255406, 8.28742469]]

d, U = plt.eig(Sigma)  # Sigma = U L Ut
L = plt.diagflat(d)
A = plt.dot(U, plt.sqrt(L))  # required transform matrix
X = plt.randn(4, n)  # 4xn matrix with each element ~ N(0,1)
Y = plt.dot(A, X) + plt.tile(mu, n)  # 4xn, each column vector ~ N(mu,Sigma)

f, axarr = plt.subplots(4, 4, sharex=True, sharey=True)
for i in range(0, len(Y)):
    for j in range(0, len(Y)):
        if i == j:
            axarr[i][j].set_title(str(i) + ',' + str(j))
            axarr[i][j].axis('off')
            continue
        axarr[i][j].plot(Y[i], Y[j], 'xg')
        axarr[i][j].set_title(str(i) + ',' + str(j))
plt.setp([a.get_xticklabels() for a in axarr[0, :]], visible=False)
plt.setp([a.get_yticklabels() for a in axarr[:, 1]], visible=False)
plt.tight_layout()
# calculate ionospheric components of SSECS from MIX data
(rv_MIX_iono, Jv_MIX_iono, dv_MIX_iono) = pyLTR.Physics.SSECS.ssecs_sphere(
    [rion_min_MIX[0], rion_min_MIX[1], rion_min_MIX[2]],
    [rion_max_MIX[0], rion_max_MIX[1], rion_min_MIX[2]],
    (Jphi_MIX_dict['data'] / 1e6, Jtheta_MIX_dict['data'] / 1e6),
    10, False)

# calculate non-ionospheric component of SSECS from MIX data
(rv_MIX_faceq, Jv_MIX_faceq, dv_MIX_faceq) = pyLTR.Physics.SSECS.ssecs_sphere(
    [rion_min_MIX[0], rion_min_MIX[1], rion_min_MIX[2] + 1],
    [rion_max_MIX[0], rion_max_MIX[1], rion_min_MIX[2]],
    (Jphi_MIX_dict['data'] / 1e6, Jtheta_MIX_dict['data'] / 1e6),
    10, False)

# calculate deltaB for total SSECS
(dBphi_MIX_total, dBtheta_MIX_total, dBrho_MIX_total) = pyLTR.Physics.BS.bs_sphere(
    rv_MIX_total, Jv_MIX_total, dv_MIX_total,
    (phi_MIX, theta_MIX, p.tile(6378e3, phi_MIX.shape)))

# convert into nanoteslas
dBphi_MIX_total_dict = {
    'data': dBphi_MIX_total * 1e9,
    'name': r'$dB_\phi$',
    'units': r'nT'
}
dBtheta_MIX_total_dict = {
    'data': dBtheta_MIX_total * 1e9,
    'name': r'$dB_\theta$',
    'units': r'nT'
}
dBrho_MIX_total_dict = {
    'data': dBrho_MIX_total * 1e9,
    'name': r'$dB_\rho$',
    'units': r'nT'
}