def plot_parameters(gid, data): print("Plotting transformed wavepacket parameters of group '"+str(gid)+"'") # Grid of mother and first spawned packet grid_m = data[0][0] grid_s = data[1][0] # Parameters of mother and first spawned packet P, Q, S, p, q = data[0][1] B, A, S, b, a = data[1][1] X = P*abs(Q)/Q # Various interesting figures fig = figure() ax = fig.gca() ax.plot(grid_m, real(X), "*", label=r"$\Re \frac{P |Q|}{Q}$") ax.plot(grid_s, real(B), "o", label=r"$\Re B$") ax.legend() ax.grid(True) fig.savefig("test_spawned_PI_realparts_group"+str(gid)+GD.output_format) fig = figure() ax = fig.gca() ax.plot(grid_m, imag(X), "*", label=r"$\Im \frac{P |Q|}{Q}$") ax.plot(grid_s, imag(B), "o", label=r"$\Im B$") ax.legend() ax.grid(True) fig.savefig("test_spawned_PI_imagparts_group"+str(gid)+GD.output_format) fig = figure() ax = fig.gca() ax.plot(real(X), imag(X), "-*", label=r"traject $\frac{P |Q|}{Q}$") ax.plot(real(B), imag(B), "-*", label=r"traject $B$") ax.legend() ax.grid(True) fig.savefig("test_spawned_PI_complex_trajectories_group"+str(gid)+GD.output_format) fig = figure() ax = fig.gca() ax.plot(grid_m, angle(X), label=r"$\arg \frac{P |Q|}{Q}$") ax.plot(grid_s, angle(B), label=r"$\arg B$") ax.legend() ax.grid(True) fig.savefig("test_spawned_PI_angles_group"+str(gid)+GD.output_format)
def zplane(self, title="", fontsize=18):
    """
    Display filter in the complex plane

    Parameters
    ----------
    title : str
        Figure title.
    fontsize : int
        Font size used for the title.
    """
    rb = self.z
    ra = self.p

    t = np.arange(0, 2 * np.pi + 0.1, 0.1)
    plt.plot(np.cos(t), np.sin(t), "k")  # unit circle

    plt.plot(np.real(ra), np.imag(ra), "x", color="r")  # poles
    plt.plot(np.real(rb), np.imag(rb), "o", color="b")  # zeros

    M1 = -10000
    M2 = -10000
    if len(ra) > 0:
        M1 = np.max([np.abs(np.real(ra)), np.abs(np.imag(ra))])
    if len(rb) > 0:
        M2 = np.max([np.abs(np.real(rb)), np.abs(np.imag(rb))])
    M = 1.6 * max(1.2, M1, M2)
    plt.axis([-M, M, -0.7 * M, 0.7 * M])
    plt.title(title, fontsize=fontsize)
    plt.show()
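# Minimal usage sketch for zplane(), assuming a hypothetical container class
# exposing zeros in `z` and poles in `p` (the attributes the method reads);
# the coefficients below are illustrative only.
import numpy as np
import matplotlib.pyplot as plt

class Filter:
    zplane = zplane  # reuse the function above as a method

flt = Filter()
flt.z = np.roots([1.0, -0.5, 0.25])  # zeros of an example numerator
flt.p = np.roots([1.0, -1.2, 0.72])  # poles of an example denominator
flt.zplane(title="Example filter")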
def SaveData(self, fname, verbose=True):
    if verbose:
        print(" Saving measurement to %s ... " % fname)
    start = time.perf_counter()  # time.clock() was removed in Python 3.8
    f = h5py.File(fname, 'w')
    f['data_r'] = np.squeeze(np.real(self.data).transpose())
    f['data_i'] = np.squeeze(np.imag(self.data).transpose())
    if (self.noise != 0):
        f['noise_r'] = np.squeeze(np.real(self.noise).transpose())
        f['noise_i'] = np.squeeze(np.imag(self.noise).transpose())
    if (self.acs != 0):
        f['acs_r'] = np.squeeze(np.real(self.acs).transpose())
        f['acs_i'] = np.squeeze(np.imag(self.acs).transpose())
    if (self.sync.any() != 0):
        f['sync'] = self.sync.transpose()
    f.close()
    if verbose:
        print(' ... saved in %(time).1f s.\n' % {"time": time.perf_counter() - start})
    return
def real(a): # doubled ranks! b = tensor() b.n = a.n b.u[0] = np.concatenate((np.real(a.u[0]), np.imag(a.u[0])), 1) b.u[1] = np.concatenate((np.real(a.u[1]), np.imag(a.u[1])), 1) b.u[2] = np.concatenate((np.real(a.u[2]), np.imag(a.u[2])), 1) R1 = np.zeros((2*a.r[0], a.r[0]), dtype = np.complex128) R2 = np.zeros((2*a.r[1], a.r[1]), dtype = np.complex128) R3 = np.zeros((2*a.r[2], a.r[2]), dtype = np.complex128) R1[:a.r[0], :] = np.identity(a.r[0]) R1[a.r[0]:, :] = 1j*np.identity(a.r[0]) R2[:a.r[1], :] = np.identity(a.r[1]) R2[a.r[1]:, :] = 1j*np.identity(a.r[1]) R3[:a.r[2], :] = np.identity(a.r[2]) R3[a.r[2]:, :] = 1j*np.identity(a.r[2]) GG = np.tensordot(np.transpose(a.core,[2,1,0]),np.transpose(R1), (2,0)) GG = np.tensordot(np.transpose(GG,[0,2,1]),np.transpose(R2), (2,0)) GG = np.transpose(GG,[1,2,0]) b.core = np.real(np.tensordot(GG,np.transpose(R3), (2,0))) b.r = b.core.shape return b
def process(self, X, V, C): BifPoint.process(self, X, V, C) J_coords = C.sysfunc.jac(X, C.coords) eigs, VL, VR = linalg.eig(J_coords, left=1, right=1) # Check for nonreal multipliers found = False for i in range(len(eigs)): for j in range(i+1,len(eigs)): if abs(imag(eigs[i])) > 1e-10 and \ abs(imag(eigs[j])) > 1e-10 and \ abs(eigs[i]*eigs[j] - 1) < 1e-5: found = True if not found: del self.found[-1] return False self.found[-1].eigs = eigs self.info(C, -1) return True
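# Self-contained check of the multiplier test above: a complex-conjugate
# pair on the unit circle satisfies eigs[i]*eigs[j] ~ 1 (the
# Neimark-Sacker/torus condition this method detects). Illustrative values.
import numpy as np

theta = 0.7
eigs = np.array([np.exp(1j * theta), np.exp(-1j * theta), 0.3])
pairs = [(i, j)
         for i in range(len(eigs)) for j in range(i + 1, len(eigs))
         if abs(np.imag(eigs[i])) > 1e-10 and abs(np.imag(eigs[j])) > 1e-10
         and abs(eigs[i] * eigs[j] - 1) < 1e-5]
assert pairs == [(0, 1)]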
def childGeneration(N, particles, tree, i): maxInCell = 3 p, t = noOfParticlesInside(particles, N[i]) N[i].particleIndex = t if p > maxInCell: t = len(N) tree[i] = [t, t + 1, t + 2, t + 3] c = N[i].center v = N[i].vertex N.append(Node((c - (v - c) / 2), c, t)) N.append(Node((c + (v - c) / 2), v, t + 1)) N.append(Node(c + (conj(v - c) / 2), real(v) + 1j * imag(c), t + 2)) N.append(Node(c - (conj(v - c) / 2), real(c) + 1j * imag(v), t + 3)) tree.append([]) tree.append([]) tree.append([]) tree.append([]) for ch in tree[i]: childGeneration(N, particles, tree, ch) N[i].aj = zeros(noOfTerms) * 1j for j in range(1, noOfTerms + 1): for k in range(1, j + 1): for chi in tree[i]: N[i].aj[j - 1] += ( N[chi].aj[k - 1] * combitorial(j - 1, k - 1) * pow((-N[i].center + N[chi].center), j - k) ) else: N[i].aj = zeros(noOfTerms) * 1j for j in range(1, noOfTerms + 1): for tempIdx in t: N[i].aj[j - 1] += particles[tempIdx].strength * pow((particles[tempIdx].xy - N[i].center), (j - 1))
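# Stand-alone check of the multipole translation used in childGeneration():
# shifting moments a_k about a child center z_c to the parent center z_p via
# the binomial sum reproduces the directly computed parent moments.
# math.comb stands in for the combitorial() helper, which is assumed to be a
# binomial coefficient; all values are illustrative.
import numpy as np
from math import comb

noOfTerms = 8
zs, zc, zp = 0.1 + 0.05j, 0.0 + 0.0j, 0.5 + 0.5j  # source, child and parent centers
a = np.array([(zs - zc)**(j - 1) for j in range(1, noOfTerms + 1)])  # unit-strength moments
b = np.zeros(noOfTerms, dtype=complex)
for j in range(1, noOfTerms + 1):
    for k in range(1, j + 1):
        b[j - 1] += a[k - 1] * comb(j - 1, k - 1) * (zc - zp)**(j - k)
direct = np.array([(zs - zp)**(j - 1) for j in range(1, noOfTerms + 1)])
assert np.allclose(b, direct)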
def fourier(self):
    """
    Generate a profile of fourier coefficients, amplitudes and phases
    """
    if pynbody.config['verbose']:
        print('Profile: fourier()')

    f = {'c': np.zeros((7, self.nbins), dtype=complex),
         'amp': np.zeros((7, self.nbins)),
         'phi': np.zeros((7, self.nbins))}

    for i in range(self.nbins):
        if self._profiles['n'][i] > 100:
            phi = np.arctan2(self.sim['y'][self.binind[i]],
                             self.sim['x'][self.binind[i]])
            mass = self.sim['mass'][self.binind[i]]

            hist, binphi = np.histogram(phi, weights=mass, bins=100)
            binphi = .5 * (binphi[1:] + binphi[:-1])
            for m in range(7):
                f['c'][m, i] = np.sum(hist * np.exp(-1j * m * binphi))

    f['c'][:, self['mass'] > 0] /= self['mass'][self['mass'] > 0]
    f['amp'] = np.sqrt(np.imag(f['c'])**2 + np.real(f['c'])**2)
    f['phi'] = np.arctan2(np.imag(f['c']), np.real(f['c']))
    return f
def writeToAscii(self, directory): """ This function writes the values of the normalization integral to a text file. Make sure to run execute method first. """ outfile = open(os.path.join(directory, "normint.txt"), "w") outfile.write(str(len(self.waves)) + "\n") outfile.write(str(len(self.alphaList)) + "\n") for eps1 in range(2): for eps2 in range(2): outfile.write( str(len(self.waves)) + " " + str(len(self.waves)) + " " + str(eps1) + " " + str(eps2) + "\n" ) for index1 in range(len(self.waves)): for index2 in range(len(self.waves) - 1): tempcomplex = self.ret[eps1, eps2, index1, index2] tempreal = numpy.real(tempcomplex) tempim = numpy.imag(tempcomplex) outfile.write(" (" + str(tempreal) + " + i " + str(tempim) + ") , ") tempcomplex = self.ret[eps1, eps2, index1, int(len(self.waves) - 1)] tempreal = numpy.real(tempcomplex) tempim = numpy.imag(tempcomplex) outfile.write(" (" + str(tempreal) + " + i " + str(tempim) + ")") outfile.write("\n") outfile.write("\n") outfile.write(str(len(self.waves)) + "\n") for wave in self.waves: outfile.write(wave.filename + " " + str(self.waves.index(wave)) + "\n") outfile.close()
def start(self, f, a, b, args=()): r"""Prepare for the iterations.""" self.function_calls = 0 self.iterations = 0 self.f = f self.args = args self.ab[:] = [a, b] if not np.isfinite(a) or np.imag(a) != 0: raise ValueError("Invalid x value: %s " % (a)) if not np.isfinite(b) or np.imag(b) != 0: raise ValueError("Invalid x value: %s " % (b)) fa = self._callf(a) if not np.isfinite(fa) or np.imag(fa) != 0: raise ValueError("Invalid function value: f(%f) -> %s " % (a, fa)) if fa == 0: return _ECONVERGED, a fb = self._callf(b) if not np.isfinite(fb) or np.imag(fb) != 0: raise ValueError("Invalid function value: f(%f) -> %s " % (b, fb)) if fb == 0: return _ECONVERGED, b if np.sign(fb) * np.sign(fa) > 0: raise ValueError("a, b must bracket a root f(%e)=%e, f(%e)=%e " % (a, fa, b, fb)) self.fab[:] = [fa, fb] return _EINPROGRESS, sum(self.ab) / 2.0
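# Hedged usage sketch of the bracketing contract start() enforces: finite,
# real endpoints whose function values have opposite signs. The solver
# class itself is assumed elsewhere; f is illustrative.
import numpy as np

def f(x):
    return x**2 - 2.0

a, b = 1.0, 2.0
fa, fb = f(a), f(b)
assert np.isfinite(fa) and np.imag(fa) == 0
assert np.sign(fa) * np.sign(fb) < 0    # a, b bracket the root sqrt(2)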
def mix_parameters(self, Pibra, Piket):
    r"""Mix the two parameter sets :math:`\Pi_i` and :math:`\Pi_j`
    from the 'bra' and the 'ket' wavepackets :math:`\Phi\left[\Pi_i\right]`
    and :math:`\Phi^\prime\left[\Pi_j\right]`.

    :param Pibra: The parameter set :math:`\Pi_i` from the bra part wavepacket.
    :param Piket: The parameter set :math:`\Pi_j` from the ket part wavepacket.
    :return: The mixed parameters :math:`q_0` and :math:`Q_S`. (See the theory for details.)
    """
    # <Pibra | ... | Piket>
    qr, pr, Qr, Pr = Pibra
    qc, pc, Qc, Pc = Piket

    # Mix the parameters
    Gr = dot(Pr, inv(Qr))
    Gc = dot(Pc, inv(Qc))

    r = imag(Gc - conjugate(Gr.T))
    s = imag(dot(Gc, qc) - dot(conjugate(Gr.T), qr))

    q0 = dot(inv(r), s)
    Q0 = 0.5 * r

    # Here we cannot avoid the matrix square root by using an SVD
    Qs = inv(sqrtm(Q0))

    return (q0, Qs)
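# A minimal numeric sketch of mix_parameters() in one dimension, using
# Hagedorn-style values Q = 1, P = i for both packets; the mixed position
# q0 then reduces to the shared center. Values are illustrative only.
import numpy as np
from numpy import dot, imag, conjugate
from numpy.linalg import inv
from scipy.linalg import sqrtm

q = np.array([[0.5]])
Q = np.array([[1.0 + 0.0j]])
P = np.array([[0.0 + 1.0j]])

Gr = Gc = dot(P, inv(Q))
r = imag(Gc - conjugate(Gr.T))                    # -> [[2.0]]
s = imag(dot(Gc, q) - dot(conjugate(Gr.T), q))    # -> [[1.0]]
q0 = dot(inv(r), s)                               # -> [[0.5]]
Qs = inv(sqrtm(0.5 * r))                          # -> [[1.0]]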
def writeToDatFile(data, file, field="E", punits="mm", funits="V/m", pscale=1.0, fscale=1.0):
    """
    Write field map to DAT data file.
    """
    nx = data.nx
    ny = data.ny
    nz = data.nz
    file.write(" x [{0}] y [{0}] z [{0}] {1}xRe [{2}] {1}yRe [{2}] {1}zRe [{2}] {1}xIm [{2}] {1}yIm [{2}] {1}zIm [{2}] \r\n".format(punits, field, funits))
    file.write("------------------------------------------------------------------------------------------------------------------------------------------\r\n")
    for x in range(nx):
        px = pscale * data.px[x]
        for y in range(ny):
            py = pscale * data.py[y]
            for z in range(nz):
                pz = pscale * data.pz[z]
                fxre = fscale * numpy.real(data.fx[x, y, z])
                fyre = fscale * numpy.real(data.fy[x, y, z])
                fzre = fscale * numpy.real(data.fz[x, y, z])
                fxim = fscale * numpy.imag(data.fx[x, y, z])
                fyim = fscale * numpy.imag(data.fy[x, y, z])
                fzim = fscale * numpy.imag(data.fz[x, y, z])
                file.write("%13.1f %13.1f %13.1f %13.6g %13.6g %13.6g %13.6g %13.6g %13.6g\r\n" % (px, py, pz, fxre, fyre, fzre, fxim, fyim, fzim))
def getArgumentVariable(_ComplexVariable):
    """Return the argument of a complex number via the half-angle formula
    arg(z) = 2*arctan(Im(z) / (|z| + Re(z)))."""
    import numpy as np
    return 2. * np.arctan(
        np.imag(_ComplexVariable) / (
            np.sqrt(np.imag(_ComplexVariable)**2 + np.real(_ComplexVariable)**2)
            + np.real(_ComplexVariable)
        )
    )
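# Quick check of the half-angle identity implemented above,
# arg(z) = 2*arctan(Im(z) / (|z| + Re(z))), which agrees with np.angle
# away from the branch cut on the negative real axis.
import numpy as np

z = 1.0 + 1.0j
assert np.isclose(getArgumentVariable(z), np.angle(z))  # both pi/4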
def instantaneous_frequency(data, fs, fk): """ Instantaneous frequency of a signal. Computes the instantaneous frequency of the given data which can be windowed or not. The instantaneous frequency is determined by the time derivative of the analytic signal of the input data. :type data: :class:`~numpy.ndarray` :param data: Data to determine instantaneous frequency of. :param fs: Sampling frequency. :param fk: Coefficients for calculating time derivatives (calculated via central difference). :return: **omega[, domega]** - Instantaneous frequency of input data, Time derivative of instantaneous frequency (windowed only). """ x = envelope(data) if len(x[0].shape) > 1: omega = np.zeros(x[0].shape[0], dtype=np.float64) i = 0 for row in x[0]: f = np.real(row) h = np.imag(row) # faster alternative to calculate f_add f_add = np.hstack(([f[0]] * (np.size(fk) // 2), f, [f[np.size(f) - 1]] * (np.size(fk) // 2))) fd = signal.lfilter(fk, 1, f_add) # correct start and end values of time derivative fd = fd[np.size(fk) - 1 : np.size(fd)] # faster alternative to calculate h_add h_add = np.hstack(([h[0]] * (np.size(fk) // 2), h, [h[np.size(h) - 1]] * (np.size(fk) // 2))) hd = signal.lfilter(fk, 1, h_add) # correct start and end values of time derivative hd = hd[np.size(fk) - 1 : np.size(hd)] omega_win = abs(((f * hd - fd * h) / (f * f + h * h)) * fs / 2 / np.pi) omega[i] = np.median(omega_win) i = i + 1 # faster alternative to calculate omega_add omega_add = np.hstack( ([omega[0]] * (np.size(fk) // 2), omega, [omega[np.size(omega) - 1]] * (np.size(fk) // 2)) ) domega = signal.lfilter(fk, 1, omega_add) # correct start and end values of time derivative domega = domega[np.size(fk) - 1 : np.size(domega)] return omega, domega else: omega = np.zeros(np.size(x[0]), dtype=np.float64) f = np.real(x[0]) h = np.imag(x[0]) # faster alternative to calculate f_add f_add = np.hstack(([f[0]] * (np.size(fk) // 2), f, [f[np.size(f) - 1]] * (np.size(fk) // 2))) fd = signal.lfilter(fk, 1, f_add) # correct start and end values of time derivative fd = fd[np.size(fk) - 1 : np.size(fd)] # faster alternative to calculate h_add h_add = np.hstack(([h[0]] * (np.size(fk) // 2), h, [h[np.size(h) - 1]] * (np.size(fk) // 2))) hd = signal.lfilter(fk, 1, h_add) # correct start and end values of time derivative hd = hd[np.size(fk) - 1 : np.size(hd)] omega = abs(((f * hd - fd * h) / (f * f + h * h)) * fs / 2 / np.pi) return omega
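# Hedged cross-check of the instantaneous-frequency formula used above,
# omega = |(f*h' - f'*h) / (f^2 + h^2)| / (2*pi), on a pure tone; the
# derivatives below carry the 1/fs sample spacing, so no extra fs factor
# is needed. scipy.signal.hilbert stands in for the envelope()/fk
# machinery of the module; values are illustrative.
import numpy as np
from scipy.signal import hilbert

fs, f0 = 1000.0, 5.0
t = np.arange(0, 1.0, 1.0 / fs)
x = hilbert(np.cos(2 * np.pi * f0 * t))   # analytic signal
f, h = np.real(x), np.imag(x)
fd = np.gradient(f, 1.0 / fs)             # time derivatives
hd = np.gradient(h, 1.0 / fs)
omega = np.abs((f * hd - fd * h) / (f * f + h * h)) / (2 * np.pi)
assert np.isclose(np.median(omega), f0, rtol=1e-2)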
def test_mexh(): LB = -5 UB = 5 N = 1000 [psi, x] = ref_mexh(LB, UB, N) w = pywt.ContinuousWavelet("mexh") w.upper_bound = UB w.lower_bound = LB PSI, X = w.wavefun(length=N) assert_allclose(np.real(PSI), np.real(psi)) assert_allclose(np.imag(PSI), np.imag(psi)) assert_allclose(X, x) LB = -5 UB = 5 N = 1001 [psi, x] = ref_mexh(LB, UB, N) w = pywt.ContinuousWavelet("mexh") w.upper_bound = UB w.lower_bound = LB PSI, X = w.wavefun(length=N) assert_allclose(np.real(PSI), np.real(psi)) assert_allclose(np.imag(PSI), np.imag(psi)) assert_allclose(X, x)
def dynamic_axis(zero, pole, K):
    if list(zero) + list(pole) == []:
        x_min, x_max, y_min, y_max = -1, 1, -1, 1
    else:
        x_min = min(list(np.real(zero)) + list(np.real(pole)))
        x_max = max(list(np.real(zero)) + list(np.real(pole)))
        if x_min == x_max:
            x_min -= 1
            x_max += 1
        else:
            x_min, x_max = (x_min - 0.75 * (x_max - x_min)), (x_max + 0.75 * (x_max - x_min))
        y_min = min(list(np.imag(zero)) + list(np.imag(pole)))
        y_max = max(list(np.imag(zero)) + list(np.imag(pole)))
        if y_min == y_max:
            y_min -= 1
            y_max += 1
        else:
            y_min, y_max = (y_min - 0.75 * (y_max - y_min)), (y_max + 0.75 * (y_max - y_min))
    if K == 0:
        z_min, z_max = -1, 1
    else:
        K = abs(K)
        print(K)
        z_min, z_max = K * 0.1, K * 10.0
    return x_min, x_max, y_min, y_max, z_min, z_max
def state_toString(self,state,params={}): states = [ 'l0', 'l1', 'Q', 'ml0', 'ml1', 'q', 'u', 'd' ] s = "" for i,v in enumerate(state): if v != 0: if i < 8: repr = states[i] else: repr = self.__state_str(i) if s != "" and v >= 0: s +="+" if v == 1: s += "|%s>" % repr else: if np.imag(v) != 0: s += "(%.3f + %.3fi)|%s>" % (np.real(v),np.imag(v),repr) else: s += "%.3f|%s>" % (v,repr) return s
def GaussLk(self, newLkFile, sigma):
    fout = open(newLkFile, "w")
    fout.write("# input data : %s\n" % "+ Gaussian error")
    percentU = percentQ = 0.
    for lineStr, f1, f2 in zip(self.LeakList[0].lineStr, self.LeakList[0].f1, self.LeakList[0].f2):
        print(lineStr)
        fout.write("#\n")
        for ant in range(1, 16):
            DRlist = []
            DLlist = []
            for Lk in self.LeakList:
                if Lk.ant == ant:
                    for lineStr1, DR1, DL1 in zip(Lk.lineStr, Lk.DR, Lk.DL):
                        if (lineStr1 == lineStr) and (abs(DR1) > 0.) and (abs(DL1) > 0.):
                            DRlist.append(DR1)
                            DLlist.append(DL1)
                            print("... ant %d - appending data from %s" % (ant, Lk.legend))
            if len(DRlist) > 0:
                DRmean = numpy.mean(DRlist)
                DLmean = numpy.mean(DLlist)
                DRnew = random.gauss(numpy.real(DRmean), sigma) + random.gauss(numpy.imag(DRmean), sigma) * 1j
                DLnew = random.gauss(numpy.real(DLmean), sigma) + random.gauss(numpy.imag(DLmean), sigma) * 1j
            else:
                DRnew = 0. + 0j
                DLnew = 0. + 0j
            print(ant, DRnew, DLnew)
            fout.write("C%02d %8.3f %8.3f %8.3f %6.3f %8.3f %6.3f %8.3f %6.3f %s\n" %
                       (ant, f1, f2, DRnew.real, DRnew.imag, DLnew.real,
                        DLnew.imag, percentQ, percentU, lineStr))
    fout.close()
def tvtkGridFunction(g): """Return a TVTK object visualizing the specified grid function.""" tvtkObj = tvtkGrid(g.grid()) point_data = g.evaluateAtSpecialPoints("vertex_data") cell_data = g.evaluateAtSpecialPoints("cell_data") tvtkObj.cell_data.add_array(np.real(cell_data.T)) tvtkObj.cell_data.add_array(np.imag(cell_data.T)) tvtkObj.cell_data.add_array(np.sum(abs(cell_data)**2,axis=0)) tvtkObj.cell_data.get_abstract_array(0).name = 'real' tvtkObj.cell_data.get_abstract_array(1).name = 'imag' tvtkObj.cell_data.get_abstract_array(2).name = 'abs^2' tvtkObj.point_data.add_array(np.real(point_data.T)) tvtkObj.point_data.add_array(np.imag(point_data.T)) tvtkObj.point_data.add_array(np.sum(abs(point_data)**2,axis=0)) tvtkObj.point_data.get_abstract_array(0).name = 'real' tvtkObj.point_data.get_abstract_array(1).name = 'imag' tvtkObj.point_data.get_abstract_array(2).name = 'abs^2' if g.componentCount()==3: tvtkObj.cell_data.set_active_scalars('abs^2') tvtkObj.point_data.set_active_scalars('abs^2') tvtkObj.cell_data.set_active_vectors('real') tvtkObj.point_data.set_active_vectors('real') elif g.componentCount()==1: tvtkObj.cell_data.set_active_scalars('real') tvtkObj.point_data.set_active_scalars('real') else: raise Exception("plotGridFunction: Only GridFunctions with " "componentCount 1 or 3 are supported.") return tvtkObj
def analytic(self): """The natural output for this analyzer is the analytic signal""" data = self.input.data sampling_rate = self.input.sampling_rate a_signal =\ ts.TimeSeries(data=np.zeros(self.freqs.shape + data.shape, dtype='D'), sampling_rate=sampling_rate) if self.freqs.ndim == 0: w = self.wavelet(self.freqs, self.sd, sampling_rate=sampling_rate, ns=5, normed='area') # nd = (w.shape[0] - 1) / 2 a_signal.data[...] = (np.convolve(data, np.real(w), mode='same') + 1j * np.convolve(data, np.imag(w), mode='same')) else: for i, (f, sd) in enumerate(zip(self.freqs, self.sd)): w = self.wavelet(f, sd, sampling_rate=sampling_rate, ns=5, normed='area') # nd = (w.shape[0] - 1) / 2 a_signal.data[i, ...] = ( np.convolve(data, np.real(w), mode='same') + 1j * np.convolve(data, np.imag(w), mode='same')) return a_signal
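# Hedged sketch of the convolution step above with a hand-rolled,
# Morlet-like kernel (the wavelet() helper and TimeSeries container are
# assumed elsewhere; the kernel and its 'area' normalisation here are
# illustrative only).
import numpy as np

fs, f0, sd = 100.0, 5.0, 0.1
t = np.arange(-0.5, 0.5, 1.0 / fs)
w = np.exp(2j * np.pi * f0 * t) * np.exp(-t**2 / (2 * sd**2))
w /= np.sum(np.abs(w))                    # rough 'area' normalisation
data = np.cos(2 * np.pi * f0 * np.arange(0, 2.0, 1.0 / fs))
a_signal = (np.convolve(data, np.real(w), mode='same')
            + 1j * np.convolve(data, np.imag(w), mode='same'))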
def test_Transect(self): for src in self.prb.survey.srcList: print(' --- testing {} --- '.format(src.__class__.__name__)) bfz = self.mesh.r(self.u[src, 'b'],'F','Fz','M') x = np.linspace(-55,55,12) XYZ = Utils.ndgrid(x,np.r_[0],np.r_[0]) P = self.mesh.getInterpolationMat(XYZ, 'Fz') ana = mu_0*np.imag(EM.Analytics.FDEM.hzAnalyticDipoleF(x, src.freq, self.sig)) num = P*np.imag(self.u[src, 'b']) diff = np.linalg.norm(num - ana) if plotIt: import matplotlib.pyplot as plt plt.plot(x, np.log10(np.abs(num))) plt.plot(x, np.log10(np.abs(ana)), 'r') plt.plot(x, diff, 'g') plt.show() norm_num = np.linalg.norm(num) norm_ana = np.linalg.norm(ana) tol = tol_Transect*(norm_num + norm_ana)/2. passed = diff < tol print ('analytic: {}, numeric {}, difference {} < tolerance {} ? ' ' {}'.format(norm_ana, norm_num, diff, tol, passed)) self.assertTrue(passed)
def test_pupilfn_3(): """ Test PF X derivative (C library). """ dx = 1.0e-6 geo = pupilMath.Geometry(20, 0.1, 0.6, 1.5, 1.4) pf = geo.createFromZernike(1.0, [[1.3, 2, 2]]) pf_c = pfFnC.PupilFunction(geometry = geo) pf_c.setPF(pf) # Calculate derivative of magnitude as a function of x. psf_c = pf_c.getPSF() psf_c_dx = pf_c.getPSFdx() mag_dx_calc = 2.0 * (numpy.real(psf_c)*numpy.real(psf_c_dx) + numpy.imag(psf_c)*numpy.imag(psf_c_dx)) # Estimate derivative using (f(x+dx) - f(x))/dx mag = pupilMath.intensity(psf_c) pf_c.translate(dx,0.0,0.0) mag_dx_est = (pupilMath.intensity(pf_c.getPSF()) - mag)/dx if False: with tifffile.TiffWriter(storm_analysis.getPathOutputTest("test_pupilfn_3.tif")) as tf: #tf.save(mag.astype(numpy.float32)) tf.save(mag_dx_calc.astype(numpy.float32)) tf.save(mag_dx_est.astype(numpy.float32)) tf.save(numpy.abs(mag_dx_calc - mag_dx_est).astype(numpy.float32)) assert numpy.allclose(mag_dx_calc, mag_dx_est, atol = 1.0e-6) pf_c.cleanup()
def normalmode_frequencies(hessian, metric=None, eps=1e-4): """calculate (squared) normal mode frequencies Parameters ---------- hessian: 2d array hessian matrix metric: 2d array mass weighted metric tensor Returns ------- sorted array of normal mode frequencies """ A = hessian if metric is not None: A = np.dot(np.linalg.pinv(metric), hessian) frq = np.linalg.eigvals(A) if np.max(np.abs(np.imag(frq))) > eps: print(frq) raise ValueError("imaginary eigenvalue in frequency calculation" ", check hessian + metric tensor\n" "the largest imaginary part is %g" % np.max(np.abs(np.imag(frq)))) return np.sort(np.real(frq))
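# Minimal example for normalmode_frequencies(): a two-degree-of-freedom
# harmonic system with unit masses; the returned values are the squared
# frequencies. Illustrative numbers only.
import numpy as np

hessian = np.array([[2.0, -1.0],
                    [-1.0, 2.0]])
metric = np.eye(2)                      # unit-mass metric tensor
frq2 = normalmode_frequencies(hessian, metric)
assert np.allclose(frq2, [1.0, 3.0])    # sorted squared frequencies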
def mode_finder(mode_args):
    # unpack the argument tuple (the original 2to3 artifact 'xxx_todo_changeme' removed)
    (det_mat_slice, wl_list, kx) = mode_args
    dispcurve = []
    xtol = 1e-4 * wl_0
    for j in range(len(wl_list) - 1):
        # Check determinant crosses zero, both real and imaginary
        if np.real(det_mat_slice[j]) * np.real(det_mat_slice[j + 1]) < 0:
            if np.imag(det_mat_slice[j]) * np.imag(det_mat_slice[j + 1]) < 0:
                if j != 0:
                    diffreq = np.abs(det_mat_slice[j - 1] - det_mat_slice[j])
                else:
                    diffreq = np.abs(det_mat_slice[j + 2] - det_mat_slice[j + 1])
                # Check we are not just at a discontinuity
                if np.abs(det_mat_slice[j + 1] - det_mat_slice[j]) < 3 * diffreq:
                    try:
                        # Optimise the wl
                        finwl = optimize.brentq(
                            lambda wl: np.real(np.exp(1j) * simulate_stack([wl, kx])),
                            wl_list[j], wl_list[j + 1], rtol=1e-3, xtol=xtol)
                        findet = simulate_stack([finwl, kx])
                        print('#################################')
                        print('found root = ', findet)
                        print('#################################')
                        # Check the final determinant is below some tolerance
                        if np.abs(findet) < 1.e-3:
                            finfreq = 2 * np.pi * c_speed * 1e9 / finwl
                            dispcurve.append((kx * 1e9, finfreq))
                    except AttributeError:
                        print(det_mat_slice[j], det_mat_slice[j + 1])
    return dispcurve
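# The root refinement above relies on scipy.optimize.brentq; a minimal
# stand-alone bracketing example with a toy function (not simulate_stack):
import numpy as np
from scipy import optimize

g = lambda wl: np.cos(wl) - 0.5         # sign change on [0, 2]
root = optimize.brentq(g, 0.0, 2.0, xtol=1e-6)
assert np.isclose(root, np.pi / 3)      # ~1.0472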
def extract_outfield_from_dict( outpdict ): """ extract only the output field (time, freq) and the freq vectors from a dict created by mydict = loadoutput(filename) returns a Nx7 numpy.array """ tvec = outpdict['tvec'] omvec = outpdict['omvec'] relomvec = outpdict['relomvec'] tfieldreal = np.real(outpdict['tfield2']) tfieldimag = np.imag(outpdict['tfield2']) ffieldreal = np.real(outpdict['ffield2']) ffieldimag = np.imag(outpdict['ffield2']) M = np.zeros( [len( tvec), 7]) M[:,0]=tvec M[:,1]=omvec M[:,2]=relomvec M[:,3]=tfieldreal M[:,4]=tfieldimag M[:,5]=ffieldreal M[:,6]=ffieldimag return M
def resData2(self, xval, yval, yval_sim=None, xlabel='Frequency', xunit='Hz', plottype='amp',
             ampformat='log', save=False, Dir=None):
    self.figure.clear()
    self.figure.subplots_adjust(bottom=0.15, left=0.17)
    self.axes = self.figure.add_subplot(111)
    if plottype == 'real/imag':
        self.axes.plot(np.real(yval), np.imag(yval), np.real(yval_sim), np.imag(yval_sim))
        self.axes.set_xlabel("Re(S)")
        self.axes.set_ylabel("Im(S)")
    else:
        self.axes.set_xlabel(xlabel + ' [' + xunit + ']')
        if plottype == 'amp':
            if ampformat == "log":
                # amplitude in dB is 20*log10(|S|)
                self.axes.plot(xval, 20 * np.log10(np.absolute(yval)),
                               xval, 20 * np.log10(np.absolute(yval_sim)))
                self.axes.set_ylabel("Amplitude [dB]")
            if ampformat == 'lin':
                self.axes.plot(xval, np.absolute(yval), xval, np.absolute(yval_sim))
                self.axes.set_ylabel("Amplitude")
        if plottype == 'phase':
            if yval_sim is None:
                # this option is needed for the lorentz function since we only fit the amplitude
                self.axes.plot(xval, np.angle(yval))
            else:
                self.axes.plot(xval, np.angle(yval), xval, np.angle(yval_sim))
            self.axes.set_ylabel('Phase [rad]')
    if save:
        print_fig = self.figure
        print_fig.savefig(Dir)
    else:
        self.draw()
def rot_ell(m_rt_ps):
    '''Utility to compute rotation and ellipticity starting from the
    reflection and transmission matrix

    Parameters
    ----------
    'm_rt_ps' = ps reflection and transmission matrix

    Returns
    -------
    'a dictionary' = {'theta_p':theta_p, 'eps_p':eps_p, 'theta_s':theta_s, 'eps_s':eps_s}
    '''

    # extracting values from the matrix
    rt_pp = m_rt_ps[0, 0]
    rt_ps = m_rt_ps[0, 1]
    rt_sp = m_rt_ps[1, 0]
    rt_ss = m_rt_ps[1, 1]

    # calculating the values
    theta_p = np.real(rt_sp / rt_pp)
    eps_p = np.imag(rt_sp / rt_pp)
    theta_s = np.real(rt_ps / rt_ss)
    eps_s = np.imag(rt_ps / rt_ss)

    out_dict = {'theta_p': theta_p, 'eps_p': eps_p, 'theta_s': theta_s, 'eps_s': eps_s}
    return out_dict
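# Worked example for rot_ell(): with a diagonal reflection/transmission
# matrix the off-diagonal ratios vanish, so all rotations and
# ellipticities are zero. Illustrative matrix only.
import numpy as np

m = np.array([[0.8 + 0.1j, 0.0],
              [0.0, 0.6 - 0.2j]])
out = rot_ell(m)
assert out['theta_p'] == out['eps_p'] == out['theta_s'] == out['eps_s'] == 0.0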
def fidelity(statevec, dm, u_dm, ustatevec=np.array([0,0])): ''' returns the fidelity (and its uncertainty) of the measured density matrix with a given state vector. ''' f = error.Formula() beta,a,x,b,alpha = sympy.symbols('beta,a,x,b,alpha') v = Matrix([alpha, beta]) rho = Matrix([[x,a+1j*b],[a-1j*b, 1-x]]) f.formula = (v.conjugate().transpose() * rho * v)[0] f.values[alpha] = statevec[0] f.values[beta] = statevec[1] f.values[a]=float(np.real(dm[0,1])) f.values[x]=float(dm[0,0]) f.values[b]=float(np.imag(dm[0,1])) f.uncertainties[alpha]=ustatevec[0] f.uncertainties[beta]=ustatevec[1] f.uncertainties[x]=u_dm[0,0] f.uncertainties[a]=float(np.real(u_dm[0,1])) f.uncertainties[b]=float(np.imag(u_dm[0,1])) _fid,_ufid = f.num_eval() fid = float(_fid.as_real_imag()[0]) ufid = float(_ufid.as_real_imag()[0]) return (fid,ufid)
def write_readable(sigma, g_w, n_k, n_w, wgrid):
    print("Writing E_file for checking")
    print(n_k, n_w)
    print(sigma[n_w - 1][n_k - 1][n_k - 1])
    greal_file = open("PARSED_Greal_1", "w+")
    gimag_file = open("PARSED_Gimag_1", "w+")
    sigma_file = open("PARSED_SIGMA_1", "w+")
    for w in range(n_w // 2, n_w):
        print(w)
        print(np.real(g_w[w][0][0]))
        write_string = str(wgrid[w]) + " "
        write_string2 = str(wgrid[w]) + " "
        write_string_sigma = str(wgrid[w]) + " "
        for kx in range(n_k):
            for ky in range(n_k):
                write_string += " " + str(np.real(g_w[w][kx][ky])[0])
                write_string2 += " " + " " + str(np.imag(g_w[w][kx][ky])[0])
                write_string_sigma += " " + str(np.real(sigma[w][kx][ky])[0]) + " " + str(np.imag(sigma[w][kx][ky])[0])
        # print(write_string)
        greal_file.write(write_string + "\n")
        gimag_file.write(write_string2 + "\n")  # imaginary part (was write_string)
        sigma_file.write(write_string_sigma + "\n")
def check_sum_rule(self, df1_w=None, df2_w=None): """Check f-sum rule.""" if df1_w is None: df1_w = self.df1_w df2_w = self.df2_w N1 = N2 = 0 for iw in range(self.Nw): w = iw * self.dw N1 += np.imag(df1_w[iw]) * w N2 += np.imag(df2_w[iw]) * w N1 *= self.dw * self.vol / (2 * pi**2) N2 *= self.dw * self.vol / (2 * pi**2) self.printtxt('') self.printtxt('Sum rule for ABS:') nv = self.nvalence self.printtxt('Without local field: N1 = %f, %f %% error' %(N1, (N1 - nv) / nv * 100) ) self.printtxt('Include local field: N2 = %f, %f %% error' %(N2, (N2 - nv) / nv * 100) ) N1 = N2 = 0 for iw in range(self.Nw): w = iw * self.dw N1 -= np.imag(1/df1_w[iw]) * w N2 -= np.imag(1/df2_w[iw]) * w N1 *= self.dw * self.vol / (2 * pi**2) N2 *= self.dw * self.vol / (2 * pi**2) self.printtxt('') self.printtxt('Sum rule for EELS:') nv = self.nvalence self.printtxt('Without local field: N1 = %f, %f %% error' %(N1, (N1 - nv) / nv * 100) ) self.printtxt('Include local field: N2 = %f, %f %% error' %(N2, (N2 - nv) / nv * 100) )
def ARLineSpectra(ar):
    """
    Convert AR coeffs to LSPs

    From wikipedia:
    A palindromic polynomial (i.e., P) of odd degree has -1 as a root.
    An antipalindromic polynomial (i.e., Q) has 1 as a root.
    An antipalindromic polynomial of even degree has -1 and 1 as roots
    """
    order = ar.shape[-1]
    ret = np.zeros(ar.shape)
    for a, o in core.refiter([ar, ret], core.newshape(ar.shape)):
        p = np.ones((order + 2))
        q = np.ones((order + 2))
        q[-1] = -1.0
        for i in range(order):
            p[i + 1] = -a[i] - a[order - i - 1]
            q[i + 1] = -a[i] + a[order - i - 1]
        pr = np.roots(p)
        qr = np.roots(q)
        j = 0
        an = np.ndarray((order + 2))
        for i in range(len(pr)):
            if np.imag(pr[i]) >= 0.0:
                an[j] = np.angle(pr[i])
                j += 1
            if np.imag(qr[i]) >= 0.0:
                an[j] = np.angle(qr[i])
                j += 1
        # The angle list (an) will always contain both 0 and pi; they
        # will move to the ends after the sort
        o[...] = np.sort(an)[1:-1]
    return ret
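# Stand-alone sketch of the LSP construction inside ARLineSpectra() for a
# single coefficient vector, without the core.refiter iteration machinery
# (illustrative coefficients only):
import numpy as np

a = np.array([0.5, -0.3])
order = len(a)
p = np.ones(order + 2)
q = np.ones(order + 2)
q[-1] = -1.0
for i in range(order):
    p[i + 1] = -a[i] - a[order - i - 1]   # palindromic polynomial P
    q[i + 1] = -a[i] + a[order - i - 1]   # antipalindromic polynomial Q
an = [np.angle(r) for r in np.concatenate((np.roots(p), np.roots(q)))
      if np.imag(r) >= 0.0]
lsp = np.sort(an)[1:-1]                   # drop the fixed roots at 0 and pi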
def _propagate_SExp_RTOp_ReSymK_Re_numpy(self, rhoi, Ham, RT, dt, L=4):
    """Integration by short exponential expansion

    Integration by expanding the exponential (_SExp_) to Lth order.
    This is a numpy (_numpy) implementation with real (_Re_)
    matrices for a system part of the system-bath interaction
    operator ``K`` in a form of real symmetric operator (ReSymK).
    The relaxation tensor is assumed in form of a set of operators
    (_RTOp_)
    """
    Nref = self.Nref
    Nt = self.Nt
    verbose = self.verbose
    timea = self.TimeAxis
    prop_name = self.propagation_name
    # no self beyond this point

    qr.log_detail("PROPAGATION (short exponential with " +
                  "relaxation in operator form): order ", L,
                  verbose=verbose)
    qr.log_detail("Using real valued numpy implementation")

    pr = ReducedDensityMatrixEvolution(timea, rhoi, name=prop_name)

    rho1_r = numpy.real(rhoi.data)
    rho2_r = numpy.real(rhoi.data)
    rho1_i = numpy.imag(rhoi.data)
    rho2_i = numpy.imag(rhoi.data)

    HH = Ham.data

    try:
        Km = RT.Km                   # real
        Lm_r = numpy.real(RT.Lm)     # complex
        Lm_i = numpy.imag(RT.Lm)
        Nm = Km.shape[0]
    except:
        raise Exception("Tensor is not in operator form")

    indx = 1

    # verbosity inside loops
    levs = [qr.LOG_QUICK]
    verb = qr.loglevels2bool(levs, verbose=self.verbose)

    # after each step we apply pure dephasing (if present)
    if self.has_PDeph:
        # loop over time
        for ii in range(1, Nt):
            qr.printlog("time step ", ii, "of", Nt,
                        verbose=verb[0], loglevel=levs[0], end="\r")

            # steps in between saving the results
            for jj in range(Nref):

                # L iterations to get the short exponential expansion
                for ll in range(1, L + 1):

                    A = numpy.dot(HH, rho1_i)
                    B = numpy.dot(HH, rho1_r)
                    rhoY_r = (dt / ll) * (A + numpy.transpose(A))
                    rhoY_i = -(dt / ll) * (B - numpy.transpose(B))

                    for mm in range(Nm):

                        a = numpy.dot(Lm_r[mm, :, :], rho1_r)
                        A = a - numpy.transpose(a)
                        b = numpy.dot(Lm_i[mm, :, :], rho1_i)
                        B = b - numpy.transpose(b)
                        c = numpy.dot(Lm_r[mm, :, :], rho1_i)
                        C = -(c + numpy.transpose(c))
                        d = numpy.dot(Lm_i[mm, :, :], rho1_r)
                        D = d + numpy.transpose(d)

                        E = B - A
                        F = C - D

                        A = numpy.dot(Km[mm, :, :], E)
                        B = numpy.dot(Km[mm, :, :], F)

                        rhoY_r += (dt / ll) * (A + numpy.transpose(A))
                        rhoY_i += (dt / ll) * (B - numpy.transpose(B))

                    rho1_r = rhoY_r
                    rho1_i = rhoY_i

                    rho2_r += rho1_r
                    rho2_i += rho1_i

                rho2_r = rho2_r * numpy.exp(-self.PDeph.data * dt)
                rho2_i = rho2_i * numpy.exp(-self.PDeph.data * dt)
                rho1_r = rho2_r
                rho1_i = rho2_i

            pr.data[indx, :, :] = rho2_r + 1j * rho2_i
            indx += 1

    # propagation with no extra dephasing
    else:
        # loop over time
        for ii in range(1, Nt):
            qr.printlog("time step ", ii, "of", Nt,
                        verbose=verb[0], loglevel=levs[0], end="\r")

            # steps in between saving the results
            for jj in range(Nref):

                # L iterations to get the short exponential expansion
                for ll in range(1, L + 1):

                    A = numpy.dot(HH, rho1_i)
                    B = numpy.dot(HH, rho1_r)
                    rhoY_r = (dt / ll) * (A + numpy.transpose(A))
                    rhoY_i = -(dt / ll) * (B - numpy.transpose(B))

                    for mm in range(Nm):

                        a = numpy.dot(Lm_r[mm, :, :], rho1_r)
                        A = a - numpy.transpose(a)
                        b = numpy.dot(Lm_i[mm, :, :], rho1_i)
                        B = b - numpy.transpose(b)
                        c = numpy.dot(Lm_r[mm, :, :], rho1_i)
                        C = -(c + numpy.transpose(c))
                        d = numpy.dot(Lm_i[mm, :, :], rho1_r)
                        D = d + numpy.transpose(d)

                        E = B - A
                        F = C - D

                        A = numpy.dot(Km[mm, :, :], E)
                        B = numpy.dot(Km[mm, :, :], F)

                        rhoY_r += (dt / ll) * (A + numpy.transpose(A))
                        rhoY_i += (dt / ll) * (B - numpy.transpose(B))

                    rho1_r = rhoY_r
                    rho1_i = rhoY_i

                    rho2_r += rho1_r
                    rho2_i += rho1_i

                rho1_r = rho2_r
                rho1_i = rho2_i

            pr.data[indx, :, :] = rho2_r + 1j * rho2_i
            indx += 1

    qr.log_detail()
    qr.log_detail("...DONE")

    return pr
A6 = Asum6(0.5) A7 = Asum7(0.5) pl = Plotter() #line(frq/1e9, real(A), plotter=pl) #line(frq/1e9, imag(A), plotter=pl, color="red") #line(frq/1e9, real(A2), plotter=pl, color="green", linewidth=0.5) #line(frq/1e9, imag(A2), plotter=pl, color="black", linewidth=0.5) line(frq / 1e9, sin(pi * 9 * frq / f0) / sin(pi * frq / f0), plotter=pl) line(frq / 1e9, real(A3), plotter=pl, color="green", linewidth=0.5) line(frq / 1e9, imag(A3), plotter=pl, color="black", linewidth=0.5) line(frq / 1e9, A4, plotter=pl, color="red", linewidth=0.5) #line(frq/1e9, A5, plotter=pl, color="purple", linewidth=0.5) line(frq / 1e9, real(A6), plotter=pl, color="darkgray", linewidth=0.5) line(frq / 1e9, imag(A6), plotter=pl, color="darkgray", linewidth=0.5) #pl.show() pl = Plotter() line(frq / 1e9, 1 / 81.0 * absolute(A)**2, plotter=pl, color="black") #line(frq/1e9, 1/81.0*absolute(sin(pi*9*frq/f0)/sin(pi*frq/f0))**2, plotter=pl, color="green", linewidth=0.4) #line(frq/1e9, 1/81.0*absolute(A5)**2, plotter=pl, color="blue", linewidth=0.5) line(frq / 1e9, 1 / 81.0 * absolute(A6)**2, plotter=pl,
def update_data_matrix(self, tableWidgetItem):
    lower_bound = 0.0
    upper_bound = float("inf")
    value = 0.0
    if tableWidgetItem.column() == 0:
        element = "length"
        value = utility.validate(tableWidgetItem.text(), lower_bound, upper_bound,
                                 l_inclusive=False, u_inclusive=False)
    elif tableWidgetItem.column() == 1:
        element = "separation"
        lower_bound = -1.0 * float("inf")
        value = utility.validate(tableWidgetItem.text(), lower_bound, upper_bound,
                                 l_inclusive=False, u_inclusive=False)
    elif tableWidgetItem.column() == 2:
        element = "earth impedance"
        try:
            # explicit string conversion required for Python 2.7;
            # np.complex was removed from NumPy, so use the builtin
            value = complex(str(tableWidgetItem.text()))
        except ValueError:
            value = False
    elif tableWidgetItem.column() == 3:
        element = "soil resistivity"
        value = utility.validate(tableWidgetItem.text(), lower_bound, upper_bound,
                                 l_inclusive=False, u_inclusive=False)

    if value is not False:
        columns = [0, 1, 2, 4, 5]
        column = columns[tableWidgetItem.column()]
        update_mapping = (globals.sections[tableWidgetItem.row(), column] != value)
        if isinstance(value, complex):
            globals.sections[tableWidgetItem.row(), column] = np.real(value)
            globals.sections[tableWidgetItem.row(), column + 1] = np.imag(value)
            if np.imag(value) == 0.0:
                tableWidgetItem.setText(str(np.real(value)))
            else:
                tableWidgetItem.setText(str(value))
        else:
            globals.sections[tableWidgetItem.row(), column] = value
            #tableWidgetItem.setText(str(value))
        if update_mapping:
            self.main_window.refresh_mapping()
    else:
        self.main_window.show_status_message(
            "Section " + str(tableWidgetItem.row() + 1) + " " + element +
            ": Input value '" + tableWidgetItem.text() + "' out of bounds. (" +
            str(lower_bound) + " to " + str(upper_bound) + "). Value not set.",
            error=True, beep=True)
    self.tableWidget.itemChanged.disconnect()
    self.refresh_data()
    self.tableWidget.itemChanged.connect(self.update_data_matrix)
tf_fwd = muscat.computeconvolution(TF_ASF=None, is_padding=True) ''' Evaluate the model ''' sess = tf.Session() #config=tf.ConfigProto(log_device_placement=True)) sess.run(tf.global_variables_initializer()) ''' Compute the ATF ''' myATF = sess.run(muscat.TF_ATF) myASF = sess.run(muscat.TF_ASF) #%% run model and measure memory/time start = time.time() myfwd = sess.run( tf_fwd, feed_dict={ muscat.TF_ASF_placeholder: myASF, muscat.TF_obj: np.real(obj), muscat.TF_obj_absorption: np.imag(obj) } ) #, options=tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE, output_partition_graphs=True), run_metadata=run_metadata) end = time.time() print(end - start) #%% display the results centerslice = myfwd.shape[0] // 2 #sess.run(muscat.normfac) if (muscat.Nz == 1 and False): plt.figure() plt.subplot(221), plt.title('real XY'), plt.imshow( np.real(np.squeeze(myfwd))), plt.colorbar() #, plt.show() plt.subplot(222), plt.title('imag XY'), plt.imshow(
first_iter = False

# sddict = {'f':f,'11':P11,'12':P12,'13':P13,'22':P22,'23':P23,'33':P33}
# sdpath = avgsddir + '/d%03d.cpkl' % day
# file = open( sd_path , 'wb' )
# cpkl.dump( sddict , file , -1 )
# file.close()

sdpath = avgsddir + '/avgsd.dat'
if avgsddir not in glob.glob(avgsddir):
    os.system('mkdir -p %s' % avgsddir)
file = open(sdpath, 'w')
print('\n', file=file)
print('#CSD', file=file)
print(('#' + '%19s' + 8 * '%20s') % ('freq[Hz]', 'Re{g12}', 'Im{g12}',
                                     'Re{P12}', 'Im{P12}', 'Re{P11}',
                                     'Im{P11}', 'Re{P22}', 'Im{P22}'), file=file)
for k in range(f.shape[0]):
    print((9 * '%20.10e') % (f[k],
                             np.real(g12[k]), np.imag(g12[k]),
                             np.real(P12[k]), np.imag(P12[k]),
                             np.real(P11[k]), np.imag(P11[k]),
                             np.real(P22[k]), np.imag(P22[k])), file=file)
file.close()
while True: if s.code_p < l1cd.code_length / 2: n = int(fs * 0.01 * (l1cd.code_length - s.code_p) / l1cd.code_length) else: n = int(fs * 0.01 * (2 * l1cd.code_length - s.code_p) / l1cd.code_length) x = io.get_samples_complex(fp, n) if x is None: break nco.mix(x, -coffset / fs, coffset_phase) coffset_phase = coffset_phase - n * coffset / fs coffset_phase = np.mod(coffset_phase, 1) for j in range(10): a, b = int(j * n / 10), int((j + 1) * n / 10) p_prompt, s = track(x[a:b], s) vars = block, np.real(p_prompt), np.imag( p_prompt), s.carrier_f, s.code_f - l1cd.chip_rate, ( 180 / np.pi) * np.angle(p_prompt), s.early, s.prompt, s.late print('%d %f %f %f %f %f %f %f %f' % vars) block = block + 1 # if (block%100)==0: # sys.stderr.write("%d\n"%block) # if block==500: # s.mode = 'FLL_NARROW' # if block==1000: # s.mode = 'PLL'
def _propagate_SExp_RTOp_ReSymK_Re_pytorch(self, rhoi, Ham, RT, dt,
                                           use_gpu=False, L=4):
    """Integration by short exponential expansion

    Integration by expanding the exponential (_SExp_) to Lth order.
    This is a PyTorch (_pytorch) implementation with real (_Re_)
    matrices for a system part of the system-bath interaction
    operator ``K`` in a form of real symmetric operator (ReSymK).
    The relaxation tensor is assumed in form of a set of operators
    (_RTOp_)
    """
    Nref = self.Nref
    Nt = self.Nt
    verbose = self.verbose
    timea = self.TimeAxis
    prop_name = self.propagation_name

    try:
        import torch
    except ImportError:
        raise Exception("PyTorch not installed")

    # no self beyond this point

    qr.log_detail("PROPAGATION (short exponential with " +
                  "relaxation in operator form): order ", L,
                  verbose=verbose)
    qr.log_detail("Using pytorch implementation")
    qr.log_detail("Using GPU: ", use_gpu & torch.cuda.is_available())

    pr = ReducedDensityMatrixEvolution(timea, rhoi, name=prop_name)

    rho1_r = torch.from_numpy(numpy.real(rhoi.data))
    rho2_r = torch.from_numpy(numpy.real(rhoi.data))
    rho1_i = torch.from_numpy(numpy.imag(rhoi.data))
    rho2_i = torch.from_numpy(numpy.imag(rhoi.data))

    HH = torch.from_numpy(Ham.data)

    try:
        Km = torch.from_numpy(RT.Km)                # real
        Lm_r = torch.from_numpy(numpy.real(RT.Lm))  # complex
        Lm_i = torch.from_numpy(numpy.imag(RT.Lm))
        Nm = RT.Km.shape[0]
    except:
        raise Exception("Tensor is not in operator form")

    if use_gpu & torch.cuda.is_available():
        rho1_r = rho1_r.cuda()
        rho2_r = rho1_r
        rho1_i = rho1_i.cuda()
        rho2_i = rho1_i
        HH = HH.cuda()
        Km = Km.cuda()
        Lm_r = Lm_r.cuda()
        Lm_i = Lm_i.cuda()

    indx = 1

    # verbosity inside loops
    levs = [qr.LOG_QUICK]
    verb = qr.loglevels2bool(levs)

    # loop over time
    for ii in range(1, Nt):
        qr.printlog(" time step ", ii, "of", Nt,
                    verbose=verb[0], loglevel=levs[0])

        # steps in between saving the results
        for jj in range(Nref):

            # L iterations to get the short exponential expansion
            for ll in range(1, L + 1):

                A = torch.matmul(HH, rho1_i)
                B = torch.matmul(HH, rho1_r)
                rhoY_r = torch.mul(A + torch.transpose(A, 0, 1), dt / ll)
                rhoY_i = torch.mul(B - torch.transpose(B, 0, 1), -dt / ll)

                for mm in range(Nm):

                    a = torch.matmul(Lm_r[mm, :, :], rho1_r)
                    A = a - torch.transpose(a, 0, 1)
                    b = torch.matmul(Lm_i[mm, :, :], rho1_i)
                    B = b - torch.transpose(b, 0, 1)
                    c = torch.matmul(Lm_r[mm, :, :], rho1_i)
                    C = -(c + torch.transpose(c, 0, 1))
                    d = torch.matmul(Lm_i[mm, :, :], rho1_r)
                    D = d + torch.transpose(d, 0, 1)

                    E = B - A
                    F = C - D

                    A = torch.matmul(Km[mm, :, :], E)
                    B = torch.matmul(Km[mm, :, :], F)

                    rhoY_r += torch.mul(A + torch.transpose(A, 0, 1), dt / ll)
                    rhoY_i += torch.mul(B - torch.transpose(B, 0, 1), dt / ll)

                rho1_r = rhoY_r
                rho1_i = rhoY_i

                rho2_r += rho1_r
                rho2_i += rho1_i

            rho1_r = rho2_r
            rho1_i = rho2_i

        if use_gpu & torch.cuda.is_available():
            rho2_sr = rho2_r.cpu()
            rho2_si = rho2_i.cpu()
        else:
            rho2_sr = rho2_r
            rho2_si = rho2_i

        pr.data[indx, :, :] = rho2_sr.numpy() + 1j * rho2_si.numpy()
        indx += 1

    qr.log_detail("...DONE")

    return pr
""" Estatisticas basicas """ print(pd.DataFrame(y).describe()) """ Plota a decomposição em series de fourier, com o periodo invés de a frequência """ #y = y-y.mean() signal = y fourier = np.fft.rfft(signal) n = signal.size sample_rate = 1 freq = np.fft.fftfreq(n, d=1. / sample_rate) module = (np.real(y)**2 + np.imag(y)**2)**0.5 angle = np.angle(y) plt.figure(figsize=(16, 4)) periodo = 1 / freq[:int(len(module) / 2)] plt.plot(module[:int(len(module) / 2)]) #plt.plot(angle[:int(len(angle)/2)]) plt.grid(linestyle='dashed') plt.ylim(min(module[:int(len(module) / 2)]), max(module[:int(len(module) / 2)])) plt.xlim(1, 82) plt.xticks(range(0, len(periodo), 5), periodo[range(0, len(periodo), 5)].round(decimals=2)) periodo = 1 / freq[:int(len(module) / 2)] plt.xlabel("Period (1/F)") plt.ylabel("Amplitude")
#np.savetxt('ValsRe.txt', np.real(HamiltonEigVals), fmt='%1.4e')  # use exponential notation
#np.savetxt('ValsIm.txt', np.imag(HamiltonEigVals), fmt='%1.4e')  # use exponential notation
#np.savetxt('Vals.txt', HamiltonEigVals, fmt='%1.4e')  # use exponential notation
#np.savetxt('VecsHamiltonCalculator0010.txt', HamiltonEigVecs, fmt='%1.4e')  # use exponential notation

for m1 in range(0, 2 * NNN * SSS):
    file1.write("%s" % (MomentumAxis[m1]))
    file1.write("\t")
    file1.write("%s" % (KineticAxis[m1]))
    file1.write("\t")
    file1.write("%s" % (SelfEnergyAxis[m1]))
    file1.write("\t")
    file1.write("%s" % (np.real(HamiltonEigVals[m1])))
    file1.write("\t")
    file1.write("%s" % (np.imag(HamiltonEigVals[m1])))
    file1.write("\t")
    file1.write("%s" % (HamiltonEigVals[m1]))
    file1.write('\n')
file1.close()

print("gK = ", GGG)
print("aK = ", AAA)
print("L = ", LLL)
print("N = ", NNN)

print("Fin")

#216
def s_sys(en): s = kwant.smatrix(sys, en).data return s si0 = s_sys(e1) sf0 = s_sys(e1 + df) dos_sys = np.trace(si0.conj().T @ (sf0 - si0)) / df / (4 * np.pi**2) * (-1j) def eigentime(u0, u1, vh0, vh1): vv0 = vh0.conj().T vv1 = vh1.conj().T t = (u0.conj().T @ (u1 - u0) - vv0.conj().T @ (vv1 - vv0)) / 1j / df / (2 * np.pi) return t gf = kwant.greens_function(sys, e1).submatrix(1, 0) u0, s0, vh0 = np.linalg.svd(gf, full_matrices=True, compute_uv=True) gf1 = kwant.greens_function(sys, e1 + df).submatrix(1, 0) u1, s1, vh1 = np.linalg.svd(gf1, full_matrices=True, compute_uv=True) #u0, s0, vh0=np.linalg.svd(si[n:2*n,0:n], full_matrices=True, compute_uv=True) #u1, s1, vh1=np.linalg.svd(sf[n:2*n,0:n], full_matrices=True, compute_uv=True) t = np.diag(eigentime(u0, u1, vh0, vh1) / np.pi) tt = sum(t[np.abs(np.imag(t)) < 1e-4]) elapsed = time() - t_ini
def main(): # Settings. scenario_name = fledge.config.config['tests']['scenario_name'] results_path = fledge.utils.get_results_path(__file__, scenario_name) power_multipliers = np.arange(-0.2, 1.2, 0.1) # Recreate / overwrite database, to incorporate changes in the CSV files. fledge.data_interface.recreate_database() # Obtain base scaling parameters. scenario_data = fledge.data_interface.ScenarioData(scenario_name) base_power = scenario_data.scenario.at['base_apparent_power'] base_voltage = scenario_data.scenario.at['base_voltage'] # Obtain electric grid model. electric_grid_model = fledge.electric_grid_models.ElectricGridModelDefault( scenario_name) # Obtain power flow solution for nominal power conditions. power_flow_solution_initial = fledge.electric_grid_models.PowerFlowSolutionFixedPoint( electric_grid_model) # Obtain linear electric grid model for nominal power conditions. linear_electric_grid_model = ( fledge.electric_grid_models.LinearElectricGridModelGlobal( electric_grid_model, power_flow_solution_initial)) # Instantiate results variables. der_power_vector_active = (pd.DataFrame(index=power_multipliers, columns=electric_grid_model.ders, dtype=float)) der_power_vector_reactive = (pd.DataFrame(index=power_multipliers, columns=electric_grid_model.ders, dtype=float)) der_power_vector_active_change = (pd.DataFrame( index=power_multipliers, columns=electric_grid_model.ders, dtype=float)) der_power_vector_reactive_change = (pd.DataFrame( index=power_multipliers, columns=electric_grid_model.ders, dtype=float)) node_voltage_vector_power_flow = (pd.DataFrame( index=power_multipliers, columns=electric_grid_model.nodes, dtype=complex)) node_voltage_vector_linear_model = (pd.DataFrame( index=power_multipliers, columns=electric_grid_model.nodes, dtype=complex)) node_voltage_vector_magnitude_power_flow = (pd.DataFrame( index=power_multipliers, columns=electric_grid_model.nodes, dtype=float)) node_voltage_vector_magnitude_linear_model = (pd.DataFrame( index=power_multipliers, columns=electric_grid_model.nodes, dtype=float)) branch_power_vector_1_squared_power_flow = (pd.DataFrame( index=power_multipliers, columns=electric_grid_model.branches, dtype=float)) branch_power_vector_1_squared_linear_model = (pd.DataFrame( index=power_multipliers, columns=electric_grid_model.branches, dtype=float)) branch_power_vector_2_squared_power_flow = (pd.DataFrame( index=power_multipliers, columns=electric_grid_model.branches, dtype=float)) branch_power_vector_2_squared_linear_model = (pd.DataFrame( index=power_multipliers, columns=electric_grid_model.branches, dtype=float)) branch_power_vector_1_magnitude_power_flow = (pd.DataFrame( index=power_multipliers, columns=electric_grid_model.branches, dtype=float)) branch_power_vector_1_magnitude_linear_model = (pd.DataFrame( index=power_multipliers, columns=electric_grid_model.branches, dtype=float)) branch_power_vector_2_magnitude_power_flow = (pd.DataFrame( index=power_multipliers, columns=electric_grid_model.branches, dtype=float)) branch_power_vector_2_magnitude_linear_model = (pd.DataFrame( index=power_multipliers, columns=electric_grid_model.branches, dtype=float)) loss_active_power_flow = (pd.Series(index=power_multipliers, dtype=float)) loss_active_linear_model = (pd.Series(index=power_multipliers, dtype=float)) loss_reactive_power_flow = (pd.Series(index=power_multipliers, dtype=float)) loss_reactive_linear_model = (pd.Series(index=power_multipliers, dtype=float)) # Obtain DER power / change. 
der_power_vector_active.loc[:, :] = ( np.transpose([power_multipliers]) @ np.array( [np.real(power_flow_solution_initial.der_power_vector)])) der_power_vector_reactive.loc[:, :] = ( np.transpose([power_multipliers]) @ np.array( [np.imag(power_flow_solution_initial.der_power_vector)])) der_power_vector_active_change.loc[:, :] = ( np.transpose([power_multipliers - 1]) @ np.array( [np.real(power_flow_solution_initial.der_power_vector)])) der_power_vector_reactive_change.loc[:, :] = ( np.transpose([power_multipliers - 1]) @ np.array( [np.imag(power_flow_solution_initial.der_power_vector)])) # Obtain power flow solutions. power_flow_solutions = (fledge.utils.starmap( fledge.electric_grid_models.PowerFlowSolutionFixedPoint, [(electric_grid_model, row) for row in (der_power_vector_active + 1.0j * der_power_vector_reactive).values])) power_flow_solutions = dict(zip(power_multipliers, power_flow_solutions)) for power_multiplier in power_multipliers: power_flow_solution = power_flow_solutions[power_multiplier] node_voltage_vector_power_flow.loc[ power_multiplier, :] = power_flow_solution.node_voltage_vector node_voltage_vector_magnitude_power_flow.loc[ power_multiplier, :] = np.abs( power_flow_solution.node_voltage_vector) branch_power_vector_1_magnitude_power_flow.loc[ power_multiplier, :] = np.abs( power_flow_solution.branch_power_vector_1) branch_power_vector_2_magnitude_power_flow.loc[ power_multiplier, :] = np.abs( power_flow_solution.branch_power_vector_2) branch_power_vector_1_squared_power_flow.loc[ power_multiplier, :] = np.abs( power_flow_solution.branch_power_vector_1)**2 branch_power_vector_2_squared_power_flow.loc[ power_multiplier, :] = np.abs( power_flow_solution.branch_power_vector_2)**2 loss_active_power_flow.loc[power_multiplier] = np.real( power_flow_solution.loss) loss_reactive_power_flow.loc[power_multiplier] = np.imag( power_flow_solution.loss) # Obtain linear model solutions. node_voltage_vector_linear_model.loc[:, :] = ( np.transpose([power_flow_solution_initial.node_voltage_vector] * len(power_multipliers)) + linear_electric_grid_model.sensitivity_voltage_by_der_power_active @ np.transpose(der_power_vector_active_change.values) + linear_electric_grid_model.sensitivity_voltage_by_der_power_reactive @ np.transpose(der_power_vector_reactive_change.values)).transpose() node_voltage_vector_magnitude_linear_model.loc[:, :] = ( np.transpose([np.abs(power_flow_solution_initial.node_voltage_vector)] * len(power_multipliers)) + linear_electric_grid_model. sensitivity_voltage_magnitude_by_der_power_active @ np.transpose( der_power_vector_active_change.values) + linear_electric_grid_model. sensitivity_voltage_magnitude_by_der_power_reactive @ np.transpose( der_power_vector_reactive_change.values)).transpose() branch_power_vector_1_magnitude_linear_model.loc[:, :] = ( np.transpose( [np.abs(power_flow_solution_initial.branch_power_vector_1)] * len(power_multipliers)) + linear_electric_grid_model. sensitivity_branch_power_1_magnitude_by_der_power_active @ np.transpose(der_power_vector_active_change.values) + linear_electric_grid_model. sensitivity_branch_power_1_magnitude_by_der_power_reactive @ np.transpose(der_power_vector_reactive_change.values)).transpose() branch_power_vector_2_magnitude_linear_model.loc[:, :] = ( np.transpose( [np.abs(power_flow_solution_initial.branch_power_vector_2)] * len(power_multipliers)) + linear_electric_grid_model. sensitivity_branch_power_2_magnitude_by_der_power_active @ np.transpose(der_power_vector_active_change.values) + linear_electric_grid_model. 
sensitivity_branch_power_2_magnitude_by_der_power_reactive @ np.transpose(der_power_vector_reactive_change.values)).transpose() branch_power_vector_1_squared_linear_model.loc[:, :] = ( np.transpose( [np.abs(power_flow_solution_initial.branch_power_vector_1**2)] * len(power_multipliers)) + linear_electric_grid_model. sensitivity_branch_power_1_squared_by_der_power_active @ np.transpose( der_power_vector_active_change.values) + linear_electric_grid_model. sensitivity_branch_power_1_squared_by_der_power_reactive @ np.transpose(der_power_vector_reactive_change.values)).transpose() branch_power_vector_2_squared_linear_model.loc[:, :] = ( np.transpose( [np.abs(power_flow_solution_initial.branch_power_vector_2**2)] * len(power_multipliers)) + linear_electric_grid_model. sensitivity_branch_power_2_squared_by_der_power_active @ np.transpose( der_power_vector_active_change.values) + linear_electric_grid_model. sensitivity_branch_power_2_squared_by_der_power_reactive @ np.transpose(der_power_vector_reactive_change.values)).transpose() loss_active_linear_model.loc[:] = ( np.transpose([np.real(power_flow_solution_initial.loss)] * len(power_multipliers)) + linear_electric_grid_model.sensitivity_loss_active_by_der_power_active @ np.transpose(der_power_vector_active_change.values) + linear_electric_grid_model.sensitivity_loss_active_by_der_power_reactive @ np.transpose(der_power_vector_reactive_change.values)).ravel() loss_reactive_linear_model.loc[:] = ( np.transpose([np.imag(power_flow_solution_initial.loss)] * len(power_multipliers)) + linear_electric_grid_model. sensitivity_loss_reactive_by_der_power_active @ np.transpose( der_power_vector_active_change.values) + linear_electric_grid_model. sensitivity_loss_reactive_by_der_power_reactive @ np.transpose( der_power_vector_reactive_change.values)).ravel() # Instantiate error variables. node_voltage_vector_error = (pd.Series(index=power_multipliers, dtype=float)) node_voltage_vector_magnitude_error = (pd.Series(index=power_multipliers, dtype=float)) branch_power_vector_1_magnitude_error = (pd.Series(index=power_multipliers, dtype=float)) branch_power_vector_2_magnitude_error = (pd.Series(index=power_multipliers, dtype=float)) branch_power_vector_1_squared_error = (pd.Series(index=power_multipliers, dtype=float)) branch_power_vector_2_squared_error = (pd.Series(index=power_multipliers, dtype=float)) loss_active_error = (pd.Series(index=power_multipliers, dtype=float)) loss_reactive_error = (pd.Series(index=power_multipliers, dtype=float)) # Obtain error values. 
node_voltage_vector_error = (100.0 * ( (node_voltage_vector_linear_model - node_voltage_vector_power_flow) / node_voltage_vector_power_flow).abs().mean(axis='columns')) node_voltage_vector_magnitude_error = (100.0 * ( (node_voltage_vector_magnitude_linear_model - node_voltage_vector_magnitude_power_flow) / node_voltage_vector_magnitude_power_flow).mean(axis='columns')) branch_power_vector_1_magnitude_error = (100.0 * ( (branch_power_vector_1_magnitude_linear_model - branch_power_vector_1_magnitude_power_flow) / branch_power_vector_1_magnitude_power_flow).mean(axis='columns')) branch_power_vector_2_magnitude_error = (100.0 * ( (branch_power_vector_2_magnitude_linear_model - branch_power_vector_2_magnitude_power_flow) / branch_power_vector_2_magnitude_power_flow).mean(axis='columns')) branch_power_vector_1_squared_error = (100.0 * ( (branch_power_vector_1_squared_linear_model - branch_power_vector_1_squared_power_flow) / branch_power_vector_1_squared_power_flow).mean(axis='columns')) branch_power_vector_2_squared_error = (100.0 * ( (branch_power_vector_2_squared_linear_model - branch_power_vector_2_squared_power_flow) / branch_power_vector_2_squared_power_flow).mean(axis='columns')) loss_active_error = (100.0 * ((loss_active_linear_model - loss_active_power_flow) / loss_active_power_flow)) loss_reactive_error = ( 100.0 * ((loss_reactive_linear_model - loss_reactive_power_flow) / loss_reactive_power_flow)) # Obtain error table. linear_electric_grid_model_error = (pd.DataFrame( [ node_voltage_vector_error, node_voltage_vector_magnitude_error, branch_power_vector_1_magnitude_error, branch_power_vector_2_magnitude_error, branch_power_vector_1_squared_error, branch_power_vector_2_squared_error, loss_active_error, loss_reactive_error ], index=[ 'node_voltage_vector_error', 'node_voltage_vector_magnitude_error', 'branch_power_vector_1_magnitude_error', 'branch_power_vector_2_magnitude_error', 'branch_power_vector_1_squared_error', 'branch_power_vector_2_squared_error', 'loss_active_error', 'loss_reactive_error' ])) linear_electric_grid_model_error = linear_electric_grid_model_error.round( 2) # Print results. 
print(f"der_power_vector_active =\n{der_power_vector_active}") print(f"der_power_vector_reactive =\n{der_power_vector_reactive}") print( f"der_power_vector_active_change =\n{der_power_vector_active_change}") print( f"der_power_vector_reactive_change =\n{der_power_vector_reactive_change}" ) print( f"node_voltage_vector_power_flow =\n{node_voltage_vector_power_flow}") print( f"node_voltage_vector_linear_model =\n{node_voltage_vector_linear_model}" ) print( f"node_voltage_vector_magnitude_power_flow =\n{node_voltage_vector_magnitude_power_flow}" ) print( f"node_voltage_vector_magnitude_linear_model =\n{node_voltage_vector_magnitude_linear_model}" ) print( f"branch_power_vector_1_squared_power_flow =\n{branch_power_vector_1_squared_power_flow}" ) print( f"branch_power_vector_1_squared_linear_model =\n{branch_power_vector_1_squared_linear_model}" ) print( f"branch_power_vector_2_squared_power_flow =\n{branch_power_vector_2_squared_power_flow}" ) print( f"branch_power_vector_2_squared_linear_model =\n{branch_power_vector_2_squared_linear_model}" ) print( f"branch_power_vector_1_magnitude_power_flow =\n{branch_power_vector_1_magnitude_power_flow}" ) print( f"branch_power_vector_1_magnitude_linear_model =\n{branch_power_vector_1_magnitude_linear_model}" ) print( f"branch_power_vector_2_magnitude_power_flow =\n{branch_power_vector_2_magnitude_power_flow}" ) print( f"branch_power_vector_2_magnitude_linear_model =\n{branch_power_vector_2_magnitude_linear_model}" ) print(f"loss_active_power_flow =\n{loss_active_power_flow}") print(f"loss_active_linear_model =\n{loss_active_linear_model}") print(f"loss_reactive_power_flow =\n{loss_reactive_power_flow}") print(f"loss_reactive_linear_model =\n{loss_reactive_linear_model}") print( f"linear_electric_grid_model_error =\n{linear_electric_grid_model_error}" ) # Store results as CSV. 
der_power_vector_active.to_csv( os.path.join(results_path, 'der_power_vector_active.csv')) der_power_vector_reactive.to_csv( os.path.join(results_path, 'der_power_vector_reactive.csv')) der_power_vector_active_change.to_csv( os.path.join(results_path, 'der_power_vector_active_change.csv')) der_power_vector_reactive_change.to_csv( os.path.join(results_path, 'der_power_vector_reactive_change.csv')) node_voltage_vector_power_flow.to_csv( os.path.join(results_path, 'node_voltage_vector_power_flow.csv')) node_voltage_vector_linear_model.to_csv( os.path.join(results_path, 'node_voltage_vector_linear_model.csv')) node_voltage_vector_magnitude_power_flow.to_csv( os.path.join(results_path, 'node_voltage_vector_magnitude_power_flow.csv')) node_voltage_vector_magnitude_linear_model.to_csv( os.path.join(results_path, 'node_voltage_vector_magnitude_linear_model.csv')) branch_power_vector_1_squared_power_flow.to_csv( os.path.join(results_path, 'branch_power_vector_1_squared_power_flow.csv')) branch_power_vector_1_squared_linear_model.to_csv( os.path.join(results_path, 'branch_power_vector_1_squared_linear_model.csv')) branch_power_vector_2_squared_power_flow.to_csv( os.path.join(results_path, 'branch_power_vector_2_squared_power_flow.csv')) branch_power_vector_2_squared_linear_model.to_csv( os.path.join(results_path, 'branch_power_vector_2_squared_linear_model.csv')) branch_power_vector_1_magnitude_power_flow.to_csv( os.path.join(results_path, 'branch_power_vector_1_magnitude_power_flow.csv')) branch_power_vector_1_magnitude_linear_model.to_csv( os.path.join(results_path, 'branch_power_vector_1_magnitude_linear_model.csv')) branch_power_vector_2_magnitude_power_flow.to_csv( os.path.join(results_path, 'branch_power_vector_2_magnitude_power_flow.csv')) branch_power_vector_2_magnitude_linear_model.to_csv( os.path.join(results_path, 'branch_power_vector_2_magnitude_linear_model.csv')) loss_active_power_flow.to_csv( os.path.join(results_path, 'loss_active_power_flow.csv')) loss_active_linear_model.to_csv( os.path.join(results_path, 'loss_active_linear_model.csv')) loss_reactive_power_flow.to_csv( os.path.join(results_path, 'loss_reactive_power_flow.csv')) loss_reactive_linear_model.to_csv( os.path.join(results_path, 'loss_reactive_linear_model.csv')) linear_electric_grid_model_error.to_csv( os.path.join(results_path, 'linear_electric_grid_model_error.csv')) # Plot results. # Voltage magnitude. for node_index, node in enumerate(electric_grid_model.nodes): plt.plot(power_multipliers, base_voltage * node_voltage_vector_magnitude_power_flow.loc[:, node], label='Power flow') plt.plot(power_multipliers, base_voltage * node_voltage_vector_magnitude_linear_model.loc[:, node], label='Linear model') plt.scatter([0.0], [ base_voltage * abs(electric_grid_model.node_voltage_vector_reference[node_index]) ], label='No load') plt.scatter([1.0], [ base_voltage * abs(power_flow_solution_initial.node_voltage_vector[node_index]) ], label='Initial point') plt.legend() plt.title( f"Voltage magnitude [V] for\n (node_type, node_name, phase): {node}" ) plt.savefig(os.path.join(results_path, f'voltage_magnitude_{node}.png')) # plt.show() plt.close() # Branch flow. 
for branch_index, branch in enumerate(electric_grid_model.branches): plt.plot(power_multipliers, base_power * branch_power_vector_1_magnitude_power_flow.loc[:, branch], label='Power flow') plt.plot(power_multipliers, base_power * branch_power_vector_1_magnitude_linear_model.loc[:, branch], label='Linear model') plt.scatter([0.0], [0.0], label='No load') plt.scatter([1.0], [ base_power * abs(power_flow_solution_initial. branch_power_vector_1[branch_index]) ], label='Initial point') plt.legend() plt.title( f"Branch power 1 magnitude [VA] for\n (branch_type, branch_name, phase): {branch}" ) plt.savefig( os.path.join(results_path, f'branch_power_1_magnitude_{branch}.png')) # plt.show() plt.close() plt.plot(power_multipliers, base_power * branch_power_vector_2_magnitude_power_flow.loc[:, branch], label='Power flow') plt.plot(power_multipliers, base_power * branch_power_vector_2_magnitude_linear_model.loc[:, branch], label='Linear model') plt.scatter([0.0], [0.0], label='No load') plt.scatter([1.0], [ base_power * abs(power_flow_solution_initial. branch_power_vector_2[branch_index]) ], label='Initial point') plt.legend() plt.title( f"Branch power 2 magnitude [VA] for\n (branch_type, branch_name, phase): {branch}" ) plt.savefig( os.path.join(results_path, f'branch_power_2_magnitude_{branch}.png')) # plt.show() plt.close() plt.plot(power_multipliers, (base_power**2) * branch_power_vector_1_squared_power_flow.loc[:, branch], label='Power flow') plt.plot(power_multipliers, (base_power**2) * branch_power_vector_1_squared_linear_model.loc[:, branch], label='Linear model') plt.scatter([0.0], [0.0], label='No load') plt.scatter([1.0], [(base_power**2) * abs( power_flow_solution_initial.branch_power_vector_1[branch_index]**2) ], label='Initial point') plt.legend() plt.title( f"Branch power 1 squared [VA²] for\n (branch_type, branch_name, phase): {branch}" ) plt.savefig( os.path.join(results_path, f'branch_power_1_squared_{branch}.png')) # plt.show() plt.close() plt.plot(power_multipliers, (base_power**2) * branch_power_vector_2_squared_power_flow.loc[:, branch], label='Power flow') plt.plot(power_multipliers, (base_power**2) * branch_power_vector_2_squared_linear_model.loc[:, branch], label='Linear model') plt.scatter([0.0], [0.0], label='No load') plt.scatter([1.0], [(base_power**2) * abs( power_flow_solution_initial.branch_power_vector_2[branch_index]**2) ], label='Initial point') plt.legend() plt.title( f"Branch power 2 squared [VA²] for\n (branch_type, branch_name, phase): {branch}" ) plt.savefig( os.path.join(results_path, f'branch_power_2_squared_{branch}.png')) # plt.show() plt.close() # Loss. plt.plot(power_multipliers, base_power * loss_active_power_flow, label='Power flow') plt.plot(power_multipliers, base_power * loss_active_linear_model, label='Linear model') plt.scatter([0.0], [0.0], label='No load') plt.scatter([1.0], [base_power * np.real([power_flow_solution_initial.loss])], label='Initial point') plt.legend() plt.title("Total loss active [W]") plt.savefig(os.path.join(results_path, f'loss_active.png')) # plt.show() plt.close() plt.plot(power_multipliers, base_power * loss_reactive_power_flow, label='Power flow') plt.plot(power_multipliers, base_power * loss_reactive_linear_model, label='Linear model') plt.scatter([1.0], [base_power * np.imag([power_flow_solution_initial.loss])], label='Initial point') plt.legend() plt.title("Total loss reactive [VAr]") plt.savefig(os.path.join(results_path, f'loss_reactive.png')) # plt.show() plt.close() # Print results path. 
fledge.utils.launch(results_path) print(f"Results are stored in: {results_path}")
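# The error metric printed and saved above is computed earlier in the script (not
# shown in this excerpt). A minimal sketch of one plausible definition, assuming the
# power flow and linear model results are DataFrames over the power multipliers;
# all names in this sketch are illustrative, not the script's actual implementation:
import numpy as np
import pandas as pd

def relative_rms_error(power_flow: pd.DataFrame, linear_model: pd.DataFrame) -> float:
    # RMS deviation of the linear model from the power flow solution,
    # normalized by the largest power flow magnitude
    residual = np.abs(linear_model.to_numpy() - power_flow.to_numpy())
    scale = max(np.abs(power_flow.to_numpy()).max(), 1e-12)  # guard for an all-zero reference
    return float(np.sqrt(np.mean(residual ** 2)) / scale)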
for K_it in Ks: j = int((K_it - Kstart) / Kstep) rp_arr[i, j] = rp(k_it, K_it, i, theta, phi, psi) # remove horizontal scars omega = omega[np.logical_not(rp_arr[:, 0] == 1. + 0.j)] rp_arr = rp_arr[np.logical_not(rp_arr[:, 0] == 1. + 0.j)] # remove vertical scars qs = qs[np.logical_not(rp_arr[127, :] == 1. + 0.j)] rp_arr = rp_arr[:, np.logical_not(rp_arr[127, :] == 1. + 0.j)] imrp_arr = np.imag(rp_arr) # --------------- Plot -------------- # fig = plt.figure(figsize=(20, 10)) ax = fig.add_subplot(121) ax.set_aspect('auto') X, Y = np.meshgrid(qs, omega) mesh = plt.contourf(X, Y, imrp_arr, np.linspace(cmin, cmax, cstep)) plt.xlabel(r'$q\ (10^{5}\ cm^{-1})$') plt.ylabel(r'$\omega\ (cm^{-1})$') cbar = fig.colorbar(mesh, ax=ax) cbar.set_label(r'$Im(r_p)$', rotation=270) cbar.ax.set_yticklabels([]) ax2 = fig.add_subplot(122, projection='3d')
def eigen_stationary_density(self):
    """
    Solve for the exact stationary density. First constructs the Nz*Ns by Nz*Ns transition matrix Q(a',z'; a,z) from state (a,z) to (a',z').
    Then obtains the eigenvector associated with the unique eigenvalue equal to 1. This eigenvector (renormalized so that it sums to one) is the unique stationary density function.

    Note: About 99% of the computation time is spent on the eigenvalue calculation. For now there is no way to speed this function up as numba only supports np.linalg.eig() when there is no domain change (e.g. real numbers to real numbers). Here there is a domain change as some eigenvalues and eigenvector elements are complex.

    *Output
        * stationary_pdf: stationary density function
        * Q: transition matrix
    """

    # a. initialize transition matrix
    Q = np.zeros((self.Nz * self.Na_fine, self.Nz * self.Na_fine))

    # b. interpolate and construct transition matrix
    for i_z in range(self.Nz):  # current productivity
        for i_a, a0 in enumerate(self.grid_a_fine):

            # i. interpolate
            a_intp = interp(self.grid_a, self.pol_sav[i_z, :], a0)

            # take the grid index to the right; a_intp lies between grid_a_fine[j-1] and grid_a_fine[j]
            j = np.sum(self.grid_a_fine <= a_intp)

            # less than or equal to lowest grid value
            if a_intp <= self.grid_a_fine[0]:
                p = 0

            # more than or equal to greatest grid value
            elif a_intp >= self.grid_a_fine[-1]:
                p = 1
                j = j - 1  # since the right index is outside the grid, make it the max index

            # inside grid
            else:
                p = (a_intp - self.grid_a_fine[j - 1]) / (self.grid_a_fine[j] - self.grid_a_fine[j - 1])

            # ii. transition matrix
            na = i_z * self.Na_fine  # minimum row index

            for i_zz in range(self.Nz):  # next productivity state
                ma = i_zz * self.Na_fine  # minimum column index

                Q[na + i_a, ma + j] = p * self.pi[i_z, i_zz]
                Q[na + i_a, ma + j - 1] = (1.0 - p) * self.pi[i_z, i_zz]

    # iii. ensure that the rows sum up to 1
    assert np.allclose(Q.sum(axis=1), np.ones(self.Nz * self.Na_fine)), "Transition matrix error: Rows do not sum to 1"

    # c. get the eigenvector
    eigen_val, eigen_vec = np.linalg.eig(Q.T)  # transpose Q for the eig function

    # i. find the column index for the eigenvalue equal to 1
    idx = np.argmin(np.abs(eigen_val - 1.0))
    eigen_vec_stat = np.copy(eigen_vec[:, idx])

    # ii. ensure imaginary parts of any complex numbers are negligible and convert to real numbers
    if np.max(np.abs(np.imag(eigen_vec_stat))) < 1e-6:
        eigen_vec_stat = np.real(eigen_vec_stat)  # drop the (negligible) imaginary parts

    else:
        raise Exception(
            "Stationary eigenvector error: Maximum imaginary part greater than 1e-6. Use a different distribution solution method."
        )

    # d. obtain stationary density from stationary eigenvector
    # i. reshape
    stationary_pdf = eigen_vec_stat.reshape(self.Nz, self.Na_fine)

    # ii. renormalize so that the density sums to one
    stationary_pdf = stationary_pdf / np.sum(np.sum(stationary_pdf, axis=0))

    return stationary_pdf, Q
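# A minimal, self-contained sketch of the eigenvector technique documented above,
# on a 2-state Markov chain (this toy example is not part of the model class):
import numpy as np

pi = np.array([[0.9, 0.1],
               [0.4, 0.6]])                 # row-stochastic transition matrix
eigen_val, eigen_vec = np.linalg.eig(pi.T)  # transpose for the left eigenvectors
idx = np.argmin(np.abs(eigen_val - 1.0))    # eigenvalue closest to 1
stat = np.real(eigen_vec[:, idx])
stat = stat / stat.sum()                    # renormalize to a density
# stat is [0.8, 0.2]: the unique stationary distribution of this chain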
def set_resolution(file: str, t_start: float, u_start: np.ndarray, num_dofs: int) -> None:
    """
    Create resolution file

    :param file: file
    :param t_start: time associated with the input approximate solution u_start
    :param u_start: approximate solution for the input time t_start
    :param num_dofs: number of unknowns
    """
    dofpos = np.cumsum([0, num_dofs])
    com_str = ['$ResFormat /* GetDP 2.10.0, ascii */', '1.1 0', '$EndResFormat']
    for j in range(np.size(t_start)):
        for k in range(np.size(num_dofs)):
            com_str.append('$Solution /* DofData #' + str(k) + ' */')
            com_str.append(str(k) + ' ' + str(t_start) + ' 0 ' + str(j))
            y = u_start[dofpos[k]: dofpos[k + 1]]
            # write real and imaginary parts of y side by side
            # (this previously stacked np.imag(u_start) against np.real(y), a bug)
            com_str.append("\n".join(" ".join(map(str, line))
                                     for line in np.vstack((np.real(y), np.imag(y))).T))
            com_str.append('$EndSolution\n')
    with open(file, "w") as fid:
        fid.write("\n".join(com_str))
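# A hedged usage sketch: write a starting solution for a 4-DoF system at t = 0.
# The file name and values here are illustrative only:
import numpy as np

u0 = np.array([1.0 + 0.5j, 0.2 - 0.1j, 0.0 + 0.0j, -0.3 + 0.7j])
set_resolution("init.res", t_start=0.0, u_start=u0, num_dofs=4)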
def hqam(M, nn_dist=2): # M is the order of constellation, nn_dist is the nearest neighbour dist # The idea behind this algo is that we will create an M-ary constellation with a center point at(0,-y_1) # and then we will shift the constellation backwards in the x-axis in order to achieve the symmetry # of the constellation around the origin for a regular HQAM # We have to parametrize the func in order to generate different types of HQAM # depending on M and nn_dist # First we check if M is the square of an even integer even = 0 # even number that is even^2=M or the first even number that is even^2>M for i in range(2, M, 2): if i * i == M: even = i break elif i * i > M: even = i break diff = even**2 - M symbols = np.array([]) # this works only for nn_dist = 2, something went wrong with listing comprehension below (check the bounds) # We can observe that we have two possible arrays for the x_axis values if diff == 0: pos_x_axis1 = np.array([ x for x in range(nn_dist // 2, even + 1) if x % 2 == 1 ]) # for 64-HQAM neg_x_axis1 = np.array( [-x for x in range(nn_dist // 2, even + 1) if x % 2 == 1]) pos_x_axis2 = np.array( [x for x in range(nn_dist // 2, even + 1) if x % 2 == 0]) neg_x_axis2 = np.array([-x for x in range(0, even) if x % 2 == 0]) # for 64-HQAM # We can observe that we have only one array for y_axis values y_unity = np.sqrt(3 * (nn_dist**2) / 4) # the height of the basic equilateral triangle pos_y_axis = np.array([ y_unity / 2 + y_unity * i for i in range(0, even // 2) ]) # for 64-HQAM not exactly xd neg_y_axis = np.array([ -y_unity / 2 - y_unity * (even // 2 - i - 1) for i in range(0, even // 2) ]) # build 1st quadrant cnt1 = 0 cnt2 = 0 for column in range(0, even, 1): if column % 2 == 0: temp = np.ones(np.ceil(even // 4).astype(int)) * pos_x_axis1[ cnt1] # the real part of the symbol temp = temp + 1j * np.array( [pos_y_axis[i] for i in range(0, even // 2) if i % 2 == 0]) symbols = np.concatenate((symbols, temp)) cnt1 += 1 else: temp = np.ones(np.ceil( even // 4).astype(int)) * pos_x_axis2[cnt2] temp = temp + 1j * np.array( [pos_y_axis[i] for i in range(0, even // 2) if i % 2 != 0]) symbols = np.concatenate((symbols, temp)) cnt2 += 1 # build 2nd quadrant cnt1 = 0 cnt2 = 0 for column in range(0, even, 1): if column % 2 == 0: temp = np.ones(np.ceil(even // 4).astype(int)) * neg_x_axis1[ cnt1] # the ceil and astype(int) are added # because of the posibility that we want to produce 32-HQAM and therefore even == 6 temp = temp + 1j * np.array( [pos_y_axis[i] for i in range(0, even // 2) if i % 2 == 0]) symbols = np.concatenate((symbols, temp)) cnt1 += 1 else: temp = np.ones(np.ceil( even // 4).astype(int)) * neg_x_axis2[cnt2] temp = temp + 1j * np.array( [pos_y_axis[i] for i in range(0, even // 2) if i % 2 != 0]) symbols = np.concatenate((symbols, temp)) cnt2 += 1 # build 3rd quadrant cnt1 = 0 cnt2 = 0 for column in range(0, even, 1): if column % 2 == 0: temp = np.ones(np.ceil( even // 4).astype(int)) * neg_x_axis1[cnt1] temp = temp + 1j * np.array( [neg_y_axis[i] for i in range(0, even // 2) if i % 2 == 0]) symbols = np.concatenate((symbols, temp)) cnt1 += 1 else: temp = np.ones(np.ceil( even // 4).astype(int)) * neg_x_axis2[cnt2] temp = temp + 1j * np.array( [neg_y_axis[i] for i in range(0, even // 2) if i % 2 != 0]) symbols = np.concatenate((symbols, temp)) cnt2 += 1 # build 4th quadrant cnt1 = 0 cnt2 = 0 for column in range(0, even, 1): if column % 2 == 0: temp = np.ones(np.ceil( even // 4).astype(int)) * pos_x_axis1[cnt1] temp = temp + 1j * np.array( [neg_y_axis[i] for i in range(0, 
even // 2) if i % 2 == 0]) symbols = np.concatenate((symbols, temp)) cnt1 += 1 else: temp = np.ones(np.ceil( even // 4).astype(int)) * pos_x_axis2[cnt2] temp = temp + 1j * np.array( [neg_y_axis[i] for i in range(0, even // 2) if i % 2 != 0]) symbols = np.concatenate((symbols, temp)) cnt2 += 1 # Now we will shift the constellation in x-axis direction shift = np.sqrt((nn_dist / 2)**2 - (y_unity / 2)**2) # calculated by pythagorean theorem for k in range(0, len(symbols)): symbols[k] -= shift # by this move we achieve the symmetry return symbols else: # diff !=0 # We will assume that we can create the constellation with even * even points and then we will erase # diff points from the constellation, we will erase them depending on their energy # First, we have created an even^2 constellation, # symbols is an already constructed np.array with points on the complex plane # Now , we are going to remove the diff points from the symbols pos_x_axis1 = np.array([ x for x in range(nn_dist // 2, even + 1) if x % 2 == 1 ]) # for 64-HQAM neg_x_axis1 = np.array( [-x for x in range(nn_dist // 2, even + 1) if x % 2 == 1]) pos_x_axis2 = np.array( [x for x in range(nn_dist // 2, even + 1) if x % 2 == 0]) neg_x_axis2 = np.array([-x for x in range(0, even) if x % 2 == 0]) # for 64-HQAM # We can observe that we have only one array for y_axis values y_unity = np.sqrt(3 * (nn_dist**2) / 4) # the height of the basic equilateral triangle pos_y_axis = np.array([ y_unity / 2 + y_unity * i for i in range(0, even // 2) ]) # for 64-HQAM not exactly xd neg_y_axis = np.array([ -y_unity / 2 - y_unity * (even // 2 - i - 1) for i in range(0, even // 2) ]) # build 1st quadrant cnt1 = 0 cnt2 = 0 for column in range(0, even, 1): if column % 2 == 0: temp = np.ones(np.ceil(even // 4).astype(int)) * pos_x_axis1[ cnt1] # the real part of the symbol temp = temp + 1j * np.array( [pos_y_axis[i] for i in range(0, even // 2) if i % 2 == 0]) symbols = np.concatenate((symbols, temp)) cnt1 += 1 else: temp = np.ones(np.ceil( even // 4).astype(int)) * pos_x_axis2[cnt2] temp = temp + 1j * np.array( [pos_y_axis[i] for i in range(0, even // 2) if i % 2 != 0]) symbols = np.concatenate((symbols, temp)) cnt2 += 1 # build 2nd quadrant cnt1 = 0 cnt2 = 0 for column in range(0, even, 1): if column % 2 == 0: temp = np.ones(np.ceil( even // 4).astype(int)) * neg_x_axis1[cnt1] temp = temp + 1j * np.array( [pos_y_axis[i] for i in range(0, even // 2) if i % 2 == 0]) symbols = np.concatenate((symbols, temp)) cnt1 += 1 else: temp = np.ones(np.ceil( even // 4).astype(int)) * neg_x_axis2[cnt2] temp = temp + 1j * np.array( [pos_y_axis[i] for i in range(0, even // 2) if i % 2 != 0]) symbols = np.concatenate((symbols, temp)) cnt2 += 1 # build 3rd quadrant cnt1 = 0 cnt2 = 0 for column in range(0, even, 1): if (even // 2) % 2 == 0: if column % 2 == 0: temp = np.ones(np.ceil( even // 4).astype(int)) * neg_x_axis1[cnt1] temp = temp + 1j * np.array([ neg_y_axis[i] for i in range(0, even // 2) if i % 2 == 0 ]) symbols = np.concatenate((symbols, temp)) cnt1 += 1 else: temp = np.ones(np.ceil( even // 4).astype(int)) * neg_x_axis2[cnt2] temp = temp + 1j * np.array([ neg_y_axis[i] for i in range(0, even // 2) if i % 2 != 0 ]) symbols = np.concatenate((symbols, temp)) cnt2 += 1 else: # exception when we have 32-HQAM even = 6 and even//2 = 3 if column % 2 == 0: temp = np.ones(np.ceil( even // 4).astype(int)) * neg_x_axis2[cnt1] temp = temp + 1j * np.array([ neg_y_axis[i] for i in range(0, even // 2) if i % 2 == 0 ]) symbols = np.concatenate((symbols, temp)) cnt1 += 1 else: 
temp = np.ones(np.ceil( even // 4).astype(int)) * neg_x_axis1[cnt2] temp = temp + 1j * np.array([ neg_y_axis[i] for i in range(0, even // 2) if i % 2 != 0 ]) symbols = np.concatenate((symbols, temp)) cnt2 += 1 # build 4th quadrant cnt1 = 0 cnt2 = 0 for column in range(0, even, 1): if (even // 2) % 2 == 0: if column % 2 == 0: temp = np.ones(np.ceil( even // 4).astype(int)) * pos_x_axis1[cnt1] temp = temp + 1j * np.array([ neg_y_axis[i] for i in range(0, even // 2) if i % 2 == 0 ]) symbols = np.concatenate((symbols, temp)) cnt1 += 1 else: temp = np.ones(np.ceil( even // 4).astype(int)) * pos_x_axis2[cnt2] temp = temp + 1j * np.array([ neg_y_axis[i] for i in range(0, even // 2) if i % 2 != 0 ]) symbols = np.concatenate((symbols, temp)) cnt2 += 1 else: # exception when we have 32-HQAM even = 6 and even//2 = 3 if column % 2 == 0: temp = np.ones(np.ceil( even // 4).astype(int)) * pos_x_axis2[cnt1] temp = temp + 1j * np.array([ neg_y_axis[i] for i in range(0, even // 2) if i % 2 == 0 ]) symbols = np.concatenate((symbols, temp)) cnt1 += 1 else: temp = np.ones(np.ceil( even // 4).astype(int)) * pos_x_axis1[cnt2] temp = temp + 1j * np.array([ neg_y_axis[i] for i in range(0, even // 2) if i % 2 != 0 ]) symbols = np.concatenate((symbols, temp)) cnt2 += 1 shift = np.sqrt((nn_dist / 2)**2 - (y_unity / 2)**2) for k in range(0, len(symbols)): symbols[k] -= shift # by this move we achieve the symmetry energy = [ np.real(symbol)**2 + np.imag(symbol)**2 for symbol in symbols ] energy = sorting.merge( energy ) # sorted in ascending order O(n*log n) algorithm complexity erased = [] removed = 0 for k in range(0, len(symbols)): symbol_energy = np.real(symbols[k])**2 + np.imag( symbols[k] )**2 # calculate energy per symbol while traversing symbols array # compare it to the diff-biggest elements of the energy array and if it is inside delete it for j in range(len(energy) - 1, len(energy) - diff, -1): if energy[j] == symbol_energy: removed += 1 erased.append(k) break if removed == diff: break print(len(erased)) symbols = np.delete(symbols, erased) print(len(symbols)) return symbols
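# A quick visual check of the generator above (a sketch; it assumes matplotlib is
# available and uses nn_dist=2, the case the comments above say is supported):
import numpy as np
import matplotlib.pyplot as plt

symbols = hqam(64, nn_dist=2)
plt.plot(np.real(symbols), np.imag(symbols), 'o')
plt.gca().set_aspect('equal')
plt.grid(True)
plt.title('64-HQAM constellation')
plt.show()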
def apply_caltable_uvfits(gaincaltable, datastruct, filename_out, cal_amp=False): """apply a calibration table to a uvfits file Args: caltable (Caltable) : a gaincaltable object datastruct (Datastruct) : input data structure in EHTIM format filename_out (str) : uvfits output file name cal_amp (bool): whether to do amplitude calibration """ if datastruct.dtype != "EHTIM": raise Exception( "datastruct must be in EHTIM format in apply_caltable_uvfits!") gains0 = pd.read_csv(gaincaltable) polygain = {} mjd_start = {} polyamp = {} #deterimine which calibration to use when multiple options for multiple periods mjd_mean = datastruct.data['time'].mean() - MJD_0 gains = gains0[(gains0.mjd_start <= mjd_mean) & (gains0.mjd_stop >= mjd_mean)].reset_index( drop=True).copy() for cou, row in gains.iterrows(): polygain[row.station] = poly_from_str(str(row.ratio_phas)) mjd_start[row.station] = row.mjd_start if cal_amp == True: polyamp[row.station] = poly_from_str(str(row.ratio_amp)) else: polyamp[row.station] = poly_from_str('1.0') #print(gains0) #print(polygain) # interpolate the calibration table rinterp = {} linterp = {} skipsites = [] #------------------------------------------- # sort by baseline data = datastruct.data idx = np.lexsort((data['t2'], data['t1'])) bllist = [] for key, group in it.groupby(data[idx], lambda x: set((x['t1'], x['t2']))): bllist.append(np.array([obs for obs in group])) bllist = np.array(bllist) # apply the calibration datatable = [] coub = 0 for bl_obs in bllist: t1 = bl_obs['t1'][0] t2 = bl_obs['t2'][0] coub = coub + 1 print('Calibrating {}-{} baseline, {}/{}'.format( t1, t2, coub, len(bllist))) time_mjd = bl_obs['time'] - MJD_0 #dates are in mjd in Datastruct ########################################################################################################################### #OLD VERSION WHERE LCP IS SHIFTED TO RCP # if t1 in skipsites: # rscale1 = lscale1 = np.array(1.) # else: # try: # rscale1 = 1./np.sqrt(polyamp[t1](time_mjd)) # lscale1 = np.sqrt(polyamp[t1](time_mjd))*np.exp(1j*polygain[t1](time_mjd - mjd_start[t1])*np.pi/180.) # except KeyError: # rscale1 = lscale1 = np.array(1.) # # if t2 in skipsites: # rscale2 = lscale2 = np.array(1.) # else: # try: # rscale2 = 1./np.sqrt(polyamp[t2](time_mjd)) # lscale2 = np.sqrt(polyamp[t2](time_mjd))*np.exp(1j*polygain[t2](time_mjd - mjd_start[t2])*np.pi/180.) # except KeyError: # rscale2 = lscale2 = np.array(1.) ########################################################################################################################### ########################################################################################################################### #NEW VERSION WHERE RCP IS SHIFTED TO LCP // MW 2018/NOV/13 if t1 in skipsites: rscale1 = lscale1 = np.array(1.) else: try: rscale1 = 1. / np.sqrt(polyamp[t1](time_mjd)) * np.exp( -1j * polygain[t1](time_mjd - mjd_start[t1]) * np.pi / 180.) lscale1 = np.sqrt(polyamp[t1](time_mjd)) except KeyError: rscale1 = lscale1 = np.array(1.) if t2 in skipsites: rscale2 = lscale2 = np.array(1.) else: try: rscale2 = 1. / np.sqrt(polyamp[t2](time_mjd)) * np.exp( -1j * polygain[t2](time_mjd - mjd_start[t2]) * np.pi / 180.) lscale2 = np.sqrt(polyamp[t2](time_mjd)) except KeyError: rscale2 = lscale2 = np.array(1.) 
########################################################################################################################### rrscale = rscale1 * rscale2.conj() llscale = lscale1 * lscale2.conj() rlscale = rscale1 * lscale2.conj() lrscale = lscale1 * rscale2.conj() bl_obs['rr'] = (bl_obs['rr']) * rrscale bl_obs['ll'] = (bl_obs['ll']) * llscale bl_obs['rl'] = (bl_obs['rl']) * rlscale bl_obs['lr'] = (bl_obs['lr']) * lrscale bl_obs['rrweight'] = (bl_obs['rrweight']) / (np.abs(rrscale)**2) bl_obs['llweight'] = (bl_obs['llweight']) / (np.abs(llscale)**2) bl_obs['rlweight'] = (bl_obs['rlweight']) / (np.abs(rlscale)**2) bl_obs['lrweight'] = (bl_obs['lrweight']) / (np.abs(lrscale)**2) if len(datatable): datatable = np.hstack((datatable, bl_obs)) else: datatable = bl_obs # put in uvfits format datastruct # telescope arrays tarr = datastruct.antenna_info tkeys = {tarr[i]['site']: i for i in range(len(tarr))} tnames = tarr['site'] tnums = np.arange(1, len(tarr) + 1) xyz = np.array([[tarr[i]['x'], tarr[i]['y'], tarr[i]['z']] for i in np.arange(len(tarr))]) # uvfits format output data table bl_list = [] for i in xrange(len(datatable)): entry = datatable[i] t1num = entry['t1'] t2num = entry['t2'] rl = entry['rl'] lr = entry['lr'] if tkeys[entry['t2']] < tkeys[ entry['t1']]: # reorder telescopes if necessary #print entry['t1'], tkeys[entry['t1']], entry['t2'], tkeys[entry['t2']] entry['t1'] = t2num entry['t2'] = t1num entry['u'] = -entry['u'] entry['v'] = -entry['v'] entry['rr'] = np.conj(entry['rr']) entry['ll'] = np.conj(entry['ll']) entry['rl'] = np.conj(lr) entry['lr'] = np.conj(rl) datatable[i] = entry bl_list.append( np.array((entry['time'], entry['t1'], entry['t2']), dtype=BLTYPE)) _, unique_idx_anttime, idx_anttime = np.unique(bl_list, return_index=True, return_inverse=True) _, unique_idx_freq, idx_freq = np.unique(datatable['freq'], return_index=True, return_inverse=True) # random group params u = datatable['u'][unique_idx_anttime] v = datatable['v'][unique_idx_anttime] t1num = [tkeys[scope] + 1 for scope in datatable['t1'][unique_idx_anttime]] t2num = [tkeys[scope] + 1 for scope in datatable['t2'][unique_idx_anttime]] bls = 256 * np.array(t1num) + np.array(t2num) jds = datatable['time'][unique_idx_anttime] tints = datatable['tint'][unique_idx_anttime] # data table nap = len(unique_idx_anttime) nsubchan = 1 nstokes = 4 nchan = datastruct.obs_info.nchan outdat = np.zeros((nap, 1, 1, nchan, nsubchan, nstokes, 3)) outdat[:, :, :, :, :, :, 2] = -1.0 vistypes = ['rr', 'll', 'rl', 'lr'] for i in xrange(len(datatable)): row_freq_idx = idx_freq[i] row_dat_idx = idx_anttime[i] for j in range(len(vistypes)): outdat[row_dat_idx, 0, 0, row_freq_idx, 0, j, 0] = np.real(datatable[i][vistypes[j]]) outdat[row_dat_idx, 0, 0, row_freq_idx, 0, j, 1] = np.imag(datatable[i][vistypes[j]]) outdat[row_dat_idx, 0, 0, row_freq_idx, 0, j, 2] = datatable[i][vistypes[j] + 'weight'] # package data for saving obsinfo_out = datastruct.obs_info antennainfo_out = Antenna_info(tnames, tnums, xyz) uvfitsdata_out = Uvfits_data(u, v, bls, jds, tints, outdat) datastruct_out = Datastruct(obsinfo_out, antennainfo_out, uvfitsdata_out) # save final file save_uvfits(datastruct_out, filename_out) return
# ntmodplot = (tmod[-1] - tmod[0])/dtmod + 1
# tmodplot =
# ntmod = tmod.shape[0]

# calculate the Fourier transform; the data must be evenly spaced (if not, interpolate first)
ft = np.fft.fft(x) / nt
ftreal = np.real(ft)
ftimag = np.imag(ft)
freq = np.fft.fftfreq(nt, d=dt)
w = twopi * freq
ftabs = np.abs(ft)

# evaluate the model on the plotting grid, one time point at a time
for it in range(ntmod):
    xmodplot[it] = xmodplot[it] + np.sum(ftreal*np.cos(w*tfftmod[it]) - ftimag*np.sin(w*tfftmod[it]))

ft = myfft.myft(t, x, sig)
ftabs = ft[4]
sigftabs = ft[5]
freq = ft[6]
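# The reconstruction loop above evaluates the inverse DFT term by term with
# cos/sin. The same sum written with complex exponentials, since
# Re((a + ib) e^{iwt}) = a*cos(wt) - b*sin(wt) (a sketch, assuming x, nt and dt
# as above):
import numpy as np

ft = np.fft.fft(x) / nt
w = 2 * np.pi * np.fft.fftfreq(nt, d=dt)
t_eval = 0.37  # any evaluation time, not restricted to the sample grid
x_model = np.real(np.sum(ft * np.exp(1j * w * t_eval)))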
def get_slowest_pole(self, h_step, dt, NP, known_poles, stride): #print('\nget_slowest called with', NP, known_poles, stride) def d(n, stride): return np.array([h_step[n+stride*i] for i in range(NP+1)]) def get_data_for_stride(stride): num_samples = len(h_step) - NP*stride samples = [d(n, stride) for n in range(num_samples)] # make samples rows in a (tall) matrix sample_matrix = np.stack(samples, 0) return sample_matrix data = get_data_for_stride(stride) # We split the collected data into the initial points, and the final point A = data[:,:-1] B = data[:,-1:] # Consider 5 poles, 2 already known # We can do some linear algebra to find column vector X such that # x0*a[n] + x1*a[n+1] + x2*a[n+2] + x3*a[n+3] + x4*a[n+4] + a[n+5] = 0 # a[n](x0 + x1*r + x2*r^2 + x3*r^3 + x4*r^4 + r^5) = 0 # r^5 + x4*r^4 + x3*r^3 + x2*r^2 + x1*r + x0 = 0 # and then solve this polynomial to find the roots # BUT # With 2 known poles, we know this should factor out to # (r-p1)(r-p2)(r^3 + y0*r^2 + y1*r + y2) = 0 # So we really want to use our linear algebra to find the Y vector instead # First we need a matrix Z, which depends on the known ps, s.t. X = ZY, (5x1) = (5x3)(3x1) # Then our linear algebra that was AX+B=0 becomes AZY+B=0, which is easily solvable for Y # Step 1: find Z # A a reminder, X = ZY where X and Y are defined as: # r^5 + x4*r^4 + x3*r^3 + x2*r^2 + x1*r + x0 = 0 # (r-p1)(r-p2)(r^3 + y2*r^2 + y1*r + y0) = 0 # Define C, which is coefficients of poly from known roots # r^2 + (-p1-p2)*r + p1*p2 -> c0=p1*p2, c1=(-p1-p2) # We see that each term in X[i] is a product of Y[j] and C[k] terms s.t. i=j+k # SO we can directly write Z[i,j] = C[i-j], or 0 if that's outside the C bounds # BE CAREFUL about the leading 1s in these polynomials. # In our example, the full Z would be 6x4, including leading 1s. It's okay to drop the # bottom row because it only contributes to the leading 1 of x, which we want to drop. # But we can't drop the right column, which corresponds to the leading 1 of Y, because # it contributes to other rows in X. # Z: 5x3 version of Z, with bottom row and right column dropped # Z~: 5x4 version of Z, with only bottom row dropped # Y~: 4x1 version of Y, with a constant one in the fourth spot # A Z~ Y~ == -B # We can't use least-squares to find Y right now because of that required constant 1 # E: 4x3 almost-identity # F: 4x1 column, [0,0,0,1] # A Z~ (E Y + F) == -B # A Z~ E Y + A Z~ F == -B # A Z Y == -B - A Z~_last_column # So we need to do a modified regression: we can drop that extra column on Z~, but we have # to first use it to modify the B vector # Similarly, X = Z~ Y~ becomes X = Z Y + Z~_last_column known_rs = np.exp(np.array(known_poles)*stride) if np.isinf(known_rs).any(): # probably got a bad pole in known_poles, should just give up return [] poly = np.polynomial.polynomial C = poly.polyfromroots(known_rs) Z_tilde = np.zeros((NP, NP-len(known_rs)+1), dtype=C.dtype) for i in range(Z_tilde.shape[0]): for j in range(Z_tilde.shape[1]): k = i-j if k >= 0 and k < len(C): Z_tilde[i,j] = C[k] Z = Z_tilde[:,:-1] Z_column = Z_tilde[:,-1:] Y = np.linalg.pinv(A@Z) @ (-B - (A@Z_column)) X = Z@Y + Z_column # x0 * d0 + x1 * d2 + d2 = 0 # a[n](x0 + x1*r + r^2) = 0 poly = np.concatenate([[1], X[::-1,0]]) #print('poly', poly) roots = np.roots(poly) #print('roots', roots) # errors often cause small roots to go negative when they shouldn't. # This messes with the log, so we explicitly call those nan. 
        def mylog(x):
            if np.real(x) == 0:
                return float('nan')
            if not abs(np.imag(x)/np.real(x)) > 1e-6 and np.real(x) <= 0:
                return float('nan')
            else:
                return np.log(x)
        ps_with_stride = np.vectorize(mylog)(roots)
        ps = ps_with_stride / (stride * dt)

        # remove known poles
        key = lambda x: float('inf') if np.isnan(x) else abs(x)
        new_ps = sorted(ps, key=key)
        #print('Before processing, found ps', new_ps)
        for known_p in known_poles:
            for i in range(len(new_ps)):
                # TODO is this epsilon reasonable when ps are likely ~1e10?
                # (the abs() previously wrapped the comparison instead of the difference, a bug)
                if new_ps[i] is not None and abs(new_ps[i] - known_p) < 1e-6:
                    new_ps.pop(i)
                    break
            else:
                if np.isnan(new_ps).any():
                    # the nans are probably causing the error
                    return []
                #print(known_poles)
                #print(new_ps)
                assert False, f'Known pole {known_p} not found!'

        # finally, return 0 (if nan), 1, or 2 (if complex conjugate) slowest new poles
        #print('After processing, new ps', new_ps)
        assert len(new_ps) > 0, 'Found no new poles ... check NP and len(known_poles)'
        if abs(np.imag(new_ps[0])) > 1e-6:
            # complex conjugate pair
            #print(ps, new_ps)
            assert len(new_ps) >= 2, 'Only found one of complex conjugate pair?'
            if abs(np.conj(new_ps[0]) - new_ps[1]) > 1e-6 and np.isnan(new_ps).any():
                return []
            assert abs(np.conj(new_ps[0]) - new_ps[1]) < 1e-6, 'Issue with conjugate pair, check sorting?'
            return new_ps[:2]
        elif not np.isnan(new_ps[0]):
            return new_ps[:1]
        else:
            # empty list
            return new_ps[:0]
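# The comments above describe the core of the method: fit a linear recurrence to
# equally spaced samples, root its characteristic polynomial, and map each root
# r to a pole log(r)/dt. A standalone sketch of that step without the known-pole
# factoring (differencing removes the step's DC offset, which the full method
# instead handles through the Z-matrix machinery):
import numpy as np

dt = 1e-3
t = np.arange(200) * dt
h_step = 1.0 - np.exp(-50.0 * t)    # single-pole step response, pole at -50
NP = 1
A = np.stack([h_step[n:n + NP] for n in range(len(h_step) - NP)])
B = h_step[NP:]
# Solve A X = -B on differenced data so the constant offset drops out
X, *_ = np.linalg.lstsq(np.diff(A, axis=0), -np.diff(B), rcond=None)
r = np.roots(np.concatenate([[1.0], X[::-1]]))  # roots of r^NP + x_{NP-1} r^{NP-1} + ... + x0
poles = np.log(r) / dt                          # recovers ~ -50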
def remap_uv(src_file, src_grd, dst_grd, dmax=0, cdepth=0, kk=0, dst_dir='./'): ystart = 240 # get time nctime.long_name = 'time' nctime.units = 'days since 1900-01-01 00:00:00' # time reference "days since 1900-01-01 00:00:00" ref = datetime(1900, 1, 1, 0, 0, 0) ref = date2num(ref) tag = src_file.rsplit('/')[-1].rsplit('_')[-1].rsplit('-')[0] year = int(tag[:4]) month = int(tag[4:6]) day = int(tag[6:]) time = datetime(year, month, day, 0, 0, 0) time = date2num(time) time = time - ref time = time + 2.5 # 5-day average # get dimensions Mp, Lp = dst_grd.hgrid.mask_rho.shape # create destination file dst_file = src_file.rsplit('/')[-1] dst_fileu = dst_dir + dst_file[:-4] + '_u_ic_' + dst_grd.name + '.nc' print '\nCreating destination file', dst_fileu if os.path.exists(dst_fileu) is True: os.remove(dst_fileu) pyroms_toolbox.nc_create_roms_file(dst_fileu, dst_grd, nctime) dst_filev = dst_dir + dst_file[:-4] + '_v_ic_' + dst_grd.name + '.nc' print 'Creating destination file', dst_filev if os.path.exists(dst_filev) is True: os.remove(dst_filev) pyroms_toolbox.nc_create_roms_file(dst_filev, dst_grd, nctime) # open destination file ncu = netCDF.Dataset(dst_fileu, 'a', format='NETCDF3_64BIT') ncv = netCDF.Dataset(dst_filev, 'a', format='NETCDF3_64BIT') #load var cdf = netCDF.Dataset(src_file) src_varu = cdf.variables['u'] src_varv = cdf.variables['v'] #get missing value spval = src_varu._FillValue # ARCTIC grid sub-sample src_varu = src_varu[:] src_varu = src_varu[:, np.r_[ystart:np.size(src_varu, 1), -1], :] src_varv = src_varv[:] src_varv = src_varv[:, np.r_[ystart:np.size(src_varv, 1), -1], :] # get weights file wts_file = 'remap_weights_SODA_2.1.6_to_ARCTIC2_bilinear_uv_to_rho.nc' # build intermediate zgrid zlevel = -src_grd.z_t[::-1, 0, 0] nzlevel = len(zlevel) dst_zcoord = pyroms.vgrid.z_coordinate(dst_grd.vgrid.h, zlevel, nzlevel) dst_grdz = pyroms.grid.ROMS_Grid(dst_grd.name + '_Z', dst_grd.hgrid, dst_zcoord) # create variable in destination file print 'Creating variable u' ncu.createVariable('u', 'f8', ('ocean_time', 's_rho', 'eta_u', 'xi_u'), fill_value=spval) ncu.variables['u'].long_name = '3D u-momentum component' ncu.variables['u'].units = 'meter second-1' ncu.variables['u'].field = 'u-velocity, scalar, series' # create variable in destination file print 'Creating variable ubar' ncu.createVariable('ubar', 'f8', ('ocean_time', 'eta_u', 'xi_u'), fill_value=spval) ncu.variables['ubar'].long_name = '2D u-momentum component' ncu.variables['ubar'].units = 'meter second-1' ncu.variables['ubar'].field = 'ubar-velocity,, scalar, series' print 'Creating variable v' ncv.createVariable('v', 'f8', ('ocean_time', 's_rho', 'eta_v', 'xi_v'), fill_value=spval) ncv.variables['v'].long_name = '3D v-momentum component' ncv.variables['v'].units = 'meter second-1' ncv.variables['v'].field = 'v-velocity, scalar, series' print 'Creating variable vbar' ncv.createVariable('vbar', 'f8', ('ocean_time', 'eta_v', 'xi_v'), fill_value=spval) ncv.variables['vbar'].long_name = '2D v-momentum component' ncv.variables['vbar'].units = 'meter second-1' ncv.variables['vbar'].field = 'vbar-velocity,, scalar, series' # remaping print 'remapping and rotating u and v from', src_grd.name, \ 'to', dst_grd.name print 'time =', time # flood the grid print 'flood the grid' src_uz = pyroms_toolbox.BGrid_SODA.flood(src_varu, src_grd, Bpos='uv', \ spval=spval, dmax=dmax, cdepth=cdepth, kk=kk) src_vz = pyroms_toolbox.BGrid_SODA.flood(src_varv, src_grd, Bpos='uv', \ spval=spval, dmax=dmax, cdepth=cdepth, kk=kk) # horizontal interpolation 
using scrip weights print 'horizontal interpolation using scrip weights' dst_uz = pyroms.remapping.remap(src_uz, wts_file, \ spval=spval) dst_vz = pyroms.remapping.remap(src_vz, wts_file, \ spval=spval) # vertical interpolation from standard z level to sigma print 'vertical interpolation from standard z level to sigma' dst_u = pyroms.remapping.z2roms(dst_uz[::-1,:,:], dst_grdz, \ dst_grd, Cpos='rho', spval=spval, flood=False) dst_v = pyroms.remapping.z2roms(dst_vz[::-1,:,:], dst_grdz, \ dst_grd, Cpos='rho', spval=spval, flood=False) # rotate u,v fields src_angle = np.zeros(dst_grd.hgrid.angle_rho.shape) dst_angle = dst_grd.hgrid.angle_rho angle = dst_angle - src_angle angle = np.tile(angle, (dst_grd.vgrid.N, 1, 1)) U = dst_u + dst_v * 1j eitheta = np.exp(-1j * angle[:, :, :]) U = U * eitheta dst_u = np.real(U) dst_v = np.imag(U) # move back to u,v points dst_u = 0.5 * (dst_u[:, :, :-1] + dst_u[:, :, 1:]) dst_v = 0.5 * (dst_v[:, :-1, :] + dst_v[:, 1:, :]) # spval idxu = np.where(dst_grd.hgrid.mask_u == 0) idxv = np.where(dst_grd.hgrid.mask_v == 0) for n in range(dst_grd.vgrid.N): dst_u[n, idxu[0], idxu[1]] = spval dst_v[n, idxv[0], idxv[1]] = spval # compute depth average velocity ubar and vbar # get z at the right position z_u = 0.5 * (dst_grd.vgrid.z_w[0, :, :, :-1] + dst_grd.vgrid.z_w[0, :, :, 1:]) z_v = 0.5 * (dst_grd.vgrid.z_w[0, :, :-1, :] + dst_grd.vgrid.z_w[0, :, 1:, :]) dst_ubar = np.zeros((dst_u.shape[1], dst_u.shape[2])) dst_vbar = np.zeros((dst_v.shape[1], dst_v.shape[2])) for i in range(dst_ubar.shape[1]): for j in range(dst_ubar.shape[0]): dst_ubar[j, i] = (dst_u[:, j, i] * np.diff(z_u[:, j, i])).sum() / -z_u[0, j, i] for i in range(dst_vbar.shape[1]): for j in range(dst_vbar.shape[0]): dst_vbar[j, i] = (dst_v[:, j, i] * np.diff(z_v[:, j, i])).sum() / -z_v[0, j, i] # spval dst_ubar[idxu[0], idxu[1]] = spval dst_vbar[idxv[0], idxv[1]] = spval # write data in destination file print 'write data in destination file' ncu.variables['ocean_time'][0] = time ncu.variables['u'][0] = dst_u ncu.variables['ubar'][0] = dst_ubar ncv.variables['ocean_time'][0] = time ncv.variables['v'][0] = dst_v ncv.variables['vbar'][0] = dst_vbar print dst_u.shape print dst_ubar.shape print dst_v.shape print dst_vbar.shape # close destination file ncu.close() ncv.close()
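# The rotation step above packs (u, v) into u + iv and multiplies by exp(-i*angle).
# A tiny self-contained check that this matches the usual rotation convention:
import numpy as np

u, v, angle = 1.0, 0.0, np.pi / 2       # east-pointing unit vector, angle of 90 degrees
U = (u + 1j * v) * np.exp(-1j * angle)  # multiplying by exp(-i*angle) rotates clockwise
u_rot, v_rot = np.real(U), np.imag(U)   # -> (0, -1), i.e. now pointing south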
    irregular = []
    while idx < M:
        # create the irregular constellation by selecting the lowest M energies from the hash_map
        irregular.append(temp[hash_map[idx][0]])
        idx += 1
    irregular = np.array(irregular)
    return irregular


if __name__ == '__main__':
    constellation = IrrHQAM(16, 2)  # generate constellation with nn_dist = 2
    # AWGN: np.random.normal(mean, sigma, size). By changing sigma we change the noise
    # power; because the energy per symbol is fixed by the standard nn_dist, this
    # changes the SNR.
    n = np.random.normal(0, 0.1, 10000) + 1j * np.random.normal(0, 0.1, 10000)
    noise_power = 0.5  # SNR parameter
    r = np.random.choice(constellation, 10000) + n  # received symbols
    energy = [np.real(symbol)**2 + np.imag(symbol)**2 for symbol in constellation]
    Es = sum(energy) / 16  # average symbol energy
    plt.plot(np.real(r), np.imag(r), '.')
    plt.grid(True)
    plt.show()
def complex2ReIm(complx): return npy.real(complx), npy.imag(complx)
lambda x: np.linalg.eigvals(x), ], ) def test_array_notimpl_function_dask(func): x = np.random.random((100, 100)) y = da.from_array(x, chunks=(50, 50)) with pytest.warns(FutureWarning, match="The `.*` function is not implemented by Dask"): func(y) @pytest.mark.skipif(missing_arrfunc_cond, reason=missing_arrfunc_reason) @pytest.mark.parametrize( "func", [lambda x: np.real(x), lambda x: np.imag(x), lambda x: np.transpose(x)]) def test_array_function_sparse(func): sparse = pytest.importorskip("sparse") x = da.random.random((500, 500), chunks=(100, 100)) x[x < 0.9] = 0 y = x.map_blocks(sparse.COO) assert_eq(func(x), func(y)) @pytest.mark.skipif(missing_arrfunc_cond, reason=missing_arrfunc_reason) def test_array_function_sparse_tensordot(): sparse = pytest.importorskip("sparse") x = np.random.random((2, 3, 4)) x[x < 0.9] = 0
def fvol2sf(vol, r, b):
    """Transfer a volume in Fourier space into a series of spherical functions.
    Makes use of the Hermitian symmetry to speed up.

    Parameters
    ----------
    vol: The volume of Fourier coefficients. It should be full and zero frequency in the center!
         numpy.ndarray

    r: Radius in k-space
       Integer

    b: Bandwidth, which determines the sampling on the sphere
       Integer

    Returns
    -------
    f(the_0, phi_0) ... f(the_0, phi_2B-1), f(the_2B-1, phi_0) ... f(the_2B-1, phi_2B-1)
    List
    """
    if r > vol.shape[0]//2 or r > vol.shape[1]//2 or r > vol.shape[2]//2:
        raise RuntimeError("Given radius is larger than the volume!")
    elif r <= 0:
        raise RuntimeError("Radius should be larger than 0!")

    # zero frequency position
    m_x = vol.shape[0]//2; m_y = vol.shape[1]//2; m_z = vol.shape[2]//2

    # only half
    the = np.pi*(2*np.arange(b)+1) / (4*b)
    phi = np.pi*np.arange(2*b) / b

    # C order mesh
    phi, the = np.meshgrid(phi, the)
    the = the.flatten()
    phi = phi.flatten()

    # compute the coordinates
    x = r*np.cos(phi)*np.sin(the) + m_x
    y = r*np.sin(phi)*np.sin(the) + m_y
    z = r*np.cos(the) + m_z

    # use spline interpolation on the real/imaginary parts
    # can be done better, but for now it suffices
    vol_r = np.real(vol)
    vol_i = np.imag(vol)
    from scipy.ndimage import map_coordinates
    res_r_a = map_coordinates(vol_r, [x, y, z], order=2)
    res_i_a = map_coordinates(vol_i, [x, y, z], order=2)

    # fill in the other half using the Hermitian symmetry
    ind = np.arange(2*b**2)
    cind = (2*b-1-(ind+2*b**2)//(2*b))*2*b + np.mod(np.mod(ind, 2*b)+b, 2*b)

    res_r = np.zeros((4*b**2,))
    res_r[:2*b**2] = res_r_a
    res_r[2*b**2:] = res_r_a[cind]
    res_i = np.zeros((4*b**2,))
    res_i[:2*b**2] = res_i_a
    res_i[2*b**2:] = -res_i_a[cind]

    return [res_r, res_i]
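# A hedged usage sketch: sample one k-shell of a random volume's centered Fourier
# transform (fftshift puts zero frequency in the center, as the docstring requires):
import numpy as np

vol = np.fft.fftshift(np.fft.fftn(np.random.rand(32, 32, 32)))
res_r, res_i = fvol2sf(vol, r=5, b=8)  # each has 4*b*b samples on the radius-5 sphere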
def turb_3d(xmin, xmax, ymin, ymax, zmin, zmax, nx, ny, nz):
    """
    Parameters:
    -----------
    xmin: float
        The min domain size in the x-direction.
    xmax: float
        The max domain size in the x-direction.
    ymin: float
        The min domain size in the y-direction.
    ymax: float
        The max domain size in the y-direction.
    zmin: float
        The min domain size in the z-direction.
    zmax: float
        The max domain size in the z-direction.
    nx: integer
        The number of grid points in the x-direction.
    ny: integer
        The number of grid points in the y-direction.
    nz: integer
        The number of grid points in the z-direction.
    """
    i_cmplx = 1j  # np.complex(0, 1) was removed in recent NumPy

    # create complex arrays of size nx, ny, nz
    uk = np.zeros([nx, ny, nz], dtype=complex)
    q1 = np.zeros([nx, ny, nz], dtype=complex)
    q = np.zeros([nx, ny, nz], dtype=complex)
    X_FFT = np.zeros([nx, ny, nz], dtype=complex)

    # the largest grid spacing sets the cutoff wavenumber
    # (this previously chained the spacings with `and`, which returns the last
    # operand instead of the maximum)
    delta = np.max([(xmax - xmin) / nx, (ymax - ymin) / ny, (zmax - zmin) / nz])
    kc = pi / delta

    # determine the amplitude for the Passot-Pouquet spectrum
    amp = (sqrt((2.0 * pi)**3) / ((xmax - xmin) * (ymax - ymin) * (zmax - zmin)))

    for k in range(0, nz):
        # Sets the point, which is called the pi-wavenumber, to 0
        if ((2 * int(nz / 2) == nz) and (k == nz / 2)):
            continue
        for j in range(0, ny):
            # Sets the point, which is called the pi-wavenumber, to 0
            if ((2 * int(ny / 2) == ny) and (j == ny / 2)):
                continue
            for i in range(0, int(nx / 2)):
                """create random angles
                - ps1 and ps2 range from -pi to pi
                - psr ranges from 0 to 2*pi
                """
                ps1 = np.random.uniform(-pi, pi)
                ps2 = np.random.uniform(-pi, pi)
                psr = np.random.uniform(0, 2.0 * pi)

                # Leave the origin value as 0, otherwise a mean velocity will occur
                if ((i == 0) and (j == 0) and (k == 0)):
                    continue
                # Sets the point, which is called the pi-wavenumber, to 0
                if ((2 * int(nx / 2) == nx) and (i == nx / 2)):
                    continue

                kx = (i) * 2.0 * pi / (xmax - xmin)
                if (j <= ny / 2):
                    ky = (j) * 2.0 * pi / (ymax - ymin)
                else:
                    ky = -1.0 * (ny - j) * 2.0 * pi / (ymax - ymin)
                if (k <= nz / 2):
                    kz = (k) * 2.0 * pi / (zmax - zmin)
                else:
                    kz = -1.0 * (nz - k) * 2.0 * pi / (zmax - zmin)

                k2 = sqrt(kx**2 + ky**2)
                kmag = sqrt(kx**2 + ky**2 + kz**2)

                # Checks to see that the wave numbers are not larger than the cutoff
                if (kmag > kc):
                    continue

                e_spec, up, ke = passot_pouquet_spectrum(kmag, min(xmax, ymax, zmax))
                ak = amp * sqrt(e_spec / (2.0 * pi * kmag**2)) * np.exp(i_cmplx * ps1) * cos(psr)
                bk = amp * sqrt(e_spec / (2.0 * pi * kmag**2)) * np.exp(i_cmplx * ps2) * sin(psr)

                # Calculates the turbulence values
                if (k2 < (ke / 1e4)):
                    uk[i, j, k] = (ak + bk) / sqrt(2.0)
                else:
                    uk[i, j, k] = (ak * kmag * ky + bk * kx * kz) / (kmag * k2)

    # Finds the conjugate value calculation across the YZ plane
    conjugate_yzplane(uk[0, :, :], nx, ny, nz)

    # Finds the FFT in the Y-direction
    Y_FFT = fft(uk, axis=1)
    # Finds the FFT in the Z-direction
    Z_FFT = fft(Y_FFT, axis=2)

    # Finds the FFT in the X-direction after performing the conjugate
    for k in range(0, nz):
        for j in range(0, ny):
            q[0:nx, j, k] = Z_FFT[0:nx, j, k]
            for i in range(int(1 + nx / 2), nx):
                q[i, j, k] = conj(q[nx - i, j, k])
            q1 = fft(q, axis=0)
            X_FFT[0:nx, j, k] = q1[0:nx, j, k]

    """
    To verify that the turbulence is generated correctly, the data must satisfy these three conditions:
    1. The sum of the real values after the FFTs should be equal or close to 0.
    2. The maximum imaginary value after the FFTs should be equal or close to 0.
    3. The Spectral Energy Content and the Physical Energy Content should be the same.
        - To determine the Spectral Energy Content, you use the turbulence array before the FFTs.
        - To determine the Physical Energy Content, you use the turbulence array after the FFTs.
    """
    # Sum of Real Values
    sum_real = np.sum(X_FFT.real)

    # Maximum Imaginary Value
    max_imag = np.amax(np.abs(np.imag(X_FFT)))

    # Spectral Energy Content
    spectral_energy = np.sum(np.real(uk * np.conj(uk)))
    spectral_energy = spectral_energy - 0.5 * np.sum(np.real(uk[0, :, :] * np.conj(uk[0, :, :])))
    spectral_energy = spectral_energy / (1.5 * up**2)

    # Physical Energy Content
    physical_energy = 0.5 * np.sum(X_FFT.real**2)
    physical_energy = physical_energy / (1.5 * up**2 * nx * ny * nz)

    print("Sum of the Real Values: ", sum_real)
    print("Maximum Imaginary Value: ", max_imag)
    print("Spectral Energy Content: ", spectral_energy)
    print("Physical Energy Content: ", physical_energy)

    # Convert the array into a C-order array
    U = np.ascontiguousarray(X_FFT.real, dtype=np.float32)

    # Write the calculated data into a binary file
    with open('Z.bin', 'wb') as f:
        f.write(U)

    return U


"""csv_file = '3D_FFT_Output.csv'
def extract_netcdf_constants(ilon, ilat, grid_file, model_files, TYPE='z', METHOD='spline', EXTRAPOLATE=False, CUTOFF=10.0, GZIP=True, SCALE=1.0): """ Reads files for a netCDF4 tidal model Makes initial calculations to run the tide program Spatially interpolates tidal constituents to input coordinates Arguments --------- ilon: longitude to interpolate ilat: latitude to interpolate grid_file: grid file for model (can be gzipped) model_files: list of model files for each constituent (can be gzipped) Keyword arguments ----------------- TYPE: tidal variable to read z: heights u: horizontal transport velocities U: horizontal depth-averaged transport v: vertical transport velocities V: vertical depth-averaged transport METHOD: interpolation method bilinear: quick bilinear interpolation spline: scipy bivariate spline interpolation linear, nearest: scipy regular grid interpolations EXTRAPOLATE: extrapolate model using nearest-neighbors CUTOFF: extrapolation cutoff in kilometers set to np.inf to extrapolate for all points GZIP: input netCDF4 files are compressed SCALE: scaling factor for converting to output units Returns ------- amplitude: amplitudes of tidal constituents phase: phases of tidal constituents D: bathymetry of tide model constituents: list of model constituents """ #-- raise warning if model files are entered as a string if isinstance(model_files, str): warnings.warn("Tide model is entered as a string") model_files = [model_files] #-- read the tide grid file for bathymetry and spatial coordinates lon, lat, bathymetry = read_netcdf_grid(grid_file, TYPE, GZIP=GZIP) #-- grid step size of tide model dlon = lon[1] - lon[0] dlat = lat[1] - lat[0] #-- replace original values with extend arrays/matrices lon = extend_array(lon, dlon) bathymetry = extend_matrix(bathymetry) #-- create masks bathymetry.mask = (bathymetry.data == 0) #-- adjust dimensions of input coordinates to be iterable ilon = np.atleast_1d(ilon) ilat = np.atleast_1d(ilat) #-- adjust longitudinal convention of input latitude and longitude #-- to fit tide model convention lt0, = np.nonzero(ilon < 0) ilon[lt0] += 360.0 #-- number of points npts = len(ilon) #-- interpolate bathymetry and mask to output points D = np.ma.zeros((npts)) D.mask = np.zeros((npts), dtype=bool) if (METHOD == 'bilinear'): #-- replace invalid values with nan bathymetry[bathymetry.mask] = np.nan #-- use quick bilinear to interpolate values D.data[:] = bilinear_interp(lon, lat, bathymetry, ilon, ilat) #-- replace nan values with fill_value D.mask[:] = np.isnan(D.data) D.data[D.mask] = D.fill_value elif (METHOD == 'spline'): #-- use scipy bivariate splines to interpolate values f1 = scipy.interpolate.RectBivariateSpline(lon, lat, bathymetry.data.T, kx=1, ky=1) f2 = scipy.interpolate.RectBivariateSpline(lon, lat, bathymetry.mask.T, kx=1, ky=1) D.data[:] = f1.ev(ilon, ilat) D.mask[:] = np.ceil(f2.ev(ilon, ilat).astype(bool)) else: #-- use scipy regular grid to interpolate values for a given method r1 = scipy.interpolate.RegularGridInterpolator((lat, lon), bathymetry.data, method=METHOD, bounds_error=False) r2 = scipy.interpolate.RegularGridInterpolator((lat, lon), bathymetry.mask, method=METHOD, bounds_error=False, fill_value=1) D.data[:] = r1.__call__(np.c_[ilat, ilon]) D.mask[:] = np.ceil(r2.__call__(np.c_[ilat, ilon])).astype(bool) #-- u and v are velocities in cm/s if TYPE in ('v', 'u'): unit_conv = (D.data / 100.0) #-- U and V are transports in m^2/s elif TYPE in ('V', 'U'): unit_conv = 1.0 #-- number of constituents nc = len(model_files) #-- list of 
constituents constituents = [] #-- amplitude and phase ampl = np.ma.zeros((npts, nc)) ampl.mask = np.zeros((npts, nc), dtype=bool) ph = np.ma.zeros((npts, nc)) ph.mask = np.zeros((npts, nc), dtype=bool) #-- read and interpolate each constituent for i, model_file in enumerate(model_files): if (TYPE == 'z'): #-- read constituent from elevation file z, con = read_elevation_file(model_file, GZIP=GZIP) #-- append constituent to list constituents.append(con) #-- replace original values with extend matrices z = extend_matrix(z) #-- interpolate amplitude and phase of the constituent z1 = np.ma.zeros((npts), dtype=z.dtype) z1.mask = np.zeros((npts), dtype=bool) if (METHOD == 'bilinear'): #-- replace invalid values with nan z[z.mask] = np.nan z1.data[:] = bilinear_interp(lon, lat, z, ilon, ilat, dtype=z.dtype) #-- mask invalid values z1.mask[:] |= np.copy(D.mask) z1.data[z1.mask] = z1.fill_value elif (METHOD == 'spline'): f1 = scipy.interpolate.RectBivariateSpline(lon, lat, z.data.real.T, kx=1, ky=1) f2 = scipy.interpolate.RectBivariateSpline(lon, lat, z.data.imag.T, kx=1, ky=1) z1.data.real = f1.ev(ilon, ilat) z1.data.imag = f2.ev(ilon, ilat) #-- mask invalid values z1.mask[:] |= np.copy(D.mask) z1.data[z1.mask] = z1.fill_value else: #-- use scipy regular grid to interpolate values r1 = scipy.interpolate.RegularGridInterpolator( (lat, lon), z.data, method=METHOD, bounds_error=False, fill_value=z1.fill_value) z1.data[:] = r1.__call__(np.c_[ilat, ilon]) #-- mask invalid values z1.mask[:] |= np.copy(D.mask) z1.data[z1.mask] = z1.fill_value #-- extrapolate data using nearest-neighbors if EXTRAPOLATE and np.any(z1.mask): #-- find invalid data points inv, = np.nonzero(z1.mask) #-- replace invalid values with nan z[z.mask] = np.nan #-- extrapolate points within cutoff of valid model points z1.data[inv] = nearest_extrap(lon, lat, z, ilon[inv], ilat[inv], dtype=z.dtype, cutoff=CUTOFF) #-- replace nan values with fill_value z1.mask[inv] = np.isnan(z1.data[inv]) z1.data[z1.mask] = z1.fill_value #-- amplitude and phase of the constituent ampl.data[:, i] = np.abs(z1.data) ampl.mask[:, i] = np.copy(z1.mask) ph.data[:, i] = np.arctan2(-np.imag(z1.data), np.real(z1.data)) ph.mask[:, i] = np.copy(z1.mask) elif TYPE in ('U', 'u', 'V', 'v'): #-- read constituent from transport file tr, con = read_transport_file(model_file, TYPE, GZIP=GZIP) #-- append constituent to list constituents.append(con) #-- replace original values with extend matrices tr = extend_matrix(tr) #-- interpolate amplitude and phase of the constituent tr1 = np.ma.zeros((npts), dtype=tr.dtype) tr1.mask = np.zeros((npts), dtype=bool) if (METHOD == 'bilinear'): tr1.data[:] = bilinear_interp(lon, lat, tr, ilon, ilat, dtype=tr.dtype) #-- mask invalid values tr1.mask[:] |= np.copy(D.mask) tr1.data[tr1.mask] = tr1.fill_value elif (METHOD == 'spline'): f1 = scipy.interpolate.RectBivariateSpline(lon, lat, tr.data.real.T, kx=1, ky=1) f2 = scipy.interpolate.RectBivariateSpline(lon, lat, tr.data.imag.T, kx=1, ky=1) tr1.data.real = f1.ev(ilon, ilat) tr1.data.imag = f2.ev(ilon, ilat) #-- mask invalid values tr1.mask[:] |= np.copy(D.mask) tr1.data[tr1.mask] = tr1.fill_value else: #-- use scipy regular grid to interpolate values r1 = scipy.interpolate.RegularGridInterpolator( (lat, lon), tr.data, method=METHOD, bounds_error=False, fill_value=tr1.fill_value) tr1.data[:] = r1.__call__(np.c_[ilat, ilon]) #-- mask invalid values tr1.mask[:] |= np.copy(D.mask) tr1.data[tr1.mask] = tr1.fill_value #-- extrapolate data using nearest-neighbors if EXTRAPOLATE and
np.any(tr1.mask): #-- find invalid data points inv, = np.nonzero(tr1.mask) #-- replace invalid values with nan tr[tr.mask] = np.nan #-- extrapolate points within cutoff of valid model points tr1.data[inv] = nearest_extrap(lon, lat, tr, ilon[inv], ilat[inv], dtype=tr.dtype, cutoff=CUTOFF) #-- replace nan values with fill_value tr1.mask[inv] = np.isnan(tr1.data[inv]) tr1.data[tr1.mask] = tr1.fill_value #-- convert units #-- amplitude and phase of the constituent ampl.data[:, i] = np.abs(tr1.data) / unit_conv ampl.mask[:, i] = np.copy(tr1.mask) ph.data[:, i] = np.arctan2(-np.imag(tr1.data), np.real(tr1.data)) ph.mask[:, i] = np.copy(tr1.mask) #-- convert amplitude from input units to meters amplitude = ampl * SCALE #-- convert phase to degrees phase = ph * 180.0 / np.pi phase[phase < 0] += 360.0 #-- return the interpolated values return (amplitude, phase, D, constituents)
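#-- the amplitude/phase convention used above, checked on one complex value:
import numpy as np

hc = 0.3 - 0.4j                                 #-- a single constituent sample
amplitude = np.abs(hc)                          #-- 0.5
phase = np.arctan2(-np.imag(hc), np.real(hc))   #-- atan2(0.4, 0.3) ~ 0.927 rad
phase_deg = phase * 180.0 / np.pi               #-- ~ 53.13 degrees
if phase_deg < 0:
    phase_deg += 360.0                          #-- wrap into [0, 360) as above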
def complex2dB(complx):
    # npy.abs already handles complex input, so reassembling
    # real + 1j*imag beforehand is unnecessary
    dB = 20 * npy.log10(npy.abs(complx))
    return dB
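# Worked check of the helper above: |0.5 + 0.5j| = sqrt(0.5) ~ 0.7071,
# and 20*log10(0.7071) ~ -3.01 dB.
print(complex2dB(0.5 + 0.5j))  # ~ -3.0103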
def save_all_parameters(): if fileExtension == '.h5': f = h5py.File(str(directory), 'r+') if frame.radioButton.isChecked(): param_dim = 9 #we get 9+3 parameters for the circle fit and only 2 for lorentz path = '/entry/analysis/notch resonator' _x = '/re' _y = '/im' if frame.radioButton_2.isChecked(): param_dim = 6 path = '/entry/analysis/lorentz' _x = '/amplitude' # in case the dataset already exists at e.g. /entry/data/notch_res/re create a new one at /entry/data/notch_res 2/re etc. try: dset_x_sim = f.create_dataset(path + _x, (len(i_data), len(f_data))) dset_parameters = f.create_dataset(path + '/param', (len(i_data), param_dim)) if frame.radioButton.isChecked(): #notch res: amplitude and phase therefore two datasets dset_y_sim = f.create_dataset(path + _y, (len(i_data), len(f_data))) except RuntimeError: #iterate datasets .../re 1, ../re 2 ,...... it = 2 while True: try: dset_x_sim = f.create_dataset(path + ' ' + str(it) + _x, (len(i_data), len(f_data))) dset_parameters = f.create_dataset( path + ' ' + str(it) + '/param', (len(i_data), param_dim)) if frame.radioButton.isChecked(): dset_y_sim = f.create_dataset( path + ' ' + str(it) + _y, (len(i_data), len(f_data))) break except RuntimeError: it += 1 dset_x_sim.attrs.create('y_unit', y_unit) dset_x_sim.attrs.create('x_name', x_name) dset_x_sim.attrs.create('y_name', y_name) dset_x_sim.attrs.create('x_unit', x_unit) dset_x_sim.attrs.create('dx', dx) dset_x_sim.attrs.create('dy', dy) dset_x_sim.attrs.create('y0', f_data[0]) dset_x_sim.attrs.create('x0', x0) dset_x_sim.attrs.create('fill', fill) if frame.radioButton.isChecked(): dset_y_sim.attrs.create('y_unit', y_unit) dset_y_sim.attrs.create('x_name', x_name) dset_y_sim.attrs.create('y_name', y_name) dset_y_sim.attrs.create('x_unit', x_unit) dset_y_sim.attrs.create('dx', dx) dset_y_sim.attrs.create('dy', dy) dset_y_sim.attrs.create('y0', f_data[0]) dset_y_sim.attrs.create('x0', x0) dset_y_sim.attrs.create('fill', fill) dset_parameters.attrs.create("amp_norm", data_fit["amp_norm"]) dset_parameters.attrs.create("alpha", data_fit["alpha"]) dset_parameters.attrs.create("delay", data_fit["delay"]) dset_parameters.attrs.create( "order of parameters:", "fr, Qr, absQc, Qi_no_corr, Qi_dia_corr, Qc_dia_corr, phi0, theta0, chi_square" ) if frame.radioButton_2.isChecked(): dset_parameters.attrs.create("order of parameters:", "A1, A2, A3, A4, fr, Qr") param = data_fit["parameters"] i = 0 if frame.radioButton.isChecked(): for z in z_data: dset_x_sim[i] = np.real(z) dset_y_sim[i] = np.imag(z) p = param[i] dset_parameters[i] = np.array([ p["fr"], p["Qr"], p["absQc"], p["Qi_no_corr"], p["Qi_dia_corr"], p["Qc_dia_corr"], p["phi0"], p["theta0"], p["chi_square"] ]) i += 1 if frame.radioButton_2.isChecked(): for z in z_data: dset_x_sim[i] = np.absolute(z) p = param[i] dset_parameters[i] = np.array( [p[0], p[1], p[2], p[3], p[4], p[5]]) i += 1 f.close() return FileName3 = str(QFileDialog.getSaveFileName()) f_out = open(FileName3, 'w') if frame.radioButton.isChecked(): label1 = '#' + str(frame.lineEdit_11.text()) + ' [' + str( frame.lineEdit_4.text()) + ']:' label2 = 'fr' + ' [' + str(frame.lineEdit_6.text()) + ']:' tag = [ label1 + (20 - len(label1)) * ' ', label2 + (20 - len(label2)) * ' ', 'Qr: ', 'Qi_dia_corr: ', 'absQc: ', 'phi0: ' ] f_out.write(tag[0]) f_out.write(tag[1]) f_out.write(tag[2]) f_out.write(tag[3]) f_out.write(tag[4]) f_out.write(tag[5]) f_out.write('\n') i = 0 for res in data_fit["parameters"]: f_out.write(str(i_data[i])) f_out.write((20 - len(str(i_data[i]))) * ' ') 
f_out.write(str(res["fr"])) f_out.write((20 - len(str(res["fr"]))) * ' ') f_out.write(str(res["Qr"])) f_out.write((20 - len(str(res["Qr"]))) * ' ') f_out.write(str(res["Qi_dia_corr"])) f_out.write((20 - len(str(res["Qi_dia_corr"]))) * ' ') f_out.write(str(res["absQc"])) f_out.write((20 - len(str(res["absQc"]))) * ' ') f_out.write(str(res["phi0"])) f_out.write((20 - len(str(res["phi0"]))) * ' ') i += 1 f_out.write('\n') if frame.radioButton_2.isChecked(): label1 = '#' + str(frame.lineEdit_11.text()) + ' [' + str( frame.lineEdit_4.text()) + ']:' label2 = 'fr' + ' [' + str(frame.lineEdit_6.text()) + ']:' tag = [ label1 + (20 - len(label1)) * ' ', label2 + (20 - len(label2)) * ' ', 'Qr: ' ] f_out.write(tag[0]) f_out.write(tag[1]) f_out.write(tag[2]) f_out.write('\n') i = 0 for res in data_fit["parameters"]: f_out.write(str(i_data[i])) f_out.write((20 - len(str(i_data[i]))) * ' ') f_out.write(str(res[4])) f_out.write((20 - len(str(res[4]))) * ' ') f_out.write(str(res[5])) f_out.write((20 - len(str(res[5]))) * ' ') i += 1 f_out.write('\n') f_out.close()
# read first portion of file ms_pad = ms + 5 n = int(fs*0.001*ms_pad) fp = open(filename,"rb") x = io.get_samples_complex(fp,n) # resample to 3*10.230 MHz fsr = 3*10230000.0/fs nco.mix(x,-coffset/fs,0) h = scipy.signal.firwin(161,12e6/(fs/2),window='hanning') x = scipy.signal.filtfilt(h,[1],x) xr = np.interp((1/fsr)*np.arange(ms_pad*3*10230),np.arange(len(x)),np.real(x)) xi = np.interp((1/fsr)*np.arange(ms_pad*3*10230),np.arange(len(x)),np.imag(x)) x = xr+(1j)*xi # iterate (in parallel) over PRNs of interest def worker(p): x,prn = p metric,code,doppler = search(x,prn,doppler_search,ms) return 'prn %2d doppler % 7.1f metric % 7.1f code_offset %6.1f' % (prn,doppler,metric,code) import multiprocessing as mp cpus = mp.cpu_count() results = mp.Pool(cpus).map(worker, map(lambda prn: (x,prn),prns)) for r in results:
#
# > Inserting k=0 we see that np.sum(x) corresponds to y[0]. This term will be non-zero if we haven't removed any large-scale trend in the data. For N even, the elements y[1]...y[N/2−1] contain the positive-frequency terms, and the elements y[N/2]...y[N−1] contain the negative-frequency terms, in order of decreasingly negative frequency. For N odd, the elements y[1]...y[(N−1)/2] contain the positive-frequency terms, and the elements y[(N+1)/2]...y[N−1] contain the negative-frequency terms, in order of decreasingly negative frequency.

# > In case the sequence x is real-valued, the values of y[n] for positive frequencies are the conjugate of the values y[n] for negative frequencies (because the spectrum is symmetric). Typically, only the FFT corresponding to positive frequencies is plotted.
#
# So the first peak at index 20 is (20 bins) x (0.05 Hz/bin) = 1 Hz, as expected. The Nyquist frequency of 2.5 Hz is at an index of N/2 = 50 and the negative frequency peak is 20 bins to the left of the end bin.
#
# The inverse transform is:
#
# $$x[n] = \frac{1}{N} \sum_{k=0}^{N-1} y[k] \exp\left( i 2 \pi k n / N \right)$$

# %% [markdown]
# What about the imaginary part? All imaginary coefficients are zero (neglecting roundoff errors).

# %%
imag_coeffs = np.imag(thefft)
fig, theAx = plt.subplots(1, 1, figsize=(8, 6))
theAx.plot(imag_coeffs)
out = theAx.set_title('imag fft of onehz')

# %%
# now evaluate the power spectrum using Stull's 8.6.1a on p. 312
Power = np.real(thefft * np.conj(thefft))
totsize = len(thefft)
halfpoint = int(np.floor(totsize / 2.))
firsthalf = Power[0:halfpoint]
fig, ax = plt.subplots(1, 1)
freq = np.arange(0, 5., 0.05)
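# %% [markdown]
# A quick check of the inverse transform written above (a cell added for
# illustration, assuming `thefft = np.fft.fft(onehz)` as earlier in the notebook):
# `np.fft.ifft` applies exactly that sum, so it recovers the original series up
# to roundoff.

# %%
reconstructed = np.fft.ifft(thefft)
print(np.max(np.abs(np.imag(reconstructed))))      # ~ 0: imaginary part is roundoff
print(np.allclose(np.real(reconstructed), onehz))  # True: the series is recovered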
plt.subplot(3, 1, 3)  # subplot 3
plt.plot(x, y1, '--r', label='y1')
plt.plot(x, y2, 'o', label='y2')  # plot both functions on one plot
plt.axis([-2.5, 2.5, -0.5, 4.5])  # define the axis limits
plt.grid(True)
plt.legend(loc='lower right')  # prints a legend on the plot
plt.xlabel('x')  # x-axis label for all three subplots (entire figure)
plt.ylabel('Subplot 3')  # label for subplot 3
plt.show()  ### --- This MUST be included to view your plots! --- ###

cRect = 2 + 3j
print(cRect)
cPol = abs(cRect) * np.exp(1j * np.angle(cRect))
print(cPol)  # notice Python will store this in rectangular form
cRect2 = np.real(cPol) + 1j * np.imag(cPol)
print(cRect2)  # converting from polar to rectangular
print(np.sqrt(3 * 5 - 5 * 5 + 0j))  # must include 0j to allow a complex result

# common packages to import
import numpy as np
import matplotlib.pyplot as plt
import scipy as sp
import scipy.signal as sig
import pandas as pd
import control
import time
from scipy.fftpack import fft, fftshift

""" ### common python commands with explanations ###