def correctDomain(self):
    """Fold the stored phase values onto consistent domains and unwrap them.

    Two variants are produced from ``self.values``:
    - ``consistentDomainValues``: folded into [0, topDomain) via modulo
      arithmetic (with a 2*pi offset applied before the modulo), then
      unwrapped into ``correctedValues``.
    - ``consistentDomainValues2``: folded into (-1, 1) by the nested helper,
      then unwrapped into ``correctedValues2``.
    """
    def correctPhaseDomain(phaseValue):
        """Fold a single value into the (-1, 1) interval."""
        upper = 1.
        lower = -1.
        span = upper - lower
        phaseValue %= span
        if phaseValue >= upper or phaseValue <= lower:
            # Shift back inside the open interval, preserving direction.
            phaseValue += -span if phaseValue > 0 else span
        return phaseValue

    span = self.topDomain
    # Work on copies so self.values itself is never mutated.
    self.consistentDomainValues = self.values[:]
    self.consistentDomainValues2 = self.values[:]
    # Modulo topDomain keeps every sample on one consistent domain.
    self.consistentDomainValues = [(v + (2 * numpy.pi)) % span
                                   for v in self.consistentDomainValues]
    self.consistentDomainValues2 = [correctPhaseDomain(v)
                                    for v in self.consistentDomainValues2]
    self.correctedValues = numpy.unwrap(self.consistentDomainValues)
    self.correctedValues2 = numpy.unwrap(self.consistentDomainValues2)
def plotall(self):
    """Plot raw vs. fitted S21 data in three panels: complex plane,
    magnitude vs. frequency, and unwrapped phase vs. frequency.
    Assumes self.z_data_raw / self.z_data_sim are complex arrays and
    self.f_data is in Hz (scaled to GHz for plotting)."""
    real = self.z_data_raw.real
    imag = self.z_data_raw.imag
    real2 = self.z_data_sim.real
    imag2 = self.z_data_sim.imag
    fig = plt.figure(figsize=(15,5))
    # NOTE(review): Figure.canvas.set_window_title was removed in
    # Matplotlib 3.6; newer versions need fig.canvas.manager.set_window_title.
    fig.canvas.set_window_title("Resonator fit")
    # Panel 1: raw vs. fit in the complex S21 plane.
    plt.subplot(131)
    plt.plot(real,imag,label='rawdata')
    plt.plot(real2,imag2,label='fit')
    plt.xlabel('Re(S21)')
    plt.ylabel('Im(S21)')
    plt.legend()
    # Panel 2: magnitude vs. frequency (Hz -> GHz).
    plt.subplot(132)
    plt.plot(self.f_data*1e-9,np.absolute(self.z_data_raw),label='rawdata')
    plt.plot(self.f_data*1e-9,np.absolute(self.z_data_sim),label='fit')
    plt.xlabel('f (GHz)')
    plt.ylabel('Amplitude')
    plt.legend()
    # Panel 3: unwrapped phase vs. frequency.
    plt.subplot(133)
    plt.plot(self.f_data*1e-9,np.unwrap(np.angle(self.z_data_raw)),label='rawdata')
    plt.plot(self.f_data*1e-9,np.unwrap(np.angle(self.z_data_sim)),label='fit')
    plt.xlabel('f (GHz)')
    plt.ylabel('Phase')
    plt.legend()
    # plt.gcf().set_size_inches(15,5)
    plt.tight_layout()
    plt.show()
def plot(self):
    """Plot estimated vs. ground-truth trajectory, then per-axis error
    curves with +/- sigma envelopes from self.VAR.

    Relies on `size`, `sqrt`, `degrees`, `unwrap` being in scope
    (presumably a module-level `from numpy import *` — confirm).
    """
    # Plot the estimated and ground truth trajectories
    ground_truth = plt.plot(self.XYT[:, 0], self.XYT[:, 1], 'g.-', label='Ground Truth')
    mean_trajectory = plt.plot(self.MU[:, 0], self.MU[:, 1], 'r.-', label='Estimate')
    plt.legend()
    # Try changing this to different standard deviations
    sigma = 1 # 2 or 3
    # Plot the errors with error bars
    Error = self.XYT-self.MU
    T = range(size(self.XYT,0))
    f, axarr = plt.subplots(3, sharex=True)
    # X error with +/- sigma*sqrt(variance) envelope.
    axarr[0].plot(T,Error[:,0],'r-')
    axarr[0].plot(T,sigma*sqrt(self.VAR[:,0]),'b--')
    axarr[0].plot(T,-sigma*sqrt(self.VAR[:,0]),'b--')
    axarr[0].set_title('X error')
    axarr[0].set_ylabel('Error (m)')
    axarr[1].plot(T,Error[:,1],'r-')
    axarr[1].plot(T,sigma*sqrt(self.VAR[:,1]),'b--')
    axarr[1].plot(T,-sigma*sqrt(self.VAR[:,1]),'b--')
    axarr[1].set_title('Y error')
    axarr[1].set_ylabel('Error (m)')
    # Theta error in degrees; the angle error is unwrapped first.
    # NOTE(review): unwrap(sqrt(VAR)) unwraps a standard deviation, which
    # is unusual — confirm this envelope is intended.
    axarr[2].plot(T,degrees(unwrap(Error[:,2])),'r-')
    axarr[2].plot(T,sigma*degrees(unwrap(sqrt(self.VAR[:,2]))),'b--')
    axarr[2].plot(T,-sigma*degrees(unwrap(sqrt(self.VAR[:,2]))),'b--')
    axarr[2].set_title('Theta error (degrees)')
    axarr[2].set_ylabel('Error (degrees)')
    axarr[2].set_xlabel('Time')
    plt.show()
def diffplot(freq, B, A, B2, A2):
    """Plot the difference between two digital filters' frequency responses.

    Parameters
    ----------
    freq : float
        Sample rate used to scale the normalized frequency axis.
    B, A : array_like
        Numerator/denominator coefficients of the reference filter.
    B2, A2 : array_like
        Numerator/denominator coefficients of the filter to compare.
    """
    w, h = sps.freqz(B, A)
    w2, h2 = sps.freqz(B2, A2)
    # h = h - h2
    # Amplitude ratio and unwrapped phase difference between the responses.
    dabs = abs(h2) / abs(h)
    dphase = np.unwrap(np.angle(h2)) - np.unwrap(np.angle(h))
    fig = plt.figure()
    plt.title('Difference between digital filter frequency responses')
    ax1 = fig.add_subplot(111)
    plt.plot(w * (freq/np.pi) / 2.0, 20 * np.log10(dabs), 'b')
    plt.ylabel('Amplitude [dB]', color='b')
    plt.xlabel('Frequency [rad/sample]')
    # Phase difference on a twin y-axis. (The original computed
    # `angles = np.unwrap(np.angle(h))` and immediately overwrote it with
    # dphase — dead assignment removed.)
    ax2 = ax1.twinx()
    plt.plot(w * (freq/np.pi) / 2.0, dphase, 'g')
    plt.ylabel('Angle (radians)', color='g')
    plt.grid()
    plt.axis('tight')
    plt.show()
def calc_inst_info(modes,samplerate):
    """
    Calculate the instantaneous frequency, amplitude, and phase of
    each mode.

    Parameters: `modes` is a 2-D array (one mode per row), `samplerate`
    the sampling rate used to scale the instantaneous frequency.
    Returns (f, amp, phase), each shaped like `modes` (float32); the
    frequency endpoints are NaN because the central-difference formula
    needs both neighbors.
    """
    amp=np.zeros(modes.shape,np.float32)
    phase=np.zeros(modes.shape,np.float32)
    f=np.zeros(modes.shape,np.float32)
    print("Mode 1:", len(modes), samplerate)
    for m in range(len(modes)):
        # Analytic signal of this mode; angle gives instantaneous phase.
        h=scipy.signal.hilbert(modes[m])
        print(len(modes[m]))
        print("Mean Amplitude of mode ", m, np.mean(np.abs(h)))
        print("Mean Phase of mode ", m, np.mean(np.angle(h)))
        phase[m,:]=np.angle(h)
        # NOTE(review): phase[:, np.r_[0, 0:N]] duplicates column 0 and
        # spans ALL modes, not just mode m — this debug print looks like it
        # was meant to use phase[m, ...]; confirm before relying on it.
        print("Frequ", np.diff(np.unwrap(phase[:,np.r_[0,0:len(modes[m])]]))/(2*np.pi)*samplerate)
        amp[m,:]=np.abs(h)
        phase[m,:]=np.angle(h)
        # Central-difference instantaneous frequency; NaN at both ends.
        f[m,:] = np.r_[np.nan, 0.5*(np.angle(-h[2:]*np.conj(h[0:-2]))+np.pi)/(2*np.pi) * samplerate, np.nan]
        print("Mean Frequ of mode ", m, np.mean(np.diff(np.unwrap(phase[:,np.r_[0,0:len(modes[0])]]))/(2*np.pi)*samplerate))
    #f(m,:) = [nan 0.5*(angle(-h(t+1).*conj(h(t-1)))+pi)/(2*pi) * sr nan];
    # calc the freqs (old way)
    #f=np.diff(np.unwrap(phase[:,np.r_[0,0:len(modes[0])]]))/(2*np.pi)*samplerate
    # clip the freqs so they don't go below zero
    #f = f.clip(0,f.max())
    return f,amp,phase
def plot_cross_angle_grid(self, bf, refpixel='3,3', onoff=None, off=None, ymin=-3, ymax=3, title=None, hold=False):
    # Python 2 code (uses dict.has_key and print statements).
    # Plot, on a 6x8 subplot grid, the unwrapped cross-correlation phase of
    # each pixel against `refpixel`. `onoff` overrides bf.sti_cc as the
    # correlation cube; `off` optionally normalizes by the off-source
    # autocorrelation amplitudes.
    if not hold:
        self.clear()
    refpixel_idx = bf.map_pixel_spec[refpixel]
    if onoff is None:
        cc = bf.sti_cc
    else:
        cc = onoff
    for row in range(2, 8):
        for col in range(1, 9):
            # Linear subplot index for this (row, col) pixel.
            r = (row - 2) * 8
            pl_idx = r + col
            if bf.map_pixel_spec.has_key("%d,%d" % (row, col)):
                pix = "%d,%d" % (row, col)
                ax = self.add_subplot(6, 8, pl_idx)
                spec_idx = bf.map_pixel_spec[pix]
                if off is None:
                    # Unwrapped phase of the time-averaged cross-correlation.
                    spec = numpy.unwrap(numpy.angle(cc[spec_idx, refpixel_idx, :, :].mean(axis=1)))
                else:
                    # Same, but normalized by the geometric mean of the two
                    # off-source autocorrelation amplitudes.
                    spec = numpy.unwrap(numpy.angle(cc[spec_idx, refpixel_idx, :, :].mean(axis=1))/numpy.sqrt(numpy.abs(off[spec_idx, spec_idx, :, :].mean(axis=1)) * numpy.abs(off[refpixel_idx, refpixel_idx, :, :].mean(axis=1))))
                self.plot(numpy.arange(bf.bin_start, bf.bin_end+1), spec, linestyle='steps-mid', label=pix)
                self.set_ylim(ymin, ymax)
                # Only the first subplot keeps its tick labels.
                if pl_idx != 1:
                    ax.set_xticklabels([])
                    ax.set_yticklabels([])
            else:
                print "%d,%d not available" % (row, col)
                continue
    self.redraw_plot()
    if title is not None:
        self.set_figtitle("%s" % title)
def align_wfarr_average_phase(this,that,mask=None,verbose=False):
    '''
    'this' phase will be aligned to 'that' phase over their domains
    '''
    # Build complex strains from the (+, x) columns, restricted to the
    # mask when one is supplied.
    if mask is None:
        u = this[:, 1] + 1j * this[:, 2]
        v = that[:, 1] + 1j * that[:, 2]
    else:
        u = this[mask, 1] + 1j * this[mask, 2]
        v = that[mask, 1] + 1j * that[mask, 2]
    # Average unwrapped phase of each waveform over the (masked) domain.
    mean_this = mean(unwrap(angle(u)))
    mean_that = mean(unwrap(angle(v)))
    # Shift that takes `this` onto `that`.
    dphi = mean_that - mean_this
    if verbose:
        alert('The phase shift applied is %s radians.'%magenta('%1.4e'%(dphi)))
    # Apply the constant phase shift and return the aligned waveform array.
    return shift_wfarr_phase(this, dphi)
def get_dispersion_correction(on_scan, off_scan, scanLen=30.0, deltaT=0.1, direc='/media/Disk1/nov14_2015'):
    """
    Gets the linear fits for c_y and c_z
    given On and Off scans

    Loads both scans, forms On-Off cross-correlations per bin, extracts the
    summed phase of the x-y and x-z input blocks, and fits a line to each
    unwrapped phase-vs-bin curve. Returns (c_y, c_z), each the
    (slope, intercept) pair from numpy.polyfit.
    """
    # On-source scan: time-averaged STI cross-correlations, bad inputs removed.
    bf = BinFile(get_gbtscan_file(direc=direc, scan=on_scan)[0], number_packets=3000)
    bf.load_sti_cross_correlate(scanLen, deltaT)
    Ron = bf.sti_cc.mean(axis=3)
    Ron = numpy.delete(Ron, bf.bad_inputs, axis=0)
    Ron = numpy.delete(Ron, bf.bad_inputs, axis=1)
    # Off-source scan, processed identically.
    bf = BinFile(get_gbtscan_file(direc=direc, scan=off_scan)[0], number_packets=3000)
    bf.load_sti_cross_correlate(scanLen, deltaT)
    Roff = bf.sti_cc.mean(axis=3)
    Roff = numpy.delete(Roff, bf.bad_inputs, axis=0)
    Roff = numpy.delete(Roff, bf.bad_inputs, axis=1)
    alpha_y = numpy.zeros(bf.num_bins)
    alpha_z = numpy.zeros(bf.num_bins)
    for b in range(bf.num_bins):
        # Background-subtracted correlation for this bin; the input axis is
        # partitioned as x=[:12], y=[12:24], z=[24:].
        onoff = Ron[:, :, b] - Roff[:, :, b]
        Rxy = onoff[:12, 12:24]
        Rxz = onoff[:12, 24:]
        alpha_y[b] = numpy.angle(Rxy.sum())
        alpha_z[b] = numpy.angle(Rxz.sum())
    # Linear fit of unwrapped phase vs. bin number (1-based).
    x = numpy.arange(1, bf.num_bins+1)
    c_y = numpy.polyfit(x, numpy.unwrap(alpha_y), 1)
    c_z = numpy.polyfit(x, numpy.unwrap(alpha_z), 1)
    return c_y, c_z
def chickling_pn(shotno, date=time.strftime("%Y%m%d"), bandwidth=1000):
    # Plot phase noise (per-segment std of the unwrapped phase) for the
    # scene (CO2) and reference (HeNe) channels of one shot.
    # NOTE: the `date` default is evaluated once at import time, not per call.
    fname, data = file_finder(shotno,date)
    fs = data[0]['samplerate']
    # Number of whole segments of length `bandwidth` in the trace.
    samplesize = int(np.unwrap(data[0]['phasediff_co2']).size/bandwidth)
    phase_noise_sc = np.zeros(samplesize)
    phase_noise_ref = np.zeros(samplesize)
    #reshape the array of x points (20M for 1s) into a 2d array each with 40k segments.
    phasediff_pn_sc = np.reshape(np.unwrap(data[0]['phasediff_co2'][0:(samplesize*bandwidth)]),(samplesize,bandwidth))
    phasediff_pn_ref = np.reshape(np.unwrap(data[0]['phasediff_hene'][0:(samplesize*bandwidth)]),(samplesize,bandwidth))
    #for each horizontal column perform a standard deviation
    for i in range(0,samplesize):
        phase_noise_sc[i] = np.std(phasediff_pn_sc[i])
        phase_noise_ref[i] = np.std(phasediff_pn_ref[i])
    #plot STD against time and find the average
    plt.figure("Phase Noise for Scene shot " + str(shotno) + " with bandwidth = " + str(bandwidth) + " Date " + str(date))
    plt.xlabel("Time, s")
    plt.ylabel("Phase Noise, mRadians")
    # Radians -> milliradians for display.
    plt.plot(np.linspace(0,1,samplesize),phase_noise_sc*1000)
    print("Scene Phase STD = "+str(np.std(phase_noise_sc)))
    print("Scene Phase AVR = "+str(np.mean(phase_noise_sc)))
    plt.figure("Phase Noise for Ref shot " + str(shotno) + " with bandwidth = " + str(bandwidth) + " Date " + str(date))
    plt.xlabel("Time, s")
    plt.ylabel("Phase Noise, mRadians")
    plt.plot(np.linspace(0,1,samplesize),phase_noise_ref*1000)
    print("Ref Phase STD = "+str(np.std(phase_noise_ref)))
    print("Ref Phase AVR = "+str(np.mean(phase_noise_ref)))
def chickling_corr_2ch(shotno,fileno, date=time.strftime("%Y%m%d"), bandwidth=40000):
    # Compute the normalized cross-correlation between the segment-averaged
    # CO2 and HeNe phase traces for `fileno` shots, then plot all values.
    # NOTE: `date` default is evaluated once at import time.
    corr = np.zeros(fileno)
    for j in range (0, fileno):
        try:
            # NOTE(review): `j` is never used below, so every iteration loads
            # the SAME shot and corr is the same value repeated — the intent
            # was probably file_finder(shotno + j, date). Confirm.
            fname, data = file_finder(shotno,date)
            samplesize = int(np.unwrap(data[0]['phasediff_co2']).size/bandwidth)
            phase_avr_co2 = np.zeros(samplesize)
            phase_avr_hene = np.zeros(samplesize)
            #reshape the array of x points (20M for 1s) into a 2d array each with 40k segments.
            phasediff_co2 = np.reshape(np.unwrap(data[0]['phasediff_co2'][0:(samplesize*bandwidth)]),(samplesize,bandwidth))
            phasediff_hene = np.reshape(np.unwrap(data[0]['phasediff_hene'][0:(samplesize*bandwidth)]),(samplesize,bandwidth))
            #for each horizontal column perform an average
            for i in range(0,samplesize):
                phase_avr_co2[i] = np.mean(phasediff_co2[i])
                phase_avr_hene[i] = np.mean(phasediff_hene[i])
            # Normalize so the 'valid' correlation is a correlation coefficient.
            a = (phase_avr_co2 - np.mean(phase_avr_co2)) / (np.std(phase_avr_co2) * len(phase_avr_co2))
            b = (phase_avr_hene - np.mean(phase_avr_hene)) / (np.std(phase_avr_hene))
            corr[j] = np.correlate(a, b, 'valid')
            #plt.xcorr(a,b)#,'o',ms=0.4)
            #plt.figure("Correlations of Shot "+str(shotno)+" to "+ str(shotno+fileno))
            #plt.plot(np.linspace(0,1,fileno),correlation,'o')
        except Exception:
            # Best-effort: a bad/missing data set is skipped, not fatal.
            print("~~~~~ Encountered Error, Skipping Data Set ~~~~~")
            pass
    plt.figure("Correlation for shots "+str(shotno)+" to "+str(shotno+fileno))
    plt.plot(corr,'o')
def chickling_corr(shotno, date=time.strftime("%Y%m%d"), bandwidth=40000):
    # Overlay the mean-removed, segment-averaged CO2 and HeNe phase traces
    # for one shot and plot their full cross-correlation.
    # NOTE: `date` default is evaluated once at import time.
    fname, data = file_finder(shotno,date)
    samplesize = int(np.unwrap(data[0]['phasediff_co2']).size/bandwidth)
    phase_avr_co2 = np.zeros(samplesize)
    phase_avr_hene = np.zeros(samplesize)
    #reshape the array of x points (20M for 1s) into a 2d array each with 40k segments.
    phasediff_co2 = np.reshape(np.unwrap(data[0]['phasediff_co2'][0:(samplesize*bandwidth)]),(samplesize,bandwidth))
    phasediff_hene = np.reshape(np.unwrap(data[0]['phasediff_hene'][0:(samplesize*bandwidth)]),(samplesize,bandwidth))
    #for each horizontal column perform an average
    for i in range(0,samplesize):
        phase_avr_co2[i] = np.mean(phasediff_co2[i])
        phase_avr_hene[i] = np.mean(phasediff_hene[i])
    x = np.linspace(0,1,samplesize)
    plt.figure("2 Channels | Blue = Scene | Orange = Reference | Green = Cross-Correlation | shot " + str(shotno) + " Date " + str(date))
    plt.xlabel("Time, s")
    plt.ylabel("Phase Difference, Radians")
    # Mean-removed traces for visual comparison.
    plt.plot(x,phase_avr_co2-np.average(phase_avr_co2))
    plt.plot(x,phase_avr_hene-np.average(phase_avr_hene))
    # Normalized cross-correlation; the 'valid' value is the correlation
    # coefficient at zero lag.
    a = (phase_avr_co2 - np.mean(phase_avr_co2)) / (np.std(phase_avr_co2) * len(phase_avr_co2))
    b = (phase_avr_hene - np.mean(phase_avr_hene)) / (np.std(phase_avr_hene))
    yc = np.correlate(a, b, 'full')
    print(np.correlate(a, b, 'valid'))
    xc = np.linspace(0,1,yc.size)
    plt.plot(xc,yc)#,'o',ms=0.4)
def drawDataCallback(baseline):
    # Periodic GUI callback: redraw power (top) and phase (bottom) spectra
    # for one baseline, then reschedule itself after 100 ms.
    # Relies on module-level globals: get_data, ifch, xaxis, fig.
    matplotlib.pyplot.clf()
    acc_n,interleave_a,interleave_b = get_data(baseline)
    matplotlib.pyplot.subplot(211)
    # `ifch` selects channel-number vs. frequency x-axis.
    if ifch == True:
        matplotlib.pyplot.semilogy(numpy.abs(interleave_a))
        matplotlib.pyplot.xlim(0,1024)
    else:
        matplotlib.pyplot.semilogy(xaxis,numpy.abs(interleave_a))
    matplotlib.pyplot.grid()
    matplotlib.pyplot.title('Integration number %i \n%s'%(acc_n,baseline))
    matplotlib.pyplot.ylabel('Power (arbitrary units)')
    matplotlib.pyplot.subplot(212)
    if ifch == True:
        matplotlib.pyplot.plot(numpy.unwrap(numpy.angle(interleave_b)))
        matplotlib.pyplot.xlim(0,1024)
        matplotlib.pyplot.xlabel('FFT Channel')
    else:
        matplotlib.pyplot.plot(xaxis,numpy.unwrap(numpy.angle(interleave_b)))
        matplotlib.pyplot.xlabel('FFT Frequency')
    matplotlib.pyplot.ylabel('Phase')
    # NOTE(review): np.angle returns radians, but the y-limits look like
    # degrees (+/-180) — confirm which unit is intended.
    matplotlib.pyplot.ylim(-180,180)
    matplotlib.pyplot.grid()
    # Reschedule via the Tk event loop.
    fig.canvas.manager.window.after(100, drawDataCallback,baseline)
def autophase(self, ti=None, tf=None, unwrap=False, x0=None, adjust_f0=True):
    """Fit and remove a linear phase trend over an optional time window.

    Parameters
    ----------
    ti, tf : float or None
        Start / end of the fit window; None leaves that side unbounded.
    unwrap : bool
        If True, unwrap self.phi before fitting.
    x0 : sequence of two floats, optional
        Initial guess (slope, offset) passed to auto_phase; defaults to
        (0., 0.). The original used a mutable list default ([0., 0.]),
        which is created once and shared across calls — replaced with a
        None sentinel.
    adjust_f0 : bool
        If True, store f0 corrected by the fitted slope in self.f0corr.

    Side effects: sets self.mb, self.phi0, self.phi_fit, self.dphi,
    self.f0corr, and calls self._output_df_X_Y().
    """
    if x0 is None:
        x0 = [0., 0.]
    t = self.t
    m = self.m
    if unwrap:
        phi = np.unwrap(self.phi)
    else:
        phi = self.phi
    # Combine the base mask with the optional [ti, tf) window.
    mask = m
    if ti is not None:
        mask = mask & (t >= ti)
    if tf is not None:
        mask = mask & (t < tf)
    self.mb = mb = auto_phase(t[mask], phi[mask], x0, adjust_f0=adjust_f0)
    self.phi0 = mb[-1]
    self.phi_fit = np.polyval(mb, t)
    # Residual wrapped into (-pi, pi], then unwrapped for continuity.
    self.dphi = np.unwrap(((self.phi - self.phi_fit + np.pi) % (2*np.pi)) - np.pi)
    if adjust_f0:
        # Fitted slope divided by 2*pi corrects the frequency.
        self.f0corr = self.f0 + mb[0] / (2*np.pi)
    else:
        self.f0corr = self.f0
    self._output_df_X_Y()
def makeGnuFig(filename):
    """Write measurement vs. fit columns to `filename` and plot them with
    Gnuplot.

    Column layout (0-based): 0 x-axis, 1/2 real (meas/fit), 3/4 imag
    (meas/fit), 5/6 magnitude (meas/fit), 7 fit phase, 8 measured phase.
    Relies on module-level `squid`, `elem`, `measdata`, `gp`, and `pi`.
    """
    resultmat = np.zeros([len(squid.xaxis), 9])
    S11 = elem.S11            # fitted response
    ydat = measdata.ydat      # measured response
    resultmat[:, 0] = squid.xaxis
    resultmat[:, 1] = ydat.real
    resultmat[:, 2] = S11.real
    resultmat[:, 3] = ydat.imag
    resultmat[:, 4] = S11.imag
    resultmat[:, 5] = abs(ydat)
    resultmat[:, 6] = abs(S11)
    resultmat[:, 7] = np.unwrap(np.angle(S11), discont=pi)
    resultmat[:, 8] = np.unwrap(np.angle(ydat), discont=pi)
    np.savetxt(filename, resultmat, delimiter='\t')
    # Plot in Gnuplot (gnuplot columns are 1-based: u 1:N reads resultmat
    # column N-1).
    g1 = gp.Gnuplot(persist=1, debug=1)
    g1("plot '" + str(filename) + "' u 1:2 w l t 'Meas.real'")
    g1("replot '" + str(filename) + "' u 1:3 w l t 'Fit.real'")
    g1("replot '" + str(filename) + "' u 1:4 w l t 'Meas.imag'")
    g1("replot '" + str(filename) + "' u 1:5 w l t 'Fit.imag'")
    g2 = gp.Gnuplot(persist=1, debug=1)
    g2("plot '" + str(filename) + "' u 1:6 w l t 'Meas.mag'")
    g2("replot '" + str(filename) + "' u 1:7 w l t 'Fit.mag'")
    # BUG FIX: the phase labels were swapped — gnuplot column 8 is
    # resultmat[:, 7] (the FIT phase, S11) and column 9 is resultmat[:, 8]
    # (the MEASURED phase, ydat).
    g2("replot '" + str(filename) + "' u 1:8 w l t 'Fit.phase'")
    g2("replot '" + str(filename) + "' u 1:9 w l t 'Meas.phase'")
    return
def follow_trajectory(pr2, bodypart2traj):
    """
    bodypart2traj is a dictionary with zero or more of the following fields: {l/r}_grab, {l/r}_gripper, {l/r_arm}
    We'll follow all of these bodypart trajectories simultaneously
    Also, if the trajectory involves grabbing with the gripper, and the grab fails, the trajectory will abort
    """
    rospy.loginfo("following trajectory with bodyparts %s", " ".join(bodypart2traj.keys()))
    # Stack the per-part trajectories side by side and remember which DOF
    # columns belong to which body part.
    trajectories = []
    vel_limits = []
    acc_limits = []
    bodypart2inds = {}
    n_dof = 0
    name2part = {"l_gripper":pr2.lgrip,
                 "r_gripper":pr2.rgrip,
                 "l_arm":pr2.larm,
                 "r_arm":pr2.rarm}
    for (name, part) in name2part.items():
        if name in bodypart2traj:
            traj = bodypart2traj[name]
            # Promote 1-D (single-DOF) trajectories to a column vector.
            if traj.ndim == 1:
                traj = traj.reshape(-1,1)
            trajectories.append(traj)
            vel_limits.extend(part.vel_limits)
            acc_limits.extend(part.acc_limits)
            bodypart2inds[name] = range(n_dof, n_dof+part.n_joints)
            n_dof += part.n_joints
    trajectories = np.concatenate(trajectories, 1)
    #print 'traj orig:'; print trajectories
    #trajectories = np.r_[np.atleast_2d(pr2.get_joint_positions()), trajectories]
    #print 'traj with first:'; print trajectories
    # Unwrap the continuous wrist joints (columns 4 and 6 of each arm).
    # NOTE(review): trajectories[:, bodypart2inds[arm]] is fancy indexing and
    # returns a COPY, so these unwrapped values never make it back into
    # `trajectories` — confirm whether a write-back is missing here.
    for arm in ['l_arm', 'r_arm']:
        if arm in bodypart2traj:
            part_traj = trajectories[:,bodypart2inds[arm]]
            part_traj[:,4] = np.unwrap(part_traj[:,4])
            part_traj[:,6] = np.unwrap(part_traj[:,6])
    #print 'traj after unwrap:'; print trajectories
    vel_limits = np.array(vel_limits)
    acc_limits = np.array(acc_limits)
    # Retime at half the velocity limits, then resample on a 0.1 s grid.
    times = retiming.retime_with_vel_limits(trajectories, vel_limits/2)
    times_up = np.arange(0,times[-1],.1)
    traj_up = interp2d(times_up, times, trajectories)
    # Dispatch the resampled columns back to each body part controller.
    for (name, part) in name2part.items():
        if name in bodypart2traj:
            part_traj = traj_up[:,bodypart2inds[name]]
            if name == "l_gripper" or name == "r_gripper":
                part.follow_timed_trajectory(times_up, part_traj.flatten())
            elif name == "l_arm" or name == "r_arm":
                #print 'following traj for', name, part_traj
                #print ' with velocities'
                vels = kinematics_utils.get_velocities(part_traj, times_up, .001)
                #print vels
                part.follow_timed_joint_trajectory(part_traj, vels, times_up)
    pr2.join_all()
    return True
def butterworth_plot(fig=None, ax=None):
    """
    Plot of frequency response of the Butterworth filter with different orders.

    Draws a two-panel Bode plot (magnitude and phase) for analog low-pass
    Butterworth filters of order 1, 2, 4, and 6 with cutoff 10 rad/s, plus a
    magnitude inset around the cutoff. If `fig` is None a new figure and
    axes pair is created; otherwise `ax` must be a pair of axes.
    """
    if fig is None:
        fig, ax = plt.subplots(1, 2, figsize=(10, 4))
    # One (magnitude dB, phase deg) pair per filter order.
    b1, a1 = signal.butter(1, 10, 'low', analog=True)
    w, h1 = signal.freqs(b1, a1)
    ang1 = np.rad2deg(np.unwrap(np.angle(h1)))
    h1 = 20 * np.log10(abs(h1))
    b2, a2 = signal.butter(2, 10, 'low', analog=True)
    w, h2 = signal.freqs(b2, a2)
    ang2 = np.rad2deg(np.unwrap(np.angle(h2)))
    h2 = 20 * np.log10(abs(h2))
    b4, a4 = signal.butter(4, 10, 'low', analog=True)
    w, h4 = signal.freqs(b4, a4)
    ang4 = np.rad2deg(np.unwrap(np.angle(h4)))
    h4 = 20 * np.log10(abs(h4))
    b6, a6 = signal.butter(6, 10, 'low', analog=True)
    w, h6 = signal.freqs(b6, a6)
    ang6 = np.rad2deg(np.unwrap(np.angle(h6)))
    h6 = 20 * np.log10(abs(h6))
    # Normalize the frequency axis by the 10 rad/s cutoff.
    w = w/10
    # PLOT
    ax[0].plot(w, h1, 'b', w, h2, 'r', w, h4, 'g', w, h6, 'y', linewidth=2)
    ax[0].axvline(1, color='black') # cutoff frequency
    # -3 dB marker at the cutoff.
    ax[0].scatter(1, -3, marker='s', edgecolor='0', facecolor='1', s=400)
    #ax1.legend(('1', '2', '4', '6'), title='Filter order', loc='best')
    ax[0].set_xscale('log')
    fig.suptitle('Bode plot for low-pass Butterworth filter with different orders', fontsize=16, y=1.05)
    #ax1.set_title('Magnitude', fontsize=14)
    ax[0].set_xlabel('Frequency / Critical frequency', fontsize=14)
    ax[0].set_ylabel('Magnitude [dB]', fontsize=14)
    ax[0].set_xlim(0.1, 10)
    ax[0].set_ylim(-120, 10)
    ax[0].grid(which='both', axis='both')
    ax[1].plot(w, ang1, 'b', w, ang2, 'r', w, ang4, 'g', w, ang6, 'y', linewidth=2)
    ax[1].axvline(1, color='black') # cutoff frequency
    ax[1].legend(('1', '2', '4', '6'), title='Filter order', loc='best')
    ax[1].set_xscale('log')
    #ax2.set_title('Phase', fontsize=14)
    ax[1].set_xlabel('Frequency / Critical frequency', fontsize=14)
    ax[1].set_ylabel('Phase [degrees]', fontsize=14)
    ax[1].set_yticks(np.arange(0, -300, -45))
    ax[1].set_ylim(-300, 10)
    ax[1].grid(which='both', axis='both')
    plt.tight_layout(w_pad=1)
    # Magnitude inset zoomed around the cutoff / -3 dB point.
    axi = plt.axes([.115, .4, .15, .35]) # inset plot
    axi.plot(w, h1, 'b', w, h2, 'r', w, h4, 'g', w, h6, 'y', linewidth=2)
    #ax11.set_yticks(np.arange(0, -7, -3))
    axi.set_xticks((0.6, 1, 1.4))
    axi.set_yticks((-6, -3, 0))
    axi.set_ylim([-7, 1])
    axi.set_xlim([.5, 1.5])
    axi.grid(which='both', axis='both')
def deg_unwrap(data, discont=180.):
    """Unwrap an angle sequence given in degrees.

    Jumps larger than `discont` degrees between consecutive finite values
    are corrected by multiples of 360. Non-finite entries (NaN/inf) are
    skipped during unwrapping and come back as NaN in the result.
    """
    finite = np.isfinite(data)
    if np.all(finite):
        # Fast path: no gaps, unwrap the whole array at once.
        return np.rad2deg(
            np.unwrap(np.deg2rad(data), np.deg2rad(discont)))
    # Unwrap only across the finite samples; everything else becomes NaN.
    result = np.empty_like(data) + np.nan
    result[finite] = np.rad2deg(
        np.unwrap(np.deg2rad(data[finite]), np.deg2rad(discont)))
    return result
def chickling_pd(shotno, date=time.strftime("%Y%m%d")):
    # Plot the unwrapped CO2 phase-difference trace of one shot against time
    # and print its standard deviation.
    # NOTE: `date` default is evaluated once at import time, not per call.
    fname, data = file_finder(shotno,date)
    fs = data[0]['samplerate']
    plt.figure("Phase Difference shot " + str(shotno) + " Date " + str(date))
    plt.xlabel("Time, s")
    plt.ylabel("Phase Difference, Radians")
    # NOTE(review): trim_zeros can shorten the y-array relative to the
    # timetag x-array (both drop the first sample, but trim_zeros may drop
    # more) — confirm the two lengths always match.
    plt.plot(data[0]['timetag_phdiff'][1:]/fs,np.trim_zeros(np.unwrap(data[0]['phasediff_co2'][1:])))
    print(np.std(np.unwrap(data[0]['phasediff_co2'])))
def plot_feko_nearfield():
    """Plot magnitude (dB) and unwrapped phase (rad) of the Ey near-field
    component from a fixed FEKO .efe export, on wavelength-normalized axes.
    Relies on module-level rff, nf2ff, plt, np, and fig_size."""
    # Load data
    frequency, x, y, z, ex, ey, ez, no_samples = rff.read_fekonearfield_datafile("FEKOFields/Dipole_85deg_400MHz.efe")
    # Normalize coordinates to wavelengths at the first frequency.
    wavelength = nf2ff.calc_freespace_wavelength(frequency[0])
    x /= wavelength
    y /= wavelength
    z /= wavelength
    # Grid data
    x_points = no_samples[1]
    y_points = no_samples[2]
    new_shape = (x_points, y_points)
    ey_grid = np.reshape(ey, new_shape)
    x_grid = np.reshape(x, new_shape)
    y_grid = np.reshape(y, new_shape)
    # NOTE(review): this bare figure is never used — plt.subplots below
    # creates its own.
    plt.figure()
    fig, ax = plt.subplots(nrows=1, ncols=2, sharey=True)
    # Left panel: |Ey| in dB with contour overlay.
    ax[0].set_title("Magnitude [dB]")
    ax[0].set_xlabel("x [$\lambda$]")
    ax[0].set_ylabel("y [$\lambda$]")
    ax[0].set_ylim(np.min(y_grid), np.max(y_grid))
    ax[0].set_xlim(np.min(x_grid), np.max(x_grid))
    extents = (np.min(x_grid), np.max(x_grid), np.min(y_grid), np.max(y_grid))
    data = 10*np.log10(np.abs(ey_grid))
    levels = np.arange(-40, 0, 8)
    cax = ax[0].imshow(data, extent=extents)
    ax[0].contour(data, extent=extents, colors='k', origin='upper', levels=levels)
    cb = fig.colorbar(cax, orientation='horizontal', ax=ax[0])
    cb.set_ticks(levels)
    # Right panel: phase unwrapped along both grid axes.
    ax[1].set_title("Unwrapped phase [rad]")
    ax[1].set_xlabel("x [$\lambda$]")
    ax[1].set_ylim(np.min(y_grid), np.max(y_grid))
    ax[1].set_xlim(np.min(x_grid), np.max(x_grid))
    extents = (np.min(x_grid), np.max(x_grid), np.min(y_grid), np.max(y_grid))
    data = np.angle(ey_grid)
    data = np.unwrap(data, axis=0)
    data = np.unwrap(data, axis=1)
    levels = np.arange(0, 300, 60)
    cax = ax[1].imshow(data, extent=extents)
    ax[1].contour(data, extent=extents, colors='k', origin='upper')
    cb = fig.colorbar(cax, orientation='horizontal', ax=ax[1])
    cb.set_ticks(levels)
    #ax[0].set_aspect("equal")
    #ax[1].set_aspect("equal")
    #fig.set_tight_layout(True)
    fig.set_size_inches(fig_size, fig_size*(float(6)/8))
def doplot2(B, A, B2, A2, freq = (315.0/88.0) * 8.0):
    # Plot both filters' magnitude (dB) and unwrapped phase responses on a
    # shared frequency axis. Relies on a bare `pi` from a module-level
    # wildcard import (presumably numpy) — confirm.
    w, h = sps.freqz(B, A)
    w2, h2 = sps.freqz(B2, A2)
    # h.real /= C
    # h2.real /= C2
    begin = 0
    end = len(w)
    # end = int(len(w) * (12 / freq))
    # chop = len(w) / 20
    chop = 0
    w = w[begin:end]
    w2 = w2[begin:end]
    h = h[begin:end]
    h2 = h2[begin:end]
    v = np.empty(len(w))
    # print len(w)
    hm = np.absolute(h)
    hm2 = np.absolute(h2)
    # Magnitude ratio normalized to its DC value.
    v0 = hm[0] / hm2[0]
    for i in range(0, len(w)):
        # print i, freq / 2 * (w[i] / pi), hm[i], hm2[i], hm[i] / hm2[i], (hm[i] / hm2[i]) / v0
        v[i] = (hm[i] / hm2[i]) / v0
    fig = plt.figure()
    plt.title('Digital filter frequency response')
    ax1 = fig.add_subplot(111)
    # NOTE(review): `v` is converted to dB but only plotted in the
    # commented-out line below — currently dead.
    v = 20 * np.log10(v )
    # plt.plot(w * (freq/pi) / 2.0, v)
    # plt.show()
    # exit()
    plt.plot(w * (freq/pi) / 2.0, 20 * np.log10(abs(h)), 'r')
    plt.plot(w * (freq/pi) / 2.0, 20 * np.log10(abs(h2)), 'b')
    plt.ylabel('Amplitude [dB]', color='b')
    plt.xlabel('Frequency [rad/sample]')
    # Phase of both filters on a twin y-axis.
    ax2 = ax1.twinx()
    angles = np.unwrap(np.angle(h))
    angles2 = np.unwrap(np.angle(h2))
    plt.plot(w * (freq/pi) / 2.0, angles, 'g')
    plt.plot(w * (freq/pi) / 2.0, angles2, 'y')
    plt.ylabel('Angle (radians)', color='g')
    plt.grid()
    plt.axis('tight')
    plt.show()
def preFit(val0):
    # Calibrate the measurement's attenuation and phase offset against the
    # model at zero flux. Mutates measdata.ATT and measdata.PHI in place.
    # Relies on module-level elem, squid, measdata, getModelData,
    # find_nearest, and pi. NOTE(review): `val0` is unused — confirm whether
    # it is required by a fitting-callback signature.
    if elem.matchX is False:
        return
    # Refresh the model response (elem.S11) for the current parameters.
    getModelData(elem, squid)
    # Index of the x-axis sample closest to zero flux.
    zerofluxidx = find_nearest(measdata.xaxis, 0.0)
    # Ratio of measured to modeled magnitude, and phase difference.
    t2 = np.abs(measdata.ydat) / np.abs(elem.S11)
    t3 = (np.unwrap(np.angle(measdata.ydat), discont=pi) - np.unwrap(np.angle(elem.S11), discont=pi))
    measdata.ATT = measdata.ATT * t2[zerofluxidx]
    measdata.PHI = measdata.PHI + t3[zerofluxidx]
def unwrap(arr):
    """Unwrap a phase array, correcting cases where the first differences of
    numpy's unwrap themselves carry a spurious whole-2*pi slope.

    Parameters
    ----------
    arr : array_like
        Phase samples in radians.

    Returns
    -------
    numpy.ndarray
        Unwrapped phases starting at the same value as ``np.unwrap(arr)[0]``.
    """
    brr = np.unwrap(arr)
    # Nothing to correct for 0- or 1-sample input (the original indexed
    # crr[0] and crashed here).
    if brr.size < 2:
        return brr
    # First differences of the unwrapped phase, themselves unwrapped so
    # jumps of ~2*pi between successive slopes are removed.
    crr = np.unwrap(np.diff(brr))
    # Shift the whole difference sequence by the multiple of 2*pi that
    # brings the first difference near zero.
    nn = np.round(crr[0] / (2 * np.pi))
    crr -= nn * 2. * np.pi
    # Rebuild the phase by cumulative summation of the corrected
    # differences (replaces the original O(n^2) per-element np.sum loop).
    drr = np.empty_like(brr)
    drr[0] = brr[0]
    drr[1:] = brr[0] + np.cumsum(crr)
    return drr
def chickling_pd_overlay(shotno, date=time.strftime("%Y%m%d")):
    # Overlay the mean-removed scene (CO2) and reference (HeNe) phase traces
    # of one shot against time.
    # NOTE: `date` default is evaluated once at import time, not per call.
    fname, data = file_finder(shotno,date)
    fs = data[0]['samplerate']
    plt.figure("Phase Difference Blue = Scene Orange = Reference shot " + str(shotno) + " Date " + str(date))
    plt.xlabel("Time, s")
    plt.ylabel("Phase Difference, Radians")
    # NOTE(review): the outer np.unwrap is applied AFTER subtracting the
    # mean of the unwrapped trace from the RAW (wrapped) trace — the intent
    # was probably np.unwrap(trace) - np.average(np.unwrap(trace)); confirm
    # the parenthesization.
    plt.plot(data[0]['timetag_phdiff']/fs,np.unwrap(data[0]['phasediff_co2']-np.average(np.unwrap(data[0]['phasediff_co2']))))
    plt.plot(data[0]['timetag_phdiff']/fs,np.unwrap(data[0]['phasediff_hene']-np.average(np.unwrap(data[0]['phasediff_hene']))))
def main():
    # Python 2 script (print statements, raw_input). Loads a PR2 tabletop
    # OpenRAVE scene, unwraps the continuous joints of a hard-coded
    # start/end pair, plans a straight-line trajectory, and replays both the
    # straight-line and optimized trajectories in the viewer.
    ### Parameters ###
    ENV_FILE = "../../trajopt/data/pr2_table.env.xml"
    MANIP_NAME = "rightarm"
    N_STEPS = 10
    LINK_NAME = "r_gripper_tool_frame"
    INTERACTIVE = True
    #joints_start_end = np.array([
    #  [-0.95, -0.38918253, -2.43888696, -1.23400121, -0.87433154, -0.97616443, -2.10997203],
    #  [-0.4, -0.4087081, -3.77121706, -1.2273375, 0.69885101, -0.8992004, 3.13313843]
    #])
    #joints_start_end = np.array([[0.34066373, -0.49439586, -3.3 , -1.31059503 , -1.28229698, -0.15682819, -116.19626995],
    #  [ 0.5162424 , -0.42037121 , -3.7 , -1.30277208 , 1.31120586, -0.16411924 ,-118.57637204]])
    #joints_start_end = np.array([[ -1.83204054 , -0.33201855 , -1.01105089 , -1.43693186 , -1.099908, -2.00040616, -116.17133393],
    #[ -0.38176851 , 0.17886005 , -1.4 , -1.89752658 , -1.93285873, -1.60546868, -114.70809047]])
    joints_start_end = np.array([[0.33487707, -0.50480484 , -3.3 , -1.33546928 , -1.37194549 , -0.14645853 ,-116.11672039],
        [ 4.71340480e-01 , -4.56593341e-01 , -3.60000000e+00 , -1.33176173e+00, 1.21859723e+00 , -9.98780266e-02, -1.18561732e+02]])
    ##################
    # Unwrap the continuous-rotation joints (columns 2, 4, 6) so the start
    # and end configurations take the short way around.
    joints_start_end[:,2] = np.unwrap(joints_start_end[:,2])
    joints_start_end[:,4] = np.unwrap(joints_start_end[:,4])
    joints_start_end[:,6] = np.unwrap(joints_start_end[:,6])
    joints_start = joints_start_end[0,:]
    joints_end = joints_start_end[1,:]
    ### Env setup ####
    env = rave.RaveGetEnvironment(1)
    if env is None:
        env = rave.Environment()
        env.StopSimulation()
        atexit.register(rave.RaveDestroy)
        env.Load(ENV_FILE)
    robot = env.GetRobots()[0]
    manip = robot.GetManipulator(MANIP_NAME)
    robot.SetDOFValues(joints_start, manip.GetArmIndices())
    ##################
    result_traj = move_arm_straight(env, manip, N_STEPS, LINK_NAME, joints_start, joints_end, interactive=INTERACTIVE)
    print 'Showing original straight-line trajectory'
    env.SetViewer('qtcoin')
    env.UpdatePublishedBodies()
    import time
    time.sleep(2)
    # Replay the naive linear interpolation first, then the optimized result.
    play_traj(mu.linspace2d(joints_start, joints_end, N_STEPS), env, manip)
    raw_input('press enter to continue')
    print 'Showing optimized trajectory'
    play_traj(result_traj, env, manip)
    raw_input('press enter to continue')
def check_phase_vs_time(s, plot=True):
    # Python 2 code. Measure microwave phase vs. start time for qubits 0 and
    # 2 (relative to qubit 1), fit a line to each unwrapped phase series,
    # optionally plot against the expected detuning slope, and store the fit
    # results in the qubit registry entries.
    s, qubits, Qubits = util.loadQubits(s, write_access=True)
    phases0 = []
    phases2 = []
    # Sweep start times 0..11 ns.
    t0s = st.r[0:12:1,ns]
    for t0 in t0s:
        ph0, ph2 = tune_phases(s, t0, stats=1200, res=20, plot=False)
        phases0.append(ph0)
        phases2.append(ph2)
    # Unwrap before fitting so 2*pi jumps don't corrupt the slope.
    phases0 = np.unwrap(phases0)
    phases2 = np.unwrap(phases2)
    fit0 = np.polyfit(t0s, phases0, 1)
    fit2 = np.polyfit(t0s, phases2, 1)
    # Frequency detunings (in GHz) relative to qubit 1.
    df0 = (s.q1['f10'] - s.q0['f10'])[GHz]
    df2 = (s.q1['f10'] - s.q2['f10'])[GHz]
    if plot:
        fig = plt.figure()
        ax = fig.add_subplot(111)
        ax.plot(t0s, phases0, 'b.', label='measured phase')
        ax.plot(t0s, np.polyval(fit0, t0s), 'r-', label='phase fit')
        # Expected phase slope from the detuning alone.
        ax.plot(t0s, np.polyval([-2*np.pi*df0, 0], t0s), 'c-', label='detuning')
        ax.legend()
        fig = plt.figure()
        ax = fig.add_subplot(111)
        ax.plot(t0s, phases2, 'b.', label='measured phase')
        ax.plot(t0s, np.polyval(fit2, t0s), 'r-', label='phase fit')
        ax.plot(t0s, np.polyval([-2*np.pi*df2, 0], t0s), 'c-', label='detuning')
        ax.legend()
    print 'qubit 0:'
    print '  detuning:', df0
    print '  phase fit:', fit0[0]/(2*np.pi)
    print '  phase offset:', fit0[1]/(2*np.pi)
    print
    # Persist the calibration for qubit 0.
    Qubits[0]['uwavePhaseSlope'] = fit0[0]/(2*np.pi) * GHz
    Qubits[0]['uwavePhaseOfs'] = fit0[1]
    Qubits[0]['uwavePhaseFit'] = fit0
    print 'qubit 2:'
    print '  detuning q2:', df2
    print '  phase fit:', fit2[0]/(2*np.pi)
    print '  phase offset:', fit2[1]/(2*np.pi)
    print
    # Persist the calibration for qubit 2.
    Qubits[2]['uwavePhaseSlope'] = fit2[0]/(2*np.pi) * GHz
    Qubits[2]['uwavePhaseOfs'] = fit2[1]
    Qubits[2]['uwavePhaseFit'] = fit2
def plot_approx(self):
    '''Plot the amplitude and phase of each waveform against its reference.

    Iterates over the module-level `datasets` (config parsers) and
    `waveforms` (reference h+/hx and test h+/hx), reconstructs complex
    strains with the inclination polarization factors removed, and hands
    amplitude/phase (and, when the grids match, phase-difference and
    amplitude-quotient) curves to `waveformplots`. Relies on module-level
    `counter`, `plt`, and `np`.
    '''
    i = 1
    for conf, wfs in zip(datasets, waveforms):
        hpref, hcref, hp, hc = wfs
        approx = conf.get('approximant', 'approximant')
        domain = conf.get('approximant', 'domain')
        m1, m2, s1z, s2z = ['%.2g' % conf.getfloat('parameters', name) \
                            for name in ['m1', 'm2', 'spin1z', 'spin2z']]
        iota = conf.getfloat('parameters', 'inclination')
        # Undo the inclination-dependent polarization factors so the two
        # polarizations combine into one complex strain.
        cfac = np.cos(iota)
        pfac = 0.5 * (1. + cfac*cfac)
        href = hpref / pfac + 1.j * hcref / cfac
        h = hp.data.data / pfac + 1.j * hc.data.data / cfac
        # Sample-step parameter name depends on the domain.
        steppar = {'TD': 'deltaT', 'FD': 'deltaF'}
        dX = conf.getfloat('parameters', steppar[domain])
        xvals, xvals_ref = [np.linspace(0., dX * (x.size - 1), x.size) \
                            for x in [h, href]]
        # BUG FIX: the original used `counter is not ''`, comparing string
        # IDENTITY instead of equality (a SyntaxWarning on modern Python).
        if counter != '':
            num = counter
        else:
            num = i
        if domain == 'TD':
            xvals_ref += conf.getfloat('waveform-data', 'epoch')
            xvals += hp.epoch
        else:
            # remove zero frequency entry
            xvals = xvals[1:]
            xvals_ref = xvals_ref[1:]
            h = h[1:]
            href = href[1:]
        plotfunc = {'TD': plt.plot, 'FD': plt.loglog}
        waveformplots([approx, m1, m2, s1z, s2z], xvals, np.abs(h), \
                      xvals_ref, np.abs(href), 'amplitude', num, \
                      plotfunc[domain])
        # Phase plots use semilogx in the frequency domain.
        plotfunc['FD'] = plt.semilogx
        waveformplots([approx, m1, m2, s1z, s2z], xvals, np.unwrap(np.angle(h)), \
                      xvals_ref, np.unwrap(np.angle(href)), 'phase', num, \
                      plotfunc[domain])
        # Direct comparisons only make sense on (numerically) equal grids.
        if np.allclose(xvals, xvals_ref, atol = 1e-6):
            waveformplots([approx, m1, m2, s1z, s2z], xvals, \
                          np.unwrap(np.angle(h)) - np.unwrap(np.angle(href)), \
                          name = 'phase_diff', counter = num, \
                          plot_func = plotfunc[domain])
            # Exclude reference samples with zero amplitude from the quotient.
            sel = (np.abs(href) > 0)
            # BUG FIX: the original wrote xvals[[sel]] / h[[sel]], wrapping
            # the boolean mask in a list — deprecated (and later removed)
            # NumPy fancy indexing; index with the mask directly.
            waveformplots([approx, m1, m2, s1z, s2z], xvals[sel], \
                          np.abs(h[sel]) / np.abs(href[sel]), name = 'amp_quot', \
                          counter = num, plot_func = plotfunc[domain])
        i += 1
def __init__(self, filename):
    """Load a recorded double-pendulum trajectory and build cubic
    interpolators for both joint angles.

    Parameters
    ----------
    filename : str
        Path to a whitespace-delimited text file; the first five columns
        are read, with columns 1 and 2 holding the two joint angles sampled
        every 0.01 s.
    """
    samples = numpy.loadtxt(filename, usecols=range(5))
    n_rows = samples.shape[0]
    # Reconstruct the time axis from the fixed 0.01 s sample period.
    self.time_data = numpy.linspace(0, n_rows * 0.01, n_rows)
    # Unwrap each angle channel so the interpolators never see 2*pi jumps.
    theta1_data = numpy.unwrap(samples[:, 1])
    theta2_data = numpy.unwrap(samples[:, 2])
    self.theta1 = interp1d(self.time_data, theta1_data, kind="cubic")
    self.theta2 = interp1d(self.time_data, theta2_data, kind="cubic")
    # self.omega1 = interp1d(data[:,0], data[:,3], kind=cubic)
    # self.omega2 = interp1d(data[:,0], data[:,4], kind=cubic)
    self.state = 0.0, ()
def apply_filter(self, array):
    """Replace a 1-D array's data with its unwrapped phase, in place.

    For complex data the negated phase angle is unwrapped (the magnitude is
    discarded); for real data the values themselves are unwrapped. The
    discontinuity threshold is ``pi * self.threshold``.

    Returns True on success, False if the array is not one-dimensional.
    """
    if len(array.shape) != 1:
        return False
    discont = np.pi * self.threshold
    if isinstance(array.data[0], complex):
        # Complex input: keep only the (negated, unwrapped) phase.
        phase = np.arctan2(array.data.imag, array.data.real)
        array.data = np.unwrap(-phase, discont)
        #array.data = abs(array.data)*np.exp(1j*np.unwrap(phase,np.pi*self.threshold))
    else:
        # Real input: unwrap the values directly.
        array.data = np.unwrap(np.array(array.data), discont)
    return True
def chickling_csd(shotno, date=time.strftime("%Y%m%d")):
    # Plot the cross spectral density between the unwrapped CO2 and HeNe
    # phase-difference traces of one shot (20 MHz sample rate assumed).
    # NOTE: `date` default is evaluated once at import time, not per call.
    fname, data = file_finder(shotno,date)
    x = np.unwrap(data[0]['phasediff_co2'])
    y = np.unwrap(data[0]['phasediff_hene'])
    f, Pxy = signal.csd(x, y, 20e6, nperseg=1024)
    plt.figure("CSD for shot " + str(shotno) + " Date " + str(date))
    plt.semilogy(f, np.abs(Pxy))
    plt.xlabel('frequency [Hz]')
    plt.ylabel('CSD [V**2/Hz]')
def chickling_csv(shotno, fileno=1, date=time.strftime("%Y%m%d")):
    """Export the unwrapped CO2 phase-difference trace to a CSV file.

    Parameters
    ----------
    shotno : int
        First shot number to export.
    fileno : int
        Number of iterations (see NOTE below).
    date : str
        Date folder in YYYYMMDD form. NOTE: evaluated once at import time,
        not per call (kept for consistency with the sibling functions).
    """
    for j in range(0, fileno):
        # NOTE(review): `j` is never used, so every iteration re-reads the
        # SAME shot and rewrites the SAME file — the intent was probably
        # file_finder(shotno + j, date). Preserved as-is pending confirmation.
        fname, data = file_finder(shotno, date)
        samplesize = int(np.unwrap(data[0]['phasediff_co2']).size)
        # Normalized time axis (0..1) and the unwrapped phase trace.
        x_data = np.linspace(0, 1, samplesize)
        y_data = np.unwrap(data[0]['phasediff_co2'])
        with open(str(shotno)+'-'+str(shotno+fileno)+date+'.csv', 'w', newline='') as csvfile:
            # BUG FIX: the writer was re-created on every row inside the
            # loop; construct it once per file.
            datawriter = csv.writer(csvfile, delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL)
            for i in range(0, samplesize):
                datawriter.writerow((x_data[i], y_data[i]))
#df_v2 = pd.read_excel('ReflectionPhase_1_openingangle_85_length_10.xlsx') # dimensions of '0'th element V2 df_v1 = pd.read_excel( 'Selected_frequency_ReflectionPhase_1_openingangle_10_length_6.xlsx' ) # dimensions of '0'th element V1 df_v2 = pd.read_excel( 'Selected_frequency_ReflectionPhase_1_openingangle_85_length_10.xlsx' ) # dimensions of '0'th element V2 lambda0 = pd.DataFrame([]) lambda0 = 1 / (df_v1["frequency"].div(3 * 10 ^ 8)) k = 2 * pi / lambda0 D = lambda0 df_v1["reflectionphase_unwrapped"] = np.unwrap( (np.deg2rad(df_v1["reflectionphase"])) % 2 * np.pi) # modulo 2*pi helps to change range from -pi to +pi to 0 to 2*pi df_v2["reflectionphase_unwrapped"] = np.unwrap( (np.deg2rad(df_v2["reflectionphase"])) % 2 * np.pi) #omsriramajayam def fun(x, i): L_v = x[:N**2] theta_v = x[N**2:] phase_pred = [] for t, l in zip(theta_v, L_v): if ((t == 0) & (l == 0)): phase_pred.append(df_v1["reflectionphase_unwrapped"][i]) if ((t == 1) & (l == 1)):
def unwrap_euler(data):
    """Unwrap the Euler-angle columns of `data` in place.

    `phi` and `psi` are unwrapped over the full 2*pi range; `theta` is
    doubled before unwrapping and halved afterwards, i.e. treated as an
    angle with period pi.
    """
    for column in ('phi', 'psi'):
        data[column] = np.unwrap(data[column])
    data['theta'] = np.unwrap(data.theta * 2) / 2
def roundcorners(verts, radius=.5):
    ''' Round the corners of polygon defined by verts.
        Works for convex polygons assuming radius fits inside.

        Parameters
        ----------
        verts : list
            List of (x,y) pairs defining corners of polygon
        radius : float
            Radius of curvature

        Returns
        -------
        numpy.ndarray
            (x, y) points tracing the rounded polygon, closed back to its
            first point.

        Adapted from:
        https://stackoverflow.com/questions/24771828/algorithm-for-creating-rounded-corners-in-a-polygon
    '''
    # Loop-invariant helpers, hoisted out of the corner loop (the original
    # re-created both function objects on every iteration).
    def getlength(x, y):
        # Euclidean length of the vector (x, y).
        return np.sqrt(x * x + y * y)

    def getproportionpoint(point, segment, length, dx, dy):
        # Point at distance `segment` from `point`, backwards along (dx, dy).
        factor = segment / length
        return np.asarray([point[0] - dx * factor, point[1] - dy * factor])

    poly = []
    for v in range(len(verts))[::-1]:
        p1 = verts[v]
        p2 = verts[v - 1]
        p3 = verts[v - 2]
        # Edge vectors from the corner p2 toward its two neighbors.
        dx1 = p2[0] - p1[0]
        dy1 = p2[1] - p1[1]
        dx2 = p2[0] - p3[0]
        dy2 = p2[1] - p3[1]
        # Half the angle between the edges meeting at this corner.
        angle = (np.arctan2(dy1, dx1) - np.arctan2(dy2, dx2)) / 2
        tan = abs(np.tan(angle))
        # Distance from the corner to where the arc meets each edge.
        segment = radius / tan
        length1 = getlength(dx1, dy1)
        length2 = getlength(dx2, dy2)
        length = min(length1, length2)
        if segment > length:
            # Radius doesn't fit on the shorter edge; shrink it.
            # NOTE(review): this rebinds `radius` and therefore also affects
            # every subsequent corner — preserved from the original.
            segment = length
            radius = length * tan
        # Points where the arc is tangent to each edge.
        p1cross = getproportionpoint(p2, segment, length1, dx1, dy1)
        p2cross = getproportionpoint(p2, segment, length2, dx2, dy2)
        # Arc center lies along the corner's angle bisector.
        dx = p2[0] * 2 - p1cross[0] - p2cross[0]
        dy = p2[1] * 2 - p1cross[1] - p2cross[1]
        L = getlength(dx, dy)
        d = getlength(segment, radius)
        circlepoint = getproportionpoint(p2, d, L, dx, dy)
        startangle = np.arctan2(p1cross[1] - circlepoint[1], p1cross[0] - circlepoint[0])
        endangle = np.arctan2(p2cross[1] - circlepoint[1], p2cross[0] - circlepoint[0])
        # Unwrap so the sweep never jumps across the +/-pi branch cut.
        startangle, endangle = np.unwrap([startangle, endangle])
        arc = []
        for i in np.linspace(startangle, endangle, 100):
            arc.append([
                circlepoint[0] + np.cos(i) * radius,
                circlepoint[1] + np.sin(i) * radius
            ])
        poly.extend(arc)
    poly.append(poly[0])  # Close the loop
    return np.asarray(poly)
def mag_theta_star(self, t_in, n_rho=400, n_theta=200, rz_grid=False):
    """
    Computes theta star (straight-field-line poloidal angle)

    Input:
    ----------
    t_in: float
        time point for the evaluation
    n_rho: int
        number of flux surfaces equally spaced from 0 to 1 of rho_pol
    n_theta: int
        number of poloidal points
    rz_grid: bool
        evaluate theta star on the grid

    Output:
    ----------
    R, z, theta: 3d arrays size(n_rho, n_theta)
        (or rgrid, zgrid, theta_star on a regular R,z grid when rz_grid)
    """
    # Flux surfaces: skip rho=0 (theta star is singular on the axis)
    rho = np.linspace(0, 1, n_rho+1)[1:]
    theta = np.linspace(0, 2*np.pi, n_theta, endpoint=False)
    # Map (rho, theta) to machine coordinates at this time point
    magr, magz = self.rhoTheta2rz(rho, theta, t_in=t_in, coord_in='rho_pol')
    magr, magz = magr[0].T, magz[0].T
    # Magnetic-axis position interpolated in time
    r0 = np.interp(t_in, self.t_eq, self.ssq['Rmag'])
    z0 = np.interp(t_in, self.t_eq, self.ssq['Zmag'])

    # Finite-difference gradients along the (rho, theta) grid axes
    drdrho, drtheta = np.gradient(magr)
    dzdrho, dztheta = np.gradient(magz)
    dpsidrho, dpsitheta = np.gradient(np.tile(rho**2, (n_theta, 1)).T)
    grad_rho = np.dstack((drdrho, dzdrho, dpsidrho))
    grad_theta = np.dstack((drtheta, dztheta, dpsitheta))
    # Cross product gives the surface normal; its components yield the
    # poloidal-flux derivatives (1e-8 guards against division by zero)
    normal = np.cross(grad_rho, grad_theta, axis=-1)

    dpsi_dr = -normal[:, :, 0]/(normal[:, :, 2] + 1e-8)  #Bz
    dpsi_dz = -normal[:, :, 1]/(normal[:, :, 2] + 1e-8)  #Br

    #WARNING not defined on the magnetics axis
    dtheta_star = ((magr - r0)**2 + (magz - z0)**2)/(dpsi_dz*(magz - z0) + dpsi_dr*(magr - r0))/magr
    # Geometric poloidal angle about the axis, made monotonic per surface
    theta = np.arctan2(magz - z0, - magr + r0)
    theta = np.unwrap(theta - theta[:, (0, )], axis=1)

    from scipy.integrate import cumtrapz
    # Definition of the theta star by integral
    theta_star = cumtrapz(dtheta_star, theta, axis=1, initial=0)
    # Endpoint correction: the grid omits theta=2*pi (endpoint=False)
    correction = (n_theta - 1.)/n_theta
    theta_star /= theta_star[:, (-1, )]/(2*np.pi)/correction  #normalize to 2pi

    if not rz_grid:
        return magr, magz, theta_star

    # Interpolate theta star on a regular grid.  Interpolate cos/sin
    # instead of the angle itself to avoid the 0/2*pi branch cut.
    cos_th, sin_th = np.cos(theta_star), np.sin(theta_star)
    Linterp = LinearNDInterpolator(np.c_[magr.ravel(), magz.ravel()],
                                   cos_th.ravel(), 0)
    nx = 100
    ny = 150
    rgrid = np.linspace(magr.min(), magr.max(), nx)
    zgrid = np.linspace(magz.min(), magz.max(), ny)
    R, Z = np.meshgrid(rgrid, zgrid)
    cos_grid = Linterp(np.c_[R.ravel(), Z.ravel()]).reshape(R.shape)
    # Reuse the triangulation by swapping in the sin values
    Linterp.values[:, 0] = sin_th.ravel()  #trick save a some computing time
    sin_grid = Linterp(np.c_[R.ravel(), Z.ravel()]).reshape(R.shape)
    theta_star = np.arctan2(sin_grid, cos_grid)
    return rgrid, zgrid, theta_star
def prepare_angles_for_interp(df):
    """Unwrap the ``yaw`` column (degrees) in place so it is continuous.

    NaN entries are left untouched; the remaining values are converted to
    radians, unwrapped (any jump larger than pi/2 radians, i.e. 90 degrees,
    is treated as a wrap-around), and converted back to degrees.

    Parameters
    ----------
    df : pandas.DataFrame
        Must contain a ``yaw`` column in degrees.

    Returns
    -------
    pandas.DataFrame
        The same ``df`` object, mutated in place.
    """
    valid = df["yaw"].notna()
    yaw_complemented = np.rad2deg(
        np.unwrap(np.deg2rad(df.loc[valid, "yaw"]), discont=np.pi / 2))
    # BUG FIX: the original used chained indexing
    # (df["yaw"][mask] = arr.reshape(-1, 1)), which assigns through a
    # possible copy (SettingWithCopy) and pushes a 2-D array into a Series;
    # .loc with the 1-D result writes reliably into df itself.
    df.loc[valid, "yaw"] = yaw_complemented
    return df
def specgram(audio,
             n_fft=512,
             hop_length=None,
             mask=True,
             log_mag=True,
             re_im=False,
             dphase=True,
             mag_only=False):
    """Spectrogram using librosa.

    Args:
      audio: 1-D array of float32 sound samples.
      n_fft: Size of the FFT.
      hop_length: Stride of FFT. Defaults to n_fft/2.
      mask: Mask the phase derivative by the magnitude.
      log_mag: Use the logamplitude.
      re_im: Output Real and Imag. instead of logMag and dPhase.
      dphase: Use derivative of phase instead of phase.
      mag_only: Don't return phase.

    Returns:
      specgram: [n_fft/2 + 1, audio.size / hop_length, 2]. The first channel
        is the logamplitude and the second channel is the derivative of phase.
        With mag_only=True the last dimension is 1 (magnitude only).
    """
    if not hop_length:
        hop_length = int(n_fft / 2.)
    fft_config = dict(
        n_fft=n_fft, win_length=n_fft, hop_length=hop_length, center=True)
    spec = librosa.stft(audio, **fft_config)

    if re_im:
        # Raw real/imaginary channels, no scaling.
        re = spec.real[:, :, np.newaxis]
        im = spec.imag[:, :, np.newaxis]
        spec_real = np.concatenate((re, im), axis=2)
    else:
        mag, phase = librosa.core.magphase(spec)
        phase_angle = np.angle(phase)

        # Magnitudes, scaled 0-1.
        # NOTE(review): librosa.logamplitude was removed in librosa >= 0.6;
        # amplitude_to_db is the modern equivalent. Left as-is to preserve
        # behavior with the pinned librosa version.
        if log_mag:
            mag = (librosa.logamplitude(
                mag**2, amin=1e-13, top_db=120., ref_power=np.max) / 120.) + 1
        else:
            mag /= mag.max()

        if dphase:
            # Derivative of phase along time, normalized to [-1, 1] by pi.
            phase_unwrapped = np.unwrap(phase_angle)
            p = phase_unwrapped[:, 1:] - phase_unwrapped[:, :-1]
            p = np.concatenate([phase_unwrapped[:, 0:1], p], axis=1) / np.pi
        else:
            # Normal phase.
            p = phase_angle / np.pi
        # Mask the phase by the magnitude.
        if log_mag and mask:
            p = mag * p
        # Return Mag and Phase as float32 with a trailing channel axis.
        p = p.astype(np.float32)[:, :, np.newaxis]
        mag = mag.astype(np.float32)[:, :, np.newaxis]
        if mag_only:
            # BUG FIX: mag already carries the trailing channel axis added
            # just above; the original applied np.newaxis a second time here,
            # producing a 4-D [F, T, 1, 1] array that contradicts the
            # documented 3-D return shape.
            spec_real = mag
        else:
            spec_real = np.concatenate((mag, p), axis=2)
    return spec_real
TMatrix0ip[:, :].imag, '-', om, TMatrix0ip[:, :].real, '--', ) ax = axa[0, 1] ax2 = ax.twiny() ax2.set_xlim([ax.get_xlim()[0] / eV * hbar, ax.get_xlim()[1] / eV * hbar]) ax.plot(om, abs(TMatrix0ip[:, :]), '-') ax.set_yscale('log') ax = axa[1, 1] ax2 = ax.twiny() ax2.set_xlim([ax.get_xlim()[0] / eV * hbar, ax.get_xlim()[1] / eV * hbar]) ax.plot(om, np.unwrap(np.angle(TMatrix0ip[:, :]), axis=0), '-') ax = axa[1, 0] ax.text(0.5, 0.5, str(pargs).replace(',', ',\n'), horizontalalignment='center', verticalalignment='center', transform=ax.transAxes) pdf.savefig(f) # In[ ]: ''' #kdensity = 66 #defined from cl arguments bz_0 = np.array((0,0,0.,)) bz_K1 = np.array((1.,0,0))*4*np.pi/3/hexside/s3
# --- Frequency-response plots: amplitude and phase with 2-sigma bands ---
plt.clf()
plt.subplot(211)
# Amplitude |FR|; the first half of UAP holds the amplitude variances,
# so 2*sqrt(.) is a 2-sigma error bar.
plt.errorbar(f * 1e-6,
             np.abs(FR),
             2 * np.sqrt(UAP[:len(UAP) // 2]),
             fmt=".-",
             alpha=0.2,
             color=colors[0])
plt.xlim(0.5, 80)
plt.ylim(0.04, 0.24)
plt.xlabel("frequency / MHz", fontsize=22)
plt.tick_params(which="both", labelsize=18)
plt.ylabel("amplitude / V/MPa", fontsize=22)

plt.subplot(212)
# Unwrapped phase; the second half of UAP holds the phase uncertainties.
# NOTE(review): np.angle already returns radians, so the extra
# "* np.pi / 180" looks like a degrees->radians conversion applied to
# radian data even though the y-label says rad — verify against the
# definition of FR upstream.
plt.errorbar(f * 1e-6,
             np.unwrap(np.angle(FR)) * np.pi / 180,
             2 * UAP[len(UAP) // 2:],
             fmt=".-",
             alpha=0.2,
             color=colors[0])
plt.xlim(0.5, 80)
plt.ylim(-0.2, 0.3)
plt.xlabel("frequency / MHz", fontsize=22)
plt.tick_params(which="both", labelsize=18)
plt.ylabel("phase / rad", fontsize=22)

# Time-domain uncertainty of the reconstructed pressure signal.
plt.figure(4)
plt.clf()
plt.plot(time * 1e6, ux, label="uncertainty", linewidth=2, color=colors[0])
plt.xlabel(r"time / $\mu$s", fontsize=22)
plt.ylabel("uncertainty / MPa", fontsize=22)
data0 = np.loadtxt(data_files[0]) data_sets = np.zeros((len(data_files), *data0.shape)) for i, file_path in enumerate(data_files): data = np.loadtxt(file_path) data[:, 1] -= np.mean(data[:, 1]) data_sets[i] = data return np.mean(data_sets, axis=0) d = 18.44 #mm ref = post_process(refs) sample = post_process(samples) freqs, fft_ref = fft(ref[:, 0], ref[:, 1]) freqs_sample, fft_sample = fft(sample[:, 0], sample[:, 1]) T = fft_sample/fft_ref idx = (freqs > 0.3) & (freqs < 1.0) f = freqs[idx]#*10**(-12) phase = np.unwrap(np.angle(T[idx])) p = np.polyfit(f, phase, 1) phase -= p[1] ri = 1 + 0.3*phase/(2*np.pi*f*d) plt.figure() plt.plot(f, ri) plt.ylim(0.9,1.1) plt.show()
# Demo: manually wrap a sinusoid's values back by multiples of 2*pi,
# then show that np.unwrap on the doubled signal recovers a smooth curve.
import numpy as np
import matplotlib.pyplot as plt

x = np.arange(0, np.pi, 0.1)
# Peak value is 4.2*pi, so up to two 2*pi reductions are needed below.
y = 4.2 * np.pi * np.sin(x)
for i in range(len(x)):
    if y[i] <= np.pi:
        y[i] = y[i]  # already within the principal branch; keep as-is
    elif y[i] <= 3 * np.pi:
        y[i] = y[i] - 2 * np.pi  # one wrap
    else:
        y[i] = y[i] - 4 * np.pi  # two wraps
plt.plot(x, y)

plt.figure("2")
# Doubling before unwrap (and halving after) also removes jumps of pi,
# not only 2*pi — the standard trick for half-period-ambiguous phases.
plt.plot(x, np.unwrap(2 * y) / 2)
plt.show()
def freqz_resp_list(b, a=np.array([1]), mode='dB', fs=1.0, n_pts=1024, fsize=(6, 4)): """ A method for displaying digital filter frequency response magnitude, phase, and group delay. A plot is produced using matplotlib freq_resp(self,mode = 'dB',Npts = 1024) A method for displaying the filter frequency response magnitude, phase, and group delay. A plot is produced using matplotlib freqz_resp(b,a=[1],mode = 'dB',Npts = 1024,fsize=(6,4)) b = ndarray of numerator coefficients a = ndarray of denominator coefficents mode = display mode: 'dB' magnitude, 'phase' in radians, or 'groupdelay_s' in samples and 'groupdelay_t' in sec, all versus frequency in Hz Npts = number of points to plot; default is 1024 fsize = figure size; defult is (6,4) inches Mark Wickert, January 2015 """ if type(b) == list: # We have a list of filters N_filt = len(b) f = np.arange(0, n_pts) / (2.0 * n_pts) for n in range(N_filt): w, H = signal.freqz(b[n], a[n], 2 * np.pi * f) if n == 0: plt.figure(figsize=fsize) if mode.lower() == 'db': plt.plot(f * fs, 20 * np.log10(np.abs(H))) if n == N_filt - 1: plt.xlabel('Frequency (Hz)') plt.ylabel('Gain (dB)') plt.title('Frequency Response - Magnitude') elif mode.lower() == 'phase': plt.plot(f * fs, np.angle(H)) if n == N_filt - 1: plt.xlabel('Frequency (Hz)') plt.ylabel('Phase (rad)') plt.title('Frequency Response - Phase') elif (mode.lower() == 'groupdelay_s') or (mode.lower() == 'groupdelay_t'): """ Notes ----- Since this calculation involves finding the derivative of the phase response, care must be taken at phase wrapping points and when the phase jumps by +/-pi, which occurs when the amplitude response changes sign. Since the amplitude response is zero when the sign changes, the jumps do not alter the group delay results. """ theta = np.unwrap(np.angle(H)) # Since theta for an FIR filter is likely to have many pi phase # jumps too, we unwrap a second time 2*theta and divide by 2 theta2 = np.unwrap(2 * theta) / 2. 
theta_dif = np.diff(theta2) f_diff = np.diff(f) Tg = -np.diff(theta2) / np.diff(w) # For gain almost zero set groupdelay = 0 idx = np.nonzero(np.ravel(20 * np.log10(H[:-1]) < -400))[0] Tg[idx] = np.zeros(len(idx)) max_Tg = np.max(Tg) #print(max_Tg) if mode.lower() == 'groupdelay_t': max_Tg /= fs plt.plot(f[:-1] * fs, Tg / fs) plt.ylim([0, 1.2 * max_Tg]) else: plt.plot(f[:-1] * fs, Tg) plt.ylim([0, 1.2 * max_Tg]) if n == N_filt - 1: plt.xlabel('Frequency (Hz)') if mode.lower() == 'groupdelay_t': plt.ylabel('Group Delay (s)') else: plt.ylabel('Group Delay (samples)') plt.title('Frequency Response - Group Delay') else: s1 = 'Error, mode must be "dB", "phase, ' s2 = '"groupdelay_s", or "groupdelay_t"' log.info(s1 + s2)
PhaseFixedRotationFactor*np.pi/180 buffer_sdat = np.power(10, buffer_gain / 20) * np.exp( 1j * buffer_phase) balun_gain = np.polyval(PolyGain_balun, freq) balun_phase = np.polyval(PolyPhase_balun, freq) balun_phase = balun_phase - np.mean(balun_phase) balun_sdat = np.power(10, balun_gain / 20) * np.exp(1j * balun_phase) sdat = sdat_raw / buffer_sdat / balun_sdat slope_valid_inds = np.where( np.all( (freq >= slopeBandwidthFreq[0], freq <= slopeBandwidthFreq[1]), 0)) sub_angles = np.unwrap(np.angle(sdat[slope_valid_inds])) * 180 / np.pi sub_freq = freq[slope_valid_inds] - np.mean(freq[slope_valid_inds]) slope = np.polyfit(sub_freq, sub_angles - np.mean(sub_angles), 1)[0] index_f0 = np.squeeze(np.argwhere(freq == 28)) collectedData.append( Measurement(cfg=pt, pwr=fetchSumDat_pwr(pt), gain=dB20(sdat[index_f0]), phase=ang_deg(sdat[index_f0]), f=freq, s21=sdat, slope=slope)) # Find the indicies close to 0 and 180 as my reference curves phis = np.array([s.phase for s in collectedData]) best_slopes = np.argsort(np.abs(np.mod(phis + 90, 180) - 90))[0:6]
def _spectral_helper(x, y, fs=1.0, window='hanning', nperseg=256, noverlap=None, nfft=None, detrend='constant', return_onesided=True, scaling='spectrum', axis=-1, mode='psd'): ''' Calculate various forms of windowed FFTs for PSD, CSD, etc. This is a helper function that implements the commonality between the psd, csd, and spectrogram functions. It is not designed to be called externally. The windows are not averaged over; the result from each window is returned. Parameters --------- x : array_like Array or sequence containing the data to be analyzed. y : array_like Array or sequence containing the data to be analyzed. If this is the same object in memoery as x (i.e. _spectral_helper(x, x, ...)), the extra computations are spared. fs : float, optional Sampling frequency of the time series. Defaults to 1.0. window : str or tuple or array_like, optional Desired window to use. See `get_window` for a list of windows and required parameters. If `window` is array_like it will be used directly as the window and its length will be used for nperseg. Defaults to 'hanning'. nperseg : int, optional Length of each segment. Defaults to 256. noverlap : int, optional Number of points to overlap between segments. If None, ``noverlap = nperseg // 2``. Defaults to None. nfft : int, optional Length of the FFT used, if a zero padded FFT is desired. If None, the FFT length is `nperseg`. Defaults to None. detrend : str or function or False, optional Specifies how to detrend each segment. If `detrend` is a string, it is passed as the ``type`` argument to `detrend`. If it is a function, it takes a segment and returns a detrended segment. If `detrend` is False, no detrending is done. Defaults to 'constant'. return_onesided : bool, optional If True, return a one-sided spectrum for real data. If False return a two-sided spectrum. Note that for complex data, a two-sided spectrum is always returned. 
scaling : { 'density', 'spectrum' }, optional Selects between computing the cross spectral density ('density') where `Pxy` has units of V**2/Hz and computing the cross spectrum ('spectrum') where `Pxy` has units of V**2, if `x` and `y` are measured in V and fs is measured in Hz. Defaults to 'density' axis : int, optional Axis along which the periodogram is computed; the default is over the last axis (i.e. ``axis=-1``). mode : str, optional Defines what kind of return values are expected. Options are ['psd', 'complex', 'magnitude', 'angle', 'phase']. Returns ------- freqs : ndarray Array of sample frequencies. result : ndarray Array of output data, contents dependant on *mode* kwarg. t : ndarray Array of times corresponding to each data segment References ---------- stackoverflow: Rolling window for 1D arrays in Numpy? <http://stackoverflow.com/a/6811241> stackoverflow: Using strides for an efficient moving average filter <http://stackoverflow.com/a/4947453> Notes ----- Adapted from matplotlib.mlab .. versionadded:: 0.16.0 ''' if mode not in ['psd', 'complex', 'magnitude', 'angle', 'phase']: raise ValueError("Unknown value for mode %s, must be one of: " "'default', 'psd', 'complex', " "'magnitude', 'angle', 'phase'" % mode) # If x and y are the same object we can save ourselves some computation. 
same_data = y is x if not same_data and mode != 'psd': raise ValueError("x and y must be equal if mode is not 'psd'") axis = int(axis) # Ensure we have np.arrays, get outdtype x = np.asarray(x) if not same_data: y = np.asarray(y) outdtype = np.result_type(x, y, np.complex64) else: outdtype = np.result_type(x, np.complex64) if not same_data: # Check if we can broadcast the outer axes together xouter = list(x.shape) youter = list(y.shape) xouter.pop(axis) youter.pop(axis) try: outershape = np.broadcast(np.empty(xouter), np.empty(youter)).shape except ValueError: raise ValueError('x and y cannot be broadcast together.') if same_data: if x.size == 0: return np.empty(x.shape), np.empty(x.shape), np.empty(x.shape) else: if x.size == 0 or y.size == 0: outshape = outershape + (min([x.shape[axis], y.shape[axis]]), ) emptyout = np.rollaxis(np.empty(outshape), -1, axis) return emptyout, emptyout, emptyout if x.ndim > 1: if axis != -1: x = np.rollaxis(x, axis, len(x.shape)) if not same_data and y.ndim > 1: y = np.rollaxis(y, axis, len(y.shape)) # Check if x and y are the same length, zero-pad if neccesary if not same_data: if x.shape[-1] != y.shape[-1]: if x.shape[-1] < y.shape[-1]: pad_shape = list(x.shape) pad_shape[-1] = y.shape[-1] - x.shape[-1] x = np.concatenate((x, np.zeros(pad_shape)), -1) else: pad_shape = list(y.shape) pad_shape[-1] = x.shape[-1] - y.shape[-1] y = np.concatenate((y, np.zeros(pad_shape)), -1) # X and Y are same length now, can test nperseg with either if x.shape[-1] < nperseg: warnings.warn('nperseg = {0:d}, is greater than input length = {1:d}, ' 'using nperseg = {1:d}'.format(nperseg, x.shape[-1])) nperseg = x.shape[-1] nperseg = int(nperseg) if nperseg < 1: raise ValueError('nperseg must be a positive integer') if nfft is None: nfft = nperseg elif nfft < nperseg: raise ValueError('nfft must be greater than or equal to nperseg.') else: nfft = int(nfft) if noverlap is None: noverlap = nperseg // 2 elif noverlap >= nperseg: raise ValueError('noverlap 
must be less than nperseg.') else: noverlap = int(noverlap) # Handle detrending and window functions if not detrend: def detrend_func(d): return d elif not hasattr(detrend, '__call__'): def detrend_func(d): return signaltools.detrend(d, type=detrend, axis=-1) elif axis != -1: # Wrap this function so that it receives a shape that it could # reasonably expect to receive. def detrend_func(d): d = np.rollaxis(d, -1, axis) d = detrend(d) return np.rollaxis(d, axis, len(d.shape)) else: detrend_func = detrend if isinstance(window, string_types) or type(window) is tuple: win = get_window(window, nperseg) else: win = np.asarray(window) if len(win.shape) != 1: raise ValueError('window must be 1-D') if win.shape[0] != nperseg: raise ValueError('window must have length of nperseg') if np.result_type(win, np.complex64) != outdtype: win = win.astype(outdtype) if mode == 'psd': if scaling == 'density': scale = 1.0 / (fs * (win * win).sum()) elif scaling == 'spectrum': scale = 1.0 / win.sum()**2 else: raise ValueError('Unknown scaling: %r' % scaling) else: scale = 1 if return_onesided is True: if np.iscomplexobj(x): sides = 'twosided' else: sides = 'onesided' if not same_data: if np.iscomplexobj(y): sides = 'twosided' else: sides = 'twosided' if sides == 'twosided': num_freqs = nfft elif sides == 'onesided': if nfft % 2: num_freqs = (nfft + 1) // 2 else: num_freqs = nfft // 2 + 1 # Perform the windowed FFTs result = _fft_helper(x, win, detrend_func, nperseg, noverlap, nfft) result = result[..., :num_freqs] freqs = fftpack.fftfreq(nfft, 1 / fs)[:num_freqs] if not same_data: # All the same operations on the y data result_y = _fft_helper(y, win, detrend_func, nperseg, noverlap, nfft) result_y = result_y[..., :num_freqs] result = np.conjugate(result) * result_y elif mode == 'psd': result = np.conjugate(result) * result elif mode == 'magnitude': result = np.absolute(result) elif mode == 'angle' or mode == 'phase': result = np.angle(result) elif mode == 'complex': pass result *= scale 
if sides == 'onesided': if nfft % 2: result[..., 1:] *= 2 else: # Last point is unpaired Nyquist freq point, don't double result[..., 1:-1] *= 2 t = np.arange(nperseg / 2, x.shape[-1] - nperseg / 2 + 1, nperseg - noverlap) / float(fs) if sides != 'twosided' and not nfft % 2: # get the last value correctly, it is negative otherwise freqs[-1] *= -1 # we unwrap the phase here to handle the onesided vs. twosided case if mode == 'phase': result = np.unwrap(result, axis=-1) result = result.astype(outdtype) # All imaginary parts are zero anyways if same_data and mode != 'complex': result = result.real # Output is going to have new last axis for window index if axis != -1: # Specify as positive axis index if axis < 0: axis = len(result.shape) - 1 - axis # Roll frequency axis back to axis where the data came from result = np.rollaxis(result, -1, axis) else: # Make sure window/time index is last axis result = np.rollaxis(result, -1, -2) return freqs, t, result
# Axial wavenumber for each radial wavevector sample (free-space dispersion).
# NOTE(review): k0, H (Hankel transform object), z, EkrH, Erz, r, Er, ErH
# are defined upstream of this chunk.
kz = np.sqrt(k0**2 - H.kr**2)
for i, z_loop in enumerate(z):
    phi_z = kz * z_loop  # Propagation phase
    EkrHz = EkrH * np.exp(1j * phi_z)  # Apply propagation
    ErHz = H.iqdht(EkrHz)  # iQDHT
    Erz[:, i] = H.to_original_r(ErHz)  # Interpolate output
Irz = np.abs(Erz)**2  # Intensity over the (r, z) plane

# %%
# Plotting
# --------
# Plot the initial field and radial wavevector distribution (given by the
# Hankel transform); phases are unwrapped for readability.
plt.figure()
plt.plot(r * 1e3, np.abs(Er)**2, r * 1e3, np.unwrap(np.angle(Er)),
         H.r * 1e3, np.abs(ErH)**2, H.r * 1e3, np.unwrap(np.angle(ErH)), '+')
plt.title('Initial electric field distribution')
plt.xlabel('Radial co-ordinate (r) /mm')
plt.ylabel('Field intensity /arb.')
plt.legend(['$|E(r)|^2$', '$\\phi(r)$', '$|E(H.r)|^2$', '$\\phi(H.r)$'])
plt.axis([0, 1, 0, 1])

plt.figure()
plt.plot(H.kr, np.abs(EkrH)**2)
plt.title('Radial wave-vector distribution')
plt.xlabel(r'Radial wave-vector ($k_r$) /rad $m^{-1}$')
plt.ylabel('Field intensity /arb.')
plt.axis([0, 3e4, 0, np.max(np.abs(EkrH)**2)])

# %%
G = 6.67259e-11 # m^3/kg/s^2 MSol = 1.989e30 # kg # template parameters: masses in units of MSol: t_mtot = t_m1 + t_m2 # final BH mass is typically 95% of the total initial mass: t_mfin = t_mtot * 0.95 # Final BH radius, in km: R_fin = 2 * G * t_mfin * MSol / clight**2 / 1000. # complex template: template = (template_p + template_c * 1.j) ttime = time - time[0] - template_offset # compute the instantaneous frequency of this chirp-like signal: tphase = np.unwrap(np.angle(template)) fGW = np.gradient(tphase) * fs / (2. * np.pi) # fix discontinuities at the very end: # iffix = np.where(np.abs(np.gradient(fGW)) > 100.)[0] iffix = np.where(np.abs(template) < np.abs(template).max() * 0.001)[0] fGW[iffix] = fGW[iffix[0] - 1] fGW[np.where(fGW < 1.)] = fGW[iffix[0] - 1] # compute v/c: voverc = (G * t_mtot * MSol * np.pi * fGW / clight**3)**(1. / 3.) # index where f_GW is in-band: f_inband = fband[0] iband = np.where(fGW > f_inband)[0][0] # index at the peak of the waveform: ipeak = np.argmax(np.abs(template))
def delay(self, x):
    '''Plot the delay of the complex response ``x`` versus frequency.

    The delay is computed as -phase/(2*pi*f) with the phase unwrapped
    first — i.e. the *phase* delay tau(f) = -phi(f)/omega, not the group
    delay (which would be the derivative -d(phi)/d(omega)).

    ``x`` must have one complex sample per entry of ``self.frequencies``.
    '''
    pl.grid(True)
    delay = -np.unwrap(np.angle(x)) / (2 * np.pi * np.array(self.frequencies))
    pl.plot(self.frequencies, delay)
def preview(args): import matplotlib matplotlib.use('Agg') import matplotlib.pyplot as plt from scipy.io.wavfile import write as wavwrite from scipy.signal import freqz preview_dir = os.path.join(args.train_dir, 'preview') if not os.path.isdir(preview_dir): os.makedirs(preview_dir) # Load graph infer_metagraph_fp = os.path.join(args.train_dir, 'infer', 'infer.meta') graph = tf.get_default_graph() saver = tf.train.import_meta_graph(infer_metagraph_fp) # Generate or restore z_i and z_o z_fp = os.path.join(preview_dir, 'z.pkl') if os.path.exists(z_fp): with open(z_fp, 'rb') as f: _zs = pickle.load(f) else: # Sample z samp_feeds = {} samp_feeds[graph.get_tensor_by_name('samp_z_n:0')] = args.preview_n samp_fetches = {} samp_fetches['zs'] = graph.get_tensor_by_name('samp_z:0') with tf.Session() as sess: _samp_fetches = sess.run(samp_fetches, samp_feeds) _zs = _samp_fetches['zs'] # Save z with open(z_fp, 'wb') as f: pickle.dump(_zs, f) # label to one hot vector sample_n = 20 one_hot = np.zeros([sample_n, _D_Y]) _zs = _zs[:sample_n] for i in range(10): one_hot[2 * i + 1][i] = 1 one_hot[2 * i][i] = 1 _zs = np.concatenate([_zs, one_hot], 1) # Set up graph for generating preview images feeds = {} feeds[graph.get_tensor_by_name('z:0')] = _zs feeds[graph.get_tensor_by_name('flat_pad:0')] = _WINDOW_LEN // 2 fetches = {} fetches['step'] = tf.train.get_or_create_global_step() fetches['G_z'] = graph.get_tensor_by_name('G_z:0') fetches['G_z_flat_int16'] = graph.get_tensor_by_name('G_z_flat_int16:0') if args.wavegan_genr_pp: fetches['pp_filter'] = graph.get_tensor_by_name( 'G/pp_filt/conv1d/kernel:0')[:, 0, 0] # Summarize G_z = graph.get_tensor_by_name('G_z_flat:0') summaries = [ tf.summary.audio('preview', tf.expand_dims(G_z, axis=0), _FS, max_outputs=1) ] fetches['summaries'] = tf.summary.merge(summaries) summary_writer = tf.summary.FileWriter(preview_dir) # PP Summarize if args.wavegan_genr_pp: pp_fp = tf.placeholder(tf.string, []) pp_bin = tf.read_file(pp_fp) pp_png = 
tf.image.decode_png(pp_bin) pp_summary = tf.summary.image('pp_filt', tf.expand_dims(pp_png, axis=0)) # Loop, waiting for checkpoints ckpt_fp = None while True: latest_ckpt_fp = tf.train.latest_checkpoint(args.train_dir) if latest_ckpt_fp != ckpt_fp: print('Preview: {}'.format(latest_ckpt_fp)) with tf.Session() as sess: saver.restore(sess, latest_ckpt_fp) _fetches = sess.run(fetches, feeds) _step = _fetches['step'] gen_speech = _fetches['G_z_flat_int16'] gen_len = len(gen_speech) / sample_n for i in range(sample_n): label = int(i / 2) start = i * gen_len end = start + gen_len preview_fp = os.path.join( preview_dir, '{}_{}_{}.wav'.format(str(label), str(_step), str(i))) wavwrite(preview_fp, _FS, gen_speech[start:end]) summary_writer.add_summary(_fetches['summaries'], _step) if args.wavegan_genr_pp: w, h = freqz(_fetches['pp_filter']) fig = plt.figure() plt.title('Digital filter frequncy response') ax1 = fig.add_subplot(111) plt.plot(w, 20 * np.log10(abs(h)), 'b') plt.ylabel('Amplitude [dB]', color='b') plt.xlabel('Frequency [rad/sample]') ax2 = ax1.twinx() angles = np.unwrap(np.angle(h)) plt.plot(w, angles, 'g') plt.ylabel('Angle (radians)', color='g') plt.grid() plt.axis('tight') _pp_fp = os.path.join( preview_dir, '{}_ppfilt.png'.format(str(_step).zfill(8))) plt.savefig(_pp_fp) with tf.Session() as sess: _summary = sess.run(pp_summary, {pp_fp: _pp_fp}) summary_writer.add_summary(_summary, _step) print('Done') ckpt_fp = latest_ckpt_fp time.sleep(1)
def run(self, valuesrc, frames, position, rotation, eulerAngles): ## print(valuesrc) if valuesrc[4] > 0.8: ControlInput = [ valuesrc[1] * self.Size[0], valuesrc[2] * self.Size[1], valuesrc[3] * self.Size[2], valuesrc[5] * self.Size[3] ] elif valuesrc[4] < 0.2: ControlInput = [valuesrc[1], valuesrc[2], valuesrc[3], valuesrc[5]] else: # print(valuesrc[2]) ControlInput = [ valuesrc[1] - 0.55, valuesrc[2] - 0.55, valuesrc[3] - 0.55, valuesrc[5] - 0.1755 ] # print(ControlInput[3]) ## print(position) ## valuesrc = [X_target,Y_target,Z_target,Psi_target] ## position = [X_room_frame,Y_room_frame,Z_room_frame] ## eulerAngles = [Phi_inertial,Theta_inertial,Psi_inertial] dt, Position, PositionRatedt, Rotation, RotationRatedt, Euler, EulerRatedt = FilteredAverage.FilteredAverage( frames, position, rotation, eulerAngles) # Euler = [x/2 for x in Euler] PositionRate = [x * self.SamplingFreq for x in PositionRatedt] RotationRate = [x * self.SamplingFreq for x in RotationRatedt] EulerRate = [x * self.SamplingFreq for x in EulerRatedt] if dt == 0: # valuesrc[4] = 0 # ControlInput = [valuesrc[1],valuesrc[2],valuesrc[3],valuesrc[5]] dt = 0.011 ## print([valuesrc,Position]) X_Vector = ControlInput[0] - Position[0] Y_Vector = ControlInput[1] - Position[1] X_Heading = numpy.sin(numpy.arctan2(Y_Vector, X_Vector) - Euler[2]) * numpy.sqrt(X_Vector * X_Vector + Y_Vector * Y_Vector) Y_Heading = numpy.cos(numpy.arctan2(Y_Vector, X_Vector) - Euler[2]) * numpy.sqrt(X_Vector * X_Vector + Y_Vector * Y_Vector) # print(eulerAngles[-1][1]) # print("{%4f}"%PositionRate[0]) # print(Euler[1]) X_HeadingRate = -numpy.cos(Euler[1]) * PositionRate[0] + numpy.sin( Euler[1]) * PositionRate[1] Y_HeadingRate = -numpy.sin(Euler[1]) * PositionRate[0] - numpy.cos( Euler[1]) * PositionRate[1] # print(PositionRate[2]) self.target = [0, 0, ControlInput[2], ControlInput[3]] self.sensor = [ X_Heading, Y_Heading, Position[2], Euler[2], X_HeadingRate, Y_HeadingRate, PositionRate[2], EulerRate[2] ] # 
print("{%4f}"%Y_HeadingRate) # Target Values and Sensor Values from Communication X_Target_Value = self.target[0] Y_Target_Value = self.target[1] Z_Target_Value = self.target[2] self.Psi_T_List = [ self.Psi_T_List[1], self.Psi_T_List[2], self.target[3] ] ## self.Psi_T_List.append(self.target[3]) ## print(self.Psi_T_List) numpy.unwrap(self.Psi_T_List) ## print(self.Psi_T_List) Psi_Target_Value = self.Psi_T_List[2] ## T_Target_Value = 0.5 ## print(self.sensor) X_Sensor_Value = self.sensor[0] Y_Sensor_Value = self.sensor[1] Z_Sensor_Value = self.sensor[2] self.Psi_S_List = [ self.Psi_S_List[1], self.Psi_S_List[2], self.target[3] ] ## self.Psi_S_List.append(self.sensor[3]) numpy.unwrap(self.Psi_S_List) Psi_Sensor_Value = self.Psi_S_List[2] u_Sensor_Value = self.sensor[4] v_Sensor_Value = self.sensor[5] w_Sensor_Value = self.sensor[6] r_Sensor_Value = self.sensor[7] # Gain Values from Tuning UI G = [x * dt * 10 for x in self.gain[0:40]] ## G = self.gain[0:40] ## print(self.gain[0:40]) KP = G[0:8] KI = G[8:16] KD = G[16:24] Imax = G[24:32] Imin = G[32:40] ## print(KP) self.X.setPoint(X_Target_Value) self.Y.setPoint(Y_Target_Value) self.Z.setPoint(Z_Target_Value) self.Psi.setPoint(Psi_Target_Value) ## print(X_Sensor_Value,KP[0],KI[0],KD[0],Imax[0],Imin[0]) u_Target_Value = self.X.update(X_Sensor_Value, KP[0], KI[0], KD[0], Imax[0], Imin[0]) v_Target_Value = self.Y.update(Y_Sensor_Value, KP[1], KI[1], KD[1], Imax[1], Imin[1]) w_Target_Value = self.Z.update(Z_Sensor_Value, KP[2], KI[2], KD[2], Imax[2], Imin[2]) r_Target_Value = self.Psi.update(Psi_Sensor_Value, KP[3], KI[3], KD[3], Imax[3], Imin[3]) if valuesrc[4] > 0.8: self.u.setPoint(u_Target_Value) self.v.setPoint(v_Target_Value) self.w.setPoint(w_Target_Value) self.r.setPoint(r_Target_Value) elif valuesrc[4] < 0.2: self.u.setPoint(ControlInput[1]) self.v.setPoint(ControlInput[0]) self.w.setPoint(ControlInput[3]) self.r.setPoint(ControlInput[2]) else: self.u.setPoint(ControlInput[1]) self.v.setPoint(ControlInput[0]) 
self.w.setPoint(ControlInput[3]) self.r.setPoint(ControlInput[2]) u_Control = self.u.update(u_Sensor_Value, KP[4], KI[4], KD[4], Imax[4], Imin[4]) v_Control = self.v.update(v_Sensor_Value, KP[5], KI[5], KD[5], Imax[5], Imin[5]) w_Control = self.w.update(w_Sensor_Value, KP[6], KI[6], KD[6], Imax[6], Imin[6]) r_Control = self.r.update(r_Sensor_Value, KP[7], KI[7], KD[7], Imax[7], Imin[7]) Aile_pin = v_Control + 0.55 # Saturation if Aile_pin >= 1.0: Aile_pin = 1.0 elif Aile_pin <= 0.0: Aile_pin = 0.0 Elev_pin = u_Control + 0.55 # Saturation if Elev_pin >= 1.0: Elev_pin = 1.0 elif Elev_pin <= 0.0: Elev_pin = 0.0 Rudd_pin = r_Control + 0.55 # Saturation if Rudd_pin >= 1.0: Rudd_pin = 1.0 elif Rudd_pin <= 0.0: Rudd_pin = 0.0 Coll_pin = w_Control + 0.1755 # Saturation if Coll_pin >= 1.0: Coll_pin = 1.0 elif Coll_pin <= 0.0: Coll_pin = 0.0 ## T_Control = self.T.update(Coll_pin,KP[8],KI[8],KD[8],Imax[8],Imin[8]) ## Thro_pin = T_Control/1000 ## ## # Saturation ## if Thro_pin >= 1.0 : ## Thro_pin = 1.0 ## elif Thro_pin <= 0.0 : ## Thro_pin = 0.0 ## Thro_pin = 0.9 # print(Aile_pin) if valuesrc[4] > 0.8: ## print([valuesrc[0],Aile_pin,Elev_pin,Rudd_pin,valuesrc[4],Coll_pin]) vbarVal = [ valuesrc[0], Aile_pin, Elev_pin, Rudd_pin, valuesrc[4], Coll_pin ] elif valuesrc[4] < 0.2: ## print(valuesrc) vbarVal = valuesrc else: ## print([valuesrc[0],valuesrc[1],valuesrc[2],valuesrc[3],valuesrc[4],Coll_pin]) vbarVal = [ valuesrc[0], Aile_pin, Elev_pin, Rudd_pin, valuesrc[4], Coll_pin ] # vbarVal = [valuesrc[0],valuesrc[1],valuesrc[2],valuesrc[3],valuesrc[4],Coll_pin] ## sys.stdout.write( ## "%4d, %4d, %4d, %4d, %4d, %4d--------------%4d, %4d, %4d, %4d, %4d, %4d\r"%tuple( ## valuesrc[:6]+vbarVal[:6])) ## sys.stdout.flush() ## print(vbarVal) return vbarVal
def frfplt(freq, H, freq_min=0, freq_max=0, FLAG=1):
    """returns Plots frequency response functions in a variety of formats

    :param freq: frequency data (Hz) of shape (1,n_points)
    :param H: Frequency Response Functions, shape (n,n_points)
    :param freq_min: lowest frequency to plot
    :param freq_max: highest frequency to plot (0 means max of freq)
    :param FLAG: type of plot
    :type freq: float array
    :type H: float array
    :type freq_min: float
    :type freq_max: float
    :type FLAG: integer

    :returns:
        ======= =============================================================
        FLAG    Plot Type
        ------- -------------------------------------------------------------
        1 (def) Magnitude and Phase versus F
        2       Magnitude and Phase versus log10(F)
        3       Bodelog  (Magnitude and Phase versus log10(w))
        4       Real and Imaginary
        5       Nyquist (Real versus Imaginary)
        6       Magnitude versus F
        7       Phase versus F
        8       Real versus F
        9       Imaginary versus F
        10      Magnitude versus log10(F)
        11      Phase versus log10(F)
        12      Real versus log10(F)
        13      Imaginary versus log10(F)
        14      Magnitude versus log10(w)
        15      Phase versus log10(w)
        ======= =============================================================

    Copyright J. Slater, Dec 17, 1994
    Updated April 27, 1995
    Ported to Python, July 1, 2015
    """
    freq = freq.reshape(1, -1)
    # BUG FIX: after the reshape above, shape[0] is always 1; the number of
    # frequency points is shape[1] (the original used shape[0], collapsing
    # the plotted band to nothing).
    lenF = freq.shape[1]

    if freq_max == 0:
        freq_max = np.max(freq)
    if freq_min > freq_max:
        raise ValueError('freq_min must be less than freq_max.')

    # Indices of the requested frequency band.
    # BUG FIX: wrap in int() — float // float yields a float, which is not a
    # valid slice index.
    inlow = int(lenF * (freq_min - np.amin(freq)) //
                (np.amax(freq) - np.amin(freq)))
    inhigh = int(lenF * (freq_max - np.amin(freq)) //
                 (np.amax(freq) - np.amin(freq)) - 1)

    H = H[:, inlow:inhigh]
    freq = freq[:, inlow:inhigh]

    mag = 20 * np.log10(np.abs(H))
    minmag = np.amin(mag)
    maxmag = np.amax(mag)
    # Unwrapped phase in degrees; axis limits snapped to 45-degree steps.
    phase = np.unwrap(np.angle(H)) * 180 / np.pi
    phmin = np.amin(phase) // 45 * 45.0
    phmax = (np.amax(phase) // 45 + 1) * 45
    # Real/imaginary extrema (used by the Nyquist/Real/Imag FLAG modes).
    minreal = np.amin(np.real(H))
    maxreal = np.amax(np.real(H))
    minimag = np.amin(np.imag(H))
    maximag = np.amax(np.imag(H))

    if FLAG == 1:
        plt.subplot(2, 1, 1)
        plt.plot(freq.T, mag.T)
        plt.xlabel('Frequency (Hz)')
        plt.ylabel('Mag (dB)')
        plt.grid()
        plt.xlim(xmax=freq_max, xmin=freq_min)
        plt.ylim(ymax=maxmag, ymin=minmag)

        plt.subplot(2, 1, 2)
        plt.plot(freq.T, phase.T)
        plt.xlabel('Frequency (Hz)')
        plt.ylabel('Phase (deg)')
        plt.grid()
        plt.xlim(xmax=freq_max, xmin=freq_min)
        plt.ylim(ymax=phmax, ymin=phmin)
        plt.yticks(np.arange(phmin, (phmax + 45), 45))
        plt.show()
def _unwrap(degrees): '''Updates angles to avoid big jumps For example, [359, 1] becomes [359, 361].''' return np.rad2deg(np.unwrap(np.deg2rad(degrees)))
t = np.arange(0, n * ts, step=ts) sig = sig_ampl * np.sin(2 * np.pi * sig_freq * t) carrier_freq = 50 carrier_amplitude = sig_ampl sig_xlim = (0, 0.4) phase_modulated = carrier_amplitude * np.sin(2 * np.pi * carrier_freq * t + sig) sig_integrated = np.zeros_like(sig) for i, dt in enumerate(t): sig_integrated[i] = integrate.simps(sig, dx=t[i]) freq_modulated = carrier_amplitude * np.sin(2 * np.pi * carrier_freq * t + sig_ampl * sig_integrated) analytic_signal = hilbert(phase_modulated) phase_function = np.unwrap(np.angle(analytic_signal) + np.pi / 2) phase_demodulated = phase_function - 2 * np.pi * carrier_freq * t freq_demodulated = phase_function - 2 * np.pi * carrier_freq * t fft_freq = np.fft.fftfreq(n, ts) phase_modulated_fft = abs(np.fft.fft(phase_modulated)) / n * 2 phase_demodulated_fft = abs(np.fft.fft(phase_demodulated)) / n * 2 freq_modulated_fft = abs(np.fft.fft(freq_modulated)) / n * 2 freq_demodulated_fft = abs(np.fft.fft(freq_demodulated)) / n * 2 plot_graphic(t, phase_modulated, xlim=sig_xlim, x_label='time (s)', y_label='amplitude (V)', show=False)
#The iteration: for k in range(1,100): F=LP.SubIntensity(Ifar,F) #Substitute the measured far field into the field F=LP.Interpol(size_new,N_new,0,0,0,1,F);#interpolate to a new grid F=LP.Forvard(-z,F) #Propagate back to the near field F=LP.Interpol(size,N,0,0,0,1,F) #interpolate to the original grid F=LP.SubIntensity(Inear,F) #Substitute the measured near field into the field F=LP.Forvard(z,F) #Propagate to the far field #The recovered far- and near field and their phase- and intensity #distributions (phases are unwrapped (i.e. remove multiples of PI)): Ffar_rec=F; Ifar_rec=LP.Intensity(0,Ffar_rec); Phase_far_rec=LP.Phase(Ffar_rec); Phase_far_rec=np.unwrap(np.unwrap(Phase_far_rec,np.pi,0),np.pi,1); Fnear_rec=LP.Forvard(-z,F); Inear_rec=LP.Intensity(0,Fnear_rec); Phase_near_rec=LP.Phase(Fnear_rec); Phase_near_rec=np.unwrap(np.unwrap(Phase_near_rec,np.pi,1),np.pi,0); #Plot the recovered intensity- and phase distributions: plt.subplot(3,2,3);plt.imshow(Inear_rec); plt.title('Recovered Intensity near field'); plt.axis ('off') plt.subplot(3,2,4);plt.imshow(Ifar_rec); plt.title('Recovered Intensity far field'); plt.axis ('off') plt.subplot(3,2,5);plt.imshow(Phase_near_rec); plt.title('Recovered phase near field');plt.axis ('off') plt.subplot(3,2,6);plt.imshow(Phase_far_rec); plt.title('Recovered phase far field'); plt.axis ('off') plt.show()
def groupdelay(self, x): pl.grid(True) gd = np.convolve(np.unwrap(np.angle(x)), [1, -1], mode='same') pl.plot(self.frequencies, gd)
def rare_standalone(
    init_gpa=False,                          # Starts the gpa
    nScans = 1,                              # NEX
    larmorFreq = 3.07454,                    # MHz, Larmor frequency
    rfExAmp = 0.05,                          # a.u., rf excitation pulse amplitude
    rfReAmp = 0.1,                           # a.u., rf refocusing pulse amplitude
    rfExTime = 190,                          # us, rf excitation pulse time
    rfReTime = 190,                          # us, rf refocusing pulse time
    echoSpacing = 6.,                        # ms, time between echoes
    inversionTime = 0.,                      # ms, Inversion recovery time
    repetitionTime = 10000.,                 # ms, TR
    fov = np.array([120., 120., 120.]),      # mm, FOV along readout, phase and slice
    dfov = np.array([0., 0., 0.]),           # mm, displacement of fov center
    nPoints = np.array([60, 10, 1]),         # Number of points along readout, phase and slice
    slThickness = 15,                        # mm, slice thickness
    etl = 60,                                # Echo train length
    acqTime = 2,                             # ms, acquisition time
    axes = np.array([1, 0, 2]),              # 0->x, 1->y and 2->z defined as [rd,ph,sl]
    axesEnable = np.array([1, 1, 0]),        # 1-> Enable, 0-> Disable
    sweepMode = 1,                           # 0->k2k (T2), 1->02k (T1), 2->k20 (T2), 3->Niquist modulated (T2)
    rdGradTime = 2.5,                        # ms, readout gradient time
    rdDephTime = 1,                          # ms, readout dephasing time
    phGradTime = 1,                          # ms, phase and slice dephasing time
    rdPreemphasis = 1.005,                   # readout dephasing gradient is multiplied by this factor
    ssPreemphasis = 0.45,                    # ssGradAmplitude is multiplied by this number for rephasing
    drfPhase = 0,                            # degrees, phase of the excitation pulse
    dummyPulses = 1,                         # number of dummy pulses for T1 stabilization
    shimming = np.array([-70., -90., 10.]),  # a.u.*1e4, shimming along the X,Y and Z axes
    parAcqLines = 0                          # number of additional lines, Full sweep if 0
    ):
    """Run a stand-alone RARE (turbo spin echo) sequence.

    Builds the RF/gradient event lists, optionally calibrates the Larmor
    frequency, runs ``nScans`` acquisitions, reconstructs the 3-D k-space and
    image, saves everything to a .mat file and plots the result.

    NOTE(review): relies on module-level names not visible in this chunk:
    ``np``, ``ex`` (experiment driver), ``sig`` (scipy.signal), ``plt``,
    ``savemat``, ``datetime``/``date``, ``os`` and the helpers
    ``reorganizeGfactor``, ``getIndex`` and ``fixEchoPosition``.

    :returns: None -- results are saved to disk and shown with matplotlib.
    """
    ssDelay = 190          # us, extra delay applied to the crusher gradient
    freqCal = 0            # 1 -> run the frequency-calibration pre-scan
    # rawData fields
    rawData = {}
    # Conversion of variables to non-multiplied units (s, Hz, m)
    larmorFreq = larmorFreq*1e6
    rfExTime = rfExTime*1e-6
    rfReTime = rfReTime*1e-6
    fov = np.array(fov)*1e-3
    dfov = np.array(dfov)*1e-3
    echoSpacing = echoSpacing*1e-3
    acqTime = acqTime*1e-3
    shimming = shimming*1e-4
    repetitionTime= repetitionTime*1e-3
    inversionTime = inversionTime*1e-3
    rdGradTime = rdGradTime*1e-3
    rdDephTime = rdDephTime*1e-3
    phGradTime = phGradTime*1e-3
    slThickness = slThickness*1e-3
    # Inputs for rawData (saved verbatim so the acquisition is reproducible)
    rawData['nScans'] = nScans
    rawData['larmorFreq'] = larmorFreq      # Larmor frequency
    rawData['rfExAmp'] = rfExAmp            # rf excitation pulse amplitude
    rawData['rfReAmp'] = rfReAmp            # rf refocusing pulse amplitude
    rawData['rfExTime'] = rfExTime          # rf excitation pulse time
    rawData['rfReTime'] = rfReTime          # rf refocusing pulse time
    rawData['echoSpacing'] = echoSpacing    # time between echoes
    rawData['inversionTime'] = inversionTime # Inversion recovery time
    rawData['repetitionTime'] = repetitionTime # TR
    rawData['fov'] = fov                    # FOV along readout, phase and slice
    rawData['dfov'] = dfov                  # Displacement of fov center
    rawData['nPoints'] = nPoints            # Number of points along readout, phase and slice
    rawData['etl'] = etl                    # Echo train length
    rawData['acqTime'] = acqTime            # Acquisition time
    rawData['axesOrientation'] = axes       # 0->x, 1->y and 2->z defined as [rd,ph,sl]
    rawData['axesEnable'] = axesEnable      # 1-> Enable, 0-> Disable
    rawData['sweepMode'] = sweepMode        # 0->k2k (T2), 1->02k (T1), 2->k20 (T2), 3->Niquist modulated (T2)
    rawData['rdPreemphasis'] = rdPreemphasis
    rawData['ssPreemphasis'] = ssPreemphasis
    rawData['drfPhase'] = drfPhase
    rawData['dummyPulses'] = dummyPulses    # Dummy pulses for T1 stabilization
    rawData['partialAcquisition'] = parAcqLines
    rawData['rdDephTime'] = rdDephTime
    rawData['sliceThickness'] = slThickness
    # Miscellaneous
    blkTime = 10             # Deblanking time (us)
    larmorFreq = larmorFreq*1e-6   # back to MHz for the experiment driver
    gradRiseTime = 400e-6    # Estimated gradient rise time
    # NOTE(review): the `*0+1` forces a single gradient step -- confirm intent
    gSteps = int(gradRiseTime*1e6/5)*0+1
    gradDelay = 9            # Gradient amplifier delay
    addRdPoints = 10         # Initial rd points to avoid artifact at the beginning of rd
    gammaB = 42.56e6         # Gyromagnetic ratio in Hz/T
    oversamplingFactor = 6
    randFactor = 0e-3        # Random amplitude to add to the phase gradients
    if rfReAmp==0:
        rfReAmp = rfExAmp
    if rfReTime==0:
        rfReTime = 2*rfExTime
    resolution = fov/nPoints
    rawData['resolution'] = resolution
    rawData['gradDelay'] = gradDelay*1e-6
    rawData['gradRiseTime'] = gradRiseTime
    rawData['oversamplingFactor'] = oversamplingFactor
    rawData['randFactor'] = randFactor
    rawData['addRdPoints'] = addRdPoints
    # Matrix size (disabled axes collapse to a single point)
    nRD = nPoints[0]+2*addRdPoints
    nPH = nPoints[1]*axesEnable[1]+(1-axesEnable[1])
    nSL = nPoints[2]*axesEnable[2]+(1-axesEnable[2])
    # ETL if nPH = 1
    if etl>nPH:
        etl = nPH
    # parAcqLines in case parAcqLines = 0
    if parAcqLines==0:
        parAcqLines = int(nSL/2)
    # BW (MHz) before and after oversampling
    BW = nPoints[0]/acqTime*1e-6
    BWov = BW*oversamplingFactor
    samplingPeriod = 1/BWov
    # Readout gradient time
    # NOTE(review): this *extends* a short readout gradient to cover the full
    # acquisition window -- confirm that is the intended clamp direction.
    if rdGradTime>0 and rdGradTime<acqTime:
        rdGradTime = acqTime
    rawData['rdGradTime'] = rdGradTime
    # Phase and slice de- and re-phasing time (bounded by the echo half-period)
    if phGradTime==0 or phGradTime>echoSpacing/2-rfExTime/2-rfReTime/2-2*gradRiseTime:
        phGradTime = echoSpacing/2-rfExTime/2-rfReTime/2-2*gradRiseTime
    rawData['phGradTime'] = phGradTime
    # Max gradient amplitude (T/m)
    rdGradAmplitude = nPoints[0]/(gammaB*fov[0]*acqTime)*axesEnable[0]
    phGradAmplitude = nPH/(2*gammaB*fov[1]*(phGradTime+gradRiseTime))*axesEnable[1]
    slGradAmplitude = nSL/(2*gammaB*fov[2]*(phGradTime+gradRiseTime))*axesEnable[2]
    rawData['rdGradAmplitude'] = rdGradAmplitude
    rawData['phGradAmplitude'] = phGradAmplitude
    rawData['slGradAmplitude'] = slGradAmplitude
    # Slice selection gradient
    if slThickness!=0:
        ssGradAmplitude = 1/(gammaB*slThickness*rfExTime)
    else:
        ssGradAmplitude = 0
    print(ssGradAmplitude*1e3, ' mT/m')
    # Readout dephasing amplitude (half the readout gradient area)
    rdDephAmplitude = 0.5*rdGradAmplitude*(gradRiseTime+rdGradTime)/(gradRiseTime+rdDephTime)
    rawData['rdDephAmplitude'] = rdDephAmplitude
    # Get factors to OCRA1 units
    gFactor = reorganizeGfactor(axes)
    rawData['gFactor'] = gFactor
    # Phase and slice gradient vector
    phGradients = np.linspace(-phGradAmplitude,phGradAmplitude,num=nPH,endpoint=False)
    slGradients = np.linspace(-slGradAmplitude,slGradAmplitude,num=nSL,endpoint=False)
    # Now fix the number of slices to partially acquired k-space
    nSL = (int(nPoints[2]/2)+parAcqLines)*axesEnable[2]+(1-axesEnable[2])
    # Add random displacement to phase encoding lines (outside the k-space
    # center) -- disabled while randFactor is 0
    for ii in range(nPH):
        if ii<np.ceil(nPH/2-nPH/20) or ii>np.ceil(nPH/2+nPH/20):
            phGradients[ii] = phGradients[ii]+randFactor*np.random.randn()
    kPH = gammaB*phGradients*(gradRiseTime+phGradTime)
    rawData['phGradients'] = phGradients
    rawData['slGradients'] = slGradients
    # Change units to OCRA1 board
    rdGradAmplitude = rdGradAmplitude/gFactor[0]*1000/5
    rdDephAmplitude = rdDephAmplitude/gFactor[0]*1000/5
    phGradients = phGradients/gFactor[1]*1000/5
    slGradients = slGradients/gFactor[2]*1000/5
    ssGradAmplitude = ssGradAmplitude/gFactor[2]*1000/5
    # Set phase vector to given sweep mode
    ind = getIndex(phGradients, etl, nPH, sweepMode)
    rawData['sweepOrder'] = ind
    phGradients = phGradients[ind]

    # Create functions -- all of them close over `expt` (created below) and
    # append events to its flodict.
    def rfPulse(tStart,rfTime,rfAmplitude,rfPhase):
        # Schedule an RF pulse (with deblanking gate) starting at tStart (us).
        txTime = np.array([tStart+blkTime,tStart+blkTime+rfTime])
        txAmp = np.array([rfAmplitude*np.exp(1j*rfPhase),0.])
        txGateTime = np.array([tStart,tStart+blkTime+rfTime])
        txGateAmp = np.array([1,0])
        expt.add_flodict({
            'tx0': (txTime, txAmp),
            'tx_gate': (txGateTime, txGateAmp)
            })

    def rxGate(tStart,gateTime):
        # Open the receiver gate for gateTime (us).
        rxGateTime = np.array([tStart,tStart+gateTime])
        rxGateAmp = np.array([1,0])
        expt.add_flodict({
            'rx0_en':(rxGateTime, rxGateAmp),
            'rx_gate': (rxGateTime, rxGateAmp),
            })

    def gradTrap(tStart, gTime, gAmp, gAxis):
        # Trapezoidal gradient: gSteps-point ramp up, flat top of gTime,
        # ramp down; shimming offset is added on top.
        tUp = np.linspace(tStart, tStart+gradRiseTime, num=gSteps, endpoint=False)
        tDown = tUp+gradRiseTime+gTime
        t = np.concatenate((tUp, tDown), axis=0)
        dAmp = gAmp/gSteps
        aUp = np.linspace(dAmp, gAmp, num=gSteps)
        aDown = np.linspace(gAmp-dAmp, 0, num=gSteps)
        a = np.concatenate((aUp, aDown), axis=0)
        if gAxis==0:
            expt.add_flodict({'grad_vx': (t, a+shimming[0])})
        elif gAxis==1:
            expt.add_flodict({'grad_vy': (t, a+shimming[1])})
        elif gAxis==2:
            expt.add_flodict({'grad_vz': (t, a+shimming[2])})

    def gradPulse(tStart, gTime, gAmp, gAxes):
        # Rectangular gradient pulse on one or more axes.
        t = np.array([tStart, tStart+gradRiseTime+gTime])
        for gIndex in range(np.size(gAxes)):
            a = np.array([gAmp[gIndex], 0])
            if gAxes[gIndex]==0:
                expt.add_flodict({'grad_vx': (t, a+shimming[0])})
            elif gAxes[gIndex]==1:
                expt.add_flodict({'grad_vy': (t, a+shimming[1])})
            elif gAxes[gIndex]==2:
                expt.add_flodict({'grad_vz': (t, a+shimming[2])})

    def endSequence(tEnd):
        # Drive all gradients back to zero at tEnd.
        expt.add_flodict({
            'grad_vx': (np.array([tEnd]),np.array([0]) ),
            'grad_vy': (np.array([tEnd]),np.array([0]) ),
            'grad_vz': (np.array([tEnd]),np.array([0]) ),
            })

    def iniSequence(tEnd):
        # Set the static shimming currents at the start of the sequence.
        expt.add_flodict({
            'grad_vx': (np.array([tEnd]),np.array([shimming[0]]) ),
            'grad_vy': (np.array([tEnd]),np.array([shimming[1]]) ),
            'grad_vz': (np.array([tEnd]),np.array([shimming[2]]) ),
            })

    def createSequence():
        # Build the full RARE acquisition: dummy TRs for T1 stabilization
        # followed by nPH*nSL/etl imaging TRs of etl echoes each.
        phIndex = 0
        slIndex = 0
        nRepetitions = int(nPH*nSL/etl+dummyPulses)
        scanTime = 20e3+nRepetitions*repetitionTime
        rawData['scanTime'] = scanTime*1e-6
        if rdGradTime==0:   # Check if readout gradient is dc or pulsed
            dc = True
        else:
            dc = False
        # Set shimming
        iniSequence(20)
        for repeIndex in range(nRepetitions):
            # Initialize time
            tEx = 20e3+repetitionTime*repeIndex+inversionTime
            # Inversion pulse
            if repeIndex>=dummyPulses and inversionTime!=0:
                t0 = tEx-inversionTime-rfReTime/2-blkTime
                # NOTE(review): rfReAmp/180*180 is a no-op factor -- confirm
                rfPulse(t0,rfReTime,rfReAmp/180*180,0)
                # Spoiler gradients during the inversion delay
                gradTrap(t0+blkTime+rfReTime, inversionTime*0.5, 0.2, axes[0])
                gradTrap(t0+blkTime+rfReTime, inversionTime*0.5, 0.2, axes[1])
                gradTrap(t0+blkTime+rfReTime, inversionTime*0.5, 0.2, axes[2])
            # DC readout gradient if desired
            if (repeIndex==0 or repeIndex>=dummyPulses) and dc==True:
                t0 = tEx-10e3
                gradTrap(t0, 10e3+echoSpacing*(etl+1), rdGradAmplitude, axes[0])
            # Slice selection gradient dephasing
            if (slThickness!=0 and repeIndex>=dummyPulses):
                t0 = tEx-rfExTime/2-gradRiseTime-gradDelay
                gradTrap(t0, rfExTime, ssGradAmplitude, axes[2])
            # Excitation pulse
            t0 = tEx-blkTime-rfExTime/2
            rfPulse(t0,rfExTime,rfExAmp,drfPhase*np.pi/180)
            # Slice selection gradient rephasing
            if (slThickness!=0 and repeIndex>=dummyPulses):
                t0 = tEx+rfExTime/2+gradRiseTime-gradDelay
                gradTrap(t0, 0., -ssGradAmplitude*ssPreemphasis, axes[2])
            # Dephasing readout
            t0 = tEx+rfExTime/2-gradDelay
            if (repeIndex==0 or repeIndex>=dummyPulses) and dc==False:
                gradTrap(t0, rdDephTime, rdDephAmplitude*rdPreemphasis, axes[0])
            # Echo train
            for echoIndex in range(etl):
                tEcho = tEx+echoSpacing*(echoIndex+1)
                # Crusher gradient
                if repeIndex>=dummyPulses:
                    t0 = tEcho-echoSpacing/2-rfReTime/2-gradRiseTime-gradDelay+ssDelay
                    gradTrap(t0, rfReTime, ssGradAmplitude, axes[2])
                # Refocusing pulse (90 degrees out of phase with excitation)
                t0 = tEcho-echoSpacing/2-rfReTime/2-blkTime
                rfPulse(t0, rfReTime, rfReAmp, np.pi/2)
                # # post-crusher gradient
                # if repeIndex>=dummyPulses:
                #     t0 = tEcho-echoSpacing/2+rfReTime/2-gradDelay
                #     gradTrap(t0, rfExTime, 0.2, axes[2])
                # Dephasing phase and slice gradients
                t0 = tEcho-echoSpacing/2+rfReTime/2-gradDelay
                if repeIndex>=dummyPulses:  # This is to account for dummy pulses
                    gradTrap(t0, phGradTime, phGradients[phIndex], axes[1])
                    gradTrap(t0, phGradTime, slGradients[slIndex], axes[2])
                # Readout gradient
                t0 = tEcho-rdGradTime/2-gradRiseTime-gradDelay
                if (repeIndex==0 or repeIndex>=dummyPulses) and dc==False:  # This is to account for dummy pulses
                    gradTrap(t0, rdGradTime, rdGradAmplitude, axes[0])
                # Rx gate
                if (repeIndex==0 or repeIndex>=dummyPulses):
                    t0 = tEcho-acqTime/2-addRdPoints/BW
                    rxGate(t0, acqTime+2*addRdPoints/BW)
                # Rephasing phase and slice gradients
                t0 = tEcho+acqTime/2+addRdPoints/BW-gradDelay
                if (echoIndex<etl-1 and repeIndex>=dummyPulses):
                    gradTrap(t0, phGradTime, -phGradients[phIndex], axes[1])
                    gradTrap(t0, phGradTime, -slGradients[slIndex], axes[2])
                elif(echoIndex==etl-1 and repeIndex>=dummyPulses):
                    gradTrap(t0, phGradTime, +phGradients[phIndex], axes[1])
                    gradTrap(t0, phGradTime, +slGradients[slIndex], axes[2])
                # Update the phase and slice gradient
                if repeIndex>=dummyPulses:
                    if phIndex == nPH-1:
                        phIndex = 0
                        slIndex += 1
                    else:
                        phIndex += 1
            if repeIndex==nRepetitions-1:
                endSequence(scanTime)

    def createFreqCalSequence():
        # Single spin echo used to measure the off-resonance and refine
        # larmorFreq before the real scan.
        t0 = 20
        # Shimming
        iniSequence(t0)
        # Excitation pulse
        rfPulse(t0,rfExTime,rfExAmp,drfPhase*np.pi/180)
        # Refocusing pulse
        t0 += rfExTime/2+echoSpacing/2-rfReTime/2
        rfPulse(t0, rfReTime, rfReAmp, np.pi/2)
        # Rx
        t0 += blkTime+rfReTime/2+echoSpacing/2-acqTime/2-addRdPoints/BW
        rxGate(t0, acqTime+2*addRdPoints/BW)
        # Finalize sequence
        endSequence(repetitionTime)

    # Changing time parameters to us (the sequence builders above are called
    # only after this conversion, so they see microsecond values)
    rfExTime = rfExTime*1e6
    rfReTime = rfReTime*1e6
    echoSpacing = echoSpacing*1e6
    repetitionTime = repetitionTime*1e6
    gradRiseTime = gradRiseTime*1e6
    phGradTime = phGradTime*1e6
    rdGradTime = rdGradTime*1e6
    rdDephTime = rdDephTime*1e6
    inversionTime = inversionTime*1e6

    # Calibrate frequency (skipped while freqCal == 0)
    if freqCal==1:
        expt = ex.Experiment(lo_freq=larmorFreq, rx_t=samplingPeriod, init_gpa=init_gpa, gpa_fhdo_offset_time=(1 / 0.2 / 3.1))
        samplingPeriod = expt.get_rx_ts()[0]
        BW = 1/samplingPeriod/oversamplingFactor
        acqTime = nPoints[0]/BW        # us
        rawData['bw'] = BW*1e6
        createFreqCalSequence()
        rxd, msgs = expt.run()
        dataFreqCal = sig.decimate(rxd['rx0']*13.788, oversamplingFactor, ftype='fir', zero_phase=True)
        dataFreqCal = dataFreqCal[addRdPoints:nPoints[0]+addRdPoints]
        # Plot fid
        # plt.figure(1)
        tVector = np.linspace(-acqTime/2, acqTime/2, num=nPoints[0],endpoint=True)*1e-3
        # plt.subplot(1, 2, 1)
        # plt.plot(tVector, np.abs(dataFreqCal))
        # plt.title("Signal amplitude")
        # plt.xlabel("Time (ms)")
        # plt.ylabel("Amplitude (mV)")
        # plt.subplot(1, 2, 2)
        # Unwrapped FID phase: its slope gives the off-resonance frequency.
        angle = np.unwrap(np.angle(dataFreqCal))
        # plt.title("Signal phase")
        # plt.xlabel("Time (ms)")
        # plt.ylabel("Phase (rad)")
        # plt.plot(tVector, angle)
        # Get larmor frequency
        dPhi = angle[-1]-angle[0]
        df = dPhi/(2*np.pi*acqTime)
        larmorFreq += df
        rawData['larmorFreq'] = larmorFreq*1e6
        print("f0 = %s MHz" % (round(larmorFreq, 5)))
        # Plot sequence:
        # expt.plot_sequence()
        # plt.show()
        # Delete experiment:
        expt.__del__()

    # Create full sequence
    expt = ex.Experiment(lo_freq=larmorFreq, rx_t=samplingPeriod, init_gpa=init_gpa, gpa_fhdo_offset_time=(1 / 0.2 / 3.1))
    samplingPeriod = expt.get_rx_ts()[0]
    BW = 1/samplingPeriod/oversamplingFactor
    acqTime = nPoints[0]/BW        # us
    createSequence()
    # Plot sequence:
    # expt.plot_sequence()

    # Run the experiment
    dataFull = []
    dummyData = []
    overData = []
    for ii in range(nScans):
        print("Scan %s ..." % (ii+1))
        rxd, msgs = expt.run()
        rxd['rx0'] = rxd['rx0']*13.788   # Here I normalize to get the result in mV
        # Get data: the first TR of each scan belongs to the dummy pulses
        if dummyPulses>0:
            dummyData = np.concatenate((dummyData, rxd['rx0'][0:nRD*etl*oversamplingFactor]), axis = 0)
            overData = np.concatenate((overData, rxd['rx0'][nRD*etl*oversamplingFactor::]), axis = 0)
        else:
            overData = np.concatenate((overData, rxd['rx0']), axis = 0)
    expt.__del__()
    print('Scans done!')
    rawData['overData'] = overData

    # Fix the echo position using oversampled data from the dummy echo train
    if dummyPulses>0:
        dummyData = np.reshape(dummyData, (nScans, etl, nRD*oversamplingFactor))
        dummyData = np.average(dummyData, axis=0)
        rawData['dummyData'] = dummyData
        overData = np.reshape(overData, (nScans, int(nPH/etl*nSL), etl, nRD*oversamplingFactor))
        for ii in range(nScans):
            overData[ii, :, :, :] = fixEchoPosition(dummyData, overData[ii, :, :, :])

    # Generate dataFull (downsample back to the nominal bandwidth)
    overData = np.squeeze(np.reshape(overData, (1, nRD*oversamplingFactor*nPH*nSL*nScans)))
    dataFull = sig.decimate(overData, oversamplingFactor, ftype='fir', zero_phase=True)

    # Get index for krd = 0
    # Average data
    dataProv = np.reshape(dataFull, (nScans, nRD*nPH*nSL))
    dataProv = np.average(dataProv, axis=0)
    # Reorganize the data according to sweep mode
    dataProv = np.reshape(dataProv, (nSL, nPH, nRD))
    dataTemp = dataProv*0
    for ii in range(nPH):
        dataTemp[:, ind[ii], :] = dataProv[:, ii, :]
    dataProv = dataTemp
    # Check where is krd = 0
    dataProv = dataProv[int(nPoints[2]/2), int(nPH/2), :]
    indkrd0 = np.argmax(np.abs(dataProv))
    if indkrd0 < nRD/2-addRdPoints or indkrd0 > nRD+addRdPoints:
        indkrd0 = int(nRD/2)
    # NOTE(review): this unconditional assignment makes the detection above
    # dead code -- possibly left in while debugging; confirm.
    indkrd0 = int(nRD/2)

    # Get individual images (crop around the echo center, reorder lines)
    dataFull = np.reshape(dataFull, (nScans, nSL, nPH, nRD))
    dataFull = dataFull[:, :, :, indkrd0-int(nPoints[0]/2):indkrd0+int(nPoints[0]/2)]
    dataTemp = dataFull*0
    for ii in range(nPH):
        dataTemp[:, :, ind[ii], :] = dataFull[:, :, ii, :]
    dataFull = dataTemp
    imgFull = dataFull*0
    for ii in range(nScans):
        imgFull[ii, :, :, :] = np.fft.ifftshift(np.fft.ifftn(np.fft.ifftshift(dataFull[ii, :, :, :])))
    rawData['dataFull'] = dataFull
    rawData['imgFull'] = imgFull

    # Average data
    data = np.average(dataFull, axis=0)
    data = np.reshape(data, (nSL, nPH, nPoints[0]))

    # Do zero padding (for partial slice acquisition)
    dataTemp = np.zeros((nPoints[2], nPoints[1], nPoints[0]))
    dataTemp = dataTemp+1j*dataTemp
    if nSL==1 or (nSL>1 and parAcqLines==0):
        dataTemp = data
    elif nSL>1 and parAcqLines>0:
        # NOTE(review): 0:nSL-1 drops the last acquired slice -- possible
        # off-by-one; confirm against the acquisition loop.
        dataTemp[0:nSL-1, :, :] = data[0:nSL-1, :, :]
    data = np.reshape(dataTemp, (1, nPoints[0]*nPoints[1]*nPoints[2]))

    # Fix the position of the sample according to dfov (linear phase in k-space)
    kMax = np.array(nPoints)/(2*np.array(fov))*np.array(axesEnable)
    kRD = np.linspace(-kMax[0],kMax[0],num=nPoints[0],endpoint=False)
    # kPH = np.linspace(-kMax[1],kMax[1],num=nPoints[1],endpoint=False)
    kSL = np.linspace(-kMax[2],kMax[2],num=nPoints[2],endpoint=False)
    # kPH computed earlier from the actual phase gradients; reverse it here
    kPH = kPH[::-1]
    kPH, kSL, kRD = np.meshgrid(kPH, kSL, kRD)
    kRD = np.reshape(kRD, (1, nPoints[0]*nPoints[1]*nPoints[2]))
    kPH = np.reshape(kPH, (1, nPoints[0]*nPoints[1]*nPoints[2]))
    kSL = np.reshape(kSL, (1, nPoints[0]*nPoints[1]*nPoints[2]))
    dPhase = np.exp(-2*np.pi*1j*(dfov[0]*kRD+dfov[1]*kPH+dfov[2]*kSL))
    data = np.reshape(data*dPhase, (nPoints[2], nPoints[1], nPoints[0]))
    rawData['kSpace3D'] = data
    img=np.fft.ifftshift(np.fft.ifftn(np.fft.ifftshift(data)))
    rawData['image3D'] = img
    data = np.reshape(data, (1, nPoints[0]*nPoints[1]*nPoints[2]))

    # Create sampled data (k coordinates + complex sample per row)
    kRD = np.reshape(kRD, (nPoints[0]*nPoints[1]*nPoints[2], 1))
    kPH = np.reshape(kPH, (nPoints[0]*nPoints[1]*nPoints[2], 1))
    kSL = np.reshape(kSL, (nPoints[0]*nPoints[1]*nPoints[2], 1))
    data = np.reshape(data, (nPoints[0]*nPoints[1]*nPoints[2], 1))
    rawData['kMax'] = kMax
    rawData['sampled'] = np.concatenate((kRD, kPH, kSL, data), axis=1)
    data = np.reshape(data, (nPoints[2], nPoints[1], nPoints[0]))

    # Save data under experiments/acquisitions/<date>/<timestamp>/
    dt = datetime.now()
    dt_string = dt.strftime("%Y.%m.%d.%H.%M.%S")
    dt2 = date.today()
    dt2_string = dt2.strftime("%Y.%m.%d")
    if not os.path.exists('experiments/acquisitions/%s' % (dt2_string)):
        os.makedirs('experiments/acquisitions/%s' % (dt2_string))
    if not os.path.exists('experiments/acquisitions/%s/%s' % (dt2_string, dt_string)):
        os.makedirs('experiments/acquisitions/%s/%s' % (dt2_string, dt_string))
    rawData['fileName'] = "%s.%s.mat" % ("RARE",dt_string)
    savemat("experiments/acquisitions/%s/%s/%s.%s.mat" % (dt2_string, dt_string, "Old_RARE",dt_string), rawData)

    # Plot data for 1D case
    if (nPH==1 and nSL==1):
        # Plot k-space
        plt.figure(3)
        dataPlot = data[0, 0, :]
        plt.subplot(1, 2, 1)
        if axesEnable[0]==0:
            # Pure FID/echo: estimate T2 from the 1/3-amplitude crossing
            tVector = np.linspace(-acqTime/2, acqTime/2, num=nPoints[0],endpoint=False)*1e-3
            sMax = np.max(np.abs(dataPlot))
            indMax = np.argmax(np.abs(dataPlot))
            timeMax = tVector[indMax]
            sMax3 = sMax/3
            dataPlot3 = np.abs(np.abs(dataPlot)-sMax3)
            indMin = np.argmin(dataPlot3)
            timeMin = tVector[indMin]
            T2 = np.abs(timeMax-timeMin)
            plt.plot(tVector, np.abs(dataPlot))
            plt.plot(tVector, np.real(dataPlot))
            plt.plot(tVector, np.imag(dataPlot))
            plt.xlabel('t (ms)')
            plt.ylabel('Signal (mV)')
            print("T2 = %s us" % (T2))
        else:
            plt.plot(kRD[:, 0], np.abs(dataPlot))
            plt.yscale('log')
            plt.xlabel('krd (mm^-1)')
            plt.ylabel('Signal (mV)')
            echoTime = np.argmax(np.abs(dataPlot))
            echoTime = kRD[echoTime, 0]
            print("Echo position = %s mm^{-1}" %round(echoTime, 1))
        # Plot image
        plt.subplot(122)
        img = img[0, 0, :]
        if axesEnable[0]==0:
            xAxis = np.linspace(-BW/2, BW/2, num=nPoints[0], endpoint=False)*1e3
            plt.plot(xAxis, np.abs(img), '.')
            plt.xlabel('Frequency (kHz)')
            plt.ylabel('Density (a.u.)')
            print("Smax = %s mV" % (np.max(np.abs(img))))
        else:
            xAxis = np.linspace(-fov[0]/2*1e2, fov[0]/2*1e2, num=nPoints[0], endpoint=False)
            plt.plot(xAxis, np.abs(img))
            plt.xlabel('Position RD (cm)')
            plt.ylabel('Density (a.u.)')
    else:
        # Plot k-space (central slice)
        plt.figure(3)
        dataPlot = data[round(nSL/2), :, :]
        plt.subplot(131)
        plt.imshow(np.log(np.abs(dataPlot)),cmap='gray')
        plt.axis('off')
        # Plot image
        if sweepMode==3:
            imgPlot = img[round(nSL/2), round(nPH/4):round(3*nPH/4), :]
        else:
            imgPlot = img[round(nSL/2), :, :]
        plt.subplot(132)
        plt.imshow(np.abs(imgPlot), cmap='gray')
        plt.axis('off')
        plt.title("RARE.%s.mat" % (dt_string))
        plt.subplot(133)
        plt.imshow(np.angle(imgPlot), cmap='gray')
        plt.axis('off')
        # plot full image (all slices tiled side by side)
        if nSL>1:
            plt.figure(4)
            img2d = np.zeros((nPoints[1], nPoints[0]*nPoints[2]))
            img2d = img2d+1j*img2d
            for ii in range(nPoints[2]):
                img2d[:, ii*nPoints[0]:(ii+1)*nPoints[0]] = img[ii, :, :]
            plt.imshow(np.abs(img2d), cmap='gray')
            plt.axis('off')
            plt.title("RARE.%s.mat" % (dt_string))
    # plt.figure(5)
    # plt.subplot(121)
    # data1d = data[int(nSL/2), :, int(nPoints[0]/2)]
    # plt.plot(abs(data1d))
    # plt.subplot(122)
    # img1d = img[int(nSL/2), :, int(nPoints[0]/2)]
    # plt.plot(np.abs(img1d)*1e3)
    plt.show()
def __init__(self, wav, frame_size=2048, fps=200, filterbank=None,
             log=False, mul=1, add=1, online=True, block_size=526,
             lgd=True):
    """
    Creates a new Spectrogram object instance and performs a STFT on the
    given audio.

    :param wav: a Wav object
    :param frame_size: the size for the window [samples]
    :param fps: frames per second
    :param filterbank: use the given filterbank for dimensionality reduction
    :param log: use logarithmic magnitude
    :param mul: multiply the magnitude by this factor before taking the
        logarithm
    :param add: add this value to the magnitude before taking the logarithm
    :param online: work in online mode (i.e. use only past information)
    :param block_size: perform the filtering in blocks of the given size
        (NOTE(review): the default 526 looks like a typo for 512 -- confirm)
    :param lgd: compute the local group delay (needed for the ComplexFlux
        algorithm)

    :raises ValueError: if ``mul`` or ``add`` is not positive

    """
    # init some variables
    self.wav = wav
    self.fps = fps
    self.filterbank = filterbank
    # log-magnitude parameters must keep log10(mul * spec + add) finite
    if add <= 0:
        raise ValueError("a positive value must be added before taking "
                         "the logarithm")
    if mul <= 0:
        raise ValueError("a positive value must be multiplied before "
                         "taking the logarithm")
    # derive some variables
    # use floats so that seeking works properly
    self.hop_size = float(self.wav.sample_rate) / float(self.fps)
    self.num_frames = int(np.ceil(self.wav.num_samples / self.hop_size))
    self.num_fft_bins = int(frame_size / 2)
    # initial number of bins equal to fft bins, but those can change if
    # filters are used
    self.num_bins = int(frame_size / 2)
    # init spec matrix
    if filterbank is None:
        # init with number of FFT frequency bins
        self.spec = np.empty([self.num_frames, self.num_fft_bins],
                             dtype=np.float32)
    else:
        # init with number of filter bands
        self.spec = np.empty(
            [self.num_frames, np.shape(filterbank)[1]], dtype=np.float32)
        # set number of bins
        self.num_bins = np.shape(filterbank)[1]
    # set the block size (0/None or too large -> process all frames at once)
    if not block_size or block_size > self.num_frames:
        block_size = self.num_frames
    # init block counter
    block = 0
    # init a matrix of that size (scratch buffer for block-wise filtering)
    spec = np.zeros([block_size, self.num_fft_bins])
    # init the local group delay matrix
    self.lgd = None
    if lgd:
        self.lgd = np.zeros([self.num_frames, self.num_fft_bins],
                            dtype=np.float32)
    # create windowing function for DFT
    self.window = np.hanning(frame_size)
    try:
        # the audio signal is not scaled, scale the window accordingly
        # (np.iinfo raises ValueError for float dtypes -> fall through)
        max_value = np.iinfo(self.wav.audio.dtype).max
        self._fft_window = self.window / max_value
    except ValueError:
        self._fft_window = self.window
    # step through all frames
    for frame in range(self.num_frames):
        # seek to the right position in the audio signal
        if online:
            # step back one frame_size after moving forward 1 hop_size
            # so that the current position is at the end of the window
            seek = int((frame + 1) * self.hop_size - frame_size)
        else:
            # step back half of the frame_size so that the frame represents
            # the centre of the window
            seek = int(frame * self.hop_size - frame_size / 2)
        # read in the right portion of the audio
        if seek >= self.wav.num_samples:
            # end of file reached
            break
        elif seek + frame_size >= self.wav.num_samples:
            # end behind the actual audio, append zeros accordingly
            zeros = np.zeros(seek + frame_size - self.wav.num_samples)
            signal = self.wav.audio[seek:]
            signal = np.append(signal, zeros)
        elif seek < 0:
            # start before the actual audio, pad with zeros accordingly
            zeros = np.zeros(-seek)
            signal = self.wav.audio[0:seek + frame_size]
            signal = np.append(zeros, signal)
        else:
            # normal read operation
            signal = self.wav.audio[seek:seek + frame_size]
        # multiply the signal with the window function
        signal = signal * self._fft_window
        # perform DFT (keep the positive-frequency half only)
        stft = fft.fft(signal)[:self.num_fft_bins]
        # compute the local group delay
        if lgd:
            # unwrap the phase
            unwrapped_phase = np.unwrap(np.angle(stft))
            # local group delay is the derivative over frequency
            # (last column stays 0 since there is no right neighbour)
            self.lgd[frame, :-1] = (unwrapped_phase[:-1] -
                                    unwrapped_phase[1:])
        # is block-wise processing needed?
        if filterbank is None:
            # no filtering needed, thus no block wise processing needed
            self.spec[frame] = np.abs(stft)
        else:
            # filter in blocks
            spec[frame % block_size] = np.abs(stft)
            # end of a block or end of the signal reached
            # NOTE(review): under Python 3 `/` is true division, so this is
            # already true after the first frame (1/bs > 0); `//` was likely
            # intended (Python 2 heritage) -- confirm.
            end_of_block = (frame + 1) / block_size > block
            end_of_signal = (frame + 1) == self.num_frames
            if end_of_block or end_of_signal:
                start = block * block_size
                stop = min(start + block_size, self.num_frames)
                filtered_spec = np.dot(spec[:stop - start], filterbank)
                self.spec[start:stop] = filtered_spec
                # increase the block counter
                block += 1
        # next frame
    # take the logarithm (in place)
    if log:
        np.log10(mul * self.spec + add, out=self.spec)
plt.plot(freq2wl(freq) * 1e9, angle, linewidth="0.7") plt.axvline(1550, color="k", linestyle="--", linewidth="0.5") plt.legend([r"$\phi_4$", r"$\phi_5$", r"$\phi_6$", r"$\phi_7$"], loc="upper right") plt.xlabel("Wavelength (nm)") plt.ylabel("Phase") plt.show() import sys sys.exit() plt.figure() idx = np.argmax(freq > set_freq) print(idx, freq2wl(freq[idx])) angles = np.rad2deg(np.unwrap(np.angle(s[:, outputs, input_pin]))).T angles = angles + ((angles[:, idx] % 2 * np.pi) - angles[:, idx]).reshape( (4, 1)) print(angles[:, idx], angles) for i in range(4): plt.plot(freq2wl(freq) * 1e9, angles[i]) # , label="Port {} to {}".format(i, j)) plt.plot(freq2wl(freq[idx]) * 1e9, angles[i][idx], "rx") plt.axvline(1550) # plt.legend() plt.xlabel("Wavelength (nm)") plt.ylabel("Phase") plt.show() import sys
def read_orbit(infile, outdir, vmin=-0.2, vmax=0.2, vmin_pal=-0.2,
               vmax_pal=0.2, dist_gcp=None, write_netcdf=False):
    """Convert one AVISO along-track L3 file into per-pass GeoTIFF/NetCDF.

    The product id (J2/AL x ADT/SLA) is inferred from the input file name.
    For each satellite pass found in the file, the track is regularised in
    time (gaps filled with NaN), longitudes are made continuous (no
    [-180, 180] clipping, as required by the GCP maker), and the height
    variable is packed to uint8 and written either as a GeoTIFF (default)
    or as a 1-D NetCDF.

    Parameters
    ----------
    infile : str
        Input AVISO L3 NetCDF file (dt_global_{j2,al}_{adt,sla}_vfec*.nc).
    outdir : str
        Output directory for the generated files.
    vmin, vmax, vmin_pal, vmax_pal : float
        Packing and palette bounds; recomputed from the data when
        ``write_netcdf`` is True.
    dist_gcp : float or None
        GCP spacing, forwarded to ``tools_for_gcp.make_gcps_v2``.
    write_netcdf : bool
        If True, write NetCDF via ``write_netcdf_1d`` instead of GeoTIFF.
    """
    # Map the file-name pattern to the product id used for metadata lookup.
    # NOTE(review): if none of the four patterns match, L3id stays unbound
    # and the L3_MAPS lookup below raises NameError — confirm inputs are
    # always one of these products.
    if (re.match(r'^dt_global_j2_adt_vfec_.*\.nc', os.path.basename(infile))
            is not None):
        L3id = 'J2_ADT'
        # vmin = -0.2; vmax = 0.2 ; vmin_pal = -0.2 ; vmax_pal = 0.2
    elif (re.match(r'^dt_global_al_adt_vfec.*\.nc', os.path.basename(infile))
            is not None):
        L3id = 'AL_ADT'
        # vmin = -0.2 ; vmax = 0.2 ; vmin_pal = -0.2 ; vmax_pal = 0.2
    elif (re.match(r'^dt_global_j2_sla_vfec.*\.nc', os.path.basename(infile))
            is not None):
        L3id = 'J2_SLA'
        # vmin = -0.2 ; vmax = 0.2 ; vmin_pal = -0.2 ; vmax_pal = 0.2
    elif (re.match(r'^dt_global_al_sla_vfec.*\.nc', os.path.basename(infile))
            is not None):
        L3id = 'AL_SLA'
        # vmin = -0.2 ; vmax = 0.2 ; vmin_pal = -0.2 ; vmax_pal = 0.2
    # Read the whole track: positions, height variable, time, pass/cycle ids.
    dset = Dataset(infile)
    lon_all = dset.variables['longitude'][:]
    lat_all = dset.variables['latitude'][:]
    var_all = dset.variables[L3_MAPS[L3id]['hname']][:]
    time_all = dset.variables['time'][:]
    ipass_all = dset.variables['track'][:]
    cycle_all = dset.variables['cycle'][:]
    time_units = dset.variables['time'].units
    var_fill_value = dset.variables[L3_MAPS[L3id]['hname']]._FillValue
    var_all[var_all == var_fill_value] = np.nan  # fill value -> NaN
    dset.close()
    # Detect passes index: boundaries are where the track number changes.
    dipass = ipass_all[1:] - ipass_all[:-1]
    ind_ipass = np.where(dipass != 0)[0]
    ind_ipass += 1
    ind_ipass = np.hstack([0, ind_ipass, len(ipass_all)])
    # Loop on all passes and compute geotiff for each pass
    for i in range(len(ind_ipass) - 1):
        lon_0 = lon_all[ind_ipass[i]:ind_ipass[i + 1]]
        # lon_0 = np.mod(lon_0 + 180, 360) - 180
        # Re-center longitudes around the mid-track value, then unwrap so
        # the pass is continuous across the dateline.
        # NOTE(review): float index — under Python 3 this needs
        # np.shape(lon_0)[0] // 2 (TypeError otherwise); works only on Py2.
        lref = lon_0[np.shape(lon_0)[0] / 2]
        lon_0 = np.mod(lon_0 - (lref - 180), 360) + (lref - 180)
        #lon_0 = np.mod(lon_0 - np.min(lon_0), 360) - lref # - 180
        lon_0 = np.rad2deg(np.unwrap(np.deg2rad(lon_0)))
        lat_0 = lat_all[ind_ipass[i]:ind_ipass[i + 1]]
        var_0 = var_all[ind_ipass[i]:ind_ipass[i + 1]]
        time_0 = time_all[ind_ipass[i]:ind_ipass[i + 1]]
        ipass_0 = ipass_all[ind_ipass[i]:ind_ipass[i + 1]]
        cycle_0 = cycle_all[ind_ipass[i]:ind_ipass[i + 1]]
        dtime = time_0[1:] - time_0[:-1]
        dlon = abs(lon_0[1:] - lon_0[:-1])
        # If unwrapping still leaves a jump > 180 deg, fall back to [0, 360).
        if (np.array(dlon) > 180).any():
            lon_0 = np.mod(lon_0, 360)
        # Nominal along-track sampling step = modal time difference;
        # passes with fewer than 4 samples are skipped.
        if len(time_0) > 3:
            delta = stats.mode(dtime)[0][0]
        else:
            continue
        # Number of nominal steps between consecutive samples; >= 2 marks a gap.
        ndelta = np.round(dtime / delta).astype('int')
        ind_dtime = np.where(ndelta >= 2)[0]
        if ind_dtime.size != 0:
            time = time_0[:ind_dtime[0] + 1]
            var = var_0[:ind_dtime[0] + 1]
        else:
            time = time_0
            var = var_0
        # Fill each gap with evenly spaced times and NaN heights.
        # NOTE(review): this inner loop reuses the outer loop variable `i`;
        # harmless in Python (the outer `for` rebinds it each pass) but
        # confusing — worth renaming.
        for i in range(len(ind_dtime)):
            # time_fill = np.linspace(time_0[ind_dtime[i]],
            #                         time_0[ind_dtime[i] + 1],
            #                         num=(time_0[ind_dtime[i] + 1]
            #                         - time_0[ind_dtime[i]]) / delta)
            time_fill = np.linspace(time_0[ind_dtime[i]],
                                    time_0[ind_dtime[i] + 1],
                                    num=ndelta[ind_dtime[i]], endpoint=False)
            var_fill = np.zeros(np.shape(time_fill[1:])) * np.nan
            # time = time.append(time_fill[1:])
            time = np.hstack([time, time_fill[1:]])
            # var = var.append(var_fill)
            var = np.hstack([var, var_fill])
            # Append the valid segment up to the next gap (or to the end).
            if i != (len(ind_dtime) - 1):
                time = np.hstack(
                    [time, time_0[ind_dtime[i] + 1:ind_dtime[i + 1] + 1]])
                var = np.hstack(
                    [var, var_0[ind_dtime[i] + 1:ind_dtime[i + 1] + 1]])
            else:
                time = np.hstack([time, time_0[ind_dtime[i] + 1:]])
                var = np.hstack([var, var_0[ind_dtime[i] + 1:]])
        # time = np.concatenate(time)
        # var = np.concatenate(var)
        # Interpolate positions onto the regularised time axis.
        func = interpolate.interp1d(time_0, lon_0, kind='quadratic')
        lon = func(time)
        func = interpolate.interp1d(time_0, lat_0, kind='quadratic')
        lat = func(time)
        ssha = var
        mask_gap = np.isnan(ssha)  # remember filled-gap samples
        # Shift longitudes by whole turns so min(lon) lies in [-180, 180).
        lon = lon - np.floor((np.min(lon) + 180.) / 360.) * 360.
        time = np.float64(time)
        start_time = num2date(time[0], time_units)
        end_time = num2date(time[-1], time_units)
        time = num2date(time, units=time_units)
        # Convert datetimes to seconds since the Unix epoch.
        # NOTE(review): time_num aliases `time` (no copy) — the loop below
        # overwrites both; `time` is not reused afterwards, so behavior is
        # unaffected, but the aliasing is fragile.
        time_num = time
        for t in range(np.shape(time)[0]):
            time_num[t] = date2num(
                time[t],
                units='microseconds since 1970-01-01 00:00:00.000000Z')
            time_num[t] *= 10**(-6)
        #time = (num2date(time, time_units)
        #        - calendar.timegm(cnes_day.timetuple()))
        # for t in range(np.shape(time)[0]):
        #     time[t] = date2num(num2date(time[t], units) - cnes_day, unit=)
        #time = time.total_seconds()
        #time = (time + calendar.timegm(julian_day.timetuple())
        #        - calendar.timegm(cnes_day.timetuple()))
        # NOTE : lon/lat must be continuous even if crossing dateline
        # (ie. no [-180,180] clipping)
        # Make GCPs (mimic a swath of arbitrary width in lon/lat, here ~5km)
        # gcps = tools_for_gcp.make_gcps_v1(lon, lat, dist_gcp=dist_gcp)
        gcps = tools_for_gcp.make_gcps_v2(lon, lat, dist_gcp=dist_gcp)
        # Write geotiff
        # NOTE : product_name to be changed, set here for test
        metadata = {}
        # `dtime` is reused here as a formatted datetime string (it was the
        # time-difference array above).
        (dtime, time_range) = stfmt.format_time_and_range(start_time,
                                                          end_time,
                                                          units='s')
        metadata['product_name'] = L3_MAPS[L3id]['productname']
        dname = (os.path.splitext(os.path.basename(infile))[0] + '_c'
                 + str(int(cycle_0[0])).zfill(4) + '_p'
                 + str(int(ipass_0[0])).zfill(3))
        metadata['name'] = dname
        metadata['datetime'] = dtime
        metadata['time_range'] = time_range
        metadata['begin_time'] = start_time.strftime(TIMEFMT)
        metadata['end_time'] = end_time.strftime(TIMEFMT)
        metadata['source_URI'] = infile
        metadata['source_provider'] = 'AVISO'
        metadata['processing_center'] = ''
        metadata['conversion_version'] = '0.0.0'
        metadata['conversion_datatime'] = stfmt.format_time(datetime.utcnow())
        metadata['type'] = 'along_track'
        metadata['cycle'] = int(cycle_0[0])
        metadata['pass'] = int(ipass_0[0])
        metadata['spatial_resolution'] = np.float32(7000)
        geolocation = {}
        geolocation['projection'] = stfmt.format_gdalprojection()
        geolocation['gcps'] = stfmt.format_gdalgcps(*gcps)
        band = []
        mask = np.ma.getmaskarray(ssha)
        # For NetCDF output, packing bounds come from the data itself.
        if write_netcdf:
            vmin = np.nanmin(ssha)
            vmin_pal = vmin
            vmax = np.nanmax(ssha)
            vmax_pal = vmax
            #print('bla')
        # Pack to uint8: 0..254 data range, 255 = nodata (masked or gap).
        scale = (vmax - vmin) / 254.
        offset = vmin
        mask = np.ma.getmaskarray(ssha)  # (duplicate of the assignment above)
        array = np.clip(np.round((ssha - offset) / scale),
                        0, 254).astype('uint8')
        array[mask] = 255
        array[mask_gap] = 255
        if write_netcdf is False:
            array = array[:, np.newaxis]  # GeoTIFF writer expects 2-D
        colortable = stfmt.format_colortable('matplotlib_jet',
                                             vmax=vmax, vmax_pal=vmax_pal,
                                             vmin=vmin, vmin_pal=vmin_pal)
        band.append({'array': array,
                     'scale': scale,
                     'offset': offset,
                     'description': L3_MAPS[L3id]['parameter'],
                     'name': L3_MAPS[L3id]['hname'],
                     'unittype': 'm',
                     'nodatavalue': 255,
                     'parameter_range': [vmin, vmax],
                     'colortable': colortable})
        if write_netcdf is False:
            tifffile = stfmt.format_tifffilename(outdir, metadata,
                                                 create_dir=True)
            stfmt.write_geotiff(tifffile, metadata, geolocation, band)
        else:
            geolocation['geotransform'] = [lon, lat]
            geolocation['time'] = time_num[:]
            netcdffile = stfmt.format_ncfilename(outdir, metadata,
                                                 create_dir=True)
            write_netcdf_1d(netcdffile, metadata, geolocation, band,
                            model='along_track', dgcps=1)
def dft_analysis(x, window='blackmanharris', N=None,scaling_dB=True,normalize_win=True, plot=False, fs=None):
    """
    Analysis of a signal x using the Discrete Fourier Transform
    ----------------------------------------------------------
    Windows the signal, applies zero-phase buffering and returns the
    positive-frequency magnitude and (unwrapped) phase spectra.

    input
    -----
    x : 1d-array, input signal of shape (n,)
    window: window type (default = 'blackmanharris')
          : if None, a rectangular ('boxcar') window is used
    N : FFT size, must be >= len(x) and a power of 2
      : if None then N = 2**np.ceil(np.log2(len(x)))
    scaling_dB: bool, if False the linear magnitude spectrum is returned,
          else in dB; default True
    normalize_win: bool (default True), normalize window to unit sum
          (recommended)
    plot: int, (default: 0) for no plot
        : 1 for plotting magnitude and phase spectrum
        : 2 for plotting the signal along with the spectra
    fs : sampling frequency, only used to label the plots when plot is set
       : if not provided, fs=1 is used; it does not affect any computation

    output
    ------
    mX: magnitude spectrum (shape = int(N/2)+1)  # positive spectrum
    pX: unwrapped phase spectrum, same shape as mX
    N : N-point FFT size actually used
    """
    n = x.shape[0]

    # Default FFT size: next power of two >= n.
    # NOTE(review): `assert` is stripped under -O; these are input checks.
    if N is None: N = int(2**np.ceil(np.log2(n)))
    assert isPower2(N)
    # FFT size smaller than the signal would crop spectral information.
    assert N>=n

    # Window selection (None means rectangular).
    if window is None: window='boxcar'
    win = signal.get_window(window, n)

    # Normalize analysis window to unit sum so mX is amplitude-calibrated.
    if normalize_win: win = win / win.sum()

    # Size of positive spectrum, including bin 0.
    hN = int((N/2)+1)
    # Half analysis window size by rounding (first half incl. centre sample).
    hM1 = int(np.floor((win.size+1)/2))
    # Half analysis window size by floor (second half).
    # NOTE(review): assumes n >= 2 — for a length-1 signal hM2 == 0 and the
    # fftbuffer[-hM2:] assignment below misbehaves; confirm callers.
    hM2 = int(np.floor(win.size/2))
    fftbuffer = np.zeros(N)
    xw = x*win  # windowed signal
    # Zero-phase windowing: rotate the windowed signal so its centre sits at
    # buffer index 0 (second half at the start, first half at the end).
    fftbuffer[:hM1] = xw[hM2:]
    fftbuffer[-hM2:] = xw[:hM2]
    # FFT
    X = fft(fftbuffer)
    # Magnitude spectrum of positive frequencies; floor at machine epsilon
    # so the dB conversion never sees log10(0).
    absX = np.abs(X[:hN])
    absX[absX<np.finfo(float).eps] = np.finfo(float).eps
    if scaling_dB:
        mX = 20 * np.log10(absX)   # in dB
    else:
        mX = absX.copy()           # linear magnitude
    # Phase calculation: zero out numerically negligible real/imag parts to
    # avoid spurious phase jumps before unwrapping (in-place via views).
    tol = 1e-14
    X[:hN].real[np.abs(X[:hN].real) < tol] = 0.0
    X[:hN].imag[np.abs(X[:hN].imag) < tol] = 0.0
    pX = np.unwrap(np.angle(X[:hN]))  # unwrapped phase spectrum
    if plot:
        if fs is None: fs=1
        # Frequency axis: normalized if fs==1, Hz otherwise.
        freq = fs*np.arange(mX.shape[0])/N
        if plot>1:
            plt.figure(figsize=(12,6))
            plt.subplot(221)
        else:
            plt.figure(figsize=(12,4))
            plt.subplot(121)
        plt.plot(freq, mX)
        plt.title('Magnitude Spectrum')
        plt.xlabel(f'Frequency{" (normalized)" if fs==1 else " (Hz)"}')
        plt.ylabel(f'|X|{" (dB)" if scaling_dB else ""}')
        plt.grid()
        plt.xlim([freq[0], freq[-1]])
        if plot>1:
            plt.subplot(222)
        else:
            plt.subplot(122)
        plt.plot(freq, pX)
        plt.title('Phase Spectrum')
        plt.xlabel(f'Frequency{" (normalized)" if fs==1 else " (Hz)"}')
        plt.ylabel('<|X|')
        plt.grid()
        plt.xlim([freq[0], freq[-1]])
        if plot>1:
            # Bottom panel: raw signal vs the (rescaled) windowed signal.
            tx = np.arange(len(x))/fs
            plt.subplot(313)
            plt.plot(tx,x,label='signal')
            plt.plot(tx,xw/np.linalg.norm(xw,2)*np.linalg.norm(x,2),label='windowed and scaled')
            plt.legend()
            plt.grid()
            plt.xlim([tx[0],tx[-1]])
            plt.ylabel('Amiplitude')
            plt.title('signal: x')
            plt.xlabel('time(s)')
        plt.show()
    return mX, pX, N
def TEOB_process_array_FD(i, M, q, chi1, chi2, lambda1, lambda2, f_min, iota, outdir, comm, fs, distance, approximant='TEOBv4', allow_skip=True, verbose=True): ''' Helper function for workers Assumes m1 >= m2 ''' args = [i, M, comm.Get_rank(), q, chi1, chi2, lambda1, lambda2, f_min, iota, fs, distance] config_str = 'TEOB_TD_%d.npy'%i if os.path.isfile(outdir+config_str) and allow_skip: if verbose: print '*** Skipping existing TEOB configuration for parameters:', \ args return try: print 'Generate wf:', args print q m1 = M * q/(1.0+q) m2 = M * 1.0/(1.0+q) t, hp, hc = spin_tidal_eob(m1, m2, chi1, chi2, lambda1, lambda2, f_min, distance=distance, inclination=iota, delta_t=1.0/fs, approximant=approximant, verbose=False) # Compute amplitude and phase and interpolate onto a sparse grid h = hp - 1j * hc amp = np.abs(h) phi = np.unwrap(np.angle(h)) ampI = ip.InterpolatedUnivariateSpline(t, amp, k=3, ext='zeros') # Compute phase grid with variable number of points per cycle t_grid, phase_grid = Generate_phase_grid_extrapolate(t, phi) # use only non-zero amplitude data for constructing spline idx = np.where(amp > 0.0) ampI = spline(t[idx], amp[idx], k=3, ext='zeros') amp_on_grid = ampI(t_grid) # Save waveform quantities data_save = np.array([t_grid, phase_grid, amp_on_grid]) np.save(outdir+config_str, data_save) # Save raw data for debugging config_str_raw = 'TEOB_TD_%d_raw.npy'%i data_save_raw = np.array([t, phi, amp]) np.save(outdir+config_str_raw, data_save_raw) if verbose: print '*** TEOB_process_array_TD finished for parameters:', args except Exception as e: print '***********************************************************************' print '*** TEOB_process_array_TD failed for parameters:', args print '*** Error %s' % e print '***********************************************************************' f = open('FAILED_process_array_TD_PARAMS.txt', 'a') print args s = '%.16e'%(args[1]) for arg in args[2:]: if (type(arg) == np.unicode) or (type(arg) == str): s += ' 
'+str(arg) elif type(arg) == np.ndarray: for a in arg: s += ' %.16e'%(a) else: s += ' %.16e'%(arg) f.write('%s\n'%s) traceback.print_tb(sys.exc_info()[2])
def pro6stacked_seis(eq_file1, eq_file2, plot_scale_fac = 0.03, slow_delta = 0.0005,
              slowR_lo = -0.1, slowR_hi = 0.1, slowT_lo = -0.1, slowT_hi = 0.1,
              start_buff = -50, end_buff = 50, norm = 0, freq_corr = 1.0,
              plot_dyn_range = 1000, fig_index = 401, get_stf = 0,
              ref_phase = 'blank', ARRAY = 0, max_rat = 1.8, min_amp = 0.2,
              turn_off_black = 0, R_slow_plot = 0, T_slow_plot = 0,
              tdiff_clip = 1, event_no = 0):
    """Compare two pre-computed 2-D slowness stacks of the same phase.

    Reads the (radial x transverse slowness) beam stacks of two events
    (HD<date>_2dstack.mseed), forms analytic signals per slowness bin via
    the Hilbert transform, and derives per-bin envelope amplitude,
    amplitude ratio and a phase-based time shift between the two events.
    Produces record-section plots along the slownesses nearest
    R_slow_plot / T_slow_plot, slowness-plane maps of median time shift and
    beam amplitude, and writes results to mseed files.

    Key parameters:
      eq_file1, eq_file2 -- event files; first line holds the origin time
      slow[R|T]_[lo|hi], slow_delta -- slowness grid (assumed to match the
                            grid used when the stacks were built -- confirm)
      freq_corr          -- frequency (Hz) used to convert phase difference
                            to a time shift
      max_rat, min_amp   -- robustness gates: shifts are NaN-masked where
                            amplitude ratio / mean amplitude are unreliable
      get_stf            -- if non-zero, extract and write stacked source
                            time functions near zero radial slowness
      event_no           -- event index looked up in events_good.txt
    NOTE(review): `norm` and `plot_dyn_range` are never used in this body;
    `fig_index` is overwritten before first use.
    """
    import obspy
    import obspy.signal
    from obspy import UTCDateTime
    from obspy import Stream, Trace
    from obspy import read
    from obspy.geodetics import gps2dist_azimuth
    import numpy as np
    import os
    from obspy.taup import TauPyModel
    import obspy.signal as sign
    import matplotlib.pyplot as plt
    model = TauPyModel(model='iasp91')
    from scipy.signal import hilbert
    import math
    import time
    import statistics

    #%% Get info
    #%% get locations
    print('Running pro6_plot_stacked_seis')
    start_time_wc = time.time()

    # Phase whose arrivals define the predicted slowness below.
    dphase = 'PKiKP'

    sta_file = '/Users/vidale/Documents/GitHub/Array_codes/Files/events_good.txt'
    with open(sta_file, 'r') as file:
        lines = file.readlines()
    event_count = len(lines)

    print(str(event_count) + ' lines read from ' + sta_file)
    # Load station coords into arrays
    station_index = range(event_count)
    event_names = []

    # One entry per catalogue line; columns follow events_good.txt order.
    event_index = np.zeros(event_count)
    event_year = np.zeros(event_count)
    event_mo = np.zeros(event_count)
    event_day = np.zeros(event_count)
    event_hr = np.zeros(event_count)
    event_min = np.zeros(event_count)
    event_sec = np.zeros(event_count)
    event_lat = np.zeros(event_count)
    event_lon = np.zeros(event_count)
    event_dep = np.zeros(event_count)
    event_mb = np.zeros(event_count)
    event_ms = np.zeros(event_count)
    event_tstart = np.zeros(event_count)
    event_tend = np.zeros(event_count)
    event_gcdist = np.zeros(event_count)
    event_dist = np.zeros(event_count)
    event_baz = np.zeros(event_count)
    event_SNR = np.zeros(event_count)
    event_Sflag = np.zeros(event_count)
    event_PKiKPflag = np.zeros(event_count)
    event_ICSflag = np.zeros(event_count)
    event_PKiKP_radslo = np.zeros(event_count)
    event_PKiKP_traslo = np.zeros(event_count)
    event_PKiKP_qual = np.zeros(event_count)
    event_ICS_qual = np.zeros(event_count)

    # Find the catalogue row matching event_no while parsing every line.
    # NOTE(review): iii stays 0 if the match is in row 0 or absent; the
    # "not found" branch below cannot distinguish those two cases.
    iii = 0
    for ii in station_index:   # read file
        line = lines[ii]
        split_line = line.split()

        event_index[ii] = float(split_line[0])
        event_names.append(split_line[1])
        event_year[ii] = float(split_line[2])
        event_mo[ii] = float(split_line[3])
        event_day[ii] = float(split_line[4])
        event_hr[ii] = float(split_line[5])
        event_min[ii] = float(split_line[6])
        event_sec[ii] = float(split_line[7])
        event_lat[ii] = float(split_line[8])
        event_lon[ii] = float(split_line[9])
        event_dep[ii] = float(split_line[10])
        event_mb[ii] = float(split_line[11])
        event_ms[ii] = float(split_line[12])
        event_tstart[ii] = float(split_line[13])
        event_tend[ii] = float(split_line[14])
        event_gcdist[ii] = float(split_line[15])
        event_dist[ii] = float(split_line[16])
        event_baz[ii] = float(split_line[17])
        event_SNR[ii] = float(split_line[18])
        event_Sflag[ii] = float(split_line[19])
        event_PKiKPflag[ii] = float(split_line[20])
        event_ICSflag[ii] = float(split_line[21])
        event_PKiKP_radslo[ii] = float(split_line[22])
        event_PKiKP_traslo[ii] = float(split_line[23])
        event_PKiKP_qual[ii] = float(split_line[24])
        event_ICS_qual[ii] = float(split_line[25])
        # print('Event ' + str(ii) + ' is ' + str(event_index[ii]))
        if event_index[ii] == event_no:
            iii = ii

    if iii == 0:
        print('Event ' + str(event_no) + ' not found')
    else:
        print('Event ' + str(event_no) + ' is ' + str(iii))

    # find predicted slowness: finite-difference travel times +/-0.5 deg
    # around the event's great-circle distance.
    arrivals1 = model.get_travel_times(source_depth_in_km=event_dep[iii],distance_in_degree=event_gcdist[iii]-0.5,phase_list=[dphase])
    arrivals2 = model.get_travel_times(source_depth_in_km=event_dep[iii],distance_in_degree=event_gcdist[iii]+0.5,phase_list=[dphase])
    dtime = arrivals2[0].time - arrivals1[0].time
    event_pred_slo = dtime/111.  # s/km

    # convert to pred rslo and tslo (N/E components via back-azimuth)
    sin_baz = np.sin(event_baz[iii] * np.pi /180)
    cos_baz = np.cos(event_baz[iii] * np.pi /180)
    pred_Nslo = event_pred_slo * cos_baz
    pred_Eslo = event_pred_slo * sin_baz

    # rotate observed slowness to N and E
    obs_Nslo = (event_PKiKP_radslo[iii] * cos_baz) - (event_PKiKP_traslo[iii] * sin_baz)
    obs_Eslo = (event_PKiKP_radslo[iii] * sin_baz) + (event_PKiKP_traslo[iii] * cos_baz)
    print('PR '+ str(pred_Nslo) + ' PT ' + str(pred_Eslo) + ' OR ' + str(obs_Nslo) + ' OT ' + str(obs_Eslo))

    # find observed back-azimuth
    # bazi_rad = np.arctan(event_PKiKP_traslo[ii]/event_PKiKP_radslo[ii])
    # event_obs_bazi = event_baz[ii] + (bazi_rad * 180 / np.pi)

    if ARRAY == 1:
        goto = '/Users/vidale/Documents/PyCode/LASA/EvLocs'
        os.chdir(goto)
    # Origin times / date labels from the first line of each event file.
    # NOTE(review): these handles are never closed; a with-block would be safer.
    file = open(eq_file1, 'r')
    lines=file.readlines()
    split_line = lines[0].split()
    t1 = UTCDateTime(split_line[1])
    date_label1 = split_line[1][0:10]
    file = open(eq_file2, 'r')
    lines=file.readlines()
    split_line = lines[0].split()
    t2 = UTCDateTime(split_line[1])
    date_label2 = split_line[1][0:10]

    #%% read files
    # #%% Get saved event info, also used to name files
    # date_label = '2018-04-02' # date for filename
    if ARRAY == 1:
        goto = '/Users/vidale/Documents/PyCode/LASA/Pro_files'
        os.chdir(goto)
    fname1 = 'HD' + date_label1 + '_2dstack.mseed'
    fname2 = 'HD' + date_label2 + '_2dstack.mseed'
    st1 = Stream()
    st2 = Stream()
    st1 = read(fname1)
    st2 = read(fname2)

    # Output containers reuse st1's trace layout/metadata.
    tshift = st1.copy()     # make array for time shift
    amp_ratio = st1.copy()  # make array for relative amplitude
    amp_ave = st1.copy()    # make array for relative amplitude

    print('Read in: event 1 ' + str(len(st1)) + ' event 2 ' + str(len(st2)) + ' traces')
    nt1 = len(st1[0].data)
    nt2 = len(st2[0].data)
    dt1 = st1[0].stats.delta
    dt2 = st2[0].stats.delta
    print('Event 1 - First trace has ' + str(nt1) + ' time pts, time sampling of '
          + str(dt1) + ' and thus duration of ' + str((nt1-1)*dt1))
    print('Event 2 - First trace has ' + str(nt2) + ' time pts, time sampling of '
          + str(dt2) + ' and thus duration of ' + str((nt2-1)*dt2))
    if nt1 != nt2 or dt1 != dt2:
        print('nt or dt not does not match')
        # NOTE(review): relies on the site builtin exit(); sys.exit is safer.
        exit(-1)

    #%% Make grid of slownesses
    slowR_n = int(1 + (slowR_hi - slowR_lo)/slow_delta)  # number of slownesses
    slowT_n = int(1 + (slowT_hi - slowT_lo)/slow_delta)  # number of slownesses
    print(str(slowT_n) + ' trans slownesses, hi and lo are ' + str(slowT_hi) + ' ' + str(slowT_lo))
    # In English, stack_slows = range(slow_n) * slow_delta - slow_lo
    a1R = range(slowR_n)
    a1T = range(slowT_n)
    stack_Rslows = [(x * slow_delta + slowR_lo) for x in a1R]
    stack_Tslows = [(x * slow_delta + slowT_lo) for x in a1T]
    print(str(slowR_n) + ' radial slownesses, ' + str(slowT_n) + ' trans slownesses, ')

    #%% Loop over slowness
    total_slows = slowR_n * slowT_n
    global_max = 0
    for slow_i in range(total_slows):  # find envelope, phase, tshift, and global max
        if slow_i % 200 == 0:
            print('At line 101, ' +str(slow_i) + ' slowness out of ' + str(total_slows))
        # NOTE(review): this detects zero-length traces but does NOT skip
        # them; hilbert() below would still run on empty data.
        if len(st1[slow_i].data) == 0:  # test for zero-length traces
            print('%d data has zero length ' % (slow_i))
        seismogram1 = hilbert(st1[slow_i].data)  # make analytic seismograms
        seismogram2 = hilbert(st2[slow_i].data)
        env1 = np.abs(seismogram1)  # amplitude
        env2 = np.abs(seismogram2)
        amp_ave[slow_i].data    = 0.5 * (env1 + env2)
        # NOTE(review): division by env2 can produce inf/NaN where the
        # second event's envelope is zero -- confirm inputs are non-zero.
        amp_ratio[slow_i].data  = env1/env2
        angle1 = np.angle(seismogram1)  # time shift
        angle2 = np.angle(seismogram2)
        phase1 = np.unwrap(angle1)
        phase2 = np.unwrap(angle2)
        # `dphase` (previously the phase-name string, no longer needed) is
        # reused here as the wrapped phase-difference array.
        dphase = (angle1 - angle2)
        # dphase = phase1 - phase2
        # Re-wrap the phase difference into [-pi, pi].
        for it in range(nt1):
            if dphase[it] > math.pi:
                dphase[it] -= 2 * math.pi
            elif dphase[it] < -1 * math.pi:
                dphase[it] += 2 * math.pi
            if dphase[it] > math.pi or dphase[it] < -math.pi:
                print(f'Bad dphase value {dphase[it]:.2f}  {it:4d}')
        # Instantaneous frequency from unwrapped phase (currently unused in
        # the tshift conversion below, kept for the commented alternative).
        freq1 = np.diff(phase1)  # freq in radians/sec
        freq2 = np.diff(phase2)
        ave_freq = 0.5*(freq1 + freq2)
        ave_freq_plus = np.append(ave_freq,[1])  # ave_freq one element too short
        # tshift[slow_i].data = dphase / ave_freq_plus  # 2*pi top and bottom cancels
        # Phase difference -> time shift assuming dominant frequency freq_corr.
        tshift[slow_i].data     = dphase/(2*math.pi*freq_corr)
        local_max = max(abs(amp_ave[slow_i].data))
        if local_max > global_max:
            global_max = local_max

    #%% Extract slices
    # Keep an unmasked copy: the NaN-masking below is for plotting only,
    # while tshift_full is what gets written to disk.
    tshift_full = tshift.copy()  # make array for time shift
    for slow_i in range(total_slows):  # ignore less robust points
        if slow_i % 200 == 0:
            print('At line 140, ' +str(slow_i) + ' slowness out of ' + str(total_slows))
        for it in range(nt1):
            if ((amp_ratio[slow_i].data[it] < (1/max_rat)) or
                (amp_ratio[slow_i].data[it] > max_rat) or
                (amp_ave[slow_i].data[it] < (min_amp * global_max))):
                tshift[slow_i].data[it] = np.nan

    #%% If desired, find transverse slowness nearest T_slow_plot
    lowest_Tslow = 1000000
    for slow_i in range(slowT_n):
        if abs(stack_Tslows[slow_i] - T_slow_plot) < lowest_Tslow:
            lowest_Tindex = slow_i
            lowest_Tslow = abs(stack_Tslows[slow_i] - T_slow_plot)

    print(str(slowT_n) + ' T slownesses, index ' + str(lowest_Tindex) + ' is closest to input parameter '
          + str(T_slow_plot) + ', slowness diff there is ' + str(lowest_Tslow) + ' and slowness is '
          + str(stack_Tslows[lowest_Tindex]))
    # Select only stacks with that slowness for radial plot
    centralR_st1 = Stream()
    centralR_st2 = Stream()
    centralR_amp   = Stream()
    centralR_ampr  = Stream()
    centralR_tdiff = Stream()
    # Traces are ordered radial-major: index = slowR_i*slowT_n + slowT_i.
    for slowR_i in range(slowR_n):
        ii = slowR_i*slowT_n + lowest_Tindex
        centralR_st1 += st1[ii]
        centralR_st2 += st2[ii]
        centralR_amp   += amp_ave[ii]
        centralR_ampr  += amp_ratio[ii]
        centralR_tdiff += tshift[ii]

    #%% If desired, find radial slowness nearest R_slow_plot
    lowest_Rslow = 1000000
    for slow_i in range(slowR_n):
        if abs(stack_Rslows[slow_i] - R_slow_plot) < lowest_Rslow:
            lowest_Rindex = slow_i
            lowest_Rslow = abs(stack_Rslows[slow_i] - R_slow_plot)

    print(str(slowR_n) + ' R slownesses, index ' + str(lowest_Rindex) + ' is closest to input parameter '
          + str(R_slow_plot) + ', slowness diff there is ' + str(lowest_Rslow) + ' and slowness is '
          + str(stack_Rslows[lowest_Rindex]))

    # Select only stacks with that slowness for transverse plot
    centralT_st1 = Stream()
    centralT_st2 = Stream()
    centralT_amp   = Stream()
    centralT_ampr  = Stream()
    centralT_tdiff = Stream()

    #%% to extract stacked time functions
    event1_sample = Stream()
    event2_sample = Stream()

    for slowT_i in range(slowT_n):
        ii = lowest_Rindex*slowT_n + slowT_i
        centralT_st1 += st1[ii]
        centralT_st2 += st2[ii]
        centralT_amp   += amp_ave[ii]
        centralT_ampr  += amp_ratio[ii]
        centralT_tdiff += tshift[ii]

    #%% compute timing time series
    # (overwritten per-trace inside the plotting loops below)
    ttt = (np.arange(len(st1[0].data)) * st1[0].stats.delta + start_buff)  # in units of seconds

    #%% Plot radial amp and tdiff vs time plots
    fig_index = 6
    # plt.close(fig_index)
    plt.figure(fig_index,figsize=(30,10))
    plt.xlim(start_buff,end_buff)
    plt.ylim(stack_Rslows[0], stack_Rslows[-1])

    for slowR_i in range(slowR_n):  # loop over radial slownesses
        dist_offset = stack_Rslows[slowR_i]  # trying for approx degrees
        ttt = (np.arange(len(centralR_st1[slowR_i].data)) * centralR_st1[slowR_i].stats.delta
               + (centralR_st1[slowR_i].stats.starttime - t1))
        plt.plot(ttt, (centralR_st1[slowR_i].data - np.median(centralR_st1[slowR_i].data))*plot_scale_fac /global_max + dist_offset, color = 'green')
        plt.plot(ttt, (centralR_st2[slowR_i].data - np.median(centralR_st2[slowR_i].data))*plot_scale_fac /global_max + dist_offset, color = 'red')
        # extract stacked time functions
        if get_stf != 0:
            if np.abs(stack_Rslows[slowR_i]- 0.005) < 0.000001:  # kludge, not exactly zero when desired
                event1_sample = centralR_st1[slowR_i].copy()
                event2_sample = centralR_st2[slowR_i].copy()
        # plt.plot(ttt, (centralR_amp[slowR_i].data) *plot_scale_fac/global_max + dist_offset, color = 'purple')
        if turn_off_black == 0:
            plt.plot(ttt, (centralR_tdiff[slowR_i].data)*plot_scale_fac/1 + dist_offset, color = 'black')
        plt.plot(ttt, (centralR_amp[slowR_i].data)*0.0 + dist_offset, color = 'lightgray')  # reference lines
    plt.xlabel('Time (s)')
    plt.ylabel('R Slowness (s/km)')
    plt.title(ref_phase + ' seismograms and tdiff at ' + str(T_slow_plot) + ' T slowness, green is event1, red is event2')

    # Plot transverse amp and tdiff vs time plots
    fig_index = 7
    # plt.close(fig_index)
    plt.figure(fig_index,figsize=(30,10))
    plt.xlim(start_buff,end_buff)
    plt.ylim(stack_Tslows[0], stack_Tslows[-1])

    for slowT_i in range(slowT_n):  # loop over transverse slownesses
        dist_offset = stack_Tslows[slowT_i]  # trying for approx degrees
        ttt = (np.arange(len(centralT_st1[slowT_i].data)) * centralT_st1[slowT_i].stats.delta
               + (centralT_st1[slowT_i].stats.starttime - t1))
        plt.plot(ttt, (centralT_st1[slowT_i].data - np.median(centralT_st1[slowT_i].data))*plot_scale_fac /global_max + dist_offset, color = 'green')
        plt.plot(ttt, (centralT_st2[slowT_i].data - np.median(centralT_st2[slowT_i].data))*plot_scale_fac /global_max + dist_offset, color = 'red')
        # plt.plot(ttt, (centralT_amp[slowT_i].data) *plot_scale_fac/global_max + dist_offset, color = 'purple')
        if turn_off_black == 0:
            plt.plot(ttt, (centralT_tdiff[slowT_i].data)*plot_scale_fac/1 + dist_offset, color = 'black')
        plt.plot(ttt, (centralT_amp[slowT_i].data)*0.0 + dist_offset, color = 'lightgray')  # reference lines
    plt.xlabel('Time (s)')
    plt.ylabel('T Slowness (s/km)')
    plt.title(str(event_no) + '  ' + date_label1 + '  ' +ref_phase + ' seismograms and tdiff ' + str(R_slow_plot) + ' R slowness, green is event1, red is event2')
    os.chdir('/Users/vidale/Documents/PyCode/LASA/Quake_results/Plots')
    # plt.savefig(date_label1 + '_' + str(start_buff) + '_' + str(end_buff) + '_stack.png')

    #%% R-T tshift averaged over time window
    fig_index = 8
    stack_slice = np.zeros((slowR_n,slowT_n))
    for slowR_i in range(slowR_n):  # loop over radial slownesses
        for slowT_i in range(slowT_n):  # loop over transverse slownesses
            index = slowR_i*slowT_n + slowT_i
            # Median over time of the (NaN-masked) shift at this slowness.
            num_val = np.nanmedian(tshift[index].data)
            # num_val = statistics.median(tshift_full[index].data)
            stack_slice[slowR_i, slowT_i] = num_val
    # adjust for dominant frequency of 1.2 Hz, not 1 Hz
    # stack_slice[0,0] = -0.25
    # stack_slice[0,1] = 0.25
    # tdiff_clip = 0.4/1.2
    tdiff_clip_max = tdiff_clip  # DO NOT LEAVE COMMENTED OUT!!
    tdiff_clip_min = -tdiff_clip

    y1, x1 = np.mgrid[slice(stack_Rslows[0], stack_Rslows[-1] + slow_delta, slow_delta),
                      slice(stack_Tslows[0], stack_Tslows[-1] + slow_delta, slow_delta)]

    fig, ax = plt.subplots(1, figsize=(7,6))
    # fig, ax = plt.subplots(1, figsize=(9,2))
    # fig.subplots_adjust(bottom=0.3)
    # c = ax.pcolormesh(x1, y1, stack_slice, cmap=plt.cm.bwr, vmin = tdiff_clip_min, vmax = tdiff_clip_max)
    c = ax.pcolormesh(x1, y1, stack_slice, cmap=plt.cm.coolwarm, vmin = tdiff_clip_min, vmax = tdiff_clip_max)
    ax.axis([x1.min(), x1.max(), y1.min(), y1.max()])
    circle1 = plt.Circle((0, 0), 0.019, color='black', fill=False)
    ax.add_artist(circle1)
    circle2 = plt.Circle((0, 0), 0.040, color='black', fill=False)
    ax.add_artist(circle2)  #outer core limit
    fig.colorbar(c, ax=ax)
    plt.ylabel('R Slowness (s/km)')
    plt.title(ref_phase + ' time shift')
    # plt.title('T-R average time shift ' + date_label1 + '  ' + date_label2)
    plt.show()

    #%% R-T amplitude averaged over time window
    fig_index = 9
    stack_slice = np.zeros((slowR_n,slowT_n))
    smax = 0
    for slowR_i in range(slowR_n):  # loop over radial slownesses
        for slowT_i in range(slowT_n):  # loop over transverse slownesses
            index = slowR_i*slowT_n + slowT_i
            num_val = np.nanmedian(amp_ave[index].data)
            stack_slice[slowR_i, slowT_i] = num_val
            if num_val > smax:
                smax = num_val
    # stack_slice[0,0] = 0

    y1, x1 = np.mgrid[slice(stack_Rslows[0], stack_Rslows[-1] + slow_delta, slow_delta),
                      slice(stack_Tslows[0], stack_Tslows[-1] + slow_delta, slow_delta)]

    # fig, ax = plt.subplots(1)
    fig, ax = plt.subplots(1, figsize=(7,6))
    # c = ax.pcolormesh(x1, y1, stack_slice/smax, cmap=plt.cm.gist_yarg, vmin = 0.5)
    c = ax.pcolormesh(x1, y1, stack_slice/smax, cmap=plt.cm.gist_rainbow_r, vmin = 0)
    # c = ax.pcolormesh(x1, y1, stack_slice, cmap=plt.cm.gist_rainbow_r, vmin = 0)
    ax.axis([x1.min(), x1.max(), y1.min(), y1.max()])
    circle1 = plt.Circle((0, 0), 0.019, color='black', fill=False)
    ax.add_artist(circle1)  #inner core limit
    circle2 = plt.Circle((0, 0), 0.040, color='black', fill=False)
    ax.add_artist(circle2)  #outer core limit
    # Predicted (blue) and observed (black) slowness markers.
    # NOTE(review): `c` is rebound to the scatter handle here, so the
    # colorbar below is built from the scatter, not the pcolormesh.
    c = ax.scatter(pred_Eslo, pred_Nslo, color='blue', s=100, alpha=0.75)
    c = ax.scatter(obs_Eslo, obs_Nslo, color='black', s=100, alpha=0.75)
    fig.colorbar(c, ax=ax)
    plt.xlabel('Transverse Slowness (s/km)')
    plt.ylabel('Radial Slowness (s/km)')
    plt.title(str(event_no) + '  ' + date_label1 + '  ' + ref_phase + ' beam amplitude')
    # plt.title('Beam amplitude ' + date_label1 + '  ' + date_label2)
    os.chdir('/Users/vidale/Documents/PyCode/LASA/Quake_results/Plots')
    plt.savefig(date_label1 + '_' + str(start_buff) + '_' + str(end_buff) + '_beam.png')
    plt.show()

    #%% Save processed files
    if ARRAY == 0:
        goto = '/Users/vidale/Documents/PyCode/Hinet'
    if ARRAY == 1:
        goto = '/Users/vidale/Documents/PyCode/LASA/Pro_Files'
    os.chdir(goto)
    # The unmasked time shifts are written; the NaN-masked copy was only
    # for plotting.
    fname = 'HD' + date_label1 + '_' + date_label2 + '_tshift.mseed'
    tshift_full.write(fname,format = 'MSEED')
    fname = 'HD' + date_label1 + '_' + date_label2 + '_amp_ave.mseed'
    amp_ave.write(fname,format = 'MSEED')
    fname = 'HD' + date_label1 + '_' + date_label2 + '_amp_ratio.mseed'
    amp_ratio.write(fname,format = 'MSEED')

    #%% Option to write out stf
    if get_stf != 0:
        event1_sample.taper(0.1)
        event2_sample.taper(0.1)
        fname = 'HD' + date_label1 + '_stf.mseed'
        event1_sample.write(fname,format = 'MSEED')
        fname = 'HD' + date_label2 + '_stf.mseed'
        event2_sample.write(fname,format = 'MSEED')

    elapsed_time_wc = time.time() - start_time_wc
    print('This job took ' + str(elapsed_time_wc) + ' seconds')
    os.system('say "Done"')