def _compute_ls(self, magnitude, time, ofac):
    fx, fy, nout, jmax, prob = lomb.fasper(time, magnitude, ofac, 100.)
    period = fx[jmax]
    T = 1.0 / period
    new_time = np.mod(time, 2 * T) / (2 * T)
    return T, new_time, prob, period
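# Usage sketch for the lomb.fasper interface assumed throughout these snippets
# (a Python port of Press & Rybicki's fast Lomb-Scargle periodogram, called as
# fasper(x, y, ofac, hifac) and returning frequencies, powers, the number of
# frequencies, the index of the highest peak, and its false-alarm probability).
# The signal below is synthetic and purely illustrative.
import numpy as np
import lomb  # local fast Lomb-Scargle module used by all of these snippets

np.random.seed(0)
t = np.sort(np.random.uniform(0.0, 100.0, 400))   # irregularly sampled times
y = np.sin(2 * np.pi * t / 7.3) + 0.1 * np.random.randn(t.size)

fx, fy, nout, jmax, prob = lomb.fasper(t, y, 6., 100.)
best_period = 1.0 / fx[jmax]                      # ~7.3 for this toy signal
# fold on twice the period, exactly as _compute_ls does above
folded_phase = np.mod(t, 2 * best_period) / (2 * best_period)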
def computePSD(self):
    if len(self.series) > 10:
        fx, fy, nout, jmax, prob = lomb.fasper(
            self.smpltime[self.idx_start:-1] / 1000,
            self.series[self.idx_start:-1] / 1000, 4., 2.)
        pwr = ((self.series[self.idx_start:-1] / 1000
                - (self.series[self.idx_start:-1] / 1000).mean()) ** 2).sum() \
            / (len(self.series[self.idx_start:-1]) - 1)
        fy = fy / (nout / (4.0 * pwr)) * 1000
        self.psd_mag = fy
        self.psd_freq = fx

        # Calculate the power in the VLF, LF and HF frequency bands
        self.VLFpwr = 0
        self.LFpwr = 0
        self.HFpwr = 0
        for i, f in enumerate(self.psd_freq):
            if 0 < f <= 0.04:
                self.VLFpwr += self.psd_mag[i]
            elif 0.04 < f <= 0.15:
                self.LFpwr += self.psd_mag[i]
            elif 0.15 < f <= 0.4:
                self.HFpwr += self.psd_mag[i]
        self.VLFpwr *= 1000
        self.LFpwr *= 1000
        self.HFpwr *= 1000
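# The VLF/LF/HF accumulation loop above can be collapsed into boolean masks.
# A minimal equivalent sketch (band edges in Hz taken from the code above;
# band_powers is a hypothetical helper name):
import numpy as np

def band_powers(psd_freq, psd_mag):
    # sum the PSD magnitude inside the standard HRV bands
    f = np.asarray(psd_freq)
    p = np.asarray(psd_mag)
    vlf = p[(f > 0) & (f <= 0.04)].sum()
    lf = p[(f > 0.04) & (f <= 0.15)].sum()
    hf = p[(f > 0.15) & (f <= 0.4)].sum()
    return 1000 * vlf, 1000 * lf, 1000 * hf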
def _components(self, magnitude, time, ofac):
    time = time - np.min(time)
    A, PH = [], []
    for i in range(3):
        wk1, wk2, nout, jmax, prob = lomb.fasper(time, magnitude, ofac, 100.)
        fundamental_Freq = wk1[jmax]

        Atemp, PHtemp = [], []
        omagnitude = magnitude
        for j in range(4):
            function_to_fit = self._yfunc_maker((j + 1) * fundamental_Freq)
            popt0, popt1, popt2 = curve_fit(
                function_to_fit, time, omagnitude)[0][:3]
            Atemp.append(np.sqrt(popt0 ** 2 + popt1 ** 2))
            PHtemp.append(np.arctan(popt1 / popt0))

            model = self._model(
                time, popt0, popt1, popt2, (j + 1) * fundamental_Freq)
            magnitude = np.array(magnitude) - model

        A.append(Atemp)
        PH.append(PHtemp)

    PH = np.asarray(PH)
    scaledPH = PH - PH[:, 0].reshape((len(PH), 1))
    return A, scaledPH
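# The amplitude/phase extraction above follows from the fitted coefficients of
# a * sin(2*pi*f*t) + b * cos(2*pi*f*t) + c: amplitude sqrt(a^2 + b^2) and
# phase arctan(b / a). A self-contained sketch of one such fit; note that
# np.arctan2(b, a) also resolves the quadrant, while the np.arctan(b / a) used
# in these snippets wraps phases into (-pi/2, pi/2). fit_harmonic is a
# hypothetical helper name.
import numpy as np
from scipy.optimize import curve_fit

def fit_harmonic(time, magnitude, freq):
    def func(x, a, b, c):
        return a * np.sin(2 * np.pi * freq * x) + b * np.cos(2 * np.pi * freq * x) + c
    (a, b, c), _ = curve_fit(func, time, magnitude)
    amplitude = np.sqrt(a ** 2 + b ** 2)
    phase = np.arctan2(b, a)  # quadrant-safe variant of arctan(b / a)
    return amplitude, phase, c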
def fft(time, flux, ofac, hifac):
    # Do LNP test (Lomb-Scargle periodogram), i.e. the FT
    freq, power, nout, jmax, prob = lomb.fasper(time, flux, ofac, hifac)
    convfactor = (1. / (60 * 60 * 24)) * (10 ** 6)  # ~11.57, cycles/day to uHz
    uHzfreq = freq * convfactor
    return uHzfreq, power
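# Worked check of the conversion factor used above: one cycle per day is
# 1 / 86400 s ~= 11.574 uHz, so multiplying cycles/day by ~11.57 gives uHz.
convfactor = (1. / (60 * 60 * 24)) * (10 ** 6)
assert abs(convfactor - 11.574074) < 1e-5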
def spectral_features(lc):
    lc.remove_gaps()
    FIND_FREQUENCIES = 4
    time = numpy.array(lc.time)
    flux = numpy.array(lc.flux)
    # center the flux
    flux = (flux - numpy.mean(flux)) / (1.0 * numpy.std(flux))
    result = lomb.fasper(time, flux, 6.0, 6.0)
    # filter out weird frequencies
    spectral_results = filter(
        lambda elem: elem[0] < 0.55 and elem[0] > (2.0 / len(lc.time)),
        zip(result[0], result[1]))
    wavelengths = []
    for frequency in sorted(spectral_results, key=itemgetter(1), reverse=True):
        wavelength = int(round(1.0 / frequency[0]))
        # check if wavelength is approximately in array
        include = True
        for found_wavelength in wavelengths:
            if abs(found_wavelength - wavelength) <= 4:
                include = False
        if include:
            wavelengths.append(wavelength)
        if len(wavelengths) == FIND_FREQUENCIES:
            break
    # pad with zeros if fewer than FIND_FREQUENCIES wavelengths were found
    if len(wavelengths) < FIND_FREQUENCIES:
        wavelengths += [0] * (FIND_FREQUENCIES - len(wavelengths))
    return wavelengths
def fit(self, data):
    magnitude = data[0]
    time = data[1]
    time = time - np.min(time)
    A = []
    PH = []
    scaledPH = []

    def model(x, a, b, c, Freq):
        return a * np.sin(2 * np.pi * Freq * x) + b * np.cos(
            2 * np.pi * Freq * x) + c

    for i in range(3):
        wk1, wk2, nout, jmax, prob = lomb.fasper(time, magnitude, 6., 100.)
        fundamental_Freq = wk1[jmax]

        # fit to a_i sin(2pi f_i t) + b_i cos(2 pi f_i t) + b_i,o
        # a, b are the parameters we care about
        # c is a constant offset
        # f is the fundamental frequency
        def yfunc(Freq):
            def func(x, a, b, c):
                return a * np.sin(2 * np.pi * Freq * x) + b * np.cos(
                    2 * np.pi * Freq * x) + c
            return func

        Atemp = []
        PHtemp = []
        popts = []
        for j in range(4):
            popt, pcov = curve_fit(yfunc((j + 1) * fundamental_Freq),
                                   time, magnitude)
            Atemp.append(np.sqrt(popt[0] ** 2 + popt[1] ** 2))
            PHtemp.append(np.arctan(popt[1] / popt[0]))
            popts.append(popt)
        A.append(Atemp)
        PH.append(PHtemp)

        for j in range(4):
            magnitude = np.array(magnitude) - model(
                time, popts[j][0], popts[j][1], popts[j][2],
                (j + 1) * fundamental_Freq)

    for ph in PH:
        scaledPH.append(np.array(ph) - ph[0])

    self.A = A
    self.PH = PH
    self.scaledPH = scaledPH
    self._value = A[0][0]
def fit(self, data):
    global new_mjd
    global prob
    fx, fy, nout, jmax, prob = lomb.fasper(self.mjd, data, 6., 100.)
    T = 1.0 / fx[jmax]
    new_mjd = np.mod(self.mjd, 2 * T) / (2 * T)
    return T
def lombscargle(rows, oversample=6, nyquist=5, keys=("MJD", "flux")):
    x = []
    y = []
    for row in rows:
        x.append(row[keys[0]])
        y.append(row[keys[1]])
    fx, fy, nout, jmax, prob = lomb.fasper(np.array(x), np.array(y),
                                           oversample, nyquist)
    return (zip(fx, fy), jmax)
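# Hypothetical usage of lombscargle() above, with rows as dicts keyed by the
# default ("MJD", "flux") pair; the numbers are made up for illustration.
rows = [{"MJD": 55000.0, "flux": 1.2},
        {"MJD": 55001.1, "flux": 0.8},
        {"MJD": 55002.3, "flux": 1.1},
        {"MJD": 55003.0, "flux": 0.9}]
spectrum, peak_index = lombscargle(rows)
spectrum = list(spectrum)  # (frequency, power) pairs; peak_index marks the peak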
def fit(self, data):
    magnitude = data[0]
    time = data[1]

    fx, fy, nout, jmax, PeriodLS.prob = lomb.fasper(time, magnitude,
                                                    self.ofac, 100.)
    period = fx[jmax]
    T = 1.0 / period
    PeriodLS.new_time = np.mod(time, 2 * T) / (2 * T)
    return T
def fit(self, data):
    magnitude = data[0]
    time = data[1]
    time = time - np.min(time)

    global A
    global PH
    global scaledPH
    A = []
    PH = []
    scaledPH = []

    def model(x, a, b, c, Freq):
        return a * np.sin(2 * np.pi * Freq * x) + b * np.cos(2 * np.pi * Freq * x) + c

    for i in range(3):
        wk1, wk2, nout, jmax, prob = lomb.fasper(time, magnitude, 6., 100.)
        fundamental_Freq = wk1[jmax]

        # fit to a_i sin(2pi f_i t) + b_i cos(2 pi f_i t) + b_i,o
        # a, b are the parameters we care about
        # c is a constant offset
        # f is the fundamental frequency
        def yfunc(Freq):
            def func(x, a, b, c):
                return a * np.sin(2 * np.pi * Freq * x) + b * np.cos(2 * np.pi * Freq * x) + c
            return func

        Atemp = []
        PHtemp = []
        popts = []
        for j in range(4):
            popt, pcov = curve_fit(yfunc((j + 1) * fundamental_Freq),
                                   time, magnitude)
            Atemp.append(np.sqrt(popt[0] ** 2 + popt[1] ** 2))
            PHtemp.append(np.arctan(popt[1] / popt[0]))
            popts.append(popt)
        A.append(Atemp)
        PH.append(PHtemp)

        for j in range(4):
            magnitude = np.array(magnitude) - model(time, popts[j][0],
                                                    popts[j][1], popts[j][2],
                                                    (j + 1) * fundamental_Freq)

    for ph in PH:
        scaledPH.append(np.array(ph) - ph[0])

    return A[0][0]
def fit(self, data):
    magnitude = data[0]
    time = data[1]

    global new_time
    global prob
    global period
    fx, fy, nout, jmax, prob = lomb.fasper(time, magnitude, self.ofac, 100.)
    period = fx[jmax]
    T = 1.0 / period
    new_time = np.mod(time, 2 * T) / (2 * T)
    return T
def fit(self, data):
    magnitude = data[0]
    time = data[1]

    fx, fy, nout, jmax, prob = lomb.fasper(time, magnitude, self.ofac, 100.)
    period = fx[jmax]
    T = 1.0 / period
    new_time = np.mod(time, 2 * T) / (2 * T)

    self.prob = prob
    self.new_time = new_time
    self.period = period
    self._value = T
def computeLombPeriodogram(self):
    detrend = False
    lombx = self.smpltime[self.idx_start:-1] / 1000
    if detrend is True:
        # static component (we remove the dynamic component of the signal -> detrending)
        z_stat = self.detrendRRI()
        lomby = np.asarray(z_stat.H)[0][self.idx_start:-1] / 1000
    else:
        lomby = self.series[self.idx_start:-1]

    fx, fy, nout, jmax, prob = lomb.fasper(lombx, lomby, 4., 2.)
    pwr = ((lomby - lomby.mean()) ** 2).sum() / (len(lomby) - 1)

    fy_smooth = np.array([])
    fx_smooth = np.array([])
    maxout = int(nout / 2)
    for i in xrange(0, maxout, 4):
        fy_smooth = np.append(fy_smooth,
                              (fy[i] + fy[i + 1] + fy[i + 2] + fy[i + 3])
                              / (nout / (2.0 * pwr)))
        fx_smooth = np.append(fx_smooth, fx[i])
    fy_smooth = fy_smooth / 4 * 1e3

    # pwr = ((self.series[self.idx_start:-1]/1000 - (self.series[self.idx_start:-1]/1000).mean())**2).sum() \
    #     / (len(self.series[self.idx_start:-1]) - 1)
    # fy = fy/(nout/(4.0*pwr))*1000
    self.psd_mag = fy
    self.psd_freq = fx

    # Calculate the power in the VLF, LF and HF frequency bands
    self.VLFpwr = 0
    self.LFpwr = 0
    self.HFpwr = 0
    for i, f in enumerate(self.psd_freq):
        if 0 < f <= 0.04:
            self.VLFpwr += self.psd_mag[i]
        elif 0.04 < f <= 0.15:
            self.LFpwr += self.psd_mag[i]
        elif 0.15 < f <= 0.4:
            self.HFpwr += self.psd_mag[i]
    self.VLFpwr *= 1000
    self.LFpwr *= 1000
    self.HFpwr *= 1000
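# The 4-bin averaging loop above can be vectorised with a reshape. A sketch
# that trims to whole 4-bin blocks first (so its edge handling can differ from
# the loop by at most one partial block); smooth_psd is a hypothetical name:
import numpy as np

def smooth_psd(fx, fy, nout, pwr):
    maxout = int(nout / 2)
    n = (maxout // 4) * 4                 # whole blocks of 4 bins only
    fy_smooth = fy[:n].reshape(-1, 4).sum(axis=1) / (nout / (2.0 * pwr)) / 4 * 1e3
    fx_smooth = fx[:n:4]                  # first frequency of each block
    return fx_smooth, fy_smooth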
def calculate_periodic_features(mjd2, data2):
    A = []
    PH = []

    def model(x, a, b, c, freq):
        return a * np.sin(2 * np.pi * freq * x) + b * np.cos(2 * np.pi * freq * x) + c

    for i in range(3):
        wk1, wk2, nout, jmax, prob = lomb.fasper(mjd2, data2, 6., 100.)
        fundamental_freq = wk1[jmax]

        # fit to a_i sin(2pi f_i t) + b_i cos(2 pi f_i t) + b_i,o
        # a, b are the parameters we care about
        # c is a constant offset
        # f is the fundamental frequency
        def yfunc(freq):
            def func(x, a, b, c):
                return a * np.sin(2 * np.pi * freq * x) + b * np.cos(2 * np.pi * freq * x) + c
            return func

        Atemp = []
        PHtemp = []
        popts = []
        for j in range(4):
            popt, pcov = optimize.curve_fit(yfunc((j + 1) * fundamental_freq),
                                            mjd2, data2)
            Atemp.append(np.sqrt(popt[0] ** 2 + popt[1] ** 2))
            PHtemp.append(np.arctan(popt[1] / popt[0]))
            popts.append(popt)
        A.append(Atemp)
        PH.append(PHtemp)

        for j in range(4):
            data2 = np.array(data2) - model(mjd2, popts[j][0], popts[j][1],
                                            popts[j][2],
                                            (j + 1) * fundamental_freq)

    scaledPH = []
    for ph in PH:
        scaledPH.append(np.array(ph) - ph[0])

    return A, PH, scaledPH
def spot(time, flux, xdim, ydim, steps=10, ofac=8):
    # grid of times to run over
    grid = np.linspace(min(time), max(time), steps)
    # arrays to store peak power (one slice per timestep window)
    pA = np.zeros([ydim, xdim, len(grid) - 1])
    pB = np.zeros([ydim, xdim, len(grid) - 1])
    for z in range(len(grid) - 1):
        start = np.min(np.where(time >= grid[z]))
        stop = np.max(np.where(time < grid[z + 1]))
        # Calculate peak power at the period of each component
        for i in range(xdim):
            for j in range(ydim):
                flx = flux[:, j, i]
                t = time[start:stop]
                f = flx[start:stop]
                m, b = sp.polyfit(t, f, 1)
                # linear subtraction
                fit = f - (m * t + b)
                # periodogram
                freq, pwr, nout, jmax, prob = lomb.fasper(t, fit, ofac, 1.)
                per = 1.0 / freq
                # peak power at each period
                pA[j, i, z] = max(pwr[(np.absolute(per - 0.26312) < 0.05)])
                pB[j, i, z] = max(pwr[(np.absolute(per - 0.70895) < 0.05)])
    pA = np.mean(pA, axis=2)  # take average across timesteps
    pB = np.mean(pB, axis=2)
    pA = pA / np.max(pA)  # normalize
    pB = pB / np.max(pB)
    return pA, pB
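# Helper sketch for the peak-power lookup inside spot(): the maximum
# periodogram power within a tolerance window around a target period. As with
# the max() calls above, an empty window raises an error (ValueError here);
# the function name is hypothetical.
import numpy as np

def peak_power_near(freq, pwr, target_period, tol=0.05):
    per = 1.0 / np.asarray(freq)
    sel = np.abs(per - target_period) < tol
    return np.asarray(pwr)[sel].max()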
normal_obs = obs_vals - mean_obs_p
normal_obs = normal_obs / standard_deviation_obs_p

#Calculate variance of pre-processed obs data - should be 1 if normal
#standard_dev_obs = np.std(normal_var_2, dtype=np.float64)
#variance_obs = standard_dev_obs**2
#print 'Variance - pre-processed obs data= ', variance_obs

#Convert obs time array into numpy array
obs_time = np.array(obs_time)

#Define sampling frequency
samp_freq = 24

#Obs. lomb
fa, fb, nout, jmax, prob2 = lomb.fasper(obs_time, normal_obs, ofac, samp_freq)
#Divide output by sampling frequency
fb = fb / samp_freq
fb = np.log(fb)

#Reverse arrays for smoothing
reversed_fb = fb[::-1]
obs_periods = 1. / fa
reversed_obs_periods = obs_periods[::-1]

#Smooth using exponential smoother; the last 2 arguments are the smoothing window & cut-off point
cut_obs_periods, smoothed_obs = modules.ema_keep_peaks(reversed_fb, reversed_obs_periods, 40, 12000000)
smoothed_obs = np.exp(smoothed_obs)
#ax.xaxis.set_ticks(ticks)
#ax.yaxis.set_ticks([])
plt.xlabel('Time (days)')
plt.ylabel('Flux (mJy)')
c = fname.split('_')[0]
plt.title('{0} lightcurve'.format(c))

ax = fig.add_subplot(gs[pltnum + 2])
plt.xlabel('Wavelength (days)')
plt.ylabel('Relative intensity')
plt.title('{0} Periodogram'.format(c))

time = numpy.array(time)
flux = numpy.array(flux)
# center the flux
flux = (flux - numpy.mean(flux)) / (1.0 * numpy.std(flux))
result = lomb.fasper(time, flux, 6.0, 0.5)
FIND_FREQUENCIES = int(result[2])
print FIND_FREQUENCIES
# filter out weird frequencies
spectral_results = filter(
    lambda elem: elem[0] < 0.55 and elem[0] > (2.0 / len(time)),
    zip(result[0], result[1]))
wavelengths = []
for frequency in sorted(spectral_results, key=itemgetter(1), reverse=True):
    wavelength = int(round(1.0 / frequency[0]))
    # check if wavelength is approximately in array
    include = True
    #for found_wavelength in wavelengths:
    #    if abs(found_wavelength[0] - wavelength) <= 4:
    #        include = False
    #if include:
from astropy.stats import LombScargle

#model = LombScargle(t, y*1e-3)
model = LombScargle(t, y)
power_ls = model.power(freqidays, method="fast", normalization="psd")
power_ls /= len(t)

#make LC, SC powspec
import lomb
osample = 10.
nyq = 283.
#nyq = 550

freq, amp, nout, jmax, prob = lomb.fasper(t, y, osample, 3.)
freq = 1000. * freq / 86.4
binn = freq[1] - freq[0]
fts = 2. * amp * np.var(y) / (np.sum(amp) * binn)
fts = scipy.ndimage.filters.gaussian_filter(fts, 4)

use = np.where(freq < nyq + 150)
freq = freq[use]
print(len(freq))
fts = fts[use]

#set up MCMC
rhotrue = 0.0264
rhosigma = 0.0008

def lnprob(params, y, gp):
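# Standalone sketch of the astropy call used above: PSD-normalized
# Lomb-Scargle power evaluated on a trial frequency grid, peaking at the
# injected frequency. All data here is synthetic and for illustration only.
import numpy as np
from astropy.stats import LombScargle

np.random.seed(1)
t = np.sort(np.random.uniform(0, 80, 500))
y = np.sin(2 * np.pi * 0.2 * t) + 0.2 * np.random.randn(t.size)

freqidays = np.linspace(0.01, 1.0, 2000)  # trial frequencies (cycles/day)
power_ls = LombScargle(t, y).power(freqidays, method="fast",
                                   normalization="psd")
power_ls /= len(t)                        # per-point scaling, as above
print(freqidays[np.argmax(power_ls)])     # ~0.2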
def plot():
    try:
        names
    except NameError:
        # Read in the model output
        model, names = readfile("GEOS_v90103_4x5_CV_logs.npy", "001")  # 001 represents CVO
        # Process the date
        year = (model[:, 0] // 10000)
        month = ((model[:, 0] - year * 10000) // 100)
        day = (model[:, 0] - year * 10000 - month * 100)
        hour = model[:, 1] // 100
        min = (model[:, 1] - hour * 100)
        doy = [datetime.datetime(np.int(year[i]), np.int(month[i]), np.int(day[i]),
                                 np.int(hour[i]), np.int(min[i]), 0) -
               datetime.datetime(2006, 1, 1, 0, 0, 0)
               for i in range(len(year))]
        since2006 = [doy[i].days + doy[i].seconds / (24. * 60. * 60.)
                     for i in range(len(doy))]

    # now read in the observations
    myfile = nappy.openNAFile('York_merge_Cape_verde_1hr_R1.na')
    myfile.readData()

    counter = 0
    fig = plt.figure(figsize=(20, 12))
    ax = plt.subplot(111)

    for species in species_list:
        # Give each species its exact model tag for convenience
        print species
        if species == 'ISOPRENE':
            species = 'TRA_6'
        elif species == 'ACETONE':
            species = 'ACET'
        elif species == 'TEMP':
            species = 'GMAO_TEMP'
        elif species == 'SURFACE_PRES':
            species = 'GMAO_PSFC'
        elif species == 'WINDSPEED':
            species = 'GMAO_WIND'
        elif species == 'SURFACE_SOLAR_RADIATION':
            species = 'GMAO_RADSW'
        elif species == 'ABS_HUMIDITY':
            species = 'GMAO_ABSH'
        elif species == 'REL_HUMIDITY':
            species = 'GMAO_RHUM'

        model_cut_switch = 0
        obs_switch = 0
        ofac = 1
        if species == 'O3':
            print 'yes'
            units = 'ppbV'
            first_label_pos = 3
            obs_data_name = 'Ozone mixing ratio (ppbV)_(Mean)'
            unit_cut = 1e9
            species_type = 'Conc.'
            actual_species_name = 'O3'
        elif species == 'CO':
            units = 'ppbV'
            first_label_pos = 1
            obs_data_name = 'CO mixing ratio (ppbV)_(Mean)'
            unit_cut = 1e9
            species_type = 'Conc.'
            actual_species_name = 'CO'
            ofac = 2.0001
        elif species == 'NO':
            units = 'pptV'
            first_label_pos = 1
            obs_data_name = 'NO mixing ratio (pptv)_(Mean)'
            unit_cut = 1e12
            species_type = 'Conc.'
            actual_species_name = 'NO'
        elif species == 'NO2':
            units = 'pptV'
            first_label_pos = 1
            obs_data_name = 'NO2 mixing ratio (pptv)_(Mean)'
            unit_cut = 1e12
            species_type = 'Conc.'
            actual_species_name = 'NO2'
        elif species == 'C2H6':
            units = 'pptV'
            first_label_pos = 1
            obs_data_name = 'ethane mixing ratio (pptV)_(Mean)'
            unit_cut = 1e12
            species_type = 'Conc.'
            actual_species_name = 'C2H6'
        elif species == 'C3H8':
            units = 'pptV'
            first_label_pos = 1
            obs_data_name = 'propane mixing ratio (pptV)_(Mean)'
            unit_cut = 1e12
            species_type = 'Conc.'
            actual_species_name = 'C3H8'
        elif species == 'DMS':
            units = 'pptV'
            first_label_pos = 1
            obs_data_name = 'dms mixing ratio (pptV)_(Mean)'
            unit_cut = 1e12
            species_type = 'Conc.'
            actual_species_name = 'DMS'
        elif species == 'TRA_6':  # Isoprene
            units = 'pptV'
            first_label_pos = 1
            obs_data_name = 'Isoprene (pptv)_(Mean)'
            unit_cut = 1e12
            species_type = 'Conc.'
        elif species == 'ACET':
            units = 'pptV'
            first_label_pos = 1
            obs_data_name = 'acetone mixing ratio (pptV)_(Mean)'
            unit_cut = 1e12
            species_type = 'Conc.'
            actual_species_name = 'Acetone'
        elif species == 'GMAO_TEMP':  # Temp from met fields
            units = 'K'
            first_label_pos = 3
            obs_data_name = 'Air Temperature (degC) Campbell_(Mean)'
            unit_cut = 1
            species_type = 'Temp.'
            actual_species_name = 'Surface Temperature'
            obs_switch = 1
        elif species == 'GMAO_PSFC':  # Surface Pressure
            units = 'hPa'
            first_label_pos = 3
            obs_data_name = 'Atmospheric Pressure (hPa) Campbell_(Mean)'
            unit_cut = 1
            species_type = 'Pres.'
            actual_species_name = 'Surface Pressure'
        elif species == 'GMAO_WIND':  # Wind speed derived from UWND and VWND
            def read_diff_species():
                k = names.index('GMAO_UWND')
                i = names.index('GMAO_VWND')
                model_cut = np.sqrt((model[:, k] ** 2) + (model[:, i] ** 2))
                return model_cut
            units = r'$ms^{-1}$'
            first_label_pos = 3
            obs_data_name = 'Wind Speed (m/s) Campbell_(Mean)'
            unit_cut = 1
            species_type = 'Wind Speed'
            model_cut_switch = 1
            actual_species_name = 'Surface Windspeed'
        elif species == 'GMAO_RADSW':  # Sensible heat flux from surface
            units = r'$Wm^{-2}$'
            first_label_pos = 3
            obs_data_name = 'Solar Radiation (Wm-2) Campbell_(Mean)'
            unit_cut = 1
            species_type = 'Solar Radiation'
            actual_species_name = 'Surface Solar Radiation'
        elif species == 'GMAO_ABSH':  # Absolute Humidity
            units = 'molec/cm-3'
            first_label_pos = 3
            obs_data_name = ''
            unit_cut = 1
            species_type = 'Absolute Humidity'
            actual_species_name = 'Absolute Humidity'
        elif species == 'GMAO_RHUM':  # Relative Humidity
            units = '%'
            first_label_pos = 3
            obs_data_name = 'Relative Humidity (%) Campbell_(Mean)'
            unit_cut = 1
            species_type = 'Relative Humidity'
            actual_species_name = 'Relative Humidity'

        k_var1 = myfile["VNAME"].index(obs_data_name)

        # OK, need to convert values from a list to a numpy array
        time = np.array(myfile['X'])
        if obs_switch == 0:
            var1 = np.array(myfile['V'][k_var1])
        elif obs_switch == 1:
            var1 = np.array(myfile['V'][k_var1]) + 273.15

        valids1 = var1 > 0
        time2 = time[valids1]
        var2 = var1[valids1]

        #Pre-normalise obs data for lomb analysis
        standard_deviation_obs_p = np.std(var2)
        mean_obs_p = np.mean(var2)
        normal_var2 = var2 - mean_obs_p
        normal_var2 = normal_var2 / standard_deviation_obs_p

        #Calculate variance of pre-processed obs data - should be 1 if normal
        #standard_dev_obs = np.std(normal_var_2, dtype=np.float64)
        #variance_obs = standard_dev_obs**2
        #print 'Variance - pre-processed obs data= ', variance_obs

        #Define sampling intervals
        samp_spacing = 1. / 24.
        #Convert model time array into numpy array
        since2006 = np.array(since2006)

        #Need to normalise model data also
        if model_cut_switch == 0:
            k = names.index(species)
            model_cut = model[:, k] * unit_cut
        if model_cut_switch == 1:
            model_cut = read_diff_species()
        standard_deviation_model_p = np.std(model_cut)
        mean_model_p = np.mean(model_cut)
        normal_model = model_cut - mean_model_p
        normal_model = normal_model / standard_deviation_model_p

        #Calculate variance of pre-processed model data - should be 1 if normal
        #standard_dev_model = np.std(normal_model, dtype=np.float64)
        #variance_model = standard_dev_model**2
        #print 'Variance - pre-processed model data= ', variance_model

        #Define sampling frequency
        samp_freq = 24

        #Lomb-Scargle plot
        #Plot axis period lines and labels
        annotate_line_y = np.arange(1e-10, 1e4, 1)
        horiz_line_100 = np.arange(0, 2000, 1)
        freq_year = [345] * len(annotate_line_y)
        array_100 = [100] * len(horiz_line_100)
        plt.plot(freq_year, annotate_line_y, 'r--', alpha=0.4)
        plt.text(345, 5, '1 Year', fontweight='bold')
        plt.plot(horiz_line_100, array_100, 'r--', alpha=0.4)
        plt.text(1024, 80, '100%', fontweight='bold')

        #Obs lomb
        fa, fb, nout, jmax, prob = lomb.fasper(time2, normal_var2, ofac, samp_freq)
        obs_sig = fa, fb, nout, ofac
        #Divide output by sampling frequency
        fb = fb / samp_freq
        len_fb = len(fb)
        zeropad = np.zeros(10000)
        fb = np.concatenate((fb, zeropad))
        padded_obs_period = np.concatenate((fa, zeropad))
        obs_smoothed = konnoOhmachiSmoothing(fb, padded_obs_period, bandwidth=40,
                                             count=1, enforce_no_matrix=True,
                                             max_memory_usage=512,
                                             normalize=False)
        obs_smoothed = obs_smoothed[:len_fb]

        #Calculate Nyquist frequency, Si and Si x 2 for normalisation checks.
        #nyquist_freq_lomb_obs = frequencies[-1]
        #Si_lomb_obs = np.mean(fb)*nyquist_freq_lomb_obs
        #print nyquist_freq_lomb_obs, Si_lomb_obs, Si_lomb_obs*2
        #plot up
        #plt.loglog(1./fa, fb,'kx',markersize=2, label='Cape Verde Obs. ')

        #Model lomb
        fx, fy, nout, jmax, prob2 = lomb.fasper(since2006, normal_model, ofac, samp_freq)
        model_sig = fx, fy, nout, ofac
        #Divide output by sampling frequency
        fy = fy / samp_freq
        len_fy = len(fy)
        fy = np.concatenate((fy, zeropad))
        padded_model_period = np.concatenate((fx, zeropad))
        model_smoothed = konnoOhmachiSmoothing(fy, padded_model_period, bandwidth=40,
                                               count=1, enforce_no_matrix=True,
                                               max_memory_usage=512,
                                               normalize=False)
        model_smoothed = model_smoothed[:len_fy]

        #Calculate Nyquist frequency, Si and Si x 2 for normalisation checks.
        #nyquist_freq_lomb_model = frequencies[-1]
        #Si_lomb_model = np.mean(fy)*nyquist_freq_lomb_model
        #print nyquist_freq_lomb_model, Si_lomb_model, Si_lomb_model*2
        #plot up
        #plt.loglog(1./fx, fy, 'gx', alpha = 0.75,markersize=2, label='GEOS v9.01.03 4x5 ')

        obs_periods = 1. / fa
        model_periods = 1. / fx

        #Which dataset is shorter?
        #obs longer than model
        if len(obs_smoothed) > len(model_smoothed):
            obs_smoothed = obs_smoothed[:len(model_smoothed)]
            freq_array = fx
            period_array = model_periods
        #model longer than obs
        if len(model_smoothed) > len(obs_smoothed):
            model_smoothed = model_smoothed[:len(obs_smoothed)]
            freq_array = fa
            period_array = obs_periods

        #calculate % of observations
        #covariance_array = np.hstack((fb,fy))
        compare_powers = model_smoothed / obs_smoothed
        compare_powers = compare_powers * 100
        #compare_powers = np.cov(covariance_array, y=None, rowvar=0, bias=0, ddof=None)
        #print compare_powers
        #window_size = 120
        #compare_powers = np.log(compare_powers)
        #compare_powers = movingaverage(compare_powers,window_size)
        #compare_powers = smooth(compare_powers,window_size)
        #compare_powers = 10**compare_powers
        #cut_powers = [y for x,y in zip(obs_periods,compare_powers) if x < end]
        #cut_periods = [x for x,y in zip(obs_periods,compare_powers) if x < end]
        #rest_powers = [y for x,y in zip(obs_periods,compare_powers) if x >= end]
        #rest_cut_periods = [x for x,y in zip(obs_periods,compare_powers) if x >= end]
        #smooth(compare_powers,window_size,obs_periods,species,counter)
        #for i in rest_powers:
        #    compare_powers = np.append(compare_powers, i)
        #for i in rest_cut_periods:
        #    cut_periods = np.append(cut_periods, i)
        #compare_powers = compare_powers.flatten()
        #compare_periods = combined_periods.flatten()
        #cut_periods = cut_periods.flatten()
        #print rest_cut_periods[-1]
        #print combined_periods[-1]
        #smoothed = konnoOhmachiSmoothing(compare_powers, freq_array, bandwidth=40, count=1,
        #                                 enforce_no_matrix=True, max_memory_usage=512,
        #                                 normalize=False)
        #xticks = [0.08,0.10,0.12,0.15,0.25,0.50,1,10,100,1000,5000]
        #ax.set_xticks(xticks)
        #ax.set_xticklabels(xticks)
        #locator = LinearLocator
        #ax.xaxis.set_major_locator(locator)
        #standard_deviation_analysis = np.std(compare_powers)
        #mean_analysis = np.mean(compare_powers)
        #normal_analysis = compare_powers-mean_analysis
        #normal_analysis = normal_analysis/standard_deviation_analysis

        ax.set_xscale('log', basex=10)
        ax.set_yscale('log', basey=10)
        plt.plot(period_array, compare_powers, color=colour_list[counter],
                 marker='x', alpha=0.75, markersize=2, label=species)
        #ax.plot(rest_cut_periods, rest_powers, color=colour_list[counter], marker='x', alpha=0.75, markersize=2, label=species)
        #percent1 = period_percent_diff(np.min(obs_periods),1,fb,fy,obs_periods,model_periods)
        #percent2 = period_percent_diff(1,2,fb,fy,obs_periods,model_periods)
        #percent3 = period_percent_diff(2,7,fb,fy,obs_periods,model_periods)
        plt.grid(True)
        ax.xaxis.set_major_formatter(FormatStrFormatter('%.i'))
        ax.yaxis.set_major_formatter(FormatStrFormatter('%.i'))
        leg = plt.legend(loc=4)
        leg.get_frame().set_alpha(0.4)
        #plt.text(1e-2, 3000,'Period: 2 hours to 1 day, a %% Diff. of: %.2f%%' %(percent1), fontweight='bold')
        #plt.text(1e-2, 500,'Period: 1 day to 2 days, a %% Diff. of: %.2f%%' %(percent2), fontweight='bold')
        #plt.text(1e-2, 90,'Period: 2 days to 7 days, a %% Diff. of: %.2f%%' %(percent3), fontweight='bold')
        plt.ylim(0.05, 10000)
        plt.xlabel('Period (Days)')
        plt.ylabel('Percent of Obs. PSD (%)')
        plt.title('% PSD of Model compared to Obs.')
        counter += 1

    #plt.savefig('O3_capeverde_comparison_plots.ps', dpi=200)
    plt.show()
def plot_quicklook(lc, ticid, breakpoints, target_list, save_data=True, outdir=None):
    if outdir is None:
        outdir = os.path.join(self.PACKAGEDIR, 'outputs')

    time, flux, flux_err = lc.time, lc.flux, lc.flux_err

    model = BoxLeastSquares(time, flux)
    results = model.autopower(0.16, minimum_period=2., maximum_period=21.)
    period = results.period[np.argmax(results.power)]
    t0 = results.transit_time[np.argmax(results.power)]
    depth = results.depth[np.argmax(results.power)]
    depth_snr = results.depth_snr[np.argmax(results.power)]

    '''
    Plot Filtered Light Curve
    -------------------------
    '''
    plt.subplot2grid((4, 4), (1, 0), colspan=2)
    plt.plot(time, flux, 'k', label="filtered")
    for val in breakpoints:
        plt.axvline(val, c='b', linestyle='dashed')
    plt.legend()
    plt.ylabel('Normalized Flux')
    plt.xlabel('Time')

    osample = 5.
    nyq = 283.

    # calculate FFT
    freq, amp, nout, jmax, prob = lomb.fasper(time, flux, osample, 3.)
    freq = 1000. * freq / 86.4
    bin = freq[1] - freq[0]
    fts = 2. * amp * np.var(flux * 1e6) / (np.sum(amp) * bin)

    use = np.where(freq < nyq + 150)
    freq = freq[use]
    fts = fts[use]

    # calculate ACF
    acf = np.correlate(fts, fts, 'same')
    freq_acf = np.linspace(-freq[-1], freq[-1], len(freq))

    fitT = build_ktransit_model(ticid=ticid, lc=lc, vary_transit=False)
    dur = _individual_ktransit_dur(fitT.time, fitT.transitmodel)

    fts1 = fts / np.max(fts)
    fts2 = scipy.ndimage.filters.gaussian_filter(fts / np.max(fts), 5)
    fts3 = scipy.ndimage.filters.gaussian_filter(fts / np.max(fts), 50)

    '''
    Plot Periodogram
    ----------------
    '''
    plt.subplot2grid((4, 4), (0, 2), colspan=2, rowspan=4)
    plt.loglog(freq, fts / np.max(fts))
    plt.loglog(freq, scipy.ndimage.filters.gaussian_filter(fts / np.max(fts), 5),
               color='C1', lw=2.5)
    plt.loglog(freq, scipy.ndimage.filters.gaussian_filter(fts / np.max(fts), 50),
               color='r', lw=2.5)
    plt.axvline(283, -1, 1, ls='--', color='k')
    plt.xlabel("Frequency [uHz]")
    plt.ylabel("Power")
    plt.xlim(10, 400)
    plt.ylim(1e-4, 1e0)

    # annotate with transit info
    font = {'family': 'monospace', 'size': 10}
    plt.text(10**1.04, 10**-3.50, f'depth = {depth:.4f}',
             fontdict=font).set_bbox(dict(facecolor='white', alpha=.9, edgecolor='none'))
    plt.text(10**1.04, 10**-3.62, f'depth_snr = {depth_snr:.4f}',
             fontdict=font).set_bbox(dict(facecolor='white', alpha=.9, edgecolor='none'))
    plt.text(10**1.04, 10**-3.74, f'period = {period:.3f} days',
             fontdict=font).set_bbox(dict(facecolor='white', alpha=.9, edgecolor='none'))
    plt.text(10**1.04, 10**-3.86, f't0 = {t0:.3f}',
             fontdict=font).set_bbox(dict(facecolor='white', alpha=.9, edgecolor='none'))

    try:
        # annotate with stellar params
        # won't work for TIC IDs not in the list
        if isinstance(ticid, str):
            ticid = int(re.search(r'\d+', str(ticid)).group())
        Gmag = target_list[target_list['ID'] == ticid]['GAIAmag'].values[0]
        Teff = target_list[target_list['ID'] == ticid]['Teff'].values[0]
        R = target_list[target_list['ID'] == ticid]['rad'].values[0]
        M = target_list[target_list['ID'] == ticid]['mass'].values[0]
        plt.text(10**1.7, 10**-3.50, rf"G mag = {Gmag:.3f}",
                 fontdict=font).set_bbox(dict(facecolor='white', alpha=.9, edgecolor='none'))
        plt.text(10**1.7, 10**-3.62, rf"Teff = {int(Teff)} K",
                 fontdict=font).set_bbox(dict(facecolor='white', alpha=.9, edgecolor='none'))
        plt.text(10**1.7, 10**-3.74, rf"R = {R:.3f} $R_\odot$",
                 fontdict=font).set_bbox(dict(facecolor='white', alpha=.9, edgecolor='none'))
        plt.text(10**1.7, 10**-3.86, rf"M = {M:.3f} $M_\odot$",
                 fontdict=font).set_bbox(dict(facecolor='white', alpha=.9, edgecolor='none'))
    except:
        pass

    # plot ACF inset
    ax = plt.gca()
    axins = inset_axes(ax, width=2.0, height=1.4)
    axins.plot(freq_acf, acf)
    axins.set_xlim(1, 25)
    axins.set_xlabel("ACF [uHz]")

    '''
    Plot BLS
    --------
    '''
    plt.subplot2grid((4, 4), (2, 0), colspan=2)
    plt.plot(results.period, results.power, "k", lw=0.5)
    plt.xlim(results.period.min(), results.period.max())
    plt.xlabel("period [days]")
    plt.ylabel("log likelihood")

    # Highlight the harmonics of the peak period
    plt.axvline(period, alpha=0.4, lw=4)
    for n in range(2, 10):
        plt.axvline(n * period, alpha=0.4, lw=1, linestyle="dashed")
        plt.axvline(period / n, alpha=0.4, lw=1, linestyle="dashed")

    phase = (t0 % period) / period
    foldedtimes = (((time - phase * period) / period) % 1)
    foldedtimes[foldedtimes > 0.5] -= 1
    foldtimesort = np.argsort(foldedtimes)
    foldfluxes = flux[foldtimesort]
    plt.subplot2grid((4, 4), (3, 0), colspan=2)
    plt.scatter(foldedtimes, flux, s=2)
    plt.plot(np.sort(foldedtimes),
             scipy.ndimage.filters.median_filter(foldfluxes, 40), lw=2,
             color='r', label=f'P={period:.2f} days, dur={dur:.2f} hrs')
    plt.xlabel('Phase')
    plt.ylabel('Flux')
    plt.xlim(-0.5, 0.5)
    plt.ylim(-0.0025, 0.0025)
    plt.legend(loc=0)

    fig = plt.gcf()
    fig.patch.set_facecolor('white')
    fig.suptitle(f'{ticid}', fontsize=14)
    fig.set_size_inches(10, 7)

    if save_data:
        np.savetxt(outdir + '/timeseries/' + str(ticid) + '.dat.ts',
                   np.transpose([time, flux]), fmt='%.8f', delimiter=' ')
        np.savetxt(outdir + '/fft/' + str(ticid) + '.dat.ts.fft',
                   np.transpose([freq, fts]), fmt='%.8f', delimiter=' ')
        with open(os.path.join(outdir, "transit_stats.txt"), "a+") as file:
            file.write(f"{ticid} {depth} {depth_snr} {period} {t0} {dur}\n")

    return fig
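# Compact sketch of the BLS peak extraction at the top of plot_quicklook(),
# run on a toy box transit (astropy's BoxLeastSquares; the astropy.timeseries
# import path below is an assumption, and the data is synthetic):
import numpy as np
from astropy.timeseries import BoxLeastSquares

t = np.linspace(0, 27, 2000)
flux = 1.0 - 0.002 * ((t % 5.0) < 0.16)   # 5-day period, 0.16-day box dips
results = BoxLeastSquares(t, flux).autopower(0.16, minimum_period=2.,
                                             maximum_period=21.)
best = np.argmax(results.power)
period, t0 = results.period[best], results.transit_time[best]
depth, depth_snr = results.depth[best], results.depth_snr[best]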
plt.title('HTAP Model %s V Time at %s' % (species, location))

#Define sampling frequency
samp_freq = 24

#Lomb-Scargle plot
ax3 = fig.add_subplot(2, 1, 2)

#Plot axis period lines and labels
annotate_line_y = np.arange(1e-10, 1e4, 1)
freq_year = [345] * len(annotate_line_y)
plt.plot(freq_year, annotate_line_y, 'r--', alpha=0.4)
plt.text(345, 1e-7, '1 Year', fontweight='bold')

#Model lomb
freq_obs, power_obs, nout, jmax, prob = lomb.fasper(obs_time, obs_var, 1., samp_freq)
freq_obs_full, power_obs_full, nout, jmax, prob = lomb.fasper(time, obs_var_full, 1., samp_freq)
freq_camchem3311, power_camchem3311, nout, jmax, prob2 = lomb.fasper(camchem_3311_time, norm_camchem_3311_o3, 1., samp_freq)
freq_camchem3514, power_camchem3514, nout, jmax, prob2 = lomb.fasper(camchem_3514_time, norm_camchem_3514_o3, 1., samp_freq)
freq_cam_sr1, power_cam_sr1, nout, jmax, prob2 = lomb.fasper(cam_sr1_time, norm_cam_sr1_o3, 1., samp_freq)
freq_chaser, power_chaser, nout, jmax, prob2 = lomb.fasper(chaser_time, norm_chaser_o3, 1., samp_freq)
freq_frsgcuci, power_frsgcuci, nout, jmax, prob2 = lomb.fasper(frsgcuci_time, norm_frsgcuci_o3, 1., samp_freq)
freq_gemaq, power_gemaq, nout, jmax, prob2 = lomb.fasper(gemaq_time, norm_gemaq_o3, 1., samp_freq)
freq_geoschem, power_geoschem, nout, jmax, prob2 = lomb.fasper(geoschem_time, norm_geoschem_o3, 1., samp_freq)
freq_giss, power_giss, nout, jmax, prob2 = lomb.fasper(giss_time, norm_giss_o3, 1., samp_freq)
freq_giss_alt, power_giss_alt, nout, jmax, prob2 = lomb.fasper(giss_alt_time, norm_giss_alt_o3, 1., samp_freq)
freq_inca, power_inca, nout, jmax, prob2 = lomb.fasper(inca_time, norm_inca_o3, 1., samp_freq)
freq_llnl, power_llnl, nout, jmax, prob2 = lomb.fasper(llnl_time, norm_llnl_o3, 2.0001, samp_freq)
freq_mozart, power_mozart, nout, jmax, prob2 = lomb.fasper(mozart_time, norm_mozart_o3, 1., samp_freq)
freq_mozech, power_mozech, nout, jmax, prob2 = lomb.fasper(mozech_time, norm_mozech_o3, 1., samp_freq)
freq_oslo, power_oslo, nout, jmax, prob2 = lomb.fasper(oslo_time, norm_oslo_o3, 1., samp_freq)
def plot(species):
    # model_cut_switch defaults to 0; to apply more complicated cuts to the
    # model field, set model_cut_switch = 1 in the species definitions below.
    # Likewise for obs_switch.
    model_cut_switch = 0
    obs_switch = 0
    ofac = 1
    if species == 'O3':
        units = 'ppbV'
        first_label_pos = 3
        obs_data_name = 'Ozone mixing ratio (ppbV)_(Mean)'
        unit_cut = 1e9
        species_type = 'Conc.'
        actual_species_name = 'O3'
        ofac = 2.0001
    elif species == 'CO':
        units = 'ppbV'
        first_label_pos = 1
        obs_data_name = 'CO mixing ratio (ppbV)_(Mean)'
        unit_cut = 1e9
        species_type = 'Conc.'
        actual_species_name = 'CO'
        ofac = 2.0001
    elif species == 'NO':
        units = 'pptV'
        first_label_pos = 1
        obs_data_name = 'NO mixing ratio (pptv)_(Mean)'
        unit_cut = 1e12
        species_type = 'Conc.'
        actual_species_name = 'NO'
    elif species == 'NO2':
        units = 'pptV'
        first_label_pos = 1
        obs_data_name = 'NO2 mixing ratio (pptv)_(Mean)'
        unit_cut = 1e12
        species_type = 'Conc.'
        actual_species_name = 'NO2'
    elif species == 'C2H6':
        units = 'pptV'
        first_label_pos = 1
        obs_data_name = 'ethane mixing ratio (pptV)_(Mean)'
        unit_cut = 1e12
        species_type = 'Conc.'
        actual_species_name = 'C2H6'
    elif species == 'C3H8':
        units = 'pptV'
        first_label_pos = 1
        obs_data_name = 'propane mixing ratio (pptV)_(Mean)'
        unit_cut = 1e12
        species_type = 'Conc.'
        actual_species_name = 'C3H8'
    elif species == 'DMS':
        units = 'pptV'
        first_label_pos = 1
        obs_data_name = 'dms mixing ratio (pptV)_(Mean)'
        unit_cut = 1e12
        species_type = 'Conc.'
        actual_species_name = 'DMS'
    elif species == 'TRA_6':  # Isoprene
        units = 'pptV'
        first_label_pos = 1
        obs_data_name = 'Isoprene (pptv)_(Mean)'
        unit_cut = 1e12
        species_type = 'Conc.'
        actual_species_name = 'Isoprene'
    elif species == 'ACET':
        units = 'pptV'
        first_label_pos = 1
        obs_data_name = 'acetone mixing ratio (pptV)_(Mean)'
        unit_cut = 1e12
        species_type = 'Conc.'
        actual_species_name = 'Acetone'
    elif species == 'GMAO_TEMP':  # Temp from met fields
        units = 'K'
        first_label_pos = 3
        obs_data_name = 'Air Temperature (degC) Campbell_(Mean)'
        unit_cut = 1
        species_type = 'Temp.'
        actual_species_name = 'Surface Temperature'
        obs_switch = 1
    elif species == 'GMAO_PSFC':  # Surface Pressure
        units = 'hPa'
        first_label_pos = 3
        obs_data_name = 'Atmospheric Pressure (hPa) Campbell_(Mean)'
        unit_cut = 1
        species_type = 'Pres.'
        actual_species_name = 'Surface Pressure'
    elif species == 'GMAO_WIND':  # Wind speed derived from UWND and VWND
        def read_diff_species():
            k = names.index('GMAO_UWND')
            i = names.index('GMAO_VWND')
            model_cut = np.sqrt((model[:, k] ** 2) + (model[:, i] ** 2))
            return model_cut
        units = r'$ms^{-1}$'
        first_label_pos = 3
        obs_data_name = 'Wind Speed (m/s) Campbell_(Mean)'
        unit_cut = 1
        species_type = 'Wind Speed'
        model_cut_switch = 1
        actual_species_name = 'Surface Windspeed'
    elif species == 'GMAO_RADSW':  # Sensible heat flux from surface
        units = r'$Wm^{-2}$'
        first_label_pos = 3
        obs_data_name = 'Solar Radiation (Wm-2) Campbell_(Mean)'
        unit_cut = 1
        species_type = 'Solar Radiation'
        actual_species_name = 'Surface Solar Radiation'
    elif species == 'GMAO_ABSH':  # Absolute Humidity
        units = 'molec/cm-3'
        first_label_pos = 3
        obs_data_name = ''
        unit_cut = 1
        species_type = 'Absolute Humidity'
        actual_species_name = 'Absolute Humidity'
    elif species == 'GMAO_RHUM':  # Relative Humidity
        units = '%'
        first_label_pos = 3
        obs_data_name = 'Relative Humidity (%) Campbell_(Mean)'
        unit_cut = 1
        species_type = 'Relative Humidity'
        actual_species_name = 'Relative Humidity'

    # reads in the model data
    def readfile(filename, location):
        read = np.load(filename)
        names = read[0, 2:]
        locs = np.where(read[:, 1] == location)
        big = read[locs]
        valid_data = big[:, 2:]
        names = names.tolist()
        valid_data = np.float64(valid_data)
        return valid_data, names

    def readfile_gaw(filename, location):
        read = np.load(filename)
        names = read[0, 1:]
        locs = np.where(read[:, 0] == location)
        big = read[locs]
        print big
        valid_data = big[:, 1:]
        names = names.tolist()
        valid_data = np.float64(valid_data)
        return valid_data, names

    try:
        names
    except NameError:
        # Read in the model output
        model, names = readfile("GEOS_v90103_nested_europe_GAW_logs.npy", "112")  # 112 represents Mace Head
        model2, gaw_names = readfile_gaw("gaw_logs_O3.npy", "112")
        # Process the date
        year = (model[:, 0] // 10000)
        month = ((model[:, 0] - year * 10000) // 100)
        day = (model[:, 0] - year * 10000 - month * 100)
        hour = model[:, 1] // 100
        min = (model[:, 1] - hour * 100)
        doy = [datetime.datetime(np.int(year[i]), np.int(month[i]), np.int(day[i]),
                                 np.int(hour[i]), np.int(min[i]), 0) -
               datetime.datetime(2006, 1, 1, 0, 0, 0)
               for i in range(len(year))]
        since2006 = [doy[i].days + doy[i].seconds / (24. * 60. * 60.)
                     for i in range(len(doy))]

        # Process the gaw baseline date
        year = (model2[:, 0] // 10000)
        month = ((model2[:, 0] - year * 10000) // 100)
        day = (model2[:, 0] - year * 10000 - month * 100)
        hour = model2[:, 1] // 100
        min = (model2[:, 1] - hour * 100)
        doy = [datetime.datetime(np.int(year[i]), np.int(month[i]), np.int(day[i]),
                                 np.int(hour[i]), np.int(min[i]), 0) -
               datetime.datetime(2006, 1, 1, 0, 0, 0)
               for i in range(len(year))]
        since2006_gaw = [doy[i].days + doy[i].seconds / (24. * 60. * 60.)
                         for i in range(len(doy))]

    # now read in the observations
    date, time, vals = NOAA_data_reader_mace_head('O3_mace_head_ppbV.txt')
    # OK, need to convert values from a list to a numpy array
    valid = vals >= 0
    vals = vals[valid]
    date = date[valid]
    time = time[valid]
    # Process the date
    year = (date // 10000)
    month = ((date - year * 10000) // 100)
    day = (date - year * 10000 - month * 100)
    hour = time // 100
    min = (time - hour * 100)
    doy = [datetime.datetime(np.int(year[i]), np.int(month[i]), np.int(day[i]),
                             np.int(hour[i]), np.int(min[i]), 0) -
           datetime.datetime(2006, 1, 1, 0, 0, 0)
           for i in range(len(year))]
    since2006_obs = [doy[i].days + doy[i].seconds / (24. * 60. * 60.)
                     for i in range(len(doy))]

    since2006_obs = np.array(since2006_obs)
    since2006_obs_gaw = since2006_obs
    valids2 = since2006_obs <= np.max(since2006)
    since2006_obs = since2006_obs[valids2]
    var3 = vals[valids2]
    valids3 = since2006_obs >= np.min(since2006)
    since2006_obs = since2006_obs[valids3]
    var4 = var3[valids3]

    #Pre-normalise obs data for lomb analysis
    standard_deviation_obs_p = np.std(var4)
    mean_obs_p = np.mean(var4)
    normal_var2 = var4 - mean_obs_p
    normal_var2 = normal_var2 / standard_deviation_obs_p

    #Pre-normalise gaw obs data for lomb analysis
    standard_deviation_obs_p_gaw = np.std(vals)
    mean_obs_p_gaw = np.mean(vals)
    normal_var2_gaw = vals - mean_obs_p_gaw
    normal_var2_gaw = normal_var2_gaw / standard_deviation_obs_p_gaw
    print 'obs', normal_var2_gaw

    #Calculate variance of pre-processed obs data - should be 1 if normal
    #standard_dev_obs = np.std(normal_var_2, dtype=np.float64)
    #variance_obs = standard_dev_obs**2
    #print 'Variance - pre-processed obs data= ', variance_obs

    #Define sampling intervals
    samp_spacing = 1. / 24.

    #Convert model time arrays into numpy arrays
    since2006 = np.array(since2006)
    since2006_gaw = np.array(since2006_gaw)

    #Need to normalise model data also
    if model_cut_switch == 0:
        k = names.index(species)
        model_cut = model[:, k] * unit_cut
        print 'model cut', model_cut
    if model_cut_switch == 1:
        model_cut = read_diff_species()
    standard_deviation_model_p = np.std(model_cut)
    mean_model_p = np.mean(model_cut)
    normal_model = model_cut - mean_model_p
    normal_model = normal_model / standard_deviation_model_p
    print 'normal model', normal_model

    #Need to normalise model baseline data also
    j = gaw_names.index(species)
    model_cut_gaw = model2[:, j] * unit_cut
    standard_deviation_model_p_gaw = np.std(model_cut_gaw)
    mean_model_p_gaw = np.mean(model_cut_gaw)
    normal_model_gaw = model_cut_gaw - mean_model_p_gaw
    normal_model_gaw = normal_model_gaw / standard_deviation_model_p_gaw
    #print normal_model_gaw

    #Calculate variance of pre-processed model data - should be 1 if normal
    #standard_dev_model = np.std(normal_model, dtype=np.float64)
    #variance_model = standard_dev_model**2
    #print 'Variance - pre-processed model data= ', variance_model

    #Plot them all up.
    fig = plt.figure(figsize=(20, 12))
    fig.patch.set_facecolor('white')

    #Plot up standard conc. v time plot
    #ax1 = fig.add_subplot(2, 1, 1)
    #fig.subplots_adjust(hspace=0.3)
    #plt.plot(since2006_obs, var4, color='black', label='Mace Head Obs.')
    #plt.plot(since2006, model_cut, color='green', label='GEOS v9.01.03 Nested Europe ')
    #plt.grid(True)
    #leg = plt.legend(loc=first_label_pos)
    #leg.get_frame().set_alpha(0.4)
    #plt.xlabel('Time (Days since 2006)')
    #print units
    #plt.ylabel('%s (%s)' % (species_type, units))
    #plt.title('%s V Time' % (actual_species_name))

    #Define sampling frequency
    samp_freq = 24

    #Lomb-Scargle plot
    ax = fig.add_subplot(1, 1, 1)

    #Plot axis period lines and labels
    annotate_line_y = np.arange(1e-10, 1e4, 1)
    freq_year = [345] * len(annotate_line_y)
    plt.plot(freq_year, annotate_line_y, 'r--', alpha=0.4)
    plt.text(345, 1e-10, '1 Year', fontweight='bold')

    #Obs lomb
    fa, fb, nout, jmax, prob = lomb.fasper(since2006_obs, normal_var2, ofac, samp_freq)
    #Divide output by sampling frequency
    fb = fb / samp_freq
    fb = np.log(fb)
    reversed_fb = fb[::-1]
    obs_periods = 1. / fa  # periods are the reciprocal frequencies
    reversed_obs_periods = obs_periods[::-1]
    obs_periods, obs_smoothed = ema(reversed_fb, reversed_obs_periods, 150, 20)
    #obs_smoothed = savitzky_golay(fb, window_size=301, order=1)
    obs_smoothed = np.exp(obs_smoothed)

    #Gaw obs lomb
    fa_gaw, fb_gaw, nout, jmax, prob = lomb.fasper(since2006_obs_gaw, normal_var2_gaw, ofac, samp_freq)
    #Divide output by sampling frequency
    fb_gaw = fb_gaw / samp_freq
    fb_gaw = np.log(fb_gaw)
    reversed_fb_gaw = fb_gaw[::-1]
    obs_periods_gaw = 1. / fa_gaw  # periods are the reciprocal frequencies
    reversed_obs_periods_gaw = obs_periods_gaw[::-1]
    obs_periods_gaw, obs_smoothed_gaw = ema(reversed_fb_gaw, reversed_obs_periods_gaw, 150, 20)
    #obs_smoothed_gaw = savitzky_golay(fb_gaw, window_size=301, order=1)
    obs_smoothed_gaw = np.exp(obs_smoothed_gaw)
    print 'obs_after_smooth', obs_smoothed_gaw

    #Calculate Nyquist frequency, Si and Si x 2 for normalisation checks.
    #nyquist_freq_lomb_obs = frequencies[-1]
    #Si_lomb_obs = np.mean(fb)*nyquist_freq_lomb_obs
    #print nyquist_freq_lomb_obs, Si_lomb_obs, Si_lomb_obs*2
    #plot up
    #plt.loglog(1./fa, fb,'kx',markersize=2, label='Mace Head Obs. ')

    #Model lomb
    #print type(normal_model)
    fx, fy, nout, jmax, prob2 = lomb.fasper(since2006, normal_model, ofac, samp_freq)
    #Divide output by sampling frequency
    fy = fy / samp_freq
    fy = np.log(fy)
    reversed_fy = fy[::-1]
    model_periods = 1. / fx
    reversed_model_periods = model_periods[::-1]
    model_periods, model_smoothed = ema(reversed_fy, reversed_model_periods, 150, 20)
    #model_smoothed = savitzky_golay(fy, window_size=301, order=1)
    model_smoothed = np.exp(model_smoothed)

    #Gaw baseline lomb
    #print type(normal_model_gaw)
    #print normal_model_gaw
    fx_gaw, fy_gaw, nout, jmax, prob2 = lomb.fasper(since2006_gaw, normal_model_gaw, ofac, samp_freq)
    #Divide output by sampling frequency
    fy_gaw = fy_gaw / samp_freq
    fy_gaw = np.log(fy_gaw)
    reversed_fy_gaw = fy_gaw[::-1]
    model_periods_gaw = 1. / fx_gaw
    reversed_model_periods_gaw = model_periods_gaw[::-1]
    model_periods_gaw, model_smoothed_gaw = ema(reversed_fy_gaw, reversed_model_periods_gaw, 150, 20)
    #model_smoothed_gaw = savitzky_golay(fy_gaw, window_size=301, order=1)
    model_smoothed_gaw = np.exp(model_smoothed_gaw)
    print 'model_after_smooth', model_smoothed_gaw

    #Calculate Nyquist frequency, Si and Si x 2 for normalisation checks.
    #nyquist_freq_lomb_model = frequencies[-1]
    #Si_lomb_model = np.mean(fy)*nyquist_freq_lomb_model
    #print nyquist_freq_lomb_model, Si_lomb_model, Si_lomb_model*2
    #plot up
    #print obs_smoothed
    #print model_smoothed

    #Which dataset is shorter?
    #obs longer than model
    if len(obs_smoothed) > len(model_smoothed):
        print 'yes'
        obs_smoothed = obs_smoothed[:len(model_smoothed)]
        period_array = model_periods[:len(model_smoothed)]  # x-axis must be periods
    #model longer than obs
    elif len(model_smoothed) >= len(obs_smoothed):
        print 'yes'
        model_smoothed = model_smoothed[:len(obs_smoothed)]
        period_array = obs_periods[:len(obs_smoothed)]

    if len(obs_smoothed_gaw) > len(model_smoothed_gaw):
        print 'yes'
        obs_smoothed_gaw = obs_smoothed_gaw[:len(model_smoothed_gaw)]
        period_array_gaw = model_periods_gaw[:len(model_smoothed_gaw)]
    #model longer than obs
    elif len(model_smoothed_gaw) >= len(obs_smoothed_gaw):
        print 'gaw'
        model_smoothed_gaw = model_smoothed_gaw[:len(obs_smoothed_gaw)]
        period_array_gaw = obs_periods_gaw[:len(obs_smoothed_gaw)]

    print 'model_smoothed', model_smoothed_gaw
    print 'obs_smoothed', obs_smoothed_gaw

    #calculate % of observations
    #covariance_array = np.hstack((fb, fy))
    compare_powers = model_smoothed / obs_smoothed
    compare_powers = compare_powers * 100
    compare_powers_gaw = model_smoothed_gaw / obs_smoothed_gaw
    compare_powers_gaw = compare_powers_gaw * 100
    #print compare_powers_gaw

    ax.set_xscale('log', basex=10)
    ax.set_yscale('log', basey=10)
    plt.plot(period_array, compare_powers, color='green', marker='x',
             alpha=0.75, markersize=2, label='O3 % Diff Spatial correction.')
    plt.plot(period_array_gaw, compare_powers_gaw, color='black', marker='x',
             alpha=0.3, markersize=2, label='O3 % Diff Baseline.')
    plt.grid(True)
    ax.xaxis.set_major_formatter(FormatStrFormatter('%.i'))
    ax.yaxis.set_major_formatter(FormatStrFormatter('%.i'))
    leg = plt.legend(loc=4, prop={'size': 21})
    leg.get_frame().set_alpha(0.4)
    plt.xlim(0.05, 1e1)
    plt.ylim(0.1, 1000)
    plt.xlabel('Period (Days)', fontsize=21)
    plt.ylabel('Percent of Obs. PSD (%)', fontsize=21)
    plt.title('% PSD of Model compared to Obs.', fontsize=21)

    #percent1 = period_percent_diff(np.min(obs_periods),1,fb,fy,obs_periods,model_periods)
    #percent2 = period_percent_diff(1,2,fb,fy,obs_periods,model_periods)
    #percent3 = period_percent_diff(2,7,fb,fy,obs_periods,model_periods)
    #plt.grid(True)
    #leg = plt.legend(loc=7)
    #leg.get_frame().set_alpha(0.4)
    #plt.text(1e-2, 3000,'Period: 2 hours to 1 day, a %% Diff. of: %.2f%%' %(percent1), fontweight='bold')
    #plt.text(1e-2, 500,'Period: 1 day to 2 days, a %% Diff. of: %.2f%%' %(percent2), fontweight='bold')
    #plt.text(1e-2, 90,'Period: 2 days to 7 days, a %% Diff. of: %.2f%%' %(percent3), fontweight='bold')
    #plt.ylim(1e-10, 1e4)
    #plt.xlabel('Period (Days)')
    #plt.ylabel(r'PSD $(ppb^{2}/days^{-1})$')
    #plt.title('Lomb-Scargle %s Power V Period' % actual_species_name)

    #plt.savefig('O3_capeverde_comparison_plots.ps', dpi=200)
    plt.show()
def ran_trials(file0, nperm=4, ofac=10., lowfreq=0, hifreq=1e9, hifac=1.0,
               metrics=[lambda amp, df: amp, lambda amp, df: amp ** 2.]):
    '''
    Determine bootstrap significance threshold for FT of given data.

    Keyword arguments:
    file0   -- filename of data to be shuffled and FTed (2 columns, e.g. time,flux)
    nperm   -- number of random shufflings of data to make, e.g. 1000 or 1e4
               (default 4, for speed)
    ofac    -- the FT oversampling factor (default 10.)
    lowfreq -- lower limit of frequency range (in Hz; doesn't save time but
               helps with interpretation)
    hifreq  -- upper limit of frequency range (in Hz; doesn't save time but
               helps with interpretation)
    hifac   -- upper limit of the *calculated* frequency range (as a fraction
               of the Nyquist frequency, [0-1]; default 1)
    metrics -- list of passed (lambda) functions, each taking the amplitude
               spectrum and frequency resolution df, that are evaluated along
               randomized FTs, the max values being recorded
               (default amplitude and power)

    Outputs:
    filename.ft   -- FT of original data, and FT processed by all metrics
    filename.hist -- record of highest values of computed metric for each run
    '''
    # If FT_orig = 1, then it will first compute and store an FT
    # of the original data set
    FT_orig = 1
    ofile0 = file0[0:-4]

    # All the time() calls are just to obsess about how long different
    # parts of the computations take

    # Read in original file
    t0 = datetime.datetime.now()
    print '\nReading in', file0, '...'
    vars = np.loadtxt(file0)
    t1 = datetime.datetime.now()
    print 'Elapsed time: ', t1 - t0, '\n'

    t = vars[:, 0]
    lc = vars[:, 1]

    # Force mean of light curve to be zero, and shift the time such that
    # the "mean" time is also zero
    t = t - 0.5 * (max(t) - min(t))
    lc = lc - np.mean(lc)

    # Compute FT (periodogram, actually) of unmodified data
    if FT_orig == 1:
        print 'Computing fast Lomb-Scargle periodogram of unmodified data.'
        print 'This could take a while...'
        fx0, fy0, amp0, nout, jmax, fap_vals, amp_probs, df = lomb.fasper(
            t, lc, ofac, hifac)
        t2 = datetime.datetime.now()
        print 'Elapsed time: ', t2 - t1, '\n'
        #print fap_vals
        #print amp_probs

        #fx0, fy0, nft = ft_fix(fx0, fy0)
        fx0, amp0, nft = ft_fix(fx0, amp0)
        outarr = np.zeros((nft, 2 + len(metrics)))
        outarr[:, 0] = fx0
        outarr[:, 1] = amp0
        # run all metrics on ft
        for i, m in enumerate(metrics):
            outarr[:, 2 + i] = m(amp0, df)

        ofile1 = ofile0 + '.ft'
        nvals = len(fap_vals)
        stsig = ''
        for i in np.arange(nvals):
            stsig = stsig + ' {0:f} {1:f}\n'.format(fap_vals[i], amp_probs[i])
        head = 'Significance levels ( {0} ) for amplitude from formal periodogram criteria: '.format(
            nvals) + '\n FAP amplitude\n' + stsig + 'freq(hz) amplitude'
        for i in range(len(metrics)):
            head += ' metric' + str(i)
        print 'Writing out FT to {0}...'.format(ofile1)
        np.savetxt(ofile1, outarr, header=head, fmt='%e')
        t3 = datetime.datetime.now()
        print 'Elapsed time: ', t3 - t2, '\n'

    head0 = 'Generated from {0} using lomb.fasper() with ofac= {1}, hifac= {2}, npts= {3}'.format(
        file0, ofac, hifac, len(t))

    # pmaxvals = []
    # amaxvals = []
    maxvals = []
    medvals = []
    print '\nRandomly shuffling data', nperm, 'times...\n'
    for i in np.arange(nperm):
        t3 = datetime.datetime.now()
        print 'Permutation {0}...'.format(i)
        lcper = permutation(lc)
        fx0, fy0, amp0, nout, jmax, fap_vals, amp_probs, df = lomb.fasper(
            t, lcper, ofac, hifac)
        t4 = datetime.datetime.now()
        thesemaxvals = []
        thesemedvals = []
        for m in metrics:
            processed = m(amp0, df)
            inrange = processed[np.where((fx0 > lowfreq) & (fx0 < hifreq))]
            thesemaxvals.append(np.max(inrange))
            thesemedvals.append(np.median(inrange))
        # pmaxvals.append(np.max(metrics[0](amp0)))
        # amaxvals.append(np.max(metrics[1](amp0)))
        maxvals.append(thesemaxvals)
        medvals.append(thesemedvals)
        print 'Elapsed time: ', t4 - t3, '\n'
    print 'Finished shuffling data', nperm, 'times\n'
    # print amaxvals

    #n = len(maxvals)
    maxvals = np.array(maxvals)
    medvals = np.array(medvals)
    ofile2 = ofile0 + '.hist'
    print 'Writing maximum,median values to', ofile2
    # Write out the peak amplitude and power for each shuffled data set
    head = 'Maximum, then median values from {0} randomly shuffled trials of {1}\n'.format(
        nperm, file0)
    head += 'Values from the following function definitions:\n'
    for i, m in enumerate(metrics):
        head += str(i + 1) + inspect.getsource(m) + '\n'
    np.savetxt(ofile2, np.concatenate((maxvals, medvals), 1), header=head, fmt='%e')
    print 'Total elapsed time: ', t4 - t0, '\n'
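# Example metrics for ran_trials(): each metric receives the amplitude
# spectrum and the frequency resolution df, matching the m(amp0, df) call
# sites above. The filename in the commented call is hypothetical.
metrics = [
    lambda amp, df: amp,        # raw amplitude
    lambda amp, df: amp ** 2.,  # power
]
#ran_trials('mystar.txt', nperm=100, ofac=10., hifac=1.0, metrics=metrics)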
def plot():
    try:
        names
    except NameError:
        # Read in the model output
        model, names = readfile("GEOS_logs.npy", "001")  # 001 represents CVO
        # Process the date
        year = (model[:, 0] // 10000)
        month = ((model[:, 0] - year * 10000) // 100)
        day = (model[:, 0] - year * 10000 - month * 100)
        hour = model[:, 1] // 100
        min = (model[:, 1] - hour * 100)
        doy = [datetime.datetime(np.int(year[i]), np.int(month[i]), np.int(day[i]),
                                 np.int(hour[i]), np.int(min[i]), 0) -
               datetime.datetime(2006, 1, 1, 0, 0, 0)
               for i in range(len(year))]
        since2006 = [doy[i].days + doy[i].seconds / (24. * 60. * 60.)
                     for i in range(len(doy))]

    # now read in the observations
    myfile = nappy.openNAFile('York_merge_Cape_verde_1hr_R1.na')
    myfile.readData()

    counter = 0
    fig = plt.figure(figsize=(20, 12))
    fig.patch.set_facecolor('white')
    ax = plt.subplot(111)

    for species in species_list:
        # Give each species its exact model tag for convenience
        print species
        if species == 'ISOPRENE':
            species = 'TRA_6'
        elif species == 'ACETONE':
            species = 'ACET'
        elif species == 'TEMP':
            species = 'GMAO_TEMP'
        elif species == 'SURFACE_PRES':
            species = 'GMAO_PSFC'
        elif species == 'WINDSPEED':
            species = 'GMAO_WIND'
        elif species == 'SURFACE_SOLAR_RADIATION':
            species = 'GMAO_RADSW'
        elif species == 'ABS_HUMIDITY':
            species = 'GMAO_ABSH'
        elif species == 'REL_HUMIDITY':
            species = 'GMAO_RHUM'

        model_cut_switch = 0
        obs_switch = 0
        ofac = 1
        if species == 'O3':
            print 'yes'
            units = 'ppbV'
            first_label_pos = 3
            obs_data_name = 'Ozone mixing ratio (ppbV)_(Mean)'
            unit_cut = 1e9
            species_type = 'Conc.'
            actual_species_name = 'O3'
        elif species == 'CO':
            units = 'ppbV'
            first_label_pos = 1
            obs_data_name = 'CO mixing ratio (ppbV)_(Mean)'
            unit_cut = 1e9
            species_type = 'Conc.'
            actual_species_name = 'CO'
            ofac = 2.0001
        elif species == 'NO':
            units = 'pptV'
            first_label_pos = 1
            obs_data_name = 'NO mixing ratio (pptv)_(Mean)'
            unit_cut = 1e12
            species_type = 'Conc.'
            actual_species_name = 'NO'
        elif species == 'NO2':
            units = 'pptV'
            first_label_pos = 1
            obs_data_name = 'NO2 mixing ratio (pptv)_(Mean)'
            unit_cut = 1e12
            species_type = 'Conc.'
            actual_species_name = 'NO2'
        elif species == 'C2H6':
            units = 'pptV'
            first_label_pos = 1
            obs_data_name = 'ethane mixing ratio (pptV)_(Mean)'
            unit_cut = 1e12
            species_type = 'Conc.'
            actual_species_name = 'C2H6'
        elif species == 'C3H8':
            units = 'pptV'
            first_label_pos = 1
            obs_data_name = 'propane mixing ratio (pptV)_(Mean)'
            unit_cut = 1e12
            species_type = 'Conc.'
            actual_species_name = 'C3H8'
        elif species == 'DMS':
            units = 'pptV'
            first_label_pos = 1
            obs_data_name = 'dms mixing ratio (pptV)_(Mean)'
            unit_cut = 1e12
            species_type = 'Conc.'
            actual_species_name = 'DMS'
        elif species == 'TRA_6':  # Isoprene
            units = 'pptV'
            first_label_pos = 1
            obs_data_name = 'Isoprene (pptv)_(Mean)'
            unit_cut = 1e12
            species_type = 'Conc.'
        elif species == 'ACET':
            units = 'pptV'
            first_label_pos = 1
            obs_data_name = 'acetone mixing ratio (pptV)_(Mean)'
            unit_cut = 1e12
            species_type = 'Conc.'
            actual_species_name = 'Acetone'
        elif species == 'GMAO_TEMP':  # Temp from met fields
            units = 'K'
            first_label_pos = 3
            obs_data_name = 'Air Temperature (degC) Campbell_(Mean)'
            unit_cut = 1
            species_type = 'Temp.'
            actual_species_name = 'Surface Temperature'
            obs_switch = 1
        elif species == 'GMAO_PSFC':  # Surface Pressure
            units = 'hPa'
            first_label_pos = 3
            obs_data_name = 'Atmospheric Pressure (hPa) Campbell_(Mean)'
            unit_cut = 1
            species_type = 'Pres.'
actual_species_name = 'Surface Pressure' elif species == 'GMAO_WIND': #Wind Speed calculated from UWND and VWND def read_diff_species(): k=names.index('GMAO_UWND') i=names.index('GMAO_VWND') model_cut=np.sqrt((model[:,k]**2)+(model[:,i]**2)) return model_cut units = r'$ms^{-1}$' first_label_pos = 3 obs_data_name = 'Wind Speed (m/s) Campbell_(Mean)' unit_cut= 1 species_type = 'Wind Speed' model_cut_switch = 1 actual_species_name = 'Surface Windspeed' elif species == 'GMAO_RADSW': #Shortwave solar radiation at the surface units = r'$Wm^{-2}$' first_label_pos = 3 obs_data_name = 'Solar Radiation (Wm-2) Campbell_(Mean)' unit_cut= 1 species_type = 'Solar Radiation' actual_species_name = 'Surface Solar Radiation' elif species == 'GMAO_ABSH': #Absolute Humidity units = 'molec/cm-3' first_label_pos = 3 obs_data_name = '' unit_cut= 1 species_type = 'Absolute Humidity' actual_species_name = 'Absolute Humidity' elif species == 'GMAO_RHUM': #Relative Humidity units = '%' first_label_pos = 3 obs_data_name = 'Relative Humidity (%) Campbell_(Mean)' unit_cut= 1 species_type = 'Relative Humidity' actual_species_name = 'Relative Humidity' k_var1=myfile["VNAME"].index(obs_data_name) # OK, need to convert values from a list to a numpy array time=np.array(myfile['X']) if obs_switch == 0: var1=np.array(myfile['V'][k_var1]) elif obs_switch == 1: var1=np.array(myfile['V'][k_var1])+273.15 valids1=var1 > 0 time2=time[valids1] var2=var1[valids1] #Pre normalise obs data for lomb analysis standard_deviation_obs_p = np.std(var2) mean_obs_p = np.mean(var2) normal_var2 = var2-mean_obs_p normal_var2 = normal_var2/standard_deviation_obs_p #Calculate variance of pre-processed obs data- should be 1 if normal #standard_dev_obs = np.std(normal_var_2, dtype=np.float64) #variance_obs = standard_dev_obs**2 #print 'Variance - pre-processed obs data= ', variance_obs #Define sampling intervals samp_spacing = 1./24. #Convert model time array into numpy array since2006=np.array(since2006) #Need to normalise model data also if model_cut_switch == 0: k=names.index(species) print model[:,k] model_cut = model[:,k]*unit_cut if model_cut_switch == 1: model_cut = read_diff_species() #Add seasonal emission trend onto ethane (a vectorized np.interp equivalent is sketched after this function).
first_season = np.linspace(0,100,num=91,endpoint=True) second_season = first_season[::-1] third_season = np.linspace(0,-100, num=91, endpoint=True) fourth_season = third_season[::-1] fourth_season = np.append(fourth_season,0) n=0 n_end=24 step = 24 season_index = 0 new_model_cut=[] year_count = 0 count=0 while True: if count <91: sliced = model_cut[n:n_end] season_value = first_season[season_index] sliced = [a+season_value for a in sliced] new_model_cut.append(sliced) n+=step n_end+=step #print 'season_1', count, season_index season_index+=1 if season_index == 91: season_index= 0 elif count <182: sliced = model_cut[n:n_end] season_value = second_season[season_index] sliced = [a+season_value for a in sliced] new_model_cut.append(sliced) n+=step n_end+=step #print 'season_2', count, season_index season_index+=1 if season_index == 91: season_index= 0 elif count < 273: sliced = model_cut[n:n_end] season_value = third_season[season_index] sliced = [a+season_value for a in sliced] new_model_cut.append(sliced) n+=step n_end+=step #print 'season_3', count, season_index season_index+=1 if season_index == 91: season_index= 0 elif count < 365: sliced = model_cut[n:n_end] season_value = fourth_season[season_index] sliced = [a+season_value for a in sliced] new_model_cut.append(sliced) n+=step n_end+=step #print 'season_4', count, season_index season_index+=1 else: count = 0 year_count+=1 season_index = 0 continue if year_count == 6: break count+=1 new_model_cut = np.array(reduce(lambda x,y: x+y,new_model_cut)) #flatten the per-day lists into one array so the normalisation below works standard_deviation_model_p = np.std(model_cut) mean_model_p = np.mean(model_cut) normal_model = model_cut-mean_model_p normal_model = normal_model/standard_deviation_model_p standard_deviation_model_p_corrected = np.std(new_model_cut) mean_model_p_corrected = np.mean(new_model_cut) normal_model_corrected = new_model_cut-mean_model_p_corrected normal_model_corrected = normal_model_corrected/standard_deviation_model_p_corrected #Calculate variance of pre-processed model data- should be 1 if normal #standard_dev_model = np.std(normal_model, dtype=np.float64) #variance_model = standard_dev_model**2 #print 'Variance - pre-processed model data= ', variance_model #Define sampling frequency samp_freq = 24 #Lomb-scargle plot #Plot axis period lines and labels #annotate_line_y=np.arange(1e-10,1e4,1) #horiz_line_100 =np.arange(0,2000,1) #freq_year = [345]*len(annotate_line_y) #array_100 = [100]*len(horiz_line_100) #plt.plot(freq_year, annotate_line_y,'r--',alpha=0.4) #plt.text(345, 5, '1 Year', fontweight='bold') #plt.plot(horiz_line_100, array_100,'r--',alpha=0.4) #plt.text(1024, 80, '100%', fontweight='bold') #Obs lomb fa, fb, nout, jmax, prob = lomb.fasper(time2, normal_var2, ofac, samp_freq) #Divide output by sampling frequency fb = fb/samp_freq fb = np.log(fb) obs_smoothed=savitzky_golay(fb, window_size=301, order=1) obs_smoothed = np.exp(obs_smoothed) #Calculate Nyquist frequency, Si and Si x 2 for normalisation checks. #nyquist_freq_lomb_obs = frequencies[-1] #Si_lomb_obs = np.mean(fb)*nyquist_freq_lomb_obs #print nyquist_freq_lomb_obs, Si_lomb_obs, Si_lomb_obs*2 #plot up #plt.loglog(1./fa, fb,'kx',markersize=2, label='Cape Verde Obs. ')
#Model lomb fx, fy, nout, jmax, prob2 = lomb.fasper(since2006,normal_model, ofac, samp_freq) #Divide output by sampling frequency fy = fy/samp_freq fy = np.log(fy) model_smoothed=savitzky_golay(fy, window_size=301, order=1) model_smoothed = np.exp(model_smoothed) #Corrected-model lomb fx, fy, nout, jmax, prob2 = lomb.fasper(since2006,normal_model_corrected, ofac, samp_freq) #Divide output by sampling frequency fy_corrected = fy/samp_freq fy_corrected = np.log(fy_corrected) model_corrected_smoothed=savitzky_golay(fy_corrected, window_size=301, order=1) model_corrected_smoothed = np.exp(model_corrected_smoothed) #Calculate Nyquist frequency, Si and Si x 2 for normalisation checks. #nyquist_freq_lomb_model = frequencies[-1] #Si_lomb_model = np.mean(fy)*nyquist_freq_lomb_model #print nyquist_freq_lomb_model, Si_lomb_model, Si_lomb_model*2 #plot up #plt.loglog(1./fx, fy, 'gx', alpha = 0.75,markersize=2, label='GEOS v9.01.03 4x5 ') obs_periods = 1./fa model_periods = 1./fx #Which dataset is shorter # obs longer than model if len(obs_smoothed) > len(model_smoothed): obs_smoothed = obs_smoothed[:len(model_smoothed)] freq_array = fx period_array = model_periods #model longer than obs if len(model_smoothed) > len(obs_smoothed): model_smoothed = model_smoothed[:len(obs_smoothed)] model_corrected_smoothed = model_corrected_smoothed[:len(obs_smoothed)] freq_array = fa period_array = obs_periods #calculate % of observations #covariance_array = np.hstack((fb,fy)) compare_powers = model_smoothed/obs_smoothed compare_powers = compare_powers *100 corrected_compare_powers = model_corrected_smoothed/obs_smoothed corrected_compare_powers = corrected_compare_powers *100 ax.set_xscale('log', basex=10) ax.set_yscale('log', basey=10) #plt.plot(obs_periods,fb, color = 'k', marker='x', alpha = 0.75, markersize=2, label = 'Mace Head' #plt.plot(period_array, corrected_compare_powers , color=colour_list[counter], marker='x', alpha = 0.75, markersize=2, label = species) plt.plot(period_array, compare_powers , color='black', marker='x', alpha = 0.75, markersize=2, label = species) #ax.plot(rest_cut_periods, rest_powers , color=colour_list[counter], marker='x', alpha = 0.75, markersize=2, label = species) #percent1 = period_percent_diff(np.min(obs_periods),1,fb,fy,obs_periods,model_periods) #percent2 = period_percent_diff(1,2,fb,fy,obs_periods,model_periods) #percent3 = period_percent_diff(2,7,fb,fy,obs_periods,model_periods) plt.grid(True) ax.xaxis.set_major_formatter(FormatStrFormatter('%.i')) ax.yaxis.set_major_formatter(FormatStrFormatter('%.i')) leg=plt.legend(loc=4, prop={'size':21}) leg.get_frame().set_alpha(0.4) #plt.text(1e-2, 3000,'Period: 2 hours to 1 day, a %% Diff. of: %.2f%%' %(percent1), fontweight='bold') #plt.text(1e-2, 500,'Period: 1 day to 2 days, a %% Diff. of: %.2f%%' %(percent2), fontweight='bold') #plt.text(1e-2, 90,'Period: 2 days to 7 days, a %% Diff. of: %.2f%%' %(percent3), fontweight='bold') plt.xlim(0.05,1e1) plt.ylim(0.001,1e3) plt.xlabel('Period (Days)', fontsize=21) plt.ylabel('Percent of Obs. PSD (%)', fontsize=21) plt.title('% PSD of Model compared to Obs.',fontsize=21) counter+=1 #plt.savefig('O3_capeverde_comparison_plots.ps', dpi = 200) plt.show()
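# The seasonal-adjustment loop above walks the hourly model series one day at
# a time. A minimal vectorized sketch of the same idea (an assumption: hourly
# data and the same 0 -> 100 -> 0 -> -100 -> 0 triangular annual cycle), using
# np.interp over day-of-year:
import numpy as np

def add_seasonal_trend(model_cut, hours_per_day=24):
    """Add the triangular annual cycle used above to an hourly series."""
    n = len(model_cut)
    day = (np.arange(n) / float(hours_per_day)) % 365.0
    knots = [0.0, 91.0, 182.0, 273.0, 365.0]   # season boundaries (days)
    values = [0.0, 100.0, 0.0, -100.0, 0.0]    # trend value at each knot
    return np.asarray(model_cut) + np.interp(day, knots, values)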
fb = fb/samp_freq obs_smoothed = konnoOhmachiSmoothing(fb, fa, bandwidth=40, count=1, enforce_no_matrix=True, max_memory_usage=512, normalize=False) #Calculate Nyquist frequency, Si and Si x 2 for normalisation checks. #nyquist_freq_lomb_obs = frequencies[-1] #Si_lomb_obs = np.mean(fb)*nyquist_freq_lomb_obs #print nyquist_freq_lomb_obs, Si_lomb_obs, Si_lomb_obs*2 #plot up #plt.loglog(1./fa, fb,'kx',markersize=2, label='Cape Verde Obs. ') #Model lomb fx, fy, nout, jmax, prob2 = lomb.fasper(since2006,normal_model, ofac, samp_freq) model_sig = fx, fy, nout, ofac #Divide output by sampling frequency fy = fy/samp_freq model_smoothed = konnoOhmachiSmoothing(fy, fx, bandwidth=40, count=1, enforce_no_matrix=True, max_memory_usage=512, normalize=False) #Calculate Nyquist frequency, Si and Si x 2 for normalisation checks. #nyquist_freq_lomb_model = frequencies[-1] #Si_lomb_model = np.mean(fy)*nyquist_freq_lomb_model #print nyquist_freq_lomb_model, Si_lomb_model, Si_lomb_model*2 #plot up
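# For reference, the Konno-Ohmachi window applied by the smoothing above has a
# closed form; a compact NumPy sketch (assuming the same bandwidth b=40 as the
# calls above, and strictly positive frequencies) is:
import numpy as np

def konno_ohmachi(spectrum, freqs, bandwidth=40.0):
    """Naive O(n^2) Konno-Ohmachi smoothing: at each centre frequency fc the
    window is (sin(b*log10(f/fc)) / (b*log10(f/fc)))**4, normalised to unit sum."""
    spectrum = np.asarray(spectrum, dtype=float)
    freqs = np.asarray(freqs, dtype=float)
    smoothed = np.empty_like(spectrum)
    for i, fc in enumerate(freqs):
        with np.errstate(divide='ignore', invalid='ignore'):
            x = bandwidth * np.log10(freqs / fc)
            w = (np.sin(x) / x) ** 4
        w[np.isnan(w)] = 1.0    # the f == fc sample, where the limit is 1
        w[freqs <= 0.0] = 0.0   # guard against non-positive frequencies
        smoothed[i] = np.sum(w * spectrum) / np.sum(w)
    return smoothed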
def plot(species, location): #Set obs data for each location if location == 'Arrival_Heights': obsfile = 'arrival_heights_o3_hourly/o3*' loc_label = 'Arrival Heights' gaw_code = '010' #codes kept as strings: a bare 010 or 015 is an octal literal in Python 2 elif location == 'Barrow': obsfile = 'barrow_o3_hourly/o3*' loc_label = 'Barrow' gaw_code = '015' elif location == 'Lauder': obsfile = 'lauder_o3_hourly/o3*' loc_label = 'Lauder' gaw_code = '106' elif location == 'Mace_Head': obsfile = 'O3_mace_head_ppbV.txt' loc_label = 'Mace Head' gaw_code = '112' elif location == 'Mauna_Loa': obsfile = 'mauna_loa_o3_hourly/o3*' loc_label = 'Mauna Loa' gaw_code = '116' elif location == 'Niwot_Ridge': obsfile = 'niwot_ridge_o3_hourly/o3*' loc_label = 'Niwot Ridge' gaw_code = '132' elif location == 'Ragged_Point': obsfile = 'ragged_point_o3_hourly/o3*' loc_label = 'Ragged Point' gaw_code = '152' elif location == 'South_Pole': obsfile = 'south_pole_o3_hourly/o3*' loc_label = 'South Pole' gaw_code = '173' elif location == 'Trinidad_Head': obsfile = 'trinidad_head_o3_hourly/o3*' loc_label = 'Trinidad Head' gaw_code = '189' elif location == 'Tudor_Hill': obsfile = 'tudor_hill_o3_hourly/o3*' loc_label = 'Tudor Hill' gaw_code = '191' elif location == 'Tutuila': obsfile = 'tutuila_o3_hourly/o3*' loc_label = 'Tutuila' gaw_code = '192' #model_cut_switch defaults to 0; set it to 1 in a species definition to take a more complicated cut from the model field model_cut_switch = 0 ofac = 1 if species == 'O3': units = 'ppbV' first_label_pos = 3 obs_data_name = 'Ozone mixing ratio (ppbV)_(Mean)' unit_cut = 1e9 species_type = 'Conc.' actual_species_name = 'O3' ofac = 2.0001 elif species == 'CO': units = 'ppbV' first_label_pos = 1 obs_data_name = 'CO mixing ratio (ppbV)_(Mean)' unit_cut = 1e9 species_type = 'Conc.' actual_species_name = 'CO' ofac = 2.0001 elif species == 'NO': units = 'pptV' first_label_pos = 1 obs_data_name = 'NO mixing ratio (pptv)_(Mean)' unit_cut = 1e12 species_type = 'Conc.' actual_species_name = 'NO' elif species == 'NO2': units = 'pptV' first_label_pos = 1 obs_data_name = 'NO2 mixing ratio (pptv)_(Mean)' unit_cut = 1e12 species_type = 'Conc.' actual_species_name = 'NO2' elif species == 'C2H6': units = 'pptV' first_label_pos = 1 obs_data_name = 'ethane mixing ratio (pptV)_(Mean)' unit_cut = 1e12 species_type = 'Conc.' actual_species_name = 'C2H6' elif species == 'C3H8': units = 'pptV' first_label_pos = 1 obs_data_name = 'propane mixing ratio (pptV)_(Mean)' unit_cut = 1e12 species_type = 'Conc.' actual_species_name = 'C3H8' elif species == 'DMS': units = 'pptV' first_label_pos = 1 obs_data_name = 'dms mixing ratio (pptV)_(Mean)' unit_cut = 1e12 species_type = 'Conc.' actual_species_name = 'DMS' elif species == 'TRA_6': #Isoprene units = 'pptV' first_label_pos = 1 obs_data_name = 'Isoprene (pptv)_(Mean)' unit_cut = 1e12 species_type = 'Conc.' actual_species_name = 'Isoprene' elif species == 'ACET': units = 'pptV' first_label_pos = 1 obs_data_name = 'acetone mixing ratio (pptV)_(Mean)' unit_cut = 1e12 species_type = 'Conc.' actual_species_name = 'Acetone' elif species == 'GMAO_TEMP': # Temp from met fields units = 'K' first_label_pos = 1 obs_data_name = 'Air Temperature (degC) Campbell_(Mean)' unit_cut = 1 species_type = 'Temp.' actual_species_name = 'Surface Temperature' elif species == 'GMAO_PSFC': #Surface Pressure units = 'hPa' first_label_pos = 1 obs_data_name = 'Atmospheric Pressure (hPa) Campbell_(Mean)' unit_cut = 1 species_type = 'Pres.'
actual_species_name = 'Surface Pressure' elif species == 'GMAO_WIND': #Wind Speed calculated from UWND and VWND def read_diff_species(): k = names.index('GMAO_UWND') i = names.index('GMAO_VWND') model_cut = np.sqrt((model[:, k]**2) + (model[:, i]**2)) return model_cut units = r'$ms^{-1}$' first_label_pos = 1 obs_data_name = 'Wind Speed (m/s) Campbell_(Mean)' unit_cut = 1 species_type = 'Wind Speed' model_cut_switch = 1 actual_species_name = 'Surface Windspeed' elif species == 'GMAO_RADSW': #Shortwave solar radiation at the surface units = r'$Wm^{-2}$' first_label_pos = 1 obs_data_name = 'Solar Radiation (Wm-2) Campbell_(Mean)' unit_cut = 1 species_type = 'Solar Radiation' actual_species_name = 'Surface Solar Radiation' elif species == 'GMAO_ABSH': #Absolute Humidity units = 'molec/cm-3' first_label_pos = 1 obs_data_name = '' unit_cut = 1 species_type = 'Absolute Humidity' actual_species_name = 'Absolute Humidity' elif species == 'GMAO_RHUM': #Relative Humidity units = '%' first_label_pos = 1 obs_data_name = 'Relative Humidity (%) Campbell_(Mean)' unit_cut = 1 species_type = 'Relative Humidity' actual_species_name = 'Relative Humidity' #do I need to read everything in try: names except NameError: # Read in the model output model = readfile("gaw_logs.npy", gaw_code) print model.shape print model # Processes the date year = (model[:, 1] // 10000) month = ((model[:, 1] - year * 10000) // 100) day = (model[:, 1] - year * 10000 - month * 100) hour = model[:, 2] // 100 min = (model[:, 2] - hour * 100) doy=[ datetime.datetime(np.int(year[i]),np.int(month[i]),np.int(day[i]),\ np.int(hour[i]),np.int(min[i]),0)- \ datetime.datetime(2006,1,1,0,0,0) \ for i in range(len(year))] since2006 = [ doy[i].days + doy[i].seconds / (24. * 60. * 60.) for i in range(len(doy)) ] #now read in the observations if location == 'Mace_Head': date, time, vals = NOAA_data_reader_mace_head(glob.glob(obsfile)) else: date, time, vals = NOAA_data_reader(glob.glob(obsfile)) valid = vals >= 0 vals = vals[valid] date = date[valid] time = time[valid] print vals # Process NOAA obs time year = (date // 10000) month = ((date - year * 10000) // 100) day = (date - year * 10000 - month * 100) hour = time // 100 min = (time - hour * 100) doy=[ datetime.datetime(np.int(year[i]),np.int(month[i]),np.int(day[i]),\ np.int(hour[i]),np.int(min[i]),0)- \ datetime.datetime(2006,1,1,0,0,0) \ for i in range(len(year))] since2006_2 = [ doy[i].days + doy[i].seconds / (24. * 60. * 60.) for i in range(len(doy)) ] #Pre normalise obs data for lomb analysis standard_deviation_obs_p = np.std(vals) mean_obs_p = np.mean(vals) normal_var2 = vals - mean_obs_p normal_var2 = normal_var2 / standard_deviation_obs_p #Calculate variance of pre-processed obs data- should be 1 if normal #standard_dev_obs = np.std(normal_var_2, dtype=np.float64) #variance_obs = standard_dev_obs**2 #print 'Variance - pre-processed obs data= ', variance_obs #Define sampling intervals samp_spacing = 1. / 24.
#Convert model time array into numpy array since2006 = np.array(since2006) since2006_2 = np.array(since2006_2) #Need to normalise model data also model_cut = model[:, 3] * unit_cut standard_deviation_model_p = np.std(model_cut) mean_model_p = np.mean(model_cut) normal_model = model_cut - mean_model_p normal_model = normal_model / standard_deviation_model_p #Calculate variance of pre-processed model data- should be 1 if normal #standard_dev_model = np.std(normal_model, dtype=np.float64) #variance_model = standard_dev_model**2 #print 'Variance - pre-processed model data= ', variance_model #Plot them all up. fig = plt.figure(figsize=(20, 12)) fig.patch.set_facecolor('white') ax = plt.subplot(111) #Plot up standard conc. v time plot #ax1= fig.add_subplot(2, 1, 1) #fig.subplots_adjust(hspace=0.3) #plt.plot(since2006_2,vals, color='black', label= '%s Obs.' % loc_label) #plt.plot(since2006, model_cut, color='green', label='GEOS v9.01.03 4x5 ') #plt.grid(True) #leg=plt.legend(loc=1) #leg.get_frame().set_alpha(0.4) #plt.xlabel('Time (Days since 2006)') #print units #plt.ylabel('%s (%s)' % (species_type,units)) #plt.title('%s V Time' % (actual_species_name)) #Define sampling frequency samp_freq = 24 #Lomb-scargle plot #ax3= fig.add_subplot(2, 1, 2) #Plot axis period lines and labels annotate_line_y = np.arange(1e-10, 1e4, 1) horiz_line_100 = np.arange(0, 2000, 1) freq_year = [345] * len(annotate_line_y) array_100 = [100] * len(horiz_line_100) plt.plot(freq_year, annotate_line_y, 'r--', alpha=0.4) plt.text(345, 10, '1 Year', fontweight='bold') #plt.plot(horiz_line_100, array_100,'r--',alpha=0.4) #plt.text(0.05, 80, '100%', fontweight='bold') #Obs lomb fa, fb, nout, jmax, prob = lomb.fasper(since2006_2, normal_var2, ofac, samp_freq) #Divide output by sampling frequency fb = fb / samp_freq fb = np.log(fb) obs_smoothed = savitzky_golay(fb, window_size=301, order=1) obs_smoothed = np.exp(obs_smoothed) #nyquist_freq_lomb_obs = frequencies[-1] #Si_lomb_obs = np.mean(fb)*nyquist_freq_lomb_obs #print nyquist_freq_lomb_obs, Si_lomb_obs, Si_lomb_obs*2 #plot up #plt.loglog(1./fa, fb,'kx',markersize=2, label='%s Obs. ' % loc_label) #Model lomb #print normal_model fx, fy, nout, jmax, prob2 = lomb.fasper(since2006, normal_model, ofac, samp_freq) #Divide output by sampling frequency fy = fy / samp_freq fy = np.log(fy) model_smoothed = savitzky_golay(fy, window_size=301, order=1) model_smoothed = np.exp(model_smoothed) #print model_smoothed #Calculate Nyquist frequency, Si and Si x 2 for normalisation checks. #nyquist_freq_lomb_model = frequencies[-1] #Si_lomb_model = np.mean(fy)*nyquist_freq_lomb_model #print nyquist_freq_lomb_model, Si_lomb_model, Si_lomb_model*2 #plot up #plt.loglog(1./fx, fy, 'gx', alpha = 0.75,markersize=2, label='GEOS v9.01.03 4x5 ') #plt.loglog(1./fa, obs_smoothed, color = 'orangered', marker='x',linestyle='None', alpha = 0.75,markersize=2, label='Smoothed Mace Head Obs. ') #plt.loglog(1./fx, model_smoothed, color = 'blue', marker='x', linestyle='None', alpha = 0.75,markersize=2, label='Smoothed GEOS v9.01.03 4x5 ') obs_periods = 1. / fa model_periods = 1. / fx #Which dataset is shorter # obs longer than model if len(obs_smoothed) > len(model_smoothed): obs_smoothed = obs_smoothed[:len(model_smoothed)] freq_array = fx period_array = model_periods #model longer than obs if len(model_smoothed) > len(obs_smoothed): model_smoothed = model_smoothed[:len(obs_smoothed)] freq_array = fa period_array = obs_periods #calculate % of observations #covariance_array = np.hstack((fb,fy)) # print model_smoothed compare_powers = model_smoothed / obs_smoothed compare_powers = compare_powers * 100 ax.set_xscale('log', basex=10) ax.set_yscale('log', basey=10) plt.plot(period_array, compare_powers, color='black', marker='x', alpha=0.75, markersize=2, label='O3 % Diff.') plt.grid(True) ax.xaxis.set_major_formatter(FormatStrFormatter('%.i')) ax.yaxis.set_major_formatter(FormatStrFormatter('%.i')) leg = plt.legend(loc=4, prop={'size': 21}) leg.get_frame().set_alpha(0.4) plt.xlim(0.05, 1e1) plt.ylim(0.1, 1000) plt.xlabel('Period (Days)', fontsize=21) plt.ylabel('Percent of Obs. PSD (%)', fontsize=21) plt.title('% PSD of Model compared to Obs.', fontsize=21) #plt.savefig('O3_capeverde_comparison_plots.ps', dpi = 200) plt.show()
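# Truncating whichever smoothed spectrum is longer, as above, silently assumes
# the two Lomb outputs share a frequency grid. A safer sketch (a hypothetical
# helper, not part of the original) resamples the model PSD onto the obs
# frequencies with np.interp before taking the percentage:
import numpy as np

def psd_percent_of_obs(obs_freqs, obs_psd, model_freqs, model_psd):
    """Model PSD as a percentage of obs PSD on the obs frequency grid.
    model_freqs must be ascending, as lomb.fasper output is."""
    model_on_obs = np.interp(obs_freqs, model_freqs, model_psd)
    return 100.0 * model_on_obs / obs_psd

# e.g. compare_powers = psd_percent_of_obs(fa, obs_smoothed, fx, model_smoothed)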
def plot(): try: names except NameError: # Read in the model output model , names = readfile("GEOS_v90103_4x5_CV_logs.npy","001") #001 represents CVO # Processes the date year=(model[:,0]//10000) month=((model[:,0]-year*10000)//100) day=(model[:,0]-year*10000-month*100) hour=model[:,1]//100 min=(model[:,1]-hour*100) doy=[ datetime.datetime(np.int(year[i]),np.int(month[i]),np.int(day[i]),\ np.int(hour[i]),np.int(min[i]),0)- \ datetime.datetime(2006,1,1,0,0,0) \ for i in range(len(year))] since2006=[doy[i].days+doy[i].seconds/(24.*60.*60.) for i in range(len(doy))] #now read in the observations myfile=nappy.openNAFile('York_merge_Cape_verde_1hr_R1.na') myfile.readData() counter = 0 fig =plt.figure(figsize=(20,12)) ax = plt.subplot(111) for species in species_list: #Gives species exact model tags for convenience print species if species == 'ISOPRENE': species = 'TRA_6' elif species == 'ACETONE': species = 'ACET' elif species == 'TEMP': species = 'GMAO_TEMP' elif species == 'SURFACE_PRES': species = 'GMAO_PSFC' elif species == 'WINDSPEED': species = 'GMAO_WIND' elif species == 'SURFACE_SOLAR_RADIATION': species = 'GMAO_RADSW' elif species == 'ABS_HUMIDITY': species = 'GMAO_ABSH' elif species == 'REL_HUMIDITY': species = 'GMAO_RHUM' model_cut_switch = 0 obs_switch = 0 ofac = 1 if species == 'O3': units = 'ppbV' first_label_pos = 3 obs_data_name = 'Ozone mixing ratio (ppbV)_(Mean)' unit_cut= 1e9 species_type = 'Conc.' actual_species_name = 'O3' elif species == 'CO': units = 'ppbV' first_label_pos = 1 obs_data_name = 'CO mixing ratio (ppbV)_(Mean)' unit_cut= 1e9 species_type = 'Conc.' actual_species_name = 'CO' ofac = 2.0001 elif species == 'NO': units = 'pptV' first_label_pos = 1 obs_data_name = 'NO mixing ratio (pptv)_(Mean)' unit_cut= 1e12 species_type = 'Conc.' actual_species_name = 'NO' elif species == 'NO2': units = 'pptV' first_label_pos = 1 obs_data_name = 'NO2 mixing ratio (pptv)_(Mean)' unit_cut= 1e12 species_type = 'Conc.' actual_species_name = 'NO2' elif species == 'C2H6': units = 'pptV' first_label_pos = 1 obs_data_name = 'ethane mixing ratio (pptV)_(Mean)' unit_cut= 1e12 species_type = 'Conc.' actual_species_name = 'C2H6' elif species == 'C3H8': units = 'pptV' first_label_pos = 1 obs_data_name = 'propane mixing ratio (pptV)_(Mean)' unit_cut= 1e12 species_type = 'Conc.' actual_species_name = 'C3H8' elif species == 'DMS': units = 'pptV' first_label_pos = 1 obs_data_name = 'dms mixing ratio (pptV)_(Mean)' unit_cut= 1e12 species_type = 'Conc.' actual_species_name = 'DMS' elif species == 'TRA_6': #Isoprene units = 'pptV' first_label_pos = 1 obs_data_name = 'Isoprene (pptv)_(Mean)' unit_cut= 1e12 species_type = 'Conc.' actual_species_name = 'Isoprene' elif species == 'ACET': units = 'pptV' first_label_pos = 1 obs_data_name = 'acetone mixing ratio (pptV)_(Mean)' unit_cut= 1e12 species_type = 'Conc.' actual_species_name = 'Acetone' elif species == 'GMAO_TEMP': # Temp from met fields units = 'K' first_label_pos = 3 obs_data_name = 'Air Temperature (degC) Campbell_(Mean)' unit_cut= 1 species_type = 'Temp.' actual_species_name = 'Surface Temperature' obs_switch = 1 elif species == 'GMAO_PSFC': #Surface Pressure units = 'hPa' first_label_pos = 3 obs_data_name = 'Atmospheric Pressure (hPa) Campbell_(Mean)' unit_cut= 1 species_type = 'Pres.'
actual_species_name = 'Surface Pressure' elif species == 'GMAO_WIND': #Wind Speed calculated from UWND and VWND def read_diff_species(): k=names.index('GMAO_UWND') i=names.index('GMAO_VWND') model_cut=np.sqrt((model[:,k]**2)+(model[:,i]**2)) return model_cut units = r'$ms^{-1}$' first_label_pos = 3 obs_data_name = 'Wind Speed (m/s) Campbell_(Mean)' unit_cut= 1 species_type = 'Wind Speed' model_cut_switch = 1 actual_species_name = 'Surface Windspeed' elif species == 'GMAO_RADSW': #Shortwave solar radiation at the surface units = r'$Wm^{-2}$' first_label_pos = 3 obs_data_name = 'Solar Radiation (Wm-2) Campbell_(Mean)' unit_cut= 1 species_type = 'Solar Radiation' actual_species_name = 'Surface Solar Radiation' elif species == 'GMAO_ABSH': #Absolute Humidity units = 'molec/cm-3' first_label_pos = 3 obs_data_name = '' unit_cut= 1 species_type = 'Absolute Humidity' actual_species_name = 'Absolute Humidity' elif species == 'GMAO_RHUM': #Relative Humidity units = '%' first_label_pos = 3 obs_data_name = 'Relative Humidity (%) Campbell_(Mean)' unit_cut= 1 species_type = 'Relative Humidity' actual_species_name = 'Relative Humidity' k_var1=myfile["VNAME"].index(obs_data_name) # OK, need to convert values from a list to a numpy array time=np.array(myfile['X']) if obs_switch == 0: var1=np.array(myfile['V'][k_var1]) elif obs_switch == 1: var1=np.array(myfile['V'][k_var1])+273.15 valids1=var1 > 0 time2=time[valids1] var2=var1[valids1] #Pre normalise obs data for lomb analysis standard_deviation_obs_p = np.std(var2) mean_obs_p = np.mean(var2) normal_var2 = var2-mean_obs_p normal_var2 = normal_var2/standard_deviation_obs_p #Calculate variance of pre-processed obs data- should be 1 if normal #standard_dev_obs = np.std(normal_var_2, dtype=np.float64) #variance_obs = standard_dev_obs**2 #print 'Variance - pre-processed obs data= ', variance_obs #Define sampling intervals samp_spacing = 1./24. #Convert model time array into numpy array since2006=np.array(since2006) #Need to normalise model data also if model_cut_switch == 0: k=names.index(species) model_cut = model[:,k]*unit_cut if model_cut_switch == 1: model_cut = read_diff_species() standard_deviation_model_p = np.std(model_cut) mean_model_p = np.mean(model_cut) normal_model = model_cut-mean_model_p normal_model = normal_model/standard_deviation_model_p #Calculate variance of pre-processed model data- should be 1 if normal #standard_dev_model = np.std(normal_model, dtype=np.float64) #variance_model = standard_dev_model**2 #print 'Variance - pre-processed model data= ', variance_model #Define sampling frequency samp_freq = 24 #Lomb-scargle plot #Plot axis period lines and labels annotate_line_y=np.arange(1e-10,1e4,1) horiz_line_100 =np.arange(0,2000,1) freq_year = [345]*len(annotate_line_y) array_100 = [100]*len(horiz_line_100) plt.plot(freq_year, annotate_line_y,'r--',alpha=0.4) plt.text(345, 5, '1 Year', fontweight='bold') plt.plot(horiz_line_100, array_100,'r--',alpha=0.4) plt.text(1024, 80, '100%', fontweight='bold') #Obs lomb fa, fb, nout, jmax, prob = lomb.fasper(time2, normal_var2, ofac, samp_freq) obs_sig = fa, fb, nout, ofac #Divide output by sampling frequency fb = fb/samp_freq obs_smoothed = konnoOhmachiSmoothing(fb, fa, bandwidth=40, count=1, enforce_no_matrix=True, max_memory_usage=512, normalize=False) #direct call; a pooled variant is sketched below
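# The original pool.map call here could not run: keyword arguments cannot be
# written inside a tuple literal. If the smoothing really is meant to run in a
# worker pool, one sketch (assuming the ObsPy-style konnoOhmachiSmoothing
# signature used above, with functools.partial binding the keywords) is:
import functools
from multiprocessing import Pool

smooth = functools.partial(konnoOhmachiSmoothing, frequencies=fa, bandwidth=40,
                           count=1, enforce_no_matrix=True,
                           max_memory_usage=512, normalize=False)
pool = Pool(processes=2)
# one spectrum per worker; further spectra on the same frequency grid
# (e.g. a model periodogram) could be appended to the list
results = pool.map(smooth, [fb])
obs_smoothed = results[0]
pool.close()
pool.join()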
def superplot(lc, ticid, breakpoints, target_list, save_data=False, outdir=None): """ """ time, flux, flux_err = lc.time, lc.flux, lc.flux_err model = BoxLeastSquares(time, flux) results = model.autopower(0.16, minimum_period=2., maximum_period=21.) period = results.period[np.argmax(results.power)] t0 = results.transit_time[np.argmax(results.power)] depth = results.depth[np.argmax(results.power)] depth_snr = results.depth_snr[np.argmax(results.power)] ''' Plot Filtered Light Curve ------------------------- ''' plt.subplot2grid((8,16),(1,0),colspan=4, rowspan=1) plt.plot(time, flux, 'k', label="filtered") for val in breakpoints: plt.axvline(val, c='b', linestyle='dashed') plt.legend() plt.ylabel('Normalized Flux') plt.xlabel('Time') osample=5. nyq=283. # calculate FFT freq, amp, nout, jmax, prob = lomb.fasper(time, flux, osample, 3.) freq = 1000. * freq / 86.4 bin = freq[1] - freq[0] fts = 2. * amp * np.var(flux * 1e6) / (np.sum(amp) * bin) use = np.where(freq < nyq + 150) freq = freq[use] fts = fts[use] # calculate ACF acf = np.correlate(fts, fts, 'same') freq_acf = np.linspace(-freq[-1], freq[-1], len(freq)) fitT = build_ktransit_model(ticid=ticid, lc=lc, vary_transit=False) dur = _individual_ktransit_dur(fitT.time, fitT.transitmodel) freq = freq fts1 = fts/np.max(fts) fts2 = scipy.ndimage.filters.gaussian_filter(fts/np.max(fts), 5) fts3 = scipy.ndimage.filters.gaussian_filter(fts/np.max(fts), 50) ''' Plot Periodogram ---------------- ''' plt.subplot2grid((8,16),(0,4),colspan=4,rowspan=4) plt.loglog(freq, fts/np.max(fts)) plt.loglog(freq, scipy.ndimage.filters.gaussian_filter(fts/np.max(fts), 5), color='C1', lw=2.5) plt.loglog(freq, scipy.ndimage.filters.gaussian_filter(fts/np.max(fts), 50), color='r', lw=2.5) plt.axvline(283,-1,1, ls='--', color='k') plt.xlabel("Frequency [uHz]") plt.ylabel("Power") plt.xlim(10, 400) plt.ylim(1e-4, 1e0) # annotate with transit info font = {'family':'monospace', 'size':10} plt.text(10**1.04, 10**-3.50, f'depth = {depth:.4f} ', fontdict=font).set_bbox(dict(facecolor='white', alpha=.9, edgecolor='none')) plt.text(10**1.04, 10**-3.62, f'depth_snr = {depth_snr:.4f} ', fontdict=font).set_bbox(dict(facecolor='white', alpha=.9, edgecolor='none')) plt.text(10**1.04, 10**-3.74, f'period = {period:.3f} days ', fontdict=font).set_bbox(dict(facecolor='white', alpha=.9, edgecolor='none')) plt.text(10**1.04, 10**-3.86, f't0 = {t0:.3f} ', fontdict=font).set_bbox(dict(facecolor='white', alpha=.9, edgecolor='none')) try: # annotate with stellar params # won't work for TIC ID's not in the list if isinstance(ticid, str): ticid = int(re.search(r'\d+', str(ticid)).group()) Gmag = target_list[target_list['ID'] == ticid]['GAIAmag'].values[0] Teff = target_list[target_list['ID'] == ticid]['Teff'].values[0] R = target_list[target_list['ID'] == ticid]['rad'].values[0] M = target_list[target_list['ID'] == ticid]['mass'].values[0] plt.text(10**1.7, 10**-3.50, rf"G mag = {Gmag:.3f} ", fontdict=font).set_bbox(dict(facecolor='white', alpha=.9, edgecolor='none')) plt.text(10**1.7, 10**-3.62, rf"Teff = {int(Teff)} K ", fontdict=font).set_bbox(dict(facecolor='white', alpha=.9, edgecolor='none')) plt.text(10**1.7, 10**-3.74, rf"R = {R:.3f} $R_\odot$ ", fontdict=font).set_bbox(dict(facecolor='white', alpha=.9, edgecolor='none')) plt.text(10**1.7, 10**-3.86, rf"M = {M:.3f} $M_\odot$ ", fontdict=font).set_bbox(dict(facecolor='white', alpha=.9, edgecolor='none')) except: pass '''# plot ACF inset ax = plt.gca() axins = inset_axes(ax, width=2.0, height=1.4) axins.plot(freq_acf, acf) 
axins.set_xlim(1,25) axins.set_xlabel("ACF [uHz]")''' ''' Plot BLS -------- ''' plt.subplot2grid((8,16),(2,0),colspan=4, rowspan=1) plt.plot(results.period, results.power, "k", lw=0.5) plt.xlim(results.period.min(), results.period.max()) plt.xlabel("period [days]") plt.ylabel("log likelihood") # Highlight the harmonics of the peak period plt.axvline(period, alpha=0.4, lw=4) for n in range(2, 10): plt.axvline(n*period, alpha=0.4, lw=1, linestyle="dashed") plt.axvline(period / n, alpha=0.4, lw=1, linestyle="dashed") phase = (t0 % period) / period foldedtimes = (((time - phase * period) / period) % 1) foldedtimes[foldedtimes > 0.5] -= 1 foldtimesort = np.argsort(foldedtimes) foldfluxes = flux[foldtimesort] plt.subplot2grid((8,16), (3,0),colspan=2) plt.scatter(foldedtimes, flux, s=2) plt.plot(np.sort(foldedtimes), scipy.ndimage.filters.median_filter(foldfluxes, 40), lw=2, color='r', label=f'P={period:.2f} days, dur={dur:.2f} hrs') plt.xlabel('Phase') plt.ylabel('Flux') plt.xlim(-0.5, 0.5) plt.ylim(-0.0025, 0.0025) plt.legend(loc=0) fig = plt.gcf() fig.patch.set_facecolor('white') fig.suptitle(f'{ticid}', fontsize=14) fig.set_size_inches(12, 10) if save_data: np.savetxt(outdir+'/timeseries/'+str(ticid)+'.dat.ts', np.transpose([time, flux]), fmt='%.8f', delimiter=' ') np.savetxt(outdir+'/fft/'+str(ticid)+'.dat.ts.fft', np.transpose([freq, fts]), fmt='%.8f', delimiter=' ') with open(os.path.join(outdir,"transit_stats.txt"), "a+") as file: file.write(f"{ticid} {depth} {depth_snr} {period} {t0} {dur}\n") """ --------------- TRANSIT VETTING --------------- """ tpf = get_cutout(ticid, cutout_size=11) ica_lcs = find_ica_components(tpf) fig = plt.subplot2grid((8,16),(0,8),colspan=4,rowspan=4) fig.patch.set_facecolor('white') tpf.plot(ax=fig, title='', show_colorbar=False) add_gaia_figure_elements(tpf, fig) fig = plt.subplot2grid((8,16),(2,8),colspan=4,rowspan=2) lc.fold(2*period, t0+period/2).scatter(ax=fig, c='k', label='Odd Transit') lc.fold(2*period, t0+period/2).bin(3).plot(ax=fig, c='C1', lw=2) plt.xlim(-.5, 0) rms = np.std(lc.flux) plt.ylim(-3*rms, rms) fig = plt.subplot2grid((8,16),(3,8),colspan=4,rowspan=2) lc.fold(2*period, t0+period/2).scatter(ax=fig, c='k', label='Even Transit') lc.fold(2*period, t0+period/2).bin(3).plot(ax=fig, c='C1', lw=2) plt.xlim(0, .5) plt.ylim(-3*rms, rms) fig = plt.subplot2grid((8,16),(0,12),colspan=4,rowspan=4) for i,ilc in enumerate(ica_lcs): scale = 1 plt.plot(ilc + i*scale) plt.xlim(0, len(ica_lcs[0])) plt.ylim(-scale, len(ica_lcs)*scale) """ STARRY MODEL ------------ """ from .utils import _fit x, y, yerr = lc.time, lc.flux, lc.flux_err model, static_lc = _fit(x, y, yerr, target_list=target_list) model_lc = lk.LightCurve(time=x, flux=static_lc) with model: period = model.map_soln['period'][0] t0 = model.map_soln['t0'][0] r_pl = model.map_soln['r_pl'] * 9.96 a = model.map_soln['a'][0] b = model.map_soln['b'][0] try: r_star = target_list[target_list['ID'] == ticid]['rad'].values[0] except: r_star = 10. 
fig = plt.subplot2grid((8,16),(4,0),colspan=4,rowspan=2) ''' Plot unfolded transit --------------------- ''' lc.scatter(c='k', label='Corrected Flux') lc.bin(binsize=7).plot(c='b', lw=1.5, alpha=.75, label='binned') model_lc.plot(c='r', lw=2, label='Transit Model') plt.ylim([-.002, .002]) plt.xlim([lc.time[0], lc.time[-1]]) fig = plt.subplot2grid((8,16),(6,0),colspan=4,rowspan=2) ''' Plot folded transit ------------------- ''' lc.fold(period, t0).scatter(c='k', label=rf'$P={period:.3f}, t0={t0:.3f}, R_p={r_pl:.3f} R_J, b={b:.3f}$') lc.fold(period, t0).bin(binsize=7).plot(c='b', alpha=.75, lw=2) model_lc.fold(period, t0).plot(c='r', lw=2) plt.xlim([-0.5, .5]) plt.ylim([-.002, .002])
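# lc.fold above is presumably lightkurve's; the underlying arithmetic is a
# modulo that centres the transit midpoint at phase zero. A minimal sketch:
import numpy as np

def fold(time, period, t0):
    """Orbital phase in [-0.5, 0.5) with the transit midpoint (t0) at 0."""
    return ((np.asarray(time) - t0 + 0.5 * period) % period) / period - 0.5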
#ax.xaxis.set_ticks(ticks) #ax.yaxis.set_ticks([]) plt.xlabel('Time (days)') plt.ylabel('Flux (mJy)') c = fname.split('_')[0] plt.title('{0} lightcurve'.format(c)) ax = fig.add_subplot(gs[pltnum + 2]) plt.xlabel('Wavelength (days)') plt.ylabel('Relative intensity') plt.title('{0} Periodogram'.format(c)) time = numpy.array(time) flux = numpy.array(flux) # center the flux flux = (flux - numpy.mean(flux)) / (1.0 * numpy.std(flux)) result = lomb.fasper(time, flux, 6.0, 0.5) FIND_FREQUENCIES = int(result[2]) print FIND_FREQUENCIES # filter out weird frequencies spectral_results = filter(lambda elem: elem[0] < 0.55 and elem[0] > (2.0 / len(time)), zip(result[0], result[1])) wavelengths = [] for frequency in sorted(spectral_results, key=itemgetter(1), reverse=True): wavelength = int(round(1.0 / frequency[0])) # check if wavelength is approximately in array include = True #for found_wavelength in wavelengths: # if abs(found_wavelength[0] - wavelength) <= 4: # include = False #if include: if wavelength > 20: wavelengths.append([wavelength, frequency[1]])
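# The selection loop above keeps the strongest peaks and (in the commented-out
# check) drops near-duplicate wavelengths. A small helper capturing both rules
# (the 4-sample tolerance from that check and the > 20 cut used above) might read:
def top_periods(freq_power_pairs, n=4, tolerance=4, min_period=20):
    """Pick up to n periods, strongest first, skipping any within
    `tolerance` samples of one already kept."""
    periods = []
    for f, p in sorted(freq_power_pairs, key=lambda fp: fp[1], reverse=True):
        period = int(round(1.0 / f))
        if period <= min_period:
            continue
        if all(abs(period - kept) > tolerance for kept in periods):
            periods.append(period)
        if len(periods) == n:
            break
    return periods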
def plot(species): #Set model_cut switch to default 0, if want to do more complicated cuts from model field, specify model_cut_switch == 1 in species definitions #Vice versa with obs_switch model_cut_switch = 0 obs_switch = 0 ofac = 1 if species == 'O3': units = 'ppbV' first_label_pos = 3 obs_data_name = 'Ozone mixing ratio (ppbV)_(Mean)' unit_cut = 1e9 species_type = 'Conc.' actual_species_name = 'O3' elif species == 'CO': units = 'ppbV' first_label_pos = 1 obs_data_name = 'CO mixing ratio (ppbV)_(Mean)' unit_cut = 1e9 species_type = 'Conc.' actual_species_name = 'CO' ofac = 2.0001 elif species == 'NO': units = 'pptV' first_label_pos = 1 obs_data_name = 'NO mixing ratio (pptv)_(Mean)' unit_cut = 1e12 species_type = 'Conc.' actual_species_name = 'NO' elif species == 'NO2': units = 'pptV' first_label_pos = 1 obs_data_name = 'NO2 mixing ratio (pptv)_(Mean)' unit_cut = 1e12 species_type = 'Conc.' actual_species_name = 'NO2' elif species == 'C2H6': units = 'pptV' first_label_pos = 1 obs_data_name = 'ethane mixing ratio (pptV)_(Mean)' unit_cut = 1e12 species_type = 'Conc.' actual_species_name = 'C2H6' elif species == 'C3H8': units = 'pptV' first_label_pos = 1 obs_data_name = 'propane mixing ratio (pptV)_(Mean)' unit_cut = 1e12 species_type = 'Conc.' actual_species_name = 'C3H8' elif species == 'DMS': units = 'pptV' first_label_pos = 1 obs_data_name = 'dms mixing ratio (pptV)_(Mean)' unit_cut = 1e12 species_type = 'Conc.' actual_species_name = 'DMS' elif species == 'TRA_6': #Isoprene units = 'pptV' first_label_pos = 1 obs_data_name = 'Isoprene (pptv)_(Mean)' unit_cut = 1e12 species_type = 'Conc.' actual_species_name = 'Isoprene' elif species == 'ACET': units = 'pptV' first_label_pos = 1 obs_data_name = 'acetone mixing ratio (pptV)_(Mean)' unit_cut = 1e12 species_type = 'Conc.' actual_species_name = 'Acetone' elif species == 'GMAO_TEMP': # Temp from met fields units = 'K' first_label_pos = 3 obs_data_name = 'Air Temperature (degC) Campbell_(Mean)' unit_cut = 1 species_type = 'Temp.' actual_species_name = 'Surface Temperature' obs_switch = 1 elif species == 'GMAO_PSFC': #Surface Pressure units = 'hPa' first_label_pos = 3 obs_data_name = 'Atmospheric Pressure (hPa) Campbell_(Mean)' unit_cut = 1 species_type = 'Pres.' 
actual_species_name = 'Surface Pressure' elif species == 'GMAO_WIND': #Wind Speed calculated from UWND and VWND def read_diff_species(): k = names.index('GMAO_UWND') i = names.index('GMAO_VWND') model_cut = np.sqrt((model[:, k]**2) + (model[:, i]**2)) return model_cut units = r'$ms^{-1}$' first_label_pos = 3 obs_data_name = 'Wind Speed (m/s) Campbell_(Mean)' unit_cut = 1 species_type = 'Wind Speed' model_cut_switch = 1 actual_species_name = 'Surface Windspeed' elif species == 'GMAO_RADSW': #Shortwave solar radiation at the surface units = r'$Wm^{-2}$' first_label_pos = 3 obs_data_name = 'Solar Radiation (Wm-2) Campbell_(Mean)' unit_cut = 1 species_type = 'Solar Radiation' actual_species_name = 'Surface Solar Radiation' elif species == 'GMAO_ABSH': #Absolute Humidity units = 'molec/cm-3' first_label_pos = 3 obs_data_name = '' unit_cut = 1 species_type = 'Absolute Humidity' actual_species_name = 'Absolute Humidity' elif species == 'GMAO_RHUM': #Relative Humidity units = '%' first_label_pos = 3 obs_data_name = 'Relative Humidity (%) Campbell_(Mean)' unit_cut = 1 species_type = 'Relative Humidity' actual_species_name = 'Relative Humidity' #reads in the model data def readfile(filename, location): read = np.load(filename) names = read[0, 2:] locs = np.where(read[:, 1] == location) big = read[locs] valid_data = big[:, 2:] names = names.tolist() valid_data = np.float64(valid_data) return valid_data, names #do I need to read everything in try: names except NameError: # Read in the model output model, names = readfile("GEOS_v90103_4x5_CV_logs.npy", '001') # Processes the date year = (model[:, 0] // 10000) month = ((model[:, 0] - year * 10000) // 100) day = (model[:, 0] - year * 10000 - month * 100) hour = model[:, 1] // 100 min = (model[:, 1] - hour * 100) doy=[ datetime.datetime(np.int(year[i]),np.int(month[i]),np.int(day[i]),\ np.int(hour[i]),np.int(min[i]),0)- \ datetime.datetime(2006,1,1,0,0,0) \ for i in range(len(year))] since2006 = [ doy[i].days + doy[i].seconds / (24. * 60. * 60.) for i in range(len(doy)) ] #now read in the observations myfile = nappy.openNAFile('York_merge_Cape_verde_1hr_R1.na') myfile.readData() k_var1 = myfile["VNAME"].index(obs_data_name) # OK, need to convert values from a list to a numpy array time = np.array(myfile['X']) if obs_switch == 0: var1 = np.array(myfile['V'][k_var1]) elif obs_switch == 1: var1 = np.array(myfile['V'][k_var1]) + 273.15 valids1 = var1 > 0 time2 = time[valids1] var2 = var1[valids1] #Pre normalise obs data for lomb analysis standard_deviation_obs_p = np.std(var2) mean_obs_p = np.mean(var2) normal_var2 = var2 - mean_obs_p normal_var2 = normal_var2 / standard_deviation_obs_p #Calculate variance of pre-processed obs data- should be 1 if normal #standard_dev_obs = np.std(normal_var_2, dtype=np.float64) #variance_obs = standard_dev_obs**2 #print 'Variance - pre-processed obs data= ', variance_obs #Define sampling intervals samp_spacing = 1. / 24.
#Convert model time array into numpy array since2006 = np.array(since2006) #Impose instrument error on the model sims #O3 adjustment_factor = 1 #Need to normalise model data also if model_cut_switch == 0: k = names.index(species) model_cut = model[:, k] * unit_cut model_cut = np.array([a + random.normalvariate(0, 10) for a in model_cut]) if model_cut_switch == 1: model_cut = read_diff_species() model_cut = np.array([a + random.normalvariate(0, 10) for a in model_cut]) standard_deviation_model_p = np.std(model_cut) mean_model_p = np.mean(model_cut) normal_model = model_cut - mean_model_p normal_model = normal_model / standard_deviation_model_p #Calculate variance of pre-processed model data- should be 1 if normal #standard_dev_model = np.std(normal_model, dtype=np.float64) #variance_model = standard_dev_model**2 #print 'Variance - pre-processed model data= ', variance_model #Plot them all up. fig = plt.figure(figsize=(20, 12)) #Plot up standard conc. v time plot ax1 = fig.add_subplot(2, 1, 1) fig.subplots_adjust(hspace=0.3) plt.plot(time2, var2, color='black', label='Cape Verde Obs.') plt.plot(since2006, model_cut, color='green', label='GEOS v9.01.03 4x5 ') plt.grid(True) leg = plt.legend(loc=first_label_pos) leg.get_frame().set_alpha(0.4) plt.xlabel('Time (Days since 2006)') print units plt.ylabel('%s (%s)' % (species_type, units)) plt.title('%s V Time' % (actual_species_name)) #Define sampling frequency samp_freq = 24 #Lomb-scargle plot ax3 = fig.add_subplot(2, 1, 2) #Plot axis period lines and labels annotate_line_y = np.arange(1e-10, 1e4, 1) freq_year = [345] * len(annotate_line_y) plt.plot(freq_year, annotate_line_y, 'r--', alpha=0.4) plt.text(345, 1e-10, '1 Year', fontweight='bold') #Obs lomb fa, fb, nout, jmax, prob = lomb.fasper(time2, normal_var2, ofac, samp_freq) #Divide output by sampling frequency fb = fb / samp_freq #Calculate Nyquist frequency, Si and Si x 2 for normalisation checks. #nyquist_freq_lomb_obs = frequencies[-1] #Si_lomb_obs = np.mean(fb)*nyquist_freq_lomb_obs #print nyquist_freq_lomb_obs, Si_lomb_obs, Si_lomb_obs*2 #plot up plt.loglog(1. / fa, fb, 'kx', markersize=2, label='Cape Verde Obs. ') #Model lomb fx, fy, nout, jmax, prob2 = lomb.fasper(since2006, normal_model, ofac, samp_freq) #Divide output by sampling frequency fy = fy / samp_freq #Calculate Nyquist frequency, Si and Si x 2 for normalisation checks. #nyquist_freq_lomb_model = frequencies[-1] #Si_lomb_model = np.mean(fy)*nyquist_freq_lomb_model #print nyquist_freq_lomb_model, Si_lomb_model, Si_lomb_model*2 #plot up plt.loglog(1. / fx, fy, 'gx', alpha=0.75, markersize=2, label='GEOS v9.01.03 4x5 ') #Compare obs. and model PSD within set period bands, from the minimum period (2 hours) upwards obs_periods = 1. / fa model_periods = 1. / fx percent1 = period_percent_diff(np.min(obs_periods), 1, fb, fy, obs_periods, model_periods) percent2 = period_percent_diff(1, 2, fb, fy, obs_periods, model_periods) percent3 = period_percent_diff(2, 7, fb, fy, obs_periods, model_periods) plt.grid(True) leg = plt.legend(loc=7) leg.get_frame().set_alpha(0.4) plt.text(1e-2, 3000, 'Period: 2 hours to 1 day, a %% Diff. of: %.2f%%' % (percent1), fontweight='bold') plt.text(1e-2, 500, 'Period: 1 day to 2 days, a %% Diff. of: %.2f%%' % (percent2), fontweight='bold') plt.text(1e-2, 90, 'Period: 2 days to 7 days, a %% Diff. of: %.2f%%' % (percent3), fontweight='bold') plt.ylim(1e-10, 1e4) plt.xlabel('Period (Days)') plt.ylabel(r'PSD $(ppb^{2}/days^{-1})$') plt.title('Lomb-Scargle %s Power V Period' % actual_species_name) #plt.savefig('O3_capeverde_comparison_plots.ps', dpi = 200) plt.show()
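# period_percent_diff is called above but not defined in this file. From its
# call sites, a plausible reconstruction (an assumption, not the original) sums
# each PSD over the period band and reports the model total as a percentage
# difference from the obs total:
import numpy as np

def period_percent_diff(pmin, pmax, obs_psd, model_psd, obs_periods, model_periods):
    """Hypothetical sketch: % difference of band-summed model PSD from
    band-summed obs PSD over periods [pmin, pmax] (days)."""
    obs_band = obs_psd[(obs_periods >= pmin) & (obs_periods <= pmax)].sum()
    model_band = model_psd[(model_periods >= pmin) & (model_periods <= pmax)].sum()
    return (model_band - obs_band) / obs_band * 100.0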
# fft_phase[num] = np.pi + diff #print 'mag sum', np.sum(fft_mag) #wk1, wk2, a, ph = lomb_phase.lomb(a,waveform) #window = np.kaiser(len(waveform),5) #window = signal.flattop(len(waveform), sym=False) #window= np.hamming(len(waveform)) #waveform_mean = np.mean(waveform) #waveform = waveform - waveform_mean #waveform = waveform*window wk1, wk2, amp, l_phase = lomb_phase.lomb(a, waveform) #four return values match the lomb_phase.lomb signature commented above; lomb.fasper returns five lomb_periods = 1./wk1 #amp_corr = 1./(sum(window)/len(window)) #amp = amp * amp_corr #test = waveform >= 0 #a = a[test] #waveform = waveform[test]
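# The commented-out experiments above taper the waveform before the transform
# and correct amplitudes by the window's coherent gain. A minimal sketch of
# that pattern (np.hamming, as in the comments; `waveform` as defined above):
import numpy as np

window = np.hamming(len(waveform))
tapered = (waveform - np.mean(waveform)) * window
amp_corr = 1.0 / (np.sum(window) / len(window))  # coherent gain correction
# amp = amp * amp_corr  # apply to the amplitude spectrum of `tapered`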