Example #1
#assumed imports for this snippet (the scraped example omits them, along with the
#module-level globals it references: lat_indices, lon_indices, lat_c, lon_c,
#model_date, model_time, species, start_year, ofac, nsim, mctest)
import numpy as np

import modules
import redfit

def run_LSP(mod_data, x):

    lat_i = lat_indices[x]
    lon_i = lon_indices[x]

    print(lat_i, lon_i)

    current_lat = lat_c[lat_i]
    current_lon = lon_c[lon_i]

    waveform = mod_data

    waveform_ave = np.average(waveform)

    model_date_val = np.copy(model_date)
    model_time_val = np.copy(model_time)

    time = modules.date_process(model_date_val, model_time_val, start_year)

    if species.lower() not in ('gmao_temp', 'gmao_psfc', 'wind_speed', 'wind_direction'):
        waveform = waveform * 1e9

    #check model vals are valid
    #valid = vals >= 0
    #vals = vals[valid]
    #model_time_val = model_time[valid]
    #model_date_val = model_date[valid]

    #take 8 hour average
    divisor = 8

    total_len = len(waveform) // divisor    #integer number of 8-hour blocks
    start = 0
    end = divisor
    ave_waveform = []
    ave_time = []
    for i in range(total_len):
        ave = np.ma.average(waveform[start:end])
        ave_time = np.append(ave_time, time[start])
        ave_waveform = np.append(ave_waveform, ave)
        start += divisor
        end += divisor

    time = np.copy(ave_time)
    waveform = np.copy(ave_waveform)

    #take lsp unwindowed of waveform

    ua_periods, ua_mag, ua_ph, ua_fr, ua_fi = modules.take_lomb_unwindowed(
        time, waveform, ofac, 1. / 24)

    #take out known periodic components (1, 182.625 and 365.25 days) a priori for a more accurate red noise fit.
    closest_daily_index = min(range(len(ua_periods)),
                              key=lambda i: abs(ua_periods[i] - 1.))
    closest_ha_index = min(range(len(ua_periods)),
                           key=lambda i: abs(ua_periods[i] - 182.625))
    closest_annual_index = min(range(len(ua_periods)),
                               key=lambda i: abs(ua_periods[i] - 365.25))

    rm_indices = [closest_daily_index, closest_ha_index, closest_annual_index]

    ua_mag_c, ua_fr, ua_fi = redfit.sidelobe_percent_remove(
        np.copy(ua_mag), ua_fr, ua_fi, rm_indices, 5., ua_periods)

    #-------------------------------------------------------------------------------
    #Do IFFT of altered spectra - with significant periods removed and gaps left in real and imag components linearly interpolated.
    #altered spectra provides red noise estimation baseline

    ##use ifft to get time series back from adjusted spectra
    #complex Fourier spectrum which corresponds to the Lomb-Scargle periodogram:
    F = [0] * ((len(ua_fr) * 2) + 1)

    #set first real value to average
    F[0] = complex(waveform_ave * len(waveform), 0)

    #Get reverse real and imaginary values
    rev_ua_fr = np.copy(ua_fr[::-1])
    rev_ua_fi = np.copy(ua_fi[::-1])

    rev_ua_fr[0] = 0
    rev_ua_fi[0] = 0

    f_index = 1

    #Fill Fourier Spectrum real and imaginary values
    for i in range(len(ua_fr)):
        F[f_index] = complex(ua_fr[i], ua_fi[i])
        f_index += 1

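    #second pass appends the complex conjugates in reversed frequency order (the
    #negative-frequency half), making F Hermitian so that its ifft is purely real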
    for i in range(len(ua_fr)):
        F[f_index] = complex(rev_ua_fr[i], -rev_ua_fi[i])
        f_index += 1

    F = np.array(F)

    #Take ifft and keep only the real part of the reconstructed series
    ifft_ua_ts = np.fft.ifft(F)
    ifft_ua_ts = ifft_ua_ts.real.astype('float64')

    ifft_ua_ts_len = (len(ifft_ua_ts) // ofac) + np.mod(len(ifft_ua_ts), ofac)

    ifft_time = time[-ifft_ua_ts_len:]
    ifft_ua_ts = ifft_ua_ts[-len(waveform):]

    ifft_time = ifft_time - ifft_time[0]

    a_periods, a_mag, corr_a_mag, a_fr, a_fi, a_red_periods, a_red_mag, a_gredth, a_fac95, a_fac99, a_fac99_9, a_faccrit, a_fac_grid, a_sig_levels, a_tau, a_corr = redfit.red_background(
        nsim, mctest, ifft_time, ifft_ua_ts, ofac)

    #apply lsp correction from altered spectrum to unaltered spectrum
    corr_ua_mag = ua_mag / a_corr

    #check confidence of each point on spectrum
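    #bin each spectral point between successive chi-squared confidence curves
    #(a_gredth * a_fac_grid); a point keeps the level of the band it falls in,
    #and points above the top band get the highest significance level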

    sigs = np.zeros(len(corr_ua_mag))

    last_ind = len(a_sig_levels) - 1

    for i in range(len(a_sig_levels) - 1):
        conf_low = a_gredth * a_fac_grid[i]
        conf_up = a_gredth * a_fac_grid[i + 1]

        current_last_ind = i + 1

        for j in range(len(corr_ua_mag)):
            if sigs[j] == 0:
                if (corr_ua_mag[j] >= conf_low[j]) and (corr_ua_mag[j] <
                                                        conf_up[j]):
                    sigs[j] = a_sig_levels[i]
                elif current_last_ind == last_ind:
                    if corr_ua_mag[j] > conf_up[j]:
                        sigs[j] = a_sig_levels[i + 1]

    #get critical significance for all points on spectrum
    crit_sig = a_gredth * a_faccrit

    #get 95,99 and 99.9 % chi squared significance bands for all points on spectrum
    sig_95 = a_gredth * a_fac95
    sig_99 = a_gredth * a_fac99
    sig_99_9 = a_gredth * a_fac99_9

    return (x, sigs, sig_95, sig_99, sig_99_9, crit_sig, a_gredth, corr_ua_mag,
            ua_periods, a_tau)
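

#A minimal driver sketch, not part of the scraped example: run_LSP reads several
#module-level globals (lat_indices, lon_indices, lat_c, lon_c, model_date,
#model_time, species, start_year, ofac), so a caller typically slices one grid
#cell's time series per flattened index x and maps the calls in parallel.
#The array name model_var and the pool size are illustrative assumptions.
if __name__ == '__main__':
    import multiprocessing

    #hypothetical (time, lat, lon) model array; model_var is not defined in the scraped example
    jobs = [(model_var[:, lat_indices[x], lon_indices[x]], x)
            for x in range(len(lat_indices))]

    with multiprocessing.Pool(processes=8) as pool:
        results = pool.starmap(run_LSP, jobs)

    #each result is (x, sigs, sig_95, sig_99, sig_99_9, crit_sig, a_gredth,
    #corr_ua_mag, ua_periods, a_tau), ordered by x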
Example #3
#assumed imports/context for this snippet (the scraped example omits them);
#gettau, makear1 and significance_tests are defined elsewhere in the same module,
#and modules provides take_lomb_unwindowed / take_fft_unwindowed
import numpy as np
import matplotlib.pyplot as plt

import modules

def red_background(nsim, mctest, t, x, ofac, all_t, all_x):
    
    #average dt of entire time series
    diffs = [t[i+1]-t[i] for i in range(len(t)-1)]  
    avgdt = np.average(diffs)

    ave = np.mean(x)
    #subtract mean from data
    x = x - ave

    #GET TAU
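    #tau is the persistence (e-folding) time of the AR(1) fit to the series;
    #it sets the lag-1 autocorrelation used to shape the theoretical red-noise spectrum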
    tau, rhoavg, ierr = gettau(t,x,1,avgdt)

    nout = int(0.5*int(ofac)*1*len(t))

    if tau == 'invalid':
        #tau estimation failed: return zero-filled spectra and a neutral correction factor
        (model_periods, model_mag, corr_model_mag, model_fr, model_fi,
         red_periods, red_mag_avg, gredth) = [np.zeros(nout) for _ in range(8)]
        fac95 = fac99 = fac99_9 = faccrit = 0
        fac_grid = np.zeros(nout)
        sig_levels = np.zeros(nout)
        tau = 0
        corr = 1
        return (model_periods, model_mag, corr_model_mag, model_fr, model_fi,
                red_periods, red_mag_avg, gredth, fac95, fac99, fac99_9,
                faccrit, fac_grid, sig_levels, tau, corr)

    #make sure that tau is non-negative
    if (tau < 0.0):
        print('Negative tau is forced to zero.')
        tau = 0.0
        
    x = x + ave
    
    #determine lag-1 autocorrelation coefficient
    rho = np.exp(-avgdt / tau)    # avg. autocorrelation coefficient
    rhosq = rho * rho
    
    #t = np.copy(all_t)
    #x = np.copy(all_x)
    
    xdif = np.max(t)-np.min(t)
    
    #Calculate model spectrum
    
    
    model_periods,model_mag,model_ph,model_fr,model_fi = modules.take_lomb_unwindowed(t,x,ofac,avgdt)
    fft_periods_all,fft_mag_all,fft_fr,fft_fi,fft_array = modules.take_fft_unwindowed(t,x,avgdt)
    fft_freq_all = 1./fft_periods_all
    
    
    full_n = len(model_periods)
    model_freq = 1./model_periods

    # estimate data variance from data spectrum
    # ----------------------------------------
    varx = (model_freq[0]) * np.sum(model_mag)  # NB: freq[1] = df
    
    varx_fft = (fft_freq_all[0]) * np.sum(fft_mag_all)
    
    red_mag = np.zeros((nsim,len(model_periods)))
    red_mag_sum = np.zeros(len(model_periods))
    fft_mag = np.zeros((nsim,len(fft_periods_all)))
    fft_mag_sum = np.zeros(len(fft_periods_all))

    #create AR1 spectrum nsim times
    for i in range(nsim):
        print('Nsim = ', i+1)
        red = makear1(t,len(x),tau)
        if mctest == True:
            #red_periods,red_mag[i,:],red_ph,red_fr,red_fi = modules.take_lomb_unwindowed(t,red,ofac,avgdt)
            #red_freq = 1./red_periods
            
            fft_periods,fft_mag[i,:],fft_fr,fft_fi,fft_array = modules.take_fft_unwindowed(t,red,avgdt)
            fft_freq = 1./fft_periods
            
        else:
            #red_periods,red_mag[0,:],red_ph,red_fr,red_fi = modules.take_lomb_unwindowed(t,red,ofac,avgdt)
            #red_freq = 1./red_periods
            
            fft_periods,fft_mag[0,:],fft_fr,fft_fi,fft_array = modules.take_fft_unwindowed(t,red,avgdt)
            fft_freq = 1./fft_periods
            
            #plt.loglog(red_periods,red_mag[0,:],color='black')
            #plt.loglog(fft_periods,fft_mag[0,:],color='red')
            
            #plt.show()

        #red_periods = red_periods[cp:]
        #red_mag = red_mag[0,cp:]

    #scale and sum red-noise spectra
    #-------------------------------
        if mctest == True:    
            #varr = (red_freq[0]) * np.sum(red_mag[i,:])  # NB: freq[1] = df
            #fac = varx / varr
            #red_mag[i,:] = fac * red_mag[i,:]
            #red_mag_sum = red_mag_sum + red_mag[i,:]
            
            varr = (fft_freq[0]) * np.sum(fft_mag[i,:])  # NB: freq[1] = df
            fac = varx_fft / varr
            fft_mag[i,:] = fac * fft_mag[i,:]
            fft_mag_sum = fft_mag_sum + fft_mag[i,:]
            
            
        else:
            #varr = (red_freq[0]) * np.sum(red_mag[0,:])  # NB: freq[1] = df
            #fac = varx / varr
            #red_mag_sum = red_mag_sum + fac * red_mag[0,:]
            
            varr = (fft_freq[0]) * np.sum(fft_mag[0,:])  # NB: freq[1] = df
            fac = varx_fft / varr
            fft_mag_sum = fft_mag_sum + fac * fft_mag[0,:]
        
    #determine average red-noise spectrum; scale average again to
    #make sure that roundoff errors do not affect the scaling
    #------------------------------------------------------------
    #red_mag_avg = red_mag_sum / float(nsim)
    #varr = (red_freq[0]) * np.sum(red_mag_avg)
    #fac = varx / varr
    #red_mag_avg = fac * red_mag_avg

    fft_mag_avg = fft_mag_sum / float(nsim)
    varr = (fft_freq[0]) * np.sum(fft_mag_avg)
    fac = varx_fft / varr
    fft_mag_avg = fac * fft_mag_avg

    # set theoretical spectrum (e.g., Mann and Lees, 1996, Eq. 4)
    # make area equal to that of the input time series
    # -----------------------------------------------------------
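    #Mann & Lees (1996) Eq. 4: G(f) = (1 - rho^2) / (1 - 2*rho*cos(pi*f/f_Nyq) + rho^2),
    #rescaled below so its integral matches the variance of the data spectrum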
    #print red_freq[-1]
    #fnyq = red_freq[-1]     
    #gredth = (1.0-rhosq) / (1.0-2.0*rho*np.cos(np.pi*red_freq/fnyq)+rhosq)
    #varr = red_freq[0] * np.sum(gredth)
    #fac = varx / varr
    #gredth = fac * gredth
    
    
    print(fft_freq[-1])
    fnyq = fft_freq[-1]     
    gredth_fft = (1.0-rhosq) / (1.0-2.0*rho*np.cos(np.pi*fft_freq/fnyq)+rhosq)
    varr = fft_freq[0] * np.sum(gredth_fft)
    fac = varx_fft / varr
    gredth_fft = fac * gredth_fft
    
    #ratio = float(len(act_periods))/float(len(model_periods))
    #print 'ratio = ', ratio
    
    #model_mag = model_mag/ratio
    #red_mag_avg = red_mag_avg/ratio
    #gredth = gredth/ratio

    
    #determine correction factor
    #---------------------------
    #corr = red_mag_avg / gredth

    #correct for bias in autospectrum
    #--------------------------------
    #corr_model_mag = model_mag / corr
    #corr_model_mag = model_mag
    #gredth = gredth*corr
    
    
    #print 'model freq 0 = ',  model_freq[1]
    print('varx = ', varx)
    print('ofac = ', ofac)
    print('avgdt = ', avgdt)
    print('Avg. autocorr. coeff., rho = ', rho)
    print('Avg. tau = ', tau)
 
    
    npoints = len(t)                           # of data points
    nseg = int(2 * npoints / (1 + 1))         # points per segment
    #avgdt = (t[-1] - t[0]) / (npoints-1.0)    # average sampling interval
    tp = avgdt * nseg                      # average period of a segment
    #df = 1.0 / (ofac * tp)                 # freq. spacing
    df = model_freq[0]
    wz = 2.0 * np.pi * df                     # omega = 2*pi*f
    fnyq = 1.0 * 1.0 / (2.0 * avgdt)     # average Nyquist freq.
    nfreq = fnyq / df + 1                  # f(1) = f0; f(nfreq) = fNyq
    lfreq = nfreq * 2
    nout = nfreq
    
    scal = 2.0 / (1.0 * nseg * df * ofac)
    
    print('df = ', df)
    print('Nseg = ', nseg)
    print('Scal = ', scal)
    
    print(model_freq[0:20])
    print(model_freq[-10:])
    
    #fft_mag_all = fft_mag_all*scal
    #fft_mag_all = fft_mag_all/4.
    
    #winbw = (model_freq[1]-model_freq[0]) * ofac * 1.21
    
    #model_mag = model_mag*len(t)
    
    #model_mag = 20*np.log10(model_mag)
    #gredth = 20*np.log10(gredth)

    #plt.loglog(red_periods,red_mag_avg)
    #plt.loglog(fft_periods,fft_mag_avg)
    #plt.loglog(fft_periods_all,fft_mag_avg)
    #plt.loglog(fft_periods,gredth_fft)
    #plt.plot(model_freq,model_mag)
    #plt.plot(red_freq,red_mag_avg)
    plt.loglog(fft_periods_all,fft_mag_all)
    plt.loglog(fft_periods,fft_mag_avg)
    #plt.plot(red_freq,gredth)
    #plt.loglog(fft_periods,gredth_fft)
    #plt.plot(model_freq,corr_model_mag)
    
    #plt.show()
    
    corr = [1.]*len(fft_freq)
    fac95,fac99,fac99_9,faccrit,fac_grid,sig_levels,ci99 = significance_tests(x,fft_periods,fft_mag,nsim,1,corr,mctest)

    plt.loglog(fft_periods,fft_mag_avg*fac95)
    plt.loglog(fft_periods,fft_mag_avg*fac99)
    plt.loglog(fft_periods,fft_mag_avg*faccrit)
    plt.show()
    
    #NOTE: in this FFT-based variant the Lomb red-noise arrays are commented out above;
    #map the FFT equivalents onto the returned names so the original signature still holds
    corr_model_mag = model_mag    #corr is all ones here, so no bias correction is applied
    red_periods = fft_periods
    red_mag_avg = fft_mag_avg
    gredth = gredth_fft
    return model_periods,model_mag,corr_model_mag,model_fr,model_fi,red_periods,red_mag_avg,gredth,fac95,fac99,fac99_9,faccrit,fac_grid,sig_levels,tau,corr
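

#A hedged call sketch (names and values below are illustrative assumptions, not
#from the scraped example): this FFT-based variant takes the evenly averaged
#series as t/x and the full-resolution series as all_t/all_x (unused here), e.g.
#
#(periods, mag, corr_mag, fr, fi, red_periods, red_mag, gredth,
# fac95, fac99, fac99_9, faccrit, fac_grid, sig_levels, tau,
# corr) = red_background(1000, True, t_8h, x_8h, 4, t_full, x_full)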