def vme_calculate_breakdown_time(shotnum):
    """ Extracts breakdown time (in us) from optical trigger data. """

    ## Make sure that the vme is not subtracting the breakdown time.
    CURRENT_BREAKDOWN_CONFIG = diag_params['gen.set.breakdown.time.to.zero']
    diag_params['gen.set.breakdown.time.to.zero'] = False
    
    # Save the current filter and smoothing settings, then disable both.
    CURRENT_FILTER_STATE = diag_params['gen.prefilter']
    CURRENT_PRESMOOTH_STATE = diag_params['gen.presmooth']
    diag_params['gen.prefilter'] = False
    diag_params['gen.presmooth'] = False
    
      
    # If shotnum is actually a list, extract the first element.
    if isinstance(shotnum, list):
        print 'vme_calculate_breakdown_time called with a list as input'
        shotnum = shotnum[0]
    
    # Set the start and end of the window (in microseconds) in which to look for the breakdown time.
    START_WINDOW = 10
    END_WINDOW = 20
    THRESHOLD = 50
    SMOOTH_WIN = 30    
#    START_WINDOW = 5000
#    END_WINDOW = 10000
#    THRESHOLD = 1e-2
#    SMOOTH_WIN = 10

    # Look for the largest rising peak.  The diagnostic to examine can be changed
    # to tek_hv, iso_hv, or the collimator, or left generic.
    breakdown_diag = 'tek_hv'
    filepath = vme_get_filepath(shotnum, breakdown_diag)
    data = readVME(filepath, cols=diag_params[breakdown_diag+'.cols'], 
                   rows=diag_params[breakdown_diag+'.rows'])
                   
    time = list(data[0])
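    # list.index() requires an exact match, so START_WINDOW and END_WINDOW must
    # correspond exactly to sample values present in the time base.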
    start_index = time.index(START_WINDOW)
    end_index = time.index(END_WINDOW)
    
    ## Get the diff for points within the window.
    diff = np.diff(smooth(data[1][start_index:end_index], SMOOTH_WIN))
    
    # Get locations that pass the threshold.
    max_ind_arr = np.where(diff > THRESHOLD)[0]
    if len(max_ind_arr) > 0:
        max_ind = max_ind_arr[0]
    else:
        print 'The diff of shotnum ' + str(shotnum) + ' never passes ' + str(THRESHOLD)
        print 'Check THRESHOLD within vme_analyze.  Setting the value to ' + str(START_WINDOW)
        max_ind = 0
    
    
    # Set configurations back to user settings.
    diag_params['gen.set.breakdown.time.to.zero'] = CURRENT_BREAKDOWN_CONFIG    
    diag_params['gen.prefilter'] = CURRENT_FILTER_STATE
    diag_params['gen.presmooth'] = CURRENT_PRESMOOTH_STATE
    
    # Return the time associated with that index.
    return data[0][start_index + max_ind]    
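
# Illustrative sketch (not from the original source): the rising-edge search used
# by vme_calculate_breakdown_time, reduced to plain numpy on an arbitrary 1-D
# signal.  A boxcar average stands in for the project's smooth() helper.
import numpy as np

def _first_rise_index(signal, threshold, smooth_win):
    """Return the first index where the diff of the boxcar-smoothed signal exceeds threshold."""
    kernel = np.ones(smooth_win) / float(smooth_win)
    smoothed = np.convolve(signal, kernel, mode='same')
    crossings = np.where(np.diff(smoothed) > threshold)[0]
    return crossings[0] if len(crossings) else 0
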
def vme_avg_scalar_sig(shotnums, diag, extra=''):
    """ Averages the VME data associated with several shots 
    
        See vme_avg_sig for input description.
        
        returns a list of 2 1d arrays with the time and the signal average.
    """    
   
    # If shotnums is a single string, turn it into a list eg '847' -> ['847']
    if isinstance(shotnums, basestring):
        shotnums = [shotnums]
        
    # Temp holder dict for signals.
    signals = {}
    
    ## Loop through and sum the data up.
    for shotnum in shotnums:
        filepath = vme_get_filepath(shotnum, diag)
        data = readVME(filepath, cols=diag_params[diag+'.cols'], 
                       rows=diag_params[diag+'.rows'])
        time = data[0, :]

        if diag_params['gen.set.breakdown.time.to.zero']:
            time = time - vme_get_breakdown_time(shotnum)
        
        signal = data[diag_params[diag+'.ind'], :] 
        
        # If need to trim the arrays to a given time interval.
        if diag_params['gen.trim']:
            low_trim= diag_params['gen.trim.low.limit']
            low_ind = np.where(time > low_trim)[0][0]
            high_ind = low_ind + diag_params['gen.trim.range']
            time = time[low_ind: high_ind] 
            signal = signal[low_ind: high_ind]
            
        # Remove transients.
        signal = vme_remove_transients(signal)
        
        # If pre-smooth is activated, smooth the VME input.
        if (diag_params['gen.presmooth']):
            signal = smooth(signal, diag_params['gen.presmooth.const'])
            
        # If pre-filter is activated, it will apply a pre-filter to the signal.
        if (diag_params['gen.prefilter']):
            signal = vme_filter_signal(signal, diag_params['gen.filter.application'])
        
        ## Subtract off any DC offset.  The offset is determined by the points
        ## between 2% and 4% of the record.
        n_cols = diag_params[diag+'.cols']
        signals[shotnum] = signal - np.mean(signal[int(n_cols*.02):int(n_cols*.04)])
    
    avg_signal = np.mean(signals.values(), axis=0)
    
    ## Checks the correlation between the different signals to check for VME
    ## failures or obvious bad data.
    vme_avg_sig_correlation(avg_signal, signals, diag)
    
    # If the calling function wants the individual signal traces.
    if extra == 'indiv_signals':
        extra = signals
    
    return (time, avg_signal, extra)

def compute_spectrum(self):
    #print "Signal length = " + str((self.signal_1).shape)

    (Pxx_1, freq) = mlab.psd(self.signal_1, NFFT=self.NSAMPLES, Fs=self.RATE, detrend=mlab.detrend_mean, 
                             window=mlab.window_hanning, noverlap=self.N_OVERLAP, sides='onesided')
    (Pxx_2, freq) = mlab.psd(self.signal_2, NFFT=self.NSAMPLES, Fs=self.RATE, detrend=mlab.detrend_mean, 
                             window=mlab.window_hanning, noverlap=self.N_OVERLAP, sides='onesided')
    # Take 10*log10() to convert to dB and compute the total energy
    #self.amp_sum_in = 0.0
    #self.amp_dum_out = 0.0
    Pxx_1 = np.array([10*math.log(p,10) for p in Pxx_1])
    Pxx_2 = np.array([10*math.log(p,10) for p in Pxx_2])    

    #amp_sum_sub = abs(amp_sum_out - amp_sum_in)
    #energy = amp_sum_sub #which energy source to use?
    #self.energy = self.amp_sum_1
    #print 'Pxx_out shape=' + str(Pxx_1.shape)
    # Smooth in Frequency Domain (Moving Average in time domain)
    temp = np.reshape(Pxx_2-Pxx_1, (self.NSAMPLES/2 + 1,))
    sub_smoothed = cookb_signalsmooth.smooth(temp, window_len=61, window='flat'); #61 or 51

    #compute the SNR
    self.snr_list.append(self.SNR(sub_smoothed))

    if self.PLOT == 1:
      self.plot_graph(freq, Pxx_1, Pxx_2, sub_smoothed)

    return sub_smoothed
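
# Note (not in the original): the per-element dB loop above is equivalent to the
# vectorized form 10.0 * np.log10(Pxx), assuming the PSD values are strictly
# positive.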
Example #4
def plot_from_rawdata(data1, data2, rate, Ns=NS, overlap_ratio=OVERLAP_RATIO):
    '''
    Given two time-series signals and their common sampling rate, plot the frequency spectra
    '''
    nOverlap = Ns * overlap_ratio
    (Pxx_in, freq) = mlab.psd(data1,
                              NFFT=Ns,
                              Fs=rate,
                              detrend=mlab.detrend_mean,
                              window=mlab.window_hanning,
                              noverlap=nOverlap,
                              sides='onesided')
    (Pxx_out, freq) = mlab.psd(data2,
                               NFFT=Ns,
                               Fs=rate,
                               detrend=mlab.detrend_mean,
                               window=mlab.window_hanning,
                               noverlap=nOverlap,
                               sides='onesided')
    Pxx_in = log(Pxx_in)
    Pxx_out = log(Pxx_out)
    Pxx_diff = Pxx_out - Pxx_in
    Pxx_diff_smoothed = cookb_signalsmooth.smooth(Pxx_diff.ravel(),
                                                  window_len=51,
                                                  window='flat')
    plot_graph(freq, Pxx_in, Pxx_out, Pxx_diff, Pxx_diff_smoothed)
def get_index_of_pulse_peak(signal):
    """
    
    Smooths signal first to remove transients.
    Returns the index of the peak element whether it is a max or a min.
    """
    SMOOTHING_PTS = 25
    signal = smooth(signal, SMOOTHING_PTS)    
    return np.where(np.abs(signal) == np.max(np.abs(signal)))[0][0]
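
# Note (not in the original): the final line above is equivalent to
# np.argmax(np.abs(signal)), which returns the same first index of the largest
# absolute value without scanning the array twice.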
Example #6
def smooth(self, w=100, mode='gaussian'):
    if mode == 'gaussian':
        from cookb_signalsmooth import smooth
        x = smooth(self.x, window_len=2 * w + 1)
    elif mode == 'median':
        from mediansmooth import mediansmooth
        x = self.x.copy()
        mediansmooth(x, w)
    else:
        raise ValueError("Unknown smooth mode %s" % mode)
    return self.__array_wrap__(x)
def vme_2d_plot_scalar_signal(
        time,
        signal, 
        color=plot_diag_params['gen.color'],
        ls=plot_diag_params['gen.ls'],
        subplot=plot_diag_params['gen.subplot.scalar'],
        title=plot_diag_params['gen.title'],
        xtitle=plot_diag_params['gen.xtitle'],
        ytitle=plot_diag_params['gen.ytitle'],
        xlim=plot_diag_params['gen.xlim'],
        ylim=plot_diag_params['gen.ylim'],
        smooth_win=plot_diag_params['gen.smooth_win'],
        label=True,
        extra_signals=''
        ):
    """ Basic 2D plotting of a single quantity vs time """    
    
    ## Check to see if raw is wanted.
    if plot_diag_params['gen.include.raw']:
        ## Plot the raw version using a thin line
        raw = plt.plot(time, signal)
        plt.setp(raw, color=color, ls=ls)
        plt.setp(raw, linewidth=plot_diag_params['gen.thin_ln_width'])
    
    ## Plot the smoothed version using a thicker line.
    smoothed = plt.plot(time, smooth(signal, window_len=smooth_win))
    plt.setp(smoothed, color=color, ls=ls)
    plt.setp(smoothed, linewidth=plot_diag_params['gen.thick_ln_width'])
    
    ## At the moment, extra signals is only used for plotting the bands.
    if extra_signals != '':
        (sig_min, sig_max) = extra_signals
        plt.fill_between(time, sig_min, sig_max, color = 'none', \
                facecolor = color, alpha = 0.5)
    
    
    # Label the plot.
    if label: plt.setp(smoothed, label=plot_diag_params['gen.shotnum'])

 
    plt.title(title)
    plt.ylabel(ytitle)
    plt.xlabel(xtitle)  

    # Check to see if custom limits are desired.
    if plot_diag_params['gen.custom.limit.x']:
        plt.xlim(xlim)    
    if plot_diag_params['gen.custom.limit.y']:
        plt.ylim(ylim)
            
    
    return smoothed
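
# Illustrative usage sketch (not from the original source): plotting one averaged
# trace with the helper above, relying on plot_diag_params for styling.
#     time, avg_signal, _ = vme_avg_scalar_sig(['847', '848'], 'tek_hv')
#     vme_2d_plot_scalar_signal(time, avg_signal)
#     plt.show()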
Example #8
def plot_from_rawdata(data1, data2, rate, Ns=NS, overlap_ratio=OVERLAP_RATIO):
    '''
    Given two time-series signals and their common sampling rate, plot the frequency spectra
    '''
    nOverlap = Ns * overlap_ratio
    (Pxx_in, freq) = mlab.psd(data1, NFFT=Ns, Fs=rate, 
                              detrend=mlab.detrend_mean, window=mlab.window_hanning,
                              noverlap=nOverlap, sides='onesided')
    (Pxx_out, freq) = mlab.psd(data2, NFFT=Ns, Fs=rate, 
                               detrend=mlab.detrend_mean, window=mlab.window_hanning,
                               noverlap=nOverlap, sides='onesided')
    Pxx_in = log(Pxx_in)
    Pxx_out = log(Pxx_out)
    Pxx_diff = Pxx_out - Pxx_in
    Pxx_diff_smoothed = cookb_signalsmooth.smooth(Pxx_diff.ravel(), window_len=51, window='flat'); 
    plot_graph(freq, Pxx_in, Pxx_out, Pxx_diff, Pxx_diff_smoothed)    
Example #9
def compute_baroclinic_conversion(expt, eddy, zlev):
    avg  = sp.loadmat("eddies_subsets/%s_%s_subset_%sm.mat" %(expt, eddy, zlev))
    lon  = avg['lon']
    lat  = avg['lat']
    u    = avg['u']
    v    = avg['v']
    t    = avg['temp']

    u = np.ma.masked_where(np.abs(u) > 2, u)
    v = np.ma.masked_where(np.abs(v) > 2, v)
    t = np.ma.masked_where(np.abs(t) > 30, t)


    lm, jm, im = u.shape

    # computing averages and perturbations ####################################
    ubar, vbar, tbar = u.mean(axis=0), v.mean(axis=0), t.mean(axis=0)
    unot, vnot, tnot = u*0, v*0, t*0

    for l in range(lm):
        unot[l,...] = u[l,...] - ubar
        vnot[l,...] = v[l,...] - vbar
        tnot[l,...] = t[l,...] - tbar


    # baroclinic energy  conversions ##########################################
    bec = []
    g = 9.8

    theta_z = tbar.mean()
    alpha = sw.alpha(35, theta_z, zlev)
    gradT = np.gradient(tbar)
    dTdx  = gradT[1] / (0.05 * 111000)
    dTdy  = gradT[0] / (0.05 * 111000)

    count = 0

    for l in range(lm):
        count += 1
        bc =  ( (g*alpha)/theta_z ) * ( unot[l]*tnot[l]*dTdx + vnot[l]*tnot[l]*dTdy )
        bec.append( bc.mean() )

    bec = np.array(bec)

    bec = smooth(bec, window_len=10, window='hanning')

    return bec
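
# Note (not in the original): the perturbation loop in compute_baroclinic_conversion
# can be written with numpy broadcasting, e.g.
#     unot = u - u.mean(axis=0)
#     vnot = v - v.mean(axis=0)
#     tnot = t - t.mean(axis=0)
# which gives the same (time, y, x) anomaly arrays without looping over levels.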
Example #10
    def compute_spectrum(self):
        #print "Signal length = " + str((self.signal_1).shape)

        (Pxx_1, freq) = mlab.psd(self.signal_1,
                                 NFFT=self.NSAMPLES,
                                 Fs=self.RATE,
                                 detrend=mlab.detrend_mean,
                                 window=mlab.window_hanning,
                                 noverlap=self.N_OVERLAP,
                                 sides='onesided')
        (Pxx_2, freq) = mlab.psd(self.signal_2,
                                 NFFT=self.NSAMPLES,
                                 Fs=self.RATE,
                                 detrend=mlab.detrend_mean,
                                 window=mlab.window_hanning,
                                 noverlap=self.N_OVERLAP,
                                 sides='onesided')
        # Take 10*log10() to convert to dB and compute the total energy
        #self.amp_sum_in = 0.0
        #self.amp_dum_out = 0.0
        Pxx_1 = np.array([10 * math.log(p, 10) for p in Pxx_1])
        Pxx_2 = np.array([10 * math.log(p, 10) for p in Pxx_2])

        #amp_sum_sub = abs(amp_sum_out - amp_sum_in)
        #energy = amp_sum_sub #which energy source to use?
        #self.energy = self.amp_sum_1
        #print 'Pxx_out shape=' + str(Pxx_1.shape)
        # Smooth in Frequency Domain (Moving Average in time domain)
        temp = np.reshape(Pxx_2 - Pxx_1, (self.NSAMPLES / 2 + 1, ))
        sub_smoothed = cookb_signalsmooth.smooth(temp,
                                                 window_len=61,
                                                 window='flat')
        #61 or 51

        #compute the SNR
        self.snr_list.append(self.SNR(sub_smoothed))

        if self.PLOT == 1:
            self.plot_graph(freq, Pxx_1, Pxx_2, sub_smoothed)

        return sub_smoothed
Example #11
def get_spectrums(signal_in,
                  signal_out,
                  rate,
                  Ns=NS,
                  overlap_ratio=OVERLAP_RATIO):
    '''
    Compute the spectra for the input time-series data

    Args:
        signal_in: the time-series data from channel 1
        signal_out: the time-series data from channel 2
        rate: the sampling rate for the input signals
    Returns:
        freq: the discrete frequencies of the FFT results
        Pxx_in: the spectrum of the time-series data from channel 1
        Pxx_out: the spectrum of the time-series data from channel 2
        Pxx_diff: the difference (Pxx_out-Pxx_in)
        Pxx_diff_smoothed: the smoothed (in frequency domain) Pxx_diff
    '''
    nOverlap = Ns * overlap_ratio
    (Pxx_in, freq) = mlab.psd(signal_in,
                              NFFT=Ns,
                              Fs=rate,
                              detrend=mlab.detrend_mean,
                              window=mlab.window_hanning,
                              noverlap=nOverlap,
                              sides='onesided')
    (Pxx_out, freq) = mlab.psd(signal_out,
                               NFFT=Ns,
                               Fs=rate,
                               detrend=mlab.detrend_mean,
                               window=mlab.window_hanning,
                               noverlap=nOverlap,
                               sides='onesided')
    Pxx_in = log(Pxx_in)
    Pxx_out = log(Pxx_out)
    Pxx_diff = Pxx_out - Pxx_in
    Pxx_diff_smoothed = cookb_signalsmooth.smooth(Pxx_diff.ravel(),
                                                  window_len=51,
                                                  window='flat')
    return freq, Pxx_in, Pxx_out, Pxx_diff, Pxx_diff_smoothed
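
# Illustrative usage sketch (not from the original source): comparing two noisy
# channels with get_spectrums; NS and OVERLAP_RATIO are assumed module-level
# constants, and rate is the common sampling rate of both channels.
#     rate = 44100
#     t = np.arange(rate) / float(rate)
#     ch1 = np.sin(2 * np.pi * 440 * t) + 0.1 * np.random.randn(t.size)
#     ch2 = np.sin(2 * np.pi * 440 * t) + 0.5 * np.random.randn(t.size)
#     freq, Pxx_in, Pxx_out, Pxx_diff, Pxx_diff_smoothed = get_spectrums(ch1, ch2, rate)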
Example #12
def get_spectrums(signal_in, signal_out, rate, 
                  Ns=NS, overlap_ratio=OVERLAP_RATIO):
    '''
    Compute the spectra for the input time-series data

    Args:
        signal_in: the time-series data from channel 1
        signal_out: the time-series data from channel 2
        rate: the sampling rate for the input signals
    Returns:
        freq: the discrete frequencies of the FFT results
        Pxx_in: the spectrum of the time-series data from channel 1
        Pxx_out: the spectrum of the time-series data from channel 2
        Pxx_diff: the difference (Pxx_out-Pxx_in)
        Pxx_diff_smoothed: the smoothed (in frequency domain) Pxx_diff
    '''
    nOverlap = Ns * overlap_ratio
    (Pxx_in, freq) = mlab.psd(signal_in, NFFT=Ns, Fs=rate, detrend=mlab.detrend_mean, window=mlab.window_hanning, noverlap=nOverlap, sides='onesided')
    (Pxx_out, freq) = mlab.psd(signal_out, NFFT=Ns, Fs=rate, detrend=mlab.detrend_mean, window=mlab.window_hanning, noverlap=nOverlap, sides='onesided')
    Pxx_in = log(Pxx_in)
    Pxx_out = log(Pxx_out)
    Pxx_diff = Pxx_out - Pxx_in
    Pxx_diff_smoothed = cookb_signalsmooth.smooth(Pxx_diff.ravel(), window_len=51, window='flat')
    return freq, Pxx_in, Pxx_out, Pxx_diff, Pxx_diff_smoothed
def get_index_of_pulse_start(signal):
    """
    Locates the starting index of the pulse by using np.diff and seeing if it gets
    above a certain threshold.
    
    """
    THRESHOLD = .3
    IGNORE_PTS = int(len(signal)*.1)
    SMOOTHING_PTS = 25
    
    signal = smooth(signal, SMOOTHING_PTS)
    # Ignore the first few points.  Helps to get rid of transients.
    diff_array = np.diff(signal)[IGNORE_PTS:]
    
    index_arr = np.where(np.abs(diff_array) > THRESHOLD * np.max(np.abs(diff_array)))[0]
    
    if np.abs(np.mean(signal)) < np.max(np.abs(signal) * .1):
        print 'Likely VME error, returning 0'
        return 0
    elif np.shape(index_arr) == (0,):
        print 'Peak find failed, returning 0'
        return 0
    else:
        return index_arr[0] + IGNORE_PTS
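
# Note (not in the original): the threshold above is relative -- an index is
# treated as the pulse start only where |diff| exceeds THRESHOLD (30%) of the
# largest |diff| in the smoothed, transient-trimmed signal.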
Example #14
fig1 = plt.figure(facecolor='w', figsize=(10,10))
for k in range(len(xbt)):
	plt.plot(xbt[k].temp, -xbt[k].pres, label=xbt[k].filename[4:16])
plt.legend(loc=4)
plt.grid()
plt.title(u'Estações de XBT Brutas')
plt.savefig('brutos.png', dpi=300)
plt.show()

fig2 = plt.figure(facecolor='w', figsize=(10,10))
for k in range(len(xbt)):
	if 'station1_T5' in xbt[k].filename:
		# xbt[k].temp, xbt[k].pres = filter(xbt[k].temp, xbt[k].pres)
		# xbt[k].temp, xbt[k].pres = filter(xbt[k].temp, xbt[k].pres)
		xbt[k].temp = smooth(xbt[k].temp, window_len=201, window='hanning')

	elif 'station2_T5' in xbt[k].filename:
		xbt[k].temp, xbt[k].pres = filter(xbt[k].temp, xbt[k].pres)
		xbt[k].temp, xbt[k].pres = filter(xbt[k].temp, xbt[k].pres)
		xbt[k].temp = smooth(xbt[k].temp, window_len=51, window='hanning')
	else:
		xbt[k].temp, xbt[k].pres = filter(xbt[k].temp, xbt[k].pres)
		xbt[k].temp = smooth(xbt[k].temp, window_len=31, window='hanning')

	plt.plot(xbt[k].temp, -xbt[k].pres, label=xbt[k].filename[4:16])
plt.legend(loc=4)
plt.grid()
plt.title(u'Estações de XBT Filtradas')
plt.savefig('filtrados.png', dpi=300)
plt.show()
Example #15
alpha = sw.alpha(35, theta_z, zlev)
gradT = np.gradient(tbar)
dTdx = gradT[1] / (0.05 * 111000)
dTdy = gradT[0] / (0.05 * 111000)

count = 0

for l in range(lm):
    count += 1
    bc = ((g * alpha) / theta_z) * (unot[l] * tnot[l] * dTdx +
                                    vnot[l] * tnot[l] * dTdy)
    bec.append(bc.mean())

bec = np.array(bec)

bec = smooth(bec, window_len=10, window='hanning')

sp = 4
#ly=16
ly = -1
for l in range(ly):
    count += 1
    bc = ((g * alpha) / theta_z) * (unot[l] * tnot[l] * dTdx +
                                    vnot[l] * tnot[l] * dTdy)

#plt.pcolor(x_roms,y_roms,bc*10**(7), cmap=plt.get_cmap('seismic'), vmin=-0.5, vmax=0.5);plt.colorbar();
#plt.quiver(x_roms[0:-1:sp, 0:-1:sp],y_roms[0:-1:sp, 0:-1:sp],u[ly,0:-1:sp, 0:-1:sp], v[ly,0:-1:sp, 0:-1:sp])
#plt.show()

lon1 = x_roms.copy()
lat1 = y_roms.copy()
Example #16
h = grdfile.variables['h'][:]

# getting an isobath
plt.figure()
con = plt.contour(lonr, latr, h, levels=[100])
col = con.collections[0]
paths = col.get_paths()
path0 = paths[0]
isob = path0.vertices
plt.close('all')
# limiting isobath within model domain
f = np.where((isob[:, 1] >= -24) & (isob[:, 1] <= -8))
isob = isob[f[0], :]

# smoothing isobath
isob[:, 0] = smooth(isob[:, 0], window_len=201, window='hanning')
isob[:, 1] = smooth(isob[:, 1], window_len=101, window='hanning')

# now I load original small grid
grdfile = nc.Dataset('/home/rsoutelino/rsoutelino/myroms/phd_run/phd8_grd.nc')

# assigning some variables from grid file
lonr = grdfile.variables['lon_rho'][:]
latr = grdfile.variables['lat_rho'][:]
h = grdfile.variables['h'][:]

# creating adimensional pairs of the parameters
dr = dr / D  # adimensional horizontal resolution
r = D * np.arange(0, 1, dr)  # adimensional horizontal transects
r0 = (1.0 / D) * D  # defining transect center
d = (delta / 111.0) / D  # normalized jet width
Example #17
filename = 'mosaico_20120919.mat'
data = sp.loadmat(filename)
lon, lat, sst = data['lon'], data['lat'], data['sst']
sst = mask_nans(sst, missing_value=18)
f = np.where(sst < 0)
sst[f] = 18
lon, lat, sst = refine(lon, lat, sst, lims, res=0.02)

base = -42.097463607788086, -22.817603940087874 
outA = -42.296874999999993, -24.568749999999998
outB = -42.66718749999999, -24.245312499999997
inA  = -42.146874999999994, -23.237499999999997
inB  = -42.254687499999996, -23.209374999999994
eddy = np.loadtxt('eddy.txt')
eddy[:,0] = smooth(eddy[:,0], window_len=11, window='hanning')
eddy[:,1] = smooth(eddy[:,1], window_len=11, window='hanning')

trajectoryA = [np.linspace(base[0], outA[0], 10), 
              np.linspace(base[1], outA[1], 10)]
trajectoryB = [np.linspace(base[0], outB[0], 10), 
              np.linspace(base[1], outB[1], 10)]

stationsA = [np.linspace(inA[0], outA[0], 6), np.linspace(inA[1], outA[1], 6)]
stationsB = [np.linspace(inB[0], outB[0], 6), np.linspace(inB[1], outB[1], 6)]

distA, distB = [], []

for k in range(stationsA[0].size):
	distA.append( int(sw.dist([base[0], stationsA[0][k]], [base[1], stationsA[1][k]], 'nm')[0]) )
	distB.append( int(sw.dist([base[0], stationsB[0][k]], [base[1], stationsB[1][k]], 'nm')[0]) )
Example #18
# assigning some variables from grid file
lonr   = grdfile.variables['lon_rho'][:]
latr   = grdfile.variables['lat_rho'][:]
h      = grdfile.variables['h'][:]

# getting an isobath
plt.figure(); con = plt.contour(lonr, latr, h, levels=[100] )
col = con.collections[0]; paths = col.get_paths()
path0 = paths[0]; isob = path0.vertices; plt.close('all')
# limiting isobath within model domain
f = np.where( (isob[:,1] >= -24) & (isob[:,1] <= -8) )
isob = isob[f[0],:]

# smoothing isobath
isob[:,0] = smooth(isob[:,0],window_len=201,window='hanning')
isob[:,1] = smooth(isob[:,1],window_len=101,window='hanning')

# now I load original small grid
grdfile  = nc.Dataset('/home/rsoutelino/rsoutelino/myroms/phd_run/phd8_grd.nc')

# assigning some variables from grid file
lonr   = grdfile.variables['lon_rho'][:]
latr   = grdfile.variables['lat_rho'][:]
h      = grdfile.variables['h'][:]

# creating adimensional pairs of the parameters
dr    = dr / D                       # adimensional horizontal resolution  
r  = D * np.arange(0, 1, dr)         # adimensional horizontal transects
r0 = (1.0/D) * D                     # defining transect center
d  = ( delta / 111.0 ) / D           # normalized jet width
  wv, refl, error = np.loadtxt(str('S3OS2/'+arq_name), dtype=float, unpack=True)
  
  wv    = np.trim_zeros(wv)
  refl  = np.trim_zeros(refl)
  error = np.array(error[0:len(wv)])

# Number of channels:
  ch_step = int(len(refl)/((0.92 - 0.5)/step))

# Channels position:
  ch_pos = np.arange(0,len(refl),ch_step)

# Spectral smoothing signal:
  if t == 'sm':
    ''' 240 < window_len < 290 '''
    spectra = smooth.smooth(refl, window_len=par, window='blackman')

# Spline fit to the spectra:
  if t == 'sp':
    ''' 40 < s < 200 '''
    spline = intp.UnivariateSpline(wv,refl,w=1/error, k=3, s=par)
    print(spline.get_residual())
    spectra = spline(wv) / spline(0.55)

# Polynomial fit to the spectra:
  if t == 'pl':
    ''' deg = 7 or 8 '''
    poly_coefs = np.polyfit(wv, refl, deg=par)
    spectra = np.polyval(poly_coefs, wv)/np.polyval(poly_coefs,0.55)

# Fitting a line equation to the data:
Example #20
area = 1200 * np.abs( ( lats[0] - lats[-1] ) * 111000 )[0]
vm   = vg.mean()

tv_total = (area * vm) / 1e6

vg_cb = np.ma.masked_where(vg > 0, vg)
vm_cb = vg_cb.mean()

tv_cb = ((area/2) * vm_cb) / 1e6


lat = lat[0,:]
v = vg[0,:]


v, lon, lat, lons, lats = v[::-1], lon[::-1], lat[::-1], lons[::-1], lats[::-1]


x = np.linspace(lons[0], lon[-1], 15)
y = np.linspace(lats[0], lat[-1], 15)



vi = np.interp(y, lat, v)
vi = smooth(vi, window_len=6, window='hanning')

u = vi.copy()
v = u*0

mdict = {'u':u, 'v':v, 'x':x, 'y':y}
sp.savemat('gvel_map.mat', mdict)
Example #21
fig1 = plt.figure(facecolor='w', figsize=(10, 10))
for k in range(len(xbt)):
    plt.plot(xbt[k].temp, -xbt[k].pres, label=xbt[k].filename[4:16])
plt.legend(loc=4)
plt.grid()
plt.title(u'Estações de XBT Brutas')
plt.savefig('brutos.png', dpi=300)
plt.show()

fig2 = plt.figure(facecolor='w', figsize=(10, 10))
for k in range(len(xbt)):
    if 'station1_T5' in xbt[k].filename:
        # xbt[k].temp, xbt[k].pres = filter(xbt[k].temp, xbt[k].pres)
        # xbt[k].temp, xbt[k].pres = filter(xbt[k].temp, xbt[k].pres)
        xbt[k].temp = smooth(xbt[k].temp, window_len=201, window='hanning')

    elif 'station2_T5' in xbt[k].filename:
        xbt[k].temp, xbt[k].pres = filter(xbt[k].temp, xbt[k].pres)
        xbt[k].temp, xbt[k].pres = filter(xbt[k].temp, xbt[k].pres)
        xbt[k].temp = smooth(xbt[k].temp, window_len=51, window='hanning')
    else:
        xbt[k].temp, xbt[k].pres = filter(xbt[k].temp, xbt[k].pres)
        xbt[k].temp = smooth(xbt[k].temp, window_len=31, window='hanning')

    plt.plot(xbt[k].temp, -xbt[k].pres, label=xbt[k].filename[4:16])
plt.legend(loc=4)
plt.grid()
plt.title(u'Estações de XBT Filtradas')
plt.savefig('filtrados.png', dpi=300)
plt.show()
Example #22
import matplotlib.pyplot as plt
import numpy as np
from cookb_signalsmooth  import smooth
from intdir2uv import *




t = np.arange(0, 24, 0.1)
m = (np.random.randn(t.size) * 5) + 10
mraj = m + np.random.rand(t.size)*5 + 1
m = smooth(m, window_len=15)
mraj = smooth(mraj, window_len=10)
yg = np.arange(0, 25, 0.1)
xg, yg = np.meshgrid(t, yg)
direc = ((45*np.pi) / 180) + m*0
u, v = -1*(m*0+4) , -1*(m*0+4)


tfill  = np.concatenate( ( np.array([0]), t, np.array([t[-1]]) ) )
mfill  = np.concatenate( ( np.array([yg.max()]), mraj, np.array([yg.max()]) ) )
mfill2 = np.concatenate( ( np.array([yg.max()]), m, np.array([yg.max()]) ) )


fig = plt.figure(facecolor='w', figsize=(14, 10))

# WIND
p1 = fig.add_subplot(4,1,1)
p1.contourf(xg, yg, yg, np.arange(5, 20, 0.1), cmap=plt.cm.hot_r)
p1.plot(t, m, 'k', linewidth=2)
p2.set_ylim([-25, -5])

p3 = plt.subplot(223)
plt.plot(days, TSbc, 'r', linewidth=1.5, label='BC'); plt.grid()
plt.plot(days, TSnbuc, 'b', linewidth=1.5, label='NBUC'); plt.legend()
plt.title('Transport @ Southern Boundary', fontsize=11)
plt.xlabel('Days', fontsize=11)
plt.ylabel('Transport [Sv]', fontsize=11)
p3.set_xlim([days[0], days[-1]])
p3.set_ylim([-15, 25])

data = np.loadtxt('phd15_ek.out')

dt = 300

t = data[:,0]
t = (t*dt)  / 86400 
ke = data[:,5]
ke = smooth(ke, window_len=201, window='hanning')

p3 = plt.subplot(224)
plt.plot(t, ke, 'k', linewidth=1.5); plt.grid()
plt.title('Domain-averaged Kinetic Energy', fontsize=11)
plt.xlabel('Days', fontsize=11)
plt.ylabel('J m$^{-2}$', fontsize=11)
p3.set_xlim([days[0], days[-1]])
#p3.set_ylim([-15, 25])
    
plt.show()
plt.savefig('/home/rsoutelino/rsoutelino/prod/csr_phd/figures/transp_inside2.pdf')
Example #24
draw_logo(fig4, [0.12, 0.14, 0.15, 0.15], 'logo-left.png')
draw_logo(fig4, [0.23, 0.14, 0.15, 0.15], 'photos/IMG_5274.JPG')
plt.savefig('eddy_hunter_gvel.png', dpi=300)
plt.show()

area = 1200 * np.abs((lats[0] - lats[-1]) * 111000)[0]
vm = vg.mean()

tv_total = (area * vm) / 1e6

vg_cb = np.ma.masked_where(vg > 0, vg)
vm_cb = vg_cb.mean()

tv_cb = ((area / 2) * vm_cb) / 1e6

lat = lat[0, :]
v = vg[0, :]

v, lon, lat, lons, lats = v[::-1], lon[::-1], lat[::-1], lons[::-1], lats[::-1]

x = np.linspace(lons[0], lon[-1], 15)
y = np.linspace(lats[0], lat[-1], 15)

vi = np.interp(y, lat, v)
vi = smooth(vi, window_len=6, window='hanning')

u = vi.copy()
v = u * 0

mdict = {'u': u, 'v': v, 'x': x, 'y': y}
sp.savemat('gvel_map.mat', mdict)
def GenOutput(alignments, parameters):
    # Read in the data from file
    
    #pkl_file = open(inputFile,'rb')
    #[colVecs,colVecsIn,offset,offVal,dist,bps,sigDif,locfilter,filterParam,data,decoder,lstopers,rstopers,runLengthDist] = pickle.load(pkl_file)
    
    colVecs = alignments.inClubSum
    colVecsIn = alignments.scoreSum

    sigDif = parameters['GLOBAL_SIG_LEVEL']
    locfilter = 'CDF'
    
    soiInfo = alignments.seqOfInt.name
    if alignments.seqOfInt.readingFrames == 1:
        soi = alignments.seqOfInt.seq
    else:
        soi = alignments.seqOfInt.seq[0]
    testSeq = []
    testSeqInfo = []
    for ts in alignments.testSeqs:
        testSeq.append(ts.seq)
        testSeqInfo.append(ts.name)
    soi = numpy.array(soi,dtype = numpy.int32)
    
    lstopers = numpy.zeros(alignments.seqOfInt.length)
    rstopers = numpy.zeros(alignments.seqOfInt.length)
    #for alignment in alignments.aligns:
        #if len(alignment.localAligns)>0:
            #for locA in alignment.localAligns:
                #lstopers[locA.SOIrange[0]] += 1
                #rstopers[locA.SOIrange[1]] += 1
    
    ratio = parameters['REG_EXP_RATIO']
    numGenes = len(alignments.testSeqs)/alignments.seqOfInt.readingFrames
    #print numGenes
        
    conservedFrac = smooth.smooth(colVecsIn[0]/numpy.asarray(colVecs+0.01,dtype=numpy.float),window_len=11,window='flat')
    if len(numpy.shape(colVecsIn)) != 1:
        pass
        #silentMutations = smooth.smooth(colVecsIn[1]/numpy.asarray(colVecsIn[0]+0.01,dtype=numpy.float),window_len=11,window='flat')
        
        ##silentMutations2 = smooth.smooth(numpy.arcsin(numpy.sqrt(colVecsIn[1]/numpy.asarray(colVecsIn[0]+0.01,dtype=numpy.float))),window_len=11,window='flat') # arcsin transform
        #print numpy.mean(silentMutations)
        #print numpy.mean(silentMutations[numpy.where(silentMutations > 0.01)])
        #print numpy.mean(conservedFrac)
        #print numpy.mean(conservedFrac[numpy.where(conservedFrac > 0.01)])
    
    #print max(colVecs)
    #print max(colVecsIn)
    
    # decode the soi
    genomelist = []
    for i in range(len(soi)):
        if alignments.seqOfInt.sequenceType == 'AA':
            genomelist.append(FIVEBIT_TO_AA[soi[i]])
        elif alignments.seqOfInt.sequenceType == 'DNA':
            genomelist.append(FIVEBIT_TO_AA[soi[i]%2**5])
    #print 'Making run length distribution...'
    
    #fig = plt.figure()
    #y,x = GetRunLengthDistribution(numpy.array(offVal))
    #plt.loglog(x,y,align='center')
    #plt.title('Run Length Distribution after global (sig > ' + str(sigDif) + ') and local ' + locfilter + ' filtering')
    #plt.xlabel('Run Lengths')
    #plt.ylabel('Instances')
    
    #maxRun = max(runLengthDist)
    #plt.figure()
    #runLenLog = numpy.zeros(maxRun+1)
    #for i in range(len(runLengthDist)):
        #runLenLog[runLengthDist[i]] += 1
    #plt.loglog(range(0,maxRun+1),runLenLog)
    #t1 = plt.loglog(range(1,maxRun+1),runLenLog[1]*numpy.arange(1.0,maxRun+1)**-1.5)
    #t2 = plt.loglog(range(1,maxRun+1),runLenLog[1]*numpy.arange(1.0,maxRun+1)**-2.0)
    #plt.title('Run Length Distribution ' + str(numGenes) + ' Bact RpoBs')
    #plt.legend((t1,t2),('l^{-1.5}','l^{-2.0}'))
    
    logging.info('Making histograms...')

    
    
    residuesPerGraph = 400
    residuesPerLine = 100
    numGraphs = len(soi)/residuesPerGraph + 1
    for i in range(numGraphs):
        fig = plt.figure(figsize=(22,17))
        #fig.text(0.5,0.95,'InClub and InSetInClub matches across all alignments.\nGlobal filter of sigma > ' + str(sigDif) + ' then local filter of 5 in 8.',horizontalalignment='center')
        fig.text(0.5,0.92,'InClub and Score matches across all alignments.\nGlobal filter of sigma > ' + str(sigDif) + ' then local filter of ' + str(locfilter) + '.\nTotal genes compared: '+ str(numGenes)+'. Reference Gene: ' + str(soiInfo),horizontalalignment='center')
        fig.text(0.5,0.05,'Sequence compared against, red text indicates at least ratio=' + str(ratio) + ' InSetInClub\nSequence from ' + str(i*residuesPerGraph+1) + ' to ' + str(min((i+1)*residuesPerGraph,len(soi))) + ' of ' + str(len(soi)) + '.',horizontalalignment='center')
        fig.text(0.05,0.5,'Frequency of matches across all other genes, Score = blue and InClub = green and further in shades of red',rotation=90,verticalalignment='center')
        ax = []
        for j in numpy.arange(0,8,2):
            if len(soi) > residuesPerGraph*i + (residuesPerLine/2)*(j+2):
                ax.append(fig.add_subplot(int('81'+str(j+1))))
                fig.text(0.95,0.82-j*0.1,'Residues:\n'+str(i*residuesPerGraph+(residuesPerLine/2)*j+1)+'-'+str(i*residuesPerGraph+(residuesPerLine/2)*j+residuesPerLine),horizontalalignment='center')
                fig.text(0.95,0.82-(j+1)*0.1,'Cons. Ident.\n Start(m) and End(c)',horizontalalignment='center')
                ax[j].bar(numpy.arange(residuesPerLine),colVecs[(residuesPerLine/2)*j+residuesPerGraph*i:(residuesPerLine/2)*(j+2)+residuesPerGraph*i]/float(numGenes),color='green',alpha=0.25,align='center')
                #print colVecs[(residuesPerLine/2)*j+residuesPerGraph*i:(residuesPerLine/2)*(j+2)+residuesPerGraph*i]/float(numGenes)
                if len(numpy.shape(colVecsIn)) == 1:
                    ax[j].bar(numpy.arange(residuesPerLine),colVecsIn[(residuesPerLine/2)*j+residuesPerGraph*i:(residuesPerLine/2)*(j+2)+residuesPerGraph*i]/float(numGenes),align='center',color='blue')
                else:
                    #print numpy.shape(colVecsIn)
                    ax[j].bar(numpy.arange(residuesPerLine),colVecsIn[0][(residuesPerLine/2)*j+residuesPerGraph*i:(residuesPerLine/2)*(j+2)+residuesPerGraph*i]/float(numGenes),align='center',color='blue')
                    for k in range(1,numpy.shape(colVecsIn)[0]):
                        ax[j].bar(numpy.arange(residuesPerLine),colVecsIn[k][(residuesPerLine/2)*j+residuesPerGraph*i:(residuesPerLine/2)*(j+2)+residuesPerGraph*i]/float(numGenes),align='center',color='red')
                #ax[j].bar(numpy.arange(residuesPerLine),colVecs[(residuesPerLine/2)*j+residuesPerGraph*i:(residuesPerLine/2)*(j+2)+residuesPerGraph*i]/float(numGenes),color='green',alpha=0.25,align='center')
                # NOT LOG
                #ax[j].bar(numpy.arange(residuesPerLine),colVecs[(residuesPerLine/2)*j+residuesPerGraph*i:(residuesPerLine/2)*(j+2)+residuesPerGraph*i]/float(numGenes),color='green',alpha=0.25,align='center')
                #if len(numpy.shape(colVecsIn)) == 1:
                    #ax[j].bar(numpy.arange(residuesPerLine),colVecsIn[(residuesPerLine/2)*j+residuesPerGraph*i:(residuesPerLine/2)*(j+2)+residuesPerGraph*i]/float(numGenes),align='center',color='blue')
                #else:
                    ##print numpy.shape(colVecsIn)
                    #ax[j].bar(numpy.arange(residuesPerLine),colVecsIn[0][(residuesPerLine/2)*j+residuesPerGraph*i:(residuesPerLine/2)*(j+2)+residuesPerGraph*i]/float(numGenes),align='center',color='blue')
                    #for k in range(1,numpy.shape(colVecsIn)[0]):
                        #ax[j].bar(numpy.arange(residuesPerLine),colVecsIn[k][(residuesPerLine/2)*j+residuesPerGraph*i:(residuesPerLine/2)*(j+2)+residuesPerGraph*i]/float(numGenes),align='center',color='red')
                
                    #ax[j].plot(numpy.arange(100),silentMutations2[residuesPerLine*j+800*i:residuesPerLine*(j+1)+800*i],color='magenta') # arcsin transform
                #ax[j].bar(numpy.arange(100),lstopers[residuesPerLine*j+800*i:residuesPerLine*(j+1)+800*i]/float(numGenes),color='magenta',alpha=0.5,align='center')
                #ax[j].bar(numpy.arange(100),rstopers[residuesPerLine*j+800*i:residuesPerLine*(j+1)+800*i]/float(numGenes),color='black',alpha=0.5,align='center')
                ax[j].set_ylim(0,1)
                ax[j].set_xlim(0,residuesPerLine-1)
                ax[j].set_xticks(numpy.arange(residuesPerLine))
                ax[j].set_xticklabels(genomelist[(residuesPerLine/2)*j+residuesPerGraph*i:(residuesPerLine/2)*(j+2)+residuesPerGraph*i])
                ax[j].set_yticks([0.0,0.5,1.0])
                ax[j].set_yticklabels([0.0,0.5,1.0])
                k = 0
                for lab in ax[j].xaxis.get_ticklabels():
                    if len(numpy.shape(colVecsIn)) != 1:
                        if colVecsIn[0][(residuesPerLine/2)*j+residuesPerGraph*i+k]/float(numGenes) > ratio:
                            lab.set_color('red')
                        k += 1
                    else:
                        if colVecsIn[(residuesPerLine/2)*j+residuesPerGraph*i+k]/float(numGenes) > ratio:
                            lab.set_color('red')
                        k += 1
                ax.append(fig.add_subplot(int('81'+str(j+2))))
                ax[j+1].plot(numpy.arange(residuesPerLine),lstopers[(residuesPerLine/2)*j+residuesPerGraph*i:(residuesPerLine/2)*(j+2)+residuesPerGraph*i]/float(numGenes),color='magenta')
                ax[j+1].plot(numpy.arange(residuesPerLine),rstopers[(residuesPerLine/2)*j+residuesPerGraph*i:(residuesPerLine/2)*(j+2)+residuesPerGraph*i]/float(numGenes),color='cyan')
                ax[j+1].plot(numpy.arange(residuesPerLine),conservedFrac[(residuesPerLine/2)*j+residuesPerGraph*i:(residuesPerLine/2)*(j+2)+residuesPerGraph*i],color='black')
                if len(numpy.shape(colVecsIn)) != 1:
                    pass
                    #ax[j+1].plot(numpy.arange(residuesPerLine),silentMutations[(residuesPerLine/2)*j+residuesPerGraph*i:(residuesPerLine/2)*(j+2)+residuesPerGraph*i],color='red')
                ax[j+1].set_ylim(0,1)
                ax[j+1].set_xlim(0,residuesPerLine-1)
                ax[j+1].set_xticks(numpy.arange(residuesPerLine))
                ax[j+1].set_xticklabels(genomelist[(residuesPerLine/2)*j+residuesPerGraph*i:(residuesPerLine/2)*(j+2)+residuesPerGraph*i])
                ax[j+1].set_yticks([0.0,0.5,1.0])
                ax[j+1].set_yticklabels([0.0,0.5,1.0])
                k = 0
                for lab in ax[j+1].xaxis.get_ticklabels():
                    if lstopers[(residuesPerLine/2)*j+residuesPerGraph*i+k] + rstopers[(residuesPerLine/2)*j+residuesPerGraph*i+k] > 0:
                        lab.set_color('red')
                    k += 1
            else:
                ax.append(fig.add_subplot(int('81'+str(j+1))))
                tmplist1 = []
                tmplist2 = colVecsIn.transpose()[(residuesPerLine/2)*j+residuesPerGraph*i:len(soi)].transpose()
                tmplist3 = colVecs[(residuesPerLine/2)*j+residuesPerGraph*i:len(soi)]
                tmplist1.extend(genomelist[(residuesPerLine/2)*j+residuesPerGraph*i:len(soi)])
                #print tmplist5
                #tmplist2.extend(list(colVecs[residuesPerLine*j+800*i:len(soi)]))
                #tmplist3.extend(list(colVecsIn[residuesPerLine*j+800*i:len(soi)]))
                fig.text(0.95,0.82-j*0.1,'Residue Numbers:\n'+str(i*residuesPerGraph+(residuesPerLine/2)*j+1)+'-'+str(len(soi)),horizontalalignment='center')
                for k in range(residuesPerLine-len(tmplist1)):
                    tmplist1.append(' ')
                    #print numpy.shape(colVecsIn)
                    #print numpy.shape(tmplist2)
                    if len(numpy.shape(colVecsIn)) != 1:
                        #tmplist2 = numpy.append(tmplist2,numpy.zeros((numpy.shape(colVecsIn)[0],1)),1)
                        tmplist2 = numpy.append(tmplist2,numpy.zeros((numpy.shape(colVecsIn)[0],1)),1)
                    else:
                        tmplist2 = numpy.append(tmplist2,[0],1)
                    tmplist3 = numpy.append(tmplist3,[0],1)
                #print tmplist2[0][0:residuesPerLine]
                #print tmplist5
                ax[j].bar(numpy.arange(residuesPerLine),(numpy.array(tmplist3[0:residuesPerLine])/float(numGenes)),color='green',alpha=0.25,align='center')
                if len(numpy.shape(colVecsIn)) == 1:
                    ax[j].bar(numpy.arange(residuesPerLine),(numpy.array(tmplist2[0:residuesPerLine])/float(numGenes)),align='center', color='blue')
                else:
                    ax[j].bar(numpy.arange(residuesPerLine),(numpy.array(tmplist2[0][0:residuesPerLine])/float(numGenes)),align='center', color='blue')
                    for k in range(1,numpy.shape(colVecsIn)[0]):
                        ax[j].bar(numpy.arange(residuesPerLine),(numpy.array(tmplist2[k][0:residuesPerLine])/float(numGenes)),align='center', color='red')
                ## NOT LOG
                #ax[j].bar(numpy.arange(residuesPerLine),numpy.array(tmplist3[0:residuesPerLine])/float(numGenes),color='green',alpha=0.25,align='center')
                #if len(numpy.shape(colVecsIn)) == 1:
                    #ax[j].bar(numpy.arange(residuesPerLine),numpy.array(tmplist2[0:residuesPerLine])/float(numGenes),align='center', color='blue')
                #else:
                    #ax[j].bar(numpy.arange(residuesPerLine),numpy.array(tmplist2[0][0:residuesPerLine])/float(numGenes),align='center', color='blue')
                    #for k in range(1,numpy.shape(colVecsIn)[0]):
                        #ax[j].bar(numpy.arange(residuesPerLine),numpy.array(tmplist2[k][0:residuesPerLine])/float(numGenes),align='center', color='red')
                ax[j].set_ylim(0,1)
                ax[j].set_xlim(0,residuesPerLine-1)
                ax[j].set_xticks(numpy.arange(residuesPerLine))
                ax[j].set_xticklabels(tmplist1[0:residuesPerLine])
                ax[j].set_yticks([0.0,0.5,1.0])
                ax[j].set_yticklabels([0.0,0.5,1.0])
                k = 0
                for lab in ax[j].xaxis.get_ticklabels():
                    if len(numpy.shape(colVecsIn)) != 1:
                        if tmplist2[0][k]/float(numGenes) > ratio:
                            lab.set_color('red')
                        k += 1
                    else:
                        if tmplist2[k]/float(numGenes) > ratio:
                            lab.set_color('red')
                        k += 1
                ax.append(fig.add_subplot(int('81'+str(j+2))))
                ax[j+1].plot(numpy.arange(len(lstopers[(residuesPerLine/2)*j+residuesPerGraph*i:])),lstopers[(residuesPerLine/2)*j+residuesPerGraph*i:]/float(numGenes),color='magenta')
                ax[j+1].plot(numpy.arange(len(rstopers[(residuesPerLine/2)*j+residuesPerGraph*i:])),rstopers[(residuesPerLine/2)*j+residuesPerGraph*i:]/float(numGenes),color='cyan')
                ax[j+1].plot(numpy.arange(len(conservedFrac[(residuesPerLine/2)*j+residuesPerGraph*i:])),conservedFrac[(residuesPerLine/2)*j+residuesPerGraph*i:],color='black')
                if len(numpy.shape(colVecsIn)) != 1:
                    pass
                    #ax[j+1].plot(numpy.arange(len(silentMutations[(residuesPerLine/2)*j+residuesPerGraph*i:])),silentMutations[(residuesPerLine/2)*j+residuesPerGraph*i:],color='red')
                ax[j+1].set_ylim(0,1)
                ax[j+1].set_xlim(0,residuesPerLine-1)
                ax[j+1].set_xticks(numpy.arange(residuesPerLine))
                ax[j+1].set_xticklabels(genomelist[(residuesPerLine/2)*j+residuesPerGraph*i:(residuesPerLine/2)*(j+2)+residuesPerGraph*i])
                ax[j+1].set_yticks([0.0,0.5,1.0])
                ax[j+1].set_yticklabels([0.0,0.5,1.0])
                k = 0
                for lab in ax[j+1].xaxis.get_ticklabels():
                    if k < len(lstopers[(residuesPerLine/2)*j+residuesPerGraph*i:]):
                        if lstopers[(residuesPerLine/2)*j+residuesPerGraph*i+k] + rstopers[(residuesPerLine/2)*j+residuesPerGraph*i+k] > 0:
                            lab.set_color('red')
                    k += 1
                j = 8
                break
        fig.savefig(parameters['OUTFILE'] + '/images/COMBO' + str(i+1) + '.pdf')
        fig.savefig(parameters['OUTFILE'] + '/images/COMBO' + str(i+1) + '.png', bbox_inches='tight', pad_inches=0.03)
def smooth2(x, window_len=11, window='hanning'):
    return sm.smooth(x, window_len, window=window)
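
# Illustrative usage sketch (not from the original source), assuming sm is the
# cookbook smoothing module (e.g. `import cookb_signalsmooth as sm`) and numpy
# is imported as np:
#     noisy = np.random.randn(500).cumsum()
#     clean = smooth2(noisy, window_len=21, window='flat')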