Example 1
def plot_wiggle(self,figsize=[5,10],fill=True,perc=100,scale=1,subplot=False):
	# plot wiggle traces
	# figsize=[5,10]: matplotlib figure size [inch]
	# fill=True: fill values greater than zero
	# perc=100: percent clip
	# scale=1: scale trace plots
	plotdata=self.data
	plotdata=perc_clip(plotdata,perc)
	print("min=%s max=%s"%(plotdata.min(),plotdata.max()))
	maxval=np.abs(plotdata).max()
	ns=pk.get_ns(self)
	dt=pk.get_dt(self)
	ntr=pk.get_ntr(self)
	t=np.arange(ns)*dt

	if not subplot: plt.figure(figsize=figsize)
	for itr in range(ntr):
		trace=plotdata[itr,:]
		x=itr+trace/maxval*scale
		plt.plot(x,t,'k-',linewidth=0.5)
		if fill: plt.fill_betweenx(t,x,itr,where=x>itr,color='black',linewidth=0.)

	plt.xlim([-2,ntr+1])
	plt.ylim([t[-1],t[0]])
	plt.gca().xaxis.tick_top()
	plt.gca().xaxis.set_label_position('top')
	if subplot: return
	plt.xlabel('Trace number',fontsize='large')
	plt.ylabel('Time (s)',fontsize='large')
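
The same fill_betweenx idiom, reduced to a self-contained sketch on synthetic data (the perc_clip/pk helpers of the class above are not needed here; all names and shapes below are illustrative):

import numpy as np
import matplotlib.pyplot as plt

# synthetic gather: 20 traces of 200 samples at 4 ms (illustrative values only)
rng = np.random.default_rng(0)
data = rng.standard_normal((20, 200)).cumsum(axis=1)
dt = 0.004
t = np.arange(data.shape[1]) * dt
maxval = np.abs(data).max()

plt.figure(figsize=(5, 10))
for itr, trace in enumerate(data):
    x = itr + trace / maxval              # centre each wiggle on its trace number
    plt.plot(x, t, 'k-', linewidth=0.5)
    # fill only where the wiggle swings to the right of its baseline
    plt.fill_betweenx(t, x, itr, where=x > itr, color='black', linewidth=0.)
plt.ylim(t[-1], t[0])                     # time increases downwards
plt.xlabel('Trace number')
plt.ylabel('Time (s)')
plt.show()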
Example 2
def plot_fill(llr_cur, tkey, asimov_llr, hist_vals, bincen, fit_gauss, **kwargs):
    """
    Plots fill between the asimov llr value and the histogram values
    which represent an LLR distribution.
    """
    validate_key(tkey)

    where_mask = (bincen < asimov_llr) if 'true_N' in tkey else (bincen > asimov_llr)

    plt.fill_betweenx(
        hist_vals, bincen, x2=asimov_llr, where=where_mask, **kwargs)

    pvalue = (1.0 - float(np.sum(llr_cur > asimov_llr))/len(llr_cur)
              if 'true_N' in tkey else
              (1.0 - float(np.sum(llr_cur < asimov_llr))/len(llr_cur)))

    sigma_fit = np.fabs(asimov_llr - fit_gauss[1])/fit_gauss[2]
    #logging.info(
    #    "  For tkey: %s, gaussian computed mean (of alt MH): %.3f and sigma: %.3f"
    #    %(tkey,fit_gauss[1],fit_gauss[2]))
    pval_gauss = 1.0 - norm.cdf(sigma_fit)
    sigma_1side = np.sqrt(2.0)*erfinv(1.0 - pval_gauss)

    mctrue_row = [tkey,asimov_llr,llr_cur.mean(),pvalue,pval_gauss,sigma_fit,
                  sigma_1side]

    return mctrue_row
Example 3
def plot_vawig(axhdl, data, t, excursion):

    import numpy as np
    import matplotlib.pyplot as plt

    [ntrc, nsamp] = data.shape

    t = np.hstack([0, t, t.max()])
    
    for i in range(0, ntrc):
        tbuf = excursion * data[i,:] / np.max(np.abs(data)) + i
        
        tbuf = np.hstack([i, tbuf, i])
            
        axhdl.plot(tbuf, t, color='black', linewidth=0.5)
        plt.fill_betweenx(t, tbuf, i, where=tbuf>i, facecolor=[0.6,0.6,1.0], linewidth=0)
        plt.fill_betweenx(t, tbuf, i, where=tbuf<i, facecolor=[1.0,0.7,0.7], linewidth=0)
    
    axhdl.set_xlim((-excursion, ntrc+excursion))
    axhdl.xaxis.tick_top()
    axhdl.xaxis.set_label_position('top')
    axhdl.invert_yaxis()
Example 4
    def plot_xy(self, x, y, xerror=[], yerror=[], title=' ', xLabel=' ', yLabel=' '):
        """
        Simple X vs Y plot

        Inputs:
        ------
          - x = 1D array
          - y = 1D array

        Keywords:
        --------
          - xerror = error on 'x', 1D array
          - yerror = error on 'y', 1D array
          - title = plot title, string
          - xLabel = title of the x-axis, string
          - yLabel = title of the y-axis, string
        """
        fig = plt.figure(figsize=(18,10))
        plt.rc('font',size='22')
        self._fig = plt.plot(x, y, label=title)
        scale = 1
        ticks = ticker.FuncFormatter(lambda lon, pos: '{0:g}'.format(lon/scale))
        plt.ylabel(yLabel)
        plt.xlabel(xLabel)
        if len(yerror) > 0:
            #plt.errorbar(x, y, yerr=yerror, fmt='o', ecolor='k')
            plt.fill_between(x, y-yerror, y+yerror,
            alpha=0.2, edgecolor='#1B2ACC', facecolor='#089FFF', antialiased=True)
        if len(xerror) > 0:
            #plt.errorbar(x, y, xerr=xerror, fmt='o', ecolor='k')
            plt.fill_betweenx(y, x-xerror, x+xerror,
            alpha=0.2, edgecolor='#1B2ACC', facecolor='#089FFF', antialiased=True)

        plt.show() 
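
For reference, a minimal hedged sketch of the same error-band idea on synthetic data: fill_between shades a vertical y-band, fill_betweenx a horizontal x-band (all values below are illustrative):

import numpy as np
import matplotlib.pyplot as plt

x = np.linspace(0, 10, 50)
y = np.sin(x)
yerror = np.full_like(y, 0.15)   # illustrative symmetric errors
xerror = np.full_like(x, 0.20)

plt.plot(x, y, label='signal')
# vertical band: y +/- yerror at each x
plt.fill_between(x, y - yerror, y + yerror,
                 alpha=0.2, edgecolor='#1B2ACC', facecolor='#089FFF', antialiased=True)
# horizontal band: x +/- xerror at each y
plt.fill_betweenx(y, x - xerror, x + xerror,
                  alpha=0.2, edgecolor='#1B2ACC', facecolor='#089FFF', antialiased=True)
plt.xlabel('x')
plt.ylabel('y')
plt.show()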
Example 5
def plot_posteriors(model_params,plotfile,nbins=30,names=None):

    import numpy as np
    import matplotlib
    import matplotlib.pyplot as plt

    #----get array dimensions (number of parameters)----
    npar = model_params.shape[1]

    #----loop through parameters----
    for p in range(npar):
        
        #--skip iteration column or if parameter is fixed--
        if (max(model_params[:,p]) != min(model_params[:,p])) and p != 0:
            
            y = model_params[:,p]        
            y_hist, x_bin = np.histogram(y,bins=nbins)
            fig,ax=plt.subplots()
            plt.bar(x_bin[:-1],y_hist,width=x_bin[1]-x_bin[0])
            if names is not None:
                plt.xlabel(names[p])
            ymin,ymax = ax.get_ylim()

            #-plot median and 1sigma range-
            med = np.median(y)
            sig = np.std(y)
            plt.plot([med,med],[ymin,ymax],color='red',linestyle='-')
            plt.fill_betweenx([0.0,ymax],[med-sig,med-sig],[med+sig,med+sig],color='red',alpha=0.2)

            #-save plot-
            plotfile.savefig()
            plt.close()
Example 6
def make_group_cumdist_fig(LD,group_by,pcol):
    '''Make a plot showing the cumulative distribution of the within-group avg of a
    specified response variable, compared to the cum dist expected by chance.
    INPUTS: 
        LD: pandas dataframe
        group_by: column to groupby for computing avgs.
        pcol: name of response variable column.
    OUTPUTS:
        fig: figure handle'''    

    name_legend_map = {'counts': 'Number of loans (thousands)',
                       'ROI': 'Average ROI (%)',
                       'int_rate': 'interest rate (%)',
                       'default_prob': 'default probability',
                       'dti': 'Debt-to-income ratio',
                       'emp_length': 'employment length (months)',
                       'annual_inc': 'annual income ($)'}

    min_group_loans = 100 #only use states with at least this many loans
    good_groups = LD.groupby(group_by).filter(lambda x: x[pcol].count() >= min_group_loans)
    n_groups = len(good_groups[group_by].unique())
    group_stats = good_groups.groupby(group_by)[pcol].agg(['mean','sem'])
    group_stats.sort_values(by='mean',inplace=True)
    ov_avg = good_groups[pcol].mean()
    
    #compute bootstrap estimates of null distribution of group-avgs
    boot_samps = 500 #number of bootstrap samples to use when estimating null dist
    shuff_avgs = np.zeros((boot_samps,n_groups))
    shuff_data = good_groups.copy()
    for cnt in range(boot_samps):
        shuff_data[pcol] = np.random.permutation(shuff_data[pcol].values)
        shuff_avgs[cnt,:] = shuff_data.groupby(group_by)[pcol].mean().sort_values()
    
    yax = np.arange(n_groups)
    rmean = np.mean(shuff_avgs,axis=0)
    rsem = np.std(shuff_avgs,axis=0)

    #plot avg and SEM of within-state returns
    fig, ax1 = plt.subplots(1,1,figsize=(6.0,5.0))
    if group_by == 'zip3':
        ax1.errorbar(group_stats['mean'].values,yax)
    else:
        ax1.errorbar(group_stats['mean'],yax,xerr=group_stats['sem'])
        plt.fill_betweenx(yax, rmean-rsem, rmean+rsem,facecolor='r',alpha=0.5,linewidth=0)
        plt.yticks(yax, group_stats.index,fontsize=6)
    
    ax1.plot(rmean,yax,'r')
    plt.legend(['Measured-avgs','Shuffled-avgs'],loc='best')
    if group_by == 'zip3':    
        plt.ylabel('Zip codes',fontsize=16)
    else:    
        plt.ylabel('States',fontsize=16)
    plt.xlabel(name_legend_map[pcol],fontsize=16)   
    plt.xlim(np.min(group_stats['mean']),np.max(group_stats['mean']))
    plt.ylim(0,n_groups)
    ax1.axvline(ov_avg,color='k',ls='dashed')
    plt.tight_layout()

    return fig
Example 7
def plot_mutation_rate_violins(libraries, out_prefix, nucleotides_to_count='ATCG', exclude_constitutive=False):
    #Makes violin plots of raw mutation rates
    data = []
    labels = []
    for library in libraries:
        labels.append(library.lib_settings.sample_name)
        data.append([math.log10(val) for val in library.list_mutation_rates(subtract_background=False, subtract_control=False,
                                                        nucleotides_to_count=nucleotides_to_count,
                                                        exclude_constitutive=exclude_constitutive) if val>0])

    colormap = uniform_colormaps.viridis
    fig = plt.figure(figsize=(5,8))
    ax1 = fig.add_subplot(111)

    # Hide the grid behind plot objects
    ax1.yaxis.grid(True, linestyle='-', which='major', color='lightgrey', alpha=0.5)
    ax1.set_axisbelow(True)

    #ax1.set_xlabel(ylabel)
    plt.subplots_adjust(left=0.1, right=0.95, top=0.9, bottom=0.25)

    pos = range(1,len(libraries)+1)  # starts at 1 to play nice with boxplot
    dist = max(pos)-min(pos)
    w = min(0.15*max(dist,1.0),0.5)
    for library,p in zip(libraries,pos):
        d = [math.log10(val) for val in library.list_mutation_rates(subtract_background=False, subtract_control=False,
                                                        nucleotides_to_count=nucleotides_to_count,
                                                        exclude_constitutive=exclude_constitutive) if val>0]
        k = stats.gaussian_kde(d) #calculates the kernel density
        m = k.dataset.min() #lower bound of violin
        M = k.dataset.max() #upper bound of violin
        x = numpy.arange(m,M,(M-m)/100.) # support for violin
        v = k.evaluate(x) #violin profile (density curve)
        v = v/v.max()*w #scaling the violin to the available space
        plt.fill_betweenx(x,p,v+p,facecolor=colormap((p-1)/float(len(libraries))),alpha=0.3)
        plt.fill_betweenx(x,p,-v+p,facecolor=colormap((p-1)/float(len(libraries))),alpha=0.3)
    if True:
        bplot = plt.boxplot(data,notch=1)
        plt.setp(bplot['boxes'], color='black')
        plt.setp(bplot['whiskers'], color='black')
        plt.setp(bplot['fliers'], color='red', marker='.')

    per50s = []
    i = 1
    for datum in data:
        #per50s.append(stats.scoreatpercentile(datum, 50))
        t = stats.scoreatpercentile(datum, 50)

        per50s.append(t)
        #ax1.annotate(str(round(t,3)), xy=(i+0.1, t), xycoords='data', arrowprops=None, fontsize='small', color='black')
        i+= 1
    #ax1.set_xticks([0.0, 0.5, 1.0, 1.5])
    #ax1.set_yscale('log')
    ax1.set_ylabel('log10 mutation rate')
    ax1.set_ylim(-5, 0)
    xtickNames = plt.setp(ax1, xticklabels=labels)
    plt.setp(xtickNames, rotation=90, fontsize=6)
    plt.savefig(out_prefix+'_logviolin.pdf', transparent='True', format='pdf')
    plt.clf()
Example 8
def seismic_wiggle(section, dt=0.004, ranges=None, scale=1.,
                   color='k', normalize=False):
    """
    Plot a seismic section (numpy 2D array matrix) as wiggles.

    Parameters:

    * section :  2D array
        matrix of traces (first dimension time, second dimension traces)
    * dt : float
        sample rate in seconds (default 4 ms)
    * ranges : (x1, x2)
        min and max horizontal values (default trace number)
    * scale : float
        scale factor multiplied by the section values before plotting
    * color : str
        color used to fill the positive wiggle lobes
    * normalize :
        True to normalize all traces in the section using the global max/min;
        data will be in the range (-0.5, 0.5), zero centered

    .. warning::
        Slow for more than 200 traces, in this case decimate your
        data or use ``seismic_image``.

    """
    npts, ntraces = section.shape  # time/traces
    if ntraces < 1:
        raise IndexError("Nothing to plot")
    if npts < 1:
        raise IndexError("Nothing to plot")
    t = numpy.linspace(0, dt*npts, npts)
    amp = 1.  # normalization factor
    gmin = 0.  # global minimum
    toffset = 0.  # offset in time to make 0 centered
    if normalize:
        gmax = section.max()
        gmin = section.min()
        amp = (gmax-gmin)
        toffset = 0.5
    pyplot.ylim(max(t), 0)
    if ranges is None:
        ranges = (0, ntraces)
    x0, x1 = ranges
    # horizontal increment
    dx = float((x1-x0)/ntraces)
    pyplot.xlim(x0, x1)
    for i, trace in enumerate(section.transpose()):
        tr = (((trace-gmin)/amp)-toffset)*scale*dx
        x = x0+i*dx  # x position for this trace
        pyplot.plot(x+tr, t, 'k')
        pyplot.fill_betweenx(t, x+tr, x, tr > 0, color=color)
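
A hedged usage sketch for seismic_wiggle, assuming the module-level names numpy and pyplot that the function body uses; the synthetic section is purely illustrative:

import numpy
from matplotlib import pyplot

# synthetic section: 300 time samples x 30 traces (illustrative only)
rng = numpy.random.default_rng(1)
section = rng.standard_normal((300, 30))

pyplot.figure(figsize=(8, 6))
seismic_wiggle(section, dt=0.004, scale=2., normalize=True)
pyplot.xlabel('trace number')
pyplot.ylabel('time (s)')
pyplot.show()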
Example 9
def arc(arc):
    plt.imshow(arc.data, aspect='auto', interpolation='none',
               extent=[arc.times[0].to(u.s).value, arc.times[-1].to(u.s).value,
                       arc.latitude[0].to(u.degree).value, arc.latitude[-1].to(u.degree).value])
    plt.xlim(0, arc.times[-1].to(u.s).value)
    _label = 'first data point (time=' + fmt + ')'
    plt.axvline(arc.offset, label=_label % arc.offset, color='w')
    plt.fill_betweenx([arc.latitude[0].to(u.degree).value, arc.latitude[-1].to(u.degree).value],
                      arc.offset, hatch='X', facecolor='w', label='not observed')
    plt.ylabel('degrees of arc from first measurement')
    plt.xlabel('time since originating event (seconds)')
    plt.title('arc: ' + arc.title)
    plt.legend(framealpha=0.5)
    plt.show()
Example 10
def drawhigh(cid,filesize,view,threshold,high,lowlimit=0,highlimit=0):
    avgview=sum(view[5:-5])/len(view)
    highdur=map(lambda x:(x[0],x[-1]),high)

    # figure settings
    plt.figure(figsize=(15,7)) # figsize sets a 15:7 width/height ratio; the figure is scaled accordingly
    #plt.xlim(-3,19)
    ylow=min(view)-500      # y-axis lower limit
    yhigh=max(view)+500     # y-axis upper limit
    plt.ylim(ylow,yhigh)
    plt.grid(which='both')


    # plot the data
    plt.plot(range(1,len(view)+1),view,'bo-',ms=1,lw=0.5,label='origin')      # original curve
    if lowlimit and highlimit:
        plt.axhline(y=lowlimit,lw=3,ls='-',color='m',label='lowlimit')        # lower limit
        plt.axhline(y=highlimit,lw=3,ls='-',color='m',label='highlimit')      # upper limit

    plt.axhline(y=avgview,lw=1,ls='--',color='b',label='mean')                # mean
    #plt.axhline(y=avgview*adjustv,lw=1,ls='--',color='g',label='mean*1.2')   # mean*adjustv
    plt.axhline(y=threshold,lw=2,ls='--',color='r',label='threshold=1.2*mean')       # threshold
    plt.legend(loc='upper right')
    plt.xlabel('time (s)')
    plt.ylabel('views')

    # mark the highlight intervals
    for item in highdur:
        #plt.axvline(x=item[0],lw=2)
        #plt.axvline(x=item[1],lw=2)
        plt.annotate('',xy=(item[1],threshold),xytext=(item[0],threshold),arrowprops=dict(arrowstyle="->",connectionstyle="arc3",color='g'))
        plt.fill_betweenx([ylow,yhigh],item[0], item[1], linewidth=1, alpha=0.2, color='r')

    plt.show()

    # save the result
    '''
    resultpath='D:\\hot_pic2'
    if not os.path.exists(resultpath):
        os.mkdir(resultpath)
    fname=os.path.join(resultpath,cid+'.'+str(filesize)+'.jpg')
    print(fname)
    plt.savefig(fname,dpi = 300)
    plt.close()
    '''
    return 0
Example 11
    def show_distr(x,y, vertical=False, label=None, color='blue', linecolor='k', quantile=False):
        var_2d=np.copy(y)
        if vertical:
            var_2d=np.copy(x)
        mid=np.nanmean(var_2d, axis=0)
        lower=mid - np.nanstd(var_2d, axis=0)
        upper=mid + np.nanstd(var_2d, axis=0)
        if quantile:
            lower=np.nanpercentile(var_2d,25,axis=0)
            upper=np.nanpercentile(var_2d,75,axis=0)

        if vertical:
            plt.fill_betweenx(y,lower,upper, color=color)
            plt.plot(mid,y,color=linecolor, linewidth=2)
        else:
            plt.fill_between(x,lower,upper, color=color)
            plt.plot(x,mid,color=linecolor, linewidth=2)
Example 12
def plot_airmass(objectlist,obsvat,date):
    #ax = plt.subplot(111)
    
    for obj in objectlist:
        altdata = compute_alt_plot(obj[0],obj[1],obsvat,date)
        plt.plot(altdata[:,0],altdata[:,1])


    morn_twilight,even_twilight = functions.calc_twilight(obsvat,date)
    

    plt.fill_betweenx([0,90],[morn_twilight.datetime(),morn_twilight.datetime()],x2=[even_twilight.datetime(),even_twilight.datetime()],color="0.5")

    locs,labels = plt.xticks()
    plt.setp(labels,rotation=45)
    plt.ylim(0,90)
    plt.show()
Example 13
    def peek(self):
        plt.imshow(self.data, aspect='auto', interpolation='none',
                   extent=[self.times[0].to(u.s).value,
                           self.times[-1].to(u.s).value,
                           self.latitude[0].to(u.degree).value,
                           self.latitude[-1].to(u.degree).value])
        plt.xlim(0, self.times[-1].to(u.s).value)
        if self.times[0].to(u.s).value > 0.0:
            plt.fill_betweenx([self.latitude[0].to(u.degree).value,
                               self.latitude[-1].to(u.degree).value],
                              self.times[0].to(u.s).value,
                              hatch='X', facecolor='w', label='not observed')
        plt.ylabel('degrees of arc from first measurement')
        plt.xlabel('time since originating event (seconds)')
        plt.title('arc: ' + self.title)
        plt.legend(framealpha=0.5)
        plt.show()
        return None
Example 14
def plotBootROC(rocDfL, labelL=None, aucL=None, ciParam='fpr'):
    """Plot of ROC curves with confidence intervals.

    Parameters
    ----------
    rocDfL : list of pd.DataFrames
        Each DataFrame is one model and must include columns
        fpr_est, tpr_est, fpr_lb, fpr_ub
    labelL : list of str
        Names of each model for legend
    aucL : list of floats
        AUC scores of each model for legend"""
    if labelL is None and aucL is None:
        labelL = ['Model %d' % i for i in range(len(rocDfL))]
    elif labelL is None:
        labelL = ['Model %d (AUC = %0.2f [%0.2f, %0.2f])' % (i, auc[0], auc[1], auc[2]) for i, auc in enumerate(aucL)]
    else:
        labelL = ['%s (AUC = %0.2f [%0.2f, %0.2f])' % (label, auc[0], auc[1], auc[2]) for label, auc in zip(labelL, aucL)]

    colors = sns.color_palette('Set1', n_colors=len(rocDfL))

    plt.cla()
    plt.gca().set_aspect('equal')
    for i, (rocDf, label) in enumerate(zip(rocDfL, labelL)):
        if ciParam == 'fpr':
            plt.fill_betweenx(rocDf['tpr_est'], rocDf['fpr_lb'], rocDf['fpr_ub'], alpha=0.3, color=colors[i])
        elif ciParam == 'tpr':
            plt.fill_between(rocDf['fpr_est'], rocDf['tpr_lb'], rocDf['tpr_ub'], alpha=0.3, color=colors[i])
        plt.plot(rocDf['fpr_est'], rocDf['tpr_est'],'-', color=colors[i], lw=2)
        # plt.plot(rocDf['fpr_est'], rocDf['tpr_lb'], '.--', color=colors[i], lw=1)
        # plt.plot(rocDf['fpr_est'], rocDf['tpr_ub'], '.--', color=colors[i], lw=1)
        # plt.plot(rocDf['fpr_lb'], rocDf['tpr_est'], '--', color=colors[i], lw=1)
        # plt.plot(rocDf['fpr_ub'], rocDf['tpr_est'], '--', color=colors[i], lw=1)
    plt.plot([0, 1], [0, 1], '--', color='gray', label='Chance')
    plt.xlim([0, 1])
    plt.ylim([0, 1])
    plt.xlabel('False Positive Rate')
    plt.ylabel('True Positive Rate')
    plt.title('ROC')
    plt.legend([plt.Line2D([0, 1], [0, 1], color=c, lw=2) for c in colors], labelL, loc='lower right', fontsize=10)
    plt.show()
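
A hedged usage sketch for plotBootROC with one synthetic curve; it assumes the function's module-level imports (numpy as np, seaborn as sns, matplotlib.pyplot as plt) are in scope, and the ROC and AUC numbers are made up for illustration:

import numpy as np
import pandas as pd

# toy ROC curve with a symmetric +/-0.05 uncertainty band on the FPR axis
fpr = np.linspace(0, 1, 50)
rocDf = pd.DataFrame({'fpr_est': fpr,
                      'tpr_est': np.sqrt(fpr),
                      'fpr_lb': np.clip(fpr - 0.05, 0, 1),
                      'fpr_ub': np.clip(fpr + 0.05, 0, 1)})

# each aucL entry is (point estimate, CI lower, CI upper); the numbers are made up
plotBootROC([rocDf], labelL=['toy model'], aucL=[(0.67, 0.60, 0.74)])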
Example 15
def gen_fill(tail_num, deg_left, deg_right):
    if tail_num == 3:
        plt.fill_betweenx(mlab.normpdf(x,mean,sigma),deg_left,x, where = ( x <= deg_left))
        plt.fill_betweenx(mlab.normpdf(x,mean,sigma), x,deg_right, where = ( x >= deg_right))    
        plt.draw()
        plt.show()
    elif tail_num ==  2: #right
        plt.fill_betweenx(mlab.normpdf(x,mean,sigma), x,deg_right, where = ( x >= deg_right))    
        plt.draw()
        plt.show()
    elif tail_num == 1 :
        plt.fill_betweenx(mlab.normpdf(x,mean,sigma),deg_left,x, where = ( x <= deg_left))
        plt.draw()
        plt.show()
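
Note that mlab.normpdf has been removed from recent matplotlib releases; below is a minimal sketch of the same tail-shading idea using scipy.stats.norm.pdf instead. The mean, sigma, x grid and cut points are illustrative stand-ins for the module-level values the function above relies on:

import numpy as np
from scipy.stats import norm
import matplotlib.pyplot as plt

mean, sigma = 0.0, 1.0
x = np.linspace(-4, 4, 400)
pdf = norm.pdf(x, loc=mean, scale=sigma)
deg_left, deg_right = -1.96, 1.96   # illustrative two-tailed cut points

plt.plot(x, pdf, 'k')
# shade from each cut point out to the curve, at the pdf heights in the tails
plt.fill_betweenx(pdf, deg_left, x, where=x <= deg_left, alpha=0.5)
plt.fill_betweenx(pdf, x, deg_right, where=x >= deg_right, alpha=0.5)
plt.show()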
Example 16
def plot_wiggle(data,zz=1,skip=1,gain=1,alpha=0.7,black=False):
    '''
    Wiggle plot of generic 2D numpy array.

    INPUT
    data: 2D numpy array
    zz: vertical sample rate in depth or time
    skip: interval to choose traces to draw
    gain: multiplier applied to each trace
    alpha: transparency of the black fill (used when black=True)
    black: if True, fill only the positive lobes in black
    '''
    [n_samples,n_traces]=data.shape
    t=range(n_samples)
    plt.figure(figsize=(9.6,6))
    for i in range(0, n_traces,skip):
        trace=gain*data[:,i] / np.max(np.abs(data))
        plt.plot(i+trace,t,color='k', linewidth=0.5)
        if black==False:
            plt.fill_betweenx(t,trace+i,i, where=trace+i>i, facecolor=[0.6,0.6,1.0], linewidth=0)
            plt.fill_betweenx(t,trace+i,i, where=trace+i<i, facecolor=[1.0,0.7,0.7], linewidth=0)
        else:
            plt.fill_betweenx(t,trace+i,i, where=trace+i>i, facecolor='black', linewidth=0, alpha=alpha)
    locs,labels=plt.yticks()
    plt.yticks(locs,[n*zz for n in locs.tolist()])
    plt.grid()
    plt.gca().invert_yaxis()
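
A hedged usage sketch for the plot_wiggle above, assuming np and plt are imported at module level as in the snippet; the input array is synthetic:

import numpy as np
import matplotlib.pyplot as plt

# synthetic 2D array: 200 samples x 15 traces, 2 ms sample rate (illustrative)
rng = np.random.default_rng(2)
data = rng.standard_normal((200, 15))

plot_wiggle(data, zz=0.002, skip=1, gain=2)
plt.xlabel('trace number')
plt.ylabel('time (s)')
plt.show()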
Example 17
def plot_vawig(axhdl, data, t, excursion, highlight=None):

    import numpy as np
    import matplotlib.pyplot as plt

    [ntrc, nsamp] = data.shape

    t = np.hstack([0, t, t.max()])

    for i in range(0, ntrc):
        tbuf = excursion * data[i] / np.max(np.abs(data)) + i

        tbuf = np.hstack([i, tbuf, i])

        if i == highlight:
            lw = 2
        else:
            lw = 0.5

        axhdl.plot(tbuf, t, color='black', linewidth=lw)

        plt.fill_betweenx(t,
                          tbuf,
                          i,
                          where=tbuf > i,
                          facecolor=[0.6, 0.6, 1.0],
                          linewidth=0)
        plt.fill_betweenx(t,
                          tbuf,
                          i,
                          where=tbuf < i,
                          facecolor=[1.0, 0.7, 0.7],
                          linewidth=0)

    axhdl.set_xlim((-excursion, ntrc + excursion))
    axhdl.xaxis.tick_top()
    axhdl.xaxis.set_label_position('top')
    axhdl.invert_yaxis()
Example 18
def plotear2(X,V):
    try:
        varvals = un.nominal_values(V)
        varerr = un.std_devs(V)

        varmenos = varvals - varerr
        varmas = varvals + varerr

        xerr = un.std_devs(X) / 2
        x = un.nominal_values(X)

        # plt.errorbar(x,varvals,xerr=xerr,yerr=varerr,fmt='o')
        plt.plot(x,varvals,'ok')
        plt.fill_between(x,varmenos,varmas,color='k', alpha=0.3)
        plt.fill_betweenx(varvals, x-xerr,x+xerr,color='k',alpha=0.3)
    except TypeError:
        print('errors are not working')
        plt.plot(S,var,'.')

    # plt.title(medicion.split('.')[0].replace('_',' '))
    # plt.legend(loc='best')
    plt.ylabel(r'$Voltage\ (V)$')
    plt.xlabel(r'$\Delta T\ steady\ state\ (K)$')
Example 19
def plot_mean_sigma_all_days(var, nsigma, xlabel, xlim1, xlim2):

    mean = var.mean(dim='launch_time')
    sigma = var.std(dim='launch_time')
    alt = var.alt.values
    if True:
        plt.figure(figsize=(8, 10))
        plt.plot(mean, alt, linewidth=4, color='black',
                 label='mean')  #label='$\mu$')
        plt.fill_betweenx(alt,
                          mean,
                          mean - (nsigma * sigma),
                          color='lightgrey',
                          alpha=0.6,
                          label='1$\sigma$')
        plt.fill_betweenx(alt,
                          mean,
                          mean + (nsigma * sigma),
                          color='lightgrey',
                          alpha=0.6)

        plt.gca().spines['right'].set_visible(False)
        plt.gca().spines['top'].set_visible(False)
        plt.xlabel(xlabel)
        plt.ylabel('Altitude / m')
        plt.ylim([0, 4000])
        plt.xlim([xlim1, xlim2])
        #plt.legend(loc='best')
        # remove yticks and ylabel
        if False:
            plt.tick_params(bottom=True,
                            labelbottom=True,
                            left=False,
                            labelleft=False)
            plt.ylabel('')
        if False:
            plt.axvline(x=0, color='black')
Example 20
def map_of_rois(acti, roi_tensor, path):
	""" Make a map of ROIs and an histogram of the # of NaNs per trials 
	Arguments:
		acti {array} -- activation tensor
		roi_tensor {array} -- mask of ROIs

	Keyword Arguments:
		None

	Returns:
		None
	"""
	N, T, K = acti.shape
	plt.imshow(tca.make_map(roi_tensor, np.ones(roi_tensor.shape[2])), cmap='hot')

	fig = plt.figure(figsize=(12,12))

	fig.add_subplot(2,2,1)

	nan_per_trial = pd.Series(np.isnan(acti[0,:,:]).sum(axis=0))
	nan_per_trial.plot(kind = 'bar', color = 'blue', width = 1)
	plt.xticks(nan_per_trial.index[::40],  nan_per_trial.index[::40], rotation = 'vertical')
	plt.xlabel('Trials')
	plt.ylabel('Number of NaN')


	fig.add_subplot(2,2,2)

	nan_per_timeframe = pd.Series(np.isnan(acti[0,:,:]).sum(axis=1))
	nan_per_timeframe.plot(kind='bar', color='blue', width=1)
	plt.xticks(nan_per_trial.index[0:T:15], rotation='horizontal')
	plt.xlabel('Time')

	plt.fill_betweenx([0, 1.2 * np.max(nan_per_timeframe)], 105, 135, facecolor='red', alpha=0.5)

	plt.tight_layout()
	plt.savefig(os.path.join(path, 'map_of_rois.png'))
Example 21
def PlotGather(data,
               ntraces,
               nsample,
               dt,
               scale=1,
               displaytext='Display',
               normalize=True):

    if (ntraces > 100):
        ntraces = 100  # maximum number of traces per display

    t = range(0, nsample * dt, dt)

    plt.figure()
    plt.title(displaytext)
    plt.ylabel('t (ms)')
    t = np.asarray(t)

    if normalize == True:
        f = NormalizationFactor(data, ntraces, 2)
    else:
        f = 1

    for i in range(0, ntraces, 1):
        tr = data.T[i]
        #tr = (tr / f[i]) * scale
        tr = (tr / f) * scale

        #print shape(tr), shape(t)
        #reshape(t, (nsample,1))
        plt.plot(i + tr, t, 'k')
        plt.fill_betweenx(t, i, i + tr, tr >= 0, color='k')

    plt.ylim(nsample * dt, 0)
    plt.xlim(-1, ntraces)

    plt.show()
Example 22
def plot_silhouette(data, metric, predictions, k):
    if data.ndim == 1:
        data = data.to_frame()

    score_per_sample = silhouette_samples(data, predictions, metric=metric)
    y_lower = 10
    for i in range(k):
        ith_cluster_silhouette_values = \
            score_per_sample[predictions == i]

        ith_cluster_silhouette_values.sort()

        size_cluster_i = ith_cluster_silhouette_values.shape[0]
        y_upper = y_lower + size_cluster_i

        color = cm.nipy_spectral(float(i) / k)
        plt.fill_betweenx(np.arange(y_lower, y_upper),
                          0,
                          ith_cluster_silhouette_values,
                          facecolor=color,
                          edgecolor=color,
                          alpha=0.7)

        plt.text(-0.05, y_lower + 0.5 * size_cluster_i, str(i))

        y_lower = y_upper + 10
    plt.title("The silhouette plot for the various clusters.")
    plt.xlabel("The silhouette coefficient values")
    plt.ylabel("Cluster label")

    # The vertical line for average silhouette score of all the values
    score = silhouette_score(data, predictions, metric=metric)
    plt.axvline(x=score, color="red", linestyle="--")

    plt.yticks([])
    plt.xticks(np.arange(-1, +1, 0.1))
    plt.show()
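
A hedged usage sketch for plot_silhouette on synthetic clusters; it assumes the module-level imports the function relies on (numpy as np, matplotlib.pyplot as plt, matplotlib.cm as cm, and silhouette_samples/silhouette_score from sklearn.metrics):

import pandas as pd
from sklearn.cluster import KMeans
from sklearn.datasets import make_blobs

# illustrative data: three well-separated blobs in 2D
X, _ = make_blobs(n_samples=300, centers=3, random_state=0)
data = pd.DataFrame(X, columns=['f1', 'f2'])

k = 3
predictions = KMeans(n_clusters=k, n_init=10, random_state=0).fit_predict(data)
plot_silhouette(data, metric='euclidean', predictions=predictions, k=k)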
Example 23
def sil(matr, y_pred, n_clusters):
    print("Computing the silhouette scores")
    silhouette_avg = silhouette_score(matr, y_pred)
    print("Overall silhouette average: %s" % silhouette_avg)
    sample_silhouette_values = silhouette_samples(matr, y_pred)

    plt.figure(20)
    y_lower = 10
    for i in range(n_clusters):
        print("Cluster # %s" % i)
        # Aggregate the silhouette scores for samples belonging to
        # cluster i, and sort them
        ith_cluster_silhouette_values = \
            sample_silhouette_values[y_pred == i]

        ith_cluster_silhouette_values.sort()

        size_cluster_i = ith_cluster_silhouette_values.shape[0]
        y_upper = y_lower + size_cluster_i

        color = cm.nipy_spectral(float(i) / n_clusters)
        plt.fill_betweenx(np.arange(y_lower, y_upper),
                          0,
                          ith_cluster_silhouette_values,
                          facecolor=color,
                          edgecolor=color,
                          alpha=0.7)

        # Label the silhouette plots with their cluster numbers at the middle
        plt.text(-0.05, y_lower + 0.5 * size_cluster_i, str(i))

        # Compute the new y_lower for next plot
        y_lower = y_upper + 10  # 10 for the 0 samples
    plt.savefig(out_dir + 'Silhouette_Plot_' + str(n_clusters) +
                'clusters_2.png')
    plt.close()
Example 24
def NAC_fitting_plot(dG_c, NAC_bulkRange, results_dir, sels, protein, mol):
    """Plots the N_theoretic and P_enzyme in the same plot with the y-axis.
        Requires the ranges_bulk and dG_c dataframe"""
    fig, ax = plt.subplots(sharey=True)

    N_adjusted, N_sim = dG_c.filter(like='bulk'), dG_c.filter(
        like='P_{enzyme}')

    colors = plt.rcParams['axes.prop_cycle'].by_key()['color'][:len(N_sim.
                                                                    columns)]

    fig = N_adjusted.plot(kind='line', style="--", color=colors)
    fig.set_xlim(1, NAC_bulkRange[-1] + 20)
    fig.set_ylim(-10,
                 np.max(N_sim.values) +
                 2)  #this is to avoid plotting large negatives in x --> 0
    #fig.legend(ncol=1, bbox_to_anchor=(1.12, 0.5), loc='center left')

    N_sim.plot(kind='line', legend=False, ax=fig, color=colors)
    fig.set_ylabel('ln N')
    fig.set_xlabel(r'$d_{NAC}$ ($\AA$)')

    #Show region used for fitting
    plt.axvline(NAC_bulkRange[0])
    plt.axvline(NAC_bulkRange[-1])
    plt.fill_betweenx(plt.ylim(),
                      NAC_bulkRange[0],
                      NAC_bulkRange[-1],
                      alpha=0.5,
                      color='gray')
    plt.xscale('log')
    plt.savefig('{}/dG_nac{}-fitting-{}-{}.png'.format(results_dir, len(sels),
                                                       protein, mol),
                bbox_inches="tight",
                dpi=600)
    plt.clf()
Example 25
def plot_clustering(n_clusters, cluster_labels, X):
    plt.figure()
    sample_silhouette_values = silhouette_samples(X, cluster_labels)
    y_lower = 10
    for i in range(n_clusters):
        # Aggregate the silhouette scores for samples belonging to
        # cluster i, and sort them
        ith_cluster_silhouette_values = \
            sample_silhouette_values[cluster_labels == i]

        ith_cluster_silhouette_values.sort()

        size_cluster_i = ith_cluster_silhouette_values.shape[0]
        y_upper = y_lower + size_cluster_i

        color = plt.cm.nipy_spectral(float(i) / n_clusters)
        plt.fill_betweenx(np.arange(y_lower, y_upper),
                          0, ith_cluster_silhouette_values,
                          facecolor=color, edgecolor=color, alpha=0.7)

        # Label the silhouette plots with their cluster numbers at the middle
        plt.text(-0.05, y_lower + 0.5 * size_cluster_i, str(i))

        # Compute the new y_lower for next plot
        y_lower = y_upper + 10  # 10 for the 0 samples

    plt.title("The silhouette plot for the various clusters.")
    plt.xlabel("The silhouette coefficient values")
    plt.ylabel("Cluster label")

    # The vertical line for average silhouette score of all the values
    silhouette_avg = sample_silhouette_values.mean()
    plt.axvline(x=silhouette_avg, color="red", linestyle="--")

    plt.yticks([])  # Clear the yaxis labels / ticks
    plt.xticks([-0.1, 0, 0.2, 0.4, 0.6, 0.8, 1])
    plt.show()
Example 26
    def update( costScl = 1.95, c1 = 2, c2 = 3, b1 = 4, b2 = 12, b3 = 18, b4 = 3):
        model.c1 = c1
        model.c2 = c2
        model.b1 = b1
        model.b2 = b2
        model.b3 = b3
        model.b4 = b4
        solver.solve(model)
        mncost = model.cost()
        cost = mncost*costScl
 
        plt.figure(figsize=(6,6))
        plt.plot(d, (cost-c1*d)/(c2+0.0001), 'k--', lw = 3, label = 'cost')
        plt.plot(b1 * np.ones_like(d), d, lw=3, label='Plant 1',color='b')
        plt.fill_betweenx(d, 0, b1, alpha=0.1,color='b')
        
        plt.plot(d, b2/2*np.ones_like(d), lw=3, label='Plant 2',color='r')
        plt.fill_between(d, 0, b2/2, alpha=0.1,color='r')
        
        plt.plot(d, (b3-3*d)/2, lw=3, label='Plant 3',color='g')
        plt.fill_between(d, 0, (b3-3*d)/2, alpha=0.1,color='g')   
        
        plt.plot(d, b4-d, lw=3, label = 'Min Prod', color='y')
        plt.fill_between(d,b4-d,12,alpha=0.1,color='y')
        
        plt.plot(np.zeros_like(d), d, lw=3, label='d non-negative',color='c')
        plt.plot(d, np.zeros_like(d), lw=3, label='w non-negative',color='m')
        
        plt.xlabel('batches of doors', fontsize=16)
        plt.ylabel('batches of windows', fontsize=16)
        plt.xlim(-0.05, 12)
        plt.ylim(-0.05, 12)
        plt.legend(loc = 'upper right',fontsize=12)
        
        plt.text( 6.2, 6.4, f'Cost = ${1000*cost:,.2f}', fontsize = 12)
        plt.show()  
Example 27
def plot_bore(df, figsize=(11, 8), show=True, dpi=100):
    df = df.copy()

    fig = plt.figure(figsize=figsize, dpi=dpi)

    v = df[["G", "S", "L", "C", "P"]].values
    v[:, 2] += df["SI"].values
    v[np.argwhere(v.sum(1) < 0)] = np.nan

    c = ["#a76b29", "#578E57", "#0078C1", "#DBAD4B", "#708090"]

    for i in range(5):
        plt.fill_betweenx(
            -np.repeat(df["depth_top"], 2),
            np.zeros(v.shape[0] * 2),
            np.roll(np.repeat(np.cumsum(v, axis=1)[:, -(i + 1)], 2), 1),
            color=c[i],
        )

    legend_dict = {
        "Gravel": "#708090",
        "Sand": "#DBAD4B",
        "Loam": "#0078C1",
        "Clay": "#578E57",
        "Peat": "#a76b29",
    }
    patch_list = []
    for key in legend_dict:
        data_key = mpatches.Patch(color=legend_dict[key], label=key)
        patch_list.append(data_key)

    plt.legend(handles=patch_list, bbox_to_anchor=(1, 1), loc="upper left")

    if show:
        plt.show()
    return fig
Example 28
def swiggle(seismic, time, isFilled = True, fillColor = 'blue'):
    if seismic.ndim == 1:
        plt.plot(seismic,time,color=fillColor,alpha=.5)
        plt.fill_betweenx(time, 0, seismic, seismic>0, color='blue', alpha=.25)
        plt.gca().invert_yaxis()

    else:
        nTimes, nGathers = seismic.shape
        for iGather in range(nGathers):
            # print(iGather)
            # plt.figure(1)
            # plt.figure(figsize=(8,6))
            plt.axes([0.1+0.18*iGather,0.1,.1,.8])
            # plt.plot(seismic[:,iGather]+iGather,time,color='blue',alpha=.5)
            # plt.fill_betweenx(time, iGather, seismic[:,iGather]+iGather, seismic[:,iGather]>0, color='blue', alpha=.25)
            plt.plot(seismic[:,iGather],time,color=fillColor,alpha=.5)
            plt.fill_betweenx(time, 0, seismic[:,iGather], seismic[:,iGather]>0, color=fillColor, alpha=.25)
            plt.gca().invert_yaxis()
 
        
    # plt.plot(seismic(:,0),time)
    # plt.plot(seismic(:,1) * 3,time)
    # plt.gca().invert_yaxis()
    # plt.show()
Example 29
def pick_wigb(data,t_axis,offsets,filename,amx):
	''' This function enables picking and saving on a seismic wiggle plot
	data: Is a matrix of seismic data where the columns are the seismic traces
	t_axis: Is your time axis
	offsets: vector of each trace offset 
	filename: Is the output filename
	amx: is a screen gain to the data'''
	amx2=amx*np.max(data)
	i=0
	for offset in offsets:
		
		x=np.divide(data[:,i],amx2)+offset
		i=i+1
		
		plt.plot(x,t_axis,'k')
		plt.fill_betweenx(t_axis,offset,x,where=(x>offset),color='k')

	plt.ylabel('Time (s)')
	plt.xlabel('Offset (m)')
	plt.gca().invert_yaxis()
	plt.xlim((offsets[0]-1.,offsets[-1]+1.))
	picks = plt.ginput( n=0, timeout=0, show_clicks=True, mouse_add=1, mouse_pop=3, mouse_stop=2)
	np.savetxt(filename, picks)
	plt.show()
Example 30
def graphEpiInfDetailed(frame, saveloc: str):
    """Represents the epifaunal to infaunal proportions by displaying foram
	proportions by their respective environment. Requires a dataframe object
	as input. """
    holder = dfProportion(frame) * 100
    # holder.iloc[0] gets the first row
    epifaunal = holder.iloc[0]
    infShallow = holder.iloc[1] + epifaunal
    infDeep = holder.iloc[2] + infShallow
    infUndetermined = holder.iloc[3] + infDeep

    plt.figure(dpi=200, figsize=(3, 12))
    yaxis = [x + 1 for x in range(len(holder.T))]
    plt.title("Detailed Epifaunal to Infaunal proportions")
    plt.ylabel("Sample number")
    plt.xlabel("Percentage")

    plt.plot(epifaunal, yaxis, '#52A55C', label='Epifaunal')
    plt.plot(infShallow, yaxis, '#236A62', label='Inf. Shallow')
    plt.plot(infDeep, yaxis, '#2E4372', label='Inf. Deep')
    plt.plot(infUndetermined, yaxis, '#535353', label='Inf. Undetermined')

    plt.fill_betweenx(yaxis, epifaunal, facecolor='#52A55C')
    plt.fill_betweenx(yaxis, epifaunal, infShallow, facecolor='#236A62')
    plt.fill_betweenx(yaxis, infShallow, infDeep, facecolor='#2E4372')
    plt.fill_betweenx(yaxis, infDeep, infUndetermined, facecolor='#535353')

    plt.gca().yaxis.set_major_locator(MaxNLocator(integer=True))
    plt.yticks(yaxis)
    plt.gca().set_xlim(0, 100)
    plt.gca().set_ylim(1, len(yaxis))

    plt.subplot(111).legend(loc='upper center',
                            bbox_to_anchor=(0.5, -0.05),
                            fancybox=True,
                            shadow=True,
                            ncol=5,
                            borderaxespad=2)

    savename = "/Detailed Epi-Infaunal.svg"
    plt.savefig(saveloc + savename)
Example 31
def _make_square(df, i):
    x_mean = df.ages.mean()
    y_mean = df.heights.mean()

    x = df.iloc[i].ages
    y = df.iloc[i].heights

    alpha = .1

    if x > x_mean and y > y_mean:
        plt.fill_betweenx(x1=x_mean,
                          x2=x,
                          y=(y_mean, y),
                          alpha=alpha,
                          color='b')

    elif x < x_mean and y < y_mean:
        plt.fill_betweenx(x1=x,
                          x2=x_mean,
                          y=(y, y_mean),
                          alpha=alpha,
                          color='b')

    elif x < x_mean and y > y_mean:
        plt.fill_betweenx(x1=x,
                          x2=x_mean,
                          y=(y_mean, y),
                          alpha=alpha,
                          color='r')

    else:
        plt.fill_betweenx(x1=x_mean,
                          x2=x,
                          y=(y, y_mean),
                          alpha=alpha,
                          color='r')
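
A hedged usage sketch for _make_square, assuming a pandas DataFrame with ages and heights columns and the module-level matplotlib.pyplot import the function uses; the data are illustrative:

import pandas as pd
import matplotlib.pyplot as plt

# illustrative data
df = pd.DataFrame({'ages':    [21, 34, 45, 52, 61],
                   'heights': [165, 171, 168, 180, 175]})

plt.scatter(df.ages, df.heights, color='k', zorder=3)
for i in range(len(df)):
    _make_square(df, i)   # shade the rectangle between each point and the means
plt.axvline(df.ages.mean(), color='gray', linestyle='--')
plt.axhline(df.heights.mean(), color='gray', linestyle='--')
plt.xlabel('ages')
plt.ylabel('heights')
plt.show()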
Example 32
def plot_wiggle(data, zz=1, skip=1, gain=1, alpha=0.7, black=False):
    '''
    Wiggle plot of generic 2D numpy array.

    INPUT
    data: 2D numpy array
    zz: vertical sample rate in depth or time
    skip: interval to choose traces to draw
    gain: multiplier applied to each trace
    alpha: transparency of the black fill (used when black=True)
    black: if True, fill only the positive lobes in black
    '''
    [n_samples, n_traces] = data.shape
    t = range(n_samples)
    plt.figure(figsize=(9.6, 6))
    for i in range(0, n_traces, skip):
        trace = gain * data[:, i] / np.max(np.abs(data))
        plt.plot(i + trace, t, color='k', linewidth=0.5)
        if black == False:
            plt.fill_betweenx(t,
                              trace + i,
                              i,
                              where=trace + i > i,
                              facecolor=[0.6, 0.6, 1.0],
                              linewidth=0)
            plt.fill_betweenx(t,
                              trace + i,
                              i,
                              where=trace + i < i,
                              facecolor=[1.0, 0.7, 0.7],
                              linewidth=0)
        else:
            plt.fill_betweenx(t,
                              trace + i,
                              i,
                              where=trace + i > i,
                              facecolor='black',
                              linewidth=0,
                              alpha=alpha)
    locs, labels = plt.yticks()
    plt.yticks(locs, [n * zz for n in locs.tolist()])
    plt.grid()
    plt.gca().invert_yaxis()
Example 33
def plot_dDAR():
	lam_mu = 0.3+np.arange(2200)/1000
	lam_mu_ref = 0.55
	zeta_deg = 10*np.arange(8)
	plt.ylim([-3,5])
	##
	## mark X-SHOOTER arms
	plt.fill_betweenx(plt.ylim(),0.3,0.55,color="blue",alpha=0.3)
	plt.fill_betweenx(plt.ylim(),0.55,1.0,color="green",alpha=0.3)
	plt.fill_betweenx(plt.ylim(),1.0,2.5,color="red",alpha=0.3)

	for z in zeta_deg:
		am = 1/np.sin(np.deg2rad(90-z))
		plt.plot(lam_mu, dDAR(z, lam_mu, lam_mu_ref), label="{0} deg, airmass {1:5.2f}".format(z,am))
	
	plt.legend()
	plt.title("Differential atmospheric refraction w.r.t. {0:5.2f} micron".format(lam_mu_ref))
	plt.xlabel("Wavelength in micron")
	plt.ylabel("Differential atmospheric refraction in arcsec")

	plt.text(2.4, -2.5, "Paranal standard conditions: 11.5 deg C, 743 hPa, 14.5 % RH", fontsize=9, ha="right")
Example 34
def ftir_method_plots():
    '''
        look at ftir profile vs a priori
        plot averaging kernel summary
            mean AK for column and profiles
    '''
    pname_prof = 'Figs/FTIR_apriori.png'
    pname_AK = 'Figs/FTIR_midday_AK.png'
        
    # Read FTIR output
    ftir=campaign.Wgong()
    
    # Resample FTIR to just midday averages
    middatas=ftir.resample_middays()
    
    ### Mean profile and apriori
    
    # Plot mean profile vs mean a priori
    plt.close()
    alts = ftir.alts
    for i, (prof, c) in enumerate(zip([middatas['VMR'], middatas['VMR_apri']],['k','teal'] )):
        # Convert ppmv to ppbv and plot profiles
        mean = 1000*np.nanmean(prof,axis=0)
        lq = 1000*np.nanpercentile(prof, 25, axis=0)
        uq = 1000*np.nanpercentile(prof, 75, axis=0)
        plt.fill_betweenx(alts, lq, uq, alpha=0.5, color=c)
        plt.plot(mean,alts,label=['x$_{ret}$','x$_{apri}$'][i],linewidth=2,color=c)
    plt.ylim([0,50])
    plt.ylabel('altitude [km]')
    plt.xlabel('HCHO [ppbv]')
    plt.legend(fontsize=14)
    plt.title('FTIR mean profile')
    plt.savefig(pname_prof)
    print("Saved ",pname_prof)
    plt.close()
    
    
    ### Mean averaging kernal summarised
    
    # check plot of VC_AK
    # [dates, levels]
    plt.close()
    plt.figure(figsize=(12,12))
    ax0=plt.subplot(1,2,1)
    OAK=middatas['VC_AK']
    mean = np.nanmean(OAK,axis=0)
    lq = np.nanpercentile(OAK,25, axis=0)
    uq = np.nanpercentile(OAK,75, axis=0)
    plt.fill_betweenx(ftir.alts,lq,uq, label='IQR')
    plt.plot(mean, ftir.alts,color='k',linewidth=2, label='mean')
    plt.title("$\Omega$ sensitivity to HCHO")
    plt.legend()
    plt.ylabel('altitude [km]')
    
    # also check average AK
    AAK = np.nanmean(middatas['VMR_AK'],axis=0)    
    colors=pp.get_colors('gist_ncar',48) # gist_ncar
    plt.subplot(1,2,2, sharey=ax0)
    for i in np.arange(0,48,1):
        sample = int((i%6)==0)
        label=[None,ftir.alts[47-i]][sample]
        linestyle=['--','-'][sample]
        linewidth=[1,2][sample]
        alpha=[.5,1][sample]
        plt.plot(AAK[47-i],ftir.alts, color=colors[i], alpha=alpha,
                 label=label,linestyle=linestyle,linewidth=linewidth)
    plt.legend(title='altitude')
    plt.title('Mean averaging kernel')
    #plt.colorbar()
    plt.ylim([-1,81])
    
    plt.savefig(pname_AK)
    print('Saved ',pname_AK)
Example 35
    plt.xlim([-0.1, 1])

    y_lower = 10
    for i in range(n_clusters):

        ith_cluster_silhouette_values = sample_silhouette_values[cluster_labels == i]

        ith_cluster_silhouette_values.sort()

        size_cluster_i = ith_cluster_silhouette_values.shape[0]
        y_upper = y_lower + size_cluster_i

        color = cm.nipy_spectral(float(i) / n_clusters)
        plt.fill_betweenx(np.arange(y_lower, y_upper),
                          0, ith_cluster_silhouette_values,
                          facecolor=color, edgecolor=color, alpha=0.7)

        plt.text(-0.05, y_lower + 0.5 * size_cluster_i, str(i))

        y_lower = y_upper + 10  


    plt.xlabel("Silhouette coefficient values")
    plt.ylabel("Cluster label")

    plt.axvline(x=silhouette_avg, color="red", linestyle="--")

    plt.savefig('Silhouette '+str(n_clusters-1))
    plt.show()
Example 36
                       y_max=15,
                       x_label='frequency [Hz]',
                       y_label='PSD [V**2/Hz]',
                       line_style='--',
                       line_width=2,
                       line_color='black',
                       institute_label='NIST Cartesian Geometry',
                       data_label='Exp',
                       plot_title='Sandia 1 m Helium Plume Puffing Frequency',
                       show_legend=True,
                       legend_location='right')

# add error to measured puffing freq
plt.fill_betweenx(PSDmeas,
                  np.array([1.19, 1.19]),
                  np.array([1.53, 1.53]),
                  color='lightgrey',
                  figure=fh)

fh = macfp.plot_to_fig(f1p5,
                       Pxx_den_1p5,
                       plot_type='linear',
                       x_min=0,
                       x_max=4,
                       y_min=0,
                       y_max=15,
                       x_label='frequency [Hz]',
                       y_label='PSD [V**2/Hz]',
                       data_label='FDS $\Delta x=1.5$ cm',
                       line_style='-',
                       line_width=1,
Example 37
def plot_tpl(uniq_c):
    print(uniq_c)
    template_name, ruleset_name = uniq_c
    data_template = fits.open(path_2_template(template_name))

    x_template = data_template[1].data['LAMBDA']
    y_template = data_template[1].data['FLUX_DENSITY']
    ok = (y_template > 1e-18)

    selection = (template == template_name) & (ruleset == ruleset_name)
    N_used = len(selection.nonzero()[0])
    if N_used > 1:
        print('N in catalog', N_used)
        MAG = hd[1].data['MAG'][selection]
        TEXP_B = hd[1].data['TEXP_B'][selection]
        TEXP_G = hd[1].data['TEXP_G'][selection]
        TEXP_D = hd[1].data['TEXP_D'][selection]

        # one plot per template
        # A4 figure
        fig = p.figure(0, (8.2, 11.7), frameon=False)

        # template
        fig.add_subplot(
            411,
            title='template=' + template_name,
            xlabel='wavelength [Angstrom]',
            ylabel=r'Flux [$f_\lambda$ erg cm$^{-2}$ s$^{-1}$ A$^{-1}$]',
            ylim=((n.median(y_template[ok]) / 10,
                   n.median(y_template[ok]) * 10)),
            xlim=((3600, 9400)),
            yscale='log')

        p.grid()
        p.plot(x_template[ok], y_template[ok], color='black', lw=1)
        ys = [n.min(y_template[ok]), n.max(y_template[ok])]
        p.fill_betweenx(ys,
                        x1=[4200.0, 4200.0],
                        x2=[5000.0, 5000.0],
                        alpha=0.1,
                        color='b')
        p.fill_betweenx(ys,
                        x1=[5500.0, 5500.0],
                        x2=[6700.0, 6700.0],
                        alpha=0.1,
                        color='g')
        p.fill_betweenx(ys,
                        x1=[7200.0, 7200.0],
                        x2=[9000.0, 9000.0],
                        alpha=0.1,
                        color='r')

        # exposure time tracks
        fig.add_subplot(412,
                        title='ruleset=' + ruleset_name,
                        xlabel='MAG',
                        ylabel='TEXP [minutes]',
                        yscale='log')

        p.plot(MAG,
               TEXP_B,
               color='r',
               marker='x',
               ls='',
               label=r'bright N$_{fh}$=' +
               str(n.round(n.sum(TEXP_B) / 60., 1)),
               rasterized=True)
        p.plot(MAG,
               TEXP_G,
               color='g',
               marker='+',
               ls='',
               label=r'grey N$_{fh}$=' + str(n.round(n.sum(TEXP_G) / 60., 1)),
               rasterized=True)
        p.plot(MAG,
               TEXP_D,
               color='b',
               marker='^',
               ls='',
               label=r'dark N$_{fh}$=' + str(n.round(n.sum(TEXP_D) / 60., 1)),
               rasterized=True)
        p.grid()
        p.legend(frameon=False, loc=0)

        # statistics in catalog
        fig.add_subplot(413, xlabel='MAG', ylabel='Counts (N)', yscale='log')

        p.hist(MAG,
               bins=n.arange(n.min(MAG) - 0.1,
                             n.max(MAG) + 0.1, 0.1),
               histtype='step',
               lw=2)
        p.grid()

        fig.add_subplot(414,
                        xlabel='MAG',
                        ylabel='N x TEXP (hours)',
                        yscale='log')

        p.hist(MAG,
               weights=TEXP_D / 60.,
               bins=n.arange(n.min(MAG) - 0.1,
                             n.max(MAG) + 0.1, 0.1),
               histtype='step',
               label=r'dark',
               lw=2)
        p.hist(MAG,
               weights=TEXP_G / 60.,
               bins=n.arange(n.min(MAG) - 0.1,
                             n.max(MAG) + 0.1, 0.1),
               histtype='step',
               label=r'grey',
               lw=2)
        p.hist(MAG,
               weights=TEXP_B / 60.,
               bins=n.arange(n.min(MAG) - 0.1,
                             n.max(MAG) + 0.1, 0.1),
               histtype='step',
               label=r'bright',
               lw=2)
        p.legend(frameon=False, loc=0)

        p.grid()
        p.savefig(
            os.path.join(output_folder,
                         ruleset_name + '_' + template_name[:-5] + '.png'))
        p.tight_layout()
        p.clf()
    else:
        print('not used')
Example 38
galq[3] = 1908
galq[4] = 2799
galq[5] = 4341
galq[6] = 4862
galq[7] = 5000
galq[8] = 6564

# Redshift
z = N.arange(0., 1., 0.01)

plt.figure(1, figsize=(11, 9), dpi=80, facecolor='w', edgecolor='k')
plt.clf()
for ii in range(7):
    x, y = U.get_data(root_to_filts + filters[ii], (0, 1))
    plt.plot(x, y, '-', lw=1, color=colores[:, ii])
    plt.fill_betweenx(y, x, 0, color=colores[:, ii], alpha=0.1)
    plt.grid()
    plt.xlabel('Wavelength [$\AA$]', size=25, labelpad=5)
    plt.ylabel('$z$', size=35)
    plt.xlim(3500, 9500)
    plt.ylim(0.01, 1.)
plt.plot(galw[2] * (1 + z), z, '--', color='black', alpha=0.95)
plt.plot(galw[5] * (1 + z), z, '-', color='black', alpha=0.95)
plt.xticks(fontsize=20)
plt.yticks(fontsize=20)
"""
qsoline = []
qsoline.append('Ly-alpha: 1215$\AA$')
qsoline.append('$N_{V}$: 1240$\AA$')
qsoline.append('$C_{IV}$: 1549$\AA$')
qsoline.append('$C_{III}$: 1908$\AA$')
Example 39
def escut(image, pos_file, fwhm, peak):
    # input image file name, file name with matched source positions, **np.array of fwhm measurements for each source
    import numpy as np
    import matplotlib.pyplot as plt
    from scipy import stats
    from pyraf import iraf
    # import sewpy
    import os
    from matplotlib.path import Path

    iraf.images(_doprint=0)
    iraf.tv(_doprint=0)
    iraf.ptools(_doprint=0)
    iraf.noao(_doprint=0)
    iraf.digiphot(_doprint=0)
    iraf.photcal(_doprint=0)
    iraf.apphot(_doprint=0)
    iraf.imutil(_doprint=0)

    iraf.unlearn(iraf.phot, iraf.datapars, iraf.photpars, iraf.centerpars,
                 iraf.fitskypars)
    iraf.apphot.phot.setParam('interactive', "no")
    iraf.apphot.phot.setParam('verify', "no")
    iraf.datapars.setParam('datamax', 50000.)
    iraf.datapars.setParam('gain', "gain")
    iraf.datapars.setParam('ccdread', "rdnoise")
    iraf.datapars.setParam('exposure', "exptime")
    iraf.datapars.setParam('airmass', "airmass")
    iraf.datapars.setParam('filter', "filter")
    iraf.datapars.setParam('obstime', "time-obs")
    # iraf.datapars.setParam('obstime',"date-obs")
    iraf.datapars.setParam('sigma', "INDEF")
    iraf.photpars.setParam('zmag', 0.)
    iraf.centerpars.setParam('cbox', 9.)
    iraf.centerpars.setParam('maxshift', 3.)
    iraf.fitskypars.setParam('salgorithm', "median")
    iraf.fitskypars.setParam('dannulus', 10.)

    # clean up the indefs so we can actually do stats, but reassign them to 99999 so we don't lose track of things
    # keep a separate list without them to do the median (we need floats)
    indefs = np.where(fwhm == 'INDEF')
    good = np.where(fwhm != 'INDEF')
    fwhm[indefs] = 99.999
    fwhm = fwhm.astype(float)
    fwhm_good = fwhm[good].astype(float)

    indefs = np.where(peak == 'INDEF')
    peak[indefs] = -999.999
    peak = peak.astype(float)
    peak_good = peak[good].astype(float)

    if not os.path.isfile(image[0:-5] + '.txdump'):
        # findavgfwhm = sewpy.SEW(
        #     params = ["X_IMAGE", "Y_IMAGE", "FWHM_IMAGE", "FLAGS"],
        #     config = {"DETECT_THRESH":200.0},
        #     sexpath = "sex"
        # )
        #
        # out = findavgfwhm(image)["table"]
        #
        # fwhms = out['FWHM_IMAGE'] # This is an astropy table.
        # flags = out['FLAGS']

        # get a really rough estimate of the stellar FWHM in the image to set apertures

        # use the input fwhm measurement
        # ap1x = fwhm_est

        # xpos = datatable['X_IMAGE']
        # ypos = datatable['Y_IMAGE']
        # fwhm = datatable['FWHM_IMAGE']
        # flags = datatable['FLAGS']
        # idno = datatable['NUMBER']
        ap1x = np.median(
            fwhm_good
        )  # only use isolated detections of stars, this is the 1x aperture
        # print ap1x
        ap2x = 2.0 * ap1x

        # these = [ i for i,id in enumerate(idno) if (flags[i] == 0)]

        # with open(image[0:-5]+'.escut.pos','w+') as f:
        #     for j in range(len(xpos)):
        #         print >> f, xpos[j], ypos[j], fwhm[j], idno[j]

        iraf.datapars.setParam('fwhmpsf', ap1x)
        iraf.photpars.setParam('apertures', repr(ap1x) + ', ' + repr(ap2x))
        iraf.fitskypars.setParam('annulus', 4. * ap1x)
        iraf.apphot.phot(image=image,
                         coords=pos_file,
                         output=image[0:-5] + '.phot')
        with open(image[0:-5] + '.txdump', 'w+') as txdump_out:
            iraf.ptools.txdump(
                textfiles=image[0:-5] + '.phot',
                fields=
                "id,mag,merr,msky,stdev,rapert,xcen,ycen,ifilter,xairmass,image",
                expr=
                'MAG[1] != INDEF && MERR[1] != INDEF && MAG[2] != INDEF && MERR[2] != INDEF',
                headers='no',
                Stdout=txdump_out)

    mag1x, mag2x = np.loadtxt(image[0:-5] + '.txdump',
                              usecols=(1, 2),
                              unpack=True)
    iraf_id = np.loadtxt(image[0:-5] + '.txdump',
                         usecols=(0, ),
                         dtype=int,
                         unpack=True)
    # idno = np.loadtxt(image[0:-5]+'.escut.pos', usecols=(3,), dtype=int, unpack=True)
    xpos, ypos = np.loadtxt(pos_file, usecols=(0, 1), unpack=True)

    keepIndex = iraf_id - 1

    xpos, ypos, fwhm, peak = xpos[keepIndex], ypos[keepIndex], fwhm[
        keepIndex], peak[keepIndex]

    # print idno.size, iraf_id.size, xpos.size

    diff = mag2x - mag1x

    diffCut = diff
    magCut = mag2x
    xCut = xpos  #[good]
    yCut = ypos  #[good]
    idCut = iraf_id
    fwhmCut = fwhm  #_good
    peakCut = peak

    print(peakCut.size, magCut.size, diffCut.size)

    print(diffCut.size, 0, np.median(diffCut), diffCut.std())
    nRemoved = 1

    # plt.clf()
    # plt.scatter(peakCut, magCut, edgecolor='none')
    # plt.savefig('peaktest.pdf')

    plt.clf()
    # plt.hlines(bin_edges, -2, 1, colors='red', linestyle='dashed')
    plt.scatter(diff, mag2x, edgecolor='none', facecolor='black', s=4)
    # plt.scatter(diffCut, magCut, edgecolor='none', facecolor='blue', s=4)
    magdiff = list(
        zip(magCut.tolist(), diffCut.tolist(), peakCut.tolist(),
            idCut.tolist()))
    dtype = [('mag', float), ('diff', float), ('peak', float), ('id', int)]
    magdiff = np.array(magdiff, dtype=dtype)

    magSort = np.sort(magdiff, order='peak')

    peakRange = (magSort['peak'] > 20000.0) & (magSort['peak'] < 40000.0)
    peakVal = np.median((magSort['diff'])[np.where(peakRange)])
    # peakVal = np.median(diffCut)
    print(peakVal)

    plt.scatter((magSort['diff'])[np.where(peakRange)],
                (magSort['mag'])[np.where(peakRange)],
                edgecolor='none',
                facecolor='blue',
                s=4)

    while nRemoved != 0:
        nBefore = diffCut.size
        diffCheck = np.where(
            abs(peakVal - diffCut) < 2.0 *
            diffCut.std())  #[i for i,d in enumerate(diff) if (-0.5 < d < 0.0)]

        #
        diffCut = diffCut[diffCheck]
        nRemoved = nBefore - diffCut.size
        magCut = magCut[diffCheck]
        xCut = xCut[diffCheck]
        yCut = yCut[diffCheck]
        idCut = idCut[diffCheck]
        fwhmCut = fwhmCut[diffCheck]
        print(diffCut.size, nRemoved, np.median(diffCut), diffCut.std())
        if 0.05 < diffCut.std() < 0.06:
            nRemoved = 0
        # plt.fill_betweenx(bin_centers, bin_meds+3.0*bin_stds, bin_meds-3.0*bin_stds, facecolor='red', edgecolor='none', alpha=0.4, label='2x RMS sigma clipping region')

    # with open('escutSTD_i.pos','w+') as f:
    #     for i,blah in enumerate(xCut):
    #         print >> f, xCut[i], yCut[i], diffCut[i]

    bin_meds, bin_edges, binnumber = stats.binned_statistic(magCut,
                                                            diffCut,
                                                            statistic='median',
                                                            bins=24,
                                                            range=(-12, 0))
    bin_stds, bin_edges, binnumber = stats.binned_statistic(magCut,
                                                            diffCut,
                                                            statistic=np.std,
                                                            bins=24,
                                                            range=(-12, 0))
    bin_width = (bin_edges[1] - bin_edges[0])
    bin_centers = bin_edges[1:] - bin_width / 2
    # print bin_meds, bin_stds
    bin_hw = np.zeros_like(bin_stds)
    for i, bin_std in enumerate(bin_stds):
        if bin_std > 0.025:
            bin_hw[i] = 3.0 * bin_std
        else:
            bin_hw[i] = 0.075

    # print len(binnumber)
    # for i,bin_hwi in enumerate(bin_hw):

    left_edge = np.array(list(zip(peakVal - bin_hw, bin_centers)))
    right_edge = np.flipud(np.array(list(zip(peakVal + bin_hw, bin_centers))))
    # print left_edge, right_edge
    verts = np.vstack((left_edge, right_edge))
    # print verts
    # verts = np.delete(verts, np.array([0,1,2,22,23,24,25,45,46,47]), axis=0)

    # WARNING: a Path built this way can apparently select the inverse set of points -- verify the selection
    # print verts
    esRegion = Path(verts)
    sources = esRegion.contains_points(list(zip(diff, mag2x)))
    # print sources

    with open('escutREG_i.pos', 'w+') as f:
        for i, blah in enumerate(xpos[sources]):
            print((xpos[sources])[i], (ypos[sources])[i], (diff[sources])[i],
                  file=f)

    magCut2 = mag2x[sources]
    magCut1 = mag1x[sources]
    fwhmCut = fwhm[sources]
    xCut = xpos[sources]
    yCut = ypos[sources]
    diffCut = diff[sources]

    # find the sources that are in the std method but not the region method
    # print idCut, idno[sources]
    # extrasSTD = np.setdiff1d(idno[sources], idCut)
    # print extrasSTD.size
    # print extrasSTD
    # with open('escutUNIQUE.pos','w+') as f:
    #     for i,blah in enumerate(extrasSTD):
    #         print >> f, xpos[blah-1], ypos[blah-1]

    # fwhmcheck = np.loadtxt('testfwhmREG.log', usecols=(10,), unpack=True)
    fwhmchk2 = np.where((magCut2 < -4) & (fwhmCut < 90.0))
    print(np.median(fwhmCut[fwhmchk2]), np.std(fwhmCut[fwhmchk2]))
    fwchk = np.where(
        np.abs(fwhmCut - np.median(fwhmCut[fwhmchk2])) > 10.0 *
        np.std(fwhmCut[fwhmchk2]))
    drop = np.abs(fwhmCut - np.median(fwhmCut[fwhmchk2])) > 10.0 * np.std(
        fwhmCut[fwhmchk2])
    keep = np.abs(fwhmCut - np.median(fwhmCut[fwhmchk2])) <= 10.0 * np.std(
        fwhmCut[fwhmchk2])

    with open('escutVBAD_i.pos', 'w+') as f:
        for i, blah in enumerate(xCut[fwchk]):
            print((xCut[fwchk])[i], (yCut[fwchk])[i], file=f)

    with open('escut_i.pos', 'w+') as f:
        for i, blah in enumerate(xCut):
            if not drop[i]:
                print(xCut[i],
                      yCut[i],
                      magCut2[i],
                      fwhmCut[i],
                      magCut1[i],
                      file=f)

    with open('escut_g.pos', 'w+') as f:
        for i, blah in enumerate(xCut):
            if not drop[i]:
                print(xCut[i],
                      yCut[i],
                      magCut2[i],
                      fwhmCut[i],
                      magCut1[i],
                      file=f)

    plt.fill_betweenx(bin_centers,
                      peakVal + bin_hw,
                      peakVal - bin_hw,
                      facecolor='red',
                      edgecolor='none',
                      alpha=0.4,
                      label='2x RMS sigma clipping region')

    plt.scatter(diffCut[fwchk],
                magCut2[fwchk],
                edgecolor='none',
                facecolor='red',
                s=4)
    plt.ylim(0, -12)
    plt.xlabel('$m_{2x} - m_{1x}$')
    plt.ylabel('$m_{2x}$')
    plt.xlim(-2, 1)
    plt.savefig('testmagiraf.pdf')

    plt.clf()
    plt.scatter(magCut2, fwhmCut, edgecolor='none', facecolor='black')
    plt.scatter(magCut2[fwchk],
                fwhmCut[fwchk],
                edgecolor='none',
                facecolor='red')
    plt.hlines([np.median(fwhmCut)], -12, 0, colors='red', linestyle='dashed')
    plt.hlines([
        np.median(fwhmCut) + fwhmCut.std(),
        np.median(fwhmCut) - fwhmCut.std()
    ],
               -12,
               0,
               colors='red',
               linestyle='dotted')
    plt.ylim(0, 20)
    plt.xlim(-12, 0)
    plt.ylabel('fwhm')
    plt.xlabel('$m_{2x}$')
    plt.savefig('fwhmcheck.pdf')

    return fwhmCut[keep]
def plot_llr_distributions(llr_nmh, llr_imh, nbins, plot_gauss=True, imh_true=False):
    """Plots LLR distributions-expects llr_nmh and llr_imh to be type
    Series. Also plots vertical line signifying the mean of the
    hierarchy assumed to be given in nature, and the percentage of
    trials from the opposite hierarchy with LLR beyond the mean.
    """

    fig = plt.figure(figsize=(9, 8))
    label_text = r"$\log ( \mathcal{L}(data: IH|IH) / \mathcal{L}( data: IH|NH) )$"
    llr_imh.hist(bins=nbins, histtype="step", lw=2, color="b", label=label_text)
    hist_vals_imh, bincen_imh = plot_error(llr_imh, nbins, fmt=".b", lw=2)
    if plot_gauss:
        fit_imh = plot_gauss_fit(llr_imh, hist_vals_imh, bincen_imh, color="b", lw=2)

    label_text = r"$\log ( \mathcal{L}(data: NH|IH) / \mathcal{L}(data: NH|NH) )$"
    llr_nmh.hist(bins=nbins, histtype="step", lw=2, color="r", label=label_text)
    hist_vals_nmh, bincen_nmh = plot_error(llr_nmh, nbins, fmt=".r", lw=2)
    if plot_gauss:
        fit_nmh = plot_gauss_fit(llr_nmh, hist_vals_nmh, bincen_nmh, color="r", lw=2)

    if imh_true:
        mean_val = llr_nmh.mean()
        pvalue = 1.0 - float(np.sum(llr_imh > mean_val)) / len(llr_imh)
    else:
        mean_val = llr_imh.mean()
        pvalue = float(np.sum(llr_nmh > mean_val)) / len(llr_nmh)

    ymax = max(hist_vals_nmh) if imh_true else max(hist_vals_imh)
    bincen = bincen_imh if imh_true else bincen_nmh
    vline = plt.vlines(mean_val, 1, ymax, colors="k", linewidth=2, label=("pval = %.4f" % pvalue))

    sigma_1side = np.sqrt(2.0) * erfinv(1.0 - pvalue)
    sigma_2side = norm.isf(pvalue)
    print "  Using non-gauss fit: "
    print "    pvalue: %.5f" % pvalue
    print "    sigma 1 sided (erfinv): %.4f" % sigma_1side
    print "    sigma 2 sided (isf)   : %.4f" % sigma_2side

    sigma_fit_imh = (fit_imh[1] - fit_nmh[1]) / fit_imh[2]
    sigma_fit_nmh = (fit_imh[1] - fit_nmh[1]) / fit_nmh[2]
    pval_imh = 1.0 - norm.cdf(sigma_fit_imh)
    pval_nmh = 1.0 - norm.cdf(sigma_fit_nmh)
    sigma_1side_imh = np.sqrt(2.0) * erfinv(1.0 - pval_imh)
    sigma_1side_nmh = np.sqrt(2.0) * erfinv(1.0 - pval_nmh)

    print "\n\n  pval IMH: %.5f, pval NMH: %.5f" % (pval_imh, pval_nmh)
    print "  sigma gauss fit (IMH true): %.4f/%.4f" % (sigma_fit_imh, sigma_1side_imh)
    print "  sigma gauss fit (NMH true): %.4f/%.4f" % (sigma_fit_nmh, sigma_1side_nmh)

    sigma_error_nmh = np.sqrt(1.0 + 0.5 * (2.0 / sigma_1side_nmh) ** 2) / np.sqrt(len(llr_nmh))
    sigma_error_imh = np.sqrt(1.0 + 0.5 * (2.0 / sigma_1side_imh) ** 2) / np.sqrt(len(llr_imh))
    logging.info("total trials: %d", len(llr_nmh))
    logging.info("  nmh sigma error: %f" % sigma_error_nmh)
    logging.info("  imh sigma error: %f" % sigma_error_imh)

    if imh_true:
        plt.fill_betweenx(hist_vals_imh, bincen, x2=mean_val, where=bincen < mean_val, alpha=0.5, hatch="xx")
    else:
        plt.fill_betweenx(hist_vals_nmh, bincen, x2=mean_val, where=bincen > mean_val, alpha=0.5, hatch="xx")

    if args.present:
        plt.ylabel("# Trials")
        plt.xlabel("LLR value")
    else:
        plt.ylabel("# Trials", fontsize="x-large")
        plt.xlabel("LLR value", fontsize="x-large")

    return
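
# The hatched fill_betweenx call above shades the part of the alternative-hierarchy
# LLR histogram that falls beyond the mean of the true-hierarchy distribution. A
# minimal, self-contained sketch of the same idea with synthetic normal samples
# standing in for the LLR trials (all names and numbers below are illustrative,
# not part of the original analysis):
import numpy as np
import matplotlib.pyplot as plt

rng = np.random.default_rng(0)
llr_alt = rng.normal(loc=2.0, scale=1.0, size=5000)  # stand-in for the alt-hierarchy LLR trials
mean_val = 0.0                                       # stand-in for the true-hierarchy mean LLR

hist_vals, edges = np.histogram(llr_alt, bins=50)
bincen = 0.5 * (edges[:-1] + edges[1:])

plt.step(bincen, hist_vals, where='mid', color='b')
plt.vlines(mean_val, 0, hist_vals.max(), colors='k', linewidth=2)
# hatch the part of the alternative distribution that falls below the reference mean
plt.fill_betweenx(hist_vals, bincen, x2=mean_val,
                  where=bincen < mean_val, alpha=0.5, hatch='xx')
plt.xlabel('LLR value')
plt.ylabel('# Trials')
plt.show()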
for ii, (condition, color, eve_id) in enumerate(zip(X, colors, event_ids)):
    # extract time course at cluster vertices
    condition = condition[:, :, inds_v]
    # normally we would normalize values across subjects but
    # here we use data from the same subject so we're good to just
    # create average time series across subjects and vertices.
    mean_tc = condition.mean(axis=2).mean(axis=0)
    std_tc = condition.std(axis=2).std(axis=0)
    plt.plot(times, mean_tc.T, color=color, label=eve_id)
    plt.fill_between(times,
                     mean_tc + std_tc,
                     mean_tc - std_tc,
                     color='gray',
                     alpha=0.5,
                     label='')

ymin, ymax = mean_tc.min() - 5, mean_tc.max() + 5
plt.xlabel('Time (ms)')
plt.ylabel('Activation (F-values)')
plt.xlim(times[[0, -1]])
plt.ylim(ymin, ymax)
plt.fill_betweenx((ymin, ymax),
                  times[inds_t[0]],
                  times[inds_t[-1]],
                  color='orange',
                  alpha=0.3)
plt.legend()
plt.title('Interaction between stimulus-modality and location.')
plt.show()
Esempio n. 42
0
chain_0 = trace_0[1000:]
varnames = ['alpha', 'beta', 'bd']
pm.traceplot(chain_0, varnames)
plt.savefig('img505.png', dpi=300, figsize=(5.5, 5.5))

plt.figure()

pm.summary(chain_0, varnames)

theta = chain_0['theta'].mean(axis=0)
idx = np.argsort(x_0)
plt.plot(x_0[idx], theta[idx], color='b', lw=3)
plt.axvline(chain_0['bd'].mean(), ymax=1, color='r')
bd_hpd = pm.hpd(chain_0['bd'])
plt.fill_betweenx([0, 1], bd_hpd[0], bd_hpd[1], color='r', alpha=0.5)

plt.plot(x_0, y_0, 'o', color='k')
theta_hpd = pm.hpd(chain_0['theta'])[idx]
plt.fill_between(x_0[idx],
                 theta_hpd[:, 0],
                 theta_hpd[:, 1],
                 color='b',
                 alpha=0.5)

plt.xlabel(x_n, fontsize=16)
plt.ylabel(r'$\theta$', rotation=0, fontsize=16)
plt.savefig('img506.png', dpi=300, figsize=(5.5, 5.5))

plt.figure()
                fontsize=9)
    ax.annotate("Hypervolume: " + str(round(hv, 2)), (0.10, 0.44),
                fontsize=9,
                color='blue')
    new_pfx = pfx[:1]
    new_pfy = pfy[:1]
    for i in range(1, len(pfx) - 2):
        new_pfx.append(pfx[i])
        new_pfx.append(pfx[i + 1])
    for u in range(1, len(pfy) - 2):
        new_pfy.append(pfy[u])
        new_pfy.append(pfy[u])
    new_pfx.extend(pfx[len(pfx) - 1:])
    new_pfy.extend(pfy[len(pfy) - 1:])

    plt.fill_betweenx(new_pfy, new_pfx, facecolor='blue', alpha=0.2)
    plt.xlabel('Ziel 1', size=9)
    plt.ylabel('Ziel 2', size=9)
    plt.title('')
    plt.grid(False)
    plt.subplots_adjust(bottom=0.19, left=0.18)
    plt.show()

    count = 20
    random.seed(18.9654)
    ref_point2d = [0.0, 0.0]
    set2d = np.zeros((count, 2))
    for i in range(count):
        for u in range(2):
            rand = random.random()
            set2d[i, u] = rand if (rand > ref_point2d) or (
cb.set_label('Brightness contrast, log10(Shell / BG)')
plt.xlabel('Projected distance from Trapezium, D / arcmin')
plt.ylabel('Bowshock radius, r0 / arcsec')
plt.text(0.05, 0.05, 'Symbol size indicates shell relative thickness, H',
         transform=ax.transAxes, fontsize='x-small')
ax.set_xlim(0.05, 20.0)
ax.set_ylim(0.3, 11.0)
ax.set_xscale('log')
ax.set_yscale('log')
fig.savefig(pltfile)
figlist.append('[[file:luis-programas/{0}][{0}]]'.format(pltfile))

pltfile = 'will-PA-vs-PA.pdf'
fig = plt.figure(figsize=(7,6))
ax = fig.add_subplot(111, axisbg="#eeeeee")
plt.fill_betweenx([-90.0, 90.0], [0.0, 0.0], [90.0, 90.0], zorder=-10, alpha=0.05)
plt.fill_betweenx([-90.0, 90.0], [180.0, 180.0], [270.0, 270.0], zorder=-10, alpha=0.05)
plt.fill_betweenx([-90.0, 90.0], [360.0, 360.0], [450.0, 450.0], zorder=-10, alpha=0.05)
plt.text(45.0, -80.0, 'NE\nquadrant',  ha='center', fontsize='x-small')
plt.text(135.0, -80.0, 'SE\nquadrant', ha='center', fontsize='x-small')
plt.text(225.0, -80.0, 'SW\nquadrant', ha='center', fontsize='x-small')
plt.text(315.0, -80.0, 'NW\nquadrant', ha='center', fontsize='x-small')
plt.axhline(zorder=-5)
plt.scatter(PA_star[m], dPA[m], s=20*tab['R_out'][m], c=D60[m], cmap=plt.cm.hot, alpha=0.6)
label_sources(tab['Object'], PA_star, dPA, np.abs(dPA) > 45.0, allmask=m)
cb = plt.colorbar()
cb.set_label('Projected distance from Trapezium, D / arcmin')
plt.xlabel('PA of source from Trapezium, deg')
plt.ylabel('Angle between bowshock axis and radial direction, deg')
ax.set_xlim(-30.0, 375.0)
ax.set_ylim(-90.0, 90.0)
Esempio n. 45
0
    def plot_fitting_results(self):
        """
        Plotting model fitting results.
        """
        data_dates = [self.ref_date + timedelta(days=t) for t in self.times]
        if self.hospital_times is not None:
            hosp_dates = [
                self.ref_date + timedelta(days=float(t))
                for t in self.hospital_times
            ]
        model_dates = [
            self.ref_date + timedelta(days=t + self.fit_results['t0'])
            for t in self.t_list
        ]

        # Don't display the zero-inflated error bars
        cases_err = np.array(self.cases_stdev)
        cases_err[self.observed_new_cases == 0] = 0
        death_err = deepcopy(self.deaths_stdev)
        death_err[self.observed_new_deaths == 0] = 0
        if self.hosp_stdev is not None:
            hosp_stdev = deepcopy(self.hosp_stdev)
            hosp_stdev[hosp_stdev > 1e5] = 0

        plt.figure(figsize=(18, 12))
        plt.errorbar(data_dates,
                     self.observed_new_cases,
                     yerr=cases_err,
                     marker='o',
                     linestyle='',
                     label='Observed Cases Per Day',
                     color='steelblue',
                     capsize=3,
                     alpha=.4,
                     markersize=10)
        plt.errorbar(data_dates,
                     self.observed_new_deaths,
                     yerr=death_err,
                     marker='d',
                     linestyle='',
                     label='Observed Deaths Per Day',
                     color='firebrick',
                     capsize=3,
                     alpha=.4,
                     markersize=10)

        plt.plot(model_dates,
                 self.mle_model.results['total_new_infections'],
                 label='Estimated Total New Infections Per Day',
                 linestyle='--',
                 lw=4,
                 color='steelblue')
        plt.plot(model_dates,
                 self.fit_results['test_fraction'] *
                 self.mle_model.results['total_new_infections'],
                 label='Estimated Tested New Infections Per Day',
                 color='steelblue',
                 lw=4)

        plt.plot(model_dates,
                 self.mle_model.results['total_deaths_per_day'],
                 label='Model Deaths Per Day',
                 color='firebrick',
                 lw=4)

        if self.hospitalization_data_type is HospitalizationDataType.CUMULATIVE_HOSPITALIZATIONS:
            new_hosp_observed = self.hospitalizations[
                1:] - self.hospitalizations[:-1]
            plt.errorbar(hosp_dates[1:],
                         new_hosp_observed,
                         yerr=hosp_stdev,
                         marker='s',
                         linestyle='',
                         label='Observed New Hospitalizations Per Day',
                         color='darkseagreen',
                         capsize=3,
                         alpha=1)
            predicted_hosp = (self.mle_model.results['HGen_cumulative'] +
                              self.mle_model.results['HICU_cumulative'])
            predicted_hosp = predicted_hosp[1:] - predicted_hosp[:-1]
            plt.plot(model_dates[1:],
                     self.fit_results['hosp_fraction'] * predicted_hosp,
                     label='Estimated Total New Hospitalizations Per Day',
                     linestyle='-.',
                     lw=4,
                     color='darkseagreen',
                     markersize=10)
        elif self.hospitalization_data_type is HospitalizationDataType.CURRENT_HOSPITALIZATIONS:
            plt.errorbar(hosp_dates,
                         self.hospitalizations,
                         yerr=hosp_stdev,
                         marker='s',
                         linestyle='',
                         label='Observed Total Current Hospitalizations',
                         color='darkseagreen',
                         capsize=3,
                         alpha=.5,
                         markersize=10)
            predicted_hosp = (self.mle_model.results['HGen'] +
                              self.mle_model.results['HICU'])
            plt.plot(model_dates,
                     self.fit_results['hosp_fraction'] * predicted_hosp,
                     label='Estimated Total Current Hospitalizations',
                     linestyle='-.',
                     lw=4,
                     color='darkseagreen')

        plt.plot(model_dates,
                 self.fit_results['hosp_fraction'] *
                 self.mle_model.results['HICU'],
                 label='Estimated ICU Occupancy',
                 linestyle=':',
                 lw=6,
                 color='black')
        plt.plot(model_dates,
                 self.fit_results['hosp_fraction'] *
                 self.mle_model.results['HGen'],
                 label='Estimated General Occupancy',
                 linestyle=':',
                 lw=4,
                 color='black',
                 alpha=0.4)

        plt.yscale('log')
        y_lim = plt.ylim(.8e0)

        start_intervention_date = self.ref_date + timedelta(
            days=self.fit_results['t_break'] + self.fit_results['t0'])
        stop_intervention_date = start_intervention_date + timedelta(days=14)

        plt.fill_betweenx([y_lim[0], y_lim[1]],
                          [start_intervention_date, start_intervention_date],
                          [stop_intervention_date, stop_intervention_date],
                          alpha=0.2,
                          label='Estimated Intervention')

        running_total = timedelta(days=0)
        for i_label, k in enumerate(('symptoms_to_hospital_days',
                                     'hospitalization_length_of_stay_general',
                                     'hospitalization_length_of_stay_icu')):

            end_time = timedelta(days=self.SEIR_kwargs[k])
            x = start_intervention_date + running_total
            y = 1.5**(i_label + 1)
            plt.errorbar(x=[x],
                         y=[y],
                         xerr=[[timedelta(days=0)], [end_time]],
                         marker='',
                         capsize=8,
                         color='k',
                         elinewidth=3,
                         capthick=3)
            plt.text(x + (end_time + timedelta(days=2)),
                     y,
                     k.replace('_', ' ').title(),
                     fontsize=14)
            running_total += end_time

        if self.SEIR_kwargs['beds_ICU'] > 0:
            plt.hlines(self.SEIR_kwargs['beds_ICU'],
                       *plt.xlim(),
                       color='k',
                       linestyles='-',
                       linewidths=6,
                       alpha=0.2)
            plt.text(data_dates[0] + timedelta(days=5),
                     self.SEIR_kwargs['beds_ICU'] * 1.1,
                     'Available ICU Capacity',
                     color='k',
                     alpha=0.5,
                     fontsize=15)

        plt.ylim(*y_lim)
        plt.xlim(min(model_dates[0], data_dates[0]),
                 data_dates[-1] + timedelta(days=150))
        plt.xticks(rotation=30, fontsize=14)
        plt.yticks(fontsize=14)
        plt.legend(loc=4, fontsize=14)
        plt.grid(which='both', alpha=.5)
        plt.title(self.display_name, fontsize=20)

        for i, (k, v) in enumerate(self.fit_results.items()):

            fontweight = 'bold' if k in ('R0', 'Reff') else 'normal'

            if np.isscalar(v) and not isinstance(v, str):
                plt.text(1.05,
                         .7 - 0.032 * i,
                         f'{k}={v:1.3f}',
                         transform=plt.gca().transAxes,
                         fontsize=15,
                         alpha=.6,
                         fontweight=fontweight)
            else:
                plt.text(1.05,
                         .7 - 0.032 * i,
                         f'{k}={v}',
                         transform=plt.gca().transAxes,
                         fontsize=15,
                         alpha=.6,
                         fontweight=fontweight)

        output_file = get_run_artifact_path(self.fips,
                                            RunArtifact.MLE_FIT_REPORT)
        plt.savefig(output_file, bbox_inches='tight')
        plt.close()

        self.mle_model.plot_results()
        plt.savefig(output_file.replace('mle_fit_results', 'mle_fit_model'),
                    bbox_inches='tight')
        plt.close()
Esempio n. 46
0
def horiz_plot(v_coord, orography, style_args):
    y = v_coord.points
    x = orography.points
    return plt.fill_betweenx(y, x, **style_args)
import matplotlib.pyplot as plt
inds_t, inds_v = [(clusters[cluster_ind]) for ii, cluster_ind in
                  enumerate(good_cluster_inds)][0]  # first cluster

times = np.arange(X[0].shape[1]) * tstep * 1e3

plt.clf()
colors = ['y', 'b', 'g', 'purple']
event_ids = ['l_aud', 'r_aud', 'l_vis', 'r_vis']

for ii, (condition, color, eve_id) in enumerate(zip(X, colors, event_ids)):
    # extract time course at cluster vertices
    condition = condition[:, :, inds_v]
    # normally we would normalize values across subjects but
    # here we use data from the same subject so we're good to just
    # create average time series across subjects and vertices.
    mean_tc = condition.mean(axis=2).mean(axis=0)
    std_tc = condition.std(axis=2).std(axis=0)
    plt.plot(times, mean_tc.T, color=color, label=eve_id)
    plt.fill_between(times, mean_tc + std_tc, mean_tc - std_tc, color='gray',
                     alpha=0.5, label='')

plt.xlabel('Time (ms)')
plt.ylabel('Activation (F-values)')
plt.xlim(times[[0, -1]])
plt.fill_betweenx(np.arange(*plt.ylim()), times[inds_t[0]],
                  times[inds_t[-1]], color='orange', alpha=0.3)
plt.legend()
plt.title('Interaction between stimulus-modality and location.')
plt.show()
Esempio n. 48
0
    d_dos = data[:, 3]

    # check if orbital is in plotting list
    if f'{el}_s' in list_dos_plot:
        plt.plot(s_dos, energies, label=f'{el}' + r'($\it{s}$)')
    if f'{el}_p' in list_dos_plot:
        plt.plot(p_dos, energies, label=f'{el}' + r'($\it{p}$)')
    if f'{el}_d' in list_dos_plot:
        plt.plot(d_dos, energies, label=f'{el}' + r'($\it{d}$)')

data_tot = np.loadtxt(dos_path + 'DOS_total.dat', dtype='f')
energies = data_tot[:, 0]
tot_dos = data_tot[:, 1]

#plt.plot(tot_dos, energies, label = 'total')
plt.fill_betweenx(energies, tot_dos, alpha=0.25, color='k')

plt.xlabel('DOS (a.u.)')
# axes
ax = plt.gca()
plt.yticks([])
# legend on right side of plot
#ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))

plt.ylim(ymin, ymax)
plt.xlim(0, 9)

plt.legend()
#plt.show()

plt.savefig('dos_bs.pdf')
Esempio n. 49
0
from itertools import cycle

from matplotlib import patches
from matplotlib.pyplot import fill_betweenx, gca, plot
from numpy import arange, array, linspace, ones_like
from scipy.stats import gaussian_kde


def hybridRasterBp(x,
                   names=None,
                   ax=None,
                   makeviolin=False,
                   cols=None,
                   rastsep=0.3,
                   Nkde=200,
                   kdepad=None,
                   kdew=0.8,
                   marker='x'):
    '''
    Create a boxplot with a raster plot to one side of the data contained in x.

    If makeviolin is True, a kernel density estimate is computed and plotted
    down one side of the boxplot. If the matplotlib axis ax is not specified,
    the current axis is used. The names used on the x-axis can be given in
    names, otherwise numbers are used. A dictionary of colours can be given in
    cols, otherwise a cycle based on the default matplotlib colour cycle is
    used. rastsep specifies the horizontal separation between the raster plot
    and the left side of the boxplot; increase it to move the raster points
    left or right. Nkde sets the resolution of the kernel density plot, should
    a higher resolution be required. kdew specifies what fraction of the
    distance between the right side of the boxplot and the raster plot is used
    for the violin plot; it does nothing if the violin plot is disabled. The
    default of 0.8 corresponds to 80% of rastsep, which seems to give
    reasonable results. The marker used in the scatter plot can be set via the
    marker argument. The argument x is expected to have shape (Nplots, Nobs).
    '''
    if ax is None:
        ax = gca()
    if names is None:
        names = arange(len(x))
    if cols is None:
        cols = dict(
            zip(names, cycle(['b', 'g', 'r', 'c', 'm', 'y', 'k'])))

    ##############################################################################
    # BOXPLOT
    ##############################################################################
    bp = ax.boxplot(x, showfliers=False)
    bx, cp, fl, mu, md, wh = (bp['boxes'], bp['caps'], bp['fliers'],
                              bp['means'], bp['medians'], bp['whiskers'])
    ax.set_xticklabels(names)

    # Colour the whiskers and boxes
    for l, n0 in zip(bx, names):
        x0, y0 = l.get_data()
        xr = (x0[1] + x0[0]) / 2
        xl = x0[0]
        x1 = array([xl, xr, xr, xl, xl])
        l.set_data(x1, y0)
        l.set_color('k')
        ax.add_patch(
            patches.Rectangle(
                (xl, y0[-1]),  # (x,y)
                xr - xl,  # width
                y0[-2] - y0[-1],  # height
                alpha=0.6,
                edgecolor=None,
                facecolor=cols[n0]))

    for l in md:
        x0, y0 = l.get_data()
        xr = (x0[1] + x0[0]) / 2
        xl = x0[0]
        x1 = array([xl, xr])
        l.set_data(x1, y0)
        l.set_color('k')

    for l in cp:
        x0, y0 = l.get_data()
        xr = (x0[1] + x0[0]) / 2
        xl = x0[0]
        x1 = array([xl, xr])
        l.set_data(x1, y0)
        l.set_color('k')

    for l in wh:
        l.set_color('k')
        l.set_linestyle("-")

    ##############################################################################
    # RASTER PLOT
    ##############################################################################
    for i, (ix, n) in enumerate(zip(x, names)):
        ax.scatter((1 + i) * ones_like(ix) + rastsep,
                   ix,
                   edgecolor=cols[n],
                   facecolor=cols[n],
                   marker=marker)

    ##############################################################################
    # VIOLIN PLOT
    ##############################################################################
    if makeviolin:
        # Estimate limits for the kde
        ymx = None
        ymn = None
        xmx = None
        kdests = list()
        for i, (ix, n) in enumerate(zip(x, names)):
            if ymn is None or ymn > min(ix):
                ymn = min(ix)
            if ymx is None or ymx < max(ix):
                ymx = max(ix)
        if kdepad is None:
            kdepad = (ymx - ymn) * 0.3
        yr = linspace(ymn - kdepad, ymx + kdepad, Nkde)
        for i, (ix, n) in enumerate(zip(x, names)):
            kdests.append(gaussian_kde(ix))
            nxmx = kdests[-1](yr).max()
            if xmx is None or nxmx > xmx:
                xmx = nxmx

        # Plot lines and fill between them based on the kernel density estimate
        for i, (ix, n, gkde) in enumerate(zip(x, names, kdests)):
            fill_betweenx(yr, (1 + i) * ones_like(yr),
                          (1 + i) + ((kdew * rastsep) / xmx) * gkde(yr),
                          color=cols[n],
                          zorder=10,
                          alpha=0.15)
            plot(1 + i + ((kdew * rastsep) / xmx) * gkde(yr),
                 yr,
                 c=cols[n],
                 alpha=0.6,
                 lw=2,
                 zorder=-9)
            plot(1 + i * ones_like(yr), yr, c='k', alpha=0.6, lw=2, zorder=-9)

    return (bp)
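
# A hypothetical usage sketch for the function above, with synthetic data
# (assumes hybridRasterBp and the imports above it are in scope; group names
# and values are made up):
import numpy as np
import matplotlib.pyplot as plt

rng = np.random.default_rng(1)
groups = [rng.normal(loc=mu, scale=1.0, size=40) for mu in (0.0, 1.0, 2.5)]

fig, ax = plt.subplots()
hybridRasterBp(groups, names=['ctrl', 'low', 'high'], ax=ax, makeviolin=True)
plt.show()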
Esempio n. 50
0
def plot_vertical_fam_loss_by_route(fam='LOx',
                                    ref_spec='O3',
                                    wd=None,
                                    Mechanism='Halogens',
                                    rm_strat=False,
                                    weight_by_molecs=True,
                                    CODE_wd=None,
                                    full_vertical_grid=True,
                                    dpi=320,
                                    suffix='',
                                    save_plot=True,
                                    show_plot=False,
                                    limit_plotted_alititude=True,
                                    lw=16,
                                    Ox_loss_dict=None,
                                    fontsize=10,
                                    cmap=plt.cm.jet,
                                    verbose=True,
                                    debug=False):
    """
    Plot vertical odd oxygen (Ox) loss via route (chemical family)

    Parameters
    -------
    fam (str): tagged family to track (already compiled in KPP mechanism)
    ref_spec (str): reference species to normalise to
    wd (str): working directory ("wd") of model output
    CODE_wd (str): root of code directory containing the tagged KPP mechanism
    Mechanism (str): name of the KPP mechanism (and folder) of model output
    weight_by_molecs (bool): weight grid boxes by number of molecules
    rm_strat (bool): (fractionally) replace values in the stratosphere with zeros
    debug, verbose (bool): switches to turn on/set verbosity of output to screen
    full_vertical_grid (bool): use the full vertical grid for analysis
    limit_plotted_alititude (bool): limit the plotted vertical extent to the troposphere
    suffix (str): suffix in filename for saved plot
    dpi (int): resolution to use for saved image (dots per square inch)
    Ox_loss_dict (dict): dictionary of Ox loss variables/data (from get_Ox_loss_dicts)

    Returns
    -------
    (None)

    Notes
    -----
     - AC_tools includes equivalent functions for smvgear mechanisms
    """
    # - Local variables/ Plot extraction / Settings
    if isinstance(Ox_loss_dict, type(None)):
        Ox_loss_dict = AC.get_Ox_loss_dicts(
            wd=wd,
            CODE_wd=CODE_wd,
            fam=fam,
            ref_spec=ref_spec,
            Mechanism=Mechanism,
            rm_strat=rm_strat,
            weight_by_molecs=weight_by_molecs,
            full_vertical_grid=full_vertical_grid,
        )
    # extract variables from data/variable dictionary
    sorted_fam_names = Ox_loss_dict['sorted_fam_names']
    fam_dict = Ox_loss_dict['fam_dict']
    ars = Ox_loss_dict['ars']
    RR_dict_fam_stioch = Ox_loss_dict['RR_dict_fam_stioch']
    RR_dict = Ox_loss_dict['RR_dict']
    tags2_rxn_num = Ox_loss_dict['tags2_rxn_num']
    tags = Ox_loss_dict['tags']
    tags_dict = Ox_loss_dict['tags_dict']
    Data_rc = Ox_loss_dict['Data_rc']
    # Combine to a single array
    arr = np.array(ars)
    if debug:
        print((arr.shape))
    # - Process data for plotting
    fam_tag = [fam_dict[i] for i in tags]
    fam_ars = []
    for fam_ in sorted_fam_names:
        # Get indices for routes of family
        fam_ind = [n for n, i in enumerate(fam_tag) if (i == fam_)]
        if debug:
            print((fam_ind, len(fam_ind)))
        # Select these ...
        fam_ars += [arr[fam_ind, ...]]
    # Recombine and sum by family...
    if debug:
        print(([i.shape for i in fam_ars], len(fam_ars)))
    arr = np.array([i.sum(axis=0) for i in fam_ars])
    if debug:
        print((arr.shape))
    # - Plot up as a stack-plot...
    # Normalise to total and convert to % (*100)
    arr = (arr / arr.sum(axis=0)) * 100
    # Add zeros array to beginning (for stack/area plot )
    arr_ = np.vstack((np.zeros((1, arr.shape[-1])), arr))
    # Setup figure
    fig, ax = plt.subplots(figsize=(9, 6),
                           dpi=dpi,
                           facecolor='w',
                           edgecolor='w')
    # Plot by family
    for n, label in enumerate(sorted_fam_names):
        # Print out some summary stats
        if verbose:
            print(n,
                  label,
                  arr[:n, 0].sum(axis=0),
                  arr[:n + 1, 0].sum(axis=0),
                  end=' ')
            print(arr[:n, :].sum(), arr[:n + 1, :].sum())
            print([i.shape for i in (Data_rc['alt'], arr)])
        # Fill between X
        plt.fill_betweenx(Data_rc['alt'],
                          arr[:n, :].sum(axis=0),
                          arr[:n + 1, :].sum(axis=0),
                          color=cmap(1. * n / len(sorted_fam_names)))
        # Plot the line too
        plt.plot(
            arr[:n, :].sum(axis=0),
            Data_rc['alt'],
            label=label,
            color=cmap(1. * n / len(sorted_fam_names)),
            alpha=0,
            lw=lw,
        )
    # Beautify the plot
    plt.xlim(0, 100)
    xlabel = '% of total O$_{\\rm x}$ loss'
    plt.xlabel(xlabel, fontsize=fontsize * .75)
    plt.yticks(fontsize=fontsize * .75)
    plt.xticks(fontsize=fontsize * .75)
    plt.ylabel('Altitude (km)', fontsize=fontsize * .75)
    leg = plt.legend(loc='upper center', fontsize=fontsize)
    # Update legend line widths (and make the handles opaque)
    for legobj in leg.legendHandles:
        legobj.set_linewidth(lw / 2)
        legobj.set_alpha(1)
    plt.ylim(Data_rc['alt'][0], Data_rc['alt'][-1])
    # Limit plot y axis to 12km?
    if limit_plotted_alititude:
        plt.ylim(Data_rc['alt'][0], 12)
    # Show plot or save?
    if save_plot:
        filename = 'Ox_loss_plot_by_vertical_{}_{}'.format(Mechanism, suffix)
        plt.savefig(filename, dpi=dpi)
    if show_plot:
        plt.show()
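
# The plotting loop above builds a horizontal stacked-area plot by filling
# between cumulative sums of the per-family loss fractions at each altitude.
# A self-contained sketch of that technique with synthetic data (the route
# names and numbers are invented for illustration):
import numpy as np
import matplotlib.pyplot as plt

alt = np.linspace(0, 12, 30)          # altitude (km)
rng = np.random.default_rng(2)
loss = rng.random((3, alt.size))      # 3 hypothetical loss routes
loss = loss / loss.sum(axis=0) * 100  # normalise to percent at each level

cmap = plt.cm.jet
labels = ['route A', 'route B', 'route C']
for n, label in enumerate(labels):
    plt.fill_betweenx(alt,
                      loss[:n, :].sum(axis=0),
                      loss[:n + 1, :].sum(axis=0),
                      color=cmap(n / len(labels)),
                      label=label)
plt.xlim(0, 100)
plt.xlabel('% of total loss')
plt.ylabel('Altitude (km)')
plt.legend()
plt.show()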
Esempio n. 51
0
def main(sta1,
         sta2,
         filterid,
         components,
         mov_stack=1,
         show=True,
         outfile=None):
    db = connect()
    maxlag = float(get_config(db, 'maxlag'))
    start, end, datelist = build_movstack_datelist(db)

    dtt_lag = get_config(db, "dtt_lag")
    dtt_v = float(get_config(db, "dtt_v"))
    dtt_minlag = float(get_config(db, "dtt_minlag"))
    dtt_width = float(get_config(db, "dtt_width"))
    dtt_sides = get_config(db, "dtt_sides")
    minCoh = float(get_config(db, "dtt_mincoh"))
    maxErr = float(get_config(db, "dtt_maxerr"))
    maxDt = float(get_config(db, "dtt_maxdt"))

    def plot_lags(minlag, maxlag):
        plt.axhline(minlag, c='g')
        plt.axhline(-minlag, c='g')
        plt.axhline(maxlag, c='g')
        plt.axhline(-maxlag, c='g')

    if sta2 < sta1:
        print("Stations STA1 STA2 should be sorted alphabetically")
        return

    sta1 = check_stations_uniqueness(db, sta1)
    sta2 = check_stations_uniqueness(db, sta2)
    pair = "%s_%s" % (sta1, sta2)

    station1 = sta1.split(".")
    station2 = sta2.split(".")

    station1 = get_station(db, station1[0], station1[1])
    station2 = get_station(db, station2[0], station2[1])

    if dtt_lag == "static":
        minlag = dtt_minlag
    else:
        minlag = get_interstation_distance(station1, station2,
                                           station1.coordinates) / dtt_v
        print(minlag)

    maxlag2 = minlag + dtt_width

    print("New Data for %s-%s-%i-%i" % (pair, components, filterid, mov_stack))

    id = []
    alldt = []
    allcoh = []
    for day in datelist:
        fname = os.path.join('MWCS', "%02i" % filterid,
                             "%03i_DAYS" % mov_stack, components, pair,
                             '%s.txt' % day)
        if os.path.isfile(fname):
            df = pd.read_csv(fname,
                             delimiter=' ',
                             header=None,
                             index_col=0,
                             names=['t', 'dt', 'err', 'coh'])
            alldt.append(df["dt"])
            allcoh.append(df["coh"])
            id.append(day)
            del df
    print(len(alldt[0]))

    alldt = pd.DataFrame(alldt, index=pd.DatetimeIndex(id))
    allcoh = pd.DataFrame(allcoh, index=pd.DatetimeIndex(id))

    alldt = alldt.resample('D').mean()
    allcoh = allcoh.resample('D').mean()

    xextent = (date2num(id[0]), date2num(id[-1]), -maxlag, maxlag)

    gs = gridspec.GridSpec(2, 2, width_ratios=[3, 1], height_ratios=[1, 1])

    plt.figure()
    ax1 = plt.subplot(gs[0])
    im = plt.imshow(alldt.T,
                    extent=xextent,
                    aspect="auto",
                    interpolation='none',
                    origin='lower',
                    cmap=cm.seismic)
    cscale = np.nanpercentile(alldt, q=99)
    im.set_clim((cscale * -1, cscale))
    cb = plt.colorbar()
    cb.set_label('dt')
    plt.ylabel("Lag Time (s)")
    plt.axhline(0, lw=0.5, c='k')
    plt.grid()
    plt.title('%s : %s : dt' % (sta1, sta2))
    plot_lags(minlag, maxlag2)
    plt.setp(ax1.get_xticklabels(), visible=False)
    print(type(alldt))
    print(alldt)
    plt.subplot(gs[1], sharey=ax1)
    plt.plot(alldt.mean(axis=0), alldt.columns, c='k')
    plt.grid()
    plot_lags(minlag, maxlag2)
    plt.axvline(-maxDt, c='r', ls='--')
    plt.axvline(maxDt, c='r', ls='--')
    plt.xlabel('dt')
    plt.ylabel("Lag Time (s)")

    ax2 = plt.subplot(gs[2], sharex=ax1, sharey=ax1)
    plt.imshow(allcoh.T,
               extent=xextent,
               aspect="auto",
               interpolation='none',
               origin='lower',
               cmap='hot',
               vmin=minCoh,
               vmax=1)

    cb = plt.colorbar()
    cb.set_label('mean coherence')
    plt.ylabel("Lag Time (s)")
    plt.axhline(0, lw=0.5, c='k')
    plt.grid()
    locator = AutoDateLocator()
    ax2.xaxis.set_major_locator(locator)
    ax2.xaxis.set_major_formatter(AutoDateFormatter(locator))
    plt.setp(plt.xticks()[1], rotation=30, ha='right')
    plt.title('%s : %s : mean coherence' % (sta1, sta2))
    plot_lags(minlag, maxlag2)

    plt.subplot(gs[3], sharey=ax1)
    m = allcoh.mean(axis=0)
    s = allcoh.std(axis=0)
    plt.plot(m, allcoh.columns, c='k')
    plt.fill_betweenx(
        allcoh.columns,
        m - s,
        m + s,
        color='silver',
    )

    plt.grid()
    plot_lags(minlag, maxlag2)
    plt.axvline(minCoh, c='r', ls='--')
    plt.xlabel('Coherence')
    plt.ylabel("Lag Time (s)")

    name = '%s-%s f%i m%i' % (sta1, sta2, filterid, mov_stack)
    name = name.replace('_', '.')

    plt.suptitle(name)

    if outfile:
        if outfile.startswith("?"):
            pair = pair.replace(':', '-')
            outfile = outfile.replace(
                '?', '%s-%s-f%i-m%i' % (pair, components, filterid, mov_stack))
        outfile = "mwcs " + outfile
        print("output to:", outfile)
        plt.savefig(outfile)
    if show:
        plt.show()
def plot_reliability(data1, data2, nbins=10, cor='', maintitle="My Title", name_fig="diagram_att_T42.png", first=True, fig=None):
    '''
    Plot attribute diagram

    Usage: plot_reliability(data1, data2, nbins, maintitle)

    Inputs:

    data1: Vector of forecast probabilities for the event of
           interest (e.g. precip. in the upper tercile)

    data2: Vector of binary observations for the event of
           interest (e.g. precip. in the upper tercile)

       nbins: number of probability bins

       maintitle: String containing the text for the reliability diagram title

    Author: Caio Coelho <*****@*****.**>
    Python implementation: Arthur Costa <*****@*****.**>
    '''

    pf = data1
    probfcsts = np.array(pf)

    aux = probfcsts
    probfcsts = probfcsts[~np.isnan(aux)]

    binobs = data2
    binobs = np.array(binobs)
    binobs = binobs[~np.isnan(aux)]

    aux1 = binobs
    binobs = binobs[~np.isnan(aux1)]
    probfcsts = probfcsts[~np.isnan(aux1)]

    obar = np.nanmean(binobs)

    n = len(probfcsts)

    h1 = np.histogram(probfcsts, bins=np.arange(0., 1.+1./nbins, 1./nbins))[0]

    g1 = np.histogram(probfcsts[binobs == 1], bins=np.arange(0., 1.+1./nbins, 1./nbins))[0]

    h1 = np.array(h1).astype(float)

    g1 = np.array(g1).astype(float)

    obari = g1/h1

    obari[np.isnan(obari)] = 0

    # Computes reliability,resolution and uncertainty components of the
    # Brier score
    yi = np.arange((1. / nbins) / 2., 1. + 1. / nbins, 1. / nbins)[:-1]

    reliab = np.nansum(h1 * ((yi - obari)**2)) / n
    resol = np.nansum(h1 * ((obari - obar)**2)) / n
    uncert = obar * (1 - obar)

    bs = reliab - resol + uncert

    if first:
        fig = plt.figure()

    ax = fig.add_subplot(111)
    ax.bar(yi * 100, h1 / n * 100., (yi[1] - yi[0]) * 100, edgecolor=cor, linewidth=3, color='none', align='center')
    ax.plot(yi * 100, obari * 100, marker='o', markersize=7, markeredgecolor=cor, color=cor, linewidth=3)
    # ax.plot([0, 100], [0, 100], color='gray')

    if first:

        x_plot = np.arange(101)
        y_plot = (x_plot + (obar * 100)) / 2
        plt.fill_betweenx(x_plot, 2 * y_plot - (obar * 100), x2=obar * 100, color="gray", alpha=0.5)
        plt.fill_between(x_plot, x_plot, y_plot, color="gray", alpha=0.5)

        plt.axhline(obar * 100, color='gray', ls='--')
        plt.axvline(obar * 100, color='gray', ls='--')

        # plt.plot(range(-10, 110),(range(-10, 110) + (obar * 100)) / 2, color='g', ls='--')
        plt.plot(range(-10, 110),(range(-10, 110) + (obar * 100)) / 2, color='gray', ls='--')

        #~ plt.text(05, 95, "Rel: " + str(round(reliab,2)))
        #~ plt.text(05, 90, "Res: " + str(round(resol,2)))
        #~ plt.text(05, 85, "Unc: " + str(round(uncert,2)))
        #~ plt.text(05, 80, "BS: " + str(round(bs,2)))

        plt.text(05, 95, "ACIMA", color='blue')
        plt.text(05, 90, "NORMAL", color='y')
        plt.text(05, 85, "ABAIXO", color='red')

        plt.axis((0, 100, 0, 100))
        plt.ylabel(u'Frequência Relativa Observada (%)')
        plt.xlabel(u'Probabilidade da Previsão (%)')
        plt.title(maintitle)

    return fig, ax
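
# A hypothetical usage sketch for the reliability diagram above, using
# synthetic, well-calibrated forecasts (assumes plot_reliability and the
# module-level numpy/matplotlib imports are in scope; all values are made up):
import numpy as np
import matplotlib.pyplot as plt

rng = np.random.default_rng(3)
probs = rng.random(1000)                        # forecast probabilities in [0, 1]
obs = (rng.random(1000) < probs).astype(float)  # binary outcomes, calibrated by construction

fig, ax = plot_reliability(probs, obs, nbins=10, cor='blue',
                           maintitle='Reliability diagram (synthetic)', first=True)
plt.show()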
Esempio n. 53
0
        # Figure setup
        plt.figure(figsize=(15,7))  # figsize() sets a 15:7 width-to-height ratio; the image size scales with it
        lowlimit=min(view)-500      # lower y-axis limit
        highlimit=max(view)+500     # upper y-axis limit
        plt.ylim(lowlimit,highlimit)
        plt.grid(which='both')

        # Plot the raw data
        plt.plot(range(1,len(view)+1),view,color='y',lw=0.5,label='origin')
        plt.legend(loc='upper right')
        plt.xlabel('time (s)')
        plt.ylabel('views')

        # Annotate the highlight intervals when any exist
        if high:
            for item in high:
                plt.annotate('',xy=(item[1],1000),xytext=(item[0],1000),arrowprops=dict(arrowstyle="->",connectionstyle="arc3"))
                plt.fill_betweenx([lowlimit,highlimit],item[0], item[1], linewidth=1, alpha=0.2, color='r')

        # Save the result (could the figure data be returned and displayed directly instead of saving?)
        despath='D:\\hot_pic1'
        if not os.path.exists(despath):
            os.makedirs(despath)
        fname=os.path.join(despath,cid+'.'+str(filesize)+'.jpg')
        plt.savefig(fname,dpi = 300)
        plt.close()

if __name__ == "__main__":
    app.run()
    #dbx=DBO()
    #dbx.GET()
Esempio n. 54
0
    def Profile(self, coord_sys, axis, var, step):
        """ Plots stress/strain in LCS or MCS against Z vector. """

        # Sets plot dimension
        fig = plt.figure()
        #plt.figure(figsize=(10, 8))

        Z = clt.assemble_Z(self.lam)

        X = {
            "inf": np.zeros((self.num_layers)),
            "sup": np.zeros((self.num_layers))
        }
        Y = {
            "inf": np.zeros((self.num_layers)),
            "sup": np.zeros((self.num_layers))
        }

        P = np.zeros((self.num_layers * 2, 2))

        # Sets the proper names
        if coord_sys == "MCS":
            if axis == 0:
                axis_name = "1"
            elif axis == 1:
                axis_name = "2"
            else:
                axis_name = "6"
        else:
            if axis == 0:
                axis_name = "x"
            elif axis == 1:
                axis_name = "y"
            else:
                axis_name = "xy"

        # Formats the plot
        plt.title('Profile ' + coord_sys + '-' + axis_name + ' ' + var)
        plt.xlabel(var + ' (' + coord_sys + '-' + axis_name + ')')
        plt.ticklabel_format(style='sci', axis='x', scilimits=(0, 0))
        plt.ylabel('Z coordinate')
        plt.grid(True)

        # Iterates the layers, add data to plot
        for layer in range(self.num_layers):
            X["inf"][layer] = self.res[step][coord_sys][var]["inf"][axis][
                layer]
            Y["inf"][layer] = Z[layer]
            X["sup"][layer] = self.res[step][coord_sys][var]["sup"][axis][
                layer]
            Y["sup"][layer] = Z[layer + 1]
            P[layer * 2] = [X["inf"][layer], Y["inf"][layer]]
            P[layer * 2 + 1] = [X["sup"][layer], Y["sup"][layer]]
            plt.fill_betweenx([P[layer * 2, 1], P[layer * 2 + 1, 1]],
                              [P[layer * 2, 0], P[layer * 2 + 1, 0]],
                              hatch="//",
                              facecolor="none",
                              edgecolor="r",
                              lw=1.0)

        # Adds main lines
        plt.plot(P[:, 0], P[:, 1], color="r", lw=2.5)
        plt.plot([0] * (self.num_layers + 1), Z, color="b", lw=2.5)

        # Displays the plot
        if self.display:
            plt.show()

        if self.save:
            fig.savefig("plots/profile.png")

        plt.close(fig)
        """
Esempio n. 55
0
plt.vlines(confLevel95,
           ymin=0,
           ymax=1.8,
           color='red',
           label=r'$\alpha = 0.05$' + ' level')

#Creates a vertical line where the 99% critical value occurs
plt.vlines(confLevel99,
           ymin=0,
           ymax=1.8,
           color='purple',
           label=r'$\alpha = 0.01$' + ' level')

plt.fill_betweenx(yChi,
                  xChi,
                  criticalChi,
                  where=xChi > criticalChi,
                  color='orange')

plt.legend()

plt.show()
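
# The snippet above assumes a chi-square density (xChi, yChi) and critical
# values computed earlier in the script. A minimal, self-contained sketch of
# how such a shaded rejection region could be produced with scipy.stats (the
# degrees of freedom and variable names are illustrative):
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import chi2

df = 4                            # hypothetical degrees of freedom
xChi = np.linspace(0, 20, 500)
yChi = chi2.pdf(xChi, df)
criticalChi = chi2.ppf(0.95, df)  # 5% critical value

plt.plot(xChi, yChi, color='black')
plt.vlines(criticalChi, ymin=0, ymax=yChi.max(), color='red',
           label=r'$\alpha = 0.05$' + ' level')
# shade between the curve and the critical value over the rejection tail
plt.fill_betweenx(yChi, xChi, criticalChi,
                  where=xChi > criticalChi, color='orange')
plt.legend()
plt.show()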

#Summary Statistics for Raw Height Data

#The statistics table has the following measures:

# 1. Mean

# 2. Variance
Esempio n. 56
0
def oplotCentralRegion(plt, minR, maxR, minSM, maxSM, smLimit):
    # add shaded region on left to show central area
    plt.fill_betweenx(((smLimit - minSM) / (maxSM - minSM), 1),
                      0, (0 - minR) / (maxR - minR),
                      color='gray', alpha=0.1)
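
# The helper above appears to assume an axis whose data have already been
# rescaled to [0, 1]: the fractions convert the physical limits into those
# normalized coordinates before shading the left-hand (central) region above
# the stellar-mass limit. A hypothetical usage sketch under that assumption
# (all ranges below are invented):
import numpy as np
import matplotlib.pyplot as plt

minR, maxR = -2.0, 8.0    # hypothetical radius range
minSM, maxSM = 8.0, 12.0  # hypothetical stellar-mass range
smLimit = 10.0            # hypothetical stellar-mass limit

rng = np.random.default_rng(4)
r = rng.uniform(minR, maxR, 200)
sm = rng.uniform(minSM, maxSM, 200)

# plot in normalized [0, 1] coordinates, as the helper expects
plt.scatter((r - minR) / (maxR - minR), (sm - minSM) / (maxSM - minSM), s=5)
oplotCentralRegion(plt, minR, maxR, minSM, maxSM, smLimit)
plt.xlim(0, 1)
plt.ylim(0, 1)
plt.show()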
Esempio n. 57
0
File: n.py Project: knly/PAP2
plt.clf()
plt.title(u'Diagramm 3.1: Phasengang eines RC-Glieds')
plt.xlabel('Wechselstromfrequenz der Eingangsspannung $f \, [kHz]$')
plt.ylabel(r'Phasenverschiebung der Ausgangsspannung $\Phi \, [^\circ]$')
plt.xscale('log')
xspace = np.logspace(0.2,1)*const.kilo
yspace = np.linspace(0,90)
plt.ylim(yspace[0], yspace[-1])
plt.xlim(xspace[0]/const.kilo, xspace[-1]/const.kilo)
papstats.plot_data(f/const.kilo, phi, label='Messpunkte')
extrapolater = ip.UnivariateSpline(unp.nominal_values(f), unp.nominal_values(phi), k=3)
plt.plot(xspace/const.kilo, extrapolater(xspace), label='Extrapolation', color='b')
plt.axhline(45, color='black', label='$\Phi = 45^\circ$')
plt.axvline(f_G_theo.n/const.kilo, label='Theoretische Grenzfrequenz $f_G^{theo}$\nmit Fehlerbereich', color='g')
plt.fill_betweenx(yspace, (f_G_theo.n-f_G_theo.s)/const.kilo, (f_G_theo.n+f_G_theo.s)/const.kilo, color='g', alpha=0.2)
f_G_phas = unc.ufloat(3.09, 0.2)*const.kilo
plt.axvline(f_G_phas.n/const.kilo, color='r', label='Grenzfrequenz $'+papstats.pformat(f_G_phas/const.kilo, unit='kHz', label='f_G^{phas}')+'$\nnach Extrapolation mit Fehlerbereich')
plt.fill_betweenx(yspace, (f_G_phas.n-f_G_phas.s)/const.kilo, (f_G_phas.n+f_G_phas.s)/const.kilo, color='r', alpha=0.2)
plt.legend()
papstats.savefig_a4('3.1.png')


#####
print "# 4: Frequenzgang eines Serienschwingkreises"
#####

data = np.loadtxt('4.txt', skiprows=1, converters=dict.fromkeys(range(1,6), unc.ufloat_fromstr), dtype=object)
R, f_R, df, U_E, U_A = np.array(data[:,0], dtype=float), data[:,1]*const.kilo, (data[:,3]-data[:,2])*const.kilo, data[:,4], data[:,5]
R = R * unc.ufloat(1, 0.05)
Esempio n. 58
0
    def display_final(self):


        mpl.figure(self.label+' thinning')
        mpl.plot(self.tau, self.depth_mid, color=self.color_opt, label='Corrected +/-$\sigma$')
        mpl.plot(self.tau_model, self.depth_mid, color=self.color_mod, label='Model')
        mpl.fill_betweenx(self.depth_mid, self.tau-self.sigma_tau, self.tau+self.sigma_tau, color=self.color_ci)
#        mpl.plot(self.tau+self.sigma_tau, self.depth_mid, color='k', linestyle='-', label='+/- 1 sigma')
#        mpl.plot(self.tau-self.sigma_tau, self.depth_mid, color='k', linestyle='-')
        x1,x2,y1,y2 = mpl.axis()
        mpl.axis((x1,x2,self.depth_min,self.depth_max))
        mpl.legend(loc=4)
        mpl.ylim(mpl.ylim()[::-1])
        pp=PdfPages(self.label+'/thinning.pdf')
        pp.savefig(mpl.figure(self.label+' thinning'))
        pp.close()

        mpl.figure(self.label+' accumulation')
        if show_initial:
            mpl.step(self.age, np.concatenate((self.a_init, np.array([self.a_init[-1]]))), color=self.color_init, where='post', label='Initial')
        mpl.step(self.age, np.concatenate((self.a_model, np.array([self.a_model[-1]]))), color=self.color_mod, where='post', label='Model')
        mpl.step(self.age, np.concatenate((self.a, np.array([self.a[-1]]))), color=self.color_opt, where='post', label='Corrected +/-$\sigma$')
        mpl.fill_between(self.age[:-1], self.a-self.sigma_a, self.a+self.sigma_a, color=self.color_ci)
        x1,x2,y1,y2 = mpl.axis()
        mpl.axis((self.age_min,x2,y1,y2))
        mpl.legend()
        pp=PdfPages(self.label+'/accumulation.pdf')
        pp.savefig(mpl.figure(self.label+' accumulation'))
        pp.close()

        mpl.figure(self.label+' LIDIE')
        mpl.step(self.age, self.LIDIE_model, color=self.color_mod,where='post', label='Model')
        mpl.step(self.age, self.LIDIE, color=self.color_opt, where='post', label='Corrected +/-$\sigma$')
        mpl.fill_between(self.age, self.LIDIE-self.sigma_LIDIE, self.LIDIE+self.sigma_LIDIE, color=self.color_ci)
        x1,x2,y1,y2 = mpl.axis()
        mpl.axis((self.age_min,x2,y1,y2))
        mpl.legend()
        pp=PdfPages(self.label+'/LIDIE.pdf')
        pp.savefig(mpl.figure(self.label+' LIDIE'))
        pp.close()

        mpl.figure(self.label+' ice age')
        mpl.plot(self.age, self.depth, color=self.color_opt, label='Corrected +/-$\sigma$')
        mpl.plot(self.age_model, self.depth, color=self.color_mod, label='Model')
        mpl.fill_betweenx(self.depth, self.age-self.sigma_age, self.age+self.sigma_age , color=self.color_ci)
#        mpl.plot(self.age-self.sigma_age, self.depth, color='k', linestyle='-')
        mpl.plot(self.sigma_age*10, self.depth, color=self.color_sigma, label='$\sigma$ x10')   
        x1,x2,y1,y2 = mpl.axis()
        mpl.axis((x1,x2,self.depth_min,self.depth_max))    
        mpl.legend()
        x1,x2,y1,y2 = mpl.axis()
        mpl.axis((self.age_min,x2,y2,y1))
        pp=PdfPages(self.label+'/ice_age.pdf')
        pp.savefig(mpl.figure(self.label+' ice age'))
        pp.close()

        mpl.figure(self.label+' gas age')
        mpl.plot(self.gage, self.depth, color=self.color_opt, label='Corrected +/-$\sigma$')
        mpl.plot(self.gage_model, self.depth, color=self.color_mod, label='Model')
        mpl.fill_betweenx(self.depth, self.gage-self.sigma_gage, self.gage+self.sigma_gage , color=self.color_ci)
#        mpl.plot(self.gage+self.sigma_gage, self.depth, color='k', linestyle='-', label='+/- 1 sigma')
#        mpl.plot(self.gage-self.sigma_gage, self.depth, color='k', linestyle='-')
        mpl.plot(self.sigma_gage*10, self.depth, color=self.color_sigma, label='$\sigma$ x10')  
        x1,x2,y1,y2 = mpl.axis()
        mpl.axis((x1,x2,self.depth_min,self.depth_max))    
        mpl.legend()
        x1,x2,y1,y2 = mpl.axis()
        mpl.axis((self.age_min,x2,y2,y1))
        pp=PdfPages(self.label+'/gas_age.pdf')
        pp.savefig(mpl.figure(self.label+' gas age'))
        pp.close()

        mpl.figure(self.label+' Ddepth')
        mpl.plot(self.Ddepth_model, self.depth, color=self.color_mod, label='Model')
        mpl.plot(self.Ddepth, self.depth, color=self.color_opt, label='Corrected +/-$\sigma$')
        mpl.fill_betweenx(self.depth, self.Ddepth-self.sigma_Ddepth, self.Ddepth+self.sigma_Ddepth, color=self.color_ci)
#        mpl.plot(self.Ddepth+self.sigma_Ddepth, self.depth, color='k', linestyle='-', label='+/- 1 sigma')
#        mpl.plot(self.Ddepth-self.sigma_Ddepth, self.depth, color='k', linestyle='-')
        x1,x2,y1,y2 = mpl.axis()
        mpl.axis((x1,x2,self.depth_min,self.depth_max))
        mpl.legend(loc=4)
        mpl.ylim(mpl.ylim()[::-1])
        pp=PdfPages(self.label+'/Ddepth.pdf')
        pp.savefig(mpl.figure(self.label+' Ddepth'))
        pp.close()
                  enumerate(good_cluster_inds)][0]  # first cluster

times = np.arange(X[0].shape[1]) * tstep * 1e3

plt.figure()
colors = ['y', 'b', 'g', 'purple']
event_ids = ['l_aud', 'r_aud', 'l_vis', 'r_vis']

for ii, (condition, color, eve_id) in enumerate(zip(X, colors, event_ids)):
    # extract time course at cluster vertices
    condition = condition[:, :, inds_v]
    # normally we would normalize values across subjects but
    # here we use data from the same subject so we're good to just
    # create average time series across subjects and vertices.
    mean_tc = condition.mean(axis=2).mean(axis=0)
    std_tc = condition.std(axis=2).std(axis=0)
    plt.plot(times, mean_tc.T, color=color, label=eve_id)
    plt.fill_between(times, mean_tc + std_tc, mean_tc - std_tc, color='gray',
                     alpha=0.5, label='')

ymin, ymax = mean_tc.min() - 5, mean_tc.max() + 5
plt.xlabel('Time (ms)')
plt.ylabel('Activation (F-values)')
plt.xlim(times[[0, -1]])
plt.ylim(ymin, ymax)
plt.fill_betweenx((ymin, ymax), times[inds_t[0]],
                  times[inds_t[-1]], color='orange', alpha=0.3)
plt.legend()
plt.title('Interaction between stimulus-modality and location.')
plt.show()
Esempio n. 60
0
def main(sta1, sta2, filterid, components, mov_stack=1, show=True, outfile=None):
    db = connect()
    maxlag = float(get_config(db, "maxlag"))
    start, end, datelist = build_movstack_datelist(db)

    dtt_lag = get_config(db, "dtt_lag")
    dtt_v = float(get_config(db, "dtt_v"))
    dtt_minlag = float(get_config(db, "dtt_minlag"))
    dtt_width = float(get_config(db, "dtt_width"))
    dtt_sides = get_config(db, "dtt_sides")
    minCoh = float(get_config(db, "dtt_mincoh"))
    maxErr = float(get_config(db, "dtt_maxerr"))
    maxDt = float(get_config(db, "dtt_maxdt"))

    def plot_lags(minlag, maxlag):
        plt.axhline(minlag, c="g")
        plt.axhline(-minlag, c="g")
        plt.axhline(maxlag, c="g")
        plt.axhline(-maxlag, c="g")

    sta1 = sta1.replace(".", "_")
    sta2 = sta2.replace(".", "_")
    if sta2 > sta1:  # alphabetical order filtering!
        pair = "%s_%s" % (sta1, sta2)
        station1 = sta1.split("_")
        station2 = sta2.split("_")

        station1 = get_station(db, station1[0], station1[1])
        station2 = get_station(db, station2[0], station2[1])

        if dtt_lag == "static":
            minlag = dtt_minlag
        else:
            minlag = get_interstation_distance(station1, station2, station1.coordinates) / dtt_v
            print(minlag)

        maxlag2 = minlag + dtt_width

        print("New Data for %s-%s-%i-%i" % (pair, components, filterid, mov_stack))
        format = "matrix"

        alldf = []
        id = []
        alldt = []
        allcoh = []
        for day in datelist:
            fname = os.path.join("MWCS", "%02i" % filterid, "%03i_DAYS" % mov_stack, components, pair, "%s.txt" % day)
            # ~ print fname
            if os.path.isfile(fname):
                df = pd.read_csv(fname, delimiter=" ", header=None, index_col=0, names=["t", "dt", "err", "coh"])
                alldt.append(df["dt"])
                allcoh.append(df["coh"])
                id.append(day)
                del df
        print(len(alldt[0]))

        alldt = pd.DataFrame(alldt, index=pd.DatetimeIndex(id))
        allcoh = pd.DataFrame(allcoh, index=pd.DatetimeIndex(id))

        crange = [np.amin(alldt.values), np.amax(alldt.values)]

        alldt = alldt.resample("D", how="mean")
        allcoh = allcoh.resample("D", how="mean")

        xextent = (date2num(id[0]), date2num(id[-1]), -maxlag, maxlag)

        gs = gridspec.GridSpec(2, 2, width_ratios=[3, 1], height_ratios=[1, 1])

        plt.figure()
        ax = plt.subplot(gs[0])
        plt.imshow(
            alldt.T,
            extent=xextent,
            aspect="auto",
            interpolation="none",
            origin="lower",
            cmap=cmap_center_point_adjust(cm.seismic, crange, 0),
        )
        cb = plt.colorbar()
        cb.set_label("dt")
        plt.ylabel("Lag Time (s)")
        plt.axhline(0, lw=0.5, c="k")
        plt.grid()
        ax.xaxis.set_major_locator(YearLocator())
        ax.xaxis.set_major_formatter(DateFormatter("%Y-%m-%d"))
        plt.title("%s : %s : dt" % (sta1, sta2))
        plot_lags(minlag, maxlag2)

        plt.subplot(gs[1])
        plt.plot(alldt.mean(axis=0), alldt.columns, c="k")
        plt.grid()
        plot_lags(minlag, maxlag2)
        plt.axvline(-maxDt, c="r", ls="--")
        plt.axvline(maxDt, c="r", ls="--")
        plt.xlabel("dt")
        plt.ylabel("Lag Time (s)")

        ax = plt.subplot(gs[2], sharex=ax, sharey=ax)
        plt.imshow(
            allcoh.T,
            extent=xextent,
            aspect="auto",
            interpolation="none",
            origin="lower",
            cmap="hot",
            vmin=minCoh,
            vmax=1,
        )
        # ~ plt.imshow(allcoh.T, extent=xextent,aspect="auto",interpolation='none',origin='lower',cmap='hot')
        cb = plt.colorbar()
        cb.set_label("mean coherence")
        plt.ylabel("Lag Time (s)")
        plt.axhline(0, lw=0.5, c="k")
        plt.grid()
        ax.xaxis.set_major_locator(YearLocator())
        ax.xaxis.set_major_formatter(DateFormatter("%Y-%m-%d"))
        plt.title("%s : %s : mean coherence" % (sta1, sta2))
        plot_lags(minlag, maxlag2)

        plt.subplot(gs[3])
        m = allcoh.mean(axis=0)
        s = allcoh.std(axis=0)
        plt.plot(m, allcoh.columns, c="k")
        plt.fill_betweenx(allcoh.columns, m - s, m + s, color="silver")

        plt.grid()
        plot_lags(minlag, maxlag2)
        plt.axvline(minCoh, c="r", ls="--")
        plt.xlabel("Coherence")
        plt.ylabel("Lag Time (s)")

        name = "%s-%s f%i m%i" % (sta1, sta2, filterid, mov_stack)
        name = name.replace("_", ".")

        plt.suptitle(name)

        if outfile:
            if outfile.startswith("?"):
                pair = pair.replace(":", "-")
                outfile = outfile.replace("?", "%s-%s-f%i-m%i" % (pair, components, filterid, mov_stack))
            outfile = "mwcs " + outfile
            print("output to:", outfile)
            plt.savefig(outfile)
        if show:
            plt.show()