Example 1
def pltAgainst(a, label, clr):
    """Plot the low_25, high_25 and medians series from dict `a` against X[:, 1], with percent y-labels."""
    X = a['X']
    low_25 = a['low_25']
    high_25 = a['high_25']
    medians = a['medians']

    plt.plot(X[:, 1], low_25, ':', color=clr)
    plt.plot(X[:, 1], high_25, ':', color=clr)
    plt.plot(X[:, 1], medians, color=clr, label=label)
    plt.gca().yaxis.set_major_formatter(
        plt.FuncFormatter(lambda x, pos: '%0.2f%%' % (x * 100)))
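For the simple percentage labels used above, matplotlib's built-in matplotlib.ticker.PercentFormatter gives the same result without a hand-written lambda. A minimal standalone sketch with invented data:

import numpy as np
import matplotlib.pyplot as plt
from matplotlib.ticker import PercentFormatter

fig, ax = plt.subplots()
ax.plot(np.arange(10), np.linspace(0.0, 0.35, 10))  # invented data
# xmax=1.0 tells the formatter that a value of 1.0 corresponds to 100%
ax.yaxis.set_major_formatter(PercentFormatter(xmax=1.0, decimals=2))
plt.show()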
Example 2
def plot_land_cover(lc_data,year,country_code,cmap='tab20b'):
    table = '''
    |Water Bodies|0|At least 60% of area is covered by permanent water bodies.|
    |Grasslands|1|Dominated by herbaceous annuals (<2m) including cereal croplands.|
    |Shrublands|2|Shrub (1-2m) cover >10%.|
    |Broadleaf Croplands|3|Dominated by herbaceous annuals (<2m) that are cultivated with broadleaf crops.|
    |Savannas|4|Between 10-60% tree cover (>2m).|
    |Evergreen Broadleaf Forests|5|Dominated by evergreen broadleaf and palmate trees (>2m). Tree cover >60%.|
    |Deciduous Broadleaf Forests|6|Dominated by deciduous broadleaf trees (>2m). Tree cover >60%.|
    |Evergreen Needleleaf Forests|7|Dominated by evergreen conifer trees (>2m). Tree cover >60%.|
    |Deciduous Needleleaf Forests|8|Dominated by deciduous needleleaf (larch) trees (>2m). Tree cover >60%.|
    |Non-Vegetated Lands|9|At least 60% of area is non-vegetated barren (sand, rock, soil) or permanent snow and ice with less than 10% vegetation.|
    |Urban and Built-up Lands|10|At least 30% impervious surface area including building materials, asphalt, and vehicles.|
    |Unclassified|255|Has not received a map label because of missing inputs.|
    '''

    LC_Type3 = np.array([s.split('|')[1:-1] for s in table.split('\n')[1:-1]]).T
    

    '''
    First, let's get the codes and names of the LCs used
    '''
    flc_data = lc_data.astype(float)
    flc_data[lc_data == 255] = np.nan
    land_covers_present = np.unique(lc_data[lc_data!=255])
    land_cover_names = LC_Type3[0]
    '''
    For categorical data we want a qualitative colormap

    The core options are:
    https://matplotlib.org/tutorials/colors/colormaps.html

    qcmaps = ['Pastel1', 'Pastel2', 'Paired', 'Accent',
             'Dark2', 'Set1', 'Set2', 'Set3',
              'tab10', 'tab20', 'tab20b', 'tab20c']
    '''

    '''
    Now learn how to plot with categorical labels
    following example in
    https://gist.github.com/jakevdp/8a992f606899ac24b711
    FuncFormatter to put labels 
    '''

    ncov = land_covers_present.max()
    # This function formatter will replace integer tick values with land-cover names
    formatter = plt.FuncFormatter(lambda val, loc: land_cover_names[int(val)])
    plt.figure(figsize=(10, 10))
    plt.title(f'MODIS LAI Land cover LC_Type3 from MCD12Q1 {year} {country_code}')
    plt.imshow(flc_data, vmin=0, vmax=ncov,
               cmap=plt.cm.get_cmap(cmap, ncov))
    plt.colorbar(ticks=np.arange(ncov + 2).astype(int),
                 format=formatter)
    return land_cover_names[land_covers_present]
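The comments above describe the general recipe: a qualitative colormap plus a FuncFormatter that turns colorbar tick values into class names. A minimal standalone sketch of that pattern, using invented classes instead of the MCD12Q1 product:

import numpy as np
import matplotlib.pyplot as plt

class_names = np.array(['water', 'grass', 'shrub', 'forest'])  # invented classes
data = np.random.randint(0, len(class_names), size=(50, 50))   # invented class map

cmap = plt.get_cmap('tab20b', len(class_names))                # one colour per class
formatter = plt.FuncFormatter(lambda val, loc: class_names[int(val)])

plt.imshow(data, vmin=-0.5, vmax=len(class_names) - 0.5, cmap=cmap)
plt.colorbar(ticks=np.arange(len(class_names)), format=formatter)
plt.show()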
Example 3
def plot_evolutions(sols, ts, ax):
    """ Plot system evolution
    """
    ax.imshow(sols, aspect='auto', cmap=plt.cm.gray, interpolation='nearest')

    # let x-labels correspond to actual time steps
    ts = np.append(ts, ts[-1] + (ts[-1] - ts[-2]))
    formatter = plt.FuncFormatter(lambda x, pos: int(ts[int(x)]) if 0 <= int(x) < len(ts) else '')
    ax.xaxis.set_major_formatter(formatter)

    ax.set_xlabel(r'$t$')
    ax.set_ylabel(r'$\Theta_i$')
Example 4
    def call(self):

        self.cleanup()
        viewer = self.get_viewer()

        los = self.t_los1, self.t_los2, self.t_los3 # unit vector
        wavelength = self.t_wavelength # meter C-Band
        extent = -self.t_ext, self.t_ext, -self.t_ext, self.t_ext # meter (xmin,xmax,ymin,ymax)

        fault = okada.OkadaSource(
          strike=self.t_strike, dip=self.t_dip, rake=self.t_strike, # degree
          slip=self.t_slip, # meter
          ztop=self.t_ztop, zbottom=self.t_zbot, length=self.t_length, # meter
          xtrace=self.t_xtrace, ytrace=self.t_ytrace ) # meter

        Y, X = numpy.meshgrid(
          numpy.linspace( extent[2], extent[3], 500 ),
          numpy.linspace( extent[0], extent[1], 500 ) )

        XYZ = numpy.array([ X, Y, numpy.zeros_like(X) ]).T

        disp = fault.displacement( XYZ, poisson=.25 )

        disp_los = numpy.dot( disp, los )
        phase = ( numpy.mod( disp_los / ( .5 * wavelength ) * 2 + 1, 2 ) - 1 ) * numpy.pi

        if self.fig is None or self.fframe.closed is True or not self._live_update:
            self.fframe = self.pylab(get='figure_frame')
            self.fig = self.fframe.gcf()

        if self._live_update:
            self.fig.clf()

        ax = self.fig.add_subplot(111)
        ax.imshow( phase, extent=extent, cmap=plt.cm.jet, origin='lower' )

        if not self._live_update:
            dx = numpy.array((-.5,.5)) * fault.length * numpy.sin( fault.strike * numpy.pi / 180 )
            dy = numpy.array((-.5,.5)) * fault.length * numpy.cos( fault.strike * numpy.pi / 180 )

            ax.plot( fault.xtrace + dx, fault.ytrace + dy, 'w-', linewidth=5, solid_capstyle='round' )
            ax.plot( fault.xtrace + dx, fault.ytrace + dy, 'k--', linewidth=2, dash_capstyle='round' )

        formatter = plt.FuncFormatter( lambda x, pos: '%dkm' % int( x / 1e3 ) if x else '0' )
        ax.xaxis.set_major_formatter( formatter )
        ax.yaxis.set_major_formatter( formatter )

        ax.grid()
        self.fig.canvas.draw()

        if self._live_update:
            self.fig.canvas.show()
Example 5

plt.close()

# Finally, apply trend and seasonality back onto the fitted values of results_arima
predictions_arima_diff = pd.Series(results_arima.fittedvalues, copy=True)
predictions_arima_diff_cumsum = predictions_arima_diff.cumsum()
predictions_arima_log = pd.Series(fa_log.iloc[0], index=fa_log.index)
predictions_arima_log = predictions_arima_log.add(predictions_arima_diff_cumsum, fill_value=0)
predictions_arima = np.exp(predictions_arima_log)

# Check if the prediction algorithm is plausible and not overfitting the data set - looks good here
fa.plot(label='Actual Data')
predictions_arima.plot(label='Predictions ARIMA')
plt.legend(loc='best')
plt.ylabel('Patients per Quarter')
ax = plt.gca()
ax.get_yaxis().set_major_formatter(plt.FuncFormatter(lambda x, loc: "{:,}".format(int(x))))
plt.title('ARIMA Plausibility Check for: %s' % selection)
plt.savefig('./plots/ARIMA_plausibility_%s.png' % selection)
plt.show(block=True)
plt.close()

# Predict future values
# Extend Data Frame (I've hardcoded the dates here - It could be done more elegantly in the future)
start = dt.datetime.strptime("2017-03-31", "%Y-%m-%d")
date_list = pd.to_datetime(['2017-03-31', '2017-06-30', '2017-09-30', '2017-12-31', '2018-03-31'])
future = pd.DataFrame(index=date_list, columns=['Forecast'])
fa = pd.concat([fa, future])

# Calculate predicted values and put them back into original scale
future_predictions = results_arima.predict(start=16, end=21, dynamic=True)
print(future_predictions)
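The comment above promises to put the predictions back into the original scale, but the snippet stops at printing the differenced log-scale forecasts. A hedged sketch of one way to finish that step, assuming fa_log and future_predictions are as defined above and mirroring the cumsum + exp transform used for the fitted values:

# Assumption: future_predictions are forecasts of the differenced log series.
last_log_value = fa_log.iloc[-1]                    # last observed log level
future_log = last_log_value + future_predictions.cumsum()
future_original_scale = np.exp(future_log)          # undo the log transform
print(future_original_scale)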
Example 6
def formatter(self):
    return plt.FuncFormatter(
        multiple_formatter(self.denominator, self.number, self.latex))
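multiple_formatter itself is not shown in this snippet. Judging from the parameter names, it presumably returns a callback that labels ticks as rational multiples of `number` (typically pi) using the given LaTeX symbol; the sketch below is only a plausible reconstruction under that assumption, not the original helper.

import numpy as np
from math import gcd

def multiple_formatter(denominator=2, number=np.pi, latex=r'\pi'):
    # Plausible sketch (assumption): label tick x as the reduced fraction
    # (x / number) with denominator `denominator`, rendered in LaTeX.
    def _formatter(x, pos):
        den = denominator
        num = int(np.rint(den * x / number))
        common = gcd(abs(num), den) or 1
        num, den = num // common, den // common
        if num == 0:
            return r'$0$'
        if den == 1:
            if num == 1:
                return r'$%s$' % latex
            if num == -1:
                return r'$-%s$' % latex
            return r'$%d%s$' % (num, latex)
        sign = '-' if num < 0 else ''
        num = abs(num)
        frac = r'\frac{%s}{%d}' % (latex if num == 1 else '%d%s' % (num, latex), den)
        return r'$%s%s$' % (sign, frac)
    return _formatter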
Example 7
def plot_HR(Ls=10**np.random.uniform(-5, 6, 200) * Lsun,
            Ts=np.random.uniform(0.25, 10, 200) * Tsun,
            Tmin=1000,
            Tmax=40000,
            Lmin=1e-5,
            Lmax=1e6):

    T, Mag = np.loadtxt('./data/stars.txt', usecols=[0, 1], unpack=True)
    L = Lsun * 10**(0.4 * (4.74 - Mag))

    y, x = np.mgrid[np.log10(Lmin):np.log10(Lmax):100j,
                    np.log10(Tmin):np.log10(Tmax):100j]
    size = np.sqrt(10**(y) * (Tsun / (10**x))**4)
    temp = x
    fig, ax = plt.subplots(figsize=(10, 12))
    ax.set_ylim([y.min(), y.max()])
    ax.set_xlim([x.min(), x.max()])

    lgLsun = np.log10(1)
    lgTsun = np.log10(Tsun)

    lgL = np.log10(L / Lsun)
    lgT = np.log10(T)
    R = np.sqrt(10**(lgL) * (Tsun / (10**lgT))**4)

    lgLs = np.log10(Ls / Lsun)
    lgTs = np.log10(Ts)
    Rs = np.sqrt(10**(lgLs) * (Tsun / (10**lgTs))**4)

    levels = [0.001, 0.01, 0.1, 1, 10, 100, 1000]
    CS = ax.contour(x, y, size, colors='k', levels=levels)

    fmt = {}
    strs = [str(level) + r' $R_{\odot}$ ' for level in levels]
    for l, s in zip(CS.levels, strs):
        fmt[l] = s

    plt.clabel(CS, inline=1, fmt=fmt, fontsize=10)

    s0 = 50  #size of sun

    ax.set_xlabel(r'surface temperature $T_{\star}$ [K]', fontsize=20)
    ax.set_ylabel(r'luminosity $L_{\star}$ [$L_{_\odot}$]', fontsize=20)
    ax.set_title('Hertzsprung-Russell diagram', fontsize=20)
    ax.scatter(lgT, lgL, s=R * s0, c=T, cmap='magma_r')
    ax.scatter(lgTs[0], lgLs[0], s=s0 * 4, color='green')
    ax.scatter(lgTs[0], lgLs[0], s=s0 * 4, color='goldenrod', marker=(5, 2))
    ax.annotate("Object Nr. 1", (lgTs[0] + .1, lgLs[0] + .3),
                fontsize=18,
                color='green')
    ax.scatter(lgTs[1], lgLs[1], s=s0 * 4, color='red')
    ax.scatter(lgTs[1], lgLs[1], s=s0 * 4, color='goldenrod', marker=(5, 2))
    ax.annotate("Object Nr. 2", (lgTs[1] + .1, lgLs[1] + .3),
                fontsize=18,
                color='red')
    ax.scatter(lgTs[2], lgLs[2], s=s0 * 4, color='blue')
    ax.scatter(lgTs[2], lgLs[2], s=s0 * 4, color='goldenrod', marker=(5, 2))
    ax.annotate("Object Nr. 3", (lgTs[2] + .1, lgLs[2] + .3),
                fontsize=18,
                color='blue')

    #ax.scatter(lgTsun,lgLsun,c=T,s=s0,cmap='magma_r')

    def format_func_x(value, tick_number):
        # tick values are log10(T); convert back to temperature for the label
        res = 10**value
        return r'{:0.0f}'.format(res)

    def format_func_y(value, tick_number):
        # tick values are log10(L / Lsun); convert back to luminosity for the label
        res = 10**value
        return r'{:0.4f}'.format(res)

    ax.yaxis.set_major_formatter(plt.FuncFormatter(format_func_y))
    ax.xaxis.set_major_formatter(plt.FuncFormatter(format_func_x))
    ax.invert_xaxis()
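A design note: the function above plots log10(T) and log10(L) on linear axes and uses FuncFormatter to print the un-logged values. A sketch of the alternative (an assumption, not part of the original code) is to plot the raw values on log-scaled axes and let matplotlib label them:

import numpy as np
import matplotlib.pyplot as plt

T = np.random.uniform(2000, 30000, 100)   # invented temperatures [K]
L = 10**np.random.uniform(-3, 4, 100)     # invented luminosities [L_sun]

fig, ax = plt.subplots()
ax.scatter(T, L, c=T, cmap='magma_r')
ax.set_xscale('log')
ax.set_yscale('log')
ax.invert_xaxis()
ax.set_xlabel(r'surface temperature $T_{\star}$ [K]')
ax.set_ylabel(r'luminosity $L_{\star}$ [$L_{\odot}$]')
plt.show()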
Example 8
def main(symbols,
         sDate,
         eDate,
         maxDaysHeld,
         numSim=5000,
         daySpacing=10,
         startDays=10,
         cumulative=True,
         outputFilename='simulation.dat',
         silent=False):
    ###########
    # Multiprocessing Stuff
    #raise KeyError

    # Establish communication queues
    tasks = multiprocessing.Queue()
    results = multiprocessing.Queue()

    # Start Workers
    num_workers = multiprocessing.cpu_count() * 1
    if not silent:
        print 'Creating %d workers' % num_workers
    consumers = [Worker(tasks, results) for i in xrange(num_workers)]
    for w in consumers:
        w.start()

    # Enqueue jobs
    num_jobs = 0

    if type(symbols) is str:
        # Pull only a single stock
        df = grabSymbol(symbols, sDate, eDate)
    else:
        # Pull portfolio with tuples in form of ('symbol', pctAllocation)
        if abs(sum([a[1] for a in symbols]) - 1) > 0.01:
            raise RuntimeError('Sum of allocations must be 1.0')
        df = pandas.DataFrame()
        for symbol, pctAlloc in symbols:
            df = df.add(grabSymbol(symbol, sDate, eDate) * pctAlloc,
                        fill_value=0)

    for daysHeld in range(startDays, maxDaysHeld, daySpacing):
        tasks.put(Task(daysHeld, numSim, df, cumulative))
        num_jobs += 1
    # Add a poison pill for each worker
    for i in xrange(num_workers):
        tasks.put(None)

    # Start printing results
    jobsTime = pandas.Series(pandas.datetime.now())
    means = np.zeros(num_jobs)
    medians = np.zeros(num_jobs)
    low_25 = np.zeros(num_jobs)
    high_25 = np.zeros(num_jobs)
    wastedTime = np.zeros(num_jobs)

    while num_jobs:
        ans = results.get()
        if isinstance(ans, ErrorHolder):
            # We got an error; report it and skip this result
            print ans
            num_jobs -= 1
            continue
        (mn, mds, Xn, Yn, Cn, dH, l_25, h_25, w) = ans
        ix = (dH - startDays) / daySpacing
        means[ix] = mn
        medians[ix] = mds
        low_25[ix] = l_25
        high_25[ix] = h_25
        wastedTime[ix] = w

        if 'X' not in vars().keys():
            # Initialize result matrices
            X = np.zeros([num_jobs, int(Xn.shape[0])])
            Y = np.zeros([num_jobs, int(Yn.shape[0])])
            C = np.zeros([num_jobs, int(Cn.shape[0])])
        X[ix, :] = Xn
        Y[ix, :] = Yn
        C[ix, :] = Cn

        num_jobs -= 1
        jobsTime = jobsTime.append(pandas.Series(pandas.datetime.now()))
        if not silent:
            print "Jobs left: %s (ETA %0.2f min)"% \
                (num_jobs,
                 np.average([(jobsTime.iloc[n] - jobsTime.iloc[n-1]).seconds
                             for n in range(1,len(jobsTime))])*num_jobs/60.0 )

    # Plot Results
    # Convert time (X) to years
    X = X.astype('f') / 365.25

    if not silent:
        plt.xkcd()
        plt.pcolor(X, Y, C, cmap=plt.cm.YlOrRd)
        #means = pandas.rolling_mean(pandas.Series(means), 10)
        plt.plot(X[:, 1], means, color='k', label='Mean')
        plt.plot(X[:, 1], medians, color='b', label='Median')
        plt.plot(X[:, 1], low_25, color='g', label='Low25')
        plt.plot(X[:, 1], high_25, color='g', label='High25')
        plt.colorbar().set_label('Probability Higher/Lower than Median')
        plt.legend(loc='upper left')
        plt.xlabel('Years')
        plt.ylabel('Return')
        plt.grid(axis='y')
        plt.gca().yaxis.set_major_formatter(
            plt.FuncFormatter(lambda x, pos: '%0.2f%%' % (x * 100)))
        plt.show()

        plt.figure()
        plt.plot(X[:, 1], wastedTime)
        plt.title("Average number of days I lost waiting to invest")
        plt.show()

    # Save data to dat file for reading by my plotMultipleGainsOverTime
    if not silent:
        print "Exporing data to file %s..." % outputFilename
    import shelve
    sh = shelve.open(outputFilename)
    sh['X'] = X
    sh['Y'] = Y
    sh['C'] = C
    sh['means'] = means
    sh['medians'] = medians
    sh['low_25'] = low_25
    sh['high_25'] = high_25
    sh['wastedTime'] = wastedTime
    sh['portfolio'] = symbols
    sh.close()

    return high_25, means, medians, low_25
Example 9
cnts = []
for p in parts:
    cnts = cnts + [[p, data[data['Part Number'] == p].shape[0]]]

# creating and sorting dataframe for seaborn plotting
df = pd.DataFrame(data=cnts, columns=['Part Number', 'Count'])
df_sort = df.sort_values(by='Count', ascending=False)

# trimming to top10
top10 = df_sort.head(10)

plt.figure(figsize=(12, 8))
# plot barh chart with index as x values
ax = sns.barplot(x=top10['Part Number'], y=top10['Count'], palette='Oranges_r')
ax.get_yaxis().set_major_formatter(
    plt.FuncFormatter(lambda x, loc: "{:,}".format(int(x))))
ax.set(xlabel="Part Number", ylabel='Count')
ax.set_title('Top 10 QN Counts for Part Numbers - Past Two Weeks')
ax.set_xticklabels(top10["Part Number"])
for item in ax.get_xticklabels():
    item.set_rotation(70)
for i, v in enumerate(top10["Count"].iteritems()):
    ax.text(i, v[1], "{:,}".format(v[1]), color='m', va='bottom', rotation=45)

### GETTING TRENDS FOR QN COUNTS OVER TWO MONTHS
pn10 = top10['Part Number']

df10 = datas[datas['Part Number'].isin(pn10)]

#trends = []
#for j in df10['julian-date'].unique():
Example 10

# (fragment: `ticker` is matplotlib.ticker and `ax` is a grid of subplots created elsewhere)
loc = ticker.MultipleLocator(
    base=0.04)  # this locator puts ticks at regular intervals
[ax[i, 6].xaxis.set_major_locator(loc) for i in range(3)]


def format_func(value, tick_number):
    # show zero as '0'; pass all other tick values through unchanged
    if value == 0.0:
        return '0'
    else:
        return value


ax[0, 1].xaxis.set_major_formatter(plt.FuncFormatter(format_func))
ax[0, 4].xaxis.set_major_formatter(plt.FuncFormatter(format_func))
ax[0, 5].xaxis.set_major_formatter(plt.FuncFormatter(format_func))
ax[0, 6].xaxis.set_major_formatter(plt.FuncFormatter(format_func))

ax[0, 0].set_ylabel("Mathematics")
ax[1, 0].set_ylabel("Reading")
ax[2, 0].set_ylabel("Writing")

ax[0, 0].set_xlim([0.82, 1.78])
ax[0, 1].set_xlim([-0.05, 0.06])
ax[0, 3].set_xlim([0.33, -2.3])
ax[0, 4].set_xlim([0.023, -0.07])
ax[0, 5].set_xlim([0.03, -0.06])
ax[0, 6].set_xlim([0.03, -0.06])
Example 11

import matplotlib as mpl
import matplotlib.pylab as plt
import numpy as np
from sklearn.datasets import load_iris  # needed for load_iris() below
#import csv
#f = open('')

iris = load_iris()  # sample dataset load
# The object returned by load_iris() behaves much like a Python dictionary.
print('iris feature', iris.data)
# iris.data holds the features of each flower.
print(iris.keys)
print("Keys of iris_dataset: \n{}".format(iris.keys()))
features = iris.data
features_names = iris.feature_names
target = iris.target
target_names = iris.target_names

x_index = 0
y_index = 1

formatter = plt.FuncFormatter(lambda i, *args: iris.target_names[int(i)])
# lambda expression
plt.figure(figsize=(5, 4))
plt.scatter(iris.data[:, x_index], iris.data[:, y_index], c=iris.target)
plt.colorbar(ticks=[0, 1, 2], format=formatter)
plt.xlabel(iris.feature_names[x_index])
plt.ylabel(iris.feature_names[y_index])

plt.tight_layout()
plt.show()
Example 12
def gsea_plot(rank_metric, enrich_term, hit_ind, nes, pval, fdr, RES,
              phenoPos=None, phenoNeg=None, figsize =(6.5,6), **kwarg):
    """This is the main function for reproducing the gsea plot.

    :param rank_metric: rankings, rank_metric['rank'].values.
    :param enrich_term: gene_set name
    :param hit_ind: hit indices of rank_metric['gene_name'] present in gene set S.
    :param nes: Normalized enrichment scores.
    :param pval: nominal p-value.
    :param fdr: false discovery rate.
    :param RES: running enrichment scores of all genes in rank_metric['gene_name'].
    :param phenoPos: phenotype label, positively correlated.
    :param phenoNeg: phenotype label, negatively correlated.
    :param figsize: matplotlib figsize.
    :return: fig object of gsea plot.
    """
    #plt.style.use('classic')
    # center color map at midpoint = 0
    norm = _MidpointNormalize(midpoint=0)

    #dataFrame of ranked matrix scores
    x = rank_metric.index.values
    #figsize = (6,6)
    phenoP_label = phenoPos + ' (Positively Correlated)'
    phenoN_label = phenoNeg + ' (Negatively Correlated)'
    zero_score_ind = np.abs(rank_metric['rank']).argmin()
    z_score_label = 'Zero score at ' + str(zero_score_ind)
    nes_label = 'NES: '+ "{:.3f}".format(float(nes))
    pval_label = 'Pval: '+ "{:.3f}".format(float(pval))
    fdr_label = 'FDR: '+ "{:.3f}".format(float(fdr))
    #im_matrix = rank_metric.ix[:,1:].T
    im_matrix = rank_metric.iloc[:,1:].T

    # In most cases we will have many plots, so do not display them.
    # It's also convenient to run this script from the command line.
    plt.ioff()
    #GSEA Plots
    gs = plt.GridSpec(16,1)
    fig = plt.figure(figsize=figsize)
    #Ranked Metric Scores Plot
    ax1 =  fig.add_subplot(gs[11:])
    ax1.fill_between(x, y1= rank_metric['rank'], y2=0, color='#C9D3DB')
    ax1.set_ylabel("Ranked list metric",fontsize=14)
    ax1.text(.05, .9, phenoP_label, color='red', horizontalalignment='left', verticalalignment='top',
         transform=ax1.transAxes)
    ax1.text(.95, .05, phenoN_label, color='Blue', horizontalalignment='right', verticalalignment='bottom',
         transform=ax1.transAxes)

    # the x coords of this transformation are data, and the y coord are axes
    trans1 = transforms.blended_transform_factory(ax1.transData, ax1.transAxes)
    ax1.vlines(zero_score_ind, 0, 1, linewidth=.5, transform=trans1, linestyles='--', color='grey')
    ax1.text(zero_score_ind, 0.5, z_score_label, horizontalalignment='center', verticalalignment='center',
             transform=trans1)
    ax1.set_xlabel("Rank in Ordered Dataset", fontsize=14)
    ax1.spines['top'].set_visible(False)
    ax1.tick_params(axis='both', which='both', top='off', right='off', left='off')
    ax1.locator_params(axis='y', nbins=5)
    ax1.yaxis.set_major_formatter(plt.FuncFormatter(lambda tick_loc,tick_num :  '{:.1f}'.format(tick_loc) ))

    # use round method to control float number
    #ax1.yaxis.set_major_formatter(plt.FuncFormatter(lambda tick_loc,tick_num :  round(tick_loc, 1) ))

    #gene hits
    ax2 = fig.add_subplot(gs[8:10], sharex=ax1)

    # the x coords of this transformation are data, and the y coord are axes
    trans2 = transforms.blended_transform_factory(ax2.transData, ax2.transAxes)
    ax2.vlines(hit_ind, 0, 1,linewidth=.5,transform=trans2)
    ax2.spines['bottom'].set_visible(False)
    ax2.tick_params(axis='both', which='both', bottom='off', top='off',
                    labelbottom='off', right='off', left='off',labelleft='off')
    #colormap
    ax3 =  fig.add_subplot(gs[10],sharex=ax1)
    ax3.imshow(im_matrix, aspect='auto', norm=norm, cmap=plt.cm.seismic, interpolation='none') # cm.coolwarm
    ax3.spines['bottom'].set_visible(False)
    ax3.tick_params(axis='both', which='both', bottom='off', top='off',
                    labelbottom='off', right='off', left='off',labelleft='off')

    # Enrichment score plot
    ax4 = fig.add_subplot(gs[:8],sharex=ax1)
    ax4.plot(x,RES,linewidth=4,color ='#88C544')
    ax4.text(.1, .1, fdr_label, transform=ax4.transAxes)
    ax4.text(.1, .2, pval_label, transform=ax4.transAxes)
    ax4.text(.1, .3, nes_label, transform=ax4.transAxes)

    # the y coords of this transformation are data, and the x coord are axes
    trans4 = transforms.blended_transform_factory(ax4.transAxes, ax4.transData)
    ax4.hlines(0, 0, 1, linewidth=.5, transform=trans4, color='grey')
    ax4.set_ylabel("Enrichment score (ES)", fontsize=14)
    ax4.set_xlim(min(x), max(x))
    ax4.tick_params(axis='both', which='both', bottom='off', top='off', labelbottom='off', right='off')
    ax4.locator_params(axis='y', nbins=5)
    # FuncFormatter callbacks always receive two arguments (tick value, tick position); this lambda formats the y-axis tick labels.
    ax4.yaxis.set_major_formatter(plt.FuncFormatter(lambda tick_loc,tick_num :  '{:.1f}'.format(tick_loc)) )

    #fig adjustment
    fig.suptitle(enrich_term, fontsize=16)
    fig.subplots_adjust(hspace=0)
    #fig.tight_layout()
    plt.close(fig)

    return fig
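On the comment above about FuncFormatter's two arguments: the wrapped function is always called with (tick value, tick position), and the position can simply be ignored. A minimal standalone sketch:

import matplotlib.pyplot as plt
from matplotlib.ticker import FuncFormatter

def one_decimal(tick_value, tick_position):
    # tick_position is unused; only the value is formatted
    return '{:.1f}'.format(tick_value)

fig, ax = plt.subplots()
ax.plot([0, 1, 2], [0.0, 0.5, 1.0])
ax.yaxis.set_major_formatter(FuncFormatter(one_decimal))
plt.show()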