Example No. 1
def plot_hist(hist, bin_edges, out_path, do_per=False, do_log=True, title='', xlabel='', ylabel=''):
    """
    Plot histogram using information contained in hist (frequency count) and bin_edges.
    :param hist: values of the histogram
    :param bin_edges: bin edges of the histogram
    :param out_path: output path and file name
    :param do_per: plot each bin as percentage of total
    :param do_log: plot logarithmic y-axis
    :param title:
    :param xlabel:
    :param ylabel:
    :return:
    """
    logger.info('Plot histogram')
    sns.set_style('whitegrid')

    if do_per:
        # Compute percentage of total sum
        per_hist = hist * 100.0 / sum(hist)
        sns.barplot(bin_edges[:-1].astype(int), per_hist, color='purple', edgecolor='none')
        plt.ylim(0.0, 100.0)
    else:
        sns.barplot(bin_edges[:-1].astype(int), hist, color='purple', edgecolor='none', log=do_log)

    plt.xlabel(xlabel)
    plt.ylabel(ylabel)
    plt.title(title)

    plt.tight_layout()
    plt.savefig(out_path, dpi=constants.DPI)
    plt.close()
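
A minimal usage sketch for plot_hist above, assuming an older seaborn release that still accepts positional barplot arguments, and stubbing the module-level logger and constants the function relies on (both stand-ins are hypothetical):

import logging
from types import SimpleNamespace

import numpy as np

logger = logging.getLogger(__name__)        # stand-in for the module's logger
constants = SimpleNamespace(DPI=150)        # stand-in for the module's constants.DPI

data = np.random.lognormal(mean=1.0, sigma=0.6, size=1000)
hist, bin_edges = np.histogram(data, bins=20)   # frequency counts and bin edges
plot_hist(hist, bin_edges, 'hist.png', do_per=True,
          title='Value distribution', xlabel='Value', ylabel='% of total')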
Example No. 2
def makeFigure4():
    '''Makes Figure 4 from Quinn et al., 2017 - WRR (parallel axes of tradeoffs \
    for all four formulations)'''
    
    sns.set_style("dark")
    
    # load thinned reference sets from each problem formulation
    WC = np.loadtxt('./../WC/WC_thinned.csv',delimiter=',',skiprows=1)
    WP1 = np.loadtxt('./../WP1/WP1_thinned.csv',delimiter=',',skiprows=1)
    EV = np.loadtxt('./../EV/EV_thinned.csv',delimiter=',',skiprows=1)
    EVSDH = np.loadtxt('./../EVSDH/EVSDH_thinned.csv',delimiter=',',skiprows=1)
    
    # set plotting characteristics
    formulations = [WC, WP1, EV, EVSDH]
    labels = [['WC Hydro\n(Gwh/day)','WC Deficit$\mathregular{^2}$\n(m$\mathregular{^3}\!$/s)$\mathregular{^2}$','WC Flood\nDamages (-)'],\
        ['WP1 Hydro\n(Gwh/day)','WP1 Deficit$\mathregular{^2}$\n(m$\mathregular{^3}\!$/s)$\mathregular{^2}$','WP1 Flood\n(m above 11.25 m)','WP1 Recovery\n(days)'],\
        ['EV Hydro\n(Gwh/day)','EV Deficit$\mathregular{^2}$\n(m$\mathregular{^3}\!$/s)$\mathregular{^2}$','WP1 Flood\n(m above 11.25 m)','EV Recovery\n(days)'],\
        ['EV Hydro\n(Gwh/day)','EV Deficit$\mathregular{^2}$\n(m$\mathregular{^3}\!$/s)$\mathregular{^2}$','WP1 Flood\n(m above 11.25 m)','EV Recovery\n(days)','SD Hydro\n(Gwh/day)']]
    cmaps = ['Reds_r','Blues_r','Greens_r','Purples_r']
    titles = ['Worst Case (WC)', 'Worst 1st Percentile (WP1)','Expected Value (EV)',\
        'Expected Value & Hydro St Dev (EV&SD$\mathregular{_H}\!$)']
    precision = [[1,0,0],[1,0,2,1],[1,0,2,1],[1,0,2,1,1]]
    
    # make 2 x 2 subplot with parallel axes for each problem formulation
    plot(formulations, labels, precision, cmaps, titles, 'Figure4.pdf')
    
    return None
Example No. 3
def plotSimulations():
    '''Makes Figures S1 and S2 from Quinn et al., 2016 - WRR \
    Figure S1 = Storage and Release Trajectories at Hoa Binh \
    Figure S2 = Storage Trajectories at all 4 Reservoirs'''

    sns.set_style("dark")

    WC = getFormulations('WC')
    WP1 = getFormulations('WP1')
    EV = getFormulations('EV')
    EVSDH = getFormulations('EVSDH')

    formulations = [WC, WP1, EV, EVSDH]
    colors = ['#1b9e77', '#d95f02', '#7570b3', '#e7298a', '#e6ab02']
    ylabels = [r'$s_t^{HB} (km^3\!)$', r'$r_t^{HB} (m^3\!/s)$']
    titles = [
        'WC Formulation', 'WP1 Formulation', 'EV Formulation',
        'EV&SD$\mathregular{_H}$ Formulation'
    ]
    makeFigureS1(formulations, colors, ylabels, titles, 'FigureS1.pdf')

    colors = ['#e41a1c', '#377eb8', '#4daf4a', '#984ea3']
    ylabels = [
        r'$s_t^{SL} (km^3\!)$', r'$s_t^{HB} (km^3\!)$', r'$s_t^{TQ} (km^3\!)$',
        r'$s_t^{TB} (km^3\!)$'
    ]
    makeFigureS2(formulations, colors, ylabels, 'FigureS2.pdf')

    return None
Example No. 4
def plotShadedDensity():
    '''Makes Figures 7 and 8 from Quinn et al., 2017 - WRR \
    Figure 7: Time-varying PDFs of water level at Hanoi \
    Figure 8: Probabilistic state space diagrams of total storage and water level at Hanoi'''

    sns.set_style("dark")

    WC = getFormulations('WC')
    WP1 = getFormulations('WP1')

    WCformulations = [WC.bestFlood, WC.bestHydro, WC.compromise]
    WP1formulations = [WP1.bestFlood, WP1.bestHydro, WP1.compromise]

    ylabels = ['WC Formulation', 'WP1 Formulation']
    titles = [
        'Best Flood Solution', 'Best Hydro Solution', 'Compromise Solution'
    ]
    makeFigure7(WCformulations, WP1formulations, ylabels, titles,
                'Figure7.pdf')

    titles = ['WC Compromise Solution', 'WP1 Compromise Solution']
    xlabel = r'$s^{TOT} (km^3\!)$'
    ylabel = r'$z^{HN} (m)$'
    makeFigure8(WC.compromise, WP1.compromise, xlabel, ylabel, titles,
                'Figure8.pdf')

    return None
Example No. 5
def plt_fix_ticks():
    import seaborn as sns
    sns.set_style("white")
    plt.imshow(scores_mean, sns.cubehelix_palette(light=1, as_cmap=True))

    # ah, have to use xticks to get the label
    plt.xticks(range(gam_len), param_grid['gamma'], rotation=30)
    plt.yticks(range(Clen), param_grid['C'])
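
plt_fix_ticks reads scores_mean, param_grid, gam_len and Clen from the enclosing scope; a sketch of how those names could be produced from a hypothetical SVM grid search (parameter values are illustrative):

import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import make_classification
from sklearn.model_selection import GridSearchCV
from sklearn.svm import SVC

param_grid = {'C': [0.1, 1, 10, 100], 'gamma': [1e-3, 1e-2, 1e-1, 1]}
Clen, gam_len = len(param_grid['C']), len(param_grid['gamma'])

X, y = make_classification(n_samples=300, n_features=10, random_state=0)
search = GridSearchCV(SVC(), param_grid, cv=3).fit(X, y)

# reshape the mean CV scores into a (C, gamma) matrix; ParameterGrid varies
# the alphabetically last key ('gamma') fastest, so rows correspond to C
scores_mean = search.cv_results_['mean_test_score'].reshape(Clen, gam_len)

plt_fix_ticks()
plt.colorbar()
plt.show()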
Example No. 6
def fitMVN():
    # set plotting style
    sns.set_style("darkgrid")

    # load PM dataset
    pm_daily = np.loadtxt('../data/epa_hourly/alabamatest.csv', delimiter=',')
    Nyears = int(np.shape(pm_daily)[0] / (365 * 24))  # years of record, assuming hourly rows
    Nsites = np.shape(pm_daily)[1]
    Months = ['May', 'June', 'July', 'August', 'September', 'October', 'November', 'December', 'January', 'February',
              'March', 'April']

    # calculate standard deviation in daily flows each month and squared Mahalanobis distances
    StdMonthly = calc_monthly_std(pm_daily, Nyears, Nsites)
    D2 = calcD2(Nsites, np.log(pm_daily))

    # calculate theoretical quantiles for a chi^2 distribution with dof = Nsites, and for the standard normal distribution
    m = np.array(range(1, Nyears + 1))
    p = (m - 0.5) / Nyears
    chi2 = stats.chi2.ppf(p, Nsites)
    norm = stats.norm.ppf(p, 0, 1)

    # initialize matrices to store correlation coefficients and significance levels for marginal normal distributions and chi^2 distributions
    normCorr = np.zeros([Nsites, 12])
    norm_sigLevel = np.zeros([Nsites, 12])
    chi2Corr = np.zeros([12])
    chi2_sigLevel = np.zeros([12])

    for i in range(len(Months)):
        # plot histograms of standard deviation of daily flows each month, and of their logs
        plotHistograms(Nsites, StdMonthly[:, :, i], 'Standard Deviation of Daily ' + Months[i] + ' Flows',
                       Months[i] + 'Hist.png')
        plotHistograms(Nsites, np.log(StdMonthly[:, :, i]), 'log(Standard Deviation of Daily ' + Months[i] + ' Flows)', \
                       'Log' + Months[i] + 'Hist.png')

        # plot QQ plots of standard deviation of daily flows each month, and of their logs
        plotNormQQ(Nsites, StdMonthly[:, :, i], norm, 'Standard Deviation of Daily ' + Months[i] + ' Flows',
                   Months[i] + 'QQ.png')
        normCorr[:, i] = plotNormQQ(Nsites, np.log(StdMonthly[:, :, i]), norm,
                                    'log(Standard Deviation of Daily ' + Months[i] + ' Flows)',
                                    'Log' + Months[i] + 'QQ.png')

        # plot QQ plot of Chi Squared distribution of log of standard deviation in daily flows each month
        chi2Corr[i] = plotChi2QQ(Nsites, D2[:, i], chi2,
                                 'D$\mathregular{^2}\!$ of log(Standard Deviation of Daily ' + Months[i] + ' Flows)', \
                                 'Log' + Months[i] + 'Chi2QQ.png')

        # find significance levels
        chi2_sigLevel[i] = chi2_MC(Nsites, Nyears, chi2, chi2Corr[i])
        norm_sigLevel[:, i] = norm_MC(Nsites, Nyears, norm, normCorr[:, i])

    np.savetxt('Norm_sigLevels.txt', np.transpose(norm_sigLevel))
    np.savetxt('Norm_corr.txt', np.transpose(normCorr))
    np.savetxt('Chi2_sigLevels.txt', chi2_sigLevel)
    np.savetxt('Chi2_corr.txt', chi2Corr)

    return None
Example No. 7
def makeStorageFig(EMODPS, GL, preference, figName):
    EMODPSprobs = np.log10(getStorageProbs(EMODPS[0], EMODPS[1]))
    GLprobs = np.log10(getStorageProbs(GL[0], GL[1]))
    a = EMODPSprobs[EMODPSprobs > -np.inf]
    b = GLprobs[GLprobs > -np.inf]
    
    tickMin = min(np.min(a), np.min(b))
    tickMax = max(np.max(a), np.max(b))
    
    sns.set_style("dark")
    ymax = 15
    ymin = 0
    xmin = 5
    xmax = 30
    
    fig = plt.figure()
    ax = fig.add_subplot(1,2,1)
    sm = ax.imshow(EMODPSprobs, cmap='RdYlBu_r',origin="upper",norm=mpl.colors.Normalize(vmin=tickMin, vmax=tickMax))
    ax.set_xticks(np.arange(0,100+100/5,100/5))
    ax.set_xticklabels(np.arange(xmin, xmax+5, 5),fontsize=18)
    ax.set_xlabel(r'$s^{TOT} (km^3\!)$',fontsize=18)
    ax.set_yticks(np.arange(0,100+100/3,100/3))
    ax.set_yticklabels(np.arange(ymax,ymin-5,-5),fontsize=18)
    ax.set_ylabel(r'$z^{HN} (m)$',fontsize=18)
    ax.set_ylim([100,0])
    ax.set_xlim([0,100])
    ax.set_title('EMODPS ' + preference + ' Policy',fontsize=18)
    alarm, = ax.plot([0,100],[(1-11.25/15.0)*100.0,(1-11.25/15.0)*100.0],linestyle='--',c='k') # second alarm
    dikeHeight, = ax.plot([0,100],[(1-13.4/15.0)*100.0,(1-13.4/15.0)*100.0],linewidth=2,c='k') # dike height

    
    ax = fig.add_subplot(1,2,2)
    sm = ax.imshow(GLprobs, cmap='RdYlBu_r',origin="upper",norm=mpl.colors.Normalize(vmin=tickMin, vmax=tickMax))
    ax.set_xticks(np.arange(0,100+100/5,100/5))
    ax.set_xticklabels(np.arange(xmin, xmax+5, 5),fontsize=18)
    ax.set_xlabel(r'$s^{TOT} (km^3\!)$',fontsize=18)
    ax.set_yticks(np.arange(0,100+100/3,100/3))
    ax.tick_params(axis='y', which='both', labelleft=False)
    ax.set_ylim([100,0])
    ax.set_xlim([0,100])
    ax.set_title('Guidelines Policy',fontsize=18)
    alarm, = ax.plot([0,100],[(1-11.25/15.0)*100.0,(1-11.25/15.0)*100.0],linestyle='--',c='k') # second alarm
    dikeHeight, = ax.plot([0,100],[(1-13.4/15.0)*100.0,(1-13.4/15.0)*100.0],linewidth=2,c='k') # dike height
    
    fig.subplots_adjust(right=0.8, bottom=0.2)
    fig.legend([alarm, dikeHeight],['Alarm Level', 'Dike Height'], \
        loc='lower center', ncol=2, fontsize=18, frameon=True)
    cbar_ax = fig.add_axes([0.85, 0.15, 0.05, 0.7])
    cbar = fig.colorbar(sm, cax=cbar_ax, ticks=np.arange(-7,-2,1))
    cbar.ax.set_ylabel('Probability Density',fontsize=18)
    cbar.ax.set_yticklabels([r'$10^{-7}$',r'$10^{-6}$',r'$10^{-5}$',r'$10^{-4}$',r'$10^{-3}$'],fontsize=16)
    fig.set_size_inches([11.7125, 5.975])
    fig.savefig(figName)
    fig.clf()
    
    return None
Example No. 8
def evaluate_network(network, X_test, y_test, classes_names, length=1000, batch_size=64):
    resp = network.predict_proba(X_test[:length], batch_size=batch_size, verbose=False)
    resc = network.predict_classes(X_test[:length], batch_size=batch_size, verbose=False)

    a1 = []
    a2 = []
    cpt = 0
    cpt_on = []
    cpt_real = []
    cpt_should = 0
    should = []
    cpt_shouldnt = 0
    shouldnt = []
    for idx, i in enumerate(resc):
        a1.append(i)
        a2.append(np.array(y_test[idx]).argmax())
        if i.tolist() == [0, 0, 0, 0]:
            cpt += 1
            cpt_on.append(resp[idx].argmax())
            cpt_real.append(np.array(y_test[idx]).argmax())
            if cpt_on[-1] == cpt_real[-1]:
                cpt_should += 1
                should.append(resp[idx].argmax())
            else:
                cpt_shouldnt += 1
                shouldnt.append(resp[idx].argmax())
            # print(resp[idx])
    print("No decision: %d / %d  [%.02f%%]" % (cpt, len(resc), (cpt / float(len(resc))) * 100), end="")
    print(cpt_should, cpt_shouldnt)

    print("Accuracy: %.06f" % metrics.label_ranking_average_precision_score(y_test[:length], resp))

    cpt_on = np.array(cpt_on)
    print(metrics.classification_report(a1, a2, target_names=classes_names))

    print("Confusion matrix:")
    cm = confusion_matrix(a1, a2)
    print(cm)
    sns.set_style("ticks")
    sns.mpl.rc("figure", figsize=(8, 4))

    np.set_printoptions(precision=2)
    cm_normalized = cm.astype("float") / cm.sum(axis=1)[:, np.newaxis]
    fig = plt.figure()
    plt.imshow(cm_normalized, interpolation="nearest", cmap=plt.cm.Blues)
    plt.title("Normalized confusion matrix")
    plt.colorbar()
    tick_marks = np.arange(len(classes_names))
    plt.xticks(tick_marks, classes_names, rotation=45)
    plt.yticks(tick_marks, classes_names)
    plt.ylabel("True label")
    plt.xlabel("Predicted label")
    plt.tick_params(which="both", direction="in", length=0)
    plt.show()
Example No. 9
def _plotTrackingPercentage(plotData):
    figure = plt.figure(figsize=(figureWidth, halfPageHeight), dpi=600)
    sns.set_style(style='white')
    ax = figure.gca()

    minTracking = 100.
    lambdaPhiSet = set()

    trackingPercentageList = []
    for j, (P_d, d1) in enumerate(plotData.items()):
        for i, (N, d2) in enumerate(d1.items()):
            x = []
            y = []
            for lambda_phi, trackingPercentage in d2.items():
                x.append(lambda_phi)
                y.append(trackingPercentage)
                minTracking = min(minTracking, trackingPercentage)
                lambdaPhiSet.add(lambda_phi)
            x = np.array(x)
            y = np.array(y)
            x, y = (list(t) for t in zip(*sorted(zip(x, y))))
            trackingPercentageList.append((P_d, N, x, y))

    trackingPercentageList.sort(key=lambda tup: float(tup[1]), reverse=True)
    trackingPercentageList.sort(key=lambda tup: float(tup[0]), reverse=True)

    pdSet = set()
    nSet = set()
    for P_d, N, x, y in trackingPercentageList:
        if P_d not in pdSet:
            nSet.clear()
        pdSet.add(P_d)
        nSet.add(N)
        ax.plot(x, y,
                label="$P_D$={0:}, N={1:.0f}".format(P_d, N),
                c=colors[len(nSet) - 1],
                linestyle=linestyleList[len(pdSet) - 1],
                linewidth=linewidth,
                marker='*' if len(x)==1 else None)

    lambdaPhiList = list(lambdaPhiSet)
    lambdaPhiList.sort()

    ax.legend(loc=0, ncol=len(pdSet), fontsize=legendFontsize)
    ax.set_xlabel("$\lambda_{\phi}$", fontsize=labelFontsize)
    ax.set_ylabel("\nAverage tracking percentage", fontsize=labelFontsize)
    ax.xaxis.set_major_formatter(FormatStrFormatter('%.1e'))
    ax.set_ylim(0.0, 100.01)
    ax.tick_params(labelsize=labelFontsize)
    sns.despine(ax=ax, offset=0)
    ax.xaxis.set_ticks(lambdaPhiList)
    figure.tight_layout(pad=0.8, h_pad=0.8, w_pad=0.8)
    return figure
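
_plotTrackingPercentage above walks plotData as a nested mapping plotData[P_d][N][lambda_phi] -> average tracking percentage and reads several module-level style constants; a hypothetical sketch of compatible inputs (all names and values below are placeholders):

import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from matplotlib.ticker import FormatStrFormatter

# module-level constants the helper expects
figureWidth, halfPageHeight = 7.0, 4.0
colors = sns.color_palette(n_colors=5)
linestyleList = ['-', '--', ':', '-.']
linewidth = 1.5
labelFontsize = legendFontsize = 8

# plotData[P_d][N][lambda_phi] -> average tracking percentage
plotData = {
    0.9: {6: {5e-5: 97.2, 1e-4: 95.8, 2e-4: 93.1},
          3: {5e-5: 92.4, 1e-4: 90.0, 2e-4: 87.5}},
    0.7: {6: {5e-5: 88.3, 1e-4: 85.1, 2e-4: 81.9},
          3: {5e-5: 80.2, 1e-4: 77.6, 2e-4: 73.0}},
}

figure = _plotTrackingPercentage(plotData)
figure.savefig('trackingPercentage.pdf')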
Example No. 10
def _plotTrackLossPercentage(plotData):
    figure = plt.figure(figsize=(figureWidth,halfPageHeight), dpi=600)
    colors = sns.color_palette(n_colors=5)
    sns.set_style(style='white')
    ax = figure.gca()
    maxTrackloss = 0.
    lambdaPhiSet = set()

    trackLossList = []
    for j, (P_d, d1) in enumerate(plotData.items()):
        for i, (N, d2) in enumerate(d1.items()):
            x = []
            y = []
            for lambda_phi, trackLossPercentage in d2.items():
                x.append(lambda_phi)
                y.append(trackLossPercentage)
                maxTrackloss = max(maxTrackloss, trackLossPercentage)
                lambdaPhiSet.add(lambda_phi)
            x, y = (list(t) for t in zip(*sorted(zip(x, y))))
            x = np.array(x)
            y = np.array(y)
            trackLossList.append((P_d, N, x, y))

    trackLossList.sort(key=lambda tup: float(tup[1]))
    trackLossList.sort(key=lambda tup: float(tup[0]))

    pdSet = set()
    nSet = set()
    for P_d, N, x, y in trackLossList:
        if P_d not in pdSet:
            nSet.clear()
        pdSet.add(P_d)
        nSet.add(N)
        ax.plot(x,y,
                label = "$P_D$={0:}, N={1:.0f}".format(P_d, N),
                c = colors[len(nSet)-1],
                linestyle=linestyleList[len(pdSet)-1],
                linewidth = linewidth)
    lambdaPhiList = list(lambdaPhiSet)
    lambdaPhiList.sort()

    ax.legend(loc=0, ncol=len(pdSet), fontsize=legendFontsize)
    ax.set_xlabel("$\lambda_{\phi}$", fontsize=labelFontsize)
    ax.set_ylabel("Track loss (%)", fontsize=labelFontsize)
    ax.xaxis.set_major_formatter(FormatStrFormatter('%.1e'))
    ax.set_ylim(0, 30)
    ax.tick_params(labelsize=labelFontsize)
    yStart, yEnd = ax.get_ylim()
    ax.yaxis.set_ticks(np.arange(yStart, yEnd * 1.01, 10))
    ax.xaxis.set_ticks(lambdaPhiList)
    figure.tight_layout(pad=0.8, h_pad=0.8, w_pad=0.8)
    return figure
Example No. 11
def makeFigure3():
    '''Makes Figure 3 from Quinn et al., 2017 - WRR (flood damage function)'''

    sns.set_style("dark")
    h = np.arange(0, 14.0, 0.1)
    d = np.zeros(len(h))
    for i in range(len(h)):
        if h[i] <= 6:
            d[i] = 0
        elif h[i] > 6 and h[i] <= 11.25:
            d[i] = (h[i] - 6) * 750000 / 5.25
        else:
            d[i] = 1.50601636E6 * h[i]**4 - 7.00078878E7 * h[
                i]**3 + 1.21999573E9 * h[i]**2 - 9.44555684E9 * h[
                    i] + 2.74132803E10

    fig = plt.figure()
    # plot damage function
    ax = fig.add_subplot(1, 1, 1)
    l1, = ax.plot([6,6],[0,1.50601636E6*13.9**4 - 7.00078878E7*13.9**3 + 1.21999573E9*13.9**2 - 9.44555684E9*13.9 + 2.74132803E10], \
        c='k', linestyle=':', linewidth=2) # first alarm line
    l2, = ax.plot([11.25,11.25],[0,1.50601636E6*13.9**4 - 7.00078878E7*13.9**3 + 1.21999573E9*13.9**2 - 9.44555684E9*13.9 + 2.74132803E10], \
        c='k', linestyle='--', linewidth=2) # second alarm line
    l3, = ax.plot([13.4,13.4],[0,1.50601636E6*13.9**4 - 7.00078878E7*13.9**3 + 1.21999573E9*13.9**2 - 9.44555684E9*13.9 + 2.74132803E10], \
        c='k', linewidth=2) # third alarm line
    ax.plot(h, d, c='#cb181d', linewidth=2)  # damage function
    ax.set_ylim([
        0, 1.50601636E6 * 13.9**4 - 7.00078878E7 * 13.9**3 +
        1.21999573E9 * 13.9**2 - 9.44555684E9 * 13.9 + 2.74132803E10
    ])
    ax.set_xlim([0, 13.9])
    ax.set_xlabel(r'$z_{HN}$', fontsize=20)
    ax.set_ylabel('Damages (-)', fontsize=20)
    ax.set_title('Flooding Damages Function', fontsize=24)
    ax.tick_params(axis='both', labelsize=18)

    fig.subplots_adjust(bottom=0.2)
    fig.legend([l1, l2, l3], ['First Alarm', 'Second Alarm', 'Dike Height'],
               loc='lower center',
               ncol=3,
               frameon=True,
               fontsize=20)
    fig.set_size_inches([9.1, 7.4125])

    fig.savefig('Figure3.pdf')
    fig.clf()

    return None
Example No. 12
def show_history(history):
    sns.set_style("darkgrid")
    sns.set_context("notebook", font_scale=1.5, rc={"lines.linewidth": 1.6})
    sns.mpl.rc("figure", figsize=(16, 5))
    f, (ax1, ax2) = plt.subplots(1, 2, sharey=True)
    ax1.plot(history.losses, color="#47d024")
    ax1.plot(history.accuracy, color="#0672b6")
    ax1.legend(["loss", "accuracy"])
    ax1.set_title("Batch validation")

    ax2.plot(history.val_losses, color="#47d024")
    ax2.plot(history.val_accuracy, color="#0672b6")
    ax2.legend(["loss", "accuracy"])
    ax2.set_title("Epoch validation")

    plt.show()
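
show_history expects an object exposing losses, accuracy, val_losses and val_accuracy lists; a sketch of a hypothetical Keras callback that records them (per batch for the training curves, per epoch for validation), assuming the model is compiled with metrics=['accuracy']:

import tensorflow as tf

class TrainingHistory(tf.keras.callbacks.Callback):
    """Collects the attributes show_history reads."""

    def on_train_begin(self, logs=None):
        self.losses, self.accuracy = [], []
        self.val_losses, self.val_accuracy = [], []

    def on_batch_end(self, batch, logs=None):
        logs = logs or {}
        self.losses.append(logs.get('loss'))
        self.accuracy.append(logs.get('accuracy'))   # 'acc' in older Keras releases

    def on_epoch_end(self, epoch, logs=None):
        logs = logs or {}
        self.val_losses.append(logs.get('val_loss'))
        self.val_accuracy.append(logs.get('val_accuracy'))

# history = TrainingHistory()
# model.fit(X_train, y_train, validation_split=0.2, epochs=10, callbacks=[history])
# show_history(history)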
Example No. 13
def plot_hist(hist,
              bin_edges,
              out_path,
              do_per=False,
              do_log=True,
              title='',
              xlabel='',
              ylabel=''):
    """
    Plot histogram using information contained in hist (frequency count) and bin_edges.
    :param hist: values of the histogram
    :param bin_edges: bin edges of the histogram
    :param out_path: output path and file name
    :param do_per: plot each bin as percentage of total
    :param do_log: plot logarithmic y-axis
    :param title:
    :param xlabel:
    :param ylabel:
    :return:
    """
    logger.info('Plot histogram')
    sns.set_style('whitegrid')

    if do_per:
        # Compute percentage of total sum
        per_hist = hist * 100.0 / sum(hist)
        sns.barplot(bin_edges[:-1].astype(int),
                    per_hist,
                    color='purple',
                    edgecolor='none')
        plt.ylim(0.0, 100.0)
    else:
        sns.barplot(bin_edges[:-1].astype(int),
                    hist,
                    color='purple',
                    edgecolor='none',
                    log=do_log)

    plt.xlabel(xlabel)
    plt.ylabel(ylabel)
    plt.title(title)

    plt.tight_layout()
    plt.savefig(out_path, dpi=constants.DPI)
    plt.close()
Example No. 14
def plot_synapse_pairs(X, zh, output='synapse_pairs.pdf'):
    sns.set_style({"font.size": 10, "axes.labelsize": 30})
    d = {'z': zh}
    n, p = X.shape
    for i in range(p):
        d[r'$x_{%i}$'%(i+1)] = X[:,i]
        #d[r'$x_{%i}$'%(i+1)] = X[:,i]/(10**5)
    df = pd.DataFrame(data=d)
    g = sns.PairGrid(df, hue="z",vars=[r'$x_{%i}$'%(i+1) for i in range(p)])

    def scatter_fake_diag(x, y, *a, **kw):
        if x.equals(y):
            kw["color"] = (0, 0, 0, 0)
        plt.scatter(x, y, s=3, *a, **kw)

    g.map(scatter_fake_diag)
    g.map_diag(plt.hist)
    g.savefig(output)
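
A minimal usage sketch for plot_synapse_pairs, with random features standing in for the synapse measurements and zh holding one cluster label per row (hypothetical data):

import numpy as np

rng = np.random.RandomState(0)
X = np.vstack([rng.normal(0, 1, size=(50, 3)),     # cluster 0
               rng.normal(3, 1, size=(50, 3))])    # cluster 1
zh = np.repeat([0, 1], 50)                         # one label per row of X

plot_synapse_pairs(X, zh, output='synapse_pairs_demo.pdf')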
Example No. 15
def plot_qq(clf, X, y, figsize=(7, 7)):
    """Generate a Q-Q plot (a.k.a. normal quantile plot).

    Parameters
    ----------
    clf : sklearn.linear_model
        A scikit-learn linear model classifier with a `predict()` method.
    X : numpy.ndarray
        Training data used to fit the classifier.
    y : numpy.ndarray
        Target training values, of shape = [n_samples].
    figsize : tuple
        A tuple indicating the size of the plot to be created, with format
        (x-axis, y-axis). Defaults to (7, 7).

    Returns
    -------
    matplotlib.figure.Figure
        The Figure instance.
    """
    # Ensure we only plot residuals using classifiers we have tested
    assert isinstance(clf, _utils.supported_linear_models), (
        "Classifiers of type {0} not currently supported.".format(type(clf)))
    residuals = stats.residuals(clf, X, y, r_type='raw')
    prob_plot = sm.ProbPlot(residuals, scipy.stats.t, fit=True)
    # Set plot style
    sns.set_style("darkgrid")
    sns.set(font_scale=1.2)
    # Generate plot
    try:
        # Q-Q plot doesn't respond to figure size, so prep a figure first
        fig, ax = plt.subplots(figsize=figsize)
        prob_plot.qqplot(line='45', ax=ax)
        plt.title("Normal Quantile Plot")
        plt.xlabel("Theoretical Standardized Residuals")
        plt.ylabel("Actual Standardized Residuals")
        plt.show()
    except:
        raise  # Re-raise the exception
    finally:
        sns.reset_orig()
    return fig
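
A usage sketch for plot_qq with an ordinary least-squares fit, assuming LinearRegression is among the package's _utils.supported_linear_models (that tuple is internal to the package this snippet comes from):

import numpy as np
from sklearn.linear_model import LinearRegression

rng = np.random.RandomState(0)
X = rng.normal(size=(200, 3))
y = X @ np.array([1.5, -2.0, 0.5]) + rng.normal(scale=0.3, size=200)

clf = LinearRegression().fit(X, y)   # fitted model with a predict() method
fig = plot_qq(clf, X, y, figsize=(6, 6))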
Example No. 16
def plot_synapse_pairs(X, zh, output='synapse_pairs.pdf'):
    sns.set_style({"font.size": 10, "axes.labelsize": 30})
    d = {'z': zh}
    n, p = X.shape
    for i in range(p):
        d[r'$x_{%i}$' % (i + 1)] = X[:, i]
        #d[r'$x_{%i}$'%(i+1)] = X[:,i]/(10**5)
    df = pd.DataFrame(data=d)
    g = sns.PairGrid(df,
                     hue="z",
                     vars=[r'$x_{%i}$' % (i + 1) for i in range(p)])

    def scatter_fake_diag(x, y, *a, **kw):
        if x.equals(y):
            kw["color"] = (0, 0, 0, 0)
        plt.scatter(x, y, s=3, *a, **kw)

    g.map(scatter_fake_diag)
    g.map_diag(plt.hist)
    g.savefig(output)
Example No. 17
def makeFigure6():
    new_WP1 = calcRobustness('new_WP1')

    cbar = 'inferno'
    labels = [r'$J_{Flood}$' + '\n(m above 11.25 m)',\
        r'$J_{Hydro}$' + '\n(Gwh/day)',\
        r'$J_{Max Def}$' + '\n' + r'$\mathregular{(m^3/s)}$']
    precision = [2, 0, 0]
    cbar_axes = [0.85, 0.15, 0.05, 0.7]

    sns.set_style("darkgrid")
    fig = plt.figure()
    ax = fig.add_subplot(111)

    # create newlabels so they aren't appended to labels each time
    newlabels = []

    table = pandas.DataFrame(new_WP1.reeval_1000, columns=labels)
    mins = np.min(new_WP1.reeval_1000, 0)
    maxs = np.max(new_WP1.reeval_1000, 0)
    # round number of significant digits shown on objective labels
    for k in range(len(labels)):
        if precision[k] != 0:
            newlabels.append(
                str(np.round(mins[k], precision[k])) + '\n' + labels[k])
        else:
            newlabels.append(str(int(mins[k])) + '\n' + labels[k])

        # don't show negative sign on maximization objectives
        if mins[k] < 0:
            newlabels[k] = newlabels[k][1:]

    parallel_coordinate(fig, ax, table, new_WP1.Satisfy1[:,4], mins, maxs, cbar,\
        newlabels, precision, cbar_axes, new_WP1.bestIndices)

    fig.set_size_inches([8.725, 7.7375])
    fig.savefig('Figure6.pdf')
    fig.clf()

    return None
Example No. 18
def plot_damp_top_authors(folder, damps, top, min_year, plot_author_count=20, show_legend=True):
  graph = cite_graph(GRAPH_CSV)
  top_authors = most_cited_authors(graph, top, min_year)[:plot_author_count]
  author_nodes = mysql.get_authors()
  x_labels = [author_nodes[a[0]].name for a in top_authors]
  x_axis = range(1, plot_author_count + 1)
  top_author_ids = np.array([a[0] for a in top_authors])
  folder_path = "figs/%s/%s/authors/%s" % (THE.version, THE.permitted, folder)
  palette = np.array(sns.color_palette("hls", plot_author_count))
  legends = []
  # for i, f_name in enumerate(os.listdir(folder_path)):
  y_axes = []
  means = np.array([0.0] * plot_author_count)
  plt.figure(figsize=(8, 2))
  for i, _ in enumerate(damps):
    # file_name = "%s/%s" % (folder_path, name)
    file_name = "%s/page_rank_%0.2f.pkl" % (folder_path, damps[i])
    with open(file_name) as f:
      pr_scores = cPkl.load(f)
      y_axis = np.array([pr_scores[a] for a in top_author_ids])
      y_axes.append(y_axis)
      means += y_axis
  indices = np.argsort(means)[::-1]
  top_author_ids = top_author_ids[indices]
  # sns.set_style("whitegrid", {'axes.grid': False})
  sns.set_style("white")
  for i, y_axis in enumerate(y_axes):
    plt.plot(x_axis, y_axis[indices], c=palette[i])
    legends.append("%0.2f" % damps[i])
  if show_legend:
    plt.legend(legends, bbox_to_anchor=(-0.1, 1.15, 1.15, 0.2), loc="lower left",
               mode="expand", borderaxespad=0, ncol=10)
  fig_name = "figs/%s/%s/authors/damp_%s.png" % (THE.version, THE.permitted, folder)
  plt.ylabel("Page Rank Score", fontsize=14)
  plt.xlabel("Author ID", fontsize=14)
  plt.xticks(x_axis, top_author_ids, rotation='vertical')
  plt.title("Page Rank Score for top %d cited authors with varying damping factors" % plot_author_count)
  plt.savefig(fig_name, bbox_inches='tight')
  plt.clf()
Example No. 19
def _plotTimeLog(plotData):
    figure = plt.figure(figsize=(figureWidth, halfPageHeight), dpi=600)
    ax = figure.gca()
    sns.set_style(style='white')

    nSet = set()
    for j, P_d in enumerate(sorted(plotData, reverse=True)):
        d1 = plotData[P_d]
        for i, lambda_phi in enumerate(sorted(d1)):
            d2 = d1[lambda_phi]
            x = []
            y = []
            for N, (meanRuntime, percentiles) in d2.items():
                x.append(N)
                y.append(meanRuntime)
                nSet.add(N)
            x = np.array(x)
            y = np.array(y)
            x, y = (list(t) for t in zip(*sorted(zip(x, y))))
            ax.plot(x,y, linestyle=linestyleList[i], color=colors[j],
                     label="$P_D$={0:}, $\lambda_\phi$={1:}".format(P_d, lambda_phi),
                    linewidth=linewidth)

    ax.set_xlim(ax.get_xlim()[0]-0.5, ax.get_xlim()[1]+0.5)
    ax.set_ylim(0,1)
    ax.set_title("Tracking iteration runtime", fontsize=titleFontsize)
    ax.set_xlabel("N", fontsize=labelFontsize, labelpad=0)
    ax.set_ylabel("Average iteration time [s]", fontsize=labelFontsize)
    ax.xaxis.set_ticks(sorted(list(nSet)))
    ax.tick_params(labelsize=labelFontsize)

    ax.legend(loc=0, fontsize=legendFontsize)
    ax.grid(False)

    sns.despine(ax=ax)
    figure.tight_layout(pad=0.5, h_pad=0.5, w_pad=0.5)
    return figure
Example No. 20
def makeFigure4():
    '''Makes Parallel Axis plots for the Hetch Hetchy DPS Formulation'''

    sns.set_style("dark")

    # load thinned reference sets from each problem formulation
    C1 = np.loadtxt('portfolio.ref')
    C2 = pandas.read_csv('NSGAII_portfolio.set', delimiter=' ', header=None)
    C2.columns = ['Annual', 'Min', 'Hedge', 'Complexity']
    C2 = C2[C2.Min < -115]
    np.savetxt('NSGAII_portfolio_req_1.txt', C2, delimiter=',')
    C2 = np.loadtxt('NSGAII_portfolio_req_1.txt', delimiter=',')
    #C3 = np.loadtxt('./../EV/EV_thinned.csv',delimiter=',',skiprows=1)
    #C4 = np.loadtxt('./../EVSDH/EVSDH_thinned.csv',delimiter=',',skiprows=1)

    # set plotting characteristics
    formulations = [C1]
    formulations_1 = [C2]
    labels = [[
        'Annualized\nAdjusted Revenue\n($M)',
        'Minimum\nAdjusted Revenue\n($M)', 'Maximum\nHedge Complexity',
        'Maximum\nFund Balance\n($M)'
    ]]
    #['WP1 Hydro\n(Gwh/day)','WP1 Deficit$\mathregular{^2}$\n(m$\mathregular{^3}\!$/s)$\mathregular{^2}$','WP1 Flood\n(m above 11.25 m)','WP1 Recovery\n(days)'],\
    #['EV Hydro\n(Gwh/day)','EV Deficit$\mathregular{^2}$\n(m$\mathregular{^3}\!$/s)$\mathregular{^2}$','WP1 Flood\n(m above 11.25 m)','EV Recovery\n(days)'],\
    #['EV Hydro\n(Gwh/day)','EV Deficit$\mathregular{^2}$\n(m$\mathregular{^3}\!$/s)$\mathregular{^2}$','WP1 Flood\n(m above 11.25 m)','EV Recovery\n(days)','SD Hydro\n(Gwh/day)']]
    #cmaps = ['Reds_r','Blues_r','Greens_r','Purples_r']
    cmaps = ['Reds_r']
    titles = ['NSGA-II']
    precision = [[0, 0, 2, 1]]

    # make parallel axis plot for the NSGA-II reference set
    plot(formulations, formulations_1, labels, precision, cmaps, titles,
         'NSGAII_first_requirement.pdf')

    return None
Example No. 21
def makeFigure5():
    '''Makes Figure 5 from Quinn et al., 2017 - WRR (re-evaluation of solutions \
    from each formulation on the objectives of all other formulations over \
    streamflows from both optimization and out-of-sample validation)'''

    sns.set_style("dark")

    # load thinned reference sets from each problem formulation
    WC = getFormulations('WC')
    WP1 = getFormulations('WP1')
    EV = getFormulations('EV')
    EVSDH = getFormulations('EVSDH')

    # specify plotting parameters
    formulations = [EVSDH, EV, WP1, WC]
    colors = ['#984ea3', '#4daf4a', '#377eb8', '#e41a1c']
    indices = [[0, 1, 2], [0, 1, 3, 2], [4, 5, 7, 6]]
    ylabels = [
        'WC Objectives', 'WP1 Objectives', 'EV&SD$\mathregular{_H}$ Objectives'
    ]
    precision = [[1, 0, 0], [1, 0, 2, 1], [1, 0, 1, 1]]
    names = [
        'WC Formulation', 'WP1 Formulation', 'EV Formulation',
        'EV&SD$\mathregular{_H}$ Formulation'
    ]
    titles = [['WC Hydro (Gwh/day)', 'WC Deficit$\mathregular{^2}\!$ (m$\mathregular{^3}\!$/s)$\mathregular{^2}$', \
            'WC Flood (-)'],\
        ['WP1 Hydro (Gwh/day)', 'WP1 Deficit$\mathregular{^2}\!$ (m$\mathregular{^3}\!$/s)$\mathregular{^2}$', \
            'WP1 Flood\n(m above 11.25 m)', 'WP1 Recovery (days)'],\
        ['EV Hydro (Gwh/day)', 'EV Deficit$\mathregular{^2}\!$ (m$\mathregular{^3}\!$/s)$\mathregular{^2}$', \
            'EV&SD$\mathregular{_H}$\nHydro Std (Gwh/day)', 'EV Recovery (days)']]

    makePlots(formulations, colors, indices, ylabels, titles, precision, names,
              'Figure5.pdf')

    return None
Example No. 22
def plot_rmsf(data, resid_data):

    col = ['#8c96c6', '#8856a7', '#810f7c']


    plt.style.use('ggplot')
    sns.set_style('ticks')

    ax = plt.subplot(111)
    ax.plot(resid_data.resids, data, '-', linewidth=1, color=col[-1])
    ax.fill_between(resid_data.resids, data, alpha=0.1, color=col[-1])

    sns.despine(ax=ax, offset=2.5)
    ax.set_xlabel("Residue number")
    ax.set_ylabel(r"RMSF ($\AA$)")
    #ax.set_ylim(top=9.5)

    #ax.axvspan(4, 9, alpha=0.1, color='red') # mark the hydophobic patch region
    # ax.axvspan(127, 132, alpha=0.5, color='red')
    # ax.axvspan(250, 255, alpha=0.5, color='red')

    plt.legend()
    plt.savefig('rmsf.svg', format='svg')
    plt.show()
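
A minimal usage sketch for plot_rmsf; in the original workflow resid_data likely comes from an MDAnalysis residue selection, so a simple stand-in object with a .resids attribute is used here (hypothetical data):

import numpy as np
from types import SimpleNamespace

n_res = 120
resid_data = SimpleNamespace(resids=np.arange(1, n_res + 1))           # residue numbers
data = np.abs(np.random.default_rng(0).normal(1.5, 0.5, size=n_res))   # per-residue RMSF values

plot_rmsf(data, resid_data)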
Example No. 23
"""Initialize experiment sets and parameters for Df(16)A analysis."""

import os.path
import pandas as pd
import ConfigParser

import lab
import lab.classes.exceptions as exc
import lab.plotting as plotting

import warnings
warnings.filterwarnings("ignore", category=DeprecationWarning)

import seaborn.apionly as sns
sns.set_style("ticks")
sns.set_context(
    rc={
        'lines.linewidth': 1,
        'axes.titlesize': 7,
        'axes.labelsize': 'medium',
        'xtick.labelsize': 'medium',
        'ytick.labelsize': 'medium'
    })

import matplotlib as mpl
# mpl.rcParams['font.sans-serif'].append(u'DejaVu Sans')
mpl.rcParams.update({
    'lines.linewidth': 1,
    'axes.titlesize': 9,
    'axes.labelsize': 9,
    'xtick.labelsize': 7,
Example No. 24
def test_real_embedding(X,
                        Y,
                        target_dim,
                        img_getter,
                        filename,
                        subsampling=10,
                        zoom=.5,
                        labels=None,
                        palette='hls'):
    print('--------\n', filename)

    embedding = spectral_embedding(Y,
                                   target_dim=target_dim,
                                   gramian=False,
                                   discard_first=False)

    D = dot_matrix(X)
    Q = Y.dot(Y.T)

    if palette == 'none':
        point_labels = None
    else:
        point_labels = np.arange(len(X))

    sns.set_style('white')

    plt.figure(figsize=(16, 5.1), tight_layout=True)
    gs = gridspec.GridSpec(1, 4)

    titles = [r'$\mathbf{X}^\top \mathbf{X}$', r'$\mathbf{Y}^\top \mathbf{Y}$']
    for i, (M, t) in enumerate(zip([D, Q], titles)):
        ax = plt.subplot(gs[i])
        plot_matrix(M,
                    ax=ax,
                    labels=point_labels,
                    which_labels='both',
                    labels_palette=palette,
                    colorbar_labelsize=15)
        plt_title = ax.set_title(t, fontsize=25)
        plt_title.set_position((0.5, 1.07))

    ax = plt.subplot(gs[2])
    plot_matrix(Y,
                ax=ax,
                labels=point_labels,
                which_labels='vertical',
                labels_palette=palette,
                colorbar_labelsize=15)
    title_y = r'$\mathbf{Y}^\top$'
    plt_title = ax.set_title(title_y, fontsize=25)
    plt_title.set_position((0.5, 1.07))

    ax = plt.subplot(gs[3])
    plot_images_embedded(embedding,
                         img_getter,
                         labels=labels,
                         subsampling=subsampling,
                         zoom=zoom,
                         palette=palette,
                         ax=ax)

    plt.savefig('{}{}.pdf'.format(dir_name, filename), dpi=300)

    plt.figure()
    plot_images_embedded(embedding,
                         img_getter,
                         labels=labels,
                         subsampling=subsampling,
                         zoom=zoom,
                         palette=palette)
    plt.savefig('{}{}_embedding.pdf'.format(dir_name, filename), dpi=300)

    # pdf_file_name = '{}{}_plot_{}_on_data_{}{}'
    # for i in range(Y.shape[1]):
    #     plt.figure()
    #     plot_bumps_on_data(embedding, [Y[:, i]])
    #     plt.savefig(pdf_file_name.format(dir_name, filename, 'Y', i, '.png'),
    #                 dpi=300)
    #     plt.close()

    pdf_file_name = '{}{}_plot_{}_on_data_{}'
    plt.figure()
    # bump_locs = np.linspace(0, Y.shape[1], num=6, endpoint=False, dtype=np.int)
    bump_locs = [1, 6, 31, 40, 43, 47, 55]
    plot_bumps_on_data(embedding, [Y[:, i] for i in bump_locs], palette='Set1')
    # plt.title('Receptive fields', fontsize=25)
    plt.savefig(pdf_file_name.format(dir_name, filename, 'Y', 'multiple.png'),
                dpi=300)

    pdf_file_name = '{}{}_plot_{}_1d{}'
    plt.figure()
    _, ax = plt.subplots(1, 1)
    plot_bumps_1d(Y, subsampling=5, labels=point_labels, ax=ax)
    ax.set_yticks([])
    ax.set_title('Receptive fields', fontsize=25)
    plt.savefig(pdf_file_name.format(dir_name, filename, 'Y', '.pdf'), dpi=300)
Example No. 25
def expandable_ttest(
    df,
    colorset=QUALITATIVE_COLORSET,
    compare="Treatment",
    comparisons={"Period [days]": []},
    datacolumn_label="Sucrose Preference Ratio",
    legend_loc="best",
    rename_treatments={},
    bp_style=True,
    save_as=False,
):
    """High-level interface for plotting of one or multiple related t-tests.

	Parameters
	----------

	df : {pandas.Dataframe, string}
	Pandas Dataframe containing the experimental data, or path pointing to a csv containing such data.

	compare : string, optional
	Which parameter to categorize the comparison by. Must be a column name from df.

	comparisons : dict, optional
	A dictionary, the key of which indicates which df column to generate comparison instances from. If only a subset of the available rows is to be included in the comparison, the dictionary value needs to be a list of acceptable values in the column given by the key.

	datacolumn_label : string, optional
	A column name from df, the values in which column give the data to plot.

	legend_loc : string, optional
	Where to place the legend on the figure.

	rename_treatments : dict, optional
	Dictionary with strings as keys and values used to map treatment names onto new strings.

	bp_style : bool, optional
	Whether to apply the default behaviopy style.

	Notes
	-----

	Seaborn's `sns.swarmplot()` does not read rcParams by itself, so we need to pass it `size=rcParams['lines.markersize']` to correctly set the marker size.
	"""

    try:
        if isinstance(df, basestring):
            df = path.abspath(path.expanduser(df))
            df = pd.read_csv(df)
    except NameError:
        if isinstance(df, str):
            df = path.abspath(path.expanduser(df))
            df = pd.read_csv(df)

    comparison_instances_label = list(comparisons.keys())[0]
    comparison_instances = list(comparisons.values())[0]
    if comparison_instances:
        # keep only rows whose comparison column takes one of the requested values
        df = df[df[comparison_instances_label].isin(comparison_instances)]

    if rename_treatments:
        for key in rename_treatments:
            df.loc[df["Treatment"] == key,
                   "Treatment"] = rename_treatments[key]
        df = control_first_reordering(df, "Treatment")

    if bp_style:
        sns.set_style("white", {'legend.frameon': True})
        plt.style.use(u'seaborn-darkgrid')
        plt.style.use(u'ggplot')

    sns.swarmplot(
        x=comparison_instances_label,
        y=datacolumn_label,
        hue=compare,
        data=df,
        palette=sns.color_palette(colorset),
        split=True,
        size=rcParams['lines.markersize'],
    )
    plt.legend(loc=legend_loc, frameon=True)

    add_significance(df,
                     datacolumn_label,
                     compare=compare,
                     over=comparison_instances_label)

    if save_as:
        plt.savefig(path.abspath(path.expanduser(save_as)),
                    bbox_inches='tight')
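
A hypothetical call to expandable_ttest with a small long-format DataFrame using the default column names; the module-level QUALITATIVE_COLORSET, add_significance and control_first_reordering helpers are assumed to be available from the package this snippet belongs to:

import pandas as pd

df = pd.DataFrame({
    'Treatment': ['Control', 'Control', 'Drug', 'Drug'] * 4,
    'Period [days]': [7, 14] * 8,
    'Sucrose Preference Ratio': [0.62, 0.71, 0.80, 0.85,
                                 0.65, 0.70, 0.78, 0.88,
                                 0.60, 0.69, 0.82, 0.84,
                                 0.64, 0.73, 0.79, 0.86],
})

expandable_ttest(df,
                 compare='Treatment',
                 comparisons={'Period [days]': []},
                 datacolumn_label='Sucrose Preference Ratio',
                 save_as='sucrose_preference.pdf')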
Example No. 26
def _plotInitializationTime2D(plotData, loadFilePath, simLength, timeStep, nTargets):
    timeArray = np.arange(0, simLength, timeStep)
    for M_init, d1 in plotData.items():
        for N_init, d2 in d1.items():
            figure = plt.figure(figsize=(figureWidth, fullPageHeight), dpi=600)

            ax11 = figure.add_subplot(311)
            ax12 = figure.add_subplot(312)
            ax13 = figure.add_subplot(313)

            sns.set_style(style='white')
            savePath = _getSavePath(loadFilePath, "Time({0:}-{1:})".format(M_init, N_init))
            cpfmList = []
            falseCPFMlist = []
            accFalseTrackList = []
            for k, (lambda_phi, d3) in enumerate(d2.items()):
                for j, (P_d, (correctInitTimeLog, falseInitTimeLog)) in enumerate(d3.items()):
                    falsePFM = np.zeros_like(timeArray)
                    pmf = np.zeros_like(timeArray)
                    falseTrackDelta = np.zeros_like(timeArray)
                    for i, time in enumerate(timeArray):
                        if str(time) in correctInitTimeLog:
                            pmf[i] = correctInitTimeLog[str(time)]
                        if str(time) in falseInitTimeLog:
                            falsePFM[i] = falseInitTimeLog[str(time)][0]
                            falseTrackDelta[i] = falseInitTimeLog[str(time)][1]
                    cpmf = np.cumsum(pmf) / float(nTargets)
                    falseCPFM = np.cumsum(falsePFM)
                    falseTrackDelta = np.cumsum(falseTrackDelta)
                    cpfmList.append((P_d, lambda_phi, cpmf))
                    falseCPFMlist.append((P_d, lambda_phi, falseCPFM))
                    accFalseTrackList.append((P_d, lambda_phi, falseTrackDelta))
            cpfmList.sort(key=lambda tup: float(tup[1]))
            cpfmList.sort(key=lambda tup: float(tup[0]), reverse=True)

            falseCPFMlist.sort(key=lambda tup: float(tup[1]))
            falseCPFMlist.sort(key=lambda tup: float(tup[0]), reverse=True)

            accFalseTrackList.sort(key=lambda tup: float(tup[1]))
            accFalseTrackList.sort(key=lambda tup: float(tup[0]), reverse=True)

            pdSet = set()
            lambdaPhiSet = set()
            for P_d, lambda_phi, cpmf in cpfmList:
                if P_d not in pdSet:
                    lambdaPhiSet.clear()
                pdSet.add(P_d)
                lambdaPhiSet.add(lambda_phi)
                ax11.plot(timeArray,
                        cpmf,
                        label="$P_D$ = {0:}, $\lambda_\phi$ = {1:}".format(P_d, float(lambda_phi)),
                        c=colors[len(pdSet)-1],
                        linestyle=linestyleList[len(lambdaPhiSet)-1],
                        linewidth=linewidth)

            pdSet = set()
            lambdaPhiSet = set()
            for P_d, lambda_phi, cpmf in falseCPFMlist:
                if P_d not in pdSet:
                    lambdaPhiSet.clear()
                pdSet.add(P_d)
                lambdaPhiSet.add(lambda_phi)
                ax12.semilogy(timeArray,
                        cpmf+(1e-10),
                        label="$P_D$ = {0:}, $\lambda_\phi$ = {1:}".format(P_d, float(lambda_phi)),
                        c=colors[len(pdSet)-1],
                        linestyle=linestyleList[len(lambdaPhiSet)-1],
                        linewidth=linewidth)

            pdSet = set()
            lambdaPhiSet = set()
            for P_d, lambda_phi, accFalseTrack in accFalseTrackList:
                if P_d not in pdSet:
                    lambdaPhiSet.clear()
                pdSet.add(P_d)
                lambdaPhiSet.add(lambda_phi)
                ax13.plot(timeArray,
                          accFalseTrack,
                         label="$P_D$ = {0:}, $\lambda_\phi$ = {1:}".format(P_d, float(lambda_phi)),
                         c=colors[len(pdSet) - 1],
                         linestyle=linestyleList[len(lambdaPhiSet) - 1],
                            linewidth=linewidth)

            ax11.set_xlabel("Time [s]", fontsize=labelFontsize)
            ax11.set_ylabel("Average cpfm", fontsize=labelFontsize)
            ax11.set_title("Cumulative Probability Mass Function", fontsize=titleFontsize)
            ax11.legend(loc=4, ncol=len(pdSet), fontsize=legendFontsize)
            ax11.grid(False)
            ax11.set_xlim(0, simLength)
            ax11.set_ylim(0,1)
            ax11.xaxis.set_ticks(np.arange(0,simLength+15, 15))
            ax11.tick_params(labelsize=labelFontsize)
            sns.despine(ax=ax11, offset=0)

            ax12.set_xlabel("Time [s]", fontsize=labelFontsize)
            ax12.set_ylabel("Average number of tracks", fontsize=labelFontsize)
            ax12.set_title("Accumulative number of erroneous tracks", fontsize=titleFontsize)
            ax12.grid(False)
            ax12.set_xlim(0, simLength)
            ax12.set_ylim(1e-3,1000)
            ax12.xaxis.set_ticks(np.arange(0,simLength+15, 15))
            ax12.tick_params(labelsize=labelFontsize)
            sns.despine(ax=ax12, offset=0)

            ax13.set_xlabel("Time [s]", fontsize=labelFontsize)
            ax13.set_ylabel("Average number of tracks", fontsize=labelFontsize)
            ax13.set_title("Number of erroneous tracks alive", fontsize=titleFontsize)
            ax13.grid(False)
            ax13.set_xlim(0, simLength)
            ax13.set_ylim(-0.02, max(1,ax13.get_ylim()[1]))
            ax13.xaxis.set_ticks(np.arange(0,simLength+15, 15))
            ax13.tick_params(labelsize=labelFontsize)
            sns.despine(ax=ax13, offset=0)

            figure.tight_layout(pad=0.8, h_pad=0.8, w_pad=0.8)
            figure.savefig(savePath)
            figure.clf()

            plt.close()
Example No. 27
def forced_swim_timecourse(
    df,
    bp_style=True,
    colorset=QUALITATIVE_COLORSET,
    datacolumn_label="Immobility Ratio",
    legend_loc="best",
    plotstyle="tsplot",
    rename_treatments={},
    time_label="interval [1 min]",
    save_as=False,
):
    """Plot timecourse of forced swim measurements.

	Parameters
	----------

	df : {pandas.Dataframe, string}
	Pandas Dataframe containing the experimental data, or path pointing to a csv containing such data.

	bp_style : bool, optional
	Whether to apply the default behaviopy style.

	datacolumn_label : string, optional
	A column name from df, the values in which column give the data to plot.

	legend_loc : string, optional
	Where to place the legend on the figure.

	plotstyle : {"pointplot", "tsplot"}, optional
	Which kind of plot to draw: a seaborn pointplot or a tsplot of the timecourse.

	rename_treatments : dict, optional
	Dictionary with strings as keys and values used to map treatment names onto new strings.

	time_label : string, optional
	A column name from df, the values in which column give the time points of the data.
	"""

    try:
        if isinstance(df, basestring):
            df = path.abspath(path.expanduser(df))
            df = pd.read_csv(df)
    except NameError:
        if isinstance(df, str):
            df = path.abspath(path.expanduser(df))
            df = pd.read_csv(df)

    for key in rename_treatments:
        df.loc[df["Treatment"] == key, "Treatment"] = rename_treatments[key]
    df = control_first_reordering(df, "Treatment")
    if bp_style:
        sns.set_style("white", {'legend.frameon': True})
        plt.style.use(u'seaborn-darkgrid')
        plt.style.use(u'ggplot')
    if plotstyle == "tsplot":
        myplot = sns.tsplot(time=time_label,
                            value=datacolumn_label,
                            condition="Treatment",
                            unit="Identifier",
                            data=df,
                            err_style="unit_traces",
                            color=sns.color_palette(colorset))
        myplot.set_xticks(list(set(df[time_label])))
    elif plotstyle == "pointplot":
        sns.pointplot(x=time_label,
                      y=datacolumn_label,
                      hue="Treatment",
                      data=df,
                      palette=sns.color_palette(colorset),
                      legend_out=False,
                      dodge=0.1)
    plt.legend(loc=legend_loc, frameon=True)

    if save_as:
        plt.savefig(path.abspath(path.expanduser(save_as)),
                    bbox_inches='tight')
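
A hypothetical call to forced_swim_timecourse with a small long-format DataFrame, assuming the legacy seaborn release (with sns.tsplot) this helper targets and the package-level QUALITATIVE_COLORSET default:

import pandas as pd

df = pd.DataFrame({
    'Identifier': ['m1', 'm1', 'm2', 'm2', 'm3', 'm3', 'm4', 'm4'],
    'Treatment': ['Control', 'Control', 'Control', 'Control',
                  'Drug', 'Drug', 'Drug', 'Drug'],
    'interval [1 min]': [1, 2, 1, 2, 1, 2, 1, 2],
    'Immobility Ratio': [0.30, 0.45, 0.28, 0.50, 0.20, 0.25, 0.22, 0.27],
})

forced_swim_timecourse(df, save_as='forced_swim_timecourse.pdf')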
Example No. 28
def test_toy_embedding(X,
                       Y,
                       target_dim,
                       filename,
                       palette='hls',
                       elev_azim=None):
    print('--------\n', filename)

    embedding = spectral_embedding(Y,
                                   target_dim=target_dim,
                                   gramian=False,
                                   discard_first=False)

    D = dot_matrix(X)
    Q = Y.dot(Y.T)
    labels = np.arange(len(X))

    sns.set_style('white')

    plt.figure(figsize=(20, 5), tight_layout=True)
    gs = gridspec.GridSpec(1, 5)

    if X.shape[1] == 3:
        ax = plt.subplot(gs[0], projection='3d')
    else:
        ax = plt.subplot(gs[0])
    plot_data_embedded(X, ax=ax, palette=palette, elev_azim=elev_azim)
    ax.set_title('Input dataset', fontsize='xx-large')

    titles = [
        r'Input Gramian $\mathbf{X}^\top \mathbf{X}$',
        r'$\mathbf{Y}^\top \mathbf{Y}$'
    ]
    for i, (M, t) in enumerate(zip([D, Q], titles)):
        ax = plt.subplot(gs[i + 1])
        plot_matrix(M,
                    ax=ax,
                    labels=labels,
                    which_labels='both',
                    labels_palette=palette,
                    colorbar_labelsize=15)
        plt_title = ax.set_title(t, fontsize=25)
        plt_title.set_position((0.5, 1.07))

    ax = plt.subplot(gs[3])
    plot_matrix(Y,
                ax=ax,
                labels=labels,
                which_labels='vertical',
                labels_palette=palette,
                colorbar_labelsize=15)
    title_y = r'$\mathbf{Y}^\top$'
    plt_title = ax.set_title(title_y, fontsize=25)
    plt_title.set_position((0.5, 1.07))

    if target_dim == 2:
        ax = plt.subplot(gs[4])
    if target_dim == 3:
        ax = plt.subplot(gs[4], projection='3d')
    plot_data_embedded(embedding, ax=ax, palette=palette)
    ax.set_title('2D embedding', fontsize=25)
    plt.savefig('{}{}.pdf'.format(dir_name, filename), dpi=300)

    fig = plt.figure()
    if target_dim == 2:
        ax = fig.add_subplot(111)
    if target_dim == 3:
        ax = fig.add_subplot(111, projection='3d')
    plot_data_embedded(embedding, ax=ax, palette=palette)
    plt.savefig('{}{}_embedding.pdf'.format(dir_name, filename), dpi=300)

    # pdf_file_name = '{}{}_plot_{}_on_data_{}{}'
    # for i in range(Y.shape[1]):
    #     plt.figure()
    #     plot_bumps_on_data(embedding, [Y[:, i]])
    #     plt.savefig(pdf_file_name.format(dir_name, filename, 'Y', i, '.png'),
    #                 dpi=300)
    #     plt.close()

    pdf_file_name = '{}{}_plot_{}_on_data_{}'
    fig = plt.figure()
    if target_dim == 2:
        ax = fig.add_subplot(111)
    if target_dim == 3:
        ax = fig.add_subplot(111, projection='3d')
    plot_bumps_on_data(embedding, [Y[:, i] for i in range(0, Y.shape[1], 10)],
                       ax=ax)
    plt.savefig(pdf_file_name.format(dir_name, filename, 'Y', 'multiple.pdf'),
                dpi=300)

    pdf_file_name = '{}{}_plot_{}_1d{}'
    _, ax = plt.subplots(1, 1)
    plot_bumps_1d(Y, subsampling=10, labels=labels, ax=ax)
    ax.set_yticks([])
    ax.set_title(r'Rows of $\mathbf{Y}$', fontsize=25)
    plt.savefig(pdf_file_name.format(dir_name, filename, 'Y', '.pdf'), dpi=300)
Example No. 29
	def plot(self, show_samples, show_loadings, sbrn_plt):

		# Normalizer and Delta perform badly
		# They flatten out all difference in a PCA plot
		
		pca = PCA(n_components=self.n_components)
		X_bar = pca.fit_transform(self.X)
		var_exp = pca.explained_variance_ratio_
		var_pc1 = np.round(var_exp[0]*100, decimals=2)
		var_pc2 = np.round(var_exp[1]*100, decimals=2)
		explained_variance = np.round(sum(pca.explained_variance_ratio_)*100, decimals=2)
		comps = pca.components_
		comps = comps.transpose()
		loadings = pca.components_.transpose()
		vocab_weights_p1 = sorted(zip(self.features, comps[:,0]), key=lambda tup: tup[1], reverse=True)
		vocab_weights_p2 = sorted(zip(self.features, comps[:,1]), key=lambda tup: tup[1], reverse=True)

		if sbrn_plt == False:

			# Generate color dictionary
			color_dict = {author:index for index, author in enumerate(sorted(set(self.authors)))}
			cmap = discrete_cmap(len(color_dict), base_cmap='brg')

			if show_samples == True:

				fig = plt.figure(figsize=(8,6))
				ax = fig.add_subplot(111)
				x1, x2 = X_bar[:,0], X_bar[:,1]

				# If anything needs to be invisible in plot, add to exclusion_list

				ax.scatter(x1, x2, 100, edgecolors='none', facecolors='none', cmap='rainbow')
				for index, (p1, p2, a, title) in enumerate(zip(x1, x2, self.authors, self.titles)):
					ax.scatter(p1, p2, marker='o', color=cmap(color_dict[a]), s=20)
					ax.text(p1, p2, title.split('_')[-1], color='black', fontdict={'size': 5})

				# Legend settings (code for making a legend)

				collected_patches = []
				for author in set(self.authors):
					legend_patch = mpatches.Patch(color=cmap(color_dict[author]), label=author.split('-')[0])
					collected_patches.append(legend_patch)
				plt.legend(handles=collected_patches, fontsize=7)

				ax.set_xlabel('Principal Component 1 \n \n Explained Variance: {}% \n Sample Size: {} words/sample \n Number of Features: {} features'.format(str(explained_variance), str(self.sample_size), str(len(self.features))), fontdict={'size': 7})
				ax.set_ylabel('Principal Component 2', fontdict={'size': 7})

				if show_loadings == True:
					ax2 = ax.twinx().twiny()
					l1, l2 = loadings[:,0], loadings[:,1]
					ax2.scatter(l1, l2, 100, edgecolors='none', facecolors='none');
					for x, y, l in zip(l1, l2, self.features):
						ax2.text(x, y, l, ha='center', va="center", color="black",
						fontdict={'family': 'Arial', 'size': 6})

					# Align axes

					# Important to adjust margins first when function words fall outside plot
					# This is due to the axes aligning (def align).

					ax2.margins(x=0.14, y=0.14)
					align_xaxis(ax, 0, ax2, 0)
					align_yaxis(ax, 0, ax2, 0)
					plt.axhline(y=0, ls="--", lw=0.5, c='0.75')
					plt.axvline(x=0, ls="--", lw=0.5, c='0.75')			
					plt.tight_layout()
					plt.show()
				
				elif show_loadings == False:

					plt.axhline(y=0, ls="--", lw=0.5, c='0.75')
					plt.axvline(x=0, ls="--", lw=0.5, c='0.75')

					plt.tight_layout()
					plt.show()

					# Converting PDF to PNG, use pdftoppm in terminal and -rx -ry for resolution settings

				fig.savefig(os.path.dirname(os.getcwd()) + "/pca.pdf", transparent=True, format='pdf')

			elif show_samples == False:

				fig = plt.figure(figsize=(8, 6))
				ax2 = fig.add_subplot(111)
				l1, l2 = loadings[:,0], loadings[:,1]
				ax2.scatter(l1, l2, 100, edgecolors='none', facecolors='none')
				for x, y, l in zip(l1, l2, self.features):
					ax2.text(x, y, l, ha='center', va='center', color='black',
						fontdict={'family': 'Arial', 'size': 6})

				ax2.set_xlabel('PC1')
				ax2.set_ylabel('PC2')

				align_xaxis(ax, 0, ax2, 0)
				align_yaxis(ax, 0, ax2, 0)

				plt.axhline(y=0, ls="--", lw=0.5, c='0.75')
				plt.axvline(x=0, ls="--", lw=0.5, c='0.75')

				plt.tight_layout()
				plt.show()
				fig.savefig(os.path.dirname(os.getcwd()) + "/pca.pdf", bbox_inches='tight', transparent=True, format='pdf')

				# Converting PDF to PNG, use pdftoppm in terminal and -rx -ry for resolution settings

		else:

			data = [(title.split("_")[0], author, pc1, pc2) for [pc1, pc2], title, author in zip(X_bar, self.titles, self.authors)]
			df = pd.DataFrame(data, columns=['title', 'author', 'PC1', 'PC2'])

			# Get the x in an array
			sns.set_style('darkgrid')
			sns_plot = sns.lmplot('PC1', 'PC2', data=df, fit_reg=False, hue="author",
			           scatter_kws={"marker": "+","s": 100}, markers='o', legend=False)

			plt.legend(loc='upper right')
			plt.tight_layout()
			plt.show()

			sns_plot.savefig(os.path.dirname(os.getcwd()) + "/pca.pdf")
Example No. 30
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from numpy.random import randn
from pandas import Series
from pandas import DataFrame
from io import StringIO
from scipy import stats
from datetime import datetime
from pandas_datareader import DataReader
import requests
#PLOTTING
import matplotlib as mpl
import seaborn.apionly as sns

sns.set_style('whitegrid')
#get data from the web
url = "http://elections.huffingtonpost.com/pollster/2012-general-election-romney-vs-obama.csv"
source = requests.get(url).text
poll_data = StringIO(source)
poll_df = pd.read_csv(poll_data)

print(poll_df.head())
poll_df.info()
sns.catplot(x='Affiliation', kind='count', data=poll_df)
sns.catplot(x='Affiliation', kind='count', hue='Population', data=poll_df)
# strong showing of likely and registered voters, so the poll data should be a good reflection of the populations polled
avg = pd.DataFrame(poll_df.mean())
print(avg.head())
# We don't need the number of observations
avg.drop('Number of Observations', axis=0, inplace=True)
Example No. 31
dcd_file = sys.argv[3]
dat_file = sys.argv[4] + '.dat'
out_file = sys.argv[4] + '.png'

u = mda.Universe(psf_file, dcd_file)
ref = mda.Universe(psf_file, pdb_file) # default the 0th frame

R = MDAnalysis.analysis.rms.RMSD(u, ref, select = "backbone", filename=dat_file)

R.run()
R.save()

rmsd = R.rmsd.T
time = rmsd[1]
import seaborn.apionly as sns
#matplotlib inline
plt.style.use('ggplot')
rcParams.update({'figure.autolayout': True})
sns.set_style('ticks')
fig = plt.figure(figsize=(5,3))
ax = fig.add_subplot(111)
color = sns.color_palette()[2]
#ax.fill_between(ca.residues.resids, rmsf, alpha=0.3, color=color)
ax.plot(time, rmsd[2], lw=1, color=color)
sns.despine(ax=ax)
ax.set_xlabel("Time (ps)")
ax.set_ylabel(r"RMSD ($\AA$)")
ax.set_xlim(0, max(time))
ax.set_ylim(0, 10)
fig.savefig(out_file)
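# Aside (an assumption about MDAnalysis 2.x): newer releases expose the data as
# R.results.rmsd and drop RMSD.save(); numpy.savetxt(dat_file, R.results.rmsd)
# is a drop-in replacement for the save step above.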
Example no. 32
0
def test_grid(n_clusters=16, use_copositive=False):
    X = np.mgrid[0:16, 0:16]
    X = X.reshape((len(X), -1)).T
    labels = np.arange(len(X))

    # X_norm = X - np.mean(X, axis=0)
    # cov = X_norm.T.dot(X_norm)
    # X_norm /= np.trace(cov.dot(cov)) ** 0.25
    #
    # alpha = 0.001
    # plt.matshow(np.maximum(X_norm.dot(X_norm.T) - alpha, 0), cmap='gray_r')
    #
    # from scipy.spatial.distance import pdist, squareform
    # plt.matshow(squareform(pdist(X)), cmap='gray_r')
    #
    # return

    rank = len(X)
    print(rank)
    if use_copositive:
        beta = n_clusters / len(X)
        Y = copositive_burer_monteiro(X,
                                      alpha=0.003,
                                      beta=beta,
                                      rank=rank,
                                      tol=1e-5,
                                      constraint_tol=1e-5,
                                      verbose=True)
        name = 'grid_copositive_bm'
    else:
        Y = sdp_km_burer_monteiro(X,
                                  n_clusters,
                                  rank=rank,
                                  tol=1e-6,
                                  verbose=True)
        name = 'grid_sdpkm_bm'

    Q = Y.dot(Y.T)

    idx = np.argsort(np.argmax(Y, axis=0))
    Y = Y[:, idx]

    sns.set_style('white')

    plt.figure(figsize=(12, 4.7), tight_layout=True)
    gs = gridspec.GridSpec(1, 3)

    ax = plt.subplot(gs[0])
    plot_data_embedded(X, palette='hls', ax=ax)
    plt_title = ax.set_title('Input dataset', fontsize='xx-large')
    # plt_title.set_position((0.5, 1.07))

    ax = plt.subplot(gs[1])
    plot_matrix(Q,
                ax=ax,
                labels=labels,
                which_labels='both',
                labels_palette='hls')
    plt_title = ax.set_title(r'$\mathbf{Q}$', fontsize='xx-large')
    plt_title.set_position((0.5, 1.07))

    ax = plt.subplot(gs[2])
    plot_matrix(Y,
                ax=ax,
                labels=labels,
                which_labels='vertical',
                labels_palette='hls')
    plt_title = ax.set_title(r'$\mathbf{Y}^\top$', fontsize='xx-large')
    plt_title.set_position((0.5, 1.07))

    plt.savefig('{}{}.pdf'.format(dir_name, name))

    pdf_file_name = '{}{}_plot_{}_on_data_{}{}'
    for i in range(Y.shape[1]):
        plt.figure()
        plot_bumps_on_data(X, [Y[:, i]])
        plt.savefig(pdf_file_name.format(dir_name, name, 'Y', i, '.png'),
                    dpi=300,
                    bbox_inches='tight')
        plt.close()

    pdf_file_name = '{}{}_plot_{}_on_data_{}'
    plt.figure()
    bumps_locs = np.random.randint(Y.shape[1], size=6)  # random_integers is removed and returned 1-based values
    plot_bumps_on_data(X, [Y[:, i] for i in bumps_locs], palette='Set1')
    plt.savefig(pdf_file_name.format(dir_name, name, 'Y', 'multiple.png'),
                dpi=300,
                bbox_inches='tight')

    Y_aligned = align_bumps(Y, Y.shape[1] // 2)

    _, ax = plt.subplots(1, 1)
    plot_matrix(Y_aligned, ax=ax)
    plt_title = ax.set_title(r'Aligned $\mathbf{Y}^\top$', fontsize='xx-large')
    plt_title.set_position((0.5, 1.07))
    plt.savefig('{}{}_Y_aligned_2d.pdf'.format(dir_name, name))

    _, ax = plt.subplots(1, 1)
    ax.plot(Y_aligned)
    ax.set_xticks([])
    ax.set_yticks([])
    ax.set_title(r'Receptive fields', fontsize='xx-large')
    plt.savefig('{}{}Y_aligned_1d.pdf'.format(dir_name, name))

    pos = np.arange(len(Y))
    median = np.median(Y_aligned, axis=1)
    mu = np.mean(Y_aligned, axis=1)
    sigma = np.std(Y_aligned, axis=1)

    _, ax = plt.subplots(1, 1)
    plt_mean = ax.plot(pos, mu, color='#377eb8')
    ax.fill_between(pos,
                    np.maximum(mu - 3 * sigma, 0),
                    mu + 3 * sigma,
                    alpha=0.3,
                    color='#377eb8')
    plt_median = ax.plot(pos, median, '-.', color='#e41a1c')
    ax.set_xticks([])
    ax.set_yticks([])
    plt_aux = ax.fill(np.nan, np.nan, '#377eb8', alpha=0.3, linewidth=0)  # np.NaN alias was removed in NumPy 2.0
    ax.legend([(plt_mean[0], plt_aux[0]), plt_median[0]],
              [r'Mean $\pm$ 3 STD', 'Median'],
              loc='upper left',
              fontsize='xx-large')
    ax.set_title(r'Receptive fields summary', fontsize='xx-large')
    plt.savefig('{}{}Y_aligned_1d_summary.pdf'.format(dir_name, name))
Example no. 33
0
def plot_free_energy(data,
                     ax=None,
                     obs=0,
                     temperature=300.,
                     n_samples=None,
                     pi=None,
                     bw='scott',
                     gridsize=30,
                     cut=3,
                     clip=None,
                     color='beryl',
                     shade=True,
                     alpha=0.5,
                     cmap='bone',
                     vmin=None,
                     vmax=None,
                     n_levels=10,
                     clabel=False,
                     clabel_kwargs=None,
                     xlabel=None,
                     ylabel=None,
                     labelsize=14,
                     random_state=None):
    """
    Plot free energy of observable(s) in kilocalories per mole.

    Parameters
    ----------
    data : ndarray (nsamples, ndim)
        The samples. This should be a 1- or 2-dimensional array. For a 1-D
        array this results in 1-D kernel density plot. For a 2-D array, this
        generates a 2-D contour plot.
    ax : matplotlib axis, optional
        matplotlib figure axis
    obs : int or tuple, optional (default: 0)
        Observables to plot.
    temperature : float, optional (default: 300.0)
        Simulation temperature in kelvin.
    n_samples : int, optional
        Number of points to subsample from original data.
    pi : array-like, optional
        Equilibrium ensemble weights for each observation.
    bw : {'scott' | 'silverman' | scalar | pair of scalars}, optional
        Name of reference method to determine kernel size, scalar factor, or
        scalar for each dimension of the bivariate plot.
    gridsize : int, optional
        Number of discrete points in the evaluation grid per dimension.
    cut : scalar, optional (default: 3)
        Draw the estimate to cut * bw from the extreme data points.
    clip : pair of scalars, or pair of pair of scalars, optional
        Lower and upper bounds for datapoints used to fit KDE. Can provide a
        pair of (low, high) bounds for bivariate plots.
    color : str, optional (default: 'beryl')
        Color of the univariate KDE curve.
    shade : bool, optional
        If True, shade in the area over the KDE curve (or draw with filled
        contours when data is bivariate).
    alpha : float, optional  (default: 0.5)
        Opacity of shaded area.
    cmap : str or matplotlib colormap, optional (default: 'bone')
        Colormap to use in the filled contour plot.
    vmin : float, optional
        The minimum value used in contour plot. If None the minimum value
        of the KDE is used.
    vmax : float, optional
        The maximum value used in contour plot. If None the median value
        of the KDE is used.
    n_levels : int, optional (default: 10)
        Number of contour levels to include.
    clabel : bool, optional (default: False)
        Adds labels to contours in the contour plot.
    clabel_kwargs : dict, optional
        Arguments to pass to matplotlib clabel.
    xlabel : str, optional
        x-axis label
    ylabel : str, optional
        y-axis label
    labelsize : int, optional (default: 14)
        x- and y-label font size
    random_state : integer or numpy.RandomState, optional
        The generator used to initialize the centers. If an integer is
        given, it fixes the seed. Defaults to the global numpy random
        number generator

    Returns
    -------
    ax : matplotlib axis
        matplotlib figure axis

    """

    sns.set_style('whitegrid')

    if ax is None:
        ax = pp.gca()

    if pi is not None and sum(pi) > 1:
        pi /= sum(pi)

    if isinstance(obs, int):
        obs = (obs, )

    if isinstance(random_state, (int, type(None))):
        random_state = np.random.RandomState(random_state)

    prune = data[:, obs]
    if n_samples:
        idx = random_state.choice(range(data.shape[0]), size=n_samples, p=pi)
        prune = prune[idx, :]

    if prune.shape[1] == 1:

        if clip is None:
            clip = (-np.inf, np.inf)

        X, Z = _scipy_univariate_kde(prune[:, 0], bw, gridsize, cut, clip)

        Z = _thermo_transform(Z, temperature)

        ax.plot(X, Z - Z.min(), color=color)

        ax.fill_between(X,
                        Z - Z.min(),
                        Z.max() - Z.min(),
                        facecolor=color,
                        alpha=alpha)

    elif prune.shape[1] == 2:

        if clip is None:
            clip = [(-np.inf, np.inf), (-np.inf, np.inf)]
        elif np.ndim(clip) == 1:
            clip = [clip, clip]

        X, Y, Z = _scipy_bivariate_kde(prune[:, 0], prune[:, 1], bw, gridsize,
                                       cut, clip)

        Z = _thermo_transform(Z, temperature)

        if not vmin:
            vmin = np.percentile(Z, 0)
        if not vmax:
            vmax = np.percentile(Z, 50)

        if shade:
            ax.contourf(X,
                        Y,
                        Z - Z.min(),
                        cmap=pp.get_cmap(cmap),
                        levels=np.linspace(vmin, vmax, n_levels),
                        alpha=alpha,
                        zorder=1,
                        vmin=vmin,
                        vmax=vmax)
        cs = ax.contour(X,
                        Y,
                        Z - Z.min(),
                        cmap=pp.get_cmap('bone_r'),
                        levels=np.linspace(vmin, vmax, n_levels),
                        alpha=1,
                        zorder=2,
                        vmin=vmin,
                        vmax=vmax)

        if clabel:
            if not clabel_kwargs:
                clabel_kwargs = {}

            ax.clabel(cs, **clabel_kwargs)

        ax.grid(zorder=0)

    else:
        raise ValueError('obs cannot be greater than size 2')

    if xlabel:
        ax.set_xlabel(xlabel, size=labelsize)

    if ylabel:
        ax.set_ylabel(ylabel, size=labelsize)

    return ax
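# Hedged usage sketch (illustrative only; the random data and file name below are
# not from the original module, and its own imports/helpers are assumed available):
# pick two observables from a feature array and draw their 2-D free-energy surface.
import numpy as np
import matplotlib.pyplot as pp

samples = np.random.randn(5000, 3)  # stand-in for real trajectory features
ax = plot_free_energy(samples, obs=(0, 1), temperature=300.,
                      cmap='viridis', n_levels=8,
                      xlabel='feature 0', ylabel='feature 1')
pp.savefig('free_energy.png', dpi=300, bbox_inches='tight')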
Example no. 34
0
@email [email protected]
@copyright CL all rights reserved
@created Thu Dec 23 2018 15:10 GMT-0800 (PST)
@last-modified Wed Feb 27 2019 19:20 GMT-0800 (PST)
.:.:.::.:.:.:.:.::.:.:.:.:.::.:.:.:.:.::.:.:.:.:.::.:.:'''

from tabulate import tabulate
import matplotlib
import os
import datetime
from matplotlib import pyplot as plt
from mpl_finance import candlestick_ohlc
import matplotlib.dates as mdates
import numpy as np
import seaborn as sns  # seaborn.apionly was removed in newer seaborn releases
sns.set_style("white")
sns.set_context("poster")
import pandas as pd
import mpld3


class VisualChart:
    '''
    plot visualization
    '''
    def price_volumn(self, kline_data, save_figure):
        '''
        price_volumn: plot price and volume
        Param:
            kline_data (obj, pandas_dataframe)
        '''
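    # Hedged illustration (the original price_volumn body is truncated above): a
    # minimal candlestick-plus-volume panel using the modules imported in this
    # snippet. The column names assumed on kline_data ('date', 'open', 'high',
    # 'low', 'close', 'volume') are an inference only.
    def price_volume_sketch(self, kline_data, save_figure=None):
        fig, (ax_price, ax_vol) = plt.subplots(
            2, 1, sharex=True, gridspec_kw={'height_ratios': [3, 1]})
        dates = mdates.date2num(pd.to_datetime(kline_data['date']).values)
        quotes = list(zip(dates, kline_data['open'], kline_data['high'],
                          kline_data['low'], kline_data['close']))
        # candlesticks on top, volume bars below
        candlestick_ohlc(ax_price, quotes, width=0.6, colorup='g', colordown='r')
        ax_vol.bar(dates, kline_data['volume'], width=0.6, color='gray')
        ax_price.xaxis_date()
        fig.autofmt_xdate()
        if save_figure:
            fig.savefig(save_figure)
        return fig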
def makeFigureS1():
    nsamples = 1000
    nobjs = 5
    
    new_WP1 = getFormulations('new_WP1_thinned', nsamples, nobjs)
    colOrder = [0, 3, 4] # hydro obj, max deficit obj, flood obj
    figOrder = [3, 4, 2]
    thresholds = np.array([-25, 350.0, 2.15])
    objs = [r'$J_{Hydro}$' + ' (Gwh/day)', r'$J_{Max Def}$' + ' ' + r'$\mathregular{(m^3/s)}$', r'$J_{Flood}$' + ' (m above 11.25 m)']
    cmap = matplotlib.cm.get_cmap('Blues_r')

    p = np.zeros([nsamples])
    for i in range(nsamples):
        p[i] = 100*(i+1.0)/(nsamples+1.0)
        
    sns.set_style("darkgrid")
    fig = plt.figure()
    for i in range(len(objs)):
        ax = fig.add_subplot(2,2,figOrder[i])
        for k in range(np.shape(new_WP1.MORDMobjs)[0]):
            #if new_WP1.normObjs[k,0] == np.min(new_WP1.normObjs[:,0]):
            x = np.sort(new_WP1.MORDMobjs[k,:,colOrder[i]])
            l1, = ax.step(x,p,c=cmap(new_WP1.normObjs[k,0]))
            
        ax.plot([thresholds[i],thresholds[i]],[0,100],c='k',linewidth=2)
        ax.set_xlabel(objs[i],fontsize=18)
        ax.tick_params(axis='both',labelsize=14)
        
        if i == 0:
            ax.set_xticks(np.arange(-70,10,20))
            ax.set_xticklabels(-np.arange(-70,10,20))
            
        if i == 1:
            ax.set_xticks(np.arange(0,500,100))
            ax.set_xticklabels(np.arange(0,500,100))
            ax.set_xlim([0,450])
            
        ax.set_ylabel('Cumulative Percent\n of Sampled SOWs',fontsize=18)
    
    ax = fig.add_subplot(2,2,1)
    ax.scatter(new_WP1.baseObjs[:,0],new_WP1.baseObjs[:,2],s=200*(new_WP1.normObjs[:,1]+0.05),\
        edgecolor=cmap(new_WP1.normObjs[:,0]),facecolor=cmap(new_WP1.normObjs[:,0]))
    pt1 = ax.scatter([],[],s=200*0.05,facecolor='#6baed6',edgecolor='#6baed6')
    pt2 = ax.scatter([],[],s=200*1.05,facecolor='#6baed6',edgecolor='#6baed6')
    
    ax.set_yticks(np.arange(0,3,0.5))
    ax.set_xticks(np.arange(-46,-22,4))
    ax.set_xticklabels(-np.arange(-46,-22,4))
    ax.set_xlim([-46,-26])
    ax.tick_params(axis='both',labelsize=14)
    ax.set_xlabel(r'$J_{Hydro}$' + ' (Gwh/day)',fontsize=18)
    ax.set_ylabel(r'$J_{Flood}$' + ' (m above 11.25 m)',fontsize=18)
    ax.set_title('Performance in Base SOW',fontsize=18)
    legend = ax.legend([pt1, pt2], [str(int(np.round(np.min(new_WP1.baseObjs[:,1]),0))), \
        str(int(np.round(np.max(new_WP1.baseObjs[:,1]),0)))], scatterpoints=1, \
        title=r'$J_{Deficit^2}$'+ ' ' + r'$\mathregular{(m^3/s)^2}$', \
        fontsize=18, loc='upper right', frameon=True)
    plt.setp(legend.get_title(), fontsize=18)
    
    fig.subplots_adjust(bottom=0.25, hspace=0.3, wspace=0.3)
    sm = matplotlib.cm.ScalarMappable(cmap=matplotlib.cm.get_cmap('Blues'))
    sm.set_array([np.min(-new_WP1.baseObjs[:,0]),np.max(-new_WP1.baseObjs[:,0])])
    cbar_ax = fig.add_axes([0.1,0.1,0.8,0.05])
    cbar = fig.colorbar(sm, cax=cbar_ax,orientation='horizontal')
    cbar.ax.tick_params(labelsize=14)
    fig.axes[-1].set_xlabel(r'$J_{Hydro}$' + ' (Gwh/day) in Base SOW',fontsize=18)
    
    fig.set_size_inches([14.775, 10.0875])
    fig.savefig('FigureS1.pdf')
    fig.clf()
    
    return None