Example #1
def _get_frequency_range_manual(waterfall, f_channels):
    """Select frequency range to use with GUI."""
    fig = plt.figure(figsize=(8., 8.5), facecolor='k')
    fig.subplots_adjust(left=0.01, bottom=0.01, right=0.95, top=0.94, hspace=0)
    gs = gridspec.GridSpec(2,
                           2,
                           hspace=0,
                           height_ratios=[1, 4],
                           width_ratios=[2, 2])
    ax_text = fig.add_subplot(gs[1, 0])
    ax_wat_prof = fig.add_subplot(gs[0, 1])
    ax_wat_map = fig.add_subplot(gs[1, 1], sharex=ax_wat_prof)

    for ax in fig.axes:
        ax.axis('off')

    ax_wat_map.axis('on')
    ax_wat_map.spines['left'].set_color('white')
    ax_wat_map.tick_params(axis='y', colors='white')
    ax_wat_map.yaxis.label.set_color('white')

    # plot waterfall
    top_lim = [
        waterfall.shape[0],
    ]
    bottom_lim = [
        0,
    ]
    sub_factor = [
        1,
    ]
    left = 0
    right = waterfall.shape[1]

    plot_wat_map = ax_wat_map.imshow(waterfall,
                                     origin='lower',
                                     aspect='auto',
                                     cmap=colormap,
                                     interpolation='nearest',
                                     extent=(left, right, bottom_lim[-1] - 0.5,
                                             top_lim[-1] + 0.5))

    ax_wat_map.set_ylabel("Observing frequency (MHz)", fontsize=14)
    df = np.median(np.diff(f_channels))

    # set frequencies as label instead of channel numbers
    yticks = np.linspace(bottom_lim[-1] - 0.5, top_lim[-1] + 0.5, 9)
    yticklabels = np.round(
        np.linspace(f_channels[bottom_lim[-1]] - df / 2.,
                    f_channels[top_lim[-1] - 1] + df / 2., 9), 1)

    ax_wat_map.set_yticks(yticks)
    ax_wat_map.set_yticklabels(yticklabels, fontsize=14)

    # plot summed profile
    wat_prof = np.nansum(waterfall, axis=0)
    plot_wat_prof, = ax_wat_prof.plot(wat_prof, 'w-', linewidth=2)
    ax_wat_prof.set_ylim([wat_prof.min(), wat_prof.max()])
    ax_wat_prof.set_xlim([0, wat_prof.size])
    ax_wat_prof.set_title("Waterfall", fontsize=16, color='w', y=1.08)

    # plot instructions
    text = """
    Manual selection of
      frequency range.

    On the plot, press
      "t" to select top limit.
      "b" to select bottom limit.
      "T" to undo upper limit.
      "B" to undo lower limit.
      "s" to subband by factor 2.
      "S" to upband by factor 2.
      "q" to save and exit.

    """
    instructions = ax_text.annotate(text, (0, 1),
                                    color='w',
                                    fontsize=14,
                                    horizontalalignment='left',
                                    verticalalignment='top',
                                    linespacing=1.5)

    def subband(data):
        nfreq, nsamp = data.shape
        return np.nansum(data.reshape(-1, sub_factor[-1], nsamp), axis=1)

    # GUI
    def update_lim():
        plot_wat_map.set_data(
            subband(waterfall)[bottom_lim[-1]:top_lim[-1], ...])
        plot_wat_map.autoscale()
        plot_wat_map.set_extent((left, right, bottom_lim[-1], top_lim[-1] + 1))

        # set frequencies as label instead of channel numbers
        yticks = np.linspace(bottom_lim[-1], top_lim[-1] + 1, 9)
        sub_f_channels = f_channels.reshape(-1, sub_factor[-1]).mean(axis=1)
        yticklabels = np.round(
            np.linspace(
                sub_f_channels[bottom_lim[-1]] - df * sub_factor[-1] / 2.,
                sub_f_channels[top_lim[-1] - 1] + df * sub_factor[-1] / 2., 9),
            1)

        ax_wat_map.set_yticks(yticks)
        ax_wat_map.set_yticklabels(yticklabels, fontsize=14)

        wat_prof = np.nansum(subband(waterfall)[bottom_lim[-1]:top_lim[-1],
                                                ...],
                             axis=0)
        plot_wat_prof.set_ydata(wat_prof)
        ax_wat_prof.set_ylim([wat_prof.min(), wat_prof.max()])
        return

    def press(event):
        sys.stdout.flush()
        if event.key == "t":
            y = int(round(event.ydata))
            top_lim.append(y)
            update_lim()
        if event.key == "b":
            y = int(round(event.ydata))
            bottom_lim.append(y)
        elif event.key == "s":
            # subband factor should leave at least 8 channels
            if sub_factor[-1] * 8 != top_lim[0] * sub_factor[-1]:
                sub_factor.append(sub_factor[-1] * 2)
                top_lim[:] = [x // 2 for x in top_lim]
                bottom_lim[:] = [x // 2 for x in bottom_lim]
                update_lim()
        elif event.key == "T":
            if len(top_lim) > 1: del top_lim[-1]
            update_lim()
        elif event.key == "B":
            if len(bottom_lim) > 1: del bottom_lim[-1]
            update_lim()
        elif event.key == "S":
            if len(sub_factor) > 1:
                del sub_factor[-1]
                top_lim[:] = [x * 2 for x in top_lim]
                bottom_lim[:] = [x * 2 for x in bottom_lim]
                update_lim()
        fig.canvas.draw()
        return

    def new_cmap(event):
        # cycle to the next colormap and redraw the waterfall map
        colormap = next(colormap_list)
        plot_wat_map.set_cmap(colormap)
        fig.canvas.draw()
        return

    ax_but = plt.axes([0.01, 0.94, 0.18, 0.05])
    but = Button(ax_but, 'Change colormap', color='0.8', hovercolor='0.2')
    but.on_clicked(new_cmap)

    try:
        cursor = Cursor(ax_wat_map, color='g', linewidth=2, vertOn=False)
    except AttributeError:
        pass
    key = fig.canvas.mpl_connect('key_press_event', press)

    plt.show()
    return bottom_lim[-1] * sub_factor[-1], top_lim[-1] * sub_factor[-1]
Example #2
    (temp == 0, temp == 1, temp == 2))  # One-hot encoding (.astype(int))

#plt.figure()
#plt.get_current_fig_manager().window.wm_geometry("1400x760+20+20")
#disp2DResult(x2D, z2D,0)
#w_factor = 0.1
#draw_ellipse(m0, C0, alpha=pi0 * w_factor, color='r')
#draw_ellipse(m1, C1, alpha=pi1 * w_factor, color='g')
#draw_ellipse(m2, C2, alpha=pi2 * w_factor, color='b')
#plt.show()

#-----------------------------------------------------------------------------------------------------------------------
# 1) K-means algorithm
plt.figure()
plt.get_current_fig_manager().window.wm_geometry("1400x760+20+20")
gs = gridspec.GridSpec(2, 2)
gs.update(wspace=0.05, hspace=0.3)

print("------------------------- K-means algorithm -------------------------")
clust_center = []
for k in range(2, 6):
    print("k = ", k)
    colors = iter(cm.rainbow(np.linspace(0, 1, k)))
    kmean_h = KMeans(n_clusters=k, n_init=10).fit(x2D)
    clust_center.append(kmean_h.cluster_centers_)

    curr_clust = getEmpProbTable(3, k, z2D, kmean_h.labels_, prob_z)

    plt.subplot(gs[k - 2])
    disp2DResult(x2D, curr_clust, 0)
    plt.title("{:d} Clusters".format(k))
Example #3
# Map the codified parameter names to their sexy latex equivalents
param_to_latex = dict(q1=r"$q_1$",
                      qz=r"$q_z$",
                      v_halo=r"$v_{halo}$",
                      phi=r"$\phi$")

halo_params = ["q1", "qz", "v_halo", "phi"]
acceptance_fractions, flat_chain, chain = pickle.load(
    open("2013-03-12_05-03-29_q1_qz_v_halo_phi_w200_s400.pickle", "rb"))
chain = chain[(acceptance_fractions > 0.15) & (acceptance_fractions < 0.6)]

# Create a figure object with same aspect ratio as a sheet of paper...
fig = plt.figure(figsize=(16, 20.6))

# I want the plot of individual walkers to span 2 columns
gs = gridspec.GridSpec(4, 3)

# The halo velocity (v_halo) parameter is stored in units of kpc/Myr, but I
#   want to plot it in km/s
for xx in range(chain.shape[0]):
    for yy in range(chain.shape[1]):
        chain[xx, yy,
              2] = (chain[xx, yy, 2] * u.kpc / u.Myr).to(u.km / u.s).value

# I could compute this, but for now I just hard code it by looking at the
#   plots by eye...
converged_idx = 150

# For each parameter, I want to plot each walker on one panel, and a histogram
#   of all links from all walkers past 150 steps (approximately when the chains
#   converged)
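# The original example is truncated here. A rough sketch (added, not part of
# the source) of how the loop described above might look, reusing chain, gs,
# fig, halo_params, param_to_latex and converged_idx defined above; the axes
# names are illustrative only.
for ii, param in enumerate(halo_params):
    # Walker traces span the first two columns of this row
    ax_walkers = fig.add_subplot(gs[ii, :2])
    for walker in chain[..., ii]:
        ax_walkers.plot(walker, color="k", alpha=0.1, drawstyle="steps")
    ax_walkers.axvline(converged_idx, color="r", linestyle="--")
    ax_walkers.set_ylabel(param_to_latex[param])

    # Histogram of all samples from all walkers past the convergence point
    ax_hist = fig.add_subplot(gs[ii, 2])
    ax_hist.hist(chain[:, converged_idx:, ii].ravel(), bins=50,
                 color="k", histtype="step")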
Example #4
# :class:`~matplotlib.axes.Axes` objects.

fig1, f1_axes = plt.subplots(ncols=2, nrows=2, constrained_layout=True)

############################################################################
# For a simple use case such as this, :mod:`~matplotlib.gridspec` is
# perhaps overly verbose.
# You have to create the figure and :class:`~matplotlib.gridspec.GridSpec`
# instance separately, then pass elements of gridspec instance to the
# :func:`~matplotlib.figure.Figure.add_subplot` method to create the axes
# objects.
# The elements of the gridspec are accessed in generally the same manner as
# numpy arrays.

fig2 = plt.figure(constrained_layout=True)
spec2 = gridspec.GridSpec(ncols=2, nrows=2, figure=fig2)
f2_ax1 = fig2.add_subplot(spec2[0, 0])
f2_ax2 = fig2.add_subplot(spec2[0, 1])
f2_ax3 = fig2.add_subplot(spec2[1, 0])
f2_ax4 = fig2.add_subplot(spec2[1, 1])

#############################################################################
# The power of gridspec comes in being able to create subplots that span
# rows and columns.  Note the `NumPy slice syntax
# <https://docs.scipy.org/doc/numpy/reference/arrays.indexing.html>`_
# for selecting the part of the gridspec each subplot will occupy.
#
# Note that we have also used the convenience method `.Figure.add_gridspec`
# instead of `.gridspec.GridSpec`, potentially saving the user an import,
# and keeping the namespace cleaner.
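#############################################################################
# The original listing stops here; the short sketch below is added (not part
# of the source) to illustrate the point above. ``fig3`` and ``spec3`` are
# assumed names: the gridspec is created with `.Figure.add_gridspec`, and
# NumPy-style slices let one axes span a whole row.

fig3 = plt.figure(constrained_layout=True)
spec3 = fig3.add_gridspec(ncols=2, nrows=2)
f3_ax1 = fig3.add_subplot(spec3[0, :])   # spans both columns of the top row
f3_ax2 = fig3.add_subplot(spec3[1, 0])
f3_ax3 = fig3.add_subplot(spec3[1, 1])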
Example #5
        def saveWeight(weightSamples):
            ws = weightSamples["weights"]
            palettes = weightSamples["palettes"]
            repeats = weightSamples["repeats"]
            print ws, repeats, len(palettes[0])

            sortedWeights = [
                str(int(10 * ws[key])) for key in sorted(ws.keys())
            ]
            shorthand = ["PD", "ND", "NU", "PP"]
            name = "__".join(
                ['-'.join(d) for d in zip(sortedWeights, shorthand)])
            imgType = "eps"
            fname = os.path.join(self.outputPath, name + "." + imgType)

            rgbPalettes = [
                np.vstack([
                    np.array([[
                        np.array(convert.convertLabToRGB(color)) / 255.0
                        for color in repeat
                    ], [np.array([1, 1, 1]) for color in repeat]])
                    for repeat in sizes
                ]) for sizes in palettes
            ]

            def makeName(palette):
                return '; '.join([
                    '[' + ','.join([str(int(i)) for i in c]) + ']'
                    for c in palette
                ])

            labNames = [[makeName(repeat)] for sizes in palettes
                        for repeat in sizes]

            fig = plt.figure(figsize=(24, 10), dpi=300)

            sortedWeights = [str(ws[key]) for key in sorted(ws.keys())]
            figName = " ".join(
                [':'.join(d) for d in zip(shorthand, sortedWeights)])
            fig.suptitle("Slider settings:: " + figName,
                         fontsize=30,
                         x=0,
                         fontweight="bold",
                         color="#010101")

            # http://matplotlib.org/users/gridspec.html
            gs0 = gridspec.GridSpec(1, 2, width_ratios=[2, 1.1])
            gs0.update(left=0)

            gs1 = gridspec.GridSpecFromSubplotSpec(1,
                                                   3,
                                                   subplot_spec=gs0[0],
                                                   width_ratios=[3, 5, 8])
            # gs1 = gridspec.GridSpec(1, 4, width_ratios=[5,3,5,8])
            # gs1.update(left=0.23, right=0.68, wspace=0)
            # gs = gridspec.GridSpec(2, 3, width_ratios=[3,5,8])

            ax1 = fig.add_subplot(gs1[0])
            ax2 = fig.add_subplot(gs1[1])
            ax3 = fig.add_subplot(gs1[2])

            gs2 = gridspec.GridSpecFromSubplotSpec(1, 1, subplot_spec=gs0[1])
            # gs2 = gridspec.GridSpec(1, 1)
            # gs2.update(left=0.7, right=1, hspace=0.05)
            ax4 = fig.add_subplot(gs2[:, :])

            allButLast = repeats * 2 - 1
            ax1.imshow(rgbPalettes[0][:allButLast], interpolation="nearest")
            ax2.imshow(rgbPalettes[1][:allButLast], interpolation="nearest")
            ax3.imshow(rgbPalettes[2][:allButLast], interpolation="nearest")

            table = ax4.table(cellText=labNames, loc='center')
            table.auto_set_font_size(False)
            table.set_fontsize(10)

            for key, cell in table.get_celld().items():
                cell.set_linewidth(0)
                cell.set_height(0.03)
                cell._text.set_color('#333333')

            ax1.set_axis_off()
            ax2.set_axis_off()
            ax3.set_axis_off()
            ax4.axis('tight')
            ax4.set_axis_off()

            fig.savefig(fname, dpi=300, bbox_inches='tight')
Example #6
    def __init__(self,heart,plot=False,store=True,stepsstored=1000,replot=False):
        
        self.heart=heart
        self.timea=time.time()
        self.plot=plot
        self.store=store
        self.replot=replot
        self.gridintime=[]
        self.stepsstored=stepsstored
        self.electrocardiot=[]
        self.num_excited = []

        self.electrocardiot.append(0)
        #self.num_excited.append(0)
       
        self.tstartfib=200
        self.tstopfib=210
        self.timecheck=-120 # set negative so it doesn't interfere the first time grid[100,100] is excited
        
        
        self.infibrillation=False
        self.tfibrillation=[]
        
        self.fibrillationcounter=0

        if self.plot==False and self.replot==False and self.store==True:
            self.timec = time.time()
            if self.heart.time == 0:
                self.heart.excitecolumn()
            self.storesteps()
            self.timed = time.time()
            print("Storage time = %s" %(self.timed-self.timec))
            print("Exclusive self store enacted!")
        
        if self.plot or self.replot:
            self.figure=plt.figure()
            gs = gridspec.GridSpec(2, 1, height_ratios=[5, 1]) 
            self.ax1 = plt.subplot(gs[0], facecolor='black')

            self.ax1.set_title('Lattice')
            self.ax1.set_xlabel('x')
            self.ax1.set_ylabel('y')
                
            self.ax2 = plt.subplot(gs[1], facecolor='black')
            self.ax2.set_xlim(0, 200)
            self.ax2.set_ylim(-50, 50)

            self.heart.excitecolumn()
            self.im = self.ax1.imshow(self.heart.grid, extent=(-0,heart.L, heart.L, 0), aspect = 'auto', cmap = "Greys_r")
            self.im.set_cmap('Greys_r') 
            self.im2, = self.ax2.plot(self.heart.tcounter,self.electrocardiot, color='white' )
            
            
            self.interval=1 
            self.counter=0
            if self.replot==False:
                self.anim1 = animation.FuncAnimation(self.figure, self.updatefig,
                            frames=10000, interval=self.interval, blit=False)
                #mywriter = animation.FFMpegWriter()

                #self.anim1.save('lines5.mp4', writer=mywriter,fps=0 )
            
            if self.store==True:
                #gridintime.append(self.heart.grid) 
                print("self store enacted!")
            
            if self.replot==True:
                print(" elements stored in heart:")
                print(len(self.gridintime))

                self.anim1 = animation.FuncAnimation(self.figure, self.replotfigure,
                            frames=10000, interval=self.interval, blit=False) 
                
        self.timeb=time.time()
        
        print ("Timing",(self.timeb-self.timea))
Example #7
def multiPlotter2d(figName, arrayNameList, mini, maxi, plotter='imshow', titleList=None, supTitle='', printIO=False, **kwargs):

    nbr = len(arrayNameList)
    
    if titleList is None:
        titleList = []
        for i in range(nbr):
            titleList.append('sim '+str(i))

    kwargs['origin'] = 'lower'
    kwargs['vmin']   = mini
    kwargs['vmax']   = maxi
    
    Nc = int(np.floor(np.sqrt(nbr)))
    Nl = Nc
    while Nc*Nl < nbr:
        Nl += 1

    figure = plt.figure()
    plt.clf()
    gs = gridspec.GridSpec(Nl, Nc)
    j = 0

    for (arrayName, title) in zip(arrayNameList, titleList):
        nc = int(np.mod(j,Nc))
        nl = int((j-nc)/Nc)
        
        ax = plt.subplot(gs[nl,nc])
        Y = np.load(arrayName)
        
        try:
            extent = kwargs['extent']
            ax.set_xlim(extent[0],extent[1])
            ax.set_ylim(extent[2],extent[3])
        except KeyError:
            kwargs['extent'] = [0.0, 1.0, 0.0, 1.0]
            ax.set_xlim(0.0, 1.0)
            ax.set_ylim(0.0, 1.0)

        ax.set_yticks([])
        ax.set_xticks([])
        ax.set_title(title)        
        im = plotMatrix(ax, Y, plotter, **kwargs)
        j += 1


    gs.tight_layout(figure,rect=[0.,0.,0.85,1.])
    gs2 = gridspec.GridSpec(1,1)
    gs2.update(left=0.87,right=0.93)

    cax = plt.subplot(gs2[0,0],frameon=False)
    cmap = mpl.cm.jet
    norm = mpl.colors.Normalize(vmin=mini, vmax=maxi)
    cb1 = mpl.colorbar.ColorbarBase(cax, cmap=cmap, norm=norm, orientation='vertical')

    plt.suptitle(supTitle)

    if printIO:
        print('Writing '+figName+' ...')
    plt.savefig(figName)
    plt.close()
                                                                                
Example #8
for i in range(len(data)):

    # 9723, 156 is a good example
    # 334 for demonstrating weighting on incorrect examples
    # 4763 clear multi stage alignments
    # 9311 for multi-resolution
    # 4, good for concentration
    # 56 GREAT - co-reference resolution

    print(i, len(data[i]["premise_attention"]),
          len(data[i]["hypothesis_attention"]))
    print(data[i])

    fig = plt.figure()
    axs = gs.GridSpec(2, 15)
    prem, hyp = convert_to_dataframe(data[i])
    max_val = max(prem.values.max(), hyp.values.max())
    max_dist = max(data[i]["act_probs"])
    ax = fig.add_subplot(axs[0, 0:13])
    ax.set_title("Hypothesis")
    seaborn.heatmap(hyp, vmin=0.0, vmax=1.0)

    ax = fig.add_subplot(axs[1, 0:13])
    ax.set_title("Premise")
    seaborn.heatmap(prem, vmin=0.0, vmax=1.0)
    ax = fig.add_subplot(axs[:, 14])
    ax.set_title("ACT halting probs(weights)")
    seaborn.heatmap(np.expand_dims(data[i]["act_probs"], 1), annot=True)
    plt.gca()
    plt.show()
Example #9
def plotHistory():
    colnames = ('jd', 'desv10', 'desv40', 'desv70')
    table = ascii.read('Rut02_dat/ordenes_master.txt',
                       format='csv',
                       names=colnames,
                       comment='@')
    jd = np.array(table["jd"])
    desv10 = np.array(table["desv10"])
    desv40 = np.array(table["desv40"])
    desv70 = np.array(table["desv70"])

    today = datetime.datetime.now()
    today = astropy.time.Time(today)
    jd_today = int(today.jd)
    jd_ini = jd_today - 180

    plt.figure(figsize=(12, 7))
    gs = gridspec.GridSpec(3, 1)
    gs.update(left=0.08,
              right=0.95,
              bottom=0.08,
              top=0.93,
              wspace=0.2,
              hspace=0.1)

    ax = plt.subplot(gs[0, 0])
    ax.set_ylabel(r'$\Delta y$ (pix) - Orden 10')
    ax.get_xaxis().set_ticks([])
    ax.set_ylim([-1, 1])
    ax.set_xlim([0, 180])
    arr = desv10
    plt.errorbar(jd - jd_ini, desv10, yerr=0, fmt='o', c='red')
    for year in range(10):
        jdyear = gcal2jd(2011 + year, 1, 1)
        plt.axvline(jdyear[0] + jdyear[1] - jd_ini, ls=':', c='gray')
        begin = jdyear[0] + jdyear[1] - jd_ini
        ax.annotate(str(2011 + year),
                    xy=(begin + 150, 890),
                    xycoords='data',
                    fontsize=14)
    plt.grid(ls=':', c='gray')
    plt.axhline(0.1, ls='--', c='red')
    plt.axhline(-0.1, ls='--', c='red')

    ax = plt.subplot(gs[1, 0])
    ax.set_ylabel(r'$\Delta y$ (pix) - Orden 40')
    label = r'JD-' + str(jd_ini) + ' (days)'
    ax.set_xlabel(label)
    ax.set_xlim([0, 180])
    ax.set_ylim([-1, 1])
    arr = desv40
    plt.errorbar(jd - jd_ini, desv40, yerr=0, fmt='o', c='red')
    for year in range(10):
        jdyear = gcal2jd(2011 + year, 1, 1)
        plt.axvline(jdyear[0] + jdyear[1] - jd_ini, ls=':', c='gray')
        begin = jdyear[0] + jdyear[1] - jd_ini
        ax.annotate(str(2011 + year),
                    xy=(begin + 150, 890),
                    xycoords='data',
                    fontsize=14)
    plt.grid(ls=':', c='gray')
    plt.axhline(0.1, ls='--', c='red')
    plt.axhline(-0.1, ls='--', c='red')

    ax = plt.subplot(gs[2, 0])
    ax.set_ylabel(r'$\Delta y$ (pix) - Orden 70')
    label = r'JD-' + str(jd_ini) + ' (days)'
    ax.set_xlabel(label)
    ax.set_xlim([0, 180])
    ax.set_ylim([-1, 1])
    plt.errorbar(jd - jd_ini, desv70, yerr=0, fmt='o', c='red')
    for year in range(10):
        jdyear = gcal2jd(2011 + year, 1, 1)
        plt.axvline(jdyear[0] + jdyear[1] - jd_ini, ls=':', c='gray')
        begin = jdyear[0] + jdyear[1] - jd_ini
        ax.annotate(str(2011 + year),
                    xy=(begin + 150, 890),
                    xycoords='data',
                    fontsize=14)
    plt.grid(ls=':', c='gray')
    plt.axhline(0.1, ls='--', c='red')
    plt.axhline(-0.1, ls='--', c='red')
    plt.savefig('orden_history_CAFE.pdf')
Example #10
    def StatisticalProperties(self,
                              PathNodes,
                              PathTS,
                              StartDate,
                              WarmUpPeriod,
                              SavePlots,
                              SavePath,
                              SeparateFiles=False,
                              Filter=False,
                              Distibution="GEV",
                              EstimateParameters=False,
                              Quartile=0,
                              RIMResults=False,
                              SignificanceLevel=0.1):
        """
        =============================================================================
          StatisticalProperties(PathNodes, PathTS, StartDate, WarmUpPeriod, SavePlots, SavePath,
                              SeparateFiles = False, Filter = False, RIMResults = False)
        =============================================================================

        StatisticalProperties method reads the SWIM output file (.dat file) that
        contains the time series of discharge for some computational nodes
        and calculates some statistical properties.

        The code assumes that the time series have a daily temporal resolution and
        that the hydrological year is 1-Nov/31-Oct (Petrow and Merz, 2009, JoH).

        Parameters
        ----------
            1-PathNodes : [String]
                the name of the file which contains the IDs of the computational
                nodes you want to do the statistical analysis for; the ObservedFile
                should contain the discharge time series of these nodes in order.
            2-PathTS : [String]
                the name of the SWIM result file (the .dat file).
            3-StartDate : [string]
                the beginning date of the time series.
            4-WarmUpPeriod : [integer]
                the number of days you want to neglect at the beginning of the
                simulation (warm-up period).
            5-SavePlots : [Bool]
                whether to save the plots of the fitted distributions.
            6-SavePath : [String]
                the path where you want to save the statistical properties.
            7-SeparateFiles: [Bool]
                if the discharge data are stored in separate files (not all in one
                file), SeparateFiles should be True, default [False].
            8-Filter: [Bool/Value]
                observed or RIM-result data may have gaps where the model did not
                run or where the observations are missing; if these gap days are
                filled with a specific value and you want to ignore it here,
                give Filter = the value you want to ignore.
            9-RIMResults: [Bool]
                True if the files are RIM results rather than observed data, as
                the format differs between the two. default [False]

        Returns
        -------
            1-Statistical Properties.csv:
                file containing some statistical properties like mean, std, min, 5%, 25%,
                median, 75%, 95%, max, t_beg, t_end, nyr, q1.5, q2, q5, q10, q25, q50,
                q100, q200, q500.
        """

        ComputationalNodes = np.loadtxt(PathNodes, dtype=np.uint16)
        # hydrographs
        if SeparateFiles:
            TS = pd.DataFrame()
            if RIMResults:
                for i in range(len(ComputationalNodes)):
                    TS.loc[:, int(ComputationalNodes[i])] = self.ReadRIMResult(
                        PathTS + "/" + str(int(ComputationalNodes[i])) +
                        '.txt')
            else:
                for i in range(len(ComputationalNodes)):
                    TS.loc[:, int(ComputationalNodes[i])] = np.loadtxt(
                        PathTS + "/" + str(int(ComputationalNodes[i])) +
                        '.txt')  #,skiprows = 0

            StartDate = dt.datetime.strptime(StartDate, "%Y-%m-%d")
            EndDate = StartDate + dt.timedelta(days=TS.shape[0] - 1)
            ind = pd.date_range(StartDate, EndDate)
            TS.index = ind
        else:
            TS = pd.read_csv(PathTS, delimiter=r'\s+', header=None)
            StartDate = dt.datetime.strptime(StartDate, "%Y-%m-%d")
            EndDate = StartDate + dt.timedelta(days=TS.shape[0] - 1)
            TS.index = pd.date_range(StartDate, EndDate, freq="D")
            # delete the first two columns
            del TS[0], TS[1]
            TS.columns = ComputationalNodes

        # neglect the first year (warmup year) in the time series
        TS = TS.loc[StartDate + dt.timedelta(days=WarmUpPeriod):EndDate, :]

        # List of the table output, including some general data and the return periods.
        col_csv = [
            'mean', 'std', 'min', '5%', '25%', 'median', '75%', '95%', 'max',
            't_beg', 't_end', 'nyr'
        ]
        rp_name = [
            'q1.5', 'q2', 'q5', 'q10', 'q25', 'q50', 'q100', 'q200', 'q500',
            'q1000'
        ]
        col_csv = col_csv + rp_name

        # In a table where duplicates are removed (np.unique), find the number of
        # gauges contained in the .csv file.
        # no_gauge = len(ComputationalNodes)
        # Declare a dataframe for the output file, with the gauge numbers as index
        # and all the output names as columns.
        StatisticalPr = pd.DataFrame(np.nan,
                                     index=ComputationalNodes,
                                     columns=col_csv)
        StatisticalPr.index.name = 'ID'
        DistributionPr = pd.DataFrame(np.nan,
                                      index=ComputationalNodes,
                                      columns=['loc', 'scale'])
        DistributionPr.index.name = 'ID'
        # required return periods
        T = [1.5, 2, 5, 10, 25, 50, 100, 200, 500, 1000]
        T = np.array(T)
        # these values are the Non Exceedance probability (F) of the chosen
        # return periods F = 1 - (1/T)
        # Non Exceedance probabilities
        #F = [1/3, 0.5, 0.8, 0.9, 0.96, 0.98, 0.99, 0.995, 0.998]
        F = 1 - (1 / T)
        # Iteration over all the gauge numbers.
        for i in ComputationalNodes:
            QTS = TS.loc[:, i]
            # The time series is resampled to the annual maxima, and turned into a
            # numpy array.
            # The hydrological year is 1-Nov/31-Oct (from Petrow and Merz, 2009, JoH).
            amax = QTS.resample('A-OCT').max().values

            if type(Filter) != bool:
                amax = amax[amax != Filter]
            if EstimateParameters:
                # estimate the parameters through an optimization
                # alpha = (np.sqrt(6) / np.pi) * amax.std()
                # beta = amax.mean() - 0.5772 * alpha
                # param_dist = [beta, alpha]
                threshold = np.quantile(amax, Quartile)
                if Distibution == "GEV":
                    print("Still to be finished later")
                else:
                    param = Gumbel.EstimateParameter(amax, Gumbel.ObjectiveFn,
                                                     threshold)
                    param_dist = [param[1], param[2]]

            else:
                # estimate the parameters through a maximum likelihood method
                if Distibution == "GEV":
                    param_dist = genextreme.fit(amax)
                else:
                    # A gumbel distribution is fitted to the annual maxima
                    param_dist = gumbel_r.fit(amax)

            if Distibution == "GEV":
                DistributionPr.loc[i, 'c'] = param_dist[0]
                DistributionPr.loc[i, 'loc'] = param_dist[1]
                DistributionPr.loc[i, 'scale'] = param_dist[2]
            else:
                DistributionPr.loc[i, 'loc'] = param_dist[0]
                DistributionPr.loc[i, 'scale'] = param_dist[1]

            # Return periods from the fitted distribution are stored.
            # get the discharge corresponding to the return periods
            if Distibution == "GEV":
                Qrp = genextreme.ppf(F,
                                     param_dist[0],
                                     loc=param_dist[1],
                                     scale=param_dist[2])
            else:
                Qrp = gumbel_r.ppf(F, loc=param_dist[0], scale=param_dist[1])
            # to get the Non Exceedance probability for a specific Value
            # sort the amax
            amax.sort()
            # calculate the F (exceedance probability based on the Weibull plotting position)
            # The Gumbel.ProbapilityPlot method calculates the theoretical values based on the
            # Gumbel distribution parameters and the theoretical cdf (Weibull), and computes
            # the confidence interval
            if Distibution == "GEV":
                Qth, Qupper, Qlower = GEV.ProbapilityPlot(
                    param_dist, cdf_Weibul, amax, SignificanceLevel)
                # to calculate the F theoretical
                Qx = np.linspace(0, 1.5 * float(amax.max()), 10000)
                pdf_fitted = genextreme.pdf(Qx,
                                            param_dist[0],
                                            loc=param_dist[1],
                                            scale=param_dist[2])
                cdf_fitted = genextreme.cdf(Qx,
                                            param_dist[0],
                                            loc=param_dist[1],
                                            scale=param_dist[2])
            else:
                Qth, Qupper, Qlower = Gumbel.ProbapilityPlot(
                    param_dist, cdf_Weibul, amax, SignificanceLevel)
                # gumbel_r.interval(SignificanceLevel)
                # to calculate the F theoretical
                Qx = np.linspace(0, 1.5 * float(amax.max()), 10000)
                pdf_fitted = gumbel_r.pdf(Qx,
                                          loc=param_dist[0],
                                          scale=param_dist[1])
                cdf_fitted = gumbel_r.cdf(Qx,
                                          loc=param_dist[0],
                                          scale=param_dist[1])
            # then calculate the T (return period): T = 1/(1-F)
            if SavePlots:
                fig = plt.figure(60, figsize=(20, 10))
                gs = gridspec.GridSpec(nrows=1, ncols=2, figure=fig)
                # Plot the histogram and the fitted distribution, save it for each gauge.
                ax1 = fig.add_subplot(gs[0, 0])
                ax1.plot(Qx, pdf_fitted, 'r-')
                ax1.hist(amax, density=True)
                ax1.set_xlabel('Annual Discharge(m3/s)', fontsize=15)
                ax1.set_ylabel('pdf', fontsize=15)

                ax2 = fig.add_subplot(gs[0, 1])
                ax2.plot(Qx, cdf_fitted, 'r-')
                ax2.plot(amax, cdf_Weibul, '.-')
                ax2.set_xlabel('Annual Discharge(m3/s)', fontsize=15)
                ax2.set_ylabel('cdf', fontsize=15)

                plt.savefig(SavePath + "/" + "Figures/" + str(i) + '.png',
                            format='png')
                plt.close()

                fig = plt.figure(70, figsize=(10, 8))
                plt.plot(Qth,
                         amax,
                         'd',
                         color='#606060',
                         markersize=12,
                         label='Gumbel Distribution')
                plt.plot(Qth,
                         Qth,
                         '^-.',
                         color="#3D59AB",
                         label="Weibul plotting position")
                if Distibution != "GEV":
                    plt.plot(Qth,
                             Qlower,
                             '*--',
                             color="#DC143C",
                             markersize=12,
                             label='Lower limit (' +
                             str(int(
                                 (1 - SignificanceLevel) * 100)) + " % CI)")
                    plt.plot(Qth,
                             Qupper,
                             '*--',
                             color="#DC143C",
                             markersize=12,
                             label='Upper limit (' +
                             str(int(
                                 (1 - SignificanceLevel) * 100)) + " % CI)")

                plt.legend(fontsize=15, framealpha=1)
                plt.xlabel('Theoretical Annual Discharge(m3/s)', fontsize=15)
                plt.ylabel('Annual Discharge(m3/s)', fontsize=15)
                plt.savefig(SavePath + "/" + "Figures/F-" + str(i) + '.png',
                            format='png')
                plt.close()

            StatisticalPr.loc[i, 'mean'] = QTS.mean()
            StatisticalPr.loc[i, 'std'] = QTS.std()
            StatisticalPr.loc[i, 'min'] = QTS.min()
            StatisticalPr.loc[i, '5%'] = QTS.quantile(0.05)
            StatisticalPr.loc[i, '25%'] = QTS.quantile(0.25)
            StatisticalPr.loc[i, 'median'] = QTS.quantile(0.50)
            StatisticalPr.loc[i, '75%'] = QTS.quantile(0.75)
            StatisticalPr.loc[i, '95%'] = QTS.quantile(0.95)
            StatisticalPr.loc[i, 'max'] = QTS.max()
            StatisticalPr.loc[i, 't_beg'] = QTS.index.min()
            StatisticalPr.loc[i, 't_end'] = QTS.index.max()
            StatisticalPr.loc[
                i, 'nyr'] = (StatisticalPr.loc[i, 't_end'] -
                             StatisticalPr.loc[i, 't_beg']).days / 365.25
            for irp, irp_name in zip(Qrp, rp_name):
                StatisticalPr.loc[i, irp_name] = irp

            # Print for prompt and check progress.
            print("Gauge", i, "done.")
        #
        # Output file
        StatisticalPr.to_csv(SavePath + "/" + "Statistical Properties.csv")
        self.StatisticalPr = StatisticalPr
        DistributionPr.to_csv(SavePath + "/" + "DistributionProperties.csv")
        self.DistributionPr = DistributionPr
Example #11
import matplotlib.patches as patches
import os; import time; start = time.time(); print('Running...')
import matplotlib.gridspec as gridspec

# Main input parameters
col_labels = ['Geometry\n(NA${}_{\\textrm{1}}$ = 0.8, NA${}_{\\textrm{2}}$ = 0.8)', 'Uncertainty Ellipses', r'$\sigma_{\Omega}$ [sr]']
fig_labels = ['a)', 'b)', 'c)']
n_pts = 100 # Points on sphere
n_pts_sphere = 50000 # Points on sphere
n_grid_pts = 5
inch_fig = 5
dpi = 300

# Setup figure and axes
fig = plt.figure(figsize=(3.2*inch_fig, 1*inch_fig))
gs0 = gridspec.GridSpec(1, 3, wspace=0.2, hspace=0.1)
gs00 = gridspec.GridSpecFromSubplotSpec(1, 2, subplot_spec=gs0[0,0], width_ratios=[1, 0.05], wspace=0.1)
gs10 = gridspec.GridSpecFromSubplotSpec(1, 2, subplot_spec=gs0[0,1], width_ratios=[1, 0.05], wspace=0.1)
gs20 = gridspec.GridSpecFromSubplotSpec(1, 2, subplot_spec=gs0[0,2], width_ratios=[1, 0.05], wspace=0.1)

ax0 = plt.subplot(gs00[0])
ax1 = plt.subplot(gs10[0])
ax2 = plt.subplot(gs20[0])
cax2 = plt.subplot(gs20[1])

for ax, col_label, fig_label  in zip([ax0, ax1, ax2], col_labels, fig_labels):
    ax.annotate(col_label, xy=(0,0), xytext=(0.5, 1.05), textcoords='axes fraction',
                va='bottom', ha='center', fontsize=14, annotation_clip=False)
    ax.annotate(fig_label, xy=(0,0), xytext=(0, 1.05), textcoords='axes fraction',
                va='bottom', ha='center', fontsize=14, annotation_clip=False)
Example #12
    
def behavior_summary(subjects,
                     sessions,
                     trialslim=[],
                     outputDir='',
                     paradigm=None,
                     soundfreq=None):
    '''
    subjects: an array of animals to analyze (it can also be a string for a single animal)
    sessions: an array of sessions to analyze (it can also be a string for a single session)
    trialslim: array to set xlim() of dynamics' plot
    outputDir: where to save the figure (if not specified, nothing will be saved)
    paradigm: load data from a different paradigm. Warning: data should be loaded with
              loadbehavior.ReversalBehaviorData().
    soundfreq: list of sound frequencies to use; if None, they are taken from the loaded data.
    '''
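    # Example call (added sketch; the subject/session names are hypothetical):
    #   behavior_summary('test011', '20140321a', trialslim=[0, 600],
    #                    outputDir='/tmp/behavior_reports/')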
    if isinstance(subjects, str):
        subjects = [subjects]
    if isinstance(sessions, str):
        sessions = [sessions]
    nSessions = len(sessions)
    nAnimals = len(subjects)

    loadingClass = loadbehavior.FlexCategBehaviorData
    paradigm = '2afc'

    gs = gridspec.GridSpec(nSessions * nAnimals, 3)
    gs.update(hspace=0.5, wspace=0.4)
    plt.clf()
    for inds, thisSession in enumerate(sessions):
        for inda, animalName in enumerate(subjects):
            try:
                behavFile = loadbehavior.path_to_behavior_data(
                    animalName, EXPERIMENTER, paradigm, thisSession)
                behavData = loadingClass(behavFile, readmode='full')
            except IOError:
                print thisSession + ' does not exist'
                continue
            print 'Loaded %s %s' % (animalName, thisSession)
            # -- Plot either psychometric or average performance
            thisAnimalPos = 3 * inda * nSessions
            thisPlotPos = thisAnimalPos + 3 * inds
            ax1 = plt.subplot(gs[thisPlotPos])
            if any(behavData['psycurveMode']):
                (pline, pcaps, pbars,
                 pdots) = plot_frequency_psycurve(behavData, fontsize=8)
                plt.setp(pdots, ms=6)
                plt.ylabel('% rightward')
                nValid = behavData['nValid'][-1]
                nTrials = len(behavData['nValid'])
                if soundfreq is None:
                    freqsToUse = [
                        behavData['lowFreq'][-1], behavData['highFreq'][-1]
                    ]
                else:
                    freqsToUse = soundfreq
                titleStr = '{0} [{1}] {2}\n'.format(
                    behavData.session['subject'], behavData.session['date'],
                    behavData.session['hostname'])
                titleStr += '{0} valid, {1:.0%} early'.format(
                    nValid, (nTrials - nValid) / float(nTrials))
                plt.title(titleStr, fontweight='bold', fontsize=8, y=0.95)
            else:
                behavData.find_trials_each_block()
                if soundfreq is None:
                    freqsToUse = [
                        behavData['lowFreq'][-1], behavData['highFreq'][-1]
                    ]
                else:
                    freqsToUse = soundfreq
                plot_summary(behavData, fontsize=8, soundfreq=freqsToUse)

            # -- Plot dynamics --
            ax2 = plt.subplot(gs[thisPlotPos + 1:thisPlotPos + 3])
            plot_dynamics(behavData,
                          winsize=40,
                          fontsize=8,
                          soundfreq=freqsToUse)
            #plt.setp(ax1.get_xticklabels(),visible=False)
            ax1xlabel = ax1.get_xlabel()
            ax2xlabel = ax2.get_xlabel()
            ax1.set_xlabel('')
            ax2.set_xlabel('')
            if trialslim:
                plt.xlim(trialslim)
            plt.draw()
            plt.show()
    plt.setp(ax1.get_xticklabels(), visible=True)
    plt.setp(ax2.get_xticklabels(), visible=True)
    ax1.set_xlabel(ax1xlabel)
    ax2.set_xlabel(ax2xlabel)
    #plt.draw()
    #plt.show()

    if len(outputDir):
        animalStr = '-'.join(subjects)
        sessionStr = '-'.join(sessions)
        plt.gcf().set_size_inches((8.5, 11))
        figformat = 'png'  #'png' #'pdf' #'svg'
        filename = 'behavior_%s_%s.%s' % (animalStr, sessionStr, figformat)
        fullFileName = os.path.join(outputDir, filename)
        print 'saving figure to %s' % fullFileName
        plt.gcf().savefig(fullFileName, format=figformat)
Example #13
import numpy as np 
import matplotlib.pyplot as plt   
import matplotlib.gridspec as gs

# Simple figure - no spans
# Create the figure 
fig1 = plt.figure(1)

# Create a gridspec object
gs1 = gs.GridSpec(nrows = 2, ncols = 2)

# Axis handle for plot 1
ax1 = plt.subplot(gs1[0,0])
ax1.text(x = 0.5, y = 0.5, s = 'ax1', va = 'center', ha = 'center')

# Axis handle for plot 2
ax2 = plt.subplot(gs1[0,1])
ax2.text(x = 0.5, y = 0.5, s = 'ax2', va = 'center', ha = 'center')

# Axis handle for plot 3
ax3 = plt.subplot(gs1[1,0])
ax3.text(x = 0.5, y = 0.5, s = 'ax3', va = 'center', ha = 'center')

# Axis handle for plot 4
ax4 = plt.subplot(gs1[1,1])
ax4.text(x = 0.5, y = 0.5, s = 'ax4', va = 'center', ha = 'center')

plt.suptitle('Subplot via GridSpec')
plt.tight_layout()
plt.subplots_adjust(top = 0.9)
Example #14
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
gs = gridspec.GridSpec(5, 2)
import math
import pickle
import sys
import operator
import pandas as pd
import collections
from sklearn import datasets, linear_model
from sklearn.metrics import mean_squared_error, r2_score
import numpy as np
from sklearn.preprocessing import PolynomialFeatures
from scipy.optimize import curve_fit
from statistics import mean

from global_definition import *


def func1(x, a, b, c):
    return a * np.exp(-b * x) + c


def func2(x, a, b, c):
    return a * np.exp(-b * x) + c


def all_infos_across_all_shuffle(excluded_systems_tau_list):
    all_taus_for_system_numbers_across_all_shuffles = {}
Example #15
def plot_preds(net, X_test, Y_test, device, epoch=None):

    x_one = X_test[:, Y_test == 1, :][:, 0, :]
    x_one = torch.from_numpy(x_one.reshape((1, 28, 28))).to("cpu")

    toPIL = torchvision.transforms.ToPILImage()

    def rotate(x, r):
        return np.array(torchvision.transforms.functional.rotate(toPIL(x), r))

    list_r = [0, 10, 20, 50]
    list_img = [rotate(x_one, r) for r in list_r]

    plt.figure(figsize=(20, 10))
    gs = gridspec.GridSpec(4, 4, hspace=0.2)
    for i in range(len(list_img)):

        # Image
        ax = plt.subplot(gs[2 * i])
        ax.imshow(list_img[i], cmap="Greys_r")
        ax.set_xticks([])
        ax.set_yticks([])

        # Histo
        ax = plt.subplot(gs[2 * i + 1])
        x = torch.from_numpy(list_img[i]).to(device).view(28, 1,
                                                          28).float() / 256
        arr_probs = []
        for k in range(200):

            probs = F.softmax(net(x.to(device)), dim=-1).detach().cpu().numpy()
            arr_probs.append(probs)
        arr_probs = np.concatenate(arr_probs, 0)

        values, bin_edges = np.histogram(np.ravel(arr_probs), bins=25)

        for k in range(arr_probs.shape[-1]):
            ax.hist(
                arr_probs[:, k],
                color=f"C{k}",
                bins=bin_edges,
                histtype="step",
                label=f"Prob {k}",
            )
        ax.set_xlim([-0.1, 1.1])
        ax.set_yscale("log")
        ymin, ymax = ax.get_ylim()
        ax.set_ylim([ymin, 1.5 * np.max(values)])
        if i == len(list_img) - 1:
            ax.legend(bbox_to_anchor=(1.4, 1), fontsize=16)

    # Random images
    list_img = [
        np.random.uniform(0, 1, (28, 28)).astype(np.float32) for r in range(4)
    ]

    for i in range(len(list_img), 2 * len(list_img)):

        # Image
        ax = plt.subplot(gs[2 * i])
        ax.imshow(list_img[i - len(list_img)], cmap="Greys_r")
        ax.set_xticks([])
        ax.set_yticks([])

        # Histo
        ax = plt.subplot(gs[2 * i + 1])
        x = (torch.from_numpy(list_img[i - len(list_img)]).to(device).view(
            28, 1, 28).float() / 256)
        arr_probs = []
        for k in range(200):

            probs = F.softmax(net(x.to(device)), dim=-1).detach().cpu().numpy()
            arr_probs.append(probs)
        arr_probs = np.concatenate(arr_probs, 0)

        values, bin_edges = np.histogram(np.ravel(arr_probs), bins=25)

        for k in range(arr_probs.shape[-1]):
            ax.hist(
                arr_probs[:, k],
                color=f"C{k}",
                bins=bin_edges,
                histtype="step",
                label=f"Prob {k}",
            )
        ax.set_xlim([-0.1, 1.1])
        ax.set_yscale("log")
        ymin, ymax = ax.get_ylim()
        ax.set_ylim([ymin, 1.5 * np.max(values)])
        if i >= 2 * len(list_img) - 2:
            ax.set_xlabel("Predicted Probability", fontsize=16)

    plt.subplots_adjust(left=0,
                        right=0.9,
                        bottom=0.1,
                        top=0.98,
                        wspace=0.0,
                        hspace=0.02)
    title = (f"figmnist/RNN_{args.model}/fig.png" if epoch is None else
             f"figmnist/RNN_{args.model}/RNN_{args.model}_epoch_{epoch}.png")
    plt.savefig(title)
    plt.clf()
    plt.close()
Example #16
# Free parameters
wl = 830e-9  # light wavelength in free space in meters
width = 20e-6  # width of view in meters
eps2 = 1.0  # dielectric medium is air, n = 1
w0 = 2.5e-6  # width parameter of incident beam in meters
L = 50e-6  # length of slits in meters (also size of simulation)
res = 1e-7  # size of one pixel in meters

# Calculated parameters
eps1 = epsilons.epsilon_Au(wl)
k0 = 2 * N.pi / wl
kSP = k0 * N.sqrt(eps1 * eps2 / (eps1 + eps2))
lambdaSP = 2 * N.pi / kSP.real

gs = gridspec.GridSpec(2, 4, width_ratios=[1, 1, 1, 0.05])
fig1 = P.figure()
fig2 = P.figure()

axis_args = {'aspect': 'equal', 'xticks': [], 'yticks': []}

ax = fig1.add_subplot(gs[0, 0], **axis_args)
p = calculation_figure(ax, 1, 25e-6, k=kSP, w0=w0, L=L, res=res)
ax.set_xlim(-width * 5e5, width * 5e5)
ax.set_ylim(-width * 5e5, width * 5e5)
ax.subfigure_label('(a)', pos='lower right')

ax = fig1.add_subplot(gs[0, 1], **axis_args)
reuse_plot1 = calculation_figure(ax,
                                 -1,
                                 25e-6,
Example #17
def main():
    #
    parser = argparse.ArgumentParser(description='Kmeans')
    parser.add_argument('-fnyp', default='', help='file name of predictions')
    parser.add_argument('-fnyg', default='', help='file name of ground truth')
    parser.add_argument('-K', default=2, type=int, help='number of clusters')
    parser.add_argument('--Lmax',
                        '-L',
                        default=1,
                        type=int,
                        help='Level of clustering for hierarchical')
    parser.add_argument('-Lt',
                        default=1,
                        type=int,
                        help='1 for truncation 0 for no trunc for L')
    parser.add_argument('-Lmin',
                        default=0,
                        type=int,
                        help='Minimum Level of clustering for hierarchical')
    parser.add_argument('-H', default=100, type=int, help='Prediction Horizon')
    parser.add_argument('-hh',
                        default=10,
                        type=int,
                        help='additional horizon to classify')
    parser.add_argument('-Ey', default=10, type=int, help='threshold of ey')
    parser.add_argument('-m',
                        default=1,
                        type=int,
                        help='1 for mean, 0 for leave-one-out')
    parser.add_argument('-d1', default='.', type=str, help='directory d1')
    parser.add_argument('-DISP',
                        default='1',
                        type=str,
                        help='1 for DISP, 0 for noDISP')
    parser.add_argument('-msg', default='', help='message to carry')
    parser.add_argument('-tp0',
                        default=2000,
                        type=int,
                        help='prediction start tyme')

    args = parser.parse_args()
    #  import pdb;pdb.set_trace(); #for debug
    #  import os.path
    #  fnyp=os.path.expanduser(args.fnyp)
    Sth_id = np.array(
        pd.read_csv('{}/tspSth_id.dat'.format(args.d1),
                    delim_whitespace=True,
                    dtype=np.int32,
                    header=None)).reshape((-1))
    all_id = np.array(
        pd.read_csv('{}/tspall_id.dat'.format(args.d1),
                    delim_whitespace=True,
                    dtype=np.int32,
                    header=None))
    tp0 = args.tp0

    colsplt = [
        "black", "red", "dark-green", "magenta", "green", "light-green",
        "salmon", "pink", "grey"
    ]
    colsplt = [
        "black", "red", "dark-green", "blue", "magenta", "green",
        "light-green", "salmon", "pink", "grey"
    ]
    colsplt = [
        "red", "dark-green", "green", "blue", "green", "light-green", "blue",
        "cyan", "orange",
        "salmon", "pink", "magenta", "grey"
    ]
    colsplt = [
        "red", "dark-green", "dark-green", "blue", "dark-green", "green",
        "blue", "cyan", "light-green", "orange",
        "salmon", "pink", "magenta", "grey"
    ]
    colsplt = [
        "dark-green", "dark-green", "blue", "dark-green", "green", "blue",
        "cyan", "light-green", "orange",
        "salmon", "pink", "magenta", "grey"
    ]
    fnyLc = 'tmp/yLc' + str(tp0)
    fpyLc = open(fnyLc + '.plt', 'w')
    fpyLc.write('set style data lines;set nokey\n')

    K = int(args.K)
    Lmax = int(args.Lmax) + 1
    Lmin = int(args.Lmin)
    # read dataset
    #    import pdb;pdb.set_trace(); #for debug
    #  Y = np.loadtxt(fnyp, delimiter=' ')
    #  import pdb;pdb.set_trace(); #for debug
    fnyp = '{}/{}'.format(args.d1, args.fnyp)
    if not os.path.isfile(fnyp):
        #  if len(fnyp) == 0:
        print '#fnyp=%s does not exist' % (fnyp)
        return
    Y = np.array(
        pd.read_csv(fnyp, delim_whitespace=True, dtype=np.float32,
                    header=None))
    Y = Y.T  # transpose
    N, T = Y.shape  #N:number of time series, T:horizon
    Ey = args.Ey  #threshold for predictable horizon
    h_all = []
    #  H=np.zeros((L+1,2**L)).astype('float32')
    #      fnyg=os.path.expanduser(args.fnyg)

    fnyg = '{}/{}'.format(args.d1, args.fnyg)
    #  import pdb;pdb.set_trace(); #for debug
    if os.path.isfile(fnyg):
        print '#predictable horizons'
        yg = np.array(
            pd.read_csv(fnyg,
                        delim_whitespace=True,
                        dtype=np.float32,
                        header=None))
        #    plt.plot(x, yg,color=cols[0]) #ground truth
        ##best yp for ygt
        hb = 0
        ib = 0
        for i in range(N):
            h = horizon(Y[i], yg, Ey)
            h_all.append(h)
            if h > hb:
                hb = h
                ib = i
        h_all_A = np.array(h_all)
        print 'max h(y%d,yg)=%d' % (ib, hb)
        Hgmax_A = hb

    DISP = args.DISP

    fig = plt.figure(figsize=(6, 8))
    #  fig2=plt.figure(figsize=(8,6))
    gs = gridspec.GridSpec(4, 2)
    plt.subplots_adjust(wspace=0.5, hspace=1.0)
    #  plt.subplots_adjust(wspace=0.5, hspace=0.5)
    C = np.zeros((N, Lmax + 1)).astype('uint8')
    envN = 'export N=N:'
    Nenv = []
    if Lmax == -1:
        y = np.zeros((1, H)).astype('float32')  #dummy
        for n in range(N):
            y = np.concatenate((y, Y[n, :H].reshape(1, H)), axis=0)
        y = np.delete(y, 0, axis=0)  #delete dummy
        km = KMeans(n_clusters=K,
                    init='k-means++',
                    n_init=10,
                    max_iter=300,
                    tol=0.0001,
                    precompute_distances='auto',
                    verbose=0,
                    random_state=None,
                    copy_x=True,
                    n_jobs=1)
        pred = km.fit_predict(y)

    else:  #hierarchical clustering
        LONGhg = 0
        #    import pdb;pdb.set_trace(); #for debug
        Lctodeg = dict()
        Lctodegcuml = dict()
        Lctohg = dict()
        L2ctoLig = dict()
        HgHoL = {}
        ncmaxL = {}
        cncmaxL = {}
        Entropy = {}
        HgEntropy = {}
        iLOOHmax = {}
        for L in range(Lmin, Lmax):  #L156
            ctoh0 = dict()
            ctonY = dict()
            ctoLi = dict()
            ctoLic0 = dict()
            ctoLic1 = dict()
            ctoLig = dict()
            if LONGhg == 1 and args.Lt == 1:
                break
            l = L
            for c in range(K**l):  #clas=0 or 1
                ncmaxL[L] = 0
                Entropy[L] = 0
                HgEntropy[L] = 0
                strc = str(c)
                #        strc=str(L)+'-'+str(c)
                #        ctoc[strc]=c
                y = np.zeros((1, T)).astype('float32')  #dummy
                Li = []
                for i in range(N):
                    if C[i, l] == c:
                        y = np.concatenate((y, Y[i, :].reshape(1, T)), axis=0)
                        Li.append(i)
                y = np.delete(y, 0, axis=0)  #delete dummy
                #        import pdb;pdb.set_trace(); #for debug
                nY, T1 = y.shape  #T1=T
                if nY > nth:  # for execute k-means cluster >=2:??
                    h0 = T
                    #          usemean=0
                    if args.m == 1:  #use mean
                        ym = np.mean(y, axis=0)
                        for n in range(nY):
                            h = horizon(y[n], ym, Ey)
                            if h0 > h:
                                h0 = h
                    else:  #leave-one-out
                        for n1 in range(nY):
                            for n2 in range(nY):
                                if n1 != n2:
                                    h = horizon(y[n1], y[n2], Ey)
                                    if h0 > h:
                                        h0 = h
                    ctoh0[strc] = h0
                    ctonY[strc] = nY
                    #        H[l,c]=h
                    print 'l c nY h=%3d %3d %3d %3d' % (l, c, nY, h0)
                    #          y=y[:,:h]
                    #          y=y[:,:h+10] ##?
                    ######################## K-means bellow
                    y = y[:, :h0 + args.hh]  ##?
                    if nY >= nth:
                        km = KMeans(n_clusters=K,
                                    init='k-means++',
                                    n_init=10,
                                    max_iter=300,
                                    tol=0.0001,
                                    precompute_distances='auto',
                                    verbose=0,
                                    random_state=None,
                                    copy_x=True,
                                    n_jobs=1)
                        pred = km.fit_predict(y)

                    else:
                        pred = [0
                                for i in range(len(y))]  #set all y in class 0
                    ip = 0
                    #          Li=[] #List of i
                    Lic0 = []  #List of i
                    Lic1 = []  #List of i
                    for i in range(N):
                        if C[i, l] == c:
                            C[i, l + 1] = C[i, l] * K + pred[ip]
                            #              Li.append(i)
                            if pred[ip] == 0:
                                Lic0.append(i)
                            else:
                                Lic1.append(i)
                            ip += 1
                    #            if L == 3 and c == 0:
                    #              import pdb;pdb.set_trace(); #for debug
                    ctoLi[strc] = Li  #list of n
                    ctoLic0[strc] = Lic0  #list of n
                    ctoLic1[strc] = Lic1  #list of n
            ######################## K-means above
                else:  #  if nY > nth: # for execute k-means cluster >=2:?? L180
                    #          print '#####################nY=%d<2,c=%d,L=%d' % (nY,c,L)
                    ctoLic0[strc] = Li  #list of n
                    ctoLic1[strc] = []  #list of n
                    ctoh0[strc] = 0
                    ctonY[strc] = nY
                    for n in range(N):
                        if C[n, l] == c:
                            C[n, l + 1] = C[n, l] * K
                            ctoLi[strc] = [n]
            #end for c in range(K**l): #clas=0 or 1 L167
            if L >= 0:
                for n in range(N):
                    print 'n%2d' % (n),
                    for l in range(0, L + 2):
                        print '%d ' % (C[n, l]),
#            print'L%d C%3d ' % (l,C[n,l]),
                    print ''
            for i, strc in enumerate(ctoh0):
                c = int(strc)  #???????
                #    import pdb;pdb.set_trace(); #for debug
                #    for c in cton.keys():
                #    for i in range(len(ctoLi)):
                #      c=str(i)
                if strc in ctoLi.keys():
                    print 'c=%d nY=%d h=%d iY' % (c, ctonY[strc],
                                                  ctoh0[strc]), ctoLi[strc]
            print '####'
            #######################
            ##display
            #######################
            #  import pdb;pdb.set_trace(); #for debug
            #  cols=matplotlib.colors.cnames.keys()
            #      cols = ["r", "g", "b", "c", "m", "y", "k","pink"]
            cols = ["g", "b", "c", "m", "y", "k", "pink"]
            cols.append(matplotlib.colors.cnames.keys())
            x = np.arange(0, T, 1)
            #    for i in range(len(ctoLi)):
            #      c=str(i)
            NsL = []
            for ic, c in enumerate(ctoLi):
                for n in ctoLi[c]:
                    if ctonY[c] > 1:
                        y = Y[n, :]
                        plt.plot(x, y, color=cols[ic % 7])  # one color per class (ic), not the stale loop variable i
#        plt.pause(0.05)
#        import pdb;pdb.set_trace(); #for debug
                print 'L%d c%s n=%d h0=%d' % (L, c, ctonY[c], ctoh0[c]),
                if L == 0:
                    print ''
                else:
                    print(' iY={}'.format(ctoLi[c]))
                    df = pd.DataFrame(Nsh)
                    df.to_csv('{}/Nsh-L{}c{}.csv'.format(args.d1, L, c),
                              index=False,
                              sep=' ',
                              header=None)
                    nNs = Ns.shape[0]
                    Nss = 'N:{}'.format(Ns[0, 0])
                    for iN in range(1, nNs):
                        Nss = Nss + ',{}'.format(Ns[iN, 0])
                    NsL.append([Nss, nNs, c])
                #
                if DISP[0] == '1': plt.show()
            #end for ic,c in enumerate(ctoLi): L268
            if L == Lmax - 1:  #L288
                ##################
                #        import pdb;pdb.set_trace(); #for debug
                #          import pdb;pdb.set_trace(); #for debug
                #      if L>0:
                #        import pdb;pdb.set_trace(); #for debug
                fp = open('{}/Ns-L{}.env'.format(args.d1, L), 'w')
                fp.write('export N_S="{}'.format(NsL[0][0]))
                for ic in range(1, len(NsL)):
                    fp.write(' {}'.format(NsL[ic][0]))
                fp.write('"\nexport n_seed_S="{}'.format(
                    int(100. / NsL[0][1] + 0.5)))
                for ic in range(1, len(NsL)):
                    fp.write(' {}'.format(int(100. / NsL[ic][1] + 0.5)))
                fp.write('"\n')
                fp.write('export nc_S="{},{}'.format(NsL[0][1], NsL[0][2]))
                for ic in range(1, len(NsL)):
                    fp.write(' {},{}'.format(NsL[ic][1], NsL[ic][2]))
                fp.write('" #number of N and c for check\n')
                fp.write('export d1={}'.format(args.d1))
                fp.close()
            #        print 'L%d c%s n=%d hc=%d' % (L,c,ctonY[c],ctoh0[c]),' cn=',ctoLi[c]
            #end if L==Lmax-1: #L288

            print '###mean'
            ymean = np.zeros((K**L, T)).astype('float32')  #dummy
            #    for i in range(len(ctoLi)):
            #      c=str(i)
            for i, c in enumerate(ctoLi):
                for n in ctoLi[c]:
                    ymean[i] += Y[n, :]
                ymean[i] = ymean[i] / len(ctoLi[c])
            ###
            f = open('ymean.dat', 'w')
            for t in range(T):
                f.write('%g' % ymean[0, t])
                for i in range(1, len(ctoLi)):
                    f.write(' %g' % ymean[i, t])
                f.write('\n')
            f.close()
            print 'ymean.dat is saved'

            if L >= 1:  # normalize cumulative deg
                degcumlsum = 0
                for c in range(K**L):  #
                    _c = c * K
                    degcuml = 1.
                    keycuml = str(L - 1) + '-' + str(c)
                    for l in range(1):
                        #          for l in range(L):
                        _c = _c / K
                        key = str(L - l - 1) + '-' + str(_c)
                        if key in Lctodeg:
                            degcuml *= Lctodeg[key]
                    Lctodegcuml[keycuml] = degcuml
                    degcumlsum += degcuml
                print 'degcuml:L%d-' % (L - 1),
                for c in range(K**L):  #
                    keycuml = str(L - 1) + '-' + str(c)
                    #          import pdb;pdb.set_trace(); #for debug
                    Lctodegcuml[keycuml] /= degcumlsum
                    print '(%d)%.3f' % (c, Lctodegcuml[keycuml]),
#          print '%s:%.2f' % (keycuml,Lctodegcuml[keycuml]),
                print ''

#      if os.path.isfile(fnyg):#same as L113???
#        print '#predictable horizons'
#        yg = np.array(pd.read_csv(fnyg,delim_whitespace=True,dtype=np.float32,header=None))
            if 1 == 1:
                h_all_A = np.array(h_all)
                LOOH_all_A = np.zeros(len(h_all_A)).astype('float32')
                print('max h(yi,yg)={} for i={}'.format(
                    h_all_A.max(), np.where(h_all_A == h_all_A.max())))
                print 'deg:', Lctodeg
                #        import pdb;pdb.set_trace(); #for debug
                #        plt.plot(x, Y[ib],color=cols[1])
                ##mean
                #      for i in range(len(ctoLi)):
                #        c=str(i)
                #        import pdb;pdb.set_trace(); #for debug
                for i, strc in enumerate(ctoLi):
                    c = int(strc)  #?????
                    key0 = str(L) + '-' + str(c * K)
                    key1 = str(L) + '-' + str(c * K + 1)
                    key = str(L - 1) + '-' + strc
                    h = horizon(ymean[i], yg, Ey)
                    Lctohg[key] = h
                    print 'L%d c%s N%d h(Yi,ymean)=%d h(ymean,yg)=%d' % (
                        L, strc, ctonY[strc], ctoh0[strc], h),
                    if ctonY[strc] >= nth:  #tag1
                        icol = (i + 2) % 6
                        #            if L==3 and strc=='5':
                        #              import pdb;pdb.set_trace(); #for debug
                        plt.plot(x, ymean[i], color=cols[icol])
                        #best yp via LOOCV horizon
                        nLOOH = len(ctoLi[strc])
                        LOOH = np.zeros(nLOOH).astype('float32')  #
                        for j, n in enumerate(ctoLi[strc]):
                            for m in ctoLi[strc]:
                                if n != m:
                                    H = horizon(Y[n], Y[m], Ey)
                                    LOOH[j] += H
                            if nLOOH > 1:  #20210520
                                LOOH[j] /= (nLOOH - 1)  # mean of the LOO horizons
                            LOOH_all_A[n] = LOOH[j]
#              if math.isnan(LOOH[j]):
#                import pdb;pdb.set_trace(); #for debug
#                print('check')
                        LOOHmax = LOOH.max()  #LOOCVH
                        #            if L==3:
                        #              import pdb;pdb.set_trace(); #for debug
                        nLOOHc0 = len(ctoLic0[strc])
                        nLOOHc1 = len(ctoLic1[strc])
                        #            print 'nLOOHc0,c1=%d %d' % (nLOOHc0, nLOOHc1)

                        if nLOOHc0 >= nth and nLOOHc1 >= nth:  #best yp via LOOCV horizon for c0
                            LOOHc0 = np.zeros(nLOOHc0).astype('float32')  #
                            for j, n in enumerate(ctoLic0[strc]):
                                for m in ctoLic0[strc]:
                                    if n != m:
                                        H = horizon(Y[n], Y[m], Ey)
                                        LOOHc0[j] += H
                                LOOHc0[j] /= (nLOOHc0 - 1)  # mean of the LOO horizons in sub-cluster 0
#              print 'LOOHc0(len=%d)' % nLOOHc0, LOOHc0
                            LOOHc0max = LOOHc0.max()  #LOOCVHc0
                            #best yp via LOOCV horizon for c1
                            LOOHc1 = np.zeros(nLOOHc1).astype('float32')  #
                            for j, n in enumerate(ctoLic1[strc]):
                                for m in ctoLic1[strc]:
                                    if n != m:
                                        H = horizon(Y[n], Y[m], Ey)
                                        LOOHc1[j] += H
                                LOOHc1[j] /= (nLOOHc1 - 1)  # mean of the LOO horizons in sub-cluster 1
#              print 'LOOHc1(len=%d)' % nLOOHc1, LOOHc1
                            LOOHc1max = LOOHc1.max()  #LOOCVHc0
                            ####
                            deg0 = float(nLOOHc0) * (LOOHc0max - ctoh0[strc])
                            deg1 = float(nLOOHc1) * (LOOHc1max - ctoh0[strc])
                            #              if (deg0+deg1)==0 == 0:
                            #              print 'nLOOHc0,c1=%d %d' % (nLOOHc0, nLOOHc1)
                            #              print 'deg0=%g = %d*(%g-%g)' % (deg0,nLOOHc0,LOOHc0max,ctoh0[strc])
                            #              print 'deg1=%g = %d*(%g-%g)' % (deg1,nLOOHc1,LOOHc1max,ctoh0[strc])
                            Lctodeg[key0] = deg0 / (deg0 + deg1)
                            #              import pdb;pdb.set_trace(); #for debug
                            Lctodeg[key1] = deg1 / (deg0 + deg1)
                        else:  #if nLOOHc0 >=3 and nLOOHc1 >=2:
                            if nLOOHc0 >= nth:
                                Lctodeg[key0] = 1  #0.5 #1
                            else:
                                Lctodeg[key0] = 0
                            if nLOOHc1 >= nth:
                                Lctodeg[key1] = 1  #0.5 #1
                            else:
                                Lctodeg[key1] = 0
####
                        Lhg = []
                        for j in range(len(LOOH)):
                            if LOOH[j] == LOOHmax:  #search all maximum
                                n = ctoLi[strc][j]
                                h = horizon(Y[n], yg, Ey)
                                print 'h(y%d,LOO)=%.1f h(y%d,yg)=%.1f' % (
                                    n, LOOH.max(), n, h),  #n??
                                ctoLig[strc] = [n]  #last ig
                                Lhg.append(h)  ###???use max?
                                if h >= 100.0:
                                    LONGhg = 1
                                    print '***',
                        if len(Lhg) > 0:
                            Lctohg[key] = max(Lhg)
                        else:
                            Lctohg[key] = 0
                        ####disp degs
                        if L >= 1:
                            keycuml = str(L - 1) + '-' + str(c)
                            print 'degs:%3f:' % (Lctodegcuml[keycuml]),
                            _c = c * K
                            #              for l in range(1):
                            for l in range(L):
                                _c = _c / K
                                keyl = str(L - l - 1) + '-' + str(_c)
                                if keyl in Lctodeg:
                                    print '%s:%.2f' % (keyl, Lctodeg[keyl]),
                                else:
                                    print '%s:?' % (keyl),

#            print 'degs=', Lctodeg,
#          print 'LOOCVh(yi%d)=%.1f h(yi%d,yg)=%.1f' % (LOOH.argmax(),LOOH.max(),n,horizon(Y[n],yg,Ey)),
#            plt.plot(x, Y[n],color="black")
#          print ' LOOCVh=%g nLOOH=%d ' % (LOOH,nLOOH),
                    else:  # if ctonY[strc] >=3:  tag1
                        print ' h(,yg)=%.1f' % (horizon(
                            Y[ctoLi[strc][0]], yg, Ey)),
    #          LOOH=0;nLOOH=0
    #          print ' LOOCVh=%g nLOOH=%d' % (LOOH,nLOOH),
                    if L == 0:
                        print ''
                    else:
                        print 'iY', ctoLi[strc]

                    c = strc
                    Ns = all_id[Sth_id[ctoLi[c]]]
                    h_Lc = h_all_A[ctoLi[c]]
                    Nsh = np.concatenate([Ns, h_Lc.reshape((-1, 1))], axis=1)
                    iNsh = np.concatenate(
                        [np.array(ctoLi[c]).reshape(-1, 1), Nsh], axis=1)
                    if len(h_Lc) == 1:
                        LOOH = np.ones(1) * (-1)  #
                    iNshh = np.concatenate([iNsh, LOOH.reshape(-1, 1)], axis=1)
                    print('i N s h(yi,yg) h-LOOCV(yi) h0={}'.format(h0))
                    LOOHmax = np.max(LOOH)
                    hgmax = np.max(h_Lc)
                    mes = ''
                    mesi = ''
                    mesg = ''
                    for i in range(len(iNshh)):
                        mes = '{} {:3.0f} {:3.0f} {:2.0f}'.format(
                            mes, iNshh[i, 0], iNshh[i, 1], iNshh[i, 2])
                        if iNshh[i, 3] == hgmax:
                            mes = '{} {:3.0f}* '.format(mes, iNshh[i, 3])
                            mesg = '{}({:.0f},{:.0f},{:.0f},{:.0f}*,{:.0f})'.format(
                                mesg, iNshh[i, 0], iNshh[i, 1], iNshh[i, 2],
                                iNshh[i, 3], iNshh[i, 4])
                        else:
                            mes = '{} {:3.0f}'.format(mes, iNshh[i, 3])
                        if iNshh[i, 4] == LOOHmax:
                            mes = '{} {:3.1f}* '.format(mes, iNshh[i, 4])
                            mesi = '{}({:.0f},{:.0f},{:.0f},{:.0f},{:.0f}*)'.format(
                                mesi, iNshh[i, 0], iNshh[i, 1], iNshh[i, 2],
                                iNshh[i, 3], iNshh[i, 4])
                            HgHoc = iNshh[i, 3]  #only last one
                            iLOOHmaxtmp = int(iNshh[i, 0])
                            if len(iNshh) > 1 and L == Lmax - 1:
                                #                import pdb;pdb.set_trace(); #for debug
                                envN = '{}{:.0f},'.format(envN, iNshh[i, 1])
                                Nenv.append(iNshh[i, 1])
                        else:
                            mes = '{} {:3.1f}  '.format(mes, iNshh[i, 4])
                        mes += '\n'
                    #          import pdb;pdb.set_trace(); #for debug
                    ##entropy
                    EntropyLc = 0
                    #          if L==4:
                    #            import pdb;pdb.set_trace(); #for debug
                    Pi = iNshh[:, 4]
                    if len(Pi) > 1:
                        #            Pi/=np.mean(Pi)
                        #            import pdb;pdb.set_trace(); #for debug
                        Pi /= np.sum(Pi)
                        EntropyLc = 0
                        for i in range(len(Pi)):
                            EntropyLc -= Pi[i] * math.log(Pi[i])
                        EntropyLc /= math.log(len(Pi))
#          import pdb;pdb.set_trace(); #for debug
                    print(mes)
                    ha = LOOH
                    print(
                        'hi-LOOCV@L{}c{} with n{} min{:.1f} max{:.1f} mean{:.1f} median{:.1f} std{:.1f} best-iNshh{} {} Entropy={:.3f}'
                        .format(L, c, len(ha), np.min(ha), np.max(ha),
                                np.mean(ha), np.median(ha), np.std(ha), mesi,
                                args.msg, EntropyLc))
                    nc = len(ha)
                    #          import pdb;pdb.set_trace(); #for debug
                    #          if nc>ncmaxL[L] or (nc==ncmaxL[L] and EntropyLc>Entropy[L]):
                    if nc >= ncmaxL[L]:
                        ncmaxL[L] = nc
                        HgHoL[L] = HgHoc
                        cncmaxL[L] = c
                        iLOOHmax[L] = iLOOHmaxtmp
#            if L==2:
#              import pdb;pdb.set_trace(); #for debug
                    if EntropyLc > Entropy[L]:
                        Entropy[L] = EntropyLc
                        HgEntropy[L] = HgHoc
                    if L == 0: LOOH_all_A0 = LOOH_all_A
                    #hist,bin_edges=np.histogram(hp,bins=10)
                    #          plt.clf()
                    #          plt.xlim(50,500)
                    #          plt.ylim(0,50)
                    #          plt.hist(ha, bins=20, histtype='barstacked', ec='black')
                    #          plt.title('ho@L{}c{}'.format(L,c))
                    #          fnfig='{}/ho_L{}c{}.eps'.format(args.d1,L,c)
                    #          plt.savefig(fnfig)
                    #          mylib.myshell('gv {}&'.format(fnfig))
                    ha = h_Lc
                    print(
                        'h(yi,yg)@L{}c{} with n{} min{:.1f} max{:.1f} mean{:.1f} median{:.1f} std{:.1f} best-iNshh{} {}'
                        .format(L, c, len(ha), np.min(ha), np.max(ha),
                                np.mean(ha), np.median(ha), np.std(ha), mesg,
                                args.msg))
#          plt.clf()
#          plt.xlim(50,500)
#          plt.ylim(0,50)
#          plt.hist(ha, bins=20, histtype='barstacked', ec='black')
#          plt.title('hg@L{}c{}'.format(L,c))
#          fnfig='{}/hg_L{}c{}.eps'.format(args.d1,L,c)
#          plt.savefig(fnfig)
#          mylib.myshell('gv {}&'.format(fnfig))
#          import pdb;pdb.set_trace(); #for debug
#          print('h(yi,yg) with  min{} max{} mean{:.3g} median{:.3g} std{:.3g}'.format(np.min(ha),np.max(ha),np.mean(ha),np.median(ha),np.std(ha)))
#print 'np.min(ha),np.max(ha),np.mean(ha),np.median(ha),np.std(ha),np.predictable(ha)',np.min(ha),np.max(ha),np.mean(ha),np.median(ha),np.std(ha),np.predictable(ha)
                if DISP[0] == '1':
                    plt.show()
            #######################
            ##display y
            #######################
#      import pdb;pdb.set_trace(); #for debugdisplay?
            if 1 == 1:
                if L >= 1:
                    fig = plt.figure(figsize=(6, 2.0 * 2**L))
                    gs = gridspec.GridSpec(4, 2)
                    plt.subplots_adjust(wspace=0.5, hspace=1.0)
#        elif L<=1:
#          fig=plt.figure(figsize=(6,2.0*2**L))
#          gs=gridspec.GridSpec(4,2)
#          plt.subplots_adjust(wspace=0.5, hspace=1.0)
                elif L == 0:
                    L1 = L + 1
                    fig = plt.figure(figsize=(6, 2. * 2**L1))
                    gs = gridspec.GridSpec(4, 2)
                    plt.subplots_adjust(wspace=0.5, hspace=1.0)

                cols = ["g", "b", "c", "m", "y", "k", "pink"]
                plt.clf()
                #     nc1=len(ctoLi)+1
                ygdisp = 0
                nc1 = len(ctoLi) + Lmax - 2
                nc1 = len(ctoLi) + L + 1
                nc1 = 2**L
                if L == 0: nc1 = 2
                xax = np.arange(0, Y.shape[1], 1)
                if ygdisp == 1:
                    #          nc1=len(ctoLi)+L
                    nc1 = len(ctoLi) + Lmax - 1
                    fig.add_subplot(nc1, 1, 1)
                    plt.plot(xax, yg, linewidth=5,
                             color="r")  #        plt.plot(xax, yg)
                    plt.title('yg')
                for ic, c in enumerate(ctoLi):
                    y_ = Y[ctoLi[c], :]
                    #          print '#check nc1,1,int(c)+2={},{},{}'.format(nc1,1,int(c)+2)
                    #          import pdb;pdb.set_trace(); #for debug
                    fig.add_subplot(nc1, 1, int(c) + 1)  #ygdisp=-
                    #          fig.add_subplot(nc1,1,int(c)+2) #ygdisp=1
                    plt.plot(xax, y_.T, linewidth=1)
                    #          plt.plot(xax, y_.T,linewidth=1,color=cols[ctoLi[c][0]%6])
                    #          import pdb;pdb.set_trace(); #for debug
                    if c == cncmaxL[L]:
                        plt.plot(xax,
                                 Y[iLOOHmax[L], :],
                                 linewidth=3,
                                 linestyle='solid',
                                 color='b')
                        print('##########iLOOH={} L{}c{}'.format(
                            iLOOHmax[L], L, cncmaxL[L]))
                    plt.plot(xax,
                             yg,
                             linewidth=2,
                             linestyle='solid',
                             color='r')
                    #          plt.plot(xax, yg,linewidth=2,linestyle='dashdot',color='r')
                    plt.title('yp in L{}c{} n{}'.format(L, c, y_.shape[0]))
#          import pdb;pdb.set_trace(); #for debug

#        plt.pause(0.05) #plt.show() #
                fnfig = '{}/y_L{}.eps'.format(args.d1, L)
                mypltshow(fnfig, '0')  # both branches of the former L == Lmax - 1 check were identical
#        mypltshow(fnfig,'1')
                plt.close()
                fig = plt.figure(figsize=(4, 4))
                nc1 = len(ctoLi) + 1
                #        import pdb;pdb.set_trace(); #for debug
                for ic, c in enumerate(ctoLi):
                    n = len(ctoLi[c])
                    #          fnfig='{}/hh_L{}c{}.eps'.format(args.d1,L,c)
                    #            if fnfig in ['../../result-chaospred/lorenz1e-8T0.025n10000p256m1_gmp/t0-2000:4300-4800k10N40ns50m2b1a1.0/hh_L1c1.eps',
                    #                             '../../result-chaospred/lorenz1e-8T0.025n10000p256m1_gmp/t0-2000:4300-4800k10N40ns50m2b1a1.0/hh_L2c2.eps']:
                    #              import pdb;pdb.set_trace(); #for debug
                    plt.clf()
                    #        plt.xlim(0,500);plt.ylim(0,500)
                    #        fig.add_subplot(nc1,1,1)
                    plt.scatter(h_all_A[ctoLi[c]],
                                LOOH_all_A[ctoLi[c]],
                                s=20,
                                c="w",
                                alpha=1.0,
                                linewidths="2",
                                edgecolors="k")
                    plt.grid(which='major', color='black', linestyle='--')
                    plt.title('ho vs. hg @L{}c{} n{}'.format(L, c, n))
                    fnfig = '{}/hh_L{}c{}.eps'.format(args.d1, L, c)
                    #          mypltshow(fnfig,'1')
                    mypltshow(fnfig, '0')

#        for ic,c in enumerate(ctoLi):
#          plt.clf()
# #         plt.xlim(0,500); plt.ylim(0,500)
# #         fig.add_subplot(nc1,1,int(c)+1)
#          plt.scatter(h_all_A[ctoLi[c]],LOOH_all_A[ctoLi[c]],s=20, c="w", alpha=1.0, linewidths="2",edgecolors="k");
#          plt.grid(which='major',color='black',linestyle='--');
#          n=len(ctoLi[c])
#          plt.title('ho vs. hg@L{}c{} n{}'.format(L,c,n))
#          fnfig='{}/hh_L{}.eps'.format(args.d1,L)
#          mypltshow(fnfig,0)
##        import pdb;pdb.set_trace(); #for debug
###        hh=np.concatenate([h_all_A.reshape(-1,1),LOOH_all_A.reshape(-1,1)],axis=1)
                for hD in ['hg', 'ho']:  #
                    plt.clf()
                    nc1 = 2**L  #len(ctoLi)+2
                    for ic, c in enumerate(ctoLi):
                        print('#check nc1,c={},{}'.format(nc1, c))
                        fig.add_subplot(nc1, 1, int(c) + 1)
                        if hD == 'hg':
                            ha = h_all_A[ctoLi[c]]
                        else:
                            #              import pdb;pdb.set_trace(); #for debug
                            ha = LOOH_all_A[ctoLi[c]]
                        plt.xlim(0, 500)
                        #          plt.ylim(0,40)
                        #          plt.hist(ha, bins=20, histtype='barstacked', ec='black')
                        binmin = 0
                        binmax = 400
                        binwidth = 10
                        #            import pdb;pdb.set_trace(); #for debug
                        if len(ha) > 1:
                            plt.hist(ha,
                                     bins=np.arange(binmin, binmax + binwidth,
                                                    binwidth),
                                     histtype='barstacked',
                                     ec='black')
                            plt.title('{}@L{}c{} n{}'.format(
                                hD, L, c, len(ha)))
                    fnfig = '{}/{}_L{}.eps'.format(args.d1, hD, L)
                    mypltshow(fnfig, '0')
            #######################
            ##display y
            #######################

            ###
            #end of if os.path.isfile(fnyg):#same as L113???
            L2ctoLig[str(L)] = ctoLig
###
#      for i,strc in enumerate(ctoLig):
#        c=int(strc) #
#        if strc in ctoLig.keys():
#          Li=ctoLig[strc]
#          i1=Li[0]+1
#          col=colsplt[(2**L+c+1)%9]
#          col=colsplt[(L)%9+1]
#          lw=(Lmax-L)*2
#          lw=2
#          lt=2*L+1
#          if L==0:
#            fpyLc.write(', \"'+fnyp+'\" using ($0+%d):%d lt %d lw %d lc rgb \"%s\"%s' % (tp0,i1,lt,lw,col,'\\\n'))
#          else:
#            fpyLc.write(', \"\" using ($0+%d):%d lt %d lw %d  lc rgb \"%s\"%s' % (tp0,i1,lt,lw,col,'\\\n'))
####

#end of for L in range(Lmin,Lmax):L156
        mesH = '{}'.format(Hgmax_A)
        for L in range(Lmax):
            mesH = '{} {}'.format(mesH, int(HgHoL[L]))
        for L in range(Lmax):
            mesH = '{} {:.5g}'.format(mesH, Entropy[L])
        mesH = '{} #Hgmax,(ncmax{})HgoL0,1,2,Entr0,1,2'.format(mesH, ncmaxL[L])
        import re
        d1ext = re.findall('/t0.*', args.d1)[0][1:]

        print('{} #{}'.format(mesH, d1ext))

        mesH = '{}'.format(Hgmax_A)
        for L in range(Lmax):
            mesH = '{} {}'.format(mesH, int(HgEntropy[L]))
        for L in range(Lmax):
            mesH = '{} {:.5g}'.format(mesH, Entropy[L])

        mesH = '{} #Hgmax,(Entropymax{:.5g})HgoL0,1,2,ncmaxL'.format(
            mesH, Entropy[L])
        print('{} {} #{}'.format(mesH, ncmaxL[L], d1ext))

        if L == Lmax - 1:

            #      import pdb;pdb.set_trace(); #for debug
            envN = envN[:-1]  #remove the last char ','
            fp = open('{}/N-L{}.env'.format(args.d1, L), 'w')
            fp.write('export N=N:{} #{}'.format(int(min(Nenv)), envN))
            fp.close()
        for Lmax1 in range(1, Lmax):
            fpyLc.write('\nset term tgif;set output \"%sL%d.obj\"\n' %
                        (fnyLc, Lmax1))
            i1 = 1
            lt = 1
            lw = 2
            col = colsplt[0]
            fpyLc.write('plot \"' + fnyg +
                        '\" using ($0+%d):%d lt %d lw %d lc rgb \"%s\"%s' %
                        (tp0, i1, lt, lw, col, '\\\n'))
            for L in range(Lmin, Lmax1 + 1):
                strL = str(L)
                if strL in L2ctoLig.keys():
                    ctoLig = L2ctoLig[str(L)]
                    for c in range(K**L):
                        strc = str(c)
                        if strc in ctoLig.keys():
                            Li = ctoLig[strc]
                            i1 = Li[0] + 1
                            col = colsplt[(L) % 9 + 1]
                            col = colsplt[(2**L + c) % 9]
                            lw = (Lmax - L) * 2
                            lw = 2
                            lt = 2 * L + 1
                            if L == 0:
                                fpyLc.write(
                                    ', \"' + fnyp +
                                    '\" using ($0+%d):%d lt %d lw %d lc rgb \"%s\"%s'
                                    % (tp0, i1, lt, lw, col, '\\\n'))
    #              fpyLc.write('plot \"'+fnyp+'\" using ($0+%d):%d lt %d lw %d lc rgb \"%s\"%s' % (tp0,i1,lt,lw,col,'\\\n'))
                            elif L == Lmax1:
                                fpyLc.write(
                                    ', \"\" using ($0+%d):%d lt %d lw %d lc rgb \"%s\"%s'
                                    % (tp0, i1, lt, lw, col, '\\\n'))
    #      fpyLc.write('\n');
            fpyLc.write(
                '\nset term postscript eps enhanced color;set output \"%sL%d.eps\";replot\n'
                % (fnyLc, Lmax1))

#####

        fpyLc.close()
        print args.tp0,
        Lctodegcuml['-1-0'] = 1
        for L in range(0, Lmax):
            c = 0
            key = str(L - 1) + '-' + str(c)
            for c in range(K**L):  #clas=0 or 1
                degs = []
                hgs = []
                key = str(L - 1) + '-' + str(c)
                print 'L%s' % (key),
                if key in Lctodegcuml:
                    degs.append(Lctodegcuml[key])
                else:
                    degs.append(0)
                if key in Lctohg.keys():
                    hgs.append(Lctohg[key])
                else:
                    hgs.append(0)
                adegs = np.array(degs)
                ilist = np.argsort(degs)[::-1]
                for i in ilist:
                    print ' %.3f %.3f' % (hgs[i], degs[i]),


### display the y-t in L-c by gnuplot
    print '#class hg deg ...'
    print 'Lctohg:', Lctohg
    print 'Lc-hg %d ' % (tp0), L2ctoLig
    print 'dtodegcuml:', Lctodegcuml
    print('#Results2 are saved in d1={}'.format(args.d1))
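The core of the snippet above is the hierarchical split encoded in C[i, l + 1] = C[i, l] * K + pred[ip]: at each level every class is re-clustered with K-means and the child index appends the new cluster label to the parent index in base K. Below is a minimal, self-contained sketch of just that bookkeeping on made-up data; the horizon/LOOCV scoring of the original is omitted and every name is illustrative.

import numpy as np
from sklearn.cluster import KMeans

K = 2                                   # branching factor, as in the snippet above
Lmax = 3                                # number of split levels
rng = np.random.default_rng(0)
Y = rng.normal(size=(40, 50))           # toy set of 40 series of length 50
C = np.zeros((Y.shape[0], Lmax + 1), dtype=int)   # C[i, l] = class of series i at level l

for L in range(Lmax):
    for c in range(K**L):               # every class at the current level
        idx = np.where(C[:, L] == c)[0]
        if len(idx) < K:                # too small to split: all members go to child 0
            C[idx, L + 1] = c * K
            continue
        pred = KMeans(n_clusters=K, n_init=10, random_state=0).fit_predict(Y[idx])
        C[idx, L + 1] = c * K + pred    # child index encodes the whole path from the root

print(C[:10])                           # each row is one series' path through the tree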
Ejemplo n.º 18
0
from matplotlib import gridspec

Z = np.arange(3, 99)

yields_bad = xrl_np.FluorYield(Z, np.array([xrl_np.K_SHELL])).squeeze()

yields_good = []

with open('fluor_yield_revised.dat', 'r') as file:
    for line in file:
        [myZ, y] = line.split()
        yields_good.append(y)
    yields_good = np.asarray(yields_good, dtype=np.float64)
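# Equivalent, more compact read of the same two-column file (assuming the whitespace-
# separated "Z  yield" layout implied by the loop above); the names below are purely
# illustrative and are not used elsewhere.
Z_col, yields_alt = np.loadtxt('fluor_yield_revised.dat', unpack=True)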

fig = plt.figure(figsize=(7, 10))
gs = gridspec.GridSpec(3, 1, height_ratios=[3, 1, 1])
ax = fig.add_subplot(gs[0])
ax.set_xlabel('Atomic number')
ax.set_ylabel('Fluorescence yield')
plot_bad, = ax.plot(Z,
                    yields_bad,
                    color='r',
                    marker='o',
                    label='Fitted Hubbell')
plot_good, = ax.plot(Z,
                     yields_good[0:Z.shape[0]],
                     color='g',
                     marker='o',
                     label='Recommended Hubbell')
ax.legend(handles=[plot_bad, plot_good], loc=4)
Ejemplo n.º 19
0
    1.5
}
mpl.rcParams.update(pdf_with_latex)
import matplotlib.gridspec as gridspec
from matplotlib.pyplot import *
from mpl_toolkits.axes_grid1.inset_locator import inset_axes

colors = ['red', 'green', 'blue', 'purple', 'orange']
cmaps = ['Reds', 'Greens', 'Blues', 'Purples', 'Oranges']
markers = ['o', '^', '*', 's']

fig = figure(figsize=figsize(1.0))  #, tight_layout=True)

outergs = gridspec.GridSpec(2,
                            2,
                            figure=fig,
                            height_ratios=[1.0, 0.6],
                            hspace=0.4)

#############################################
# A. Examples of autocorrelograms
#############################################
from matplotlib.patches import FancyArrowPatch, ArrowStyle, ConnectionPatch, Patch
from matplotlib.lines import Line2D
import matplotlib.colors as colors
import matplotlib.cm as cmx

# suC = fig.add_subplot(3,2,3)
gsC = gridspec.GridSpecFromSubplotSpec(3,
                                       3,
                                       subplot_spec=outergs[0, 0],
Ejemplo n.º 20
0
# This matrix keeps only the non-Dirichlet rows of the del²psi operator; the remaining rows
# simply enforce psi_i,j = 0. It is a sparse identity matrix whose entries on the Dirichlet
# rows are zeroed, so multiplying by it drops those rows' contributions.
# PsiEliminator -> psiElim
psiElim = np.ones((nx * ny, ))
psiElim[0:ny] = 0
psiElim[-ny:] = 0
psiElim[ny::ny] = 0
psiElim[ny - 1::ny] = 0
psiElim = sparse.csc_matrix(sparse.diags(psiElim, 0))
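# --- Illustrative check, not part of the original script -----------------------------
# psiElim is a diagonal 0/1 matrix, so multiplying by it zeroes the entries that belong
# to Dirichlet boundary points. Quick sanity check with a made-up vector:
import numpy as np
_demo = np.arange(nx * ny, dtype=float) + 1.0           # any strictly nonzero vector
_dirichlet_rows = np.flatnonzero(psiElim.diagonal() == 0)
assert np.all(psiElim.dot(_demo)[_dirichlet_rows] == 0)
# --------------------------------------------------------------------------------------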

# Pretty straightforward; set up the animation figure and axes
if (generateAnimation):
    # Set up for animation
    fig = plt.figure(figsize=(8.5, 5), dpi=300)
    gs = gridspec.GridSpec(3, 2)
    gs.update(wspace=0.5, hspace=0.75)
    ax1 = plt.subplot(gs[0:2, :], )
    ax2 = plt.subplot(gs[2, 0])
    ax3 = plt.subplot(gs[2, 1])
else:
    fig = plt.figure(figsize=(8.5, 5), dpi=600)

# -- Preconditioning -- #
if (useSuperLUFactorizationEq1):
    start = time.time()
    # Makes LU decomposition.
    factor1 = sparse.linalg.factorized(dlOpPsi)
    end = time.time()
    print("Runtime of LU factorization for eq-1: %.2e seconds" % (end - start))
Ejemplo n.º 21
0
    def mkplot(self):
        """@todo: Docstring for mkplot
        :returns: @todo
        """
        #set up for gridspec plot...

        subplotnum=len(self.pdict)

        plt.close('all')
        #width then height
        fig=plt.figure(figsize=(self.pdims[0]*4, self.pdims[1]*4))

        if self.sepcbar:
            fig=plt.figure(figsize=(self.pdims[0]*4, self.pdims[1]*5))

        if self.sharex:
            hs=.06

        else:
            hs=.225

        #make more space for separate colorbar..
        if self.sepcbar:
            hs+=.06

        if self.sharey:
            ys=.06
        else:
            ys=.08

        #make more space for lots of ylabels..
        if not self.sharey:
            ys+=.08

        #working out the global min and maxes of all fields so we can have one colourbar
        if self.globalcbar!='False':
            fgmin=np.min([np.min(field) for field in self.pdict.values()])
            fgmax=np.max([np.max(field) for field in self.pdict.values()])

            if self.globalclimits is not None:
                fgmin=self.globalclimits[0]
                fgmax=self.globalclimits[1]
            
            gs = gridspec.GridSpec(self.pdims[0], self.pdims[1]+1,\
                    width_ratios=[15]*self.pdims[1]+[1],hspace=hs,wspace=ys)
        else:
            gs = gridspec.GridSpec(self.pdims[0], self.pdims[1],hspace=hs,wspace=ys)

        names=itertools.cycle(self.pdict.keys())
        fields=itertools.cycle(self.pdict.values())
        if len(self.dimlabels.keys())!=0:
            labels=itertools.cycle(self.dimlabels.values())
            labelme=True
        else:
            labelme=False

        ax_xshares=[]
        ax_yshares=[]
        self.paxis={}
        pnum=1
        for rownum in range(self.pdims[0]):
            for colnum in range(self.pdims[1]):
                if pnum<=subplotnum:
                    # print rownum,colnum
                    # print pnum,subplotnum

                    #contour plot
                    name=names.next()
                    field=fields.next()
                    if labelme:
                        label=labels.next()

                    #put it in a list so we can append
                    #bad idea messing with the passed items if we iterate on the object (see tests)
                    # self.pdict[name]=[self.pdict[name]]

                    if rownum==0:
                        ax=plt.subplot(gs[rownum,colnum])
                        ax_xshares.append(ax)

                    #this if needs to be separate from the one above
                    if colnum==0:
                        ax=plt.subplot(gs[rownum,colnum])
                        ax_yshares.append(ax)

                    if self.sharex and rownum>0:
                        ax=plt.subplot(gs[rownum,colnum],sharex=ax_xshares[colnum])

                    if self.sharey and colnum>0:
                        ax=plt.subplot(gs[rownum,colnum],sharey=ax_yshares[rownum])

                        # make labels invisible
                        plt.setp(ax.get_yticklabels(),visible=False)

                    if not self.sharey and not self.sharex:
                        ax=plt.subplot(gs[rownum,colnum])
                        if labelme:
                            ax.set_xlabel(label[0])
                            ax.set_ylabel(label[1])

                    # self.pdict[name]=self.pdict[name].append(ax)
                    self.paxis[name]=ax

                    #
                    if self.globalcbar=='False': #when we are NOT using globalcbar
                        if len(self.cbars.keys())!=0:
                            if self.clevels!=0:
                                cs1=ax.contourf(field,levels=np.linspace(np.min(field),np.max(field),self.clevels),cmap=self.cbars[name])
                            else:
                                cs1=ax.contourf(field,cmap=self.cbars[name])
                        else:
                            if self.clevels!=0:
                                cs1=ax.contourf(field,levels=np.linspace(np.min(field),np.max(field),self.clevels))
                            else:
                                cs1=ax.contourf(field)
                    else: #when a globalcbar is being used

                        #what if we wanted a new centre?
                        if self.globalcbarmiddle:
                            if self.globalcbar=='True':
                                self.globalcbar='jet'
                            colourmap=cmap_center_point_adjust(matplotlib.cm.get_cmap(self.globalcbar),(fgmin,fgmax),self.globalcbarmiddle)
                        else:
                            colourmap=self.globalcbar

                        if self.clevels!=0:
                            if self.globalcbar=='True':
                                cs1=ax.contourf(field,levels=np.linspace(fgmin,fgmax,self.clevels))
                            else:
                                    cs1=ax.contourf(field,levels=np.linspace(fgmin,fgmax,self.clevels),cmap=colourmap)
                        else:
                            if self.globalcbar=='True':
                                cs1=ax.contourf(field,levels=np.linspace(fgmin,fgmax,7)) #defaulting to seven here!
                            else:
                                cs1=ax.contourf(field,levels=np.linspace(fgmin,fgmax,7),cmap=colourmap) #defaulting to seven here!


                    if self.sepcbar or len(self.cbars.keys())!=0:
                        #separate colorbars
                        # Create divider for existing axes instance
                        divider = make_axes_locatable(ax)
                        # Append axes to the right of ax, with 20% width of ax
                        caxis = divider.append_axes("bottom", size="10%", pad=0.45)
                        plt.colorbar(cs1,cax=caxis,orientation='horizontal')

                    # make xlabels invisible
                    if self.sharex and rownum<self.pdims[0]-1:
                        plt.setp(ax.get_xticklabels(),visible=False)

                    #labeling when we are sharing axis
                    if labelme:
                        if colnum==0:
                            ax.set_ylabel(label[1])

                        if rownum==self.pdims[0]-1:
                            ax.set_xlabel(label[0])

                    if self.zoom is not None:
                        ax.set_xlim([self.zoom[0][0],self.zoom[0][1]])
                        ax.set_ylim([self.zoom[1][0],self.zoom[1][1]])

                    # ax.set_title(name)
                    inset_title_box(ax,name)

                    pnum+=1

        if self.globalcbar!='False':
            ax1 = plt.subplot(gs[0:self.pdims[0]+1,self.pdims[1]])
            plt.colorbar(cs1,cax=ax1,orientation='vertical')

        if self.outputpath!='':
            mkdir(os.path.dirname(self.outputpath))
            plt.savefig(self.outputpath,dpi=300)
        else:
            plt.show()

        return self
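The per-panel horizontal colorbars above come from the make_axes_locatable / append_axes pattern. A minimal standalone sketch of just that piece, with made-up data:

import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable

fig, ax = plt.subplots()
cs = ax.contourf(np.random.rand(20, 20))
divider = make_axes_locatable(ax)
cax = divider.append_axes("bottom", size="10%", pad=0.45)   # strip appended below the panel
fig.colorbar(cs, cax=cax, orientation='horizontal')
plt.show()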
Ejemplo n.º 22
0
rgb = cv2.cvtColor(orig, cv2.COLOR_RGB2BGR)

plt.imshow(gray, cmap='gray')

#%%

tags = det.detect(gray)

#%%
import matplotlib.gridspec as gridspec
#fig = plt.figure()

plt.clf()


gs = gridspec.GridSpec(2, 2, width_ratios=[2,1])
#ax = [plt.subplot(gs_i) for gs_i in gs]
ax = [plt.subplot(gs_i) for gs_i in [gs[:,0],gs[0,1],gs[1,1]]]


ax[0].imshow(gray, cmap='gray')
atd.plot_detections(tags,ax[0], orig, labels=range(len(tags)))




id=21

pixsize = 10
S = 9*pixsize
s = pixsize
Ejemplo n.º 23
0
    def draw(
        self,
        check_data_size: bool = True
    ) -> Optional[Tuple[mpl.figure.Figure, mpl.axes.Axes]]:
        '''Draw the subplots.
        
        Args:
            check_data_size: If set to True, will not plot if there are > 100K points to avoid locking up your computer for a long time.
              Default True
        '''
        if not has_display():
            print('no display found, cannot plot')
            return None

        plot_timestamps = self._get_plot_timestamps()
        if check_data_size and plot_timestamps is not None and len(
                plot_timestamps) > 100000:
            raise Exception(
                f'trying to plot large data set with {len(plot_timestamps)} points, reduce date range or turn check_data_size flag off'
            )

        date_formatter = None
        if plot_timestamps is not None:
            date_formatter = get_date_formatter(plot_timestamps,
                                                self.date_format)
        height_ratios = [subplot.height_ratio for subplot in self.subplot_list]

        fig = plt.figure(figsize=self.figsize)
        gs = gridspec.GridSpec(len(self.subplot_list),
                               1,
                               height_ratios=height_ratios,
                               hspace=self.hspace)
        axes = []

        for i, subplot in enumerate(self.subplot_list):
            if subplot.is_3d:
                ax = plt.subplot(gs[i], projection='3d')
            else:
                ax = plt.subplot(gs[i])
            axes.append(ax)

        time_axes = [
            axes[i] for i, s in enumerate(self.subplot_list) if s.time_plot
        ]
        if len(time_axes):
            time_axes[0].get_shared_x_axes().join(*time_axes)

        for i, subplot in enumerate(self.subplot_list):
            subplot._draw(axes[i], plot_timestamps, date_formatter)

        if self.title: axes[0].set_title(self.title)

        # We may have added new axes in candlestick plot so get list of axes again
        ax_list = fig.axes
        for ax in ax_list:
            if self.show_grid: ax.grid(linestyle='dotted')

        for ax in ax_list:
            if ax not in axes: time_axes.append(ax)

        for ax in time_axes:
            if self.show_date_gaps and plot_timestamps is not None:
                _draw_date_gap_lines(ax, plot_timestamps)

        for ax in ax_list:
            ax.autoscale_view()

        return fig, ax_list
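The draw() method above stacks one column of panels with height_ratios and then links the time panels through get_shared_x_axes().join(...). A minimal sketch of the same layout that instead passes sharex= when the axes are created, which is the more common route; the data and sizes are invented for illustration:

import numpy as np
import matplotlib.pyplot as plt
from matplotlib import gridspec

t = np.arange(100)
fig = plt.figure(figsize=(8, 6))
gs = gridspec.GridSpec(2, 1, height_ratios=[3, 1], hspace=0.1)
ax_top = fig.add_subplot(gs[0])
ax_bottom = fig.add_subplot(gs[1], sharex=ax_top)    # share the time axis up front
ax_top.plot(t, np.cumsum(np.random.randn(100)))
ax_bottom.bar(t, np.random.rand(100))
plt.setp(ax_top.get_xticklabels(), visible=False)    # only the bottom panel keeps tick labels
plt.show()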
Ejemplo n.º 24
0
	def plot(self, format = 'landscape'):
		"""
		Plot the weights distribution of the window and the associated
		spectrum (work only for 1D and 2D windows).
		"""
		nod = len(self.dims)
		if nod == 1:
			# Compute 1D spectral response
			spectrum = np.fft.fft(self.coefficients.squeeze(), 2048) / (len(self.coefficients.squeeze()) / 2.0)
			freq = np.linspace(-0.5, 0.5, len(spectrum))
			response = 20 * np.log10(np.abs(np.fft.fftshift(spectrum / abs(spectrum).max())))
			# Plot window properties
			fig, (ax1, ax2) = plt.subplots(nrows=1, ncols=2, figsize=(10, 5))
			# First plot: weight distribution
			n = self._depth.values()[0]
			ax1.plot(np.arange(-n, n + 1), self.coefficients.squeeze())
			ax1.set_xlim((-n, n))
			ax1.set_title("Window: " + self.name)
			ax1.set_ylabel("Amplitude")
			ax1.set_xlabel("Sample")
			# Second plot: frequency response
			ax2.plot(freq, response)
			ax2.axis([-0.5, 0.5, -120, 0])
			ax2.set_title("Frequency response of the " + self.name + " window")
			ax2.set_ylabel("Normalized magnitude [dB]")
			ax2.set_xlabel("Normalized frequency [cycles per sample]")
			ax2.grid(True)
			plt.tight_layout()
		elif nod == 2:
			# Compute 2D spectral response
			spectrum = (np.fft.fft2(self.coefficients.squeeze(), [1024, 1024]) /
			            (np.size(self.coefficients.squeeze()) / 2.0))
			response = np.abs(np.fft.fftshift(spectrum / abs(spectrum).max()))
			fx = np.linspace(-0.5, 0.5, 1024)
			fy = np.linspace(-0.5, 0.5, 1024)
			f2d = np.meshgrid(fy, fx)
			print(self._depth)
			nx, ny = self._depth.values()
			if  format == 'landscape':
				gs = gridspec.GridSpec(2, 4, width_ratios=[2, 1, 2, 1], height_ratios=[1, 2])
				plt.figure(figsize=(11.69, 8.27))
			elif format == 'portrait':
				plt.figure(figsize=(8.27, 11.69))
			# Weight distribution along x
			ax_nx = plt.subplot(gs[0])
			ax_nx.plot(np.arange(-nx, nx + 1), self.coefficients.squeeze()[:, ny])
			ax_nx.set_xlim((-nx, nx))
			# Weight distribution along y
			ax_nx = plt.subplot(gs[5])
			ax_nx.plot(self.coefficients.squeeze()[nx, :], np.arange(-ny, ny + 1))
			ax_nx.set_ylim((-ny, ny))
			# Full 2d weight distribution
			ax_n2d = plt.subplot(gs[4])
			nx2d, ny2d = np.meshgrid(np.arange(-nx, nx + 1), np.arange(-ny, ny + 1), indexing='ij')
			print(np.shape(nx2d))
			ax_n2d.pcolormesh(nx2d, ny2d, self.coefficients.squeeze())
			ax_n2d.set_xlim((-nx, nx))
			ax_n2d.set_ylim((-ny, ny))
			box = dict(facecolor='white', pad=10.0)
			ax_n2d.text(0.97, 0.97, r'$w(n_x,n_y)$', fontsize='x-large', bbox=box, transform=ax_n2d.transAxes,
			            horizontalalignment='right', verticalalignment='top')
			# Frequency response for fy = 0
			ax_fx = plt.subplot(gs[2])
			spectrum_plot(ax_fx, fx, response[:, 512].squeeze(),)
			# ax_fx.set_xlim(xlim)
			ax_fx.grid(True)
			ax_fx.set_ylabel(r'$R(f_x,0)$', fontsize=24)
			# Frequency response for fx = 0
			ax_fy = plt.subplot(gs[7])
			spectrum_plot(ax_fy, response[:, 512].squeeze(), fy)
			#ax_fy.set_ylim(ylim)
			ax_fy.grid(True)
			ax_fy.set_xlabel(r'$R(0,f_y)$', fontsize=24)
			# Full 2D frequency response
			ax_2d = plt.subplot(gs[6])
			spectrum2d_plot(ax_2d, fx, fy, response, zlog=True)
			ax_2d.set_ylabel(r'$f_y$', fontsize=24)
			ax_2d.set_xlabel(r'$f_x$', fontsize=24)
			ax_2d.grid(True)
			box = dict(facecolor='white', pad=10.0)
			ax_2d.text(0.97, 0.97, r'$R(f_x,f_y)$', fontsize='x-large', bbox=box, transform=ax_2d.transAxes,
			           horizontalalignment='right', verticalalignment='top')
			plt.tight_layout()
		else:
			raise ValueError("This number of dimension is not supported by the plot function")
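The 1D branch above normalizes the FFT of the window coefficients and converts it to dB. The same steps in isolation, applied to a NumPy Hann window (window length and FFT size are chosen arbitrarily for illustration):

import numpy as np
import matplotlib.pyplot as plt

w = np.hanning(65)
spectrum = np.fft.fft(w, 2048) / (len(w) / 2.0)
freq = np.linspace(-0.5, 0.5, len(spectrum))
response = 20 * np.log10(np.abs(np.fft.fftshift(spectrum / abs(spectrum).max())))

fig, (ax1, ax2) = plt.subplots(nrows=1, ncols=2, figsize=(10, 4))
ax1.plot(np.arange(-32, 33), w)
ax1.set_xlabel("Sample")
ax1.set_ylabel("Amplitude")
ax2.plot(freq, response)
ax2.axis([-0.5, 0.5, -120, 0])
ax2.set_xlabel("Normalized frequency [cycles per sample]")
ax2.set_ylabel("Normalized magnitude [dB]")
plt.tight_layout()
plt.show()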
Ejemplo n.º 25
0
 def giant_component_death_curve(calculate_on,
                                 pd_datas,
                                 xlim=None,
                                 ylim=None,
                                 figsize=None,
                                 tight_layout=None,
                                 count='components'):
     if figsize:
         fig = plt.figure(figsize=figsize)
     else:
         fig = plt.figure()
     plot_gridspec = gridspec.GridSpec(3,
                                       3,
                                       width_ratios=[1, 0.001, 1],
                                       height_ratios=[1, 0.001, 1])
     graphs_index = 0
     if count == 'components':
         ylabel = "Number of components"
     else:
         ylabel = "Giant component size"
     xlabel = "Normalized edge weight"
     markers = [
         'D', 's', 'x', '^', 'd', 'h', '+', '*', ',', 'o', '.', '1', 'p',
         '3', '2', '4', 'H', 'v', '8', '<', '>'
     ]
     colorblind = [(0, 0, 0), (230, 159, 0), (86, 180, 233), (0, 158, 115),
                   (240, 228, 66), (0, 114, 178), (213, 94, 0),
                   (204, 121, 167)]
     colorblind = ['#%02x%02x%02x' % (c[0], c[1], c[2]) for c in colorblind]
     colorcycler = cycle(
         [colorblind[3], colorblind[5], colorblind[6], colorblind[1]])
     x_lim = [np.inf, 0]
     y_lim = [np.inf, 0]
     colors = {}
     for title, pd_data in pd_datas:
         for title_legend in pd_data:
             x_lim[0] = min(x_lim[0], min(pd_data[title_legend]['x']))
             x_lim[1] = max(x_lim[1], max(pd_data[title_legend]['x']))
             y_lim[0] = min(y_lim[0], min(pd_data[title_legend]['y']))
             y_lim[1] = max(y_lim[1], max(pd_data[title_legend]['y']))
             if title_legend not in colors:
                 colors[title_legend] = next(colorcycler)  # next() works on Python 2 and 3
     for title, pd_data in pd_datas:
         x = graphs_index // (len(pd_datas) // 2)  # integer division: x, y index the gridspec
         y = graphs_index % (len(pd_datas) // 2)
         y = 2 if y == 1 else y
         x = 2 if x == 1 else x
         ax = fig.add_subplot(plot_gridspec[x, y])
         ax.set_axisbelow(True)
         legends = sorted(pd_data.keys())  # sorted() replaces the Python-2-only keys() + sort()
         markercycler = cycle(markers)
         for title_legend in legends:
             pd_data_i = pd_data[title_legend]
             plt.grid(True)
             color = colors[title_legend]
             plt.plot(pd_data_i['x'],
                      pd_data_i['y'],
                      marker=next(markercycler),
                      label=title_legend,
                      color=color,
                      mec=color,
                      mew=1.4,
                      linewidth=1.2,
                      markevery=2,
                      markersize=3)
         if not xlim:
             plt.xlim(x_lim[0], x_lim[1])
         else:
             plt.xlim(*xlim)
         if not ylim:
             plt.ylim(y_lim[0], y_lim[1])
         else:
             plt.ylim(*ylim)
         plt.title(title)
         graphs_index += 1
     plt.legend(loc=4, numpoints=1)
     ax3 = fig.add_subplot(plot_gridspec[1, 0])
     ax3.set_yticks([])
     ax3.set_xticks([])
     ax3.set_frame_on(False)
     plt.ylabel(ylabel, fontsize=9, labelpad=20)
     ax3 = fig.add_subplot(plot_gridspec[2, 1])
     ax3.set_yticks([])
     ax3.set_xticks([])
     ax3.set_frame_on(False)
     plt.xlabel(xlabel, fontsize=9, labelpad=20)
     plt.suptitle("Snapshot of the " + str(calculate_on) + "th iteration",
                  fontsize=14)
     if tight_layout is not None:
         if not tight_layout:
             plt.tight_layout()
         else:
             plt.tight_layout(rect=tight_layout)
     plt.show()
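The frameless, tickless axes created near the end above exist only to carry a y label and an x label shared by the whole grid. A compact sketch of the same trick using a single spanning axes instead of the snippet's 0.001-sized dummy gridspec cells (made-up data):

import matplotlib.pyplot as plt
from matplotlib import gridspec

fig = plt.figure()
gs = gridspec.GridSpec(2, 2)
for i in range(4):
    fig.add_subplot(gs[i // 2, i % 2]).plot([0, 1], [0, 1])

# One frameless axes spanning the grid, used only to hold the shared labels.
label_ax = fig.add_subplot(gs[:, :], frameon=False)
label_ax.set_xticks([])
label_ax.set_yticks([])
label_ax.set_xlabel("shared x label", labelpad=20)
label_ax.set_ylabel("shared y label", labelpad=20)
plt.show()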
Ejemplo n.º 26
0
plotBool = False

filestart = 'TDSEkickF2experparams'
numlist = np.arange(6.0, 16.0)
forcelist = (lambdaL / Erecoil) * hbar * 2.0 * k_L / (numlist * 1e-3)
filelist = [
    filestart + str(np.round(numlist[ind], 1)) + '.npz'
    for ind in range(numlist.size)
]
filelist = np.array(filelist)

fig = plt.figure()
fig.clear()
fig.set_size_inches(6.5, 3.5)
gs = gridspec.GridSpec(1, 2)
gs.update(left=0.15,
          right=0.95,
          top=0.85,
          bottom=0.2,
          wspace=0.25,
          hspace=0.25)
pan = fig.add_subplot(gs[0, 0])

chernLine = np.zeros(filelist.size)
delChernLine = np.zeros(filelist.size)
chernDio = np.zeros(filelist.size)
delChernDio = np.zeros(filelist.size)
for ind, filename in enumerate(filelist):
    TDSEfile = np.load(filename)
    qlistTDSE = (TDSEfile['tgrid'] * hbar / Erecoil -
Ejemplo n.º 27
0
plt.xlim(Time_Start, Time_End)
plt.ylim(TS_Min, TS_Max)

plt.subplot(4, 1, 4, sharex=ax1)
plt.plot(Time, df_SWC['SWC_1'], Time, df_SWC['SWC_2'], Time, df_SWC['SWC_3'],
         Time, df_SWC['SWC_4'], Time, df_SWC['SWC_5'], Time, df_SWC['SWC_6'])
plt.legend(('5cm', '10cm', '20 cm', '30cm', '40cm', '100 cm'),
           loc='upper right')
plt.ylabel('SWC (m3/m3)')
plt.ylim(SWC_Min, SWC_Max)
#plt.title('Pit 1 (~12 m from K34 Tower)')

#%%
# Soil Water Content contours

gs = gridspec.GridSpec(4, 2, width_ratios=[30, 1])
fig = plt.figure()
ax5 = fig.add_subplot(gs[0, 0])
plt.plot(Time_Met, df_Met['P_2'])
plt.ylabel('PREC (mm/30min)')
plt.xlim(Time_Start, Time_End)
plt.title('Soil Moisture')

fig.add_subplot(gs[1, 0], sharex=ax5)
im = plt.contourf(Time, Depth_Soil, Soil_Data, levels=Range_SWC, cmap=cm.jet_r)
# plt.xlim(pd.Timestamp('2015-07-01'), pd.Timestamp('2016-07-01'))
# plt.legend(('k34'),loc = 'upper right')
# plt.colorbar()

fig.add_subplot(gs[2, 0], sharex=ax5)
plt.contourf(Time_R, Depth_R, SWC_p1, levels=Range_SWC, cmap=cm.jet_r)
Ejemplo n.º 28
0
plot_0_1_2 = __var__repeat1_____MAPK_PP

# DataGenerator <plot_0_2_2>
__var__repeat1_____MKK = np.transpose(np.array([sim['[MKK]'] for sim in repeat1]))
if len(__var__repeat1_____MKK.shape) == 1:
     __var__repeat1_____MKK.shape += (1,)
plot_0_2_2 = __var__repeat1_____MKK

# --------------------------------------------------------
# Outputs
# --------------------------------------------------------
# Output <plot_0>
from mpl_toolkits.mplot3d import Axes3D
fig = plt.figure(num=None, figsize=(9, 5), dpi=80, facecolor='w', edgecolor='k')
from matplotlib import gridspec
__gs = gridspec.GridSpec(1, 2, width_ratios=[3, 1])
ax = plt.subplot(__gs[0], projection='3d')
for k in range(plot_0_0_0.shape[1]):
    if k == 0:
        ax.plot(plot_0_0_0[:,k], plot_0_0_1[:,k], plot_0_0_2[:,k], marker = '.', color='r', linewidth=1.5, markersize=4.0, alpha=0.8, label='repeat1.MAPK_P')
    else:
        ax.plot(plot_0_0_0[:,k], plot_0_0_1[:,k], plot_0_0_2[:,k], marker = '.', color='r', linewidth=1.5, markersize=4.0, alpha=0.8)
for k in range(plot_0_0_0.shape[1]):
    if k == 0:
        ax.plot(plot_0_0_0[:,k], plot_0_0_1[:,k], plot_0_1_2[:,k], marker = '.', color='b', linewidth=1.5, markersize=4.0, alpha=0.8, label='repeat1.MAPK_PP')
    else:
        ax.plot(plot_0_0_0[:,k], plot_0_0_1[:,k], plot_0_1_2[:,k], marker = '.', color='b', linewidth=1.5, markersize=4.0, alpha=0.8)
for k in range(plot_0_0_0.shape[1]):
    if k == 0:
        ax.plot(plot_0_0_0[:,k], plot_0_0_1[:,k], plot_0_2_2[:,k], marker = '.', color='g', linewidth=1.5, markersize=4.0, alpha=0.8, label='repeat1.MKK')
    else:
Ejemplo n.º 29
0
def show_periodic_noise(
    nsn: NoisySocialNetwork, noise_start, noise_length, recovery, interval, num, D
):
    logger.debug("plotting periodic noise")
    import matplotlib.pyplot as plt
    import seaborn as sns
    from matplotlib import gridspec
    from opdynamics.visualise import VisSocialNetwork
    from opdynamics.utils.plot_utils import get_time_point_idx

    # calculate optimal bin edges from opinions distribution at noise start + noise_length
    hist, bin_edges = np.histogram(
        nsn.result.y[
            :, get_time_point_idx(nsn.result.t, float(noise_start + noise_length))
        ],
        bins="auto",
    )
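    # The bin edges are computed once, at t = noise_start + noise_length, and reused for all
    # three distribution plots below (bins=bin_edges), so the histograms share identical bins
    # and remain directly comparable.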

    vis = VisSocialNetwork(nsn)
    # create figure and axes
    fig = plt.figure()
    gs = gridspec.GridSpec(
        nrows=2, ncols=3, figure=fig, wspace=0.3, hspace=0.8, height_ratios=(1, 2)
    )
    ax_time = fig.add_subplot(gs[0, :])
    ax_start = fig.add_subplot(gs[-1, 0])
    ax_noise = fig.add_subplot(gs[-1, 1], sharey=ax_start)
    ax_recovery = fig.add_subplot(gs[-1, 2], sharey=ax_start)

    _colors = [PRE_RDN_COLOR, POST_RDN_COLOR, POST_RECOVERY_COLOR]
    # plot graphs
    vis.show_opinions(ax=ax_time, color_code="line", subsample=5, title=False)
    vis.show_opinions_distribution(
        ax=ax_start,
        t=noise_start,
        title=f"t = {noise_start}",
        color=PRE_RDN_COLOR,
        bins=bin_edges,
    )
    vis.show_opinions_distribution(
        ax=ax_noise,
        t=noise_start + noise_length,
        title=f"t={noise_start + noise_length}",
        color=POST_RDN_COLOR,
        bins=bin_edges,
    )
    vis.show_opinions_distribution(
        ax=ax_recovery,
        t=-1,
        title=f"t={noise_start + noise_length + recovery}",
        color=POST_RECOVERY_COLOR,
        bins=bin_edges,
    )
    # adjust view limits
    from scipy import stats

    x_data, y_data = nsn.result.t, nsn.result.y
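    # Keep only opinions within one variance of the mean so the view limits ignore extreme outliers.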
    s = stats.describe(y_data)
    lower_bound, upper_bound = s.mean - s.variance, s.mean + s.variance
    mask = np.logical_and(lower_bound < y_data, y_data < upper_bound)
    y_mask = y_data[mask]
    lim = (np.min(y_mask), np.max(y_mask))
    ax_time.set_ylim(*lim)
    ax_start.set_xlim(*lim)
    ax_noise.set_xlim(*lim)
    ax_recovery.set_xlim(*lim)
    # annotate plots
    # points where opinion snapshots are taken
    ax_time.vlines(
        x=[
            noise_start,
            noise_start + noise_length,
            noise_start + noise_length + recovery,
        ],
        ymin=lim[0],
        ymax=lim[1],
        color=_colors,
        clip_on=False,
    )
    # noise on/off
    noiseless_time = interval * (num - 1)
    block_time = (noise_length - noiseless_time) / num
    block_times_s = [noise_start + block_time * i + interval * i for i in range(num)]
    block_times_e = [
        noise_start + block_time * (i + 1) + interval * i for i in range(num)
    ]
    ax_time.hlines(
        y=[lim[1]] * num,
        xmin=block_times_s,
        xmax=block_times_e,
        lw=5,
        color="k",
        clip_on=False,
    )
    # value of noise
    ax_time.annotate(f"D = {D}", xy=(noise_start, lim[1]), ha="left", va="bottom")
    # recovery annotation
    # ax_time.annotate(
    #     f"D = 0",
    #     xy=(noise_start + noise_length, lim[1]),
    #     ha="left",
    #     va="bottom",
    # )
    sns.despine()
    ax_noise.set_ylabel("")
    ax_recovery.set_ylabel("")
    return fig, gs
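
A minimal usage sketch: nsn is assumed to be an opdynamics NoisySocialNetwork that has already been simulated with periodic noise (its construction and run are not shown), and the numeric arguments are purely illustrative.

# `nsn` is assumed to exist already; the numbers below are illustrative only.
fig, gs = show_periodic_noise(
    nsn,
    noise_start=0.5,    # time at which the noise switches on
    noise_length=0.5,   # total span covering all noise blocks
    recovery=0.5,       # extra simulated time after the noise ends
    interval=0.1,       # quiet gap between consecutive noise blocks
    num=3,              # number of noise blocks
    D=5.0,              # noise amplitude shown in the annotation
)
fig.savefig("periodic_noise.png", dpi=150)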
Example No. 30
def _plot_waterfall(Returns_Poly,
                    waterfall,
                    dt,
                    f,
                    Cut_off,
                    fname="",
                    Win=None):
    """
    Plot the waterfall at the best Dispersion Measure and at close values for comparison.
    """

    fig = plt.figure(figsize=(8.5, 6), facecolor='k')
    fig.subplots_adjust(left=0.08, bottom=0.08, right=0.99, top=0.8)
    grid = gridspec.GridSpec(1, 3, wspace=0.1)

    Title = '{0:}\nBest DM = {1:.3f} $\\pm$ {2:.3f}'.format(
        fname, Returns_Poly[0], Returns_Poly[1])
    plt.suptitle(Title, color='w', linespacing=1.5)

    DMs = Returns_Poly[0] + 5 * Returns_Poly[1] * np.array(
        [-1, 0, 1])  # DMs +- 5 sigmas away
    for j, dm in enumerate(DMs):
        gs = gridspec.GridSpecFromSubplotSpec(2,
                                              1,
                                              subplot_spec=grid[j],
                                              height_ratios=[1, 4],
                                              hspace=0)
        ax_prof = fig.add_subplot(gs[0])
        ax_wfall = fig.add_subplot(gs[1], sharex=ax_prof)
        try:
            ax_wfall.set_facecolor('k')
        except AttributeError:
            ax_wfall.set_axis_bgcolor('k')

        wfall = _dedisperse_waterfall(waterfall, dm, f, dt)
        prof = wfall.sum(axis=0)

        # Find the time range around the pulse
        if (j == 0) and (Win is None):
            W = _get_Window(prof)
            Spect = _get_Spect(wfall)
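            # Assuming _get_Spect returns the profile's spectrum (helper not shown here), the mask
            # below keeps only the lowest Fourier bins; the inverse FFT then gives a smoothed series
            # that _check_W uses to refine the pulse window.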
            Filter = np.ones_like(Spect)
            Filter[Cut_off:-Cut_off] = 0
            Spike = np.real(ifft(Spect * Filter))
            Spike[0] = 0
            Win = _check_W(Spike, W)

        # Profile
        T = dt * (Win[1] - Win[0]) * 1000
        x = np.linspace(0, T, Win[1] - Win[0])
        y = prof[Win[0]:Win[1]]
        ax_prof.plot(x, y, 'w', linewidth=0.5, clip_on=False)
        ax_prof.axis('off')
        ax_prof.set_title('{0:.3f}'.format(dm), color='w')

        # Waterfall
        bw = f[-1] - f[0]
        im = wfall[:, Win[0]:Win[1]]
        extent = [0, T, f[0], f[-1]]
        MAX_DS = wfall.max()
        MIN_DS = wfall.mean() - wfall.std()
        ax_wfall.imshow(im,
                        origin='lower',
                        aspect='auto',
                        cmap=colormap,
                        extent=extent,
                        interpolation='nearest',
                        vmin=MIN_DS,
                        vmax=MAX_DS)

        ax_wfall.tick_params(axis='both',
                             colors='w',
                             direction='in',
                             right=True,
                             top=True)
        if j == 0: ax_wfall.set_ylabel('Frequency (MHz)')
        if j == 1: ax_wfall.set_xlabel('Time (ms)')
        if j > 0: ax_wfall.tick_params(axis='both', labelleft=False)
        ax_wfall.yaxis.label.set_color('w')
        ax_wfall.xaxis.label.set_color('w')

    if fname != "": fname += "_"
    fig.savefig(fname + "Waterfall_5sig.pdf", facecolor='k', edgecolor='k')
    return
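
For reference, the three panels correspond to the best DM and offsets of ±5 sigma. With illustrative values Returns_Poly = (560.5, 0.2), chosen here only to show the arithmetic, the panel DMs come out as:

import numpy as np

# Illustrative values only: best DM 560.5 with a 0.2 uncertainty.
Returns_Poly = (560.5, 0.2)
DMs = Returns_Poly[0] + 5 * Returns_Poly[1] * np.array([-1, 0, 1])
print(DMs)   # [559.5 560.5 561.5]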