Example no. 1
    def make_timing_plots(self, active_test_list):
        """ plot the wallclock time history for all the valid tests """

        valid_dirs, all_tests = self.get_run_history(active_test_list)

        # store the timings in NumPy arrays in a dictionary
        timings = {}
        N = len(valid_dirs)
        for t in all_tests:
            timings[t] = np.zeros(N, dtype=np.float64)

        # now get the timings from the web output
        for n, d in enumerate(valid_dirs):
            for t in all_tests:
                ofile = "{}/{}/{}.html".format(self.webTopDir, d, t)
                try:
                    f = open(ofile)
                except OSError:
                    timings[t][n] = 0.0
                    continue

                found = False
                for line in f:
                    if "Execution time" in line:
                        found = True
                        # this is of the form: <li>Execution time: 412.930 s
                        timings[t][n] = float(
                            line.split(":")[1].strip().split(" ")[0])
                        break

                    elif "(seconds)" in line:
                        found = True
                        # this is the older form -- split on "="
                        # form: <p><b>Execution Time</b> (seconds) = 399.414828
                        timings[t][n] = float(line.split("=")[1])
                        break

                f.close()
                if not found:
                    timings[t][n] = 0.0

        # make the plots
        for t in all_tests:
            _d = valid_dirs[:]
            _t = list(timings[t])

            days = []
            times = []
            for n, ttime in enumerate(_t):
                if ttime != 0.0:
                    # sometimes the date is of the form YYYY-MM-DD-NNN, where NNN
                    # is the run -- remove that
                    date = _d[n]
                    if len(date) > 10:
                        date = date[:date.rfind("-")]

                    days.append(dates.datestr2num(date))
                    times.append(ttime)

            if len(times) == 0: continue

            plt.clf()
            plt.plot_date(days, times, "o", xdate=True)

            years = dates.YearLocator()  # every year
            months = dates.MonthLocator()
            years_fmt = dates.DateFormatter('%Y')

            ax = plt.gca()
            ax.xaxis.set_major_locator(years)
            ax.xaxis.set_major_formatter(years_fmt)
            ax.xaxis.set_minor_locator(months)

            plt.ylabel("time (seconds)")
            plt.title(t)

            if max(times) / min(times) > 10.0:
                ax.set_yscale("log")

            fig = plt.gcf()
            fig.autofmt_xdate()

            plt.savefig("{}/{}-timings.png".format(self.webTopDir, t))
Example no. 2
cursor.execute(sql_statement)
rows = cursor.fetchall()
localDateTimes, dataValues = zip(*rows)

#create plot
fig = plt.figure()
ax = fig.add_subplot(211)
ax.plot(localDateTimes, dataValues, color='grey', linestyle='solid', \
        markersize=0)

#set plot properties
ax.set_ylabel("Temperature ($^\circ$C)")
ax.set_xlabel("Date/Time")
ax.xaxis.set_minor_locator(dates.MonthLocator())
ax.xaxis.set_minor_formatter(dates.DateFormatter('%b'))
ax.xaxis.set_major_locator(dates.YearLocator())
ax.xaxis.set_major_formatter(dates.DateFormatter('\n%Y'))
ax.grid(True)
ax.set_title('Water temperature at Little Bear River \n at McMurdy Hollow \
near Paradise, Utah') #hard coded for now. Should update when SiteID is updated.
fig.tight_layout()

#inputs
SiteID = '1'
VariableID = '36'
StartLocalDateTime = "'2008-01-01'"
EndLocalDateTime = "'2008-12-31'"

#connect to database
conn = pymysql.connect(host='localhost', port=3306, user='******', \
        passwd='', db='LBRODM_small')
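
# Stand-alone sketch of the two-level date labelling used above: month
# abbreviations ('%b') on the minor ticks and the year on its own line
# ('\n%Y') on the major ticks. The temperature series is synthetic; only
# the axis configuration mirrors the snippet.
import datetime
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import dates

t = [datetime.datetime(2008, 1, 1) + datetime.timedelta(hours=6 * i)
     for i in range(4 * 365)]
temps = 12 + 8 * np.sin(np.linspace(0, 2 * np.pi, len(t))) + np.random.randn(len(t))

fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(t, temps, color='grey', linestyle='solid')

ax.set_ylabel(r"Temperature ($^\circ$C)")
ax.set_xlabel("Date/Time")
ax.xaxis.set_minor_locator(dates.MonthLocator())
ax.xaxis.set_minor_formatter(dates.DateFormatter('%b'))
ax.xaxis.set_major_locator(dates.YearLocator())
ax.xaxis.set_major_formatter(dates.DateFormatter('\n%Y'))
ax.grid(True)
fig.tight_layout()
plt.show()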
Example no. 3
df1 = data_sma.iloc[1::]
df2 = data_ti
df1.index = df2.index

fig, ax = plt.subplots()
ax.plot(df1, 'b-')
ax2 = ax.twinx()
ax2.plot(df2, 'r.')
plt.title("SMA & RSI graph")
plt.show()

# JPMorgan Daily Closing Price
JPM = pd.read_csv('JPM.csv', header=0, index_col='Date', parse_dates=True)
plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%Y'))
plt.gca().xaxis.set_major_locator(mdates.YearLocator())
plt.grid(True)
plt.xticks(rotation=90)
plt.plot(JPM.index, JPM['Adj Close'])
plt.title("Daily Closing Prices")
plt.show()

# AFL Historical stock prices
AFL = AFL_bank.iloc[::-1]
AFL['Date'] = pd.to_datetime(AFL['Date'])
AFL['20wma'] = AFL['Close'].rolling(window=140).mean()
fig2 = go.Figure(data=[
    go.Candlestick(x=AFL['Date'],
                   open=AFL['Open'],
                   high=AFL['High'],
                   low=AFL['Low'],
Example no. 4
    def send_temperature(self):  # display the data for one station
        conn = sqlite3.connect('Temperatures.sqlite')
        c = conn.cursor()

        for i in station_list:
            if i.get_nom() == self.path_info[1]:
                station_temp = i

        deb, fin = self.path_info[3], self.path_info[4]
        # Convert dates from YYYY-MM-DD to YYYYMMDD
        deb = deb[:4] + deb[5:7] + deb[8:10]
        fin = fin[:4] + fin[5:7] + fin[8:10]
        pas = self.path_info[5]

        # plot configuration
        fig1 = plt.figure(figsize=(18, 6))
        ax = fig1.add_subplot(111)
        ax.set_ylim(bottom=-10, top=40)
        ax.grid(which='major', color='#888888', linestyle='-')
        ax.grid(which='minor', axis='x', color='#888888', linestyle=':')
        ax.xaxis.set_major_locator(pltd.YearLocator())
        ax.xaxis.set_minor_locator(pltd.MonthLocator())
        ax.xaxis.set_major_formatter(pltd.DateFormatter('%B %Y'))
        ax.xaxis.set_tick_params(labelsize=10)
        ax.xaxis.set_label_text("Date")
        ax.yaxis.set_label_text("ºC")

        if self.path_info[2] == 'Moyenne': sheet = 'TG_1978-2018'
        elif self.path_info[2] == 'Maximale': sheet = 'TX_1978-2018'
        elif self.path_info[2] == 'Minimale': sheet = 'TN_1978-2018'
        c.execute(
            "SELECT * FROM '{}' WHERE Date > {} AND Date < {} AND STAID={} ORDER BY Date"
            .format(sheet, deb, fin, station_temp.get_num()))
        r = c.fetchall()
        # apply the step: the minimum time interval considered
        r_pas = [r[i] for i in range(0, len(r), int(pas))]
        # extract the date (column 2) and convert it to pyplot's date format
        x = [
            pltd.date2num(
                dt.date(int(str(a[2])[:4]), int(str(a[2])[4:6]),
                        int(str(a[2])[6:8]))) for a in r_pas
        ]
        # extract the temperature (column 3), stored in tenths of °C
        y = [float(a[3]) / 10 for a in r_pas]
        # draw the curve
        plt.plot(x,
                 y,
                 linewidth=1,
                 linestyle='-',
                 marker='o',
                 color='blue',
                 label=station_temp.get_nom())

        # legend
        plt.legend(loc='lower left')
        plt.title('Température {} de {} en ºC'.format(self.path_info[2],
                                                      self.path_info[1]),
                  fontsize=16)

        # write the curves to a PNG file
        fichier = 'courbes/temperature_' + self.path_info[1] + self.path_info[
            2] + deb + fin + pas + '.png'
        plt.savefig('client/{}'.format(fichier))

        #html = '<img src="/{}?{}" alt="temperature {}" width="100%">'.format(fichier,self.date_time_string(),self.path)
        body = json.dumps({'title': 'Température {} de'.format(self.path_info[2])+self.path_info[1], \
                           'img': '/'+fichier \
                           })

        # send the response
        headers = [('Content-Type', 'application/json')]
        print(1)
        self.send(body, headers)
Example no. 5
def plot_network(ifgdates, bperp, rm_ifgdates, pngfile, plot_bad=True):
    """
    Plot network of interferometric pairs.
    
    bperp can be dummy (-1~1).
    Suffix of pngfile can be png, ps, pdf, or svg.
    plot_bad
        True  : Plot bad ifgs by red lines
        False : Do not plot bad ifgs
    """

    imdates_all = tools_lib.ifgdates2imdates(ifgdates)
    n_im_all = len(imdates_all)
    idlist_all = [dt.datetime.strptime(x, '%Y%m%d').toordinal() for x in imdates_all]

    ifgdates = list(set(ifgdates)-set(rm_ifgdates))
    ifgdates.sort()
    imdates = tools_lib.ifgdates2imdates(ifgdates)
    n_im = len(imdates)
    idlist = [dt.datetime.strptime(x, '%Y%m%d').toordinal() for x in imdates]
    
    ### Identify gaps    
    G = inv_lib.make_sb_matrix(ifgdates)
    ixs_inc_gap = np.where(G.sum(axis=0)==0)[0]
    
    ### Plot fig
    figsize_x = np.round((idlist_all[-1]-idlist_all[0])/80)+2
    fig = plt.figure(figsize=(figsize_x, 6))
    ax = fig.add_axes([0.12, 0.12, 0.85,0.85])
    
    ### IFG blue lines
    for i, ifgd in enumerate(ifgdates):
        ix_m = imdates_all.index(ifgd[:8])
        ix_s = imdates_all.index(ifgd[-8:])
        label = 'IFG' if i==0 else '' #label only first
        plt.plot([idlist_all[ix_m], idlist_all[ix_s]], [bperp[ix_m], bperp[ix_s]], color='b', alpha=0.6, zorder=2, label=label)

    ### IFG bad red lines
    if plot_bad:
        for i, ifgd in enumerate(rm_ifgdates):
            ix_m = imdates_all.index(ifgd[:8])
            ix_s = imdates_all.index(ifgd[-8:])
            label = 'Removed IFG' if i==0 else '' #label only first
            plt.plot([idlist_all[ix_m], idlist_all[ix_s]], [bperp[ix_m], bperp[ix_s]], color='r', alpha=0.6, zorder=6, label=label)

    ### Image points and dates
    ax.scatter(idlist_all, bperp, alpha=0.6, zorder=4)
    for i in range(n_im_all):
        if bperp[i] > np.median(bperp): va='bottom'
        else: va = 'top'
        ax.annotate(imdates_all[i][4:6]+'/'+imdates_all[i][6:], (idlist_all[i], bperp[i]), ha='center', va=va, zorder=8)

    ### gaps
    if len(ixs_inc_gap)!=0:
        gap_idlist = []
        for ix_gap in ixs_inc_gap:
            gap_idlist.append((idlist[ix_gap]+idlist[ix_gap+1])/2)
        plt.vlines(gap_idlist, 0, 1, transform=ax.get_xaxis_transform(), zorder=1, label='Gap', alpha=0.6, colors='k', linewidth=3)
        
    ### Locator
    loc = mdates.AutoDateLocator()
    ax.xaxis.set_major_locator(loc)
    try:  # ConciseDateFormatter is only supported from Matplotlib 3.1
        ax.xaxis.set_major_formatter(mdates.ConciseDateFormatter(loc))
    except AttributeError:
        ax.xaxis.set_major_formatter(mdates.DateFormatter('%Y/%m/%d'))
        for label in ax.get_xticklabels():
            label.set_rotation(20)
            label.set_horizontalalignment('right')
    ax.grid(b=True, which='major')

    ### Add bold line every 1yr
    ax.xaxis.set_minor_locator(mdates.YearLocator())
    ax.grid(b=True, which='minor', linewidth=2)

    ax.set_xlim((idlist_all[0]-10, idlist_all[-1]+10))

    ### Labels and legend
    plt.xlabel('Time')
    if np.all(np.abs(np.array(bperp))<=1): ## dummy
        plt.ylabel('dummy')
    else:
        plt.ylabel('Bperp [m]')
    
    plt.legend()

    ### Save
    plt.savefig(pngfile)
    plt.close()
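
# Stand-alone sketch of the locator/formatter fallback used above: prefer
# ConciseDateFormatter (Matplotlib >= 3.1) and fall back to a plain
# DateFormatter with rotated labels on older versions. Data is synthetic.
import datetime as dt
import matplotlib.pyplot as plt
import matplotlib.dates as mdates

x = [dt.date(2017, 1, 1) + dt.timedelta(days=12 * i) for i in range(100)]
y = list(range(100))

fig, ax = plt.subplots()
ax.plot(x, y)

loc = mdates.AutoDateLocator()          # keep a reference for the formatter
ax.xaxis.set_major_locator(loc)
try:  # ConciseDateFormatter only exists from Matplotlib 3.1 onwards
    ax.xaxis.set_major_formatter(mdates.ConciseDateFormatter(loc))
except AttributeError:
    ax.xaxis.set_major_formatter(mdates.DateFormatter('%Y/%m/%d'))
    for label in ax.get_xticklabels():
        label.set_rotation(20)
        label.set_horizontalalignment('right')

# bold grid line at every year boundary, as in the snippet above
ax.xaxis.set_minor_locator(mdates.YearLocator())
ax.grid(True, which='major')
ax.grid(True, which='minor', linewidth=2)
plt.show()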
Example no. 6
# Check NaN Values
# print(df_monthly.isna().values.sum())
# print(df_yearly.isna().values.any())

# Check Duplicate
# print(df_yearly.duplicated().values.any())
# print(df_monthly.duplicated().values.any())

# print(df_yearly.describe())
# print(df_monthly.describe())
"""Percentage of Women Dying in Childbirth In The 1840s in Vienna"""
prob = df_yearly.deaths.sum() / df_yearly.births.sum() * 100
# print(f'Chances of dying in the 1840s in Vienna: {prob:.3}%')

# Create locators for ticks on the time axis
years = mdates.YearLocator()
months = mdates.MonthLocator()
years_fmt = mdates.DateFormatter('%Y')

# plt.figure(figsize=(6, 4), dpi=200)
# plt.yticks(fontsize=7)
# plt.xticks(fontsize=7, rotation=45)
# plt.title('Total Number of Monthly Births and Deaths', fontsize=18)
#
# ax1 = plt.gca()
# ax2 = ax1.twinx()
#
# ax1.grid(linestyle='--', color='grey')
# ax1.set_ylabel('Births', color='skyblue', fontsize=8)
# ax2.set_ylabel('Deaths', color='crimson', fontsize=8)
# # Use Locators
Example no. 7
def graph(output, data, legend=True):
    """
    - output : le fichier ou la requete de sortie
    - data : les donnees a afficher. On considere qu'elles sont triees
        de la plus vieille a la plus recente et sous la forme:
        [ [[les dates], [les donnees], [le label], [la couleur]], [], [] ]
    """
    fig = Figure()
    canvas = FigureCanvas(fig)
    ax = fig.add_subplot(111)

    date_min = data[0][0][0]
    date_max = data[0][0][-1]
    for a_line in data:
        if a_line[0][0] < date_min:
            date_min = a_line[0][0]
        if a_line[0][-1] > date_max:
            date_max = a_line[0][-1]
        if len(a_line[0]) > 80:
            print "trop grand, on fait des moyennes par semaines"
            # on fait des moyennes par semaines
            # le dimanche regroupe la semaine du lundi au dimanche
            axe_x = []
            axe_y = []
            total = 0
            count = 0
            index = 0
            for i in a_line[0]:
                # iterate over the dates
                if i.isoweekday() == 7:
                    # it's Sunday, record the weekly average
                    axe_x.append(i)
                    if count == 0:
                        axe_y.append(0)
                    else:
                        axe_y.append(total / count)
                    total = 0
                    count = 0
                else:
                    count += 1
                    total += a_line[1][index]
                index += 1
        else:
            axe_x = a_line[0]
            axe_y = a_line[1]
        ax.plot_date(axe_x, axe_y, fmt='-', label=a_line[2], color=a_line[3])

    # choose the appropriate locator based on the number of days
    # between the first and the last date
    delta = date_max - date_min
    if delta.days > 600:
        fmt = dates.DateFormatter('%Y')
        ax.xaxis.set_major_formatter(fmt)
        ax.xaxis.set_major_locator(dates.YearLocator())
        ax.xaxis.set_minor_locator(dates.MonthLocator())
        fig.autofmt_xdate(bottom=0.18)
    elif delta.days > 50:
        fmt = dates.DateFormatter('%Y-%m')
        ax.xaxis.set_major_formatter(fmt)
        ax.xaxis.set_major_locator(dates.MonthLocator())
        ax.xaxis.set_minor_locator(dates.DayLocator())
        fig.autofmt_xdate(bottom=0.18)
    else:
        ax.xaxis.set_major_formatter(dates.DateFormatter('%m-%Y'))
        ax.xaxis.set_major_locator(dates.MonthLocator())
        #        ax.xaxis.set_minor_formatter(dates.DateFormatter('%d-%m'))
        ax.xaxis.set_minor_locator(dates.DayLocator())
        fig.autofmt_xdate(bottom=0.18)

    # position the legend
    if legend:
        ax.legend(loc='best')
    ax.grid(True)

    canvas.print_png(output)
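
# Sketch of the "choose the locator from the date span" idea above, factored
# into a small helper. The 600-day and 50-day thresholds come from the snippet;
# the helper name and the data are illustrative.
import datetime
import matplotlib.pyplot as plt
from matplotlib import dates


def set_date_axis(ax, date_min, date_max):
    delta = date_max - date_min
    if delta.days > 600:      # multi-year span: label years, minor-tick months
        ax.xaxis.set_major_formatter(dates.DateFormatter('%Y'))
        ax.xaxis.set_major_locator(dates.YearLocator())
        ax.xaxis.set_minor_locator(dates.MonthLocator())
    elif delta.days > 50:     # a few months: label year-month, minor-tick days
        ax.xaxis.set_major_formatter(dates.DateFormatter('%Y-%m'))
        ax.xaxis.set_major_locator(dates.MonthLocator())
        ax.xaxis.set_minor_locator(dates.DayLocator())
    else:                     # short span: monthly labels, daily minor ticks
        ax.xaxis.set_major_formatter(dates.DateFormatter('%m-%Y'))
        ax.xaxis.set_major_locator(dates.MonthLocator())
        ax.xaxis.set_minor_locator(dates.DayLocator())


x = [datetime.date(2019, 1, 1) + datetime.timedelta(days=i) for i in range(900)]
y = [i % 30 for i in range(900)]

fig, ax = plt.subplots()
ax.plot(x, y, '-')
set_date_axis(ax, x[0], x[-1])
fig.autofmt_xdate(bottom=0.18)
fig.savefig("span_dependent_axis.png")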
Example no. 8
def run_plot_TS(
        main_gdf,
        figsize=(12, 6),
        y_val_alpha=1,
        scatter_alpha=1,
        error_alpha=0.2,
        y_label="Brightness",
        x_label="Date",
        title_label="Brightness over time - Census Tract 9509, Yabucoa Municipio",
        outname="/Outname.csv",
        figname='',
        custom_x_axis=True,
        show_grid=True,
        show_legend=True,
        min_count=0.5,
        ymin=0,
        ymax=5000):
    print "Plotting..."
    C = 0
    for item in main_gdf['ID'].unique():
        C += 1
        plt.style.use('fivethirtyeight')
        fig, ax = plt.subplots(figsize=figsize)
        gdf3 = main_gdf[(main_gdf.ID == item)]
        title = title_label
        gdf3 = gdf3.sort_values(['date'])
        gdf3 = TS_Trend(gdf3, CMA_Val=5, CutoffDate="2017/08/31")
        gdf3 = gdf3.sort_values(['date'])
        x = gdf3['date']
        xdate = x.astype('O')
        xdate = mdates.date2num(xdate)
        y = gdf3['mean']
        if 'SeasonalForecast' in gdf3.columns:
            if C == 1:
                gdf_holder = gdf3
            else:
                gdf_holder = gdf_holder.append(gdf3)
            T = gdf3['SeasonalForecast']
            if 'observations' in gdf3.columns:
                count = gdf3['observations']
            err_plus = gdf3['mean'] + gdf3['std']
            err_minus = gdf3['mean'] - gdf3['std']
            #Set the min_count value as a value from 0 to 1 (0 to 100%)
            #ax.set_ylim([ymin,ymax])

            #This will filter out observations where over n% of a polygon is masked out
            if min_count > 0:
                z = gdf3['count']
                zmax = gdf3['count'].max()
                z = z / zmax
                xdate = xdate[z >= min_count]
                y = y[z >= min_count]
                if 'observations' in gdf3.columns:
                    count = count[z >= min_count]
                err_plus = err_plus[z >= min_count]
                err_minus = err_minus[z >= min_count]

            if len(xdate) == len(gdf3['Trend']):
                # plot regression line, if desired
                idx = np.isfinite(xdate) & np.isfinite(y)
                p2 = np.poly1d(np.polyfit(xdate[idx], y[idx], 1))
                ax.plot(xdate,
                        gdf3['Trend'],
                        '-',
                        label="Linear Forecast",
                        color='#00C5B0',
                        alpha=y_val_alpha)

                #plot running mean regression line
                #RM=(y.rolling(window=6,center=True, min_periods=2).median())
                #ax.plot(xdate, RM, '-',label="Moving Median", alpha=y_val_alpha)

                #Seasonal Forecast
                T = interpolate_gaps(T, limit=3)
                ax.plot(xdate,
                        T,
                        '-',
                        label="Seasonal Forecast",
                        alpha=1,
                        color='#FF8700')

                # scatter points-median top, mean bottom
                ax.scatter(xdate,
                           y,
                           label="Mean",
                           s=50,
                           color='black',
                           alpha=scatter_alpha)
                #ax.scatter(xdate, y2, label="Mean", s=50, color='red',alpha=scatter_alpha,marker='x')

                # if desired, plot error band
                plt.fill_between(xdate,
                                 err_plus,
                                 err_minus,
                                 alpha=error_alpha,
                                 color='black',
                                 label="Standard Deviation")

                ax.set_ylabel(y_label)
                ax.set_xlabel(x_label)
                if 'observations' in gdf3.columns:
                    ax.scatter(xdate,
                               count,
                               label="# of obs",
                               s=50,
                               color='#330DD0',
                               alpha=y_val_alpha,
                               marker='d')

                ax.yaxis.set_tick_params(labelsize=12)
                if custom_x_axis:
                    xticklabel_pad = 0.1
                    years = mdates.YearLocator()  # every year
                    months = mdates.MonthLocator()  # every month
                    yearsFmt = mdates.DateFormatter('%Y')

                    ax.xaxis.set_major_locator(years)
                    ax.xaxis.set_major_formatter(yearsFmt)
                    ax.xaxis.set_minor_locator(months)

                    plt.rc('xtick', labelsize=12)

                    #ax.set_xticks(xdate)
                    #ax.set_xticklabels(x, rotation=50, fontsize=10)
                    #ax.tick_params(axis='x', which='major', pad=xticklabel_pad)

                    #ax.xaxis.set_major_formatter(dateformat)
                    #ax.set_xlim(datetime.date(settings.plot['x_min'], 1, 1),datetime.date(settings.plot['x_max'], 12, 31))

                if show_grid:
                    ax.grid(b=True,
                            which='minor',
                            color='black',
                            alpha=0.75,
                            linestyle=':')

                if show_legend:
                    handles, labels = ax.get_legend_handles_labels()
                    ax.legend(handles,
                              labels,
                              ncol=1,
                              loc='center right',
                              bbox_to_anchor=[1.1, 0.5],
                              columnspacing=1.0,
                              labelspacing=0.0,
                              handletextpad=0.0,
                              handlelength=1.5,
                              fancybox=True,
                              shadow=True,
                              fontsize='x-small')

                ax.set_title(title)
                plt.tight_layout()
                plt.show()

                # save with figname
                if len(figname) > 0:
                    plt.savefig(figname, dpi=dpi)
    output = outname
    gdf_holder.to_csv(output)
Example no. 9
def plot_2d_per_time():
    fig_style = dict(cmap="viridis", marker='o')
    df, _ = models.load_unigrams()
    df_ratios = models.load_ratios()

    ug_normalized = (np.log1p(df).T / np.log1p(df).sum(axis=1)).T
    ug_smoothed = ug_normalized.rolling(
        data_config.unigram_rm_smoothing).mean()

    d_svd_unsmooth = pre.scale(
        decomp.TruncatedSVD(n_components=2).fit_transform(
            pre.StandardScaler().fit_transform(
                ug_normalized.ix[data_config.date_begin:].as_matrix())))

    d_svd_smooth = pre.scale(
        decomp.TruncatedSVD(n_components=2).fit_transform(
            pre.StandardScaler().fit_transform(
                ug_smoothed.ix[data_config.date_begin:].as_matrix())))

    ratios_smoothed = df_ratios.ewm(com=21).mean()
    d_ratios_ica = pre.scale(
        decomp.FastICA(n_components=2).fit_transform(
            pre.scale(
                ratios_smoothed.ix[data_config.date_begin:].as_matrix())))
    d_ratios_ica_restricted = pre.scale(
        decomp.FastICA(n_components=2).fit_transform(pre.StandardScaler(
        ).fit_transform(
            ratios_smoothed.ix[data_config.date_turning_point:].as_matrix())))

    d_ratios_ica_rotated = d_ratios_ica.dot(
        scipy.linalg.orthogonal_procrustes(d_ratios_ica, d_svd_smooth)[0])
    d_ratios_ica_restricted_rotated = d_ratios_ica_restricted.dot(
        scipy.linalg.orthogonal_procrustes(
            d_ratios_ica_restricted,
            d_svd_smooth[-d_ratios_ica_restricted.shape[0]:])[0])

    fig, ax = plt.subplots(ncols=3)
    idx = [d.toordinal() for d in df.ix[data_config.date_begin:].index.date]
    scatter_svd_unsmooth = ax[0].scatter(*d_svd_unsmooth.T,
                                         c=idx,
                                         s=15,
                                         **fig_style)
    ax[1].scatter(*d_svd_smooth.T, c=idx, s=15, **fig_style)
    scatter_ica_restricted = ax[2].scatter(
        *d_ratios_ica_restricted_rotated.T,
        c=idx[-d_ratios_ica_restricted.shape[0]:],
        s=15,
        vmin=min(idx),
        vmax=max(idx),
        **fig_style)

    for a in ax:
        a.set_frame_on(False)
        a.get_xaxis().set_visible(False)
        a.get_yaxis().set_visible(False)

    cb = fig.colorbar(scatter_svd_unsmooth,
                      orientation='vertical',
                      ticks=dates.YearLocator(),
                      format=dates.DateFormatter('%Y'))
    cb.outline.set_visible(False)
    fig.savefig("thesis/plots/unigram_decomp.png")
Example no. 10
import g5lib.plotters as ptrs
from g5lib import g5dset

exp = g5dset.read_exp(sys.argv[1])
varname = 'SSH'
exp.ctl = g5dset.Ctl(exp, 'geosgcm_ocn2d')

exp.gm = exp.ctl(varname).aave()

# Plot

path = exp.plot_path
try:
    os.makedirs(path)
except OSError:
    pass

pl.clf()
exp.gm.name = exp.ctl.name + ' Global mean SSH'
p = ptrs.Plotter1d()
p(exp.gm)
ax = p.axis
if (exp.gm.time.size > 2500):
    myloc = dates.YearLocator(int(exp.gm.time.size / 1000) * 10)
    ax.xaxis.set_major_locator(myloc)
ax.set_ylabel('SSH, m')
pl.grid()
pl.tight_layout()
pl.show()
pl.savefig(path + '/ssh_gm.png')
Example no. 11
def xaxis_year_locate_format(ax, i=2, fmt='%y'):
    ax.xaxis.set_major_locator(mdates.YearLocator(i))
    ax.xaxis.set_major_formatter(mdates.DateFormatter(fmt))
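
# Possible usage of the helper above: by default it places a major tick every
# second year with two-digit '%y' labels. The DataFrame is synthetic and only
# exercises the function (mdates is assumed imported, as in the snippet).
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

idx = pd.date_range('2000-01-01', periods=192, freq='MS')   # monthly, 16 years
s = pd.Series(np.random.randn(len(idx)).cumsum(), index=idx)

fig, ax = plt.subplots()
ax.plot(s.index, s.values)
xaxis_year_locate_format(ax)                   # every 2 years, '%y' labels
# xaxis_year_locate_format(ax, i=5, fmt='%Y')  # or every 5 years, 4-digit years
plt.show()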
Example no. 12
def plot_all(df,
             dfSteps=None,
             dfMidas=None,
             dfFit=None,
             columns=['east', 'north', 'up'],
             axhline=False,
             title=''):
    #Plot daily positions
    fig, (ax, ax1, ax2) = plt.subplots(3, 1, sharex=True, figsize=(8.5, 11))
    ax.plot(df.index, df[columns[0]], 'k.', label='EW')
    #ax.set_title('NS')

    ax1.plot(df.index, df[columns[1]], 'k.', label='NS')
    #ax1.set_title('EW')

    ax2.plot(df.index, df[columns[2]], 'k.', label='Z')
    #ax2.set_title('Z')

    # Add MIDAS velocities
    if isinstance(dfMidas, pd.DataFrame):
        dfM = add_midas(df, dfMidas)
        ax.plot(dfM.index.values,
                dfM.midas_east.values,
                'm-',
                lw=2,
                label='MIDAS')
        ax1.plot(dfM.index.values, dfM.midas_north.values, 'm-', lw=2)
        ax2.plot(dfM.index.values, dfM.midas_up.values, 'm-', lw=2)

        # Show error bounds
        # NOTE: what exactly are the MIDAS error bounds? Note 95% confidence limits...
        #ax.fill_between(dfM.index.values, dfM.midas_east_lb.values, dfM.midas_east_ub.values, color='m', alpha=0.5)
        #ax1.fill_between(dfM.index.values, dfM.midas_north_lb.values, dfM.midas_north_ub.values, color='m', alpha=0.5)
        #ax2.fill_between(dfM.index.values, dfM.midas_up_lb.values, dfM.midas_up_ub.values, color='m', alpha=0.5)

    # Add discontinuities
    if isinstance(dfSteps, pd.DataFrame):
        for step in dfSteps.index.intersection(df.index):
            for axes in (ax, ax1, ax2):
                axes.axvline(step, color='k', linestyle='dashed')

    # Add Function Fits
    if isinstance(dfFit, pd.DataFrame):
        ax.plot(dfFit.index, dfFit.fit_east, 'c-', lw=3, label='Fit')
        ax1.plot(dfFit.index.values, dfFit.fit_north.values, 'c-', lw=3)
        ax2.plot(dfFit.index.values, dfFit.fit_up.values, 'c-', lw=3)

    if axhline:
        for axes in (ax, ax1, ax2):
            axes.axhline(color='k', lw=1)

    ax.legend(loc='upper left', frameon=True)
    ax1.legend(loc='upper left', frameon=True)
    ax2.legend(loc='upper left', frameon=True)
    plt.suptitle(title, fontsize=16)
    ax1.set_ylabel('Position [m]')

    months = pltdate.MonthLocator()
    years = pltdate.YearLocator()
    for axes in (ax, ax1, ax2):
        axes.xaxis.set_major_locator(years)
        axes.xaxis.set_minor_locator(months)  #too much
        axes.fmt_xdata = pltdate.DateFormatter('%Y-%m-%d')
        axes.grid(True)

    plt.tick_params(axis='x', which='minor', length=5, top=False, bottom=True)
    plt.tick_params(axis='x', which='major', length=10, top=False, bottom=True)
    fig.autofmt_xdate()
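
# Minimal sketch of the shared-x-axis pattern above: three stacked subplots,
# yearly major / monthly minor ticks, and fmt_xdata so the interactive cursor
# readout shows full dates. The position data is synthetic.
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.dates as pltdate

t = pd.date_range('2015-01-01', '2019-12-31', freq='D')
east, north, up = (np.random.randn(len(t)).cumsum() / 1000 for _ in range(3))

fig, (ax, ax1, ax2) = plt.subplots(3, 1, sharex=True, figsize=(8.5, 11))
ax.plot(t, east, 'k.', label='EW')
ax1.plot(t, north, 'k.', label='NS')
ax2.plot(t, up, 'k.', label='Z')
ax1.set_ylabel('Position [m]')

years = pltdate.YearLocator()
months = pltdate.MonthLocator()
for axes in (ax, ax1, ax2):
    axes.xaxis.set_major_locator(years)
    axes.xaxis.set_minor_locator(months)
    axes.fmt_xdata = pltdate.DateFormatter('%Y-%m-%d')   # cursor readout format
    axes.grid(True)
    axes.legend(loc='upper left', frameon=True)

fig.autofmt_xdate()
plt.show()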
Example no. 13
def run_tri_plot(main_gdf,
                 gdf2,
                 gdfx,
                 figsize=(12, 6),
                 y_val_alpha=1,
                 scatter_alpha=1,
                 error_alpha=0.2,
                 y_label="Brightness",
                 x_label="Date",
                 title_label="Brightness over time - ID: ",
                 figname='',
                 custom_x_axis=True,
                 show_grid=True,
                 show_legend=True,
                 min_count=0.5,
                 ymin=0,
                 ymax=5000):
    print("Plotting...")
    for item in set(main_gdf['ID'].unique()) & set(gdf2['ID'].unique()):
        plt.style.use('fivethirtyeight')
        fig, ax = plt.subplots(figsize=figsize)
        gdf3 = main_gdf[(main_gdf.ID == item)]
        gdf4 = gdf2[(gdf2.ID == item)]
        gdf5 = gdfx[(gdfx.ID == item)]
        title = title_label + str(item)
        gdf3 = gdf3.sort_values(['date'])
        gdf4 = gdf4.sort_values(['date'])
        gdf5 = gdf5.sort_values(['date'])
        x = gdf3['date']
        xdate = x.astype('O')
        xdate = mdates.date2num(xdate)
        y = gdf3['median']
        y2 = gdf4['median']
        y3 = gdf5['median']
        # print(y,y2)
        if 'observations' in gdf3.columns:
            count = gdf3['observations']
        err_plus = gdf3['percentile_75']
        err_minus = gdf3['percentile_25']
        if 'observations' in gdf4.columns:
            count2 = gdf4['observations']
        err_plus2 = gdf4['percentile_75']
        err_minus2 = gdf4['percentile_25']

        if 'observations' in gdf5.columns:
            count3 = gdf5['observations']
        err_plus3 = gdf5['percentile_75']
        err_minus3 = gdf5['percentile_25']
        # Set the min_count value as a value from 0 to 1 (0 to 100%)
        ax.set_ylim([ymin, ymax])

        # This will filter out observations where over
        # n% of a polygon is masked out
        if min_count > 0:
            z = gdf3['count']
            zmax = gdf3['count'].max()
            z = z / zmax
            # xdate = xdate[z >= min_count]
            y = y[z >= min_count]
            if 'observations' in gdf3.columns:
                count = count[z >= min_count]
            err_plus = err_plus[z >= min_count]
            err_minus = err_minus[z >= min_count]

            z2 = gdf4['count']
            zmax2 = gdf4['count'].max()
            z2 = z2 / zmax2
            # xdate = xdate[z >= min_count]
            y2 = y2[z2 >= min_count]

            if 'observations' in gdf4.columns:
                count2 = count2[z2 >= min_count]
            err_plus2 = err_plus2[z2 >= min_count]
            err_minus2 = err_minus2[z2 >= min_count]

            z3 = gdf5['count']
            zmax3 = gdf5['count'].max()
            z3 = z3 / zmax3
            xdate = xdate[z >= min_count]
            y3 = y3[z3 >= min_count]

            if 'observations' in gdf5.columns:
                count3 = count3[z3 >= min_count]
            err_plus3 = err_plus3[z3 >= min_count]
            err_minus3 = err_minus3[z3 >= min_count]

        # plot running mean regression line
        # RM = (y.rolling(window=6, center=True, min_periods=2).median())
        # ax.plot(xdate, RM, '-',label="Moving Median", alpha=y_val_alpha)

        # Gaussian smoothed plot
        b = gaussian(6, 2)
        ga = filters.convolve1d(y, b / b.sum(), mode="reflect")
        ga = interpolate_gaps(ga, limit=3)
        ax.plot(xdate,
                ga,
                '-',
                label="Gaussian SWIR2",
                alpha=1,
                color='#d7191c')

        ga2 = filters.convolve1d(y2, b / b.sum(), mode="reflect")
        ga2 = interpolate_gaps(ga2, limit=3)
        ax.plot(xdate,
                ga2,
                '-',
                label="Gaussian Green",
                alpha=1,
                color='#44C73D')

        ga3 = filters.convolve1d(y3, b / b.sum(), mode="reflect")
        ga3 = interpolate_gaps(ga3, limit=3)
        ax.plot(xdate,
                ga3,
                '-',
                label="Gaussian NIR",
                alpha=1,
                color='#2b83ba')

        # scatter points-median top, mean bottom
        ax.scatter(xdate,
                   y,
                   label="Median SWIR2",
                   s=50,
                   color='#d7191c',
                   alpha=scatter_alpha,
                   marker=".")
        ax.scatter(xdate,
                   y2,
                   label="Median Green",
                   s=50,
                   color='#44C73D',
                   alpha=scatter_alpha,
                   marker=".")
        ax.scatter(xdate,
                   y3,
                   label="Median NIR",
                   s=50,
                   color='#2b83ba',
                   alpha=scatter_alpha,
                   marker=".")

        # plot regression line, if desired
        idx = np.isfinite(xdate) & np.isfinite(y)
        p2 = np.poly1d(np.polyfit(xdate[idx], y[idx], 1))
        ax.plot(xdate,
                p2(xdate),
                '-',
                label="Linear SWIR2",
                color='#E84448',
                alpha=y_val_alpha)

        idx = np.isfinite(xdate) & np.isfinite(y2)
        p3 = np.poly1d(np.polyfit(xdate[idx], y2[idx], 1))
        ax.plot(xdate,
                p3(xdate),
                '-',
                label="Linear Green",
                color='#65D75F',
                alpha=y_val_alpha)

        idx = np.isfinite(xdate) & np.isfinite(y3)
        p4 = np.poly1d(np.polyfit(xdate[idx], y3[idx], 1))
        ax.plot(xdate,
                p4(xdate),
                '-',
                label="Linear NIR",
                color='#4B97C8',
                alpha=y_val_alpha)

        # if desired, plot error band
        # plt.fill_between(xdate, err_plus, err_minus,
        # alpha=error_alpha, color='black', label="25th to 75th %")
        # plt.fill_between(xdate, err_plus2, err_minus2,
        # alpha=error_alpha, color='black', label="25th to 75th %")

        ax.set_ylabel(y_label)
        ax.set_xlabel(x_label)

        ax.yaxis.set_tick_params(labelsize=12)
        if custom_x_axis:
            # xticklabel_pad = 0.1
            years = mdates.YearLocator()  # every year
            months = mdates.MonthLocator()  # every month
            yearsFmt = mdates.DateFormatter('%Y')

            ax.xaxis.set_major_locator(years)
            ax.xaxis.set_major_formatter(yearsFmt)
            ax.xaxis.set_minor_locator(months)

            plt.rc('xtick', labelsize=12)

            # ax.set_xticks(xdate)
            # ax.set_xticklabels(x, rotation=50, fontsize=10)
            # ax.tick_params(axis='x', which='major', pad=xticklabel_pad)

            # ax.xaxis.set_major_formatter(dateformat)
            # ax.set_xlim(datetime.date(settings.plot['x_min'], 1, 1),
            # datetime.date(settings.plot['x_max'], 12, 31))

        if show_grid:
            ax.grid(b=True,
                    which='minor',
                    color='black',
                    alpha=0.75,
                    linestyle=':')

        if show_legend:
            handles, labels = ax.get_legend_handles_labels()
            ax.legend(handles,
                      labels,
                      ncol=1,
                      loc='center right',
                      bbox_to_anchor=[1.1, 0.5],
                      columnspacing=1.0,
                      labelspacing=0.0,
                      handletextpad=0.0,
                      handlelength=1.5,
                      fancybox=True,
                      shadow=True,
                      fontsize='x-small')

        ax.set_title(title)
        plt.tight_layout()
        plt.show()

        # save with figname
        if len(figname) > 0:
            plt.savefig(figname)
Example no. 14
# Add some space around the tick labels for better readability
plt.rcParams['xtick.major.pad'] = '8'
plt.rcParams['ytick.major.pad'] = '8'

# 0.2 Formatter for inserting commas in y axis labels with magnitudes in the thousands


def func(x, pos):  # formatter function takes tick label and tick position
    s = '{:0,d}'.format(int(x))
    return s


y_format = plt.FuncFormatter(func)  # make formatter

# 0.3 Format the x-axis ticks
years2, years4, years5, years10, years15 = dts.YearLocator(2), dts.YearLocator(
    4), dts.YearLocator(5), dts.YearLocator(10), dts.YearLocator(15)

# 0.4 y label locator for vertical axes plotting gdp
majorLocator_y = plt.MultipleLocator(3)
majorLocator_shares = plt.MultipleLocator(0.2)

# In[3]:

# 1. Setup for the construction of K and A

# 1.1 Parameters for the model
alpha = 0.35

# If output_solow == True, then Y = C + I. Else: Y = C + I + G + NX (default)
output_solow = False
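
# Sketch of how the objects defined above might be applied: the five-year
# locator (years5) on the x axis and the comma formatter (y_format) on the
# y axis; dts is assumed to be matplotlib.dates, as in the snippet. The
# GDP-like series is synthetic.
import datetime
import numpy as np
import matplotlib.pyplot as plt

x = [datetime.date(1950 + i, 1, 1) for i in range(70)]
y = 1000 * np.exp(0.03 * np.arange(70))          # synthetic exponential growth

fig, ax = plt.subplots()
ax.plot(x, y)
ax.xaxis.set_major_locator(years5)               # major tick every 5 years
ax.xaxis.set_major_formatter(dts.DateFormatter('%Y'))
ax.yaxis.set_major_formatter(y_format)           # 1,000-style y labels
plt.show()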
Example no. 15
    def plot_temp_evol_spi(self,
                           ppt_orig_vals,
                           dist_type,
                           prob_zero_ppt,
                           df_col_name,
                           season,
                           out_dir,
                           use_fit_fct=False):
        '''Plot the calculated SPI over time'''
        _, y_orig = self.build_edf_fr_vals(ppt_orig_vals)
        _, edf_not_sorted = self.get_vals_fr_edf_vals(ppt_orig_vals,
                                                      df_col_name, y_orig)

        spi_ = self.calculate_SPI_fr_cdf(edf_not_sorted)
        spi_ = spi_[~np.isnan(spi_)]

        pos_inds, neg_inds = np.where(spi_ >= 0.)[0], np.where(spi_ < 0.)[0]
        time = ppt_orig_vals.index

        years, b_width = mdates.YearLocator(), 30
        fig, ax1 = plt.subplots(1, 1, figsize=(32, 8))
        ax1.bar(time[pos_inds],
                spi_[pos_inds],
                width=b_width,
                align='center',
                color='b')
        ax1.bar(time[neg_inds],
                spi_[neg_inds],
                width=b_width,
                align='center',
                color='r')
        if use_fit_fct:
            _, _, spi_fit, _ = self.spi_cdf_fit_dist(ppt_orig_vals,
                                                     prob_zero_ppt, dist_type,
                                                     use_fit_fct)
            spi_fit = spi_fit[~np.isnan(spi_fit)]

            pos_inds_fit = np.where(spi_fit >= 0.)[0]
            neg_inds_fit = np.where(spi_fit < 0.)[0]
            ax1.bar(time[pos_inds_fit],
                    spi_fit[pos_inds_fit],
                    width=b_width,
                    align='center',
                    color='g')
            ax1.bar(time[neg_inds_fit],
                    spi_fit[neg_inds_fit],
                    width=b_width,
                    align='center',
                    color='m')
        ax1.grid(True, alpha=0.5)
        ax1.set_xlabel("Time")
        ax1.xaxis.set_major_locator(years)
        ax1.set_xlim(time[0], time[-1])
        ax1.format_xdata = mdates.DateFormatter('%Y')
        ax1.set_yticks(
            [-3, -2, -1.6, -1.3, -0.8, -0.5, 0, 0.5, 0.8, 1.3, 1.6, 2, 3])
        ax1.set_ylim([-5, 5])
        ax1.set_ylabel('SPI')
        fig.autofmt_xdate()
        plt.savefig(os.path.join(out_dir,
                                 'temp_season_%s_spi_data_SA.png' % (season)),
                    frameon=True,
                    papertype='a4',
                    bbox_inches='tight')
        return spi_
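
# Reduced sketch of the SPI-style bar plot above: positive and negative bars in
# different colours on a datetime axis, yearly major ticks and x limits pinned
# to the data. The SPI values here are random.
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.dates as mdates

time = pd.date_range('1990-01-01', periods=360, freq='MS')   # monthly index
spi = np.random.randn(len(time))
pos_inds, neg_inds = np.where(spi >= 0.)[0], np.where(spi < 0.)[0]

fig, ax1 = plt.subplots(1, 1, figsize=(16, 4))
ax1.bar(time[pos_inds], spi[pos_inds], width=30, align='center', color='b')
ax1.bar(time[neg_inds], spi[neg_inds], width=30, align='center', color='r')

ax1.xaxis.set_major_locator(mdates.YearLocator())
ax1.xaxis.set_major_formatter(mdates.DateFormatter('%Y'))
ax1.set_xlim(time[0], time[-1])
ax1.set_ylabel('SPI')
ax1.grid(True, alpha=0.5)
fig.autofmt_xdate()
plt.show()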
Example no. 16
def plot_timeseries(y, timesteps: list=None,
                    selyears: Union[list, int]=None, title=None,
                    legend: bool=True, nth_xyear: int=10, ax=None):
    # ax=None
    #%%


    if hasattr(y.index,'levels'):
        y_ac = y.loc[0]
    else:
        y_ac = y

    if type(y_ac.index) == pd.core.indexes.datetimes.DatetimeIndex:
        datetimes = y_ac.index

    if timesteps is None and selyears is None:
        ac, con_int = autocorr_sm(y_ac)
        where = np.where(con_int[:,0] < 0 )[0]
        # has to be below 0 for n times (not necessarily consecutive):
        n = 1
        n_of_times = np.array([idx+1 - where[0] for idx in where])
        if n_of_times.size != 0:
            cutoff = where[np.where(n_of_times == n)[0][0] ]
        else:
            cutoff = 100

        timesteps = min(y_ac.index.size, 10*cutoff)
        datetimes = y_ac.iloc[:timesteps].index

    if selyears is not None and timesteps is None:
        if type(selyears) is int:
            selyears = [selyears]
        datetimes = get_oneyr(y.index, *selyears)

    if timesteps is not None and selyears is None:
        datetimes = datetimes[:timesteps]

    if ax is None:
        fig, ax = plt.subplots(constrained_layout=True)

    if hasattr(y.index,'levels'):
        for fold in y.index.levels[0]:
            if legend:
                label = f'f {fold+1}' ; color = None ; alpha=.5
            else:
                label = None ; color = 'red' ; alpha=.1
            ax.plot(datetimes, y.loc[fold, datetimes], alpha=alpha,
                    label=label, color=color)
        if legend:
            ax.legend(prop={'size':6})
    else:
        ax.plot(datetimes, y.loc[datetimes])

    if nth_xyear is None:
        nth_xtick = round(len(ax.xaxis.get_ticklabels())/5)
        for n, label in enumerate(ax.xaxis.get_ticklabels()):
            if n % nth_xtick != 0:
                label.set_visible(False)
    else:
        ax.xaxis.set_major_locator(mdates.YearLocator(1)) # set tick every year
        ax.xaxis.set_major_formatter(mdates.DateFormatter('%Y')) # format %Y
        for n, label in enumerate(ax.xaxis.get_ticklabels()):
            if n % nth_xyear != 0:
                label.set_visible(False)

    ax.tick_params(axis='both', which='major', labelsize=8)
    if title is not None:
        ax.set_title(title, fontsize=10)
    #%%
    return ax
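
# Isolated sketch of the "label every nth year" trick above: put a tick at
# every year but hide all labels except every nth one. Data is synthetic.
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.dates as mdates

nth_xyear = 10
y = pd.Series(np.random.randn(365 * 40),
              index=pd.date_range('1980-01-01', periods=365 * 40, freq='D'))

fig, ax = plt.subplots(constrained_layout=True)
ax.plot(y.index, y.values, lw=0.3)

ax.xaxis.set_major_locator(mdates.YearLocator(1))       # tick every year
ax.xaxis.set_major_formatter(mdates.DateFormatter('%Y'))
for n, label in enumerate(ax.xaxis.get_ticklabels()):
    if n % nth_xyear != 0:
        label.set_visible(False)                        # keep every 10th label
ax.tick_params(axis='both', which='major', labelsize=8)
plt.show()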
Example no. 17
        ax1.grid(b=True, which='major', color='#666666', linestyle='-')

        for tl in ax2.get_yticklabels():
            tl.set_color('g')

        myFmt = dates.DateFormatter("%Y")
        ax1.xaxis.set_major_formatter(myFmt)

        #SET X-AXIS LIMITS (xlim)
        ax1.set_xlim([min_date_4fig,max_date_4fig])

        x_ticks = 1 #ANNUAL X-TICKS
        if total_years_float > 40:
            x_ticks = 2

        ax1.xaxis.set_major_locator(dates.YearLocator(x_ticks))#THIS WORKS

        for tick in ax1.get_xticklabels():
            tick.set_rotation(90)

        plt.rcParams.update({'font.size': 12})

        plt.show()

        outname = str('Hydrographs_GWSI_Manual__') + str(location) + str('.png')
        fig.savefig(outname, dpi = 400, bbox_inches='tight', pad_inches=.1)

##############################################################################
##############################################################################
##############################################################################
#  ### ### ### ### ### #### ### ### ### ### ### #### ### ### ### ### ### ###  #
Example no. 18
def draw_plot(
    well_name,
    date,
    meas,
    no_hyds,
    gwhyd_obs,
    gwhyd_name,
    well_info,
    start_date,
    title_words,
    yaxis_width=-1,
):
    ''' draw_plot() - Creates a PDF file with a graph of the simulated data vs time
        for all hydrographs as lines, with observed values vs time as dots, saved 
        as the well_name.pdf

    Parameters
    ----------
    well_name : str
        Well name, often state well number
    
    date : list
        list of dates (paired with meas)
    
    meas : list
        list of observed values (paired with date)
    
    no_hyds : int
        number of simulation time series to be graphed
    
    gwhyd_obs : list
        simulated IWFM groundwater hydrographs 
        [0]==dates, [1 to no_hyds]==datasets
    
    gwhyd_name : list
        hydrograph names from PEST observations file
    
    well_info : list
        well data for this well from the Groundwater.dat file
    
    start_date : str
        first date in simulation hydrograph files
    
    title_words : str
        plot title words
    
    yaxis_width : int, default=-1
        minimum y-axis width, -1 for automatic
    
    Return
    ------
    nothing

    '''
    import iwfm as iwfm
    import datetime
    import matplotlib

    # Force matplotlib to not use any Xwindows backend.
    matplotlib.use('TkAgg')  # Set to TkAgg ...
    matplotlib.use('Agg')  # ... then reset to Agg
    import matplotlib.dates as mdates
    import matplotlib.pyplot as plt
    from matplotlib.backends.backend_pdf import PdfPages

    line_colors = [
        'r-',
        'g-',
        'c-',
        'y-',
        'b-',
        'm-',
        'k-',
        'r--',
        'g--',
        'c--',
        'y--',
        'b--',
        'm--',
        'k--',
        'r:',
        'g:',
        'c:',
        'y:',
        'b:',
        'm:',
        'k:',
    ]
    # 'r-' = red line, 'bo' = blue dots, 'r--' = red dashes,
    # 'r:' = red dotted line, 'bs' = blue squares, 'g^' = green triangles, etc

    col = well_info[0]  # gather information

    # each hydrograph in gwhyd_obs has dates in the first column
    # convert the observed values and each set of simulated values to a pair of
    # lists, with 'date, meas' format.
    ymin, ymax = 1e6, -1e6
    sim_heads, sim_dates = [], []
    for j in range(0, no_hyds):
        date_temp, sim_temp = [], []
        for i in range(0, len(gwhyd_obs[j])):
            date_temp.append(
                datetime.datetime.strptime(gwhyd_obs[j][i][0], '%m/%d/%Y'))
            sim_temp.append(gwhyd_obs[j][i][col])
            ymin = min(ymin, gwhyd_obs[j][i][col])
            ymax = max(ymax, gwhyd_obs[j][i][col])
        sim_dates.append(date_temp)
        sim_heads.append(sim_temp)

    for i in range(0, len(meas)):
        ymin = min(ymin, meas[i])
        ymax = max(ymax, meas[i])

    meas_dates = []
    for i in range(0, len(date)):
        meas_dates.append(datetime.datetime.strptime(date[i], '%m/%d/%Y'))

    years = mdates.YearLocator()
    months = mdates.MonthLocator()
    yearsFmt = mdates.DateFormatter('%Y')

    # plot simulated vs sim_dates as line, and meas vs specific dates as points, on one plot
    with PdfPages(well_name + '_' + iwfm.pad_front(col, 4, '0') +
                  '.pdf') as pdf:
        fig = plt.figure(figsize=(10, 7.5))
        ax = plt.subplot(111)
        ax.xaxis_date()
        plt.grid(linestyle='dashed')
        ax.yaxis.grid(True)
        ax.xaxis.grid(True)
        ax.xaxis.set_minor_locator(years)
        plt.xlabel('Date')
        plt.ylabel('Head (ft msl)')
        plt.title(title_words + ': ' + well_name.upper() + ' Layer ' +
                  str(well_info[3]))
        plt.plot(meas_dates, meas, 'bo', label='Observed')

        # minimum y axis width was set by user, so check and set if necessary
        if yaxis_width > 0:
            if ymax > ymin:
                if ymax - ymin < yaxis_width:  # set minimum and maximum values
                    center = (ymax - ymin) / 2 + ymin
                    plt.ylim(center - yaxis_width / 2,
                             center + yaxis_width / 2)

        for j in range(0, no_hyds):
            plt.plot(sim_dates[j],
                     sim_heads[j],
                     line_colors[j],
                     label=gwhyd_name[j])

        leg = ax.legend(frameon=1, facecolor='white')
        pdf.savefig()  # saves the current figure into a pdf page
        plt.close()
    return
Example no. 19
# -*- coding: utf-8 -*-
"""
Created on Mon Jun  3 12:31:23 2019

@author: 212566876
"""

#import seaborn as sns
import datetime
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import openpyxl
import pandas as pd
import numpy as np

yearloc = mdates.YearLocator()
monthloc = mdates.MonthLocator()
years_fmt = mdates.DateFormatter('%Y')

SMALL_SIZE = 8
MEDIUM_SIZE = 10
BIGGER_SIZE = 12

plt.rc('font', size=SMALL_SIZE)  # controls default text sizes
plt.rc('axes', titlesize=SMALL_SIZE)  # fontsize of the axes title
plt.rc('axes', labelsize=SMALL_SIZE)  # fontsize of the x and y labels
plt.rc('xtick', labelsize=SMALL_SIZE)  # fontsize of the tick labels
plt.rc('ytick', labelsize=SMALL_SIZE)  # fontsize of the tick labels
plt.rc('legend', fontsize=SMALL_SIZE)  # legend fontsize
plt.rc('figure', titlesize=BIGGER_SIZE)  # fontsize of the figure title
Example no. 20
def makeUserTimeSeriesPlot(dtDate, mlPrdVal, dlPrdVal, refVal, mlPrdValLabel,
                           dlPrdValLabel, refValLabel, xlab, ylab, mainTitle,
                           saveImg, isFore):
    log.info('[START] {}'.format('makeUserTimeSeriesPlot'))

    result = None

    try:
        # Compute validation scores: Bias (Relative Bias), RMSE (Relative RMSE)
        mlRMSE = np.sqrt(np.nanmean((mlPrdVal - refVal)**2))
        mlReRMSE = (mlRMSE / np.nanmean(refVal)) * 100.0

        dlRMSE = np.sqrt(np.nanmean((dlPrdVal - refVal)**2))
        dlReRMSE = (dlRMSE / np.nanmean(refVal)) * 100.0

        # Mask out missing values
        mask = ~np.isnan(refVal)
        number = len(refVal[mask])

        # Fit the linear regression lines
        mlSlope, mlInter, mlR, mlPvalue, mlStdErr = linregress(
            mlPrdVal[mask], refVal[mask])
        dlSlope, dlInter, dlR, dlPvalue, dlStdErr = linregress(
            dlPrdVal[mask], refVal[mask])

        prdValLabel_ml = '{0:s} : R = {1:.3f}, %RMSE = {2:.3f} %'.format(
            mlPrdValLabel, mlR, mlReRMSE)
        prdValLabel_dnn = '{0:s} : R = {1:.3f}, %RMSE = {2:.3f} %'.format(
            dlPrdValLabel, dlR, dlReRMSE)
        refValLabel_ref = '{0:s} : N = {1:d}'.format(refValLabel, number)

        plt.grid(True)

        plt.plot(dtDate, mlPrdVal, label=prdValLabel_ml, marker='o')
        plt.plot(dtDate, dlPrdVal, label=prdValLabel_dnn, marker='o')
        plt.plot(dtDate, refVal, label=refValLabel_ref, marker='o')

        # Set the title and axis labels
        plt.title(mainTitle)
        plt.xlabel(xlab)
        plt.ylabel(ylab)
        # plt.ylim(0, 1000)

        if (isFore == True):
            # plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m'))
            plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%Y'))
            plt.gca().xaxis.set_major_locator(mdates.YearLocator(2))
            plt.gcf().autofmt_xdate()
            plt.xticks(rotation=45, ha='right', minor=False)

        else:
            # plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m'))
            plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%Y'))
            plt.gca().xaxis.set_major_locator(mdates.YearLocator(1))
            plt.gcf().autofmt_xdate()
            plt.xticks(rotation=0, ha='center')

        plt.legend(loc='upper left')

        plt.savefig(saveImg, dpi=600, bbox_inches='tight')
        plt.show()
        plt.close()

        result = {
            'msg': 'succ',
            'saveImg': saveImg,
            'isExist': os.path.exists(saveImg)
        }

        return result

    except Exception as e:
        log.error('Exception : {}'.format(e))
        return result

    finally:
        # always executed before the try/except block finishes
        log.info('[END] {}'.format('makeUserTimeSeriesPlot'))
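
# Condensed sketch of the axis handling above, using the pyplot state
# interface: yearly '%Y' labels, a 2-year interval for the longer forecast
# series, and rotated labels via autofmt_xdate. Data and the isFore flag are
# synthetic.
import datetime
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.dates as mdates

isFore = True
n_years = 12 if isFore else 4
dtDate = [datetime.date(2010 + i, 6, 1) for i in range(n_years)]
refVal = 100 + 10 * np.random.randn(n_years)

plt.grid(True)
plt.plot(dtDate, refVal, marker='o', label='OBS')
plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%Y'))
plt.gca().xaxis.set_major_locator(mdates.YearLocator(2 if isFore else 1))
plt.gcf().autofmt_xdate()
plt.legend(loc='upper left')
plt.show()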
Example no. 21
def process_data(data):
    """
    Various plots of time/date tendencies 
    """
    years = dates.YearLocator()
    months = dates.MonthLocator()
    yearsFmt = dates.DateFormatter('%Y')
    dateconv = np.vectorize(datetime.fromtimestamp)

    #
    # Complete history by day with weekly/monthly running averages
    #
    fig, ax = plt.subplots()
    # Floor to midnight (strip off time)
    dd = datetime.fromtimestamp(data['unix_time'].min()).date()
    start_time = mktime(dd.timetuple())  # Convert back to unix time
    # Ceiling to 23:59 of the last email
    dd = datetime.fromtimestamp(data['unix_time'].max()).date()
    end_time = mktime(dd.timetuple()) + 86359.0  # Convert back to unix time
    ndays = int(np.ceil((end_time - start_time) / 86400))
    # Bin by dates
    H, edges = np.histogram(data['unix_time'], range=(start_time, end_time),
                            bins=ndays)
    dcenter = 0.5*(edges[:-1] + edges[1:])
    plt.plot(dateconv(dcenter), H, lw=0.5, color='k', label='daily')
    if ndays > 7:
        plt.plot(dateconv(dcenter[6:]), running_mean(H,7),
                 lw=3, color='b', label='7-day')
    if ndays > 30:
        plt.plot(dateconv(dcenter[29:]), running_mean(H,30),
                 lw=3, color='r', label='30-day')
    plt.xlabel('Time')
    plt.ylabel('Emails / day')
    plt.legend(loc='best')
    plt.subplots_adjust(left=0.03, right=0.99, bottom=0.13, top=0.97)
    fig.set_size_inches(20,4)
    plt.savefig('history.png')

    #
    # Binned by hour
    #
    width = 0.8
    fig, ax = plt.subplots()
    H, edges = np.histogram(data['hour'], range=(-0.5,23.5), bins=24)
    plt.bar(np.arange(24)+0.5-width/2, H/float(ndays), width, color='grey', ec='k')
    plt.xlim(-0.1, 24.1)
    ax.xaxis.set_major_locator(ticker.MultipleLocator(1))
    plt.xlabel('Hour')
    plt.ylabel('Emails / day / hour')
    plt.subplots_adjust(left=0.1, right=0.95, bottom=0.1, top=0.95)
    plt.savefig('hourly.png')

    #
    # Binned by day of the week
    #
    fig, ax = plt.subplots()
    H, edges = np.histogram(data['day'], range=(-0.5,6.5), bins=7)
    # Move Sunday to the beginning of the week
    sunday = H[-1]
    H[1:] = H[:-1]
    H[0] = sunday
    plt.bar(np.arange(7)+0.5-width/2, H/(ndays/7.0), width, color='grey', ec='k')
    plt.xlim(-0.1, 7.1)
    plt.xticks(np.arange(7)+0.5, ['Sun', 'Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat'])
    plt.ylabel('Emails / day')
    plt.subplots_adjust(left=0.08, right=0.95, bottom=0.08, top=0.95)
    plt.savefig('daily.png')

    #
    # Binned by date of the month
    #
    fig, ax = plt.subplots()
    H, edges = np.histogram(data['date'], range=(0.5,31.5), bins=31)
    plt.bar(np.arange(31)+0.5-width/2, H/(ndays/30.), width, color='grey', ec='k')
    plt.xlim(-0.1, 31.1)
    plt.xticks(np.arange(31)+0.5, ['%d' % (i+1) for i in range(31)])
    ax.tick_params(axis='x', which='both', labelsize=10)
    #ax.xaxis.set_major_locator(ticker.MultipleLocator(1))
    plt.xlabel('Date of Month')
    plt.ylabel('Emails / day')
    plt.subplots_adjust(left=0.1, right=0.95, bottom=0.1, top=0.95)
    plt.savefig('by-date.png')

    #
    # Binned by week of the year
    #
    width = 0.7
    fig, ax = plt.subplots()
    ax1 = ax.twiny()
    week = np.minimum(data['doy']/7.0, 52)
    H, edges = np.histogram(week, range=(0,52), bins=52)
    plt.bar(np.arange(52)+0.5-width/2, H/(ndays/52.), width, color='grey', ec='k')
    plt.xlim(-0.1, 52.1)
    # Bottom x-axis: week of the year
    # Top x-axis: month
    days_in_month = [0,31,28,31,30,31,30,31,31,30,31,30,31]
    approx_week = np.cumsum(days_in_month) / 7.0
    month_center = 0.5*(approx_week[:-1] + approx_week[1:])
    months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug',
              'Sep', 'Oct', 'Nov', 'Dec']

    ax1.set_xticks(approx_week[:-1])
    ax1.set_xlim(-0.1, 52.1)
    ax1.set_xticklabels(months, ha='left')
    ax.xaxis.set_major_locator(ticker.MultipleLocator(5))
    ax.set_xlim(-0.1, 52.1)
    ax.set_ylabel('Emails / day')
    ax.set_xlabel('Week of the year')
    plt.subplots_adjust(left=0.08, right=0.97, bottom=0.1, top=0.92)
    plt.savefig('weekly.png')

    #
    # Punchcard graph
    # Inspired by https://www.mercurial-scm.org/wiki/PunchcardExtension
    #
    fig, ax = plt.subplots()
    H, xe, ye = np.histogram2d(data['hour'], data['day'], bins=(24,7),
                               range=((0,24),(-0.5,6.5)))
    # Move Sunday to the first day of the week
    sunday = H[:,-1]
    H[:,1:] = H[:,:-1]
    H[:,0] = sunday
    day_hour = np.mgrid[0:24, 0:7]
    norm = 200.0 / H.max()
    plt.scatter(day_hour[0].ravel(), day_hour[1].ravel(), s=norm*H.ravel(),
                color='k')
    # Mark typical working day
    rect = patches.Rectangle((8.5,0.5), 9, 5, color='c', alpha=0.3)
    ax.add_patch(rect)
    # Setup custom ticks
    ax.xaxis.set_major_locator(ticker.MultipleLocator(1))
    plt.yticks(range(7), ['Sun', 'Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat'])
    plt.xlabel('Hour')
    plt.xlim(-0.5, 23.5)
    plt.ylim(6.5, -0.5)
    plt.subplots_adjust(left=0.08, right=0.97, bottom=0.1, top=0.97)
    plt.savefig('punchcard.png')
Example no. 22
def _display_timeline(narrative_name: str, narrative_text: str):
    """
    Displays a timeline of the narrative events.

    :param narrative_name: The name/label of the narrative
    :param narrative_text: The narrative text (which will not be needed when the event graph
                           is created in load.py)
    :return: None (Timeline is displayed)
    """
    logging.info(f'Displaying narrative timeline for {narrative_name}')
    # TODO: Move following processing to load.py and change to retrieving from DB
    sentences = parse_narrative(narrative_text)
    key_date = ''
    current_loc = ''
    orig_dates = set()
    date_text_dict = dict()
    for sent_dict in sentences:
        key_date, current_loc = \
            get_timeline_data(sent_dict, key_date, orig_dates, current_loc, date_text_dict)
    # TODO: What if there are no dates?
    # Sort the dates and simplify the text
    orig_dates = sorted(orig_dates)
    names = []
    for orig_date in orig_dates:
        date_text = ''
        loc_text = date_text_dict[orig_date][0].split('Loc: ')[1].split(
            '\n')[0]
        for text in date_text_dict[orig_date]:
            text = text.replace(f'Loc: {loc_text}\n', '')
            date_text += text
        names.append(f'Loc: {loc_text}\n{date_text}')
    # For matplotlib timeline, need datetime formatting
    dates = [datetime.strptime(d, "%Y") for d in orig_dates]
    # Create a stem plot with some variation in levels as to distinguish close-by events.
    # Add markers on the baseline for visual emphasis
    # For each event, add a text label via annotate, which is offset in units of points from the tip of the event line
    levels = np.tile([-5, 5, -3, 3, -1, 1],
                     int(np.ceil(len(dates) / 6)))[:len(dates)]
    # Create figure and plot a stem plot with the date
    fig, ax = plt.subplots(figsize=(10, 7))
    ax.set(title="Narrative Timeline")
    ax.vlines(dates, 0, levels, color="tab:red")  # The vertical stems
    ax.plot(dates, np.zeros_like(dates), "-o", color="k",
            markerfacecolor="w")  # Baseline and markers on it
    # Annotate lines
    for d, l, r in zip(dates, levels, names):
        ax.annotate(r,
                    xy=(d, l),
                    xytext=(-2, np.sign(l) * 3),
                    textcoords="offset points",
                    horizontalalignment="right",
                    verticalalignment="bottom" if l > 0 else "top",
                    fontsize='x-small')
    # Format x-axis with yearly intervals
    ax.xaxis.set_major_locator(mdates.YearLocator())
    ax.xaxis.set_major_formatter(mdates.DateFormatter("%Y"))
    plt.setp(ax.get_xticklabels(), rotation=30, ha="right", fontsize='small')
    # Remove y axis and spines
    ax.yaxis.set_visible(False)
    ax.spines[["left", "top", "right"]].set_visible(False)
    ax.margins(y=0.1)
    plt.tight_layout()
    # Display in a window
    layout = [[sg.Canvas(key="-CANVAS-")]]
    window_timeline = sg.Window("Narrative Timeline",
                                layout,
                                icon=encoded_logo,
                                element_justification='center').Finalize()
    draw_figure(window_timeline["-CANVAS-"].TKCanvas, fig)
    window_timeline.Maximize()
    # Non-blocking window
    window_timeline.read(timeout=0)
    return
Esempio n. 23
0
    skiprows=3,
    delimiter=',',
    converters={0: mdates.strpdate2num("%Y-%m-%dT%H:%M:%SZ")})

for v in valsT:
    vals.append(v)

for d in datesT:
    dates.append(d)

fig = pylab.figure(figsize=(12, 6), facecolor='w')
fig.suptitle('SSEBop Actual Evapotranspiration, Au Sable Watershed',
             fontsize=26)
ax = fig.gca()
ax.set_ylabel('Monthly Actual Evapotranspiration (mm)', fontsize=18)
ax.plot_date(dates, vals, 'g-')
ax.xaxis.set_major_locator(mdates.YearLocator(10, month=1, day=1))
ax.xaxis.set_major_formatter(mdates.DateFormatter(' %Y'))

for tick in ax.xaxis.get_major_ticks():
    tick.label.set_fontsize(18)

for tick in ax.yaxis.get_major_ticks():
    tick.label.set_fontsize(18)

# ax.set_ylim([182,192])
majorLocator = LinearLocator(numticks=5)
ax.yaxis.set_major_locator(majorLocator)

show()
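
The call that builds datesT and valsT is cut off at the top of this snippet. A sketch of a comparable read, assuming a hypothetical CSV file ssebop_ausable.csv whose first column is an ISO-8601 timestamp and whose second column is the monthly value (a datetime-based converter is used here because mdates.strpdate2num has been deprecated and removed in recent Matplotlib versions):

import numpy as np
import matplotlib.dates as mdates
from datetime import datetime

def iso2num(field):
    # the converter may receive bytes or str depending on the NumPy version
    if isinstance(field, bytes):
        field = field.decode()
    return mdates.date2num(datetime.strptime(field, "%Y-%m-%dT%H:%M:%SZ"))

datesT, valsT = np.loadtxt(
    "ssebop_ausable.csv",   # hypothetical file name
    skiprows=3,
    delimiter=',',
    converters={0: iso2num},
    usecols=(0, 1),
    unpack=True)
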
Esempio n. 24
0
def main():
    """
	# Medians Rn222 HallA
	mdnRnA=[87.0, 88.0, 112.0, 99.0, 102.0, 86.0, 100.0, 88.0, 75.0, 94.0 ,80.0, 86.0, 76.0, 75.0 ,78.0, 67.0 ,67.0, 79.0 ,54.0, 66.0, 63.0, 60.5, 54.0, 63.0, 65.0, 73.0, 84.0, 70.0, 72.5, 77.0, 74.0, 84.0, 81.0, 81.5, 97.0, 97.0, 92.0, 80.0, 87.0, 90.0, 74.0, 75.0, 78.0, 82.0, 78.0, 75.0, 67.0, 74.0, 89.0, 86.0, 81.0, 85.0, 84.0, 81.0, 79.0, 99.0, 114.0, 94.0, 64.0, 76.0, 69.0, 74.0, 71.0, 79.0, 68.0, 64.0, 62.0, 67.0, 60.0, 64.0, 73.0, 68.0, 60.0, 72.0, 71.0, 71.0, 68.0, 77.0, 67.0, 46.0, 75.0, 86.0, 80.0, 88.0, 85.0, 61.0, 80.0, 98.0, 108.0, 89.0, 68.0, 65.0, 91.0,96.0, 106.5, 108.0, 106.5, 109.5, 104.0, 110.0, 86.0, 85.0, 89.0, 85.0, 71.0, 82.5, 75.0, 83.0, 73.0, 57.0, 55.0, 60.0, 63.0, 66.0, 79.0, 74.0, 81.0, 72.5, 67.0, 79.0, 73.0, 73.0, 70.0, 78.5, 69.0, 69.0, 65.0, 75.0, 72.0, 71.0, 72.5, 82.0, 100.0, 97.0, 97.0, 92.0, 101.0, 102.0, 73.0, 69.0, 72.0, 66.5, 72.0, 65.5, 75.5, 73.0, 63.0, 89.0, 98.0, 69.0 , 84.0, 81.0, 77.0, 65.0, 70.0, 87.0, 74.0, 66.0, 80.0, 69.0, 66.0, 62.0, 67.0, 65.0, 74.0, 77.0, 81.0, 79.0, 65.0, 72.0, 81.0, 70.0, 70.0, 70.0, 79.0, 81.0, 91.0, 84.0,73.5, 99.0, 101.0, 67.0, 76.0, 70.0, 74.0, 73.0, 80.0, 77.0, 104.0, 74.0, 80.0, 80.0, 86.0, 105.0, 97.0, 93.5, 92.0,76.0, 68.0, 56.0, 59.0, 72.0, 68.0, 79.0, 64.0, 71.5, 76.0, 74.0, 59.0, 74.0, 64.0, 68.0, 64.0, 67.0, 70.0, 77.0, 67.0, 71.0, 80.0, 84.0, 83.0, 70.0, 70.0, 95.0, 102.0, 109.0, 88.0, 89.0]
	print(len(mdnRnA))
	"""

    # Medians Rn222 HallA (correct: 52 weeks x 5 years = 259 values)
    mdnRnA = [
        90, 79, 99, 117, 99, 99, 86, 95, 93, 69, 87, 94, 74, 90, 76, 71, 87,
        60, 72, 73, 77, 51, 66, 58, 63, 52.5, 67, 63, 78, 84, 69, 75, 77, 71,
        82, 85, 82, 81.5, 94, 99, 97, 78, 93.5, 80, 92, 74, 71, 83, 70, 80, 86,
        61, 77.6969111969112, 70, 83, 95, 82, 86, 83, 83, 82, 79, 101, 122,
        100, 74, 70, 70, 70, 74, 73, 83, 66, 60, 66, 62, 60, 69, 67, 71, 68,
        60, 68, 73, 66, 67, 72, 77, 67, 47, 68, 85.5, 84, 78, 89, 81, 61, 75,
        99, 104, 83, 77.6969111969112, 62, 70, 91, 98, 103, 112, 105, 111, 109,
        99, 110, 88, 82.5, 99, 81, 79, 72, 80, 75, 86, 77, 61, 56, 55, 66, 60,
        71, 71, 74, 72, 54, 65, 74, 75, 76, 72, 69, 78.5, 67, 72, 63, 69, 87,
        71, 71, 72.5, 75, 93, 89, 100, 96, 96, 101, 102, 70.5, 72, 74, 67, 68,
        70, 65, 75.5, 72, 65, 80, 95, 94.5, 71, 84.5, 81, 78, 71, 66, 83, 85,
        62, 73, 80, 69, 66, 63, 63, 69, 68, 78.5, 78, 78, 79, 67, 69, 82, 78,
        61, 73.5, 70, 79, 81.5, 83, 90, 79, 99, 97, 95, 67, 79.5, 65, 80, 74,
        70.5, 79, 78, 104, 77, 74, 87, 84, 94, 109, 91, 93.5, 95, 76, 72, 61,
        57, 59, 70, 68, 82, 67, 69, 73, 76, 70, 57, 75, 63, 72, 64, 66, 70, 81,
        68, 74, 72, 79, 84, 81, 69, 77, 74, 97, 103, 107, 88, 96, 101
    ]

    print(len(mdnRnA), 0.7 * len(mdnRnA), int(round(0.3 * len(mdnRnA))))

    # New values
    # Starts the first week of July 2018 and runs until 23 Sept 2018 (12 weeks)
    newValuesReal = [90, 106, 99, 104, 90, 80, 99, 100, 98, 85, 96, 84]

    ################ ANNs in Keras ###################
    #
    dataset = mdnRnA

    sample_size = 52
    ahead = len(newValuesReal)
    dataset = np.asarray(dataset)
    nepochs = 40

    assert 0 < sample_size < dataset.shape[0]

    ## Create the samples from the normalized array ##
    X = np.atleast_3d(
        np.array([
            dataset[start:start + sample_size]
            for start in range(0, dataset.shape[0] - sample_size)
        ]))
    y = dataset[sample_size:]
    qf = np.atleast_3d([dataset[-sample_size:]])

    # Split into training and test data
    #test_size = 52
    test_size = int(0.3 * len(mdnRnA))

    trainX, testX = X[:-test_size], X[-test_size:]
    trainY, testY = y[:-test_size], y[-test_size:]

    nextSteps = np.empty((ahead + 1, sample_size, 1))
    nextSteps[0, :, :] = np.atleast_3d(
        np.array([
            dataset[start:start + sample_size]
            for start in range(dataset.shape[0] -
                               sample_size, dataset.shape[0] - sample_size + 1)
        ]))

    ####### Build the FFNN architecture ###########
    #### (ReLUs are used as activation functions) ###

    # 2 hidden layers with 64 and 32 neurons, respectively
    neurons = [64, 32]

    # Create the base model
    model = Sequential()

    # Add a first hidden layer with 64 neurons
    model.add(
        Dense(neurons[0],
              activation='relu',
              input_shape=(X.shape[1], X.shape[2])))

    print(model.layers[-1].output_shape)

    # Add a second hidden layer with 32 neurons
    model.add(Dense(neurons[1], activation='relu'))
    print(model.layers[-1].output_shape)

    # Flatten the data to reduce the dimensionality of the output
    model.add(Flatten())

    # Add the network's output layer with linear activation
    model.add(Dense(1, activation='linear'))
    print(model.layers[-1].output_shape)

    # Compile the model using the Adam optimizer
    model.compile(loss="mse", optimizer="adam")
    #keras.utils.layer_utils.print_layer_shapes(model,input_shapes=(trainX.shape))

    # Train the network
    history = model.fit(trainX,
                        trainY,
                        epochs=nepochs,
                        batch_size=10,
                        verbose=0,
                        validation_data=(testX, testY))

    ### Forecast on the test data ###
    pred = model.predict(testX)

    # Compute the MSE (ECM) and MAE (EAM)
    testScoreECM = mean_squared_error(testY, pred)
    print('ECM: %.4f' % (testScoreECM))

    testScoreEAM = mean_absolute_error(testY, pred)
    print('EAM: %.4f' % (testScoreEAM))
    ''' Predict the future values. '''
    newValues = np.zeros(ahead)
    temp = np.zeros(sample_size)

    # Recursive forecasting: each predicted value is appended to the input
    # window, which is then fed back in to predict the following step
    for i in range(ahead):
        #print('ahead',i)
        #print('prediccion ', model.predict(nextSteps[None,i,:]), scaler.inverse_transform(model.predict(nextSteps[None,i,:])) )
        temp = nextSteps[i, 1:, :]
        #print(temp, len(temp))
        temp = np.append(temp, model.predict(nextSteps[None, i, :]), axis=0)
        newValues[i] = model.predict(nextSteps[None, i, :])
        #print(temp, len(temp))

        #print(nextSteps[i,:,:])
        nextSteps[i + 1, :, :] = temp
        #print(nextSteps[i+1,:,:])

    # Compute MSE and MAE for the ahead values
    #print('ECM ahead: %.4f' % (mean_squared_error(newValuesReal, newValues)))
    #print('EAM ahead: %.4f' % (mean_absolute_error(newValuesReal, newValues)))

    #print(model.output_shape)
    #print(model.summary())
    #print(model.get_config())

    startday = pd.Timestamp(2013, 7, 1)
    startdaypred = pd.Timestamp(
        2013, 7, 1) + 7 * pd.Timedelta(len(mdnRnA) - len(pred), unit='D')
    startdayahead = pd.Timestamp(2013, 7,
                                 1) + 7 * pd.Timedelta(len(mdnRnA), unit='D')
    #print(startday,startdaypred,startdayahead)

    ### Plotting ###
    # general plot
    fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(10, 4))
    plt.figure(1)
    xaxis = ax.get_xaxis()
    #ax.xaxis.grid(b=True, which='minor', color='0.90', linewidth=0.6)
    ax.xaxis.set_major_locator(mdates.YearLocator())
    ax.xaxis.set_minor_locator(mdates.MonthLocator())
    ax.xaxis.set_major_formatter(mdates.DateFormatter("%Y"))
    #ax.xaxis.set_minor_formatter(mdates.DateFormatter("%b"))

    ax.plot(pd.date_range(startday, periods=len(mdnRnA), freq='W'),
            mdnRnA,
            linewidth=1,
            color='k')
    ax.plot(pd.date_range(startdaypred, periods=len(pred), freq='W'),
            pred,
            linewidth=2,
            linestyle=':',
            color='k')
    #ax.plot(pd.date_range(startdayahead, periods=len(newValues), freq='W'), newValues, linestyle='--', color='b', linewidth=1 )
    #ax.plot(pd.date_range(startdayahead, periods=len(newValuesReal), freq='W'), newValuesReal, color='g', linewidth=1 )

    plt.suptitle('Weekly Predictions at LSC - Hall A')
    #plt.savefig('./prediction_ANN_weekly_D64D32_D1_e50_b10_ss52_ts52.eps')

    # ahead plot
    fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(10, 4))  #
    plt.figure(2)
    xaxis = ax.get_xaxis()
    #ax.xaxis.grid(b=True, which='minor', color='0.90', linewidth=0.6)
    ax.xaxis.set_major_locator(mdates.YearLocator())
    ax.xaxis.set_minor_locator(mdates.MonthLocator())
    ax.xaxis.set_major_formatter(mdates.DateFormatter("%Y"))
    ax.xaxis.set_minor_formatter(mdates.DateFormatter("%b"))

    ax.plot(pd.date_range(startdayahead, periods=len(newValuesReal), freq='W'),
            newValuesReal,
            color='k',
            linewidth=1)
    ax.plot(pd.date_range(startdayahead, periods=len(newValues), freq='W'),
            newValues,
            linestyle=':',
            color='k',
            linewidth=2)

    plt.suptitle('Weekly Predictions at LSC - Hall A')
    #plt.savefig('./detailedprediction_ANN_weekly_D64D32_D1_e50_b10_ss52_ts52.eps')

    # summarize history for loss
    fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(6, 6))  # 6,6
    plt.figure(3)
    plt.plot(history.history['loss'])
    plt.plot(history.history['val_loss'])
    #ax.set_yscale("log")
    #plt.title('model loss')
    plt.ylabel('loss')
    plt.xlabel('epoch')
    plt.legend(['train', 'test'], loc='upper left')
Esempio n. 25
0
minorLocator_5 = AutoMinorLocator(n=2)
minorFormatter_5 = FormatStrFormatter('%.1f')

minorLocator_6 = AutoMinorLocator(n=2)
minorFormatter_6 = FormatStrFormatter('%.1f')

minorLocator_7 = AutoMinorLocator(n=2)
minorFormatter_7 = FormatStrFormatter('%.1f')

minorLocator_8 = AutoMinorLocator(n=2)
minorFormatter_8 = FormatStrFormatter('%.1f')

minorLocator_9 = AutoMinorLocator(n=2)
minorFormatter_9 = FormatStrFormatter('%.1f')

years = mdates.YearLocator()  #every year
days = mdates.DayLocator(15)
yearFmt = mdates.DateFormatter('%Y')
"""
Function for plotting graphs with three variables: the function draws the graph as lines.
pr_1, pr_2, pr_3 - the main computed variables from which the graph is built (each needs an
                   index and values); the index is a date with daily resolution
n_1, n_2, n_3 - legend text for the main computed variables
n_4 - title of the graph
n_5 - label of the y axis
pr_6, pr_7, pr_8 - limits of the y scale (pr_6 - lower bound, pr_7 - upper bound, pr_8 - step)
pr_9 - parameter for the minor ticks
l_p - legend position
time_step_1, time_step_2 - time range
"""
Esempio n. 26
0
def compute_vol_ratio_and_aty(stock_data):
    """
    Computes the Volatility Ratio and ATR5 for the given data
    :param stock_data: The stock data dataframe used for the computation and for storing the result
    :return: the stock_data dataframe with the added columns
    """
    # Compute the logarithmic returns using the Closing price
    stock_data['Log_Ret'] = np.log(stock_data['Close'] /
                                   stock_data['Close'].shift(1))

    # Compute Volatility using the pandas rolling standard deviation function
    stock_data['Volatility'] = stock_data['Log_Ret'].rolling(
        window=252, center=False).std() * np.sqrt(252)
    stock_data['ATR'] = abs(stock_data['High'] - stock_data['Low'])

    # Plot the close, volatility and ATR to get a rough idea of what's happening
    stock_data[['Close', 'Volatility', 'ATR']].plot(subplots=True,
                                                    color='blue',
                                                    figsize=(8, 6))
    plt.show()
    plt.close()

    # Calculate ATR for a window of 5 days
    stock_data['ATR5'] = (stock_data['ATR'].rolling(min_periods=1,
                                                    window=5).sum()) / 4
    # Calculate the Volatility Ratio
    stock_data['VolRatio'] = (stock_data['ATR'] / stock_data['ATR5'])

    years = mdates.YearLocator()  # every year
    months = mdates.MonthLocator()  # every month
    yearsFmt = mdates.DateFormatter('%Y')
    fig, ax = plt.subplots()
    ax.plot(stock_data['VolRatio'])
    # format the ticks
    ax.xaxis.set_major_locator(years)
    ax.xaxis.set_major_formatter(yearsFmt)
    ax.xaxis.set_minor_locator(months)

    # round to nearest years...
    datemin = np.datetime64(stock_data.index[0], 'Y')
    datemax = np.datetime64(stock_data.index[-1], 'Y') + np.timedelta64(1, 'Y')
    ax.set_xlim(datemin, datemax)

    # format the coords message box
    def price(x):
        return '$%1.2f' % x

    ax.format_xdata = mdates.DateFormatter('%Y-%m-%d')
    ax.format_ydata = price
    ax.grid(True)

    # rotates and right aligns the x labels, and moves the bottom of the
    # axes up to make room for them
    fig.autofmt_xdate()

    plt.show()
    plt.close()

    # Plot the Vol Ratio
    stock_data[['Close', 'VolRatio']].plot(subplots=True,
                                           color='blue',
                                           figsize=(16, 10))
    plt.show()
    plt.close()

    return stock_data
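
A usage sketch with synthetic data (the column names below match what the function expects; the data itself is made up for illustration and is not from the original script):

import numpy as np
import pandas as pd

idx = pd.date_range('2015-01-01', periods=600, freq='B')
close = 100 + np.cumsum(np.random.normal(0, 1, len(idx)))
stock_data = pd.DataFrame({'Close': close,
                           'High': close + np.random.uniform(0.5, 2.0, len(idx)),
                           'Low': close - np.random.uniform(0.5, 2.0, len(idx))},
                          index=idx)

stock_data = compute_vol_ratio_and_aty(stock_data)
print(stock_data[['Volatility', 'ATR5', 'VolRatio']].tail())
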
Esempio n. 27
0
    def send_ponctualite(self):

        conn = sqlite3.connect('ter.sqlite')
        c = conn.cursor()

        if len(self.path_info) <= 1 or self.path_info[
                1] == '':  # no parameter => default list
            # Define the regions and their plot colors
            regions = [("Rhône Alpes", "blue"), ("Auvergne", "green"),
                       ("Auvergne-Rhône Alpes", "cyan"), ('Bourgogne', "red"),
                       ('Franche Comté', 'orange'),
                       ('Bourgogne-Franche Comté', 'olive')]
        else:
            # Check that the requested region actually exists
            c.execute("SELECT DISTINCT Région FROM 'regularite-mensuelle-ter'")
            reg = c.fetchall()
            if (self.path_info[1], ) in reg:  # note: reg is a list of tuples
                regions = [(self.path_info[1], "blue")]
            else:
                print('Erreur nom')
                self.send_error(404)  # region not found -> 404 error
                return None

        # plot configuration
        fig1 = plt.figure(figsize=(18, 6))
        ax = fig1.add_subplot(111)
        ax.set_ylim(bottom=80, top=100)
        ax.grid(which='major', color='#888888', linestyle='-')
        ax.grid(which='minor', axis='x', color='#888888', linestyle=':')
        ax.xaxis.set_major_locator(pltd.YearLocator())
        ax.xaxis.set_minor_locator(pltd.MonthLocator())
        ax.xaxis.set_major_formatter(pltd.DateFormatter('%B %Y'))
        ax.xaxis.set_tick_params(labelsize=10)
        ax.xaxis.set_label_text("Date")
        ax.yaxis.set_label_text("% de régularité")

        # loop over the regions
        for l in (regions):
            c.execute(
                "SELECT * FROM 'regularite-mensuelle-ter' WHERE Région=? ORDER BY Date",
                l[:1])  # or (l[0],)
            r = c.fetchall()
            # retrieve the date (column 2) and convert it to pyplot's date format
            x = [
                pltd.date2num(dt.date(int(a[1][:4]), int(a[1][5:]), 1))
                for a in r if not a[7] == ''
            ]
            # retrieve the regularity (column 8)
            y = [float(a[7]) for a in r if not a[7] == '']
            # plot the curve
            plt.plot(x,
                     y,
                     linewidth=1,
                     linestyle='-',
                     marker='o',
                     color=l[1],
                     label=l[0])

        # legends
        plt.legend(loc='lower left')
        plt.title('Régularité des TER (en %)', fontsize=16)

        # write the curves to a PNG file
        fichier = 'courbes/ponctualite_' + self.path_info[1] + '.png'
        plt.savefig('client/{}'.format(fichier))
        plt.close()

        #html = '<img src="/{}?{}" alt="ponctualite {}" width="100%">'.format(fichier,self.date_time_string(),self.path)
        body = json.dumps({
                'title': 'Régularité TER '+self.path_info[1], \
                'img': '/'+fichier \
                 })
        # send the response
        headers = [('Content-Type', 'application/json')]
        self.send(body, headers)
Esempio n. 28
0
def _plot_loop_result(loop_results: dict, config):
    """plot the loop results in a fancy way that displays all information more clearly"""
    # prepare looped_values dataframe
    if not isinstance(loop_results, dict):
        raise TypeError('loop_results should be a dict')
    looped_values = loop_results['complete_values']
    if looped_values.empty:
        raise ValueError(
            f'No meaningful operation list was created in the current period, so back looping is skipped!'
        )
    # registering the matplotlib converters explicitly is required by future matplotlib versions
    register_matplotlib_converters()
    # Compute the daily holdings over the whole backtest period; buy/sell points are inferred from the changes in holdings
    result_columns = looped_values.columns
    fixed_column_items = ['fee', 'cash', 'value', 'reference', 'ref', 'ret']
    stock_holdings = [
        item for item in result_columns
        if item not in fixed_column_items and item[-2:] != '_p'
    ]
    # To compare the backtest result and the reference price on the same footing, their starting
    # points need to "coincide"; otherwise the two cannot be compared.
    # For example, if the reference price is the HS300 index and the initial capital of the
    # backtest is 100000, the backtest values are usually above 100000 while the HS300 index only
    # fluctuates between 2000 and 5000, so plotting both on the same chart shows only the backtest
    # result and squeezes the HS300 index into a flat line that cannot be compared against.
    # The solution is to show the relative return of both, so that both curves start at 0.

    # Change in the number of shares held; a change indicates that a buy or sell took place
    change = (looped_values[stock_holdings] -
              looped_values[stock_holdings].shift(1)).sum(1)
    # Backtest value and reference index price on the first day of the record, used to build the return curves that follow
    start_point = looped_values['value'].iloc[0]
    ref_start = looped_values['reference'].iloc[0]
    # Daily return of the backtest result
    ret = looped_values['value'] - looped_values['value'].shift(1)
    # Daily position (fraction of total value held in stock)
    position = 1 - (looped_values['cash'] / looped_values['value'])
    # Overall return-rate curves of the backtest result and of the reference index
    return_rate = (looped_values.value - start_point) / start_point * 100
    ref_rate = (looped_values.reference - ref_start) / ref_start * 100

    # process plot figure and axes formatting
    years = mdates.YearLocator()  # every year
    months = mdates.MonthLocator()  # every month
    weekdays = mdates.WeekdayLocator()  # every weekday
    years_fmt = mdates.DateFormatter('%Y')
    month_fmt_none = mdates.DateFormatter('')
    month_fmt_l = mdates.DateFormatter('%y/%m')
    month_fmt_s = mdates.DateFormatter('%m')

    chart_width = 0.88
    # Display the investment performance summary
    fig, (ax1, ax2, ax3) = plt.subplots(3,
                                        1,
                                        figsize=(12, 8),
                                        facecolor=(0.82, 0.83, 0.85))
    if isinstance(config.asset_pool, str):
        title_asset_pool = config.asset_pool
    else:
        if len(config.asset_pool) > 3:
            title_asset_pool = list_to_str_format(
                config.asset_pool[:3]) + '...'
        else:
            title_asset_pool = list_to_str_format(config.asset_pool)
    fig.suptitle(
        f'Back Testing Result {title_asset_pool} - reference: {config.reference_asset}',
        fontsize=14,
        fontweight=10)
    # All evaluation metrics of the backtest are printed on the figure in a table-like layout;
    # to get the table effect, labels and values are printed as two columns at fixed positions
    fig.text(
        0.07, 0.93, f'periods: {loop_results["years"]} years, '
        f'from: {loop_results["loop_start"].date()} to {loop_results["loop_end"].date()}; '
        f'time consumed:   signal creation: {time_str_format(loop_results["op_run_time"])};'
        f'  back test:{time_str_format(loop_results["loop_run_time"])}')
    fig.text(0.21,
             0.82, f'Operation summary:\n\n'
             f'Total op fee:\n'
             f'total investment:\n'
             f'final value:',
             ha='right')
    fig.text(
        0.23, 0.82, f'{loop_results["oper_count"].buy.sum()}     buys \n'
        f'{loop_results["oper_count"].sell.sum()}     sells\n'
        f'¥{loop_results["total_fee"]:13,.2f}\n'
        f'¥{loop_results["total_invest"]:13,.2f}\n'
        f'¥{loop_results["final_value"]:13,.2f}')
    fig.text(0.50,
             0.82, f'Total return:\n'
             f'Avg annual return:\n'
             f'ref return:\n'
             f'Avg annual ref return:\n'
             f'Max drawdown:',
             ha='right')
    fig.text(
        0.52, 0.82, f'{loop_results["rtn"] * 100:.2f}%    \n'
        f'{loop_results["annual_rtn"] * 100: .2f}%    \n'
        f'{loop_results["ref_rtn"] * 100:.2f}%    \n'
        f'{loop_results["ref_annual_rtn"] * 100:.2f}%\n'
        f'{loop_results["mdd"] * 100:.3f}%'
        f' on {loop_results["low_date"]}')
    fig.text(0.82,
             0.82, f'alpha:\n'
             f'Beta:\n'
             f'Sharp ratio:\n'
             f'Info ratio:\n'
             f'250-day volatility:',
             ha='right')
    fig.text(
        0.84, 0.82, f'{loop_results["alpha"]:.3f}  \n'
        f'{loop_results["beta"]:.3f}  \n'
        f'{loop_results["sharp"]:.3f}  \n'
        f'{loop_results["info"]:.3f}  \n'
        f'{loop_results["volatility"]:.3f}')

    ax1.set_position([0.05, 0.41, chart_width, 0.40])
    # Plot the return curve of the reference data
    ax1.plot(looped_values.index,
             ref_rate,
             linestyle='-',
             color=(0.4, 0.6, 0.8),
             alpha=0.85,
             label='Reference')

    # Plot the return curve of the backtest result
    ax1.plot(looped_values.index,
             return_rate,
             linestyle='-',
             color=(0.8, 0.2, 0.0),
             alpha=0.85,
             label='Return')
    ax1.set_ylabel('Total return rate')
    ax1.yaxis.set_major_formatter(mtick.PercentFormatter())
    # Fill the positive/negative regions of the reference return: green for positive, red for negative
    ax1.fill_between(looped_values.index,
                     0,
                     ref_rate,
                     where=ref_rate >= 0,
                     facecolor=(0.4, 0.6, 0.2),
                     alpha=0.35)
    ax1.fill_between(looped_values.index,
                     0,
                     ref_rate,
                     where=ref_rate < 0,
                     facecolor=(0.8, 0.2, 0.0),
                     alpha=0.35)

    # Show the holding-position bands (green bands mark long positions over the backtest period,
    # red bands mark short positions; the darker the colour, the larger the position).
    # Find every buy/sell time point and store them in a list, used to mark the trade timing.
    if config.show_positions:
        position_bounds = [looped_values.index[0]]
        position_bounds.extend(looped_values.loc[change != 0].index)
        position_bounds.append(looped_values.index[-1])
        for first, second, long_short in zip(
                position_bounds[:-2], position_bounds[1:],
                position.loc[position_bounds[:-2]]):
            # Fill the long and short intervals of the backtest history with green and red respectively
            if long_short > 0:
                # Fill the long interval with a shade of green (deeper green = larger position)
                if long_short > 1: long_short = 1
                ax1.axvspan(first,
                            second,
                            facecolor=((1 - 0.6 * long_short),
                                       (1 - 0.4 * long_short),
                                       (1 - 0.8 * long_short)),
                            alpha=0.2)
            else:
                # Fill the short interval with a shade of red (deeper red = larger short position)
                ax1.axvspan(first,
                            second,
                            facecolor=((1 - 0.2 * long_short),
                                       (1 - 0.8 * long_short),
                                       (1 - long_short)),
                            alpha=0.2)

    # Another way to show the trade timing: store the buy/sell points in buy_points / sell_points.
    # buy_points are the days on which the holdings increase, sell_points the days on which they decrease.
    # On those days the reference value is stored, so the points can be drawn with a scatter plot
    # on top of the reference curve (green/red markers indicate buys and sells).
    if config.buy_sell_points:
        buy_points = np.where(change > 0, ref_rate, np.nan)
        sell_points = np.where(change < 0, ref_rate, np.nan)
        ax1.scatter(looped_values.index,
                    buy_points,
                    color='green',
                    label='Buy',
                    marker='^',
                    alpha=0.9)
        ax1.scatter(looped_values.index,
                    sell_points,
                    color='red',
                    label='Sell',
                    marker='v',
                    alpha=0.9)

    # put an arrow where the max drawdown is
    ax1.annotate("Max Drawdown",
                 xy=(loop_results["low_date"],
                     return_rate[loop_results["low_date"]]),
                 xycoords='data',
                 xytext=(loop_results["max_date"],
                         return_rate[loop_results["max_date"]]),
                 textcoords='data',
                 arrowprops=dict(width=3,
                                 headwidth=5,
                                 facecolor='black',
                                 shrink=0.),
                 ha='right',
                 va='bottom')
    ax1.legend()

    ax2.set_position([0.05, 0.23, chart_width, 0.18])
    ax2.plot(looped_values.index, position)
    ax2.set_ylabel('Position (fraction invested)')
    ax2.set_xlabel(None)

    ax3.set_position([0.05, 0.05, chart_width, 0.18])
    ax3.bar(looped_values.index, ret)
    ax3.set_ylabel('Daily return')
    ax3.set_xlabel('date')

    # Basic formatting applied to all three axes:
    for ax in [ax1, ax2, ax3]:
        ax.yaxis.tick_right()
        ax.spines['top'].set_visible(False)
        ax.spines['right'].set_visible(False)
        ax.spines['bottom'].set_visible(False)
        ax.spines['left'].set_visible(False)
        ax.grid(True)

    # format the ticks
    # major ticks on years for multi-year backtests, on months otherwise
    if loop_results['years'] > 4:
        major_locator = years
        major_formatter = years_fmt
        minor_locator = months
        minor_formatter = month_fmt_none
    elif loop_results['years'] > 2:
        major_locator = years
        major_formatter = years_fmt
        minor_locator = months
        minor_formatter = month_fmt_s
    else:
        major_locator = months
        major_formatter = month_fmt_l
        minor_locator = weekdays
        minor_formatter = month_fmt_none

    for ax in [ax1, ax2, ax3]:
        ax.xaxis.set_major_locator(major_locator)
        ax.xaxis.set_major_formatter(major_formatter)
        ax.xaxis.set_minor_locator(minor_locator)
        ax.xaxis.set_minor_formatter(minor_formatter)

    plt.show()
Esempio n. 29
0
    pass

# Plot index, DJF means
f=open(indfile, 'rb'); x=pickle.load(f); f.close()
tind=range(12,x.time.size,12)
ind=x.subset(tind=tind); 

for i,tt in enumerate(ind.time):
    ind.data[i]=x.data[tind[i]-1:tind[i]+2].mean(0)

ind.name=indtitle

pl.figure(1,figsize=(12,4)); pl.clf()
ind.plot1d(); ax=pl.gca()
ax.set_xlabel(xlab); ax.set_ylabel(units); ax.set_ylim(indylim)
ax.xaxis.set_major_locator(mdates.YearLocator(tint))
pl.grid(); pl.show()
pl.savefig(indpic)

# Positive composite
f=open(posfile, 'rb'); x=pickle.load(f); f.close()
x.name=postitle; x.units=units; x.copts=copts; x.cbar_opts=cbar_opts; x.map=fmap
pl.figure(2); pl.clf()
x.plot_mapfc()
pl.grid(); pl.show()
pl.savefig(pospic)

# Negative composite
f=open(negfile, 'rb'); x=pickle.load(f); f.close()
x.name=negtitle; x.units=units; x.copts=copts; x.cbar_opts=cbar_opts; x.map=fmap
pl.figure(3); pl.clf()
Esempio n. 30
0
async def rank(ctx: commands.Context, username: str = None) -> None:
    """Show rank graphs for OGS and KGS servers."""
    if username is None:
        last_message = await ctx.message.channel.history(limit=1).flatten()
        user = last_message[0].author
    else:
        user = get_user(username, bot)

    if user is not None:
        infos = requests.get("https://openstudyroom.org/league/discord-api/",
                             params={
                                 'uids': [user.id]
                             }).json()
        info = infos.get(str(user.id))
        if info is not None:
            kgs_username = info.get('kgs_username')
            ogs_username = info.get('ogs_username')
            ogs_id = info.get('ogs_id')
            if kgs_username is not None:
                embed = discord.Embed(title="KGS rank history for " +
                                      str(username),
                                      color=0xeee657)
                embed.set_image(url="http://www.gokgs.com/servlet/graph/" +
                                kgs_username + "-en_US.png")
                add_footer(embed, ctx.author)
                await ctx.send(embed=embed)
            if ogs_username is not None:

                def format_gorank(value):
                    if value == 0:
                        return "1d"
                    elif value > 0:
                        return str(int(value)) + "k"
                    elif value < 0:
                        return str(1 + abs(int(value))) + "d"

                r = requests.get(
                    'https://online-go.com/termination-api/player/' +
                    str(ogs_id) + '/rating-history?speed=overall&size=0')
                rl = r.text.split('\n')
                rank = []
                dates = []
                for game in range(1, len(rl) - 1):
                    rank.append(30 - (31.25) *
                                math.log(float(rl[game].split('\t')[4]) / 850))
                    dates.append(
                        datetime.utcfromtimestamp(int(
                            rl[game].split('\t')[0])).strftime('%d/%m/%Y'))

                x = [datetime.strptime(d, '%d/%m/%Y').date() for d in dates]
                y = range(len(x))

                fig, ax = plt.subplots(nrows=1, ncols=1)
                plt.plot(x, rank, color=(0, 194 / 255, 0))

                ax.xaxis.set_major_formatter(mdates.DateFormatter('\n%Y'))
                ax.xaxis.set_major_locator(mdates.YearLocator())
                ax.invert_yaxis()

                fig.canvas.draw()
                labels = [
                    format_gorank(float(item.get_text().replace('−', '-')))
                    for item in ax.get_yticklabels()
                ]
                ax.set_yticklabels(labels)
                fig.patch.set_facecolor((236 / 255, 236 / 255, 176 / 255))
                ax.patch.set_facecolor('black')
                ax.yaxis.grid(linewidth=0.2)
                plt.title("OGS Rank history for " + ogs_username)

                fig.savefig('Rank.png', bbox_inches='tight')

                file = discord.File('Rank.png',
                                    filename="OGS Rank history for " +
                                    ogs_username + ".png")
                await ctx.send(file=file)

                os.remove('Rank.png')
        return
    # Look for nearest matches, if they exist
    users = bot.get_guild(guild_id).members  # type: List[discord.Member]
    # Just using SequenceMatcher because it's simple and needs no extra library.
    # If keen on better distance metrics, look at installing Jellyfish or FuzzyWuzzy.
    similarities = [(member,
                     max(
                         SequenceMatcher(None, username.lower(),
                                         member.display_name.lower()).ratio(),
                         SequenceMatcher(None, username.lower(),
                                         member.name.lower()).ratio()))
                    for member in users]
    similarities.sort(key=lambda tup: tup[1], reverse=True)

    # unlikely to get 5 with >70% match anyway...
    top_matches = [x for x in similarities[:5]
                   if x[1] > 0.7]  # type: List[Tuple[discord.Member, float]]

    uids = [x[0].id for x in top_matches]
    infos = requests.get("https://openstudyroom.org/league/discord-api/",
                         params={
                             'uids': uids
                         }).json()

    # Split and recombine so that OSR members appear top of list
    osr_members = [
        x for x in top_matches if infos.get(str(x[0].id)) is not None
    ]
    not_osr_members = [x for x in top_matches if x not in osr_members]
    top_matches = osr_members + not_osr_members

    message = ''
    for _i, x in enumerate(top_matches):
        message += '\n{}\N{COMBINING ENCLOSING KEYCAP}**{}**#{} {}'.format(
            _i + 1, x[0].display_name, x[0].discriminator,
            user_rank(x[0], infos))
    if username in roles_dict:
        message += "\n\n However, `" + username + "` is a valid role. Did you mean `!list " + username + "`?"
    nearest_or_sorry = '", nearest matches:' if top_matches else '", sorry'
    embed = discord.Embed(description=message,
                          title='No users by the exact name "' + username +
                          nearest_or_sorry)
    add_footer(embed, ctx.message.author)
    msg = await ctx.send(embed=embed)