Example No. 1
def square_root_diffusion_euler():
    x0 = 0.25
    kappa = 3.0
    theta = 0.15
    sigma = 0.1
    I = 10000
    M = 50
    T = 2.0  # time horizon in years; assumed to match the parameterized version in Example No. 5
    dt = T / M
    xh = np.zeros((M + 1, I))
    x = np.zeros_like(xh)
    xh[0] = x0
    x[0] = x0
    for t in range(1, M + 1):
        xh[t] = (xh[t - 1] + kappa * (theta - np.maximum(xh[t - 1], 0)) * dt +
                 sigma * np.sqrt(np.maximum(xh[t - 1], 0)) * math.sqrt(dt) *
                 npr.standard_normal(I))
    x = np.maximum(xh, 0)
    plt.figure(figsize=(10, 6))
    plt.hist(x[-1], bins=50)
    plt.xlabel('value of SRD(T)')
    plt.ylabel('frequency')
    plt.show()
    plt.figure(figsize=(10, 6))
    plt.plot(x[:, :100], lw=1.5)
    plt.xlabel('time')
    plt.ylabel('index level')
    plt.show()
    return x
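Most of the snippets in this collection lean on the same handful of libraries; a minimal import preamble consistent with the names used throughout (np, npr, plt, math, scs, sm) would be the following. Examples that need more (h5py, pandas, torch) import or assume those separately.

import math
import numpy as np
import numpy.random as npr
import scipy.stats as scs
import statsmodels.api as sm
import matplotlib.pyplot as plt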
Example No. 2
def geometric_brownian_motion_option_pricing(
        initial_val=100,
        num_samples=10000,
        riskless_rate=0.05,
        volatility_sigma=0.25,
        time_year=2.0,
        num_time_interval_discretization=50):
    dt = time_year / num_time_interval_discretization
    samples = np.zeros((num_time_interval_discretization + 1, num_samples))
    samples[0] = initial_val

    for t in range(1, num_time_interval_discretization + 1):
        samples[t] = samples[t - 1] * np.exp(
            (riskless_rate - 0.5 * (volatility_sigma**2)) * dt +
            volatility_sigma * np.sqrt(dt) * npr.standard_normal(num_samples))

    print(45 * "=")
    print(samples[1])
    plt.figure(figsize=(10, 6))
    plt.hist(samples[-1], bins=50)  # terminal values, independent of the discretization count
    plt.title("Geometric Brownian Motion")
    plt.xlabel('index level')
    plt.ylabel('frequency')
    plt.show()

    plt.figure(figsize=(10, 6))
    plt.plot(samples[:, :10], lw=1.5)
    plt.xlabel('time')
    plt.ylabel('index level')
    plt.title('Sample Path')
    plt.show()

    return samples
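One natural use of the simulated paths is a Monte Carlo estimate of a European call. A short sketch; the strike K = 105 is illustrative (not from the original), and r and T must match the defaults above:

samples = geometric_brownian_motion_option_pricing()
K = 105.0           # illustrative strike
r, T = 0.05, 2.0    # match riskless_rate and time_year above
C0 = math.exp(-r * T) * np.maximum(samples[-1] - K, 0).mean()
print('European call value: %.3f' % C0)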
Example No. 3
def jump_diffusion():
    S0 = 100.0
    r = 0.05
    sigma = 0.2
    lamb = 0.05
    mu = -0.6
    delta = 0.25
    rj = lamb * (math.exp(mu + 0.5 * delta**2) - 1)
    T = 1.0
    M = 50
    I = 10000
    dt = T / M

    S = np.zeros((M + 1, I))
    S[0] = S0
    sn1 = npr.standard_normal((M + 1, I))
    sn2 = npr.standard_normal((M + 1, I))
    poi = npr.poisson(lamb * dt, (M + 1, I))
    for t in range(1, M + 1, 1):
        S[t] = S[t - 1] * (np.exp(
            (r - rj - 0.5 * sigma**2) * dt + sigma * math.sqrt(dt) * sn1[t]) +
                           (np.exp(mu + delta * sn2[t]) - 1) * poi[t])
        S[t] = np.maximum(S[t], 0)  # floor the level at zero at every step
    plt.figure(figsize=(10, 6))
    plt.hist(S[-1], bins=50)
    plt.xlabel('value')
    plt.ylabel('frequency')
    plt.show()

    plt.figure(figsize=(10, 6))
    plt.plot(S[:, :100], lw=1.)
    plt.xlabel('time')
    plt.ylabel('index level')
    plt.show()

    return S
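Because rj compensates for the expected jump size, the risk-neutral identity E[S_T] = S0 * exp(r * T) should hold up to Monte Carlo error. A quick check, with the constants hard-coded to match S0 = 100, r = 0.05 and T = 1.0 above:

S = jump_diffusion()
print('simulated E[S_T]:        %.2f' % S[-1].mean())
print('theoretical S0*exp(r*T): %.2f' % (100.0 * math.exp(0.05 * 1.0)))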
Example No. 4
def square_root_diffusion_exact(initial_val=0.05,
                                kappa=3.0,
                                theta=0.02,
                                sigma=0.1,
                                time_year=2,
                                num_samples=10000,
                                num_time_interval_discretization=50):
    x = np.zeros((num_time_interval_discretization + 1, num_samples))
    x[0] = initial_val
    dt = time_year / num_time_interval_discretization

    # the transition density is a scaled noncentral chi-square; df and c are constant
    df = 4 * theta * kappa / sigma**2
    c = (sigma**2 * (1 - np.exp(-kappa * dt))) / (4 * kappa)
    for t in range(1, num_time_interval_discretization + 1):
        nc = np.exp(-kappa * dt) / c * x[t - 1]
        x[t] = c * npr.noncentral_chisquare(df, nc, size=num_samples)

    plt.figure(figsize=(10, 6))
    plt.hist(x[-1], bins=50)
    plt.title("Square root diffusion Exact")
    plt.xlabel('value')
    plt.ylabel('frequency')
    plt.show()

    plt.figure(figsize=(10, 6))
    plt.plot(x[:, :10], lw=1.5)
    plt.xlabel('time')
    plt.ylabel('index level')
    plt.title('Sample Path SRD Exact')
    plt.show()

    return x
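Since this exact scheme and the Euler discretization in Example No. 5 share the same defaults, a quick consistency check (assuming both functions are in scope) is to compare terminal means, which should agree closely:

x_exact = square_root_diffusion_exact()
x_euler = square_root_diffusion_euler()
print('exact scheme terminal mean: %.5f' % x_exact[-1].mean())
print('Euler scheme terminal mean: %.5f' % x_euler[-1].mean())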
Example No. 5
def square_root_diffusion_euler(initial_val=0.05,
                                kappa=3.0,
                                theta=0.02,
                                sigma=0.1,
                                time_year=2,
                                num_samples=10000,
                                num_time_interval_discretization=50):
    dt = time_year / num_time_interval_discretization

    xh = np.zeros((num_time_interval_discretization + 1, num_samples))
    x = np.zeros_like(xh)
    xh[0] = initial_val
    x[0] = initial_val
    for t in range(1, num_time_interval_discretization + 1):
        xh[t] = (xh[t - 1] + kappa * (theta - np.maximum(xh[t - 1], 0)) * dt +
                 sigma * np.sqrt(np.maximum(xh[t - 1], 0)) * math.sqrt(dt) *
                 npr.standard_normal(num_samples))
    x = np.maximum(xh, 0)

    plt.figure(figsize=(10, 6))
    plt.hist(x[-1], bins=50)
    plt.xlabel('value')
    plt.ylabel('frequency')
    plt.title('Square root diffusion Approx Euler')
    plt.show()

    plt.figure(figsize=(10, 6))
    plt.plot(x[:, :10], lw=1.5)
    plt.xlabel('time')
    plt.ylabel('index level')
    plt.title('Sample Path SRD approx')
    plt.show()

    return x
Example No. 6
def graph_error_mean_per_hour(dataset, pred, column):
    data = dataset.iloc[dataset.shape[0] - len(pred):].copy()  # copy so the assignments below do not hit a view
    data["Pred"] = np.around(pred, 5)
    if column == "Diff":
        res = np.delete(data["Close"].to_numpy(), -1)
        res = np.insert(res, 0, 0)
        data["Pred"] = data["Pred"] + res
    data["AEM"] = np.abs(np.around(data.Close - data.Pred, 5))
    data['Hour'] = data.Date.apply(lambda x: x.hour)
    data["Diff"] = data.Close.diff().apply(abs).fillna(0)

    plt.figure(figsize=(14, 6))
    plt.hist(data.where((data.Hour > 5) & (data.Hour < 20)).dropna()["AEM"],
             150,
             density=True,
             range=(0, 0.003))
    plt.show()

    diff_mean = data.groupby("Hour").mean().Diff
    x_ch = diff_mean.index
    y_ch = diff_mean

    diff_mean_aem = data.groupby("Hour").mean().AEM
    x = diff_mean_aem.index
    y = diff_mean_aem

    plt.figure(figsize=(14, 6))
    plt.bar(x_ch, y_ch, color="green")
    plt.bar(x, y, color="r", alpha=0.7)
    plt.title(
        "Absolute mean of error in predictions per hour compared to absolute mean of price changes",
        fontsize=16)
    plt.show()
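A usage sketch with synthetic data; the column names ('Date', 'Close') and the hourly frequency are assumptions inferred from how the function reads the frame:

import pandas as pd

dates = pd.date_range('2021-01-01', periods=200, freq='H')
df = pd.DataFrame({'Date': dates,
                   'Close': 1.10 + np.random.randn(200).cumsum() * 1e-4})
pred = df['Close'].to_numpy()[-100:] + np.random.randn(100) * 1e-4  # fake predictions
graph_error_mean_per_hour(df, pred, column='Close')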
Example No. 7
    def generate_start_time_figures(self):
        recording_time_grouped_by_patient = self.pain_data[["PatientID", "NRSTimeFromEndSurgery_mins"]].groupby("PatientID")
        recording_start_minutes = recording_time_grouped_by_patient.min()

        fig1 = "fig1.pdf"
        fig2 = "fig2.pdf"

        plt.figure(figsize=[8,4])
        plt.title("Pain score recording start times", fontsize=14).set_y(1.05) 
        plt.ylabel("Occurrences", fontsize=14)
        plt.xlabel("Recording Start Time (minutes)", fontsize=14)
        plt.hist(recording_start_minutes.values, bins=20, color="0.5")
        plt.savefig(os.path.join(self.tmp_directory, fig1), bbox_inches="tight")

        plt.figure(figsize=[8,4])
        plt.title("Pain score recording start times, log scale", fontsize=14).set_y(1.05) 
        plt.ylabel("Occurrences", fontsize=14)
        plt.xlabel("Recording Start Time (minutes)", fontsize=14)
        plt.hist(recording_start_minutes.values, bins=20, log=True, color="0.5")
        plt.savefig(os.path.join(self.tmp_directory, fig2), bbox_inches="tight")

        # save the figures in panel format
        f = open(os.path.join(self.tmp_directory, "tmp.tex"), 'w')
        f.write(r"""
            \documentclass[%
            ,float=false % this is the new default and can be left away.
            ,preview=true
            ,class=scrartcl
            ,fontsize=20pt
            ]{standalone}
            \usepackage[active,tightpage]{preview}
            \usepackage{varwidth}
            \usepackage{graphicx}
            \usepackage[justification=centering]{caption}
            \usepackage{subcaption}
            \usepackage[caption=false,font=footnotesize]{subfig}
            \renewcommand{\thesubfigure}{\Alph{subfigure}}
            \begin{document}
            \begin{preview}
            \begin{figure}[h]
                \begin{subfigure}{0.5\textwidth}
                        \includegraphics[width=\textwidth]{""" + fig1 + r"""}
                        \caption{Normal scale}
                \end{subfigure}\begin{subfigure}{0.5\textwidth}
                        \includegraphics[width=\textwidth]{""" + fig2 + r"""}
                        \caption{Log scale}
                \end{subfigure}
            \end{figure}
            \end{preview}
            \end{document}
        """)
        f.close()
        subprocess.call(["pdflatex", 
                            "-halt-on-error", 
                            "-output-directory", 
                            self.tmp_directory, 
                            os.path.join(self.tmp_directory, "tmp.tex")])
        shutil.move(os.path.join(self.tmp_directory, "tmp.pdf"), 
                    os.path.join(self.output_directory, "pain_score_start_times.pdf"))
Example No. 8
def plot_charts(self):
    print(self.final_portfolio_valuation)
    plt.figure(figsize=(10, 6))
    plt.hist(self.final_portfolio_valuation, bins=100)
    plt.title("Final exit valuation of the complete portfolio after {} years of Geometric Brownian Motion".format(self.max_year))
    plt.xlabel('Exit Valuation')
    plt.ylabel('frequency')
    plt.show()
Example No. 9
def compute_bsm_logNormal_options(current_val=100.0,
                                  riskless_rate=0.05,
                                  volatility_sigma=0.25,
                                  time_year=2.0,
                                  num_samples=10000):
    val_at_T = current_val * npr.lognormal(
        (riskless_rate - 0.5 * (volatility_sigma ** 2)) * time_year,
        volatility_sigma * math.sqrt(time_year), size=num_samples)
    plt.hist(val_at_T, bins=50)
    plt.xlabel('Index Value')
    plt.ylabel('Frequency')
    plt.show()
    print(val_at_T)
    return val_at_T
Example No. 10
def short_analysis(data, analysis_file_path, fig_root_name):

    # Suppose there are two idx for rt
    for rt_idx in [1, 2]:

        # Convert your data in array for easier manipulation
        rt_column_name = "RT {}".format(rt_idx)
        rt = np.asarray(data[rt_column_name])
        rt_mt_column_name = "RT-MT {}".format(rt_idx)
        rt_mt = np.asarray(data[rt_mt_column_name])

        # Keep rows where both 'rt' and 'rt_mt' are non-zero
        cond0 = rt != 0
        cond1 = rt_mt != 0

        # Combine the two conditions element-wise
        idx = cond0 & cond1

        # Use the booleans as index and make a cut in your data
        rt = rt[idx]
        rt_mt = rt_mt[idx]

        # Compute 'mt'
        mt = rt_mt - rt

        print("Short analysis.")
        print("'mt {}' is: \n".format(rt_idx), mt)

        # Save this in a new 'xlsx' file
        new_data = dict()
        new_data["RT{}".format(rt_idx)] = rt
        new_data["MT{}".format(rt_idx)] = mt
        write_a_new_file(file_path=analysis_file_path, data=new_data)

        # Do some plots
        plt.scatter(mt, rt)
        plt.xlabel("mt")
        plt.ylabel("rt")
        plt.savefig("{}_scatter_rt{}.pdf".format(fig_root_name, rt_idx))
        plt.close()

        plt.hist(mt)
        plt.xlabel("mt")
        plt.savefig("{}_hist_mt{}.pdf".format(fig_root_name, rt_idx))
        plt.close()

        plt.hist(rt)
        plt.xlabel("rt")
        plt.savefig("{}_hist_rt{}.pdf".format(fig_root_name, rt_idx))
        plt.close()
Example No. 11
def montecarlo_simulation():
    S0 = 100
    r = 0.05
    sigma = .25
    T = 2.0
    I = 10000
    STD = npr.standard_normal(I)
    ST1 = S0 * np.exp((r - 0.5 * sigma**2) * T + sigma * math.sqrt(T) * STD)

    plt.figure(figsize=(10, 6))
    plt.hist(ST1, bins=50)
    plt.xlabel("Index level")
    plt.ylabel('frequency')
    plt.show()

    return ST1
Example No. 12
def value_at_risk_gbm(initial_val=100):
    val_t = compute_bsm_simple_options(current_val=initial_val,
                                       volatility_sigma=0.25,
                                       time_year=30 / 365)
    R_gbm = np.sort(val_t)

    plt.figure(figsize=(10, 6))
    plt.hist(R_gbm, bins=50)
    plt.title("Value at Risk")
    plt.xlabel('absolute return')
    plt.ylabel('frequency')
    plt.show()

    percs = [0.01, 0.1, 1., 2.5, 5.0, 10.0]
    var = scs.scoreatpercentile(R_gbm, percs)
    print(var)
    print('%16s %16s' % ('Confidence Level', 'Value-at-Risk'))
    print(33 * '-')
    for pair in zip(percs, var):
        print('%16.2f %16.3f' % (100 - pair[0], initial_val - pair[1]))
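compute_bsm_simple_options is not shown in this collection; by analogy with compute_bsm_logNormal_options in Example No. 9, a plausible sketch (the signature and defaults are assumptions) is:

def compute_bsm_simple_options(current_val=100.0,
                               riskless_rate=0.05,
                               volatility_sigma=0.25,
                               time_year=2.0,
                               num_samples=10000):
    # Terminal GBM values, simulated directly from a standard normal draw
    return current_val * np.exp(
        (riskless_rate - 0.5 * volatility_sigma**2) * time_year +
        volatility_sigma * math.sqrt(time_year) *
        npr.standard_normal(num_samples))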
Example No. 13
def evaluate(dataloader, model, device, plot_results=False):
    gt = []
    preds = []

    for i, data in enumerate(dataloader, 0):

        # get the inputs; data is a list of [inputs, labels]
        with torch.no_grad():
            inputs, labels = data
            outputs = model(inputs.to(device)).squeeze().cpu()

        gt.extend(list(labels.numpy()))
        preds.extend(list(outputs.numpy()))

    gt = np.array(gt)
    preds = np.array(preds)
    print(f'testset evaluation completed, mean square error: {MSE(gt,preds)/np.pi:0.3f}, mean abs: {MAE(gt,preds):0.3f}\n')
    if plot_results:
        plt.figure()
        plt.hist(np.degrees(preds-gt), bins=20, density=True, alpha=0.7)
    return gt, preds
Example No. 14
def histogram(arr,
              valid_range=(0, 1),
              bins=10,
              normed=False,
              cumulative=False,
              file_path='hist.png',
              title=None):
    '''
    Plots a histogram for an input array over a specified range.
    '''
    # Can accept either a gdal.Dataset or numpy.array instance
    if not isinstance(arr, np.ndarray):
        arr = arr.ReadAsArray()

    plt.hist(arr.ravel(),
             range=valid_range,
             bins=bins,
             density=normed,  # Matplotlib removed 'normed'; 'density' is its replacement
             cumulative=cumulative)
    if title is not None:
        plt.title(title)

    plt.savefig(file_path)
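A minimal usage sketch (the array and file name are illustrative):

arr = np.clip(npr.standard_normal(10000) * 0.2 + 0.5, 0, 1)  # synthetic values in [0, 1]
histogram(arr, valid_range=(0, 1), bins=20,
          file_path='sample_hist.png', title='Sample histogram')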
Example No. 15
def geometric_brownian_motion():
    S0 = 100
    r = 0.05
    I = 10000
    M = 50
    sigma = .25
    T = 2.0
    dt = T / M
    S = np.zeros((M + 1, I))
    S[0] = S0
    for t in range(1, M + 1):
        S[t] = S[t - 1] * np.exp((r - 0.5 * sigma**2) * dt + sigma *
                                 math.sqrt(dt) * npr.standard_normal(I))
    plt.figure(figsize=(10, 6))
    plt.hist(S[-1], bins=50)
    plt.xlabel("Index level (Brownian)")
    plt.ylabel('frequency')
    plt.show()
    plt.figure(figsize=(10, 6))
    plt.plot(S[:, :1000], lw=1.5)
    plt.xlabel('time')
    plt.ylabel('index level')
    plt.show()
    return S
Example No. 16
def mcs_simulation_py(p):
    # Pure-Python Monte Carlo simulation of GBM paths, one time step and one
    # path at a time; r, sigma and T are globals in the source notebook
    M, N, S0 = p
    dt = T / M
    S = np.zeros((M + 1, N))
    S[0] = S0
    rn = np.random.standard_normal(S.shape)
    # ... for each sub-interval of time ...
    for t in range(1, M + 1):
        # for each path
        for k in range(N):
            expr = (r - sigma**2 / 2) * dt + sigma * math.sqrt(dt) * rn[t, k]
            S[t, k] = S[t - 1, k] * math.exp(expr)
    return S

%time S = mcs_simulation_py((M, N, S0))
S[-1].mean()
S0 * math.exp(r * T)  # risk-neutral expectation, for comparison

# .... Call price .....
C0 = math.exp(-r * T) * np.maximum(S[-1] - K, 0).mean()
# .... Put price .....
P0 = math.exp(-r * T) * np.maximum(K - S[-1], 0).mean()

# ................................................................
#   Histogram of terminal values
# ................................................................
plt.hist(S[-1], bins=100, edgecolor='darkblue', color='darkgray')
plt.vlines(x=S[-1].mean(), ymin=0, ymax=450,
           linestyle='dotted', color='darkred')
plt.vlines(x=S[0], ymin=0, ymax=450,
           linestyle='dotted', color='darkgreen')
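The double loop above is the pure-Python baseline; a vectorized NumPy equivalent (a sketch under the same assumed globals r, sigma and T) simulates the same model in a fraction of the time:

def mcs_simulation_np(p):
    # Vectorized Monte Carlo simulation of GBM paths
    M, N, S0 = p
    dt = T / M
    rn = np.random.standard_normal((M + 1, N))
    step = np.exp((r - sigma**2 / 2) * dt + sigma * math.sqrt(dt) * rn)
    step[0] = 1.0  # no move at t = 0
    return S0 * step.cumprod(axis=0)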
Example No. 17
import h5py
import numpy as np
from pylab import plt

with h5py.File('outs/nrough_00.h5', 'r') as fid:
    nlin_corr = fid['nlin_correction'][...]
    corr_vis = nlin_corr[:,0].reshape([-1,3])
    corr_He = nlin_corr[:,1].reshape([-1,3])
    corr_rake = nlin_corr[:,2].reshape([-1,3])
    corr_total = corr_vis + corr_He + corr_rake
    d_pred = fid['d_pred'][...].reshape([-1,3])


norm_corr_vis = np.linalg.norm(corr_vis, axis=1)
norm_corr_He = np.linalg.norm(corr_He, axis=1)
norm_corr_rake = np.linalg.norm(corr_rake, axis=1)

norm_corr_total = np.linalg.norm(corr_total, axis=1)

norm_d_pred = np.linalg.norm(d_pred, axis=1)

percentage = abs(norm_corr_total)/abs(norm_d_pred)

plt.hist(percentage, range=(0,1), bins=40)
plt.show()
Example No. 18
def plot_r_histogram(self):
    newfig('Histogram of sqrt(x^2 + y^2)', 'distance from origin [m]', 'Count')
    t = self.v.state.t[self.v.state.istart:self.v.state.iend]
    r = np.sqrt(self.v.state.north[self.v.state.istart:self.v.state.iend]**2 +
                self.v.state.east[self.v.state.istart:self.v.state.iend]**2)
    plt.hist(r, 50)
Example No. 19
import viscojapan as vj

tp = np.loadtxt('ozawa_2011_obs_file', dtype='4a,3f')
sites = [ii[0] for ii in tp]
disp0 = np.asarray([ii[1] for ii in tp]).flatten()
e0 = disp0[0::3]
n0 = disp0[1::3]
u0 = disp0[2::3]

ep = vj.EpochalDisplacement('cumu_post_with_seafloor.h5',
                            filter_sites=sites)
disp1 = ep[0].flatten()
e1 = disp1[0::3]
n1 = disp1[1::3]
u1 = disp1[2::3]

h_diff = np.sqrt((e0-e1)**2 + (n0-n1)**2) / np.sqrt(e0**2 + n0**2)
v_diff = np.abs(u0-u1) / np.abs(u0)

#plt.hist(h_diff, bins=20)
plt.hist(v_diff[~np.isinf(v_diff)], bins=10, range=(1, 200))
plt.xlabel("Relative difference between mine and Ozawa's with respect to Ozawa's")
plt.ylabel('No. of stations')
plt.grid(True)
plt.savefig('error.pdf')
#plt.xlim([0,1])
plt.show()

Example No. 20
#..................................
#  exp(  Standard Normal )
#..................................
S0 = 100
r = 0.05
sigma = 0.25
T = 2.0
N = 10**4

expr = (r -
        sigma**2 / 2) * T + sigma * np.sqrt(T) * np.random.standard_normal(N)
ST1 = S0 * np.exp(expr)
del expr

plt.figure(figsize=(10, 6))
plt.hist(ST1, bins=100, edgecolor='darkgray', color='darkblue')
plt.xlabel('index level')
plt.ylabel('frequency')
plt.title('Standard Normal')

#..................................
#  log-normal(  mu, sigma )
#..................................
mu = (r - sigma**2 / 2) * T
sigma_l = sigma * math.sqrt(T)

ST2 = S0 * np.random.lognormal(mu, sigma_l, size=N)

plt.figure(figsize=(10, 6))
plt.hist(ST2, bins=100, edgecolor='darkgray', color='darkred')
plt.xlabel('index level')
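ST1 and ST2 are draws from the same lognormal terminal distribution, so their sample moments should match up to Monte Carlo error; a quick check:

print('ST1 mean: %8.2f  std: %7.2f' % (ST1.mean(), ST1.std()))
print('ST2 mean: %8.2f  std: %7.2f' % (ST2.mean(), ST2.std()))
print('theoretical mean S0*exp(r*T): %.2f' % (S0 * math.exp(r * T)))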
Example No. 21
plt.subplot(212)  # the second subplot in the first figure
plt.plot([4, 5, 6])

plt.figure(2)  # the second figure
plt.plot([4, 5, 6])  # creates subplot(111) by default

plt.figure(1)  # switch back to figure 1; subplot(212) is still the current subplot
plt.subplot(211)  # make subplot(211) the current subplot of figure 1
plt.title('Easy as 1,2,3')  # add a title to subplot 211

'==========================================='
mu, sigma = 100, 15
x = mu + sigma * np.random.randn(10000)

# histogram of the data
n, bins, patches = plt.hist(x, 50, density=True, facecolor='g', alpha=0.75)

plt.xlabel('Smarts')
plt.ylabel('Probability')
# add a title
plt.title('Histogram of IQ')
# add explanatory text
plt.text(60, .025, r'$\mu=100,\ \sigma=15$')
plt.axis([40, 160, 0, 0.03])
plt.grid(True)
plt.show()

'==========================================='

ax = plt.subplot(111)
Example No. 23
# assumes S0, r, sigma, dt, M and N are defined earlier in the source notebook
S_BM = np.zeros((M + 1, N))
S_BM[0] = S0

for t in range(1, M + 1):
    expr = (r - 0.5 * sigma**2) * dt + sigma * math.sqrt(dt) * np.random.standard_normal(N)
    S_BM[t] = S_BM[t - 1] * np.exp(expr)
    del expr

S_T = S_BM[-1]

 
mu_3s = np.mean(S_T) + 3 * np.std(S_T)
mu_5s = np.mean(S_T) + 5 * np.std(S_T)
#............................................................................
#   Histogram of final values
#............................................................................
plt.hist(S_T, bins=100, edgecolor='darkgray', color='darkblue')
plt.axvline(S0, linestyle='dashed', alpha=0.8, color='darkred')
plt.axvline(np.mean(S_T), linestyle='dashed', alpha=0.8, color='red')

plt.axvline(mu_3s, linestyle='dashed', alpha=0.8, color='red')
plt.annotate(r'$\mu + 3\sigma = $' + str(round(mu_3s, 1)), xy=(mu_3s, 500))

plt.axvline(mu_5s, linestyle='dashed', alpha=0.8, color='red')
plt.annotate(r'$\mu + 5\sigma = $' + str(round(mu_5s, 1)), xy=(mu_5s, 500))

plt.xlabel('index level')
plt.ylabel('frequency')
plt.title('Geometric Brownian Motion: distribution of $S_T$')


Example No. 24
R2adjval = R2adj(pred_vec, true_vec)
print("The R2adj is: " + str(R2adjval))

#MAPEval = MAPE(pred_vec,true_vec)
#print("The MAPE is: " + str(MAPEval))

plt.plot(pred_vec)
plt.plot(true_vec)
plt.show()

#plt.plot(S[:,0])
#plt.show()

plt.plot(np.square(true_vec - pred_vec))
plt.show()

bins = np.arange(-100, 100, 0.05)  # fixed bin size

plt.xlim([min(S[:, 0]) - 0.05, max(S[:, 0]) + 0.05])

plt.hist(S[:, 0], bins=bins, alpha=0.5)
plt.title('Katrina Outlier Distribution (fixed bin size)')
plt.xlabel('variable X (bin size = 0.05)')
plt.ylabel('count')

plt.show()

plt.plot(S[:, 0])
plt.show()
Example No. 25
print_statistics(log_returns.flatten())

# In[15]:

log_returns.mean() * M + 0.5 * sigma**2

# In[16]:

log_returns.std() * math.sqrt(M)

# In[17]:

plt.figure(figsize=(10, 6))
plt.hist(log_returns.flatten(),
         bins=70,
         density=True,
         label='frequency',
         color='b')
plt.xlabel('log_return')
plt.ylabel('frequency')
x = np.linspace(plt.axis()[0], plt.axis()[1])
plt.plot(x,
         scs.norm.pdf(x, loc=r / M, scale=sigma / np.sqrt(M)),
         'r',
         lw=2.0,
         label='pdf')
plt.legend()

# In[18]:

sm.qqplot(log_returns.flatten()[::500], line='s')
Example No. 26
history = []
best_test_mse = float('inf')
for epoch in range(num_epochs):  # assumed preamble; the snippet begins mid-training-loop
    train_results = train_one_epoch(trainloader, model.train(), device, epoch)
    eval_gt, eval_preds = evaluate(testloader, model.eval(), device)
    test_results = {'mse': MSE(eval_gt, eval_preds), 'mae': MAE(eval_gt, eval_preds) }
    history.append({'train': train_results, 'test': test_results})

    if test_results['mse'] < best_test_mse:
        best_test_mse = test_results['mse']
        torch.save(model, Config.output_dir / 'model_weights_best.pt')

print('Finished Training')
#%% save
import pickle

with open(Config.output_dir / 'history.pkl', 'wb') as f:  # binary pickle, not JSON
    pickle.dump(history, f)

#%% final evaluation and visualisation

model = torch.load(Config.output_dir / 'model_weights_best.pt').eval().to(device)

print('\nFINAL EVALUATION:')
gt, preds = evaluate(trainloader, model, device)
mse = MSE(gt, preds)
print(f'train dataset: {mse}')
_, bins, _ = plt.hist(np.degrees(preds-gt), bins=20, density=True, alpha=0.7, label=f'train, mse: {mse:0.2f}')

gt, preds = evaluate(testloader, model, device)
mse = MSE(gt, preds)
print(f'test dataset: {mse}')
plt.hist(np.degrees(preds - gt), bins=bins, density=True, alpha=0.7, label=f'test, mse: {mse:0.2f}')
plt.legend()