Example #1
def generate(M,
             N,
             h_x=0.8,
             h_y=0.8,
             scale=1.,
             signature=False,
             BM=False,
             dim_BM=2):

    if BM:
        X = brownian(M - 1, dim_BM, time=1.)
        Y = brownian(N - 1, dim_BM, time=1.)

    else:
        fbm_generator_X = FBM(M - 1, h_x)
        fbm_generator_Y = FBM(N - 1, h_y)

        x = scale * fbm_generator_X.fbm()
        y = scale * fbm_generator_Y.fbm()

        X = AddTime().fit_transform([x])[0]
        Y = AddTime().fit_transform([y])[0]

    if signature:
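        # depth-5 prefix signatures: with the format argument 2, iisignature.sig
        # returns the running signature of every partial path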
        X = iisignature.sig(X, 5, 2)
        Y = iisignature.sig(Y, 5, 2)

        X0 = np.zeros_like(X[0, :].reshape(1, -1))
        X0[0, 0] = 1.
        X = np.concatenate([X0, X])
        Y = np.concatenate([X0, Y])

    return X, Y
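
# Usage sketch (not part of the original file; assumes brownian, AddTime and
# iisignature are importable as above):
#   X, Y = generate(M=100, N=100, h_x=0.7, h_y=0.3, signature=True)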
Example #2
class FractionalBrownianMotion(BlackScholes):
    def __init__(self,
                 drift,
                 volatility,
                 hurst,
                 nb_paths,
                 nb_stocks,
                 nb_dates,
                 spot,
                 maturity,
                 dividend=0,
                 **keywords):
        super(FractionalBrownianMotion,
              self).__init__(drift, volatility, nb_paths, nb_stocks, nb_dates,
                             spot, maturity, dividend, **keywords)
        self.hurst = hurst
        self.fBM = FBM(n=nb_dates,
                       hurst=hurst,
                       length=maturity,
                       method='cholesky')
        self._nb_stocks = self.nb_stocks

    def _generate_one_path(self):
        """Returns a nparray (nb_stocks * nb_dates) with prices."""
        path = np.empty((self._nb_stocks, self.nb_dates + 1))
        for stock in range(self._nb_stocks):
            path[stock, :] = self.fBM.fbm() + self.spot
        # print("path",path)
        return path

    def generate_one_path(self):
        return self._generate_one_path()
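
# Usage sketch (hypothetical parameters; assumes the BlackScholes base class is
# importable from the surrounding project):
#   model = FractionalBrownianMotion(drift=0.0, volatility=0.2, hurst=0.7,
#                                    nb_paths=10, nb_stocks=2, nb_dates=100,
#                                    spot=100.0, maturity=1.0)
#   path = model.generate_one_path()  # ndarray of shape (nb_stocks, nb_dates + 1)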
Example #3
# imports assumed for this snippet (the original file's import block is not
# shown; the shift/svd sources are a best guess)
import numpy as np
import pywt
import matplotlib.pyplot as plt
from scipy.ndimage import shift
from scipy.linalg import toeplitz, svd
from fbm import FBM

def get_wavelet_var(wavelet, H=0.3, samp_size=1024, plot_sil=1):
    f = FBM(n=samp_size, hurst=H, length=0.5, method='daviesharte')
    fbm_sample = f.fbm()
    t_values = f.times()
    cA, cD = pywt.dwt(fbm_sample, wavelet)

    M = len(cD)
    R = np.zeros(M + 1)

    for k in range(M + 1):
        cD_shift = shift(cD, k, cval=0)
        R[k] = np.sum(cD * np.conjugate(cD_shift))

    print(M + 1)
    print(np.argmin(R))
    plt.plot(R)
    plt.show()
    AutoCorr_mat = toeplitz(R[0:M], R[0:M]) + 2.5
    #pdb.set_trace()

    mean_corr = (np.sum(AutoCorr_mat) - M * AutoCorr_mat[0][0]) / (M**2 - M)
    #print(mean_corr)
    max_corr = np.max(R[1:])
    #pdb.set_trace()
    U, S, V = svd(AutoCorr_mat)
    if (plot_sil != 1):
        plt.plot(np.log10(np.arange(M) + 1), np.log10(S))

    return S[0], 1 + (M - 1) * mean_corr
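
# Usage sketch (hypothetical arguments): leading singular value and effective
# correlation sum for the 'db2' detail coefficients of an fBm sample:
#   s_max, corr_sum = get_wavelet_var('db2', H=0.3, samp_size=1024)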
Example #4
def daviesharte_to_hosking_fallback_test(self):
    # Low n, high hurst
    f = FBM(5, 0.99, 1, method='daviesharte')
    # This only works on py3
    # with self.assertWarns(Warning):
    #     s = f.fbm()
    with warnings.catch_warnings(record=True) as w:
        s = f.fbm()
        self.assertEqual(len(w), 1)
        self.assertIn('invalid for Davies-Harte', str(w[-1].message))
    self.assertEqual('hosking', f.method)
Example #5
File: Hurst.py  Project: DvDxx/fractal_d
def d_h_test(H=None, f=None):
    if H is None:
        H = [x / 100 for x in range(1, 100)]
    fractal_d = []
    for h in H:
        # use the caller's generator if one was passed; otherwise build a fresh
        # generator for each Hurst value
        gen = f if f is not None else FBM(n=2000, hurst=h, length=1, method='daviesharte')
        series = gen.fbm()
        G = VG(series)
        #Y = nx.degree_histogram(G)
        #print(Y)
        X, Y = GC(G)
        LogXI, LogYI = [], []
        for x in X:
            LogXI.append(math.log(x))
        for y in Y:
            LogYI.append(math.log(y))
        fractal_d.append(-1 * np.polyfit(LogXI, LogYI, 1)[0])
        #print(Y)
    return fractal_d
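
# Usage sketch: fractal-dimension estimates for a few Hurst exponents (each h
# draws a fresh Davies-Harte fBm sample of length 2000):
#   dims = d_h_test(H=[0.2, 0.5, 0.8])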
Example #6
    return (right - left) ** 2

# imports assumed for this fragment (the snippet starts mid-file)
import datetime

import numpy as np
import matplotlib.pyplot as plt
from fbm import FBM

iterations = 100

size = 5000
size1 = 20                              # how many particles we have
myarray = np.zeros((size1, size))
myarray2 = np.zeros((size1, size))
myWmsd = np.zeros((size1, size-1))
myWmsd2 = np.zeros((size1, size-1))
coeff = 0.5
f = FBM(size-1, 0.75)
f2 = FBM(size-1, 0.5)

for i in range(myarray.shape[0]):
    myarray[i] = f.fbm()
    myarray2[i] = f2.fbm()


plt.figure()
plt.plot(np.linspace(0, 1, size), myarray[0])
plt.plot(np.linspace(0, 1, size), myarray2[0])
plt.title('fBM particles')
plt.show()

print('save trajectories')
#np.savetxt()

# use one timestamp so the two saved trajectories share a name stem
stamp = str(datetime.datetime.now()).replace(' ', '').replace(':', '').replace('.', '')
np.savetxt('C:\\Users\\arsayder\\PycharmProjects\\diff_masters\\samples\\' + stamp + '.txt', myarray[0])
np.savetxt('C:\\Users\\arsayder\\PycharmProjects\\diff_masters\\samples\\' + stamp + '_2.txt', myarray[1])
#np.savetxt(f'/samples/fbm' + str(datetime.datetime.now()).replace(' ', '') +'.txt', myarray[0])
Example #7
from fbm import FBM
import matplotlib.pyplot as plt

f = FBM(n=100, hurst=0.9, length=1, method='daviesharte')

# Generate a fBm realization
fbm_sample = f.fbm()

# Generate a fGn realization
fgn_sample = f.fgn()

# Get the times associated with the fBm
t_values = f.times()
plt.plot(t_values, fbm_sample)
plt.show()
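
# Note (not in the original snippet): fbm() returns n + 1 points including t = 0,
# while fgn() returns the n increments, so fgn_sample pairs with t_values[1:].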
Example #8
from tensorflow import keras
from fbm import FBM, MBM
import numpy as np
import matplotlib.pyplot as plt
import random

nlength = 14
#Load model
model = keras.models.load_model(".\\good_DLFNNmodels\\model3densediff_n" +
                                str(nlength - 1) + ".h5")
#generate fbm example
simulatedH = []
testH = []
for samples in range(0, 1000):
    Hsim = random.uniform(0, 1)
    f = FBM(n=nlength - 1, hurst=Hsim, length=1, method='hosking')
    x = f.fbm()
    # network input: the nlength - 1 increments of the path, shaped (1, nlength - 1)
    xdiff = np.diff(x).reshape(1, -1)
    # optional range normalization: xdiff /= (np.amax(x) - np.amin(x))
    exp_est = model.predict(xdiff)
    simulatedH.append(Hsim)
    testH.append(exp_est[0][0])
    print('simulated: ', Hsim, 'predicted: ', exp_est)

plt.figure()
plt.plot(simulatedH, testH, 'b.')
plt.plot([0, 1], [0, 1], 'r-')
plt.xlabel('H simulated')
Example #9
        data_file = 'example_data/mrw07005n32768.mat'

    # Complete path to file
    current_dir = os.path.dirname(os.path.abspath(__file__))
    data_file = os.path.join(current_dir, data_file)

    #-------------------------------------------------------------------------------
    # Load data
    #-------------------------------------------------------------------------------
    data = get_data_from_mat_file(data_file)

else:
    from fbm import FBM
    f = FBM(2**15, 0.75)
    fgn_sample = f.fgn()
    fbm_sample = f.fbm()
    if mf_process == 10:
        data = fbm_sample
    if mf_process == 11:
        data = fgn_sample

from fbm import FBM
f = FBM(2**15, 0.75)
data_1 = data  #f.fbm()
f = FBM(2**15, 0.6)
data_2 = f.fbm()
#-------------------------------------------------------------------------------
# Setup analysis
#-------------------------------------------------------------------------------

# Multifractal analysis object
Example #10
def model2(M, tau, H):
    # ### 0. The model:
    # The model used to simulate temperature paths is as in Benth et al.: essentially a discretization of an Ornstein-Uhlenbeck SDE with a linear-periodic mean function and a periodic volatility function:
    # 
    # $dT(t) = ds(t) - \kappa(T(t) - s(t))dt + \sigma(t)dB(t)$
    # 
    # where 
    # 
    # $s(t) = a + bt + \sum_i a_i \sin(2\pi i t/365) + \sum_j b_j \cos(2\pi j t/365)$
    # 
    # and
    # 
    # $\sigma^2(t) = a + \sum_i c_i \sin(2\pi i t/365) + \sum_j d_j \cos(2\pi j t/365)$
    # 
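    # Discretizing on a daily grid (a sketch, assuming a unit time step) yields the
    # AR(1) dynamics fitted in Section 2, with $X_t = T(t) - s(t)$ and $\alpha = 1 - \kappa$:
    #
    # $X_{t+1} = \alpha X_t + \sigma(t)\epsilon_t$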
    
    # In[2]:
    
    
    T = pd.read_csv("CleanedTemperature.csv")
    T.rename(columns={T.columns[0]: 'Date'}, inplace=True)
    T.index = pd.to_datetime(T["Date"])
    T.drop(T.index[T.index.dayofyear == 366],axis=0, inplace = True)
    T.drop("Date", axis = 1, inplace = True)
    #T.plot(subplots = True, fontsize = 8,layout = (4,3))
    #T["Buttonville A"].plot()
    
    
    # ### 1. Regression to obtain $m(t)$
    
    # In[3]:
    
    
    Y = np.array(T)
    #generating the factor space for the mean
    a = np.ones(len(Y))
    t = np.linspace(1,len(Y),len(Y))
    N = 4 #number of sine and cosine functions to include
    n = np.linspace(1,N,N)
    Sines = np.sin(2*np.pi*(np.outer(t,n))/365)
    Cosines = np.cos(2*np.pi*(np.outer(t,n))/365)
    X = np.stack((a, t), axis=1)
    X = np.concatenate((X,Sines,Cosines),axis=1)
    ## making sure the columns are readable
    cols = ['Constant', 'time']
    for i in range(N):
        cols.append('sin(2pi*'+str(i+1)+'t/365)')
    for i in range(N):
        cols.append('cos(2pi*'+str(i+1)+'t/365)')
    X = pd.DataFrame(X,columns = cols)
    
    
    # #### 1.1 Using Lasso Regression to shrink factors to zero
    # The plot below varies the magnitude of the lasso regularization to see which parameters go to zero
    # 
    # Training data:  $(x_t,y_t)$
    # 
    # Model Specification: $Y = \beta X + C$
    # 
    # Lasso regularization: $\underset{\beta}{\operatorname{argmin}} \sum_t (y_t - (\beta x_t + C))^2 + \lambda\|\beta\|_1$
    # 
    # Depending on the value of $\lambda$, the coefficients in beta will shrink to zero
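    # Aside (not in the original): statsmodels' fit_regularized with L1_wt=0.5
    # mixes the L1 and L2 penalties (elastic net); L1_wt=1.0 gives pure lasso.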
    
    # In[4]:
    
    
    Y = np.transpose(Y)
    y = Y[0][:]
    L = []
    model = sm.OLS(y, X)
    for i in range(10):
        results = model.fit_regularized(method = 'elastic_net',alpha=i/10, L1_wt=0.5)
        L.append(results.params)
    
    
    # In[5]:
    
    
    L = pd.DataFrame(L)
    L = L/L.max(axis=0)
    #L.plot()
    #L
    
    
    # In[7]:
    
    
    cols = L.columns[L.iloc[len(L)-1] > 0.001]
    Xs = X[cols]
    
    
    # #### 1.2 Mean Regression Results (p-values, coefficients .... )
    
    # In[8]:
    
    
    model = sm.OLS(y,Xs)
    results = model.fit()
    #print(results.summary())
    
    
    # In[9]:
    
    
    Comparison = pd.DataFrame(results.predict(Xs))
    Comparison["Actual"] = y
    Comparison.rename(columns={Comparison.columns[0]: 'Predicted'}, inplace=True)
    #Comparison.iloc[len(y)-365:len(y)].plot(title = "Buttonville A. Temperature")
    
    
    # ### 2. AR(1) Process for the Residuals
    # 
    # 
    # Discretizing the SDE implies that the mean removed temperature series follows an AR(1) process
    # 
    # $X_{t+1} = \alpha X_t + \sigma(t)\epsilon_t$
    
    # #### 2.1 Motivation for the AR(1) model: the residuals from the mean regression show significant autocorrelation
    
    # In[10]:
    
    
    epsi = Comparison['Actual'] - Comparison['Predicted']
    epsi = np.array(epsi)
    epsi = np.expand_dims(epsi, axis=0)
    
    lag_ = 10  # number of lags (for auto correlation test)
    acf = autocorrelation(epsi, lag_)
    
    lag = 10 # lag to be printed
    ell_scale = 2  # ellipsoid radius coefficient
    fit = 0  # normal fitting
    #InvarianceTestEllipsoid(epsi, acf[0,1:], lag, fit, ell_scale);
    
    
    # In[11]:
    
    
    epsi = Comparison['Actual'] - Comparison['Predicted']
    epsi = np.array(epsi)
    model = sm.tsa.AR(epsi)
    AResults= model.fit(maxlag = 30, ic = "bic",method = 'cmle')
    #print("The maximum number of required lags for the residuals above according to the Bayes Information Criterion is:")
    #sm.tsa.AR(epsi).select_order(maxlag = 10, ic = 'bic',method='cmle')
    
    
    # In[14]:
    
    
    ar_mod = sm.OLS(epsi[1:], epsi[:-1])
    ar_res = ar_mod.fit()
    #print(ar_res.summary())
    
    ep = ar_res.predict()
    #print(len(ep),len(epsi))
    z = ep - epsi[1:]
    
    #plt.plot(epsi[1:],  color='black')
    #plt.plot(ep, color='blue',linewidth=3)
    #plt.title('Residuals AR(1) Process')
    #plt.ylabel(" ")
    #plt.xlabel("Days")
    #plt.legend()
    
    
    # #### 2.2 Invariance check for the residuals of the AR(1) process
    
    # In[15]:
    
    
    z = np.expand_dims(z, axis=0)
    
    lag_ = 10  # number of lags (for auto correlation test)
    acf = autocorrelation(z, lag_)
    
    lag = 10  # lag to be printed
    ell_scale = 2  # ellipsoid radius coefficient
    fit = 0  # normal fitting
    #InvarianceTestEllipsoid(z, acf[0,1:], lag, fit, ell_scale);
    
    
    # #### 2.3 As per Benth, let's see what the residuals of the AR(1) process are doing...
    
    # In[16]:
    
    
    z = ep - epsi[1:]
    #plt.plot(z**2)
    
    
    # ### 3. Modelling the Volatility Term: $\sigma^2(t) $
    
    # In[17]:
    
    
    sigma = z**2
    L = []
    volmodel = sm.OLS(sigma, X[1:])
    for i in range(10):
        volresults = volmodel.fit_regularized(method = 'elastic_net',alpha=i/10, L1_wt=0.5)
        L.append(volresults.params)
    
    
    # In[18]:
    
    
    L = pd.DataFrame(L)
    L = L/L.max(axis=0)
    #L.plot()
    #L
    
    
    # In[21]:
    
    
    volcols = L.columns[L.iloc[len(L)-1] > 0.001]
    Xvol = X[volcols].iloc[1:]
    volmodel = sm.OLS(sigma,Xvol)
    VolResults = volmodel.fit()
    #print(VolResults.summary())
    
    
    # In[22]:
    
    
    VolComparison = pd.DataFrame(VolResults.predict())
    VolComparison["Actual"] = sigma
    VolComparison.rename(columns={VolComparison.columns[0]: 'Predicted'}, inplace=True)
    #VolComparison.ix[0:720]['Actual'].plot(title = "Hamilton Volatility Model")
    #VolComparison.ix[0:720]['Predicted'].plot(title = "Buttonville Volatility Model")
    
    
    # In[23]:
    
    
    epsi = z/(VolResults.predict())**0.5
    epsi = np.expand_dims(epsi, axis=0)
    lag_ = 10  # number of lags (for auto correlation test)
    acf = autocorrelation(epsi, lag_)
    
    lag = 10  # lag to be printed
    ell_scale = 2  # ellipsoid radius coefficient
    fit = 0  # normal fitting
    #InvarianceTestEllipsoid(epsi, acf[0,1:], lag, fit, ell_scale);
    
    
    # ### 4. Monte Carlo Simulation
    
    # In[24]:
    
    
    #tau is the risk horizon
    #tau = 365*2
    
    a = np.ones(tau)
    t = np.linspace(len(y),len(y)+tau,tau)
    N = 4 #number of sine and cosine functions to include
    n = np.linspace(1,N,N)
    Sines = np.sin(2*np.pi*(np.outer(t,n))/365)
    Cosines = np.cos(2*np.pi*(np.outer(t,n))/365)
    X_proj = np.stack((a, t), axis=1)
    X_proj = np.concatenate((X_proj,Sines,Cosines),axis=1)
    temp_cols = ['Constant', 'time']
    for i in range(N):
        temp_cols.append('sin(2pi*'+str(i+1)+'t/365)')
    for i in range(N):
        temp_cols.append('cos(2pi*'+str(i+1)+'t/365)')
    X_proj = pd.DataFrame(X_proj,columns = temp_cols)
    
    
    # In[25]:
    
    
    b = X_proj[cols]
    results.predict(b);
    
    
    # In[26]:
    
    
    #M is the number of monte carlo paths to simulate
    #M = 10000
    invariants = np.zeros((tau,M))
    #Fractional Brownian Motion
    #H = 0.61
    f = FBM(n=tau, hurst=H, length=tau, method='daviesharte')
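    # n = length = tau gives a unit (daily) grid step, so np.diff of each fBm
    # path below yields stationary fractional Gaussian noise innovations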
    #
    for i in range(M):
        invariants[:,i] = np.diff(f.fbm())
    vol_proj = (VolResults.predict(X_proj[volcols]))**0.5
    sig_hat = np.expand_dims(vol_proj, axis=1)*invariants
    AR = np.zeros(sig_hat.shape)
    for i in range(sig_hat.shape[0]-1):
        AR[i+1] = ar_res.params[0]*AR[i]+ sig_hat[i]
    x_proj = X_proj[cols]
    Mean_Temp = np.expand_dims(results.predict(x_proj ),axis=1)
    Temp_Paths = np.repeat(Mean_Temp,M,axis=1)+ AR
    #plt.plot(Temp_Paths[:,0:100],'r--',alpha = 0.01);
    
    
    # In[27]:
    
    
    T_innov = pd.DataFrame(invariants)
    T_out = pd.DataFrame(Temp_Paths)
    T_out.index = np.arange(T.index[-1],T.index[-1] + dt.timedelta(tau),dt.timedelta(days=1)).astype(dt.datetime)
    T_innov.index = T_out.index
    #T_out.to_pickle("C:\\Users\\islipd\\Documents\\Thesis Notebooks\\Tout.pkl")
    #T_innov.to_pickle("C:\\Users\\islipd\\Documents\\Thesis Notebooks\\Tinnov.pkl")
    #T_out.mean(axis = 1).plot()
    #T_out["model#"] = 1
    
    return T_out, T_innov
Example #11
def simulate_fbm_df(d_const, n_dim, n_steps, dt, loc_std=0, hurst=0.5):
    """Simulate and output a single trajectory of fractional brownian motion in a specified number of dimensions.

    :param d_const: diffusion constant in um2/s
    :param n_dim: number of spatial dimensions for simulation (1, 2, or 3)
    :param n_steps: trajectory length (number of steps)
    :param dt: timestep size (s)
    :param loc_std: standard deviation for Gaussian localization error (um)
    :param hurst: Hurst index in range (0,1), hurst=0.5 gives brownian motion
    :return: trajectory dataframe (position in n_dim dimensions, at each timepoint)
    """

    np.random.seed()

    # create fractional brownian motion trajectory generator
    # Package ref: https://github.com/crflynn/fbm
    f = FBM(n=n_steps, hurst=hurst, length=n_steps * dt, method='daviesharte')

    # get time list and trajectory where each timestep has and n-dimensional vector step size
    t_values = f.times()
    fbm_sim = []
    for dim in range(n_dim):
        fbm_sim.append(f.fbm() * np.sqrt(2 * d_const))

    df = pd.DataFrame()
    for i in range(n_steps):

        x_curr = [fbm_sim[dim][i] for dim in range(n_dim)]
        # for initial time point, start at origin and optionally add noise
        if i == 0:
            x_obs_curr = [
                x_curr[dim] + loc_std * np.random.randn()
                for dim in range(n_dim)
            ]
        # for later timepoints, set the "current" position to the one determined by the displacement out of the last timestep
        else:
            x_obs_curr = x_obs_next

        # Get next n-dimensional position
        x_next = [fbm_sim[dim][i + 1] for dim in range(n_dim)]
        # Get noise to add to next position, to get the observed position
        noise_next = [loc_std * np.random.randn() for _dim in range(n_dim)]
        x_obs_next = [x_next[dim] + noise_next[dim] for dim in range(n_dim)]
        # break current and next position into vector and magnitude displacements
        dx_obs = [x_obs_next[dim] - x_obs_curr[dim] for dim in range(n_dim)]
        dx = [x_next[dim] - x_curr[dim] for dim in range(n_dim)]
        dr_obs = np.linalg.norm(dx_obs)
        dr = np.linalg.norm(dx)
        t = t_values[i]

        # Add timestep data to dataframe
        data = {
            't_step': t,
            'x': x_curr,
            'x_obs': x_obs_curr,
            'dx': dx,
            'dx_obs': dx_obs,
            'dr': dr,
            'dr_obs': dr_obs
        }
        df = df.append(data, ignore_index=True)

    return df
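
# Usage sketch (hypothetical parameters): a 2-D trajectory of 100 steps of 0.05 s
# each, mildly subdiffusive, with 0.01 um localization noise:
#   traj_df = simulate_fbm_df(d_const=0.1, n_dim=2, n_steps=100, dt=0.05,
#                             loc_std=0.01, hurst=0.4)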
Example #12
    N_test = int(round((N_data * Train_step_proportion), 0))
    # Generate Unaltered Test Data
    data_x_test = np.sort(
        np.random.uniform(low=-(1 + Extrapolation_size),
                          high=(1 + Extrapolation_size),
                          size=N_test))

    data_y_test = unknown_f(data_x_test)

    # Generate Unaltered Training Data
    data_x = np.sort(np.random.uniform(low=-1, high=1, size=N_train))
    data_y = unknown_f(data_x)
else:
    # Generate Fractional Data
    FBM_Generator = FBM(n=N_data, hurst=0.75, length=1, method='daviesharte')
    data_y_outputs_full = FBM_Generator.fbm()
    data_x_outputs_full = FBM_Generator.times()
    # Partition Data
    data_x, data_x_test, data_y, data_y_test = train_test_split(
        data_x_outputs_full,
        data_y_outputs_full,
        test_size=Test_set_proportion,
        random_state=2020,
        shuffle=True)

    # Reorder Train Set
    indices_train = np.argsort(data_x)
    data_x = data_x[indices_train]
    data_y = data_y[indices_train]

    # Reorder Test Set
Example #13
def test_FBM_method_fallback(n_fallback, hurst_fallback):
    f = FBM(5, 0.99, 1, method="daviesharte")
    with pytest.warns(Warning):
        sample = f.fbm()
Example #14
def test_FBM_fbm(n_good, hurst_good, length_good, fbm_method_good):
    f = FBM(n_good, hurst_good, length_good, fbm_method_good)
    fbm_sample = f.fbm()
    assert isinstance(fbm_sample, np.ndarray)
    assert len(fbm_sample) == n_good + 1
Example #15
File: Hurst.py  Project: DvDxx/fractal_d
def repeat_task(series,N):
    f = FBM(n=10000, hurst=0.75, length=1, method='daviesharte')
    for i in range(N):
        H, c, data  = hurst.compute_Hc(f.fbm() + 10., kind='price', simplified=True)
        yield H
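
# Usage sketch: spread of rescaled-range Hurst estimates over 100 fresh samples
# (note the series argument is unused by this helper):
#   estimates = list(repeat_task(None, 100))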
Example #16
def fbm(self, n, hurst, length=1, method="daviesharte"):
    """One-off sample of fBm."""
    f = FBM(n, hurst, length, method)
    return f.fbm()
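
# Usage sketch (assumes this method sits on a small wrapper class, here called
# Sampler, that delegates to fbm.FBM):
#   path = Sampler().fbm(n=100, hurst=0.7)  # ndarray of length 101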
Example #17
# imports assumed for this snippet
from tensorflow import keras
from fbm import FBM
import numpy as np
import random

nlength = 14
inclength = 40
#Load model
model = keras.models.load_model(".\\good_DLFNNmodels\\model3densediff_n" +
                                str(nlength - 1) + ".h5")
multipath = []
multiexp = []
estmultiexp = []
esttime = []

#stitch together a multifractional fBm path from constant-H segments
for i in range(0, 10):
    Hsim = random.uniform(0, 1)
    randinclength = random.randrange(20) + inclength
    f = FBM(n=randinclength, hurst=Hsim, length=1, method='hosking')
    x = f.fbm()
    if i == 0:
        for p in x:
            multipath.append(p)
            multiexp.append(Hsim)
    else:
        checkpoint = multipath[-1]
        for p in x[1:]:
            multipath.append(checkpoint + p)
            multiexp.append(Hsim)

#symmetric window analysis
eitherside = int(np.floor(float(nlength) / 2.))
for i in range(eitherside, len(multipath) - eitherside):
    #for differencing
    if nlength % 2 == 1:
Example #18
# flake8: noqa
from fbm import FBM
import matplotlib.pyplot as plt
import time
import math
import numpy as np


def h(s):
    # return 0.499*math.sin(t) + 0.5
    # return 0.6 * t + 0.3
    return 0.5 * np.exp(-8.0 * s**2)


fbm_generator = FBM(2**8, 0.85)
t = fbm_generator.times()
fbm_realization = fbm_generator.fbm()
fgn_realization = fbm_generator.fgn()
h_t = np.array(h(fbm_realization))
plt.plot(t, fbm_realization)
plt.plot(t, h_t)
# plt.plot(t[0:-1], fgn_realization)
plt.show()
Example #19
import networkx as nx
from fbm import FBM
import pro_draw  # project-local drawing helper (import assumed)

def HVG(series):
    '''
    Convert a time series to its horizontal visibility graph (HVG).
    :return: networkx Graph built from the time series
    '''
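    # criterion tracks the largest value linked so far: a later point tb is linked
    # while both yb and ya still reach it; the scan stops once ya falls below it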
    N = len(series)
    G = nx.Graph()
    for ta in range(N):
        ya = series[ta]
        criterion = 0
        for tb in range(ta + 1, N):
            yb = series[tb]
            if yb >= criterion:
                if ya >= criterion:
                    criterion = yb
                    G.add_edge(ta,tb)
                else:
                    break
                    #edge_list.append([ta, tb])
    return G

if __name__ == '__main__':
    f = FBM(n=1000, hurst=0.5, length=1, method='daviesharte')
    #series = [x for x in range(50)]
    series = f.fbm()
    #series = [2,4,1,5,6,3,6]
    G = HVG(series)
    pro_draw.draw_graph(G)
    #print(G.number_of_edges())

Example #20
phi = norm.cdf((u + c2 * T) / (sigma * T**H))
print("u=%.4f  H=%.4f" % (u, H))
print("theoretical upper bound = %.3f" %
      (1 - phi + exp(-2 * u * c2 * T**(1 - 2 * H) / sigma**2) * phi))

## =================================================================
##  Fractional Brownian motion simulation
## =================================================================

from fbm import FBM
from tqdm import trange

f = FBM(n=1000, hurst=H, length=T, method='daviesharte')
for i in trange(实验次数, ncols=80):  # 实验次数 = number of simulation runs
    fbm_ts = f.times()
    fbm_asset = f.fbm()
    fbm_asset = [
        u + c2 * fbm_ts[i] - amp**H * fbm_asset[i]
        for i in range(len(fbm_asset))
    ]

    for i in range(len(fbm_asset)):
        if fbm_asset[i] < 0:
            fbm_ts = fbm_ts[:i + 1]
            fbm_asset = fbm_asset[:i + 1]
            break

    if fbm_asset[-1] < 0:
        #print("Ruin time=",fbm_ts[len(fbm_asset)-1],"asset=",fbm_asset[-1:][0])
        破产次数 += 1  # 破产次数 = running count of ruin events
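
# A frequency estimate of the ruin probability could follow (a sketch; 实验次数 is
# the number of runs, 破产次数 the count of ruin events):
#   print("empirical ruin probability:", 破产次数 / 实验次数)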