Example #1
def test_predict_is_intervals_bbvi():
    model = pf.GARCH(data=data, q=1, p=1)
    x = model.fit('BBVI', iterations=100)
    predictions = model.predict_is(h=10, intervals=True)
    assert(np.all(predictions['99% Prediction Interval'].values >= predictions['95% Prediction Interval'].values))
    assert(np.all(predictions['95% Prediction Interval'].values >= predictions['5% Prediction Interval'].values))
    assert(np.all(predictions['5% Prediction Interval'].values >= predictions['1% Prediction Interval'].values))
Example #2
def test_predict_is_intervals_mh():
    model = pf.GARCH(data=data, q=1, p=1)
    x = model.fit('M-H', nsims=400)
    predictions = model.predict_is(h=10, intervals=True)
    assert(np.all(predictions['99% Prediction Interval'].values >= predictions['95% Prediction Interval'].values))
    assert(np.all(predictions['95% Prediction Interval'].values >= predictions['5% Prediction Interval'].values))
    assert(np.all(predictions['5% Prediction Interval'].values >= predictions['1% Prediction Interval'].values))
Example #3
def GarchVol(ret):
    model = pf.GARCH(ret, p=1, q=1)
    x = model.fit()
    assert (len(model.latent_variables.z_list) == 4)
    lvs = np.array([i.value for i in model.latent_variables.z_list])
    assert (len(lvs[np.isnan(lvs)]) == 0)
    return x, model
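
A hypothetical call for context, with synthetic returns standing in for real data (the series length and scale are purely illustrative):

import numpy as np
import pyflux as pf

ret = np.random.normal(0.0, 0.01, 500)   # simulated daily returns
x, fitted_model = GarchVol(ret)
x.summary()              # MLE results table
fitted_model.plot_fit()  # in-sample conditional volatility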
Example #4
def test_predict_is_intervals():
    model = pf.GARCH(data=data, q=2, p=2)
    x = model.fit()
    predictions = model.predict_is(h=10, intervals=True)
    assert(np.all(predictions['99% Prediction Interval'].values >= predictions['95% Prediction Interval'].values))
    assert(np.all(predictions['95% Prediction Interval'].values >= predictions['5% Prediction Interval'].values))
    assert(np.all(predictions['5% Prediction Interval'].values >= predictions['1% Prediction Interval'].values))
Example #5
def roll_prediction_egarch(return_tr, vol_tr, return_ts, vol_ts,
                           training_order, method):

    roll_x = return_tr
    vol_hat = []

    for i in range(len(vol_ts)):

        print('now processing', i)

        tmp_x = roll_x[-training_order:]

        if method == 'garch':
            model = pf.GARCH(tmp_x, p=1, q=1)
            x = model.fit()

        elif method == 'egarch':
            model = pf.EGARCH(tmp_x, p=1, q=1)
            x = model.fit()

        vol_hat.append(np.asarray(model.predict(1))[0][0])
        roll_x = np.concatenate((roll_x, return_ts[i:i + 1]))

    # return forecasts and the RMSE between realized and predicted volatility
    return vol_hat, np.sqrt(
        np.mean((vol_ts - np.asarray(vol_hat)) ** 2))
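
A sketch of how this rolling forecaster might be called; the arrays below are synthetic placeholders, and the train/test split and training_order are illustrative:

import numpy as np

returns = np.random.normal(0.0, 0.01, 1050)
realized_vol = np.abs(returns)               # crude volatility proxy, illustration only
r_tr, r_ts = returns[:1000], returns[1000:]
v_tr, v_ts = realized_vol[:1000], realized_vol[1000:]

vol_hat, rmse = roll_prediction_egarch(r_tr, v_tr, r_ts, v_ts,
                                       training_order=500, method='garch')
print('rolling RMSE:', rmse)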
Example #6
def test_predict_is_nans():
    model = pf.GARCH(data=data, q=2, p=2)
    x = model.fit()
    x.summary()
    predictions = model.predict_is(h=5)
    assert (len(predictions.values[np.isnan(predictions.values)]) == 0)
Example #7
    def _fit_model(self):
        sql_string = "SELECT * FROM underlying_data WHERE symbol='{0}' " \
                     "AND bar_length='1440' AND data_type='TRADES' ORDER BY date".format(self.symbol)
        data = self._read_sql_query(sql_string)
        returns = self._calc_reg_returns(data['close'])

        model = pf.GARCH(returns, p=1, q=1)
        model.fit()

        return model
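
A hypothetical companion method (not part of the original class) showing how the fitted model might be consumed; predict(h=...) returns a DataFrame of forecasts, as in the other examples:

    def _forecast_volatility(self, horizon=5):
        # Hypothetical helper: reuse the GARCH(1,1) fit above to forecast
        # conditional volatility `horizon` steps ahead for this symbol.
        model = self._fit_model()
        return model.predict(h=horizon)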
Example #8
def main():
    symbols = import_from_csv()
    print(symbols)
    with open('simputsW.csv', 'w', newline='') as currdata:
        writer = csv.writer(currdata, delimiter=',')
        writer.writerow([
            "Symbol", "Tag", "Strike", "Price", "Barone-Adesi Whaley",
            "Bjerksund Stensland", "Cox-Ross-Rubinstein", "Jarrow-Rudd",
            "Equal Probabilities", "Trigeorgis", "Tian", "Leisen-Reimer"
        ])
        for i in symbols:
            oc = OptionChain('NASDAQ:' + i, {
                "expy": "2016",
                "expm": "06",
                "expd": "10"
            })
            underlying = float(getQuotes(i)[0]['LastTradeWithCurrency'])
            stock = DataReader(i, 'yahoo', datetime(2014, 1, 1),
                               datetime(2016, 6, 1))
            returns = pd.DataFrame(np.diff(np.log(stock['Adj Close'].values)))
            model = pf.GARCH(abs(returns), p=1, q=1)
            x = model.fit()
            vol = float(
                model.predict(h=7)['0'].values.tolist()[-1]) * float(100)
            for j in oc.puts:
                strike = float(j['strike'])
                if (str(j['p']) != '-'):
                    opttype = Option.Put
                    todaysDate = Date(4, June, 2016)
                    expiryDate = Date(10, June, 2016)
                    print('-' * 32)
                    print(i)
                    print(j['s'])
                    print(str(j['strike']))
                    print(str(j['p']))
                    results = EstimateOption(todaysDate, expiryDate, vol,
                                             opttype, underlying, strike)
                    writer.writerow(
                        [str(i),
                         str(j['s']),
                         str(j['strike']),
                         str(j['p'])] + results)
Example #9
def oneshot_prediction_egarch(return_tr, vol_tr, return_ts, vol_ts, method):

    if method == 'garch1':
        model = pf.GARCH(return_tr, p=1, q=1)
        x = model.fit()

        # GARCH parameterizes sigma^2 directly, so in-sample volatility is sqrt(sigma^2)
        tr_sigma2, _, ___ = model._model(model.latent_variables.get_z_values())
        vol_tr_hat = tr_sigma2**0.5

    elif method == 'egarch1':
        model = pf.EGARCH(return_tr, p=1, q=1)
        x = model.fit()

        # EGARCH parameterizes log(sigma^2), so in-sample volatility is exp(lambda / 2)
        tr_sigma2, _, ___ = model._model(model.latent_variables.get_z_values())
        vol_tr_hat = np.exp(tr_sigma2 / 2.0)

    tmp_pre = np.asarray(model.predict(len(vol_ts)))
    vol_ts_hat = []
    for i in tmp_pre:
        vol_ts_hat.append(i[0])

    # Return test forecasts, in-sample volatility, and RMSE on the test and training sets
    return (vol_ts_hat, vol_tr_hat,
            np.sqrt(np.mean((vol_ts - np.asarray(vol_ts_hat)) ** 2)),
            np.sqrt(np.mean((vol_tr[1:] - np.asarray(vol_tr_hat)) ** 2)))
Example #10
    garch_model.getMu(),
)
print('predict value:', garch_model.predict(sim_data))

# igarch model
igarch_model = IGARCHModel(1, 1, _use_mu=True)
start_time = time.time()
igarch_model.fit(sim_data)
print('fitting time:', time.time() - start_time)
print(
    igarch_model.getAlphas(),
    igarch_model.getBetas(),
    igarch_model.getConst(),
    igarch_model.getMu(),
)
print('predict value:', igarch_model.predict(sim_data))

sim_line, = plt.plot(np.sqrt(garch_sim.var[2:]), label='simulation')
garch_line, = plt.plot(garch_model.getVolatility(), label='garch')
igarch_line, = plt.plot(igarch_model.getVolatility(), label='igarch')
plt.legend(handles=[sim_line, garch_line, igarch_line])
plt.show()

# pyflux's garch model
pf_model = pf.GARCH(sim_data, 1, 1)
start_time = time.time()
pf_ret = pf_model.fit("MLE")
print('fitting time:', time.time() - start_time)
pf_ret.summary()
print(pf_model.predict())
Example #11
import numpy as np
import pyflux as pf
import sys
import pandas as pd

from pandas_datareader.data import DataReader

jpm = DataReader('GOOG', 'google')
returns = pd.DataFrame(np.diff(np.log(jpm['Close'].values)))
returns.index = jpm.index.values[1:jpm.index.values.shape[0]]
returns.columns = ['Google Returns']

model = pf.GARCH(returns, p=1, q=1)
x = model.fit()
#x.summary()

print(np.sqrt(np.abs(returns.head(int(sys.argv[1])))) * 100)

#model.predict(h=10)
Example #12
def test_predict_is_nonconstant():
    model = pf.GARCH(data=data, p=2, q=2)
    x = model.fit()
    predictions = model.predict_is(h=5, intervals=False)
    assert(not np.all(predictions.values==predictions.values[0]))
Example #13
def test_predict_is_length():
    model = pf.GARCH(data=data, p=2, q=2)
    x = model.fit()
    assert(model.predict_is(h=5).shape[0] == 5)
Example #14
def test_pml():
    model = pf.GARCH(data=data, p=1, q=1)
    x = model.fit('PML')
    assert(len(model.latent_variables.z_list) == 4)
    lvs = np.array([i.value for i in model.latent_variables.z_list])
    assert(len(lvs[np.isnan(lvs)]) == 0)
Example #15
def test_bbvi_mini_batch_elbo():
    model = pf.GARCH(data=data, p=1, q=1)
    x = model.fit('BBVI', iterations=100, map_start=False, mini_batch=32, record_elbo=True)
    assert(x.elbo_records[-1]>x.elbo_records[0])
Example #16
def test_bbvi_mini_batch():
    model = pf.GARCH(data=data, p=1, q=1)
    x = model.fit('BBVI', iterations=100, mini_batch=32)
    assert(len(model.latent_variables.z_list) == 4)
    lvs = np.array([i.value for i in model.latent_variables.z_list])
    assert(len(lvs[np.isnan(lvs)]) == 0)
Example #17
def test_ppc():
    model = pf.GARCH(data=data, q=2, p=2)
    x = model.fit('BBVI', iterations=100)
    p_value = model.ppc()
    assert(0.0 <= p_value <= 1.0)
Example #18
def test_sample_model():
    model = pf.GARCH(data=data, q=2, p=2)
    x = model.fit('BBVI', iterations=100)
    sample = model.sample(nsims=100)
    assert(sample.shape[0]==100)
    assert(sample.shape[1]==len(data)-2)
Example #19
import logging

import pyflux as pf

from TorchTSA.model import ARCHModel
from TorchTSA.simulate import ARCHSim

logging.basicConfig(level=logging.INFO)

arch_sim = ARCHSim((0.6, 0.1), _const=0.1, _mu=0.0)
sim_data = arch_sim.sample_n(1000)

arch_model = ARCHModel(2, _use_mu=True)
arch_model.fit(sim_data)
print(
    arch_model.getAlphas(),
    arch_model.getConst(),
    arch_model.getMu(),
)
print('predict value:', arch_model.predict(sim_data))

pf_model = pf.GARCH(sim_data, 0, 2)
pf_ret = pf_model.fit("MLE")
pf_ret.summary()
print(pf_model.predict())
Example #20
        1 - Maximum_Weight / 100.0)
Instrument_Data['AR_Comb_PL'] = Instrument_Data.apply(
    lambda x: x["PUT_" + file]
    if x["AR_Comb_Forecast"] > 0.1 else x["CALL_" + file]
    if x["AR_Comb_Forecast"] < -0.1 else 0,
    axis=1)
Instrument_Data['Total_AR_Comb_PL'] = list(
    accumu(Instrument_Data['AR_Comb_PL'].tolist()))

print "AR Model" + "\tOptimimal Weight: " + str(
    Maximum_Weight) + "\tMax Equity: " + str(Maximum_EQ)
#########################################################################

#############################	GARCH MODEL #############################
logging.info('\t' + str(datetime.now()) + '	Calculating GARCH Model...')
model1 = pf.GARCH(ts_ret2[:len(ts_ret2) // 2 + 1], p=1, q=1)
results_GR = model1.fit()
results_GR.summary()

# Plot Fit in In-Sample Fitting
model1.plot_fit()

GARCH_Pred = np.sqrt(results_GR.signal).tolist()

for i in range(len(ts_ret2) // 2 - 1, len(ts_ret2) - 1):
    # Fit Model again and make prediction
    model2 = pf.GARCH(ts_ret2[:i], p=1, q=1)
    results_GR2 = model2.fit()

    #print model2.predict(h=1)
Example #21
def test_no_terms():
    model = pf.GARCH(data=data, p=0, q=0)
    x = model.fit()
    assert(len(model.latent_variables.z_list) == 2)
    lvs = np.array([i.value for i in model.latent_variables.z_list])
    assert(len(lvs[np.isnan(lvs)]) == 0)
Example #22
                        if arima_results_bic < arima_bic:
                            arima_p = p
                            arima_q = q
                            arima_model = arima_model_middle
                            arima_bic = arima_results_bic
                            arima_diff = diff_num
                if arima_model is None:
                    predict_arima_pf = np.zeros(len(predict_date), )
                else:
                    predict_arima_pf = arima_model.predict(h=len(predict_date))
                    arima_list.append([k, arima_p, arima_q, arima_diff, 'pf'])
            predict_arima = np.zeros(len(predict_date), )

        data_arima_origin = np.array(
            [x[0] for x in np.array(data_arima_origin)])
        GARCH_model = pf.GARCH(data=np.array(data_arima_origin), p=2, q=2)
        GARCH_model.fit('MLE')
        # model.plot_fit()
        predict_garch = GARCH_model.predict(90)

        predict_arima_origin = copy.deepcopy(predict_arima)
        for i in range(len(predict_date)):
            predict_arima[i] = float(predict_arima[i] *
                                     (1 + bodong_small_final[i]) *
                                     (1 * (1 + bodong_big_final[i])))
            if predict_arima[i] < 0:
                predict_arima[i] = int(0)

        if arima_linear_mix == 1:
            arima_linear_weight = [1, 9]
            for i in range(len(predict_date)):
Example #23
a_returns.head()

# Step 1: Visualization
# creating a plot of Adobe Returns
fig, axs = plt.subplots(2, 1, figsize=(12, 12))
axs[0].plot(a_prices)
axs[0].set_title("ADBE price since Dave Joined")
axs[1].plot(a_returns)
axs[1].set_title("ADBE returns since Dave Joined")
plt.show()

pf.acf_plot(a_returns.values.T[0], max_lag=260)
pf.acf_plot(np.square(a_returns.values.T[0]), max_lag=260)

# Step 2: Propose a model
my_model = pf.GARCH(p=1, q=1, data=a_returns)
print(my_model.latent_variables)

# Step 3: Inference
# Using Metropolis-Hastings for approximate inference on the GARCH model
result = my_model.fit('M-H', nsims=20000)
# Plotting latent variables alpha and beta
my_model.plot_z([1, 2])

# Step 4: Evaluate Model Fit
# Plotting the series versus its predicted values
# Can check out of sample performance
# plot the fit of the GARCH model and observe that it is picking
# up volatility clustering in the series
my_model.plot_fit(figsize=(15, 5))
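
The Step 4 comments mention checking out-of-sample performance; a minimal sketch, assuming the predict_is/plot_predict API used in the other examples (the horizon and figure size are illustrative):

# Rolling in-sample predictions: refit on truncated data and compare the
# last 50 one-step-ahead forecasts against the observations that were held back
my_model.plot_predict_is(h=50, figsize=(15, 5))

# Forecast the next 30 periods with prediction intervals
print(my_model.predict(h=30, intervals=True).head())
my_model.plot_predict(h=30, figsize=(15, 5))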
Example #24
#errors
error_var1 =  mean_absolute_error(original_values[:,0],allpredicted_values[:,0])
error_var2 = r2_score(original_values[:,0],allpredicted_values[:,0])


#GARCH
data = pd.DataFrame({'pm25data':np.diff(my_data[:,4][:12000])})
model = pf.GARCH(data=data, p=1, q=1,  target='pm25data')
x = model.fit("MLE")
model.plot_fit()

stop = 15000
start = 14900
interval = 20
nmbr_predictions = 2
allpredicted_values = np.array([])
original_values = np.array([])

for i in range(start,stop,interval):
    i = i+nmbr_predictions
    data = pd.DataFrame({'pm25data':np.diff(my_data[:,4][:i])})
    model = pf.GARCH(data=data, p=0, q=1,  target='pm25data')
    x = model.fit("MLE")