Example #1
import warnings
import ft

# ignore warnings
warnings.filterwarnings("ignore")

mode = "daily"
#mode = "weekly"
#mode = "monthly_data+"

dailyfile = open('./daily/wti.csv', 'r')
weeklyfile = open('./weekly/wti_week.csv', 'r')
#monthlyfile = open('./monthly/wti_month.csv', 'r')

if (mode == "daily"):  # Daily
    print("===DAILY DATASET===")
    data = ft.readData(dailyfile, '2000-01-03', '2020-08-30')
elif (mode == "weekly"):  # Weekly_original
    print("===WEEKLY DATASET===")
    data = ft.readData(weeklyfile, '1986-01-03', '2020-08-28')
elif (mode == "monthly"):  # Monthly
    print("===MONTHLY DATASET===")
    #data = ft.readData(monthlyfile, '1960-01-01', '2020-06-01')

# hyperparameters
test_ratio = 0.2
ARIMA_order = (3, 1, 3)

# train / test split
test_size = int(len(data) * test_ratio)
print("size of dataset:", len(data))
print("size of test dataset:", test_size)
Example #2
'''
#mode = "daily"
mode = "weekly_origin"
#mode = "weekly_tau1"
#mode = "weekly_tau1_for_monthly"
#mode = "monthly"
#mode = "weekly_data+"
#mode = "monthly_data+"

dailyfile = open('./daily/wti.csv', 'r')
weeklyfile = open('./weekly/wti_week.csv', 'r')
monthlyfile = open('./monthly/wti_month.csv', 'r')

if (mode == "daily"):  # Daily
    print("===DAILY DATASET===")
    dates, data = ft.readData(dailyfile, '1986-01-02', '2020-08-31')
    E = 7
    tau = 1
elif (mode == "weekly_origin"):  # Weekly_original
    print("===WEEKLY DATASET===")
    dates, data = ft.readData(weeklyfile, '1986-01-03', '2020-08-28')
    E = 6
    tau = 1
elif (mode == "weekly_tau1"):  # Weekly_tau1
    print("===WEEKLY DATASET===")
    dates, data = ft.readData(weeklyfile, '1986-01-03', '2020-08-28')
    E = 6
    tau = 1
elif (mode == "weekly_tau1_for_monthly"):  # Weekly_tau1
    print("===WEEKLY DATASET===")
    dates, data = ft.readData(weeklyfile, '1986-01-03', '2020-08-28')
Example #3
import warnings
import ft

# ignore warnings
warnings.filterwarnings("ignore")

#mode = "daily"
mode = "weekly"
#mode = "monthly"

dailyfile = open('./daily/wti.csv', 'r')
weeklyfile = open('./weekly/wti_week.csv', 'r')
monthlyfile = open('./monthly/wti_month.csv', 'r')

if (mode == "daily"):  # Daily
    print("===DAILY DATASET===")
    data = ft.readData(dailyfile, '2000-01-03', '2020-03-13')
elif (mode == "weekly"):  # Weekly_original
    print("===WEEKLY DATASET===")
    data = ft.readData(weeklyfile, '1986-01-03', '2020-08-28')
elif (mode == "monthly"):  # Monthly
    print("===MONTHLY DATASET===")
    data = ft.readData(monthlyfile, '1986-01-01', '2020-08-01')

# hyperparameters
test_ratio = 0.2
ARIMA_order = (3, 1, 3)

# train / test split
test_size = int(len(data) * test_ratio)
print("size of dataset:", len(data))
print("size of test dataset:", test_size)
Example #4
import GKFN
import ft

data = ft.readData()

# Reconstruct the data using the chosen E and tau values.
E = 6
tau = 9
P = 1  # P sets how many steps ahead to predict; e.g. P = 1 predicts one day ahead.

# Build the train and test sets.
dataX, dataY = ft.extracting(tau, E, P, data)
trX = dataX[:-92]
teX = dataX[-92:]
trY = dataY[:-92]
teY = dataY[-92:]

# Set the parameters and train the model.
alpha = 0.25
loop = 5
Kernel_Num = 150

GKFN.GKFN(trX, trY, teX, teY, alpha, loop, Kernel_Num)
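ft.extracting is not shown in these examples. A plausible minimal sketch of such a delay-embedding helper, assuming each input row holds E values spaced tau steps apart and the target is the value P steps after the last input:

import numpy as np

def extracting_sketch(tau, E, P, data):
    # Hypothetical stand-in for ft.extracting (time-delay embedding).
    # Row t of X is [x[t], x[t+tau], ..., x[t+(E-1)*tau]];
    # the matching target is x[t + (E-1)*tau + P].
    data = np.asarray(data, dtype=float)
    last = len(data) - (E - 1) * tau - P
    X = np.array([data[t:t + (E - 1) * tau + 1:tau] for t in range(last)])
    Y = np.array([data[t + (E - 1) * tau + P] for t in range(last)])
    return X, Y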
Example #5
import warnings
import ft

# ignore warnings
warnings.filterwarnings("ignore")

mode = "daily"
#mode = "weekly_data+"
#mode = "monthly_data+"

dailyfile = open('./daily/wti.csv', 'r')
#weeklyfile = open('./weekly/wti_week.csv', 'r')
#monthlyfile = open('./monthly/wti_month.csv', 'r')

if (mode == "daily"):  # Daily
    print("===DAILY DATASET===")
    data = ft.readData(dailyfile, '1986-01-02', '2020-08-30')
elif (mode == "weekly"):  # Weekly_original
    print("===WEEKLY DATASET===")
    #data = ft.readData(weeklyfile, '1986-01-03', '2020-06-26')
elif (mode == "monthly"):  # Monthly
    print("===MONTHLY DATASET===")
    #data = ft.readData(monthlyfile, '1960-01-01', '2020-06-01')

# hyperparameters
test_ratio = 0.2
ARIMA_order = (3, 1, 1)

# train / test split
test_size = int(len(data) * test_ratio)
print("size of dataset:", len(data))
print("size of test dataset:", test_size)