def seasonANNForecasting(ts, dataset, freq, lag):
    """Seasonal-decomposition + ANN forecasting.

    Decomposes *ts* into trend/seasonal/residual, forecasts the trend and
    residual components with two ANNs (input window length *lag*), then adds
    the seasonal component back to form the final predictions.

    Returns (trainPred, testPred, MAE, MRSE, SMAPE); the metrics are
    computed on the test split only.
    """

    # Seasonal decomposition of the series
    #ts.index = pd.date_range(start='19960318',periods=len(ts), freq='Q')
    trend, seasonal, residual = season_decompose.seasonDecompose(ts, freq=freq)
    # print trend.shape
    # print seasonal.shape
    # print residual.shape

    # Forecast trend and residual separately
    trendWin = lag
    resWin = trendWin
    t1 = time.time()
    trTrain, trTest, mae1, mrse1, smape1 = ANNFORECAST.ANNforecasting(
        trend, inputDim=trendWin, epoch=100, hiddenNum=100)
    resTrain, resTest, mae2, mrse2, smape2 = ANNFORECAST.ANNforecasting(
        residual, inputDim=resWin, epoch=100, hiddenNum=100)
    t2 = time.time()
    print(t2 - t1)  # elapsed training/forecast time in seconds

    #'''
    # Align the trend/residual prediction series with each other
    trendPred, resPred = util.align(trTrain, trTest, trendWin, resTrain,
                                    resTest, resWin)

    # Combined prediction over the whole series (computed but not returned)
    finalPred = trendPred + seasonal + resPred

    # Recombine per-split predictions; the seasonal slices skip the window
    # of leading points consumed by each forecaster's input window
    trainPred = trTrain + seasonal[trendWin:trendWin +
                                   trTrain.shape[0]] + resTrain
    testPred = trTest + seasonal[2 * resWin + resTrain.shape[0]:] + resTest

    # Ground-truth data: trim freq//2 points at each end
    # (presumably to match the decomposition's trimmed output — confirm)
    data = dataset[freq // 2:-(freq // 2)]
    trainY = data[trendWin:trendWin + trTrain.shape[0]]
    testY = data[2 * resWin + resTrain.shape[0]:]

    # Evaluation metrics (test split only)
    # MAE = eval.calcMAE(trainY, trainPred)
    # print ("train MAE",MAE)
    # MRSE = eval.calcRMSE(trainY, trainPred)
    # print ("train MRSE",MRSE)
    # MAPE = eval.calcMAPE(trainY, trainPred)
    # print ("train MAPE",MAPE)
    MAE = eval.calcMAE(testY, testPred)
    print("test MAE", MAE)
    MRSE = eval.calcRMSE(testY, testPred)
    print("test RMSE", MRSE)
    MAPE = eval.calcMAPE(testY, testPred)
    print("test MAPE", MAPE)
    SMAPE = eval.calcSMAPE(testY, testPred)
    print("test SMAPE", SMAPE)

    # plt.plot(data)
    # plt.plot(finalPred)
    # plt.show()
    #'''
    return trainPred, testPred, MAE, MRSE, SMAPE
def decompose_RNN_forecasting(ts, dataset, freq, lag, epoch=20, hidden_num=64,
                              batch_size=32, lr=1e-3, unit="GRU", varFlag=False, maxLen=48, minLen=24, step=8):
    """Seasonal-decomposition + RNN forecasting.

    Decomposes *ts*, forecasts the trend and residual components with RNNs
    of cell type *unit*, recombines them with the seasonal part, then plots
    and saves the test-set prediction to 'M_1955_CPU.eps'.

    varFlag/minLen/maxLen/step are forwarded to RNN_forecasting for
    variable-length-window training.

    Returns (trainPred, testPred, MAE, MRSE, SMAPE); metrics are computed
    on the test split only.
    """

    # Seasonal decomposition
    trend, seasonal, residual = decompose.ts_decompose(ts, freq)
    print("trend shape:", trend.shape)
    print("peroid shape:", seasonal.shape)
    print("residual shape:", residual.shape)

    # Forecast trend and residual separately
    resWin = trendWin = lag
    t1 = time.time()
    trTrain, trTest, MAE1, MRSE1, SMAPE1 = RNN_forecasting(trend, lookBack=lag, epoch=epoch, batchSize=batch_size, hiddenNum=hidden_num,
                                            varFlag=varFlag, minLen=minLen, maxLen=maxLen, step=step, unit=unit, lr=lr)
    resTrain, resTest, MAE2, MRSE2, SMAPE2 = RNN_forecasting(residual, lookBack=lag, epoch=epoch, batchSize=batch_size, hiddenNum=hidden_num,
                                            varFlag=varFlag, minLen=minLen, maxLen=maxLen, step=step, unit=unit, lr=lr)
    t2 = time.time()
    print(t2-t1)  # elapsed training/forecast time in seconds

    print("trTrain shape:", trTrain.shape)
    print("resTrain shape:", resTrain.shape)

    # Align the trend/residual prediction series with each other
    trendPred, resPred = util.align(trTrain, trTest, trendWin, resTrain, resTest, resWin)
    print("trendPred shape is", trendPred.shape)
    print("resPred shape is", resPred.shape)

    # Combined full-series prediction (disabled)
    # finalPred = trendPred+seasonal+resPred

    # Recombine per-split predictions; the seasonal slices skip the window
    # of leading points consumed by each forecaster's input window
    trainPred = trTrain+seasonal[trendWin:trendWin+trTrain.shape[0]]+resTrain
    testPred = trTest+seasonal[2*resWin+resTrain.shape[0]:]+resTest

    # Ground-truth data: trim freq//2 points at each end
    data = dataset[freq//2:-(freq//2)]
    trainY = data[trendWin:trendWin+trTrain.shape[0]]
    testY = data[2*resWin+resTrain.shape[0]:]

    # Evaluation metrics (test split only)
    MAE = eval.calcMAE(testY, testPred)
    print("test MAE", MAE)
    MRSE = eval.calcRMSE(testY, testPred)
    print("test RMSE", MRSE)
    MAPE = eval.calcMAPE(testY, testPred)
    print("test MAPE", MAPE)
    SMAPE = eval.calcSMAPE(testY, testPred)
    print("test SMAPE", SMAPE)

    # Plot and export the test prediction against the ground truth
    plt.plot(testY, label='ground-truth')
    plt.plot(testPred, label='prediction')
    plt.xlabel("Time", fontsize=10)
    plt.ylabel("CPU Utilization(%)", fontsize=10)
    plt.legend()
    foo_fig = plt.gcf()
    foo_fig.savefig('M_1955_CPU.eps', format='eps', dpi=1000, bbox_inches='tight')
    plt.show()

    return trainPred, testPred, MAE, MRSE, SMAPE
def decompose_MLP_forecasting(ts,
                              dataset,
                              freq,
                              lag,
                              epoch=20,
                              hidden_num=64,
                              batch_size=32,
                              lr=1e-3):
    """Seasonal-decomposition + MLP forecasting.

    Decomposes *ts* into trend/seasonal/residual, forecasts trend and
    residual with MLPs, and recombines them with the seasonal part.

    Parameters
    ----------
    ts : series fed to the seasonal decomposition.
    dataset : raw series aligned with *ts*; used to build ground truth.
    freq : seasonal period of the decomposition.
    lag : input window length used for both MLP forecasters.
    epoch, hidden_num, batch_size, lr : MLP training hyper-parameters.

    Returns (trainPred, testPred, MAE, MRSE, SMAPE); metrics are computed
    on the test split only.
    """

    # Seasonal decomposition
    trend, seasonal, residual = decompose.ts_decompose(ts, freq=freq)
    print("trend shape:", trend.shape)
    print("peroid shape:", seasonal.shape)
    print("residual shape:", residual.shape)

    # Forecast trend and residual separately
    resWin = trendWin = lag
    t1 = time.time()
    trTrain, trTest, mae1, mrse1, smape1 = \
        ANNFORECAST.MLP_forecasting(trend, inputDim=trendWin, epoch=epoch, hiddenNum=hidden_num,
                                    batchSize=batch_size, lr=lr)
    # FIX: use resWin (not trendWin) for the residual model so the two
    # windows stay independent if they ever diverge; today resWin == trendWin,
    # so behaviour is unchanged.
    resTrain, resTest, mae2, mrse2, smape2 = \
        ANNFORECAST.MLP_forecasting(residual, inputDim=resWin, epoch=epoch, hiddenNum=hidden_num,
                                    batchSize=batch_size, lr=lr)
    t2 = time.time()
    print("time:", t2 - t1)

    # Recombine per-split predictions; the seasonal slices skip the window
    # of leading points consumed by each forecaster's input window
    trainPred = trTrain + seasonal[trendWin:trendWin +
                                   trTrain.shape[0]] + resTrain
    testPred = trTest + seasonal[2 * resWin + resTrain.shape[0]:] + resTest

    # Ground-truth data: trim freq//2 points at each end
    data = dataset[freq // 2:-(freq // 2)]
    trainY = data[trendWin:trendWin + trTrain.shape[0]]
    testY = data[2 * resWin + resTrain.shape[0]:]

    # Evaluation metrics (test split only)
    MAE = eval.calcMAE(testY, testPred)
    print("test MAE", MAE)
    MRSE = eval.calcRMSE(testY, testPred)
    print("test RMSE", MRSE)
    MAPE = eval.calcMAPE(testY, testPred)
    print("test MAPE", MAPE)
    SMAPE = eval.calcSMAPE(testY, testPred)
    print("test SMAPE", SMAPE)

    return trainPred, testPred, MAE, MRSE, SMAPE
# Esempio n. 4
# 0
def seasonSVRForecasting(ts, dataset, freq, lag):
    """Seasonal-decomposition + SVR forecasting.

    Decomposes *ts*, forecasts trend and residual with SVRs (look-back
    *lag*), and recombines the components.  Note: the residual term is
    commented out of trainPred/testPred below, so the reported predictions
    use trend + seasonal only.

    Returns (trainPred, testPred, MAE, MRSE, SMAPE); metrics are computed
    on the test split only.
    """

    # Seasonal decomposition
    trend, seasonal, residual = season_decompose.seasonDecompose(ts, freq=freq)
    # print trend.shape
    # print seasonal.shape
    # print residual.shape

    # Forecast trend and residual separately
    trendWin = lag
    resWin = trendWin
    t1 = time.time()
    trTrain, trTest, mae1, mrse1, mape1 = SVRFORECAST.SVRforecasting(
        trend, lookBack=trendWin)
    resTrain, resTest, mae2, mrse2, mape2 = SVRFORECAST.SVRforecasting(
        residual, lookBack=resWin)
    t2 = time.time()
    print(t2 - t1)  # elapsed training/forecast time in seconds

    #'''
    # Align the trend/residual prediction series with each other
    trendPred, resPred = util.align(trTrain, trTest, trendWin, resTrain,
                                    resTest, resWin)

    # Combined prediction over the whole series (computed but not returned)
    finalPred = trendPred + seasonal + resPred

    # Per-split predictions: residual terms are disabled here
    trainPred = trTrain + seasonal[trendWin:trendWin +
                                   trTrain.shape[0]]  #+resTrain
    testPred = trTest + seasonal[2 * resWin + resTrain.shape[0]:]  #+resTest

    # Ground-truth data: trim freq//2 points at each end
    data = dataset[freq // 2:-(freq // 2)]
    trainY = data[trendWin:trendWin + trTrain.shape[0]]
    testY = data[2 * resWin + resTrain.shape[0]:]

    # Evaluation metrics (test split only)
    # MAE = eval.calcMAE(trainY, trainPred)
    # print ("train MAE",MAE)
    # MRSE = eval.calcRMSE(trainY, trainPred)
    # print ("train MRSE",MRSE)
    # MAPE = eval.calcMAPE(trainY, trainPred)
    # print ("train MAPE",MAPE)
    MAE = eval.calcMAE(testY, testPred)
    print("test MAE", MAE)
    MRSE = eval.calcRMSE(testY, testPred)
    print("test RMSE", MRSE)
    MAPE = eval.calcMAPE(testY, testPred)
    print("test MAPE", MAPE)
    SMAPE = eval.calcSMAPE(testY, testPred)
    print("test SMAPE", SMAPE)

    # plt.plot(data)
    # plt.plot(finalPred)
    # plt.show()
    #'''
    return trainPred, testPred, MAE, MRSE, SMAPE
def statefulRNNforecasting(dataset, lookBack, inputDim=1, hiddenNum=100, outputDim=1,
                           unit="GRU", epoch=50, batchSize=10, varFlag=False,
                           minLen=15, maxLen=30, inputNum=150):
    """Train a stateful RNN on *dataset* and forecast train/test splits.

    With varFlag=True the model is trained on variable-length samples
    (lengths between *minLen* and *maxLen*, *inputNum* samples); otherwise
    on fixed windows of length *lookBack*.  Prediction always uses the
    fixed-length samples.

    Returns (trainPred, testPred, MAE, MRSE, SMAPE); metrics are computed
    on the test split only.
    """

    # Scale the series into [0, 1]; the scaler is reused to undo it below
    scaler = MinMaxScaler(feature_range=(0, 1))
    dataset = scaler.fit_transform(dataset)

    # Split the series and build RNN-shaped supervised samples
    train, test = util.divideTrainTest(dataset)
    trainX, trainY = util.createSamples(train, lookBack)
    testX, testY = util.createSamples(test, lookBack)

    # Build the stateful RNN and train on fixed- or variable-length samples
    RNNModel = staRNNs.statefulRNNsModel(inputDim, hiddenNum, outputDim, unit=unit,
                                         batchSize=batchSize, lag=lookBack)
    if varFlag:
        vtrainX, vtrainY = util.createVariableDataset(train, minLen, maxLen, inputNum)
        RNNModel.train(vtrainX, vtrainY, epoch, batchSize)
    else:
        RNNModel.train(trainX, trainY, epoch, batchSize)

    # Forecast both splits (the stateful model needs the batch size here)
    trainPred = RNNModel.predict(trainX, batchSize)
    testPred = RNNModel.predict(testX, batchSize)

    # Undo the scaling on predictions, labels and the series itself
    trainPred = scaler.inverse_transform(trainPred)
    trainY = scaler.inverse_transform(trainY)
    testPred = scaler.inverse_transform(testPred)
    testY = scaler.inverse_transform(testY)
    dataset = scaler.inverse_transform(dataset)

    # Evaluation metrics (test split only)
    MAE = eval.calcMAE(testY, testPred)
    print("test MAE", MAE)
    MRSE = eval.calcRMSE(testY, testPred)
    print("test RMSE", MRSE)
    MAPE = eval.calcMAPE(testY, testPred)
    print("test MAPE", MAPE)
    SMAPE = eval.calcSMAPE(testY, testPred)
    # BUG FIX: this print previously labelled the SMAPE value "test MAPE"
    print("test SMAPE", SMAPE)

    # util.LBtest(testY-testPred)
    # plt.plot(testY-testPred)
    # plt.show()

    #util.plot(trainPred,trainY,testPred,testY)

    return trainPred, testPred, MAE, MRSE, SMAPE
def decompose_SVR_forecasting(ts, dataset, freq, lag, C=0.1, epsilon=0.01):
    """Seasonal-decomposition + SVR forecasting.

    Decomposes *ts*, forecasts the trend and residual components with SVRs
    (look-back *lag*, regularisation *C*, tube width *epsilon*), and
    recombines them with the seasonal part.

    Returns (trainPred, testPred, MAE, MRSE, SMAPE); metrics are computed
    on the test split only.
    """

    # Seasonal decomposition
    trend, seasonal, residual = decompose.ts_decompose(ts, freq=freq)
    print("trend shape:", trend.shape)
    print("peroid shape:", seasonal.shape)
    print("residual shape:", residual.shape)

    # Forecast trend and residual separately
    resWin = trendWin = lag
    t1 = time.time()
    trTrain, trTest, mae1, mrse1, mape1 = SVR_forecasting(trend,
                                                          lookBack=lag,
                                                          C=C,
                                                          epsilon=epsilon)
    resTrain, resTest, mae2, mrse2, mape2 = SVR_forecasting(residual,
                                                            lookBack=lag,
                                                            C=C,
                                                            epsilon=epsilon)
    t2 = time.time()
    print(t2 - t1)  # elapsed training/forecast time in seconds

    # Align the trend/residual prediction series with each other
    trendPred, resPred = util.align(trTrain, trTest, trendWin, resTrain,
                                    resTest, resWin)

    # Combined prediction over the whole series (computed but not returned)
    finalPred = trendPred + seasonal + resPred

    # Recombine per-split predictions; the seasonal slices skip the window
    # of leading points consumed by each forecaster's input window
    trainPred = trTrain + seasonal[trendWin:trendWin +
                                   trTrain.shape[0]] + resTrain
    testPred = trTest + seasonal[2 * resWin + resTrain.shape[0]:] + resTest

    # Ground-truth data: trim freq//2 points at each end
    data = dataset[freq // 2:-(freq // 2)]
    trainY = data[trendWin:trendWin + trTrain.shape[0]]
    testY = data[2 * resWin + resTrain.shape[0]:]

    # Evaluation metrics (test split only)
    MAE = eval.calcMAE(testY, testPred)
    print("test MAE", MAE)
    MRSE = eval.calcRMSE(testY, testPred)
    print("test RMSE", MRSE)
    MAPE = eval.calcMAPE(testY, testPred)
    print("test MAPE", MAPE)
    SMAPE = eval.calcSMAPE(testY, testPred)
    print("test SMAPE", SMAPE)

    # plt.plot(data)
    # plt.plot(finalPred)
    # plt.show()

    return trainPred, testPred, MAE, MRSE, SMAPE
# Esempio n. 7
# 0
def SVRforecasting(dataset, lookBack):
    """Forecast *dataset* with an SVR over sliding windows of *lookBack*.

    The series is min-max scaled, split into train/test, windowed into
    flat (non-RNN) samples, fitted with SVR(C=2, epsilon=0.01), and the
    predictions are rescaled back before test metrics are printed.

    Returns (trainPred, testPred, MAE, MRSE, SMAPE).
    """

    # Scale the whole series into [0, 1]
    #dataset = dataset.reshape(-1, 1)
    scaler = MinMaxScaler(feature_range=(0, 1))
    dataset = scaler.fit_transform(dataset)

    # Train/test split, then flat (non-RNN) window samples
    train, test = util.divideTrainTest(dataset)
    trainX, trainY = util.createSamples(train, lookBack, RNN=False)
    testX, testY = util.createSamples(test, lookBack, RNN=False)

    # Fit the support-vector regressor
    model = SVR.SVRModel(C=2, epsilon=0.01)
    model.train(trainX, trainY)

    # Predict on both splits
    trainPred = model.predict(trainX)
    testPred = model.predict(testX)

    # Map predictions, targets and the series back to the original scale
    trainPred = scaler.inverse_transform(trainPred)
    trainY = scaler.inverse_transform(trainY)
    testPred = scaler.inverse_transform(testPred)
    testY = scaler.inverse_transform(testY)
    dataset = scaler.inverse_transform(dataset)

    # Report test-set metrics
    MAE = eval.calcMAE(testY, testPred)
    print("test MAE", MAE)
    MRSE = eval.calcRMSE(testY, testPred)
    print("test RMSE", MRSE)
    MAPE = eval.calcMAPE(testY, testPred)
    print("test MAPE", MAPE)
    SMAPE = eval.calcSMAPE(testY, testPred)
    print("test SMAPE", SMAPE)

    #util.plot(trainPred,trainY,testPred,testY)

    return trainPred, testPred, MAE, MRSE, SMAPE
def SVR_forecasting(dataset, lookBack, C=2.0, epsilon=0.01, plot_flag=False):
    """Fit an SVR on sliding windows of *dataset* and report test metrics.

    The series is min-max scaled before windowing and the predictions are
    rescaled back to the original range.  Set plot_flag=True to plot the
    train/test predictions against the ground truth.

    Returns (trainPred, testPred, MAE, MRSE, SMAPE).
    """

    # Scale the whole series into [0, 1]
    scaler = MinMaxScaler(feature_range=(0, 1))
    scaled = scaler.fit_transform(dataset)

    # Train/test split, then flat (non-RNN) window samples
    train, test = util.divideTrainTest(scaled)
    trainX, trainY = util.createSamples(train, lookBack, RNN=False)
    testX, testY = util.createSamples(test, lookBack, RNN=False)
    print("trainX shape is", trainX.shape)
    print("trainY shape is", trainY.shape)
    print("testX shape is", testX.shape)
    print("testY shape is", testY.shape)

    # Fit the support-vector regressor
    model = SVR.SVRModel(C=C, epsilon=epsilon)
    model.train(trainX, trainY)

    # Predict and force column-vector shape for the inverse transform
    trainPred = model.predict(trainX).reshape(-1, 1)
    testPred = model.predict(testX).reshape(-1, 1)

    # Map predictions and targets back to the original scale
    trainPred = scaler.inverse_transform(trainPred)
    trainY = scaler.inverse_transform(trainY)
    testPred = scaler.inverse_transform(testPred)
    testY = scaler.inverse_transform(testY)

    # Report test-set metrics
    MAE = eval.calcMAE(testY, testPred)
    print("test MAE", MAE)
    MRSE = eval.calcRMSE(testY, testPred)
    print("test RMSE", MRSE)
    MAPE = eval.calcMAPE(testY, testPred)
    print("test MAPE", MAPE)
    SMAPE = eval.calcSMAPE(testY, testPred)
    print("test SMAPE", SMAPE)

    if plot_flag:
        util.plot(trainPred, trainY, testPred, testY)

    return trainPred, testPred, MAE, MRSE, SMAPE
# Esempio n. 9
# 0
def run_aram(data, maxar, maxma):
    """Fit an ARIMA model (pyflux) on the train split of *data*, forecast
    the test split, and print test-set error metrics.

    *maxar*/*maxma* bound the AR/MA order search in choose_order().
    NOTE(review): the function prints the metrics but returns None.
    """

    train, test = util.divideTrainTest(data)
    print("train shape is", train.shape)
    print("test shape is", test.shape)

    # Stationarity check: only look for a differencing order when it fails
    diffn = 0
    if stationarity(train) < 0.01:
        print('平稳,不需要差分')
    else:
        diffn = best_diff(train, maxdiff=8)
        # train = produce_diffed_timeseries(train, diffn)
        print('差分阶数为' + str(diffn))

    # Select the (ar, ma) order and fit the ARIMA model by MLE
    print('开始进行ARMA拟合')
    order = choose_order(train, maxar, maxma)
    print('模型的阶数为:' + str(order))
    diffn = 1  # NOTE(review): overwrites the diffn chosen above — confirm intended
    _ar = order[0]
    _ma = order[1]
    train = train.flatten()
    model = pf.ARIMA(data=train,
                     ar=_ar,
                     ma=_ma,
                     integ=diffn,
                     family=pf.Normal())
    model.fit("MLE")
    testPred = model.predict(len(test))
    # testPred = predict_recover(test_predict, train, diffn)

    # Evaluation metrics (test split)
    MAE = eval.calcMAE(test, testPred)
    print("test MAE", MAE)
    MRSE = eval.calcRMSE(test, testPred)
    print("test RMSE", MRSE)
    MAPE = eval.calcMAPE(test, testPred)
    print("test MAPE", MAPE)
    SMAPE = eval.calcSMAPE(test, testPred)
    print("test SMAPE", SMAPE)
# autocorrelation_plot(ts)
# pyplot.show()

# Rolling one-step-ahead ARIMA(4,1,3) forecast over the last 10% of `ts`.
# NOTE(review): top-level script code — relies on `ts`, `np`, `eval`,
# `ARIMA` and `pyplot` being defined/imported elsewhere in this module.
X = ts.values
X = np.array(X, dtype="float64")
size = int(len(X) * 0.9)  # 90/10 train/test split
train, test = X[0:size], X[size:len(X)]
history = [x for x in train]
predictions = []
for t in range(len(test)):
    # Refit on all data seen so far, forecast one step ahead
    model = ARIMA(history, order=(4, 1, 3))
    model_fit = model.fit(disp=0)
    output = model_fit.forecast()
    yhat = output[0]
    predictions.append(yhat)
    obs = test[t]
    history.append(obs)  # walk-forward: append the true value, not the forecast
    print('predicted=%f, expected=%f' % (yhat, obs))
#RMSE = np.sqrt(mean_squared_error(test, predictions))
test = np.array(test)
predictions = np.array(predictions).reshape(-1)
MAE = eval.calcMAE(test, predictions)
RMSE = eval.calcRMSE(test, predictions)
MAPE = eval.calcMAPE(test, predictions)
print('Test MAE: %.8f' % MAE)
print('Test RMSE: %.8f' % RMSE)
print('Test MAPE: %.8f' % MAPE)
# plot
pyplot.plot(test)
pyplot.plot(predictions, color='red')
pyplot.show()
# Esempio n. 11
# 0
def RNN_forecasting(dataset,
                    lookBack,
                    lr,
                    inputDim=1,
                    hiddenNum=64,
                    outputDim=1,
                    unit="GRU",
                    epoch=20,
                    batchSize=30,
                    varFlag=False,
                    minLen=15,
                    maxLen=30,
                    step=5):
    """Train an RNN (cell type *unit*) on *dataset* and forecast the test split.

    With varFlag=True the model is trained/evaluated on variable-length
    windows between *minLen* and *maxLen* (stride *step*); otherwise on
    fixed-length windows of *lookBack*.

    Returns (trainPred, testPred, MAE, MRSE, SMAPE).
    NOTE(review): only testPred/testY are inverse-transformed below, so the
    returned trainPred stays in the scaled [0, 1] range — confirm callers
    expect this.
    """

    # Scale the series into [0, 1]
    scaler = MinMaxScaler(feature_range=(0, 1))
    dataset = scaler.fit_transform(dataset)

    # Split the series; samples are built per-branch below
    train, test = util.divideTrainTest(dataset)

    # Pre-declare both sample variants so either branch can be taken
    trainX = None
    trainY = None
    vtrainX = None
    vtrainY = None
    testX = None
    testY = None
    vtestX = None
    vtestY = None

    # Build the model and train on fixed- or variable-length samples
    RNNModel = RNNs.RNNsModel(inputDim, hiddenNum, outputDim, unit, lr)
    if varFlag:
        vtrainX, vtrainY = util.createVariableDataset(train, minLen, maxLen,
                                                      step)
        vtestX, vtestY = util.createVariableDataset(test, minLen, maxLen, step)
        print("trainX shape is", vtrainX.shape)
        print("trainY shape is", vtrainY.shape)
        print("testX shape is", vtestX.shape)
        print("testY shape is", vtestY.shape)
        RNNModel.train(vtrainX, vtrainY, epoch, batchSize)
    else:
        trainX, trainY = util.createSamples(train, lookBack)
        testX, testY = util.createSamples(test, lookBack)
        print("trainX shape is", trainX.shape)
        print("trainY shape is", trainY.shape)
        print("testX shape is", testX.shape)
        print("testY shape is", testY.shape)
        RNNModel.train(trainX, trainY, epoch, batchSize)

    # Forecast both splits with the matching prediction routine
    if varFlag:
        trainPred = RNNModel.predictVarLen(vtrainX, minLen, maxLen, step)
        testPred = RNNModel.predictVarLen(vtestX, minLen, maxLen, step)
        trainPred = trainPred.reshape(-1, 1)
    else:
        trainPred = RNNModel.predict(trainX)
        testPred = RNNModel.predict(testX)
        trainPred = trainPred.reshape(-1, 1)

    if varFlag:
        # Reduce the per-lag labels back to one ground-truth value per point
        testY = util.transform_groundTruth(vtestY, minLen, maxLen, step)
        testY = testY.reshape(-1, 1)
        testPred = testPred.reshape(-1, 1)
        print("testY", testY.shape)
        print("testPred", testPred.shape)

    # Undo the scaling (test split only — see NOTE in the docstring)
    testPred = scaler.inverse_transform(testPred)
    testY = scaler.inverse_transform(testY)

    # Evaluation metrics (test split only)
    MAE = eval.calcMAE(testY, testPred)
    print("test MAE", MAE)
    MRSE = eval.calcRMSE(testY, testPred)
    print("test RMSE", MRSE)
    MAPE = eval.calcMAPE(testY, testPred)
    print("test MAPE", MAPE)
    SMAPE = eval.calcSMAPE(testY, testPred)
    print("test SMAPE", SMAPE)

    #util.plot(trainPred,trainY,testPred,testY)

    return trainPred, testPred, MAE, MRSE, SMAPE
# NOTE(review): orphan top-level fragment — it references names (trTrain,
# resTrain, seasonal, trendWin, resWin, dataset, finalPred, eval, plt, …)
# that are not defined at this scope; it appears to be a leftover body of
# one of the decompose-forecasting functions above.
# Obtain train-set and test-set predictions separately
trainPred = trTrain+resTrain+seasonal[trendWin:trendWin+trTrain.shape[0]]
testPred = trTest+resTest+seasonal[2*resWin+resTrain.shape[0]:]

# Ground-truth data (trims two points at each end)
data = dataset[2:-2]
trainY = data[trendWin:trendWin+trTrain.shape[0]]
testY = data[2*resWin+resTrain.shape[0]:]

# Evaluation metrics (train split, then test split)
MAE = eval.calcMAE(trainY, trainPred)
print ("train MAE",MAE)
MRSE = eval.calcRMSE(trainY, trainPred)
print ("train MRSE",MRSE)
MAPE = eval.calcMAPE(trainY, trainPred)
print ("train MAPE",MAPE)
MAE = eval.calcMAE(testY,testPred)
print ("test MAE",MAE)
MRSE = eval.calcRMSE(testY,testPred)
print ("test RMSE",MRSE)
MAPE = eval.calcMAPE(testY,testPred)
print ("test MAPE",MAPE)

plt.plot(data,'r')
plt.plot(finalPred,'g')
plt.show()

#'''

# Esempio n. 13
# 0
def diffLagEnsmble(dataset,
                   minLag,
                   maxLag,
                   step,
                   epoch,
                   batchSize=10,
                   inputDim=1,
                   outputDim=1,
                   hiddenNum=50,
                   unit="GRU"):
    """Ensemble of RNNs trained with different look-back lags.

    One RNN is trained per lag in range(minLag, maxLag + 1, step); each
    test point is predicted by every model at its own lag and the
    forecasts are combined by a simple mean.

    Returns (testPred, MAE, MRSE, SMAPE).
    """

    # Scale the series into [0, 1]
    dataset = dataset.reshape(-1, 1)
    scaler = MinMaxScaler()
    dataset = scaler.fit_transform(dataset)

    # Split the series into train/test
    train, test = util.divideTrainTest(dataset)
    modelList = []

    # Train an independent RNN for every candidate lag
    for lag in range(minLag, maxLag + 1, step):

        print("lag is ", lag)

        # Samples for this lag (presumably padded up to maxLag so all
        # models share one input width — see util.createPaddedDataset)
        trainX, trainY = util.createPaddedDataset(train, lag, maxLag)
        #testX, testY = util.createPaddedDataset(test, lag, maxLag)

        RNNModel = RNNs.RNNsModel(inputDim, hiddenNum, outputDim, unit)
        RNNModel.train(trainX, trainY, epoch, batchSize)
        modelList.append(RNNModel)

    print("the number of model is ", len(modelList))

    # Build variable-length test samples (one sample per lag per point)
    testX, testY = util.createVariableDataset(test, minLag, maxLag, step)
    print("testX", testX.shape)
    print("testY", testY.shape)

    lagContainNum = (maxLag - minLag) // step + 1
    print("lag contains num is ", lagContainNum)

    # For each data point, let each model predict at its own lag and
    # average the forecasts (simple mean for now)
    testNum = len(testX)
    testPred = []
    for i in range(0, testNum, lagContainNum):
        predList = []
        for j in range(0, lagContainNum):
            singlePred = modelList[j].predict(testX[i + j:i + j + 1, :, :])
            predList.append(singlePred)
        testPred.append(np.mean(predList))

    # Undo the scaling
    # NOTE(review): testPred is a plain Python list here and sklearn
    # scalers expect a 2-D array — confirm this call behaves as intended
    testPred = scaler.inverse_transform(testPred)
    testY = scaler.inverse_transform(testY)

    # Reduce the per-lag labels back to one ground-truth value per point
    testY = util.transformGroundTruth(testY, minLag, maxLag, step)
    testPred = np.array(testPred)
    print("testY", testY.shape)
    print("testPred", testPred.shape)

    # Evaluation metrics
    MAE = eval.calcMAE(testY, testPred)
    print("test MAE", MAE)
    MRSE = eval.calcRMSE(testY, testPred)
    print("test RMSE", MRSE)
    MAPE = eval.calcMAPE(testY, testPred)
    print("test MAPE", MAPE)
    SMAPE = eval.calcSMAPE(testY, testPred)
    print("test SMAPE", SMAPE)

    # plt.plot(testY)
    # plt.plot(testPred)
    # plt.show()

    return testPred, MAE, MRSE, SMAPE
# Esempio n. 14
# 0
def train_single_model(lr_, net_name, origin_data, filename, data_order,
                       Select_num, batch_size, num_epochs, l_forward, l_back,
                       n, train_count, parapath):
    """Train one LSTM-family forecaster with recursive multi-step prediction.

    net_name selects the architecture ('short_LSTM', 'BiLSTM', 'conv_LSTM'
    or 'conv_BiLSTM'); lr_ is the Adam learning rate; l_forward is the
    input-window length and l_back the number of recursive forecast steps
    per sample.  Saves the trained weights to parapath/net1.pkl and the
    per-epoch MAPE curve to parapath/train_MAPE.csv.  Requires CUDA.
    """
    split_rate = 1 / 2
    T0_train,T1_train,T2_train, \
    Forw_train,Pred_train,\
    T0_test,T1_test,T2_test,\
    Forw_test,Pred_test = load_data(filename =filename ,split_rate = split_rate,train_count =train_count,
                                    data_order = data_order,city_path =origin_data,store_seg ="data/seg",
                                    week_num = 24,store_path = "data/train",select_num =Select_num,step =4,
                                    T = 24 ,n = n,l_back = l_back,m =1,l_forward =l_forward )
    #

    # Wrap the forward-window training pairs in a (non-shuffling) DataLoader
    tr_f_dataset = forw_Data(Forw_train, Pred_train)
    tr_f_dataloader = torch.utils.data.DataLoader(tr_f_dataset,
                                                  batch_size=batch_size,
                                                  shuffle=False,
                                                  sampler=None,
                                                  batch_sampler=None,
                                                  num_workers=0)

    #%%
    '''
    #####################
    #  model 
    #####################
    '''
    # Instantiate the requested network on the GPU
    if net_name == 'short_LSTM':
        net1 = short_LSTM(input_dim=l_forward,
                          hidden_dim=2,
                          num_layers=1,
                          batch_size=batch_size).cuda()
    elif net_name == 'BiLSTM':
        net1 = BiLSTM(input_dim=l_forward,
                      hidden_dim=2,
                      num_layers=1,
                      batch_size=batch_size).cuda()
    elif net_name == 'conv_LSTM':
        net1 = conv_LSTM(input_dim=20,
                         hidden_dim=2,
                         num_layers=1,
                         batch_size=batch_size).cuda()
    elif net_name == 'conv_BiLSTM':
        net1 = conv_BiLSTM(input_dim=20,
                           hidden_dim=2,
                           num_layers=1,
                           batch_size=batch_size).cuda()
#    elif net_name == 'LSTM_with_Attention':
#        net1 = LSTM_with_Attention(input_dim =l_forward, hidden_dim=2, num_layers =1,batch_size= batch_size).cuda()
#    elif net_name == 'MLP':
#        net1 = MLP(input_dim = l_forward,common_dim =2).cuda()
    # Losses: MSE is defined but unused below; training uses the MAPE loss
    loss_MSE = torch.nn.MSELoss(size_average=False)
    loss_MAPE = calcMAPE()

    optimizer1 = torch.optim.Adam(list(net1.parameters()), lr=lr_)

    #%%
    #####################
    # Train model
    #####################
    t1 = time.time()
    net1.train()
    list_train = []
    num = []
    for i in range(num_epochs):
        count = 0
        #        t3 = time.time()
        train_loss = 0
        for data in (tr_f_dataloader):
            x = data[0]
            y = data[1].view(-1, 1, l_back)
            count += 1
            x, y = x.cuda(), y.cuda()
            result = []
            # Recursive multi-step forecast: feed each prediction back into
            # the input window for the next step
            for j in range(l_back):
                # insert predicted value into window data
                x0, g_truth = x.view(batch_size, -1, 1).cuda(), (y[:, :,
                                                                   j]).cuda()
                if j == 0:
                    x_f = x0
                else:
                    x_f = torch.cat([x_f, pred_f], dim=2)

                # Keep the window at most l_forward long (drop oldest value)
                if len(x_f[0][0]) > l_forward:
                    x_mid = x_f
                    x_f = x_mid[:, :, 1:len(x_f[0][0])]
                x_f = (torch.tensor(x_f, dtype=torch.float64).cuda()).view(
                    -1, 1, l_forward)

                #forward
                preds_f = net1(x_f.cuda())
                pred_f = (torch.tensor(preds_f,
                                       dtype=torch.float64).cuda()).view(
                                           batch_size, -1, 1)
                #calculate loss
                g_truth = g_truth.float()

                # Per-step parameter update on the MAPE loss
                loss = loss_MAPE(g_truth, preds_f)
                #loss = loss_MSE(preds_f,g_truth)
                optimizer1.zero_grad()
                #backward
                loss.backward()
                #update parameters
                optimizer1.step()
                result.append(preds_f)
            r = torch.cat([s for s in result], 1)
            train_loss = train_loss + loss_MAPE(y.view(-1, l_back),
                                                r.view(-1, l_back))

#        t4 = time.time()
        # NOTE(review): the accumulated loss is divided by num_epochs, not
        # by the number of batches (count) — confirm intended
        print("epoch:%i" % i, "loss:", train_loss / num_epochs)
        list_train.append(train_loss / num_epochs)
        #        print("training time:",t4-t3)
        num.append(i)

    # Persist the per-epoch loss curve and the trained weights
    df = pd.DataFrame({"num": num, "train_MAPE": list_train})
    df.to_csv(parapath + "train_MAPE" + ".csv", index=False, sep=',')
    torch.save(net1.state_dict(), parapath + '%s.pkl' % "net1")
    t2 = time.time()
    print("training time:", t2 - t1)
def seasonRNNForecasting(ts, dataset, freq, lag, unit="GRU"):
    """Seasonal-decomposition + RNN forecasting.

    Decomposes *ts*, forecasts trend and residual with RNNs of cell type
    *unit*, and recombines them with the seasonal component.

    Returns (trainPred, testPred, MAE, MRSE, SMAPE); MAE/MRSE/MAPE are
    reassigned for both splits below, so the returned values are the
    test-split metrics.
    """

    # Seasonal decomposition
    #ts.index = pd.date_range(start='19960318',periods=len(ts), freq='Q')
    trend, seasonal, residual = season_decompose.seasonDecompose(ts, freq)
    print(trend.shape)
    print(seasonal.shape)
    print(residual.shape)

    # Forecast trend and residual separately
    trendWin = lag
    resWin = trendWin
    t1 = time.time()
    trTrain, trTest, MAE1, MRSE1, SMAPE1 = RNNFORECAST.RNNforecasting(
        trend, lookBack=trendWin, epoch=50, unit=unit)
    resTrain, resTest, MAE2, MRSE2, SMAPE2 = RNNFORECAST.RNNforecasting(
        residual, lookBack=resWin, epoch=60, unit=unit, hiddenNum=100)
    # Variable-length training variants, kept for reference:
    # trTrain, trTest, MAE1, MRSE1, SMAPE1= RNNFORECAST.RNNforecasting(trend, lookBack=resWin, epoch=30, unit=unit,
    #                                                                     varFlag=True, minLen=20, maxLen=lag, step=4,
    #                                                                     hiddenNum=100)
    # resTrain, resTest, MAE2, MRSE2, SMAPE2 = RNNFORECAST.RNNforecasting(residual, lookBack=resWin, epoch=30, unit=unit,
    #                                                                     varFlag=True, minLen=20, maxLen=lag, step=4, hiddenNum=100)
    t2 = time.time()
    print(t2 - t1)  # elapsed training/forecast time in seconds

    print("trTrain shape is", trTrain.shape)
    print("resTrain shape is", resTrain.shape)

    #'''
    # Align the trend/residual prediction series with each other
    trendPred, resPred = util.align(trTrain, trTest, trendWin, resTrain,
                                    resTest, resWin)

    print("trendPred shape is", trendPred.shape)
    print("resPred shape is", resPred.shape)

    # Combined prediction over the whole series (computed but not returned)
    finalPred = trendPred + seasonal + resPred

    # Recombine per-split predictions; the seasonal slices skip the window
    # of leading points consumed by each forecaster's input window
    trainPred = trTrain + seasonal[trendWin:trendWin +
                                   trTrain.shape[0]] + resTrain
    testPred = trTest + seasonal[2 * resWin + resTrain.shape[0]:] + resTest

    # Ground-truth data: trim freq//2 points at each end
    data = dataset[freq // 2:-(freq // 2)]
    trainY = data[trendWin:trendWin + trTrain.shape[0]]
    testY = data[2 * resWin + resTrain.shape[0]:]

    # Evaluation metrics (train split, then test split; variables reused)
    MAE = eval.calcMAE(trainY, trainPred)
    print("train MAE", MAE)
    MRSE = eval.calcRMSE(trainY, trainPred)
    print("train MRSE", MRSE)
    MAPE = eval.calcMAPE(trainY, trainPred)
    print("train MAPE", MAPE)
    MAE = eval.calcMAE(testY, testPred)
    print("test MAE", MAE)
    MRSE = eval.calcRMSE(testY, testPred)
    print("test RMSE", MRSE)
    MAPE = eval.calcMAPE(testY, testPred)
    print("test MAPE", MAPE)
    SMAPE = eval.calcSMAPE(testY, testPred)
    print("test SMAPE", SMAPE)

    # plt.plot(data)
    # plt.plot(finalPred)
    # plt.show()
    #'''
    return trainPred, testPred, MAE, MRSE, SMAPE
# Esempio n. 16
# 0
def test_single_model(net_name, origin_data, filename, data_order, Select_num,
                      batch_size, l_forward, l_back, n, train_count, parapath,
                      forepath):
    """Evaluate a single (non-hybrid) forecasting network on the test split.

    Restores the saved ``net1`` weights from ``parapath`` and performs a
    recursive multi-step forecast: for each test window the network predicts
    one step, the prediction is fed back into the input window (oldest value
    dropped), and the process repeats for ``l_back`` steps.  Per-sample MAPE
    and the overall average are written to ``forepath + "/forecast.csv"``.

    Returns:
        The mean MAPE over all test samples.
    """

    #########################
    ##### load data
    #########################
    T0_train, T1_train, T2_train, \
    Forw_train, Pred_train, \
    T0_test, T1_test, T2_test, \
    Forw_test, Pred_test = load_data(filename=filename, split_rate=1 / 2,
                                     train_count=train_count,
                                     data_order=data_order,
                                     city_path=origin_data,
                                     store_seg="data/seg", week_num=24,
                                     store_path="data/train",
                                     select_num=Select_num, step=4,
                                     T=24, n=n, l_back=l_back, m=1,
                                     l_forward=l_forward)

    te_f_dataset = forw_Data(Forw_test, Pred_test)
    te_f_dataloader = torch.utils.data.DataLoader(te_f_dataset,
                                                  batch_size=batch_size,
                                                  shuffle=False,
                                                  sampler=None,
                                                  batch_sampler=None,
                                                  num_workers=0)

    #####################
    #  model
    #####################
    # Network selection; the conv variants use a fixed input width of 20
    # (presumably the convolution output size — TODO confirm against models).
    if net_name == 'short_LSTM':
        net1 = short_LSTM(input_dim=l_forward,
                          hidden_dim=2,
                          num_layers=1,
                          batch_size=batch_size).cuda()
    elif net_name == 'BiLSTM':
        net1 = BiLSTM(input_dim=l_forward,
                      hidden_dim=2,
                      num_layers=1,
                      batch_size=batch_size).cuda()
    elif net_name == 'conv_LSTM':
        net1 = conv_LSTM(input_dim=20,
                         hidden_dim=2,
                         num_layers=1,
                         batch_size=batch_size).cuda()
    elif net_name == 'conv_BiLSTM':
        net1 = conv_BiLSTM(input_dim=20,
                           hidden_dim=2,
                           num_layers=1,
                           batch_size=batch_size).cuda()

    loss_MAPE = calcMAPE()
    net1.load_state_dict(
        torch.load(parapath + '%s.pkl' % "net1",
                   map_location=torch.device('cuda:0')))
    net1.eval()

    #####################
    # test model
    #####################
    t3 = time.time()
    count_fore = 0
    result = []
    num = []
    # Inference only: no_grad avoids building autograd graphs for the
    # recursive forecast loop.
    with torch.no_grad():
        for data in te_f_dataloader:
            x = data[0].view(-1, 1, l_forward)
            y = data[1].view(-1, 1, l_back)
            count_fore += 1
            x, y = x.to(device), y.to(device)
            result1 = []
            for j in range(l_back):
                if j == 0:
                    x_f = x.view(batch_size, -1, 1).cuda()
                else:
                    # Feed the previous prediction back and drop the oldest
                    # value so the window keeps exactly l_forward entries.
                    x_f = torch.cat([x_f, pred_f], dim=2)[:, :, 1:]
                # Networks are driven with float64 input (as in training).
                x_f = x_f.to(torch.float64).cuda().view(-1, 1, l_forward)

                preds_f = net1(x_f)
                pred_f = preds_f.to(torch.float64).cuda().view(
                    batch_size, -1, 1)
                # build predicted sequence
                result1.append(preds_f)

            r = torch.cat(result1, 1)
            # per-sample MAPE of the recursive forecast
            loss = loss_MAPE(y.view(-1, l_back), r.view(-1, l_back))
            num.append(count_fore)
            result.append(loss)

    # Compute the mean BEFORE appending the summary row; the original
    # computed it afterwards and was only correct by the identity
    # mean([xs..., mean(xs)]) == mean(xs), which breaks if more rows are
    # ever added.
    loss_mean = torch.mean(torch.tensor(result))
    num.append("average")
    result.append(loss_mean)
    t4 = time.time()
    print("  mean loss   |test time (s)")
    print("%s|%s" % (loss_mean, t4 - t3))
    dataframe = pd.DataFrame({'num': num, 'loss': result})
    dataframe.to_csv(forepath + "/forecast" + ".csv", index=False, sep=',')
    # return average MAPE
    return loss_mean
Esempio n. 17
0
def test_DMNNM(net_name, loss_type, origin_data, filename, data_order,
               Select_num, batch_size, l_forward, l_back, n, train_count,
               parapath, forepath):
    """Evaluate the trained DMNNM hybrid model on the test split.

    Loads the saved weights of the prediction module (net1), the periodic
    adjustment module (net2) and the refitting module (net3), then performs a
    recursive ``l_back``-step forecast per test sample: each step's value
    (net1 output plus net2 adjustment) is fed back into the input window, and
    the assembled sequence is refitted by net3.  Per-sample MAPE and the
    average are written to ``forepath + "/forecast.csv"``.

    Note: ``loss_type`` is accepted for signature symmetry with the training
    routine but is not used during evaluation.

    Returns:
        The mean MAPE over all test samples.
    """
    #####################
    # load data
    #####################
    T0_train, T1_train, T2_train, \
    Forw_train, Pred_train, \
    T0_test, T1_test, T2_test, \
    Forw_test, Pred_test = load_data(filename=filename, split_rate=1 / 2,
                                     train_count=train_count,
                                     data_order=data_order,
                                     city_path=origin_data,
                                     store_seg="data/seg", week_num=24,
                                     store_path="data/train",
                                     select_num=Select_num, step=4,
                                     T=24, n=n, l_back=l_back, m=1,
                                     l_forward=l_forward)

    te_f_dataset = forw_Data(Forw_test, Pred_test)
    te_f_dataloader = torch.utils.data.DataLoader(te_f_dataset,
                                                  batch_size=batch_size,
                                                  shuffle=False, sampler=None,
                                                  batch_sampler=None,
                                                  num_workers=0)

    te_p_dataset = period_Data(T0_test, T1_test, T2_test)
    te_p_dataloader = torch.utils.data.DataLoader(te_p_dataset,
                                                  batch_size=batch_size,
                                                  shuffle=False, sampler=None,
                                                  batch_sampler=None,
                                                  num_workers=0)

    #####################
    # network selection in prediction module
    #####################
    if net_name == 'short_LSTM':
        net1 = short_LSTM(input_dim=l_forward, hidden_dim=2, num_layers=1,
                          batch_size=batch_size).cuda()
    elif net_name == 'conv_LSTM':
        net1 = conv_LSTM(input_dim=20, hidden_dim=2, num_layers=1,
                         batch_size=batch_size).cuda()
    elif net_name == 'BiLSTM':
        net1 = BiLSTM(input_dim=l_forward, hidden_dim=2, num_layers=1,
                      batch_size=batch_size).cuda()
    elif net_name == 'conv_BiLSTM':
        # NOTE: input_dim must match what train_DMNNM used when the weights
        # were saved (l_forward there).
        net1 = conv_BiLSTM(input_dim=l_forward, hidden_dim=2, num_layers=1,
                           batch_size=batch_size).cuda()
    net2 = period_net(input_dim=n, hidden_dim=2, num_layers=1,
                      batch_size=batch_size).cuda()
    net3 = metrix(in_dim=l_back, n_hidden_1=2 * l_back, n_hidden_2=2 * l_back,
                  out_dim=l_back).cuda()

    loss_MAPE = calcMAPE()
    # load trained weights for all three sub-networks
    net1.load_state_dict(torch.load(parapath + '%s.pkl' % "net1",
                                    map_location=torch.device('cuda:0')))
    net2.load_state_dict(torch.load(parapath + '%s.pkl' % "net2",
                                    map_location=torch.device('cuda:0')))
    net3.load_state_dict(torch.load(parapath + '%s.pkl' % "net3",
                                    map_location=torch.device('cuda:0')))
    net1.eval()
    net2.eval()
    net3.eval()

    #####################
    # test model
    #####################
    t3 = time.time()
    count_fore = 0
    num = []
    loss_result = []
    # Inference only: no_grad avoids building autograd graphs for the
    # recursive forecast loop.
    with torch.no_grad():
        # forecast all predicting sets
        for data1, data2 in zip(te_f_dataloader, te_p_dataloader):
            x = data1[0].cuda()
            y = data1[1].view(-1, 1, l_back).cuda()
            t0 = data2[0].view(-1, n, l_back).cuda()
            t1 = data2[1].view(-1, n, l_back).cuda()
            t2 = data2[2].view(-1, n, l_back).cuda()
            count_fore += 1
            result = []

            for j in range(l_back):
                # periodic features for step j
                a, b, c = t0[:, :, j], t1[:, :, j], t2[:, :, j]
                if j == 0:
                    x_f = x.view(-1, l_forward, 1)
                else:
                    # Feed the previous prediction back and drop the oldest
                    # value so the window keeps exactly l_forward entries.
                    x_f = torch.cat([x_f, pred_f], dim=2)[:, :, 1:]
                # Networks are driven with float64 input (as in training).
                x_f = x_f.to(torch.float64).cuda().view(-1, 1, l_forward)

                # prediction module output plus dynamic periodic adjustment
                preds_p = net2(a, b, c).view(-1, 1, 1)
                preds_f = net1(x_f)
                pred_f = preds_f.to(torch.float64).cuda().view(-1, 1, 1)
                pred = torch.add(preds_f, preds_p)
                # build predicted sequence
                result.append(pred)

            # refit the assembled predicted sequence
            result1 = torch.cat(result, 0).view(1, -1)
            r = net3(result1)
            # per-sample MAPE of the refitted forecast
            loss = loss_MAPE(y.view(-1, l_back), r.view(-1, l_back))
            num.append(count_fore)
            loss_result.append(loss)

    # BUG FIX: the original averaged `result` — the raw per-step prediction
    # tensors of the LAST sample — instead of `loss_result`, the per-sample
    # losses.  Average the losses, and do it before appending the summary row.
    loss_mean = torch.mean(torch.tensor(loss_result))
    num.append("average")
    loss_result.append(loss_mean)
    t4 = time.time()
    print("  mean loss   |test time (s)")
    print("%s|%s" % (loss_mean, t4 - t3))
    dataframe = pd.DataFrame({'num': num, 'loss': loss_result})
    dataframe.to_csv(forepath + "/forecast" + ".csv", index=False, sep=',')
    # return average MAPE
    return loss_mean
Esempio n. 18
0
def train_DMNNM(lr_,net_name,loss_type,origin_data,filename,data_order,num_epochs,Select_num,batch_size,
                l_forward,l_back,n,train_count,parapath):
    '''
    Train the DMNNM hybrid forecaster and save its weights.

    net1 (prediction module) and net2 (periodic adjustment module) are
    updated jointly on every single-step prediction error; net3 (refitting
    module) is then updated once per batch on the refitted sequence, using
    MAPE optionally combined with -k * cosine similarity (selected via
    ``loss_type``).  Weights are saved as net1/net2/net3.pkl under
    ``parapath`` and per-epoch training MAPE is written to a CSV there.
    '''

    # Load pre-segmented windows; only the training halves are used here.
    T0_train,T1_train,T2_train, \
    Forw_train,Pred_train,\
    T0_test,T1_test,T2_test,\
    Forw_test,Pred_test = load_data(filename = filename,split_rate = 1/2,train_count =train_count,data_order=data_order,
                                city_path =origin_data,store_seg ="data/seg", week_num = 24,
                                store_path = "data/train",select_num =Select_num,step =4,T = 24 ,n = n,
                                l_back = l_back,m =1,l_forward =l_forward )

    tr_f_dataset = forw_Data(Forw_train, Pred_train)
    tr_f_dataloader = torch.utils.data.DataLoader(tr_f_dataset, batch_size=batch_size, shuffle=False, sampler=None, batch_sampler=None, num_workers=0)

    tr_p_dataset = period_Data(T0_train,T1_train,T2_train)
    tr_p_dataloader = torch.utils.data.DataLoader(tr_p_dataset, batch_size=batch_size, shuffle=False, sampler=None, batch_sampler=None, num_workers=0)


#%%  
#####################
#  model
#####################

    # Prediction-module selection; conv_LSTM uses a fixed input width of 20
    # (presumably the convolution output size — TODO confirm against models).
    if net_name == 'short_LSTM':
        net1 = short_LSTM(input_dim =l_forward, hidden_dim=2, num_layers =1,batch_size= batch_size).cuda()
    elif net_name == 'conv_LSTM':
        net1 = conv_LSTM(input_dim =20, hidden_dim=2, num_layers =1,batch_size= batch_size).cuda()
    elif net_name == 'BiLSTM':
        net1 = BiLSTM(input_dim =l_forward, hidden_dim=2, num_layers =1,batch_size= batch_size).cuda()
    elif net_name == 'conv_BiLSTM':
        net1 = conv_BiLSTM(input_dim =l_forward, hidden_dim=2, num_layers =1,batch_size= batch_size).cuda()
#    elif net_name == 'LSTM_with_Attention':
#        net1 = LSTM_with_Attention(input_dim =l_forward, hidden_dim=2, num_layers =1,batch_size= batch_size).cuda()
#    elif net_name == 'MLP':    
#        net1 = MLP(input_dim = l_forward,common_dim =2).cuda()
    net2 = period_net(input_dim =n, hidden_dim=2, num_layers =1,batch_size= batch_size).cuda()
    net3 =  metrix(in_dim = l_back,n_hidden_1 =2*l_back, n_hidden_2 =2*l_back, out_dim = l_back).cuda()

    # Initialize losses and one optimizer per sub-network so each can be
    # stepped independently.
    # NOTE(review): loss_MSE is only referenced by commented-out variants
    # below, and `size_average` is deprecated in modern torch.
    loss_MSE = torch.nn.MSELoss(size_average=False)
    loss_MAPE  = calcMAPE()
    loss_COS  = calcCOSine()
    optimizer1 = torch.optim.Adam(list(net1.parameters()), lr=lr_)
    optimizer2 = torch.optim.Adam(list(net2.parameters()), lr=lr_)
    optimizer3 = torch.optim.Adam(list(net3.parameters()), lr=lr_)

#%% 
#####################
# Train model
#####################
    # Wall-clock start of the whole training run.
    t1 = time.time()
    net1.train()
    net2.train()
    net3.train()
    list_train = []
    num = []

    for i in range(num_epochs):
        count = 0
        t3 = time.time()
        train_loss = 0
        #train all training sets
        for data1,data2 in (zip(tr_f_dataloader,tr_p_dataloader)):
            x = data1[0]
            y  = data1[1].view(-1,1,l_back)
            # Periodic feature tensors for the three period scales.
            # NOTE(review): `t1` here shadows the wall-clock start timestamp
            # assigned above, so the final `t2-t1` "training time" at the end
            # of this function subtracts a tensor, not the start time — one
            # of the two names should be renamed.
            t0 = data2[0].view(batch_size,n,-1)
            t1 = data2[1].view(batch_size,n,-1)
            t2 = data2[2].view(batch_size,n,-1)
            count = count+ 1
            x,y = x.cuda(),y.cuda()
            t0,t1,t2= t0.cuda(),t1.cuda(),t2.cuda()
            result = []

            # Recursive multi-step forecast: each predicted value is fed
            # back into the input window for the next step.
            for j in range(l_back):  
                # insert predicted value into window data
                x0,g_truth = x.view(batch_size,-1,1).cuda(),(y[:,:,j]).cuda()
                a,b,c, = t0[:,:,j].cuda(),t1[:,:,j].cuda(),t2[:,:,j].cuda()
                if j ==0:
                    x_f = x0
                else:
                    x_f = torch.cat([(torch.tensor(x_f,dtype =torch.double)),(torch.tensor(preds_f,dtype =torch.double))],dim = 2)    
                # Keep the sliding window at exactly l_forward entries by
                # dropping the oldest value after each append.
                if len(x_f[0][0]) > l_forward :
                    x_mid = x_f
                    x_f = x_mid[:,:,1:len(x_f[0][0])]
                x_f = (torch.tensor(x_f,dtype =torch.float64).cuda()).view(-1,1,l_forward)      

            #forward 
            #output of prediction module and adjustment module: preds_f and preds_p
            #dynamically adjust each single predicted value

                preds_p = net2(a,b,c).view(-1,1,1)
                preds_f = net1(x_f.cuda()).view(-1,1,1)
            #adjust predicted value
                g_truth =g_truth.float()
                pred_ = torch.add(preds_f, preds_p)
            #calculate loss in single-point prediction phase
                loss1 = loss_MAPE(g_truth,pred_)
                #loss1 = loss_MSE(pred_.view(-1,1),g_truth.view(-1,1))
                optimizer1.zero_grad()
                optimizer2.zero_grad()
            #backward; retain_graph so the graph through pred_ can still be
            #used by the sequence-level loss of net3 below
                loss1.backward(retain_graph=True)
            #update parameters of net1 and net2
                optimizer1.step()
                optimizer2.step()
                result.append(pred_)

            result_ = torch.cat([s for s in result], 1)
            #refit the predicted sequence with net3
            r = net3(result_.view(-1,l_back))
            optimizer3.zero_grad()

            #choose hybrid loss function with different k
            #(loss = MAPE - k * cosine similarity)
            if loss_type =="MAPE":
                loss = loss_MAPE(y.view(-1,l_back),r.view(-1,l_back)) 
                #loss = loss_MSE(r.view(-1,l_back),y.view(-1,l_back))
            elif loss_type =="k=0.5":
                loss = torch.add(loss_MAPE(y.view(-1,l_back),r.view(-1,l_back)),(-0.5)*loss_COS(y.view(-1,l_back),r.view(-1,l_back)))
            elif loss_type =="k=0.75":
                loss = torch.add(loss_MAPE(y.view(-1,l_back),r.view(-1,l_back)),(-0.75)*loss_COS(y.view(-1,l_back),r.view(-1,l_back)))
            elif loss_type =="k=1":
                loss = torch.add(loss_MAPE(y.view(-1,l_back),r.view(-1,l_back)),(-1)*loss_COS(y.view(-1,l_back),r.view(-1,l_back)))
            elif loss_type =="k=1.25":
                loss = torch.add(loss_MAPE(y.view(-1,l_back),r.view(-1,l_back)),(-1.25)*loss_COS(y.view(-1,l_back),r.view(-1,l_back)))                
            elif loss_type =="k=1.5":
                loss = torch.add(loss_MAPE(y.view(-1,l_back),r.view(-1,l_back)),(-1.5)*loss_COS(y.view(-1,l_back),r.view(-1,l_back)))
            #backward
            loss.backward()
            #update parameters of net3
            optimizer3.step()
            train_loss =train_loss + loss_MAPE(y.view(-1,l_back),r.view(-1,l_back)) 
            # NOTE(review): if `log` is a stdlib logging.Logger, extra
            # positional args are %-format args; this message has no
            # placeholders, so these calls will produce logging errors —
            # verify the type of `log`.
            log.info("Hybrid:","data_order:%i"%count,loss_MAPE(y.view(-1,l_back),r.view(-1,l_back)))

        # NOTE(review): these divide the accumulated batch loss by
        # num_epochs, not by the number of batches (`count`) — verify which
        # average is intended.
        log.info(train_loss/num_epochs)
        print("epoch:%i,"%i,"MAPE_loss",train_loss/num_epochs)
        list_train.append(train_loss/num_epochs)
        t4 = time.time()
        log.info("training time:",t4-t3)
        #print("training time:",t4-t3)
        num.append(i)
    # Persist per-epoch training MAPE and all three sub-network weights.
    dataframe = pd.DataFrame({"num":num,"train_MAPE":list_train})
    dataframe.to_csv(parapath+"train_MAPE"+".csv",index=False,sep=',')
    torch.save(net1.state_dict(),parapath+'%s.pkl'%"net1")
    torch.save(net2.state_dict(),parapath+'%s.pkl'%"net2")
    torch.save(net3.state_dict(),parapath+'%s.pkl'%"net3")

    t2 = time.time()
    # NOTE(review): t1 was rebound to a tensor inside the batch loop, so
    # this "training time" is t2 minus a tensor, not elapsed seconds.
    print("training time:",t2-t1)
    log.info("training time:",t2-t1)