import datetime
import sys

import numpy as np
import numpy.linalg as lin
import matplotlib.pyplot as plt

# `ft` is this repo's helper module (output, loss, Phase1/2/3, GaussianKernel,
# EstimatedNoiseVariance, loss_with_prediction_array); the import name itself
# is an assumption.
import ft


def predict(X, kernelMeans, kernelSigma, kernelWeights):
    # vector prediction
    if isinstance(X[0], list):
        n = len(X)
        Yest = []
        for i in range(n):
            Yest.append(ft.output(X[i], kernelMeans, kernelSigma, kernelWeights))
        Yest = np.array(Yest)
        return Yest
    # scalar prediction
    else:
        return ft.output(X, kernelMeans, kernelSigma, kernelWeights)
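
# Usage sketch (hypothetical inputs; assumes the kernel parameters were
# already fitted by train/GKFN below):
#   y_scalar = predict(x_single, kernelMeans, kernelSigma, kernelWeights)
#   Y_vector = predict(X_list_of_lists, kernelMeans, kernelSigma, kernelWeights)
# Note: the vector branch triggers only when X[0] is a Python list; rows of a
# 2-D numpy array would fall through to the scalar branch.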
def updateWeights(X, y, num_kernels, kernelMeans, kernelSigma, kernelWeights):
    # phase 2
    B = np.identity(num_kernels)
    e = y - ft.output(X, kernelMeans, kernelSigma, kernelWeights)
    B, kernelSigma = ft.Phase2(X, y, e, num_kernels, B, kernelMeans,
                               kernelSigma, kernelWeights)

    # phase 3
    B = np.identity(num_kernels)
    e = y - ft.output(X, kernelMeans, kernelSigma, kernelWeights)
    B, kernelWeights = ft.Phase3(X, y, e, num_kernels, B, kernelMeans,
                                 kernelSigma, kernelWeights)

    return kernelMeans, kernelSigma, kernelWeights
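
# Minimal sketch of one online update, as used by rolling_forecast below:
# after a new observation (x_t, y_t) arrives (hypothetical names), the kernel
# widths are refreshed in Phase 2 and the weights in Phase 3, each with B
# reset to the identity:
#   kernelMeans, kernelSigma, kernelWeights = updateWeights(
#       x_t, y_t, num_kernels, kernelMeans, kernelSigma, kernelWeights)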
def rolling_forecast(teX, teY, teYdate, num_kernels, kernelMeans, kernelSigma,
                     kernelWeights, formatter, locater):
    """
    model test, rolling forecast
    """

    # forecast and update
    n = len(teX)
    Yest = []

    for i in range(n):
        # forecast
        Yhat = ft.output(teX[i], kernelMeans, kernelSigma, kernelWeights)
        Yest.append(Yhat)
        # update
        kernelMeans, kernelSigma, kernelWeights = \
            updateWeights(teX[i], teY[i],
                          num_kernels,
                          kernelMeans, kernelSigma, kernelWeights)

    # evaluate
    f = open('result.txt', 'w')
    err, rmse, rsq, mae = ft.loss_with_prediction_array(teY, Yest)
    print('rmse: %f, R2: %f, MAE: %f' % (rmse, rsq, mae))
    f.write('rmse: %f, R2: %f, MAE: %f' % (rmse, rsq, mae) + '\n')
    """
        plot
    """
    dates = [datetime.datetime.strptime(d, "%Y-%m-%d").date() for d in teYdate]
    plt.gca().xaxis.set_major_formatter(formatter)
    plt.gca().xaxis.set_major_locator(locater)

    pre = teY - err
    plt.plot(dates, teY, 'r')
    plt.plot(dates, pre, 'b')
    plt.legend(["Test Data", "Prediction"])
    plt.savefig("./kernel" + str(num_kernels) + "_prediction_graph.png")
    plt.show()

    f.close()
    return rmse, rsq, mae
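
# Usage sketch for the date-axis variant above. The formatter/locater pair is
# assumed to be standard matplotlib date helpers (the caller's choice, not
# fixed by this code):
#   import matplotlib.dates as mdates
#   formatter = mdates.DateFormatter('%Y-%m')
#   locater = mdates.MonthLocator()
#   rmse, rsq, mae = rolling_forecast(teX, teY, teYdate, num_kernels,
#                                     kernelMeans, kernelSigma, kernelWeights,
#                                     formatter, locater)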
def rolling_forecast(teX, teY, num_kernels, kernelMeans, kernelSigma, kernelWeights, loop):
    """
    model test, rolling forecast
    """

    # forecast and update
    n = len(teX)
    Yest = []

    for i in range(n):
        # forecast
        Yhat = ft.output(teX[i], kernelMeans, kernelSigma, kernelWeights)
        Yest.append(Yhat)
        # update (the `loop` argument is not forwarded: updateWeights, as
        # defined above, does not accept it)
        kernelMeans, kernelSigma, kernelWeights = \
            updateWeights(teX[i], teY[i],
                          num_kernels,
                          kernelMeans, kernelSigma, kernelWeights)

    # evaluate
    f = open('result.txt', 'w')
    err, rmse, rsq, mae = ft.loss_with_prediction_array(teY, Yest)
    print('rmse: %f, R2: %f, MAE: %f' % (rmse, rsq, mae))
    f.write('rmse: %f, R2: %f, MAE: %f' % (rmse, rsq, mae) + '\n')

    # plot
    pre = teY - err
    plt.plot(teY, 'r')
    plt.plot(pre, 'b')
    plt.legend(["Test Data", "Prediction"])
    plt.savefig("./kernel" + str(num_kernels) + "_prediction_graph.png")
    plt.show()

    f.close()
    return rmse, rsq, mae
def rolling_forecast(teX, teY, num_kernels, kernelMeans, kernelSigma,
                     kernelWeights):
    """
    model test, rolling forecast
    """

    # forecast and update
    n = len(teX)
    Yest = []

    for i in range(n):
        # forecast
        Yhat = ft.output(teX[i], kernelMeans, kernelSigma, kernelWeights)
        Yest.append(Yhat)
        # update
        kernelMeans, kernelSigma, kernelWeights = \
            updateWeights(teX[i], teY[i],
                          num_kernels,
                          kernelMeans, kernelSigma, kernelWeights)

    # evaluate
    err, rmse, rsq, mae = ft.loss_with_prediction_array(teY, Yest)

    return Yest, rmse, rsq, mae
def predict(X, kernelMeans, kernelSigma, kernelWeights):
    n = len(X)
    Yest = []
    for i in range(n):
        Yest.append(ft.output(X[i], kernelMeans, kernelSigma, kernelWeights))
    return Yest
def train(trX, trY, teX, teY, epochs, num_kernels, kernelMeans, kernelSigma,
          kernelWeights):
    """model training"""
    log = open('./log.txt', 'w')
    # phase 2 & phase 3: learning kernel parameters

    # init
    kernelMeans = kernelMeans[:num_kernels]
    kernelSigma = kernelSigma[:num_kernels]
    kernelWeights = kernelWeights[:num_kernels]

    # history
    epochs_arr = []
    training_err = []
    testing_err = []
    min_err = sys.float_info.max
    best_kernelMeans = None
    best_kernelSigma = None
    best_kernelWeights = None
    best_epoch = None

    for epoch in range(1, epochs + 1):
        # phase 2
        B = np.identity(num_kernels)

        for i in range(len(trX)):
            x = trX[i]
            y = trY[i]
            e = y - ft.output(x, kernelMeans, kernelSigma, kernelWeights)

            if i % 100 == 0:
                err, rmse, rsq, mae = ft.loss(trX, trY, kernelMeans,
                                              kernelSigma, kernelWeights)
                log.write('Phase 2 step rmse = %f, rsq = %f\n' % (rmse, rsq))

            B, kernelSigma = ft.Phase2(x, y, e, num_kernels, B, kernelMeans,
                                       kernelSigma, kernelWeights)

        # phase 3
        B = np.identity(num_kernels)

        for i in range(len(trX)):
            x = trX[i]
            y = trY[i]
            e = y - ft.output(x, kernelMeans, kernelSigma, kernelWeights)

            if i % 100 == 0:
                err, rmse, rsq, mae = ft.loss(trX, trY, kernelMeans,
                                              kernelSigma, kernelWeights)
                log.write('Phase 3 step rmse = %f, rsq = %f\n' % (rmse, rsq))

            B, kernelWeights = ft.Phase3(x, y, e, num_kernels, B, kernelMeans,
                                         kernelSigma, kernelWeights)

        # check current epoch
        err, rmse, rsq, mae = ft.loss(trX, trY, kernelMeans, kernelSigma,
                                      kernelWeights)
        terr, trmse, trsq, trmae = ft.loss(teX, teY, kernelMeans, kernelSigma,
                                           kernelWeights)
        training_err.append(rmse)
        testing_err.append(trmse)
        epochs_arr.append(epoch)
        print("EPOCH {}: training rmse {}, test rmse {}".format(
            epoch, rmse, trmse))

        # keep a snapshot of the best parameters (selection metric: test rmse);
        # copies guard against later in-place updates
        if trmse < min_err:
            min_err = trmse
            best_epoch = epoch
            best_kernelMeans = np.copy(kernelMeans)
            best_kernelSigma = np.copy(kernelSigma)
            best_kernelWeights = np.copy(kernelWeights)

    print("EPOCH {} selected.".format(best_epoch))
    plt.plot(epochs_arr, testing_err)
    plt.savefig("./kernel" + str(num_kernels) + "_training_graph.png")
    plt.show()

    log.close()
    return num_kernels, best_kernelMeans, best_kernelSigma, best_kernelWeights
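
# Usage sketch: run a fixed number of epochs and keep the parameters from the
# epoch with the lowest test rmse (the epoch/kernel counts here are
# illustrative):
#   num_kernels, kernelMeans, kernelSigma, kernelWeights = train(
#       trX, trY, teX, teY, 100, 30, kernelMeans, kernelSigma, kernelWeights)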
def train(data, trX, trY, teX, teY, te_index, epochs, num_kernels, kernelMeans,
          kernelSigma, kernelWeights, tau, E, P, target_P, mode):
    """model training"""
    log = open('./log.txt', 'w')
    # phase 2 & phase 3: learning kernel parameters

    # init
    kernelMeans = kernelMeans[:num_kernels]
    kernelSigma = kernelSigma[:num_kernels]
    kernelWeights = kernelWeights[:num_kernels]

    # history
    epochs_arr = []
    training_err = []
    testing_err = []
    max_rsq = 0
    best_kernelMeans = None
    best_kernelSigma = None
    best_kernelWeights = None
    best_epoch = None
    best_Yest = None
    f = open('result.txt', 'w')

    for epoch in range(1, epochs + 1):
        # phase 2
        B = np.identity(num_kernels)

        for i in range(len(trX)):
            x = trX[i]
            y = trY[i]
            e = y - ft.output(x, kernelMeans, kernelSigma, kernelWeights)

            if i % 100 == 0:
                err, rmse, rsq, mae = ft.loss(trX, trY, kernelMeans,
                                              kernelSigma, kernelWeights)
                log.write('Phase 2 step rmse = %f, rsq = %f\n' % (rmse, rsq))

            B, kernelSigma = ft.Phase2(x, y, e, num_kernels, B, kernelMeans,
                                       kernelSigma, kernelWeights)

        # phase 3
        B = np.identity(num_kernels)

        for i in range(len(trX)):
            x = trX[i]
            y = trY[i]
            e = y - ft.output(x, kernelMeans, kernelSigma, kernelWeights)

            if i % 100 == 0:
                err, rmse, rsq, mae = ft.loss(trX, trY, kernelMeans,
                                              kernelSigma, kernelWeights)
                log.write('Phase 3 step rmse = %f, rsq = %f\n' % (rmse, rsq))

            B, kernelWeights = ft.Phase3(x, y, e, num_kernels, B, kernelMeans,
                                         kernelSigma, kernelWeights)

        err, rmse, rsq, mae = ft.loss(trX, trY, kernelMeans, kernelSigma,
                                      kernelWeights)
        terr, trmse, trsq, trmae = ft.loss(teX, teY, kernelMeans, kernelSigma,
                                           kernelWeights)
        print("EPOCH {}: training r2 {}, test r2 {}".format(epoch, rsq, trsq))

        if epoch == 1:
            max_rsq = trsq
            best_epoch = epoch
            best_kernelMeans = np.copy(kernelMeans)
            best_kernelSigma = np.copy(kernelSigma)
            best_kernelWeights = np.copy(kernelWeights)

        # evaluate at checkpoint epochs 75, 150, and 300
        if epoch in (75, 150, 300):

            Yest, termse, tersq, temae = evaluate(data, teX, teY, te_index,
                                                  kernelMeans, kernelSigma,
                                                  kernelWeights, tau, E, P,
                                                  target_P, mode)
            print("Evaluation {}: rmse {} r2 {},MAE {}".format(
                epoch, termse, tersq, temae))
            f.write(
                format('epoch : %d, rmse: %f, R2: %f, MAE: %f') %
                (epoch, termse, tersq, temae) + '\n')

            # keep a snapshot of the best parameters (selection metric: test R2);
            # copies guard against later in-place updates
            if tersq > max_rsq:
                max_rsq = tersq
                best_Yest = Yest
                best_epoch = epoch
                best_kernelMeans = np.copy(kernelMeans)
                best_kernelSigma = np.copy(kernelSigma)
                best_kernelWeights = np.copy(kernelWeights)

    f.close()
    print("EPOCH {} selected.".format(best_epoch))
    log.close()
    return best_Yest, num_kernels, best_kernelMeans, best_kernelSigma, best_kernelWeights, best_epoch
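
# Call sketch for the checkpointing variant above; `evaluate` and the
# pass-through arguments (tau, E, P, target_P, mode) are defined elsewhere in
# this repo, so the values here are placeholders:
#   best_Yest, num_kernels, kM, kS, kW, best_epoch = train(
#       data, trX, trY, teX, teY, te_index, 300, num_kernels,
#       kernelMeans, kernelSigma, kernelWeights, tau, E, P, target_P, mode)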
def GKFN(trX, trY, teX, teY, alpha, loop, Kernel_Num):
    """model training"""

    # initial model parameters
    m = 0  # kernel count
    kernelMeans = None
    kernelSigma = None
    kernelWeights = None
    invPSI = None

    # initial kernel recruiting

    # first and second kernels: the indices of the largest and smallest y values
    idx1 = np.argmax(trY)
    x1 = trX[idx1]
    y1 = trY[idx1]
    e1 = y1

    idx2 = np.argmin(trY)
    x2 = trX[idx2]
    y2 = trY[idx2]
    e2 = y2

    m += 2
    kernelWeights = np.array([e1, e2])
    kernelMeans = np.array([x1, x2])

    dist = np.sqrt(np.sum(np.square(x1 - x2)))  # distance between x1 and x2
    sig1, sig2 = alpha * dist, alpha * dist
    kernelSigma = np.array([sig1, sig2])
    initial_PSI = np.empty((2, 2))
    initial_PSI[0][0] = ft.GaussianKernel(x1, kernelMeans[0], sig1)
    initial_PSI[0][1] = ft.GaussianKernel(x1, kernelMeans[1], sig2)
    initial_PSI[1][0] = ft.GaussianKernel(x2, kernelMeans[0], sig1)
    initial_PSI[1][1] = ft.GaussianKernel(x2, kernelMeans[1], sig2)

    invPSI = lin.inv(initial_PSI)
    init_y = np.array([y1, y2])
    kernelWeights = np.matmul(invPSI, init_y)
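    # The seed weights solve PSI @ w = y exactly, so the initial two-kernel
    # network interpolates both anchor points; sanity check:
    #   assert np.allclose(np.matmul(initial_PSI, kernelWeights), init_y)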

    # Phase 1
    estv = ft.EstimatedNoiseVariance(trY)
    # print(np.sqrt(estv))

    trainerr = []
    validerr = []

    # train while recruiting more kernels
    while True:
        err, rmse, rsq = ft.loss(trX, trY, kernelMeans, kernelSigma,
                                 kernelWeights)
        # verr, vrmse, vrsq = ft.loss(vaX, vaY, kernelMeans, kernelSigma, kernelWeights)
        # print('train: Phase1: m = %d, rmse = %f, rsq = %f\n'
        #       'validation Phase1: m = %d, rmse = %f, rsq = %f'
        #       % (m, rmse, rsq, m, vrmse, vrsq))

        trainerr.append(rmse)
        # validerr.append(vrmse)

        if m > Kernel_Num:
            break

        # alternative stopping criteria (disabled):
        # if rmse ** 2 < estv:
        #     break
        # if rsq > 0.9:
        #     break
        # if np.abs(temp - rsq) < 1e-5:
        #     break
        # temp = rsq

        if m % 10 == 0:
            print(m)

        idx = np.argmax(np.abs(err), axis=0)

        x = trX[idx]
        y = trY[idx]
        e = err[idx]

        m, kernelMeans, kernelSigma, kernelWeights, invPSI = ft.Phase1(
            x, y, e, m, alpha, kernelMeans, kernelSigma, kernelWeights, invPSI)

    # # error vs. number of kernels
    # plt.plot(trainerr, 'r')
    # plt.plot(validerr, 'b')
    # plt.xticks(np.arange(0, 100, 5))  # x-axis ticks
    # plt.show()

    # how many kernels to keep?
    m = Kernel_Num

    kernelMeans = kernelMeans[:m]
    kernelSigma = kernelSigma[:m]
    kernelWeights = kernelWeights[:m]

    #Phase 2 & Phase3 : kernel parameter 학습
    for i in range(loop):
        B = None
        B = np.identity(m)

        for i in range(len(trX)):
            x = trX[i]
            y = trY[i]
            e = y - ft.output(x, kernelMeans, kernelSigma, kernelWeights)

            if i % 100 == 0:
                err, rmse, rsq = ft.loss(trX, trY, kernelMeans, kernelSigma,
                                         kernelWeights)
                print('Phase 2 step rmse = %f, rsq = %f' % (rmse, rsq))

            B, kernelSigma = ft.Phase2(x, y, e, m, B, kernelMeans, kernelSigma,
                                       kernelWeights)

        B = np.identity(m)

        for i in range(len(trX)):
            x = trX[i]
            y = trY[i]
            e = y - ft.output(x, kernelMeans, kernelSigma, kernelWeights)

            if i % 100 == 0:
                err, rmse, rsq = ft.loss(trX, trY, kernelMeans, kernelSigma,
                                         kernelWeights)
                print('Phase 3 step rmse = %f, rsq = %f' % (rmse, rsq))

            B, kernelWeights = ft.Phase3(x, y, e, m, B, kernelMeans,
                                         kernelSigma, kernelWeights)
    """model test"""

    err, rmse, rsq = ft.loss(teX, teY, kernelMeans, kernelSigma, kernelWeights)
    print(format('rmse: %f, R2: %f') % (rmse, rsq))

    # pre = teY - err
    # plt.plot(teY, 'r')
    # plt.plot(pre, 'b')
    # plt.show()

    return rmse, rsq
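
# End-to-end usage sketch for GKFN (the alpha/loop/Kernel_Num values are
# illustrative, not from the source):
#   rmse, rsq = GKFN(trX, trY, teX, teY, alpha=0.8, loop=3, Kernel_Num=50)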