Ejemplo n.º 1
0
def cluster_method(individual, dataset, **kwargs):
    """Evaluate one candidate hyperparameter set for a WeightedHighOrderFTS
    model using sliding-window validation over *dataset*.

    Parameters
    ----------
    individual : dict
        Genotype with keys 'mf' (1=trimf, 2=trapmf, 3=gaussmf),
        'partitioner' (1=Grid, 2=Entropy), 'npart', 'lags', 'alpha'
        and 'order'.
    dataset : sequence
        The time series to evaluate on.
    kwargs : optional
        'window_size' (default 800), 'train_rate' (default .8),
        'increment_rate' (default .2) and 'parameters' (default {}).

    Returns
    -------
    dict with the original individual plus the mean RMSE and mean model
    size across all sliding windows.
    """
    from pyFTS.common import Util, Membership
    from pyFTS.models import hofts
    from pyFTS.partitioners import Grid, Entropy
    from pyFTS.benchmarks import Measures
    import numpy as np

    # Map the 'mf' gene to a membership function; the Gaussian option is
    # only honored when the Entropy partitioner was NOT selected, falling
    # back to triangular otherwise.
    if individual['mf'] == 1:
        mf = Membership.trimf
    elif individual['mf'] == 2:
        mf = Membership.trapmf
    elif individual['mf'] == 3 and individual['partitioner'] != 2:
        mf = Membership.gaussmf
    else:
        mf = Membership.trimf

    window_size = kwargs.get('window_size', 800)
    train_rate = kwargs.get('train_rate', .8)
    increment_rate = kwargs.get('increment_rate', .2)
    parameters = kwargs.get('parameters', {})

    errors = []
    sizes = []

    for count, train, test in Util.sliding_window(dataset,
                                                  window_size,
                                                  train=train_rate,
                                                  inc=increment_rate):

        if individual['partitioner'] == 1:
            partitioner = Grid.GridPartitioner(data=train,
                                               npart=individual['npart'],
                                               func=mf)
        elif individual['partitioner'] == 2:
            # Entropy partitioning is given a floor of 10 partitions.
            npart = individual['npart'] if individual['npart'] > 10 else 10
            partitioner = Entropy.EntropyPartitioner(data=train,
                                                     npart=npart,
                                                     func=mf)

        model = hofts.WeightedHighOrderFTS(partitioner=partitioner,
                                           lags=individual['lags'],
                                           alpha_cut=individual['alpha'],
                                           order=individual['order'])
        model.fit(train)

        forecasts = model.predict(test)

        # Skip the first max_lag points of the test slice: a high-order
        # model cannot forecast them.
        rmse = Measures.rmse(test[model.max_lag:], forecasts)

        errors.append(rmse)
        sizes.append(len(model))

    return {
        'parameters': individual,
        'rmse': np.nanmean(errors),
        # BUG FIX: the original averaged the scalar 'size' of the last
        # window only; average the per-window sizes collected above.
        'size': np.nanmean(sizes)
    }
Ejemplo n.º 2
0
def cluster_method(individual, train, test):
    """Fit the WeightedHighOrderFTS described by *individual* on *train*
    and score it against *test*.

    Returns the tuple (individual, rmse, size, mape, u), where size is
    the number of rules in the fitted model.
    """
    from pyFTS.common import Util, Membership
    from pyFTS.models import hofts
    from pyFTS.partitioners import Grid, Entropy
    from pyFTS.benchmarks import Measures

    # Resolve the membership-function gene. Gaussian is used only when
    # the Entropy partitioner was not chosen; everything else defaults
    # to triangular.
    if individual['mf'] == 2:
        mf = Membership.trapmf
    elif individual['mf'] == 3 and individual['partitioner'] != 2:
        mf = Membership.gaussmf
    else:
        mf = Membership.trimf

    if individual['partitioner'] == 1:
        partitioner = Grid.GridPartitioner(data=train,
                                           npart=individual['npart'],
                                           func=mf)
    elif individual['partitioner'] == 2:
        # Entropy partitioning gets a floor of 10 partitions.
        partitioner = Entropy.EntropyPartitioner(data=train,
                                                 npart=max(individual['npart'], 10),
                                                 func=mf)

    model = hofts.WeightedHighOrderFTS(partitioner=partitioner,
                                       lags=individual['lags'],
                                       alpha_cut=individual['alpha'],
                                       order=individual['order'])
    model.fit(train)

    rmse, mape, u = Measures.get_point_statistics(test, model)

    return individual, rmse, len(model), mape, u
Ejemplo n.º 3
0
def phenotype(individual, train):
    """Translate the genotype *individual* into a WeightedHighOrderFTS
    model fitted on *train*.

    Returns the fitted model, or None if construction or fitting raised
    (the exception is printed together with the offending individual).
    """
    try:
        # Membership-function gene: 2=trapezoidal, 3=Gaussian (only when
        # the Entropy partitioner was not selected); anything else maps
        # to triangular.
        if individual['mf'] == 2:
            mf = Membership.trapmf
        elif individual['mf'] == 3 and individual['partitioner'] != 2:
            mf = Membership.gaussmf
        else:
            mf = Membership.trimf

        if individual['partitioner'] == 1:
            partitioner = Grid.GridPartitioner(
                data=train, npart=individual['npart'], func=mf)
        elif individual['partitioner'] == 2:
            partitioner = Entropy.EntropyPartitioner(
                data=train, npart=individual['npart'], func=mf)

        model = hofts.WeightedHighOrderFTS(
            partitioner=partitioner,
            lags=individual['lags'],
            alpha_cut=individual['alpha'],
            order=individual['order'])
        model.fit(train)
        return model

    except Exception as ex:
        # Best-effort evaluation: a failed phenotype is reported and
        # discarded rather than aborting the whole search.
        print("EXCEPTION!", str(ex), str(individual))
        return None
Ejemplo n.º 4
0
# Demo: fit a second-order Weighted High Order FTS on the first 1000
# temperature readings of the Malaysia dataset and print point statistics.
from pyFTS.partitioners import Grid, Entropy, Util as pUtil
from pyFTS.benchmarks import benchmarks as bchmk, Measures
from pyFTS.models import chen, yu, cheng, ismailefendi, hofts, pwfts
from pyFTS.common import Transformations

# First-order differencing transform; defined here but not applied below.
tdiff = Transformations.Differential(1)

from pyFTS.data import TAIEX, SP500, NASDAQ, Malaysia

dataset = Malaysia.get_data('temperature')[:1000]

# Partition the universe of discourse into a uniform grid of 20 sets.
p = Grid.GridPartitioner(data=dataset, npart=20)

print(p)

model = hofts.WeightedHighOrderFTS(partitioner=p, order=2)

model.fit(dataset)  #[22, 22, 23, 23, 24])

print(model)

# In-sample point statistics (return value is discarded here).
# NOTE(review): the ''' below opens a block that is not closed within
# this snippet — the benchmark call that follows is disabled residue.
Measures.get_point_statistics(dataset, model)
'''
#dataset = SP500.get_data()[11500:16000]
#dataset = NASDAQ.get_data()
#print(len(dataset))


bchmk.sliding_window_benchmarks(dataset, 1000, train=0.8, inc=0.2,
                                methods=[chen.ConventionalFTS], #[pwfts.ProbabilisticWeightedFTS],
                                benchmark_models=False,
Ejemplo n.º 5
0
rows = []

fig, ax = plt.subplots(nrows=1, ncols=1, figsize=[15, 7])
ax.plot(y, label="Original Data", color="Black")

part = Grid.GridPartitioner(data=y, npart=80)

# Fit each FTS variant on the same 80-set grid partitioning, plot its
# in-sample forecasts, and collect point statistics for comparison.
for label, constructor in [("HOFTS", hofts.HighOrderFTS),
                           ("WHOFTS", hofts.WeightedHighOrderFTS),
                           ("PWFTS", pwfts.ProbabilisticWeightedFTS)]:
    model = constructor(order=2, partitioner=part)
    model.fit(y)
    forecasts = model.predict(y)
    # Prepend the first observation so the forecast curve lines up with y.
    plt_fc = np.insert(forecasts, 0, y[0])
    ax.plot(plt_fc, label=label)
    rmse, mape, u = Measures.get_point_statistics(y, model)
    rows.append([label, rmse, mape, u])
Ejemplo n.º 6
0
# Demo: build an ensemble of WeightedHighOrderFTS models trained with
# different partition counts, then produce interval forecasts.
from pyFTS.models.incremental import IncrementalEnsemble, TimeVariant

from pyFTS.data import AirPassengers, artificial

from pyFTS.models.ensemble import ensemble
from pyFTS.models import hofts
from pyFTS.data import TAIEX

data = TAIEX.get_data()

model = ensemble.EnsembleFTS()

# NOTE(review): the inner loop variable 'order' is never passed to the
# constructor, so both iterations build identical default-order models —
# confirm whether order=order was intended.
# NOTE(review): 'Grid' is not imported in this snippet; assumes an
# earlier import elsewhere in the original file.
for k in [15, 25, 35]:
    for order in [1, 2]:
        fs = Grid.GridPartitioner(data=data, npart=k)
        tmp = hofts.WeightedHighOrderFTS(partitioner=fs)

        tmp.fit(data)

        model.append_model(tmp)

# Quantile-based interval forecasts with alpha=.05.
forecasts = model.predict(data, type='interval', method='quantile', alpha=.05)

from pyFTS.benchmarks import benchmarks as bchmk

#f, ax = plt.subplots(1, 1, figsize=[20, 5])

#ax.plot(data)
#bchmk.plot_interval(ax, forecasts, 3, "")
print(forecasts)
'''