Ejemplo n.º 1
0
def pftsExploreOrderAndPartitions(data, save=False, file=None):
    """Plot PWFTS point and interval forecasts while varying the model
    order (1..5) and the number of partitions (5..10).

    :param data: time series used for both training and forecasting
    :param save: if True, persist the figure via cUtil.show_and_save_image
    :param file: target file name used when save is True
    """
    fig, axes = plt.subplots(nrows=4, ncols=1, figsize=[6, 8])
    # NOTE(fix): keep the partitioner object itself; the original stored
    # `.sets` here and then accessed `.sets.sets` below, which raises
    # AttributeError.
    data_fs1 = Grid.GridPartitioner(data=data, npart=10)
    mi = []  # lower bounds collected to compute a common y-axis range
    ma = []  # upper bounds collected to compute a common y-axis range

    axes[0].set_title('Point Forecasts by Order')
    axes[2].set_title('Interval Forecasts by Order')

    for order in np.arange(1, 6):
        fts = pwfts.ProbabilisticWeightedFTS("")
        fts.shortname = "n = " + str(order)
        fts.train(data, sets=data_fs1.sets, order=order)
        point_forecasts = fts.forecast(data)
        interval_forecasts = fts.forecast_interval(data)
        lower = [kk[0] for kk in interval_forecasts]
        upper = [kk[1] for kk in interval_forecasts]
        mi.append(min(lower) * 0.95)
        ma.append(max(upper) * 1.05)
        # shift forecasts right by `order` samples so they align with data
        for k in np.arange(0, order):
            point_forecasts.insert(0, None)
            lower.insert(0, None)
            upper.insert(0, None)
        axes[0].plot(point_forecasts, label=fts.shortname)
        axes[2].plot(lower, label=fts.shortname)
        axes[2].plot(upper)

    axes[1].set_title('Point Forecasts by Number of Partitions')
    axes[3].set_title('Interval Forecasts by Number of Partitions')

    for partitions in np.arange(5, 11):
        # NOTE(fix): same `.sets.sets` problem as above
        data_fs = Grid.GridPartitioner(data=data, npart=partitions)
        fts = pwfts.ProbabilisticWeightedFTS("")
        fts.shortname = "q = " + str(partitions)
        fts.train(data, sets=data_fs.sets, order=1)
        point_forecasts = fts.forecast(data)
        interval_forecasts = fts.forecast_interval(data)
        lower = [kk[0] for kk in interval_forecasts]
        upper = [kk[1] for kk in interval_forecasts]
        mi.append(min(lower) * 0.95)
        ma.append(max(upper) * 1.05)
        point_forecasts.insert(0, None)
        lower.insert(0, None)
        upper.insert(0, None)
        axes[1].plot(point_forecasts, label=fts.shortname)
        axes[3].plot(lower, label=fts.shortname)
        axes[3].plot(upper)

    for ax in axes:
        ax.set_ylabel('F(T)')
        ax.set_xlabel('T')
        ax.plot(data, label="Original", color="black", linewidth=1.5)
        handles, labels = ax.get_legend_handles_labels()
        ax.legend(handles, labels, loc=2, bbox_to_anchor=(1, 1))
        ax.set_ylim([min(mi), max(ma)])
        ax.set_xlim([0, len(data)])

    plt.tight_layout()

    cUtil.show_and_save_image(fig, file, save)
Ejemplo n.º 2
0
def phenotype(individual, train):
    """Build and fit a WeightedHighOrderFTS model from a genotype dict.

    :param individual: genotype dict with keys 'mf', 'partitioner', 'npart',
        'lags', 'alpha' and 'order'
    :param train: training dataset
    :return: the fitted model, or None when instantiation/fitting fails
    """
    try:
        # Membership function selection; gaussmf is only chosen when the
        # partitioner is not code 2 (Entropy), otherwise fall back to trimf.
        mf_code = individual['mf']
        if mf_code == 2:
            membership = Membership.trapmf
        elif mf_code == 3 and individual['partitioner'] != 2:
            membership = Membership.gaussmf
        else:
            membership = Membership.trimf

        if individual['partitioner'] == 1:
            partitioner = Grid.GridPartitioner(data=train,
                                               npart=individual['npart'],
                                               func=membership)
        elif individual['partitioner'] == 2:
            partitioner = Entropy.EntropyPartitioner(data=train,
                                                     npart=individual['npart'],
                                                     func=membership)

        model = hofts.WeightedHighOrderFTS(partitioner=partitioner,
                                           lags=individual['lags'],
                                           alpha_cut=individual['alpha'],
                                           order=individual['order'])
        model.fit(train)
        return model

    except Exception as ex:
        print("EXCEPTION!", str(ex), str(individual))
        return None
Ejemplo n.º 3
0
def cluster_method(individual, train, test):
    """Evaluate one genotype: build, fit and score a WeightedHighOrderFTS.

    :param individual: genotype dict with keys 'mf', 'partitioner', 'npart',
        'lags', 'alpha' and 'order'
    :param train: training dataset
    :param test: test dataset used for the accuracy measures
    :return: tuple (individual, rmse, size, mape, u)
    """
    from pyFTS.common import Util, Membership
    from pyFTS.models import hofts
    from pyFTS.partitioners import Grid, Entropy
    from pyFTS.benchmarks import Measures

    # Membership function; gaussmf is only selected when the partitioner
    # is not the Entropy one (code 2), trimf is the default otherwise.
    if individual['mf'] == 2:
        membership = Membership.trapmf
    elif individual['mf'] == 3 and individual['partitioner'] != 2:
        membership = Membership.gaussmf
    else:
        membership = Membership.trimf

    if individual['partitioner'] == 1:
        partitioner = Grid.GridPartitioner(data=train,
                                           npart=individual['npart'],
                                           func=membership)
    elif individual['partitioner'] == 2:
        # enforce a floor of 10 partitions for the entropy partitioner
        npart = max(individual['npart'], 10)
        partitioner = Entropy.EntropyPartitioner(data=train,
                                                 npart=npart,
                                                 func=membership)

    model = hofts.WeightedHighOrderFTS(partitioner=partitioner,
                                       lags=individual['lags'],
                                       alpha_cut=individual['alpha'],
                                       order=individual['order'])
    model.fit(train)

    rmse, mape, u = Measures.get_point_statistics(test, model)

    return individual, rmse, len(model), mape, u
Ejemplo n.º 4
0
def fuzzycnn_forecast(train_df, test_df, params):
    """Train a FuzzyImageCNN on the configured input columns and forecast.

    :param train_df: training dataframe
    :param test_df: test dataframe
    :param params: hyperparameter dict with keys 'input', 'npartitions',
        'order', 'conv_layers', 'filters', 'kernel_size', 'pooling_size',
        'dense_layer_neurons', 'dropout', 'batch_size', 'epochs' and an
        optional 'step' (default 1)
    :return: list with the first value of each forecast
    """
    columns = list(params['input'])

    # grid partitioning over the selected input columns
    fuzzy_sets = Grid.GridPartitioner(data=train_df[columns].values,
                                      npart=params['npartitions']).sets

    model = FuzzyImageCNN.FuzzyImageCNN(
        fuzzy_sets,
        nlags=params['order'],
        steps=1,
        conv_layers=params['conv_layers'],
        filters=params['filters'],
        kernel_size=params['kernel_size'],
        pooling_size=params['pooling_size'],
        dense_layer_neurons=params['dense_layer_neurons'],
        dropout=params['dropout'],
        debug=False)

    model.fit(train_df[columns],
              batch_size=params['batch_size'],
              epochs=params['epochs'])

    predictions = model.predict(test_df[columns],
                                steps_ahead=params.get('step', 1))

    return [p[0] for p in predictions]
Ejemplo n.º 5
0
def cluster_method(individual, dataset, **kwargs):
    """Evaluate a genotype with sliding-window cross validation.

    :param individual: genotype dict with keys 'mf', 'partitioner', 'npart',
        'lags', 'alpha' and 'order'
    :param dataset: the complete time series
    :keyword window_size: length of each sliding window (default 800)
    :keyword train_rate: fraction of each window used for training (default .8)
    :keyword increment_rate: window increment fraction (default .2)
    :keyword parameters: extra parameters dict (read but unused here)
    :return: dict with the genotype, the mean RMSE and the mean model size
        across all windows
    """
    from pyFTS.common import Util, Membership
    from pyFTS.models import hofts
    from pyFTS.partitioners import Grid, Entropy
    from pyFTS.benchmarks import Measures
    import numpy as np

    # membership function: gaussmf only when not using the Entropy partitioner
    if individual['mf'] == 1:
        mf = Membership.trimf
    elif individual['mf'] == 2:
        mf = Membership.trapmf
    elif individual['mf'] == 3 and individual['partitioner'] != 2:
        mf = Membership.gaussmf
    else:
        mf = Membership.trimf

    window_size = kwargs.get('window_size', 800)
    train_rate = kwargs.get('train_rate', .8)
    increment_rate = kwargs.get('increment_rate', .2)
    parameters = kwargs.get('parameters', {})

    errors = []
    sizes = []

    for count, train, test in Util.sliding_window(dataset,
                                                  window_size,
                                                  train=train_rate,
                                                  inc=increment_rate):

        if individual['partitioner'] == 1:
            partitioner = Grid.GridPartitioner(data=train,
                                               npart=individual['npart'],
                                               func=mf)
        elif individual['partitioner'] == 2:
            # enforce a floor of 10 partitions for the entropy partitioner
            npart = individual['npart'] if individual['npart'] > 10 else 10
            partitioner = Entropy.EntropyPartitioner(data=train,
                                                     npart=npart,
                                                     func=mf)

        model = hofts.WeightedHighOrderFTS(partitioner=partitioner,
                                           lags=individual['lags'],
                                           alpha_cut=individual['alpha'],
                                           order=individual['order'])
        model.fit(train)

        forecasts = model.predict(test)

        # forecasts start after max_lag samples, so align the test series
        rmse = Measures.rmse(test[model.max_lag:], forecasts)

        errors.append(rmse)
        sizes.append(len(model))

    return {
        'parameters': individual,
        'rmse': np.nanmean(errors),
        # NOTE(fix): previously averaged the scalar `size` of the LAST
        # window only; average the per-window sizes instead.
        'size': np.nanmean(sizes)
    }
Ejemplo n.º 6
0
 def __init__(self, trainData, parts, fuzzyMethod, fuzzyMode, order=1):
     """Fuzzify the training data and fit an FTS model.

     :param trainData: training time series
     :param parts: number of partitions for the grid partitioner
     :param fuzzyMethod: fuzzyfication method passed to fuzzyfy()
     :param fuzzyMode: fuzzyfication mode passed to fuzzyfy()
     :param order: model order; > 1 builds a high-order FTS (stored in
         self.modelHO), otherwise Chen's conventional FTS (self.model)
     """
     self.order = order
     self.trainData = trainData
     # universe-of-discourse partitioning into `parts` fuzzy sets
     self.fs = Grid.GridPartitioner(data=self.trainData, npart=parts)
     self.fuzzyfied = self.fs.fuzzyfy(self.trainData,
                                      method=fuzzyMethod,
                                      mode=fuzzyMode)
     # non-recurrent fuzzy logical relationships extracted from the data
     self.patterns = FLR.generate_non_recurrent_flrs(self.fuzzyfied)
     if self.order > 1:
         self.modelHO = pyFTS.models.hofts.HighOrderFTS(order=self.order,
                                                        partitioner=self.fs)
         self.modelHO.fit(self.trainData)
     else:
         self.model = chen.ConventionalFTS(partitioner=self.fs)
         self.model.fit(self.trainData)
Ejemplo n.º 7
0
def simplenonstationary_gridpartitioner_builder(data, npart, transformation):
    """Build a SimpleNonStationaryPartitioner backed by a grid partitioning.

    Both location and width use a polynomial perturbation with parameters
    [1, 0] and roots 0.

    :param data: the time series used to fit the partitions
    :param npart: number of partitions
    :param transformation: data transformation applied by the grid partitioner
    :return: the non-stationary partitioner
    """
    from pyFTS.partitioners import Grid
    from pyFTS.models.nonstationary import perturbation, partitioners

    base = Grid.GridPartitioner(data=data,
                                npart=npart,
                                transformation=transformation)

    return partitioners.SimpleNonStationaryPartitioner(
        data,
        base,
        location=perturbation.polynomial,
        location_params=[1, 0],
        location_roots=0,
        width=perturbation.polynomial,
        width_params=[1, 0],
        width_roots=0)
Ejemplo n.º 8
0
def index():
    """Fit Chen's conventional FTS on the Enrollments series and return the
    training data, test data and forecasts as a JSON response."""
    train = [Enrollments.get_data()]
    test = [Enrollments.get_data()]

    partitioner = Grid.GridPartitioner(data=train, npart=10)
    model = chen.ConventionalFTS(partitioner=partitioner)
    model.fit(train)
    forecasts = model.predict(test)

    payload = {
        'train': train,
        'test': test,
        'forecast': forecasts,
    }
    return jsonify(payload)
Ejemplo n.º 9
0
def __fts(train, test, model_type='chen'):
    """Fit an FTS model of the requested type and return a JsonResponse
    holding the train/test series and the forecasts.

    :param train: training series
    :param test: test series
    :param model_type: 'chen' or 'cheng'; any other value falls back to Chen
    """
    partitioner = Grid.GridPartitioner(data=train, npart=10)

    if model_type == 'cheng':
        model = cheng.TrendWeightedFTS(partitioner=partitioner)
    else:
        # 'chen' and any unknown value use the conventional model
        model = chen.ConventionalFTS(partitioner=partitioner)

    model.fit(train)
    forecasts = model.predict(test)
    er = rmse(train, forecasts)  # computed but not included in the payload

    payload = {
        'train': train,
        'test': test,
        'forecast': forecasts,
    }
    return JsonResponse(payload)
Ejemplo n.º 10
0
def phenotype(individual, train, fts_method, parameters=None, **kwargs):
    """
    Instantiate the genotype, creating a fitted model with the genotype hyperparameters

    :param individual: a genotype
    :param train: the training dataset
    :param fts_method: the FTS method
    :param parameters: dict with model specific arguments for fit method.
    :return: a fitted FTS model
    """
    from pyFTS.models import hofts, ifts, pwfts

    # NOTE(fix): avoid the shared mutable-default-argument pitfall;
    # callers that omit `parameters` still get an empty dict.
    parameters = {} if parameters is None else parameters

    # membership function: gaussmf only when not using the Entropy partitioner
    if individual['mf'] == 1:
        mf = Membership.trimf
    elif individual['mf'] == 2:
        mf = Membership.trapmf
    elif individual['mf'] == 3 and individual['partitioner'] != 2:
        mf = Membership.gaussmf
    else:
        mf = Membership.trimf

    if individual['partitioner'] == 1:
        partitioner = Grid.GridPartitioner(data=train,
                                           npart=individual['npart'],
                                           func=mf)
    elif individual['partitioner'] == 2:
        partitioner = Entropy.EntropyPartitioner(data=train,
                                                 npart=individual['npart'],
                                                 func=mf)

    model = fts_method(partitioner=partitioner,
                       lags=individual['lags'],
                       alpha_cut=individual['alpha'],
                       order=individual['order'])

    model.fit(train, **parameters)

    return model
Ejemplo n.º 11
0
def FTS(train, test):
    """Fit Chen's conventional FTS on `train`, forecast `test`, and show
    plots of the predictions, accuracy metrics (MSE, R^2) and residuals.

    :param train: training series
    :param test: test series
    """
    # Universe of Discourse Partitioner (75 equal partitions)
    partitioner = Grid.GridPartitioner(data=train, npart=75)

    # Create an empty model using the Chen(1996) method
    model = chen.ConventionalFTS(partitioner=partitioner)

    # The training procedure is performed by the method fit
    model.fit(train)

    # The forecasting procedure is performed by the method predict
    forecasts = model.predict(test)

    # Plot real vs. predicted values
    plt.plot(test, color='red', label='Real Stock Price')
    plt.plot(forecasts, color='blue', label='Predicted Stock Price')
    plt.title(' Stock Price Prediction using fuzzy logic')
    plt.xlabel('Time')
    plt.ylabel('Stock Price')
    plt.legend()
    plt.show()
    print("For Fuzzy time series The mean squared error is:")
    print(mean_squared_error(test, forecasts))
    print("For fuzzy time series The R squared error is:")
    print(r2_score(test, forecasts))
    plt.style.use('fivethirtyeight')
    # residual scatter plots for train and test data
    plt.scatter(train,
                train - model.predict(train),
                color="green",
                s=10,
                label='Train data')
    plt.scatter(test, test - forecasts, color="blue", s=10, label='Test data')
    plt.hlines(y=0, xmin=0, xmax=250, linewidth=2)
    plt.legend(loc='upper right')
    plt.title("Residual errors for FTS")
    plt.show()
Ejemplo n.º 12
0
#boxcox = Transformations.BoxCox(0)

#df = pd.read_csv('https://query.data.world/s/z2xo3t32pkl4mdzp63x6lyne53obmi')
#dados = df.iloc[2710:2960 , 0:1].values  # only the first column would be used
#dados = df['temperature'].values
#dados = dados.flatten().tolist()

# demo series: the Enrollments dataset
dados = Enrollments.get_data()

l = len(dados)

#dados_treino = dados[:int(l*.7)]
#dados_teste = dados[int(l*.7):]

# 10 partitions with triangular membership functions
particionador = Grid.GridPartitioner(data = dados, npart = 10, func = Membership.trimf)

modelo = pwfts.ProbabilisticWeightedFTS(partitioner = particionador, order = 1, standard_horizon=3)
#modelo = hofts.WeightedHighOrderFTS(partitioner = particionador, order = 1, standard_horizon=2)
#modelo = chen.ConventionalFTS(partitioner = particionador, standard_horizon=3)

modelo.fit(dados)

print(modelo)

# The whole inference procedure is performed by the predict method
predicoes = modelo.predict(dados)

print(predicoes)

Ejemplo n.º 13
0
# Enrollments dataset: plot the raw series and extract the values
df = Enrollments.get_dataframe()
plt.plot(df['Year'], df['Enrollments'])
data = df['Enrollments'].values
"""## Training procedure

### Definition of the Universe of Discourse U & Linguistic variable creation

The Universe of Discourse (U) partitioners are responsible for identifying U, split the partitions and create their fuzzy sets. There are several ways to partition U and this has a direct impact on the accuracy of the predictive model.

For this example we are using grid partitioning, where all sets are equal. The default membership function is triangular.
"""

from pyFTS.partitioners import Grid

# 10 equal-width partitions over the universe of discourse
fs = Grid.GridPartitioner(data=data, npart=10)

fig, ax = plt.subplots(nrows=1, ncols=1, figsize=[15, 5])

fs.plot(ax)
"""### Fuzzyfication

This is demo-only, and you do not need to explicitly run it. This entire process runs automatically within the fit function, which trains the model.
"""

fuzzyfied = fs.fuzzyfy(data, method='maximum', mode='sets')

fuzzyfied
"""### Temporal patterns

This is demo-only, and you do not need to explicitly run it. This entire process runs automatically within the fit function, which trains the model.
Ejemplo n.º 14
0
from pyFTS.models import chen
'''getting the whole TAIEX dataframe'''
data = TAIEX.get_dataframe()
'''Data Visualistion'''
plt.plot(data['Date'], data['avg'])
'''getting target variable'''

temp = TAIEX.get_data()
train = temp[1:4000]
test = temp[4000:5000]
'''Universe of Discourse Partitioner'''

fig, ax = plt.subplots(nrows=1, ncols=1, figsize=[10, 5])

# NOTE(fix): the partition count keyword is `npart`, as used by every other
# GridPartitioner call in this file; `n=10` was not the expected keyword.
partitioner = Grid.GridPartitioner(data=train, npart=10)
partitioner.plot(ax)
plt.show()
'''creating the chen's model'''

model = chen.ConventionalFTS(name="a", partitioner=partitioner)
'''fitting data for training'''
model.fit(train)
''' Time series forecasting'''
forecasts = model.predict(test)
'''visualising the result for having rough idea of accuracy'''
plt.plot(data['Date'].dt.year[4000:5000], test)
plt.plot(data['Date'].dt.year[4000:5000], forecasts)
def rmse(predictions, targets):
Ejemplo n.º 15
0
def SelecaoSimples_MenorRMSE(original, parameters, modelo):
    """Simple model selection by smallest RMSE.

    Trains `modelo` once per partition count in `parameters`, both on the
    raw series and on its first difference, plotting every forecast and
    the RMSE-by-partitions curves.

    :param original: the original time series
    :param parameters: list of partition counts to try
    :param modelo: FTS model constructor taking a name string
    :return: [best_model, best_forecasts, best_diff_model, best_diff_forecasts]
    """
    ret = []
    errors = []
    forecasted_best = []
    print("Série Original")
    fig = plt.figure(figsize=[20, 12])
    fig.suptitle("Comparação de modelos ")
    ax0 = fig.add_axes([0, 0.5, 0.65, 0.45])  # left, bottom, width, height
    ax0.set_xlim([0, len(original)])
    ax0.set_ylim([min(original), max(original)])
    ax0.set_title('Série Temporal')
    ax0.set_ylabel('F(T)')
    ax0.set_xlabel('T')
    ax0.plot(original, label="Original")
    min_rmse = 100000.0
    best = None
    for p in parameters:
        sets = Grid.GridPartitioner(data=original, npart=p).sets
        fts = modelo(str(p) + " particoes")
        fts.train(original, sets=sets)
        forecasted = fts.forecast(original)
        # pad with the first sample so forecasts align with the series
        forecasted.insert(0, original[0])
        ax0.plot(forecasted, label=fts.name)
        error = Measures.rmse(np.array(forecasted), np.array(original))
        print(p, error)
        errors.append(error)
        if error < min_rmse:
            min_rmse = error
            best = fts
            forecasted_best = forecasted
    handles0, labels0 = ax0.get_legend_handles_labels()
    ax0.legend(handles0, labels0)
    ax1 = fig.add_axes([0.7, 0.5, 0.3, 0.45])  # left, bottom, width, height
    ax1.set_title('Comparação dos Erros Quadráticos Médios')
    ax1.set_ylabel('RMSE')
    ax1.set_xlabel('Quantidade de Partições')
    ax1.set_xlim([min(parameters), max(parameters)])
    ax1.plot(parameters, errors)
    ret.append(best)
    ret.append(forecasted_best)
    # Differenced-series models
    print("\nSérie Diferencial")
    difffts = Transformations.differential(original)
    errors = []
    forecastedd_best = []
    ax2 = fig.add_axes([0, 0, 0.65, 0.45])  # left, bottom, width, height
    ax2.set_xlim([0, len(difffts)])
    ax2.set_ylim([min(difffts), max(difffts)])
    ax2.set_title('Série Temporal')
    ax2.set_ylabel('F(T)')
    ax2.set_xlabel('T')
    ax2.plot(difffts, label="Original")
    min_rmse = 100000.0
    bestd = None
    for p in parameters:
        # NOTE(fix): pass the partitioner's fuzzy sets (mirrors the first
        # loop above); previously the partitioner object itself was passed.
        sets = Grid.GridPartitioner(data=difffts, npart=p).sets
        fts = modelo(str(p) + " particoes")
        fts.train(difffts, sets=sets)
        forecasted = fts.forecast(difffts)
        forecasted.insert(0, difffts[0])
        ax2.plot(forecasted, label=fts.name)
        error = Measures.rmse(np.array(forecasted), np.array(difffts))
        print(p, error)
        errors.append(error)
        if error < min_rmse:
            min_rmse = error
            bestd = fts
            forecastedd_best = forecasted
    handles0, labels0 = ax2.get_legend_handles_labels()
    ax2.legend(handles0, labels0)
    ax3 = fig.add_axes([0.7, 0, 0.3, 0.45])  # left, bottom, width, height
    ax3.set_title('Comparação dos Erros Quadráticos Médios')
    ax3.set_ylabel('RMSE')
    ax3.set_xlabel('Quantidade de Partições')
    ax3.set_xlim([min(parameters), max(parameters)])
    ax3.plot(parameters, errors)
    ret.append(bestd)
    ret.append(forecastedd_best)
    return ret
Ejemplo n.º 16
0
import pandas as pd

from pyFTS.common import Util as cUtil, FuzzySet
from pyFTS.partitioners import Grid, Entropy, Util as pUtil
from pyFTS.benchmarks import benchmarks as bchmk, Measures
from pyFTS.models import chen, yu, cheng, ismailefendi, hofts, pwfts
from pyFTS.common import Transformations

# first-order differential transformation (not referenced again below)
tdiff = Transformations.Differential(1)

from pyFTS.data import TAIEX, SP500, NASDAQ, Malaysia

# first 1000 temperature readings from the Malaysia dataset
dataset = Malaysia.get_data('temperature')[:1000]

p = Grid.GridPartitioner(data=dataset, npart=20)

print(p)

# second-order weighted high-order FTS
model = hofts.WeightedHighOrderFTS(partitioner=p, order=2)

model.fit(dataset)  #[22, 22, 23, 23, 24])

print(model)

Measures.get_point_statistics(dataset, model)
'''
#dataset = SP500.get_data()[11500:16000]
#dataset = NASDAQ.get_data()
#print(len(dataset))
Ejemplo n.º 17
0
from pyFTS.models import chen, hofts
from pyFTS.models.incremental import IncrementalEnsemble, TimeVariant

from pyFTS.data import AirPassengers, artificial

from pyFTS.models.ensemble import ensemble
from pyFTS.models import hofts
from pyFTS.data import TAIEX

data = TAIEX.get_data()

model = ensemble.EnsembleFTS()

# build 3 partition sizes x 2 iterations = 6 member models
# NOTE(review): `order` is never passed to WeightedHighOrderFTS, so the two
# inner iterations build identically-configured models — confirm intent.
for k in [15, 25, 35]:
    for order in [1, 2]:
        fs = Grid.GridPartitioner(data=data, npart=k)
        tmp = hofts.WeightedHighOrderFTS(partitioner=fs)

        tmp.fit(data)

        model.append_model(tmp)

# interval forecasts taken from the ensemble quantiles
forecasts = model.predict(data, type='interval', method='quantile', alpha=.05)

from pyFTS.benchmarks import benchmarks as bchmk

#f, ax = plt.subplots(1, 1, figsize=[20, 5])

#ax.plot(data)
#bchmk.plot_interval(ax, forecasts, 3, "")
print(forecasts)
Ejemplo n.º 18
0
def sliding_window_simple_search(data, windowsize, model, partitions, orders,
                                 **kwargs):
    """Grid search over (partitions x orders) using sliding-window
    evaluation, plotting an error surface (3D) or error curve (2D).

    :param data: the time series
    :param windowsize: sliding window length
    :param model: FTS model constructor taking a name and a partitioner
    :param partitions: iterable of partition counts to try
    :param orders: iterable of model orders to try; more than one order
        triggers the 3D error-surface plot
    :keyword plotforecasts, intervals, threshold, progressbar, elev, azim,
        figsize, file, save: see usage below
    :return: [best_model, best_forecasts]
    """
    _3d = len(orders) > 1
    ret = []
    errors = np.array([[0 for k in range(len(partitions))]
                       for kk in range(len(orders))])
    forecasted_best = []

    figsize = kwargs.get('figsize', [10, 15])
    fig = plt.figure(figsize=figsize)

    plotforecasts = kwargs.get('plotforecasts', False)
    if plotforecasts:
        ax0 = fig.add_axes([0, 0.4, 0.9, 0.5])  # left, bottom, width, height
        ax0.set_xlim([0, len(data)])
        ax0.set_ylim([min(data) * 0.9, max(data) * 1.1])
        ax0.set_title('Forecasts')
        ax0.set_ylabel('F(T)')
        ax0.set_xlabel('T')
    min_rmse = 1000000.0
    best = None

    intervals = kwargs.get('intervals', False)
    threshold = kwargs.get('threshold', 0.5)

    progressbar = kwargs.get('progressbar', None)

    rng1 = enumerate(partitions, start=0)

    if progressbar:
        from tqdm import tqdm
        rng1 = enumerate(tqdm(partitions), start=0)

    for pc, p in rng1:
        fs = Grid.GridPartitioner(data=data, npart=p)

        rng2 = enumerate(orders, start=0)

        if progressbar:
            rng2 = enumerate(tqdm(orders), start=0)

        for oc, o in rng2:
            _error = []
            for ct, train, test in Util.sliding_window(data, windowsize, 0.8,
                                                       **kwargs):
                fts = model("q = " + str(p) + " n = " + str(o), partitioner=fs)
                fts.fit(train, order=o)
                if not intervals:
                    forecasted = fts.forecast(test)
                    if not fts.has_seasonality:
                        _error.append(
                            Measures.rmse(np.array(test[o:]),
                                          np.array(forecasted[:-1])))
                    else:
                        _error.append(
                            Measures.rmse(np.array(test[o:]),
                                          np.array(forecasted)))
                    # pad so the forecasts align with the original series
                    for kk in range(o):
                        forecasted.insert(0, None)
                    if plotforecasts: ax0.plot(forecasted, label=fts.name)
                else:
                    forecasted = fts.forecast_interval(test)
                    _error.append(1.0 - Measures.rmse_interval(
                        np.array(test[o:]), np.array(forecasted[:-1])))
            error = np.nanmean(_error)
            errors[oc, pc] = error
            # keep the configuration only on a significant improvement
            if (min_rmse - error) > threshold:
                min_rmse = error
                best = fts
                forecasted_best = forecasted

    # NOTE(fix): elev/azim were previously assigned only inside the
    # `plotforecasts` branch but read unconditionally, raising NameError
    # when plotforecasts was False; hoist them here.
    elev = kwargs.get('elev', 30)
    azim = kwargs.get('azim', 144)

    if plotforecasts:
        # handles0, labels0 = ax0.get_legend_handles_labels()
        # ax0.legend(handles0, labels0)
        ax0.plot(test, label="Original", linewidth=3.0, color="black")

    if _3d:
        # NOTE(fix): the 3D axes are only needed (and only valid) when
        # several orders were searched; previously they were also created
        # when plotforecasts was False regardless of _3d.
        ax1 = Axes3D(fig, rect=[0, 1, 0.9, 0.9], elev=elev, azim=azim)
        ax1.set_title('Error Surface')
        ax1.set_ylabel('Model order')
        ax1.set_xlabel('Number of partitions')
        ax1.set_zlabel('RMSE')
        X, Y = np.meshgrid(partitions, orders)
        surf = ax1.plot_surface(X,
                                Y,
                                errors,
                                rstride=1,
                                cstride=1,
                                antialiased=True)
    else:
        ax1 = fig.add_axes([0, 1, 0.9, 0.9])
        ax1.set_title('Error Curve')
        ax1.set_ylabel('Number of partitions')
        ax1.set_xlabel('RMSE')
        # NOTE(fix): the curve was plotted on ax0, which exists only when
        # plotforecasts is True; plot it on the axes created just above.
        ax1.plot(errors, partitions)
    ret.append(best)
    ret.append(forecasted_best)

    # plt.tight_layout()

    file = kwargs.get('file', None)
    save = kwargs.get('save', False)

    Util.show_and_save_image(fig, file, save)

    return ret
Ejemplo n.º 19
0
import seaborn as sns
import pandas as pd

from pyFTS.fcm import fts as fcm_fts
from pyFTS.partitioners import Grid
from pyFTS.common import Util, Membership

# remote CSV; the 'glo_avg' column is used as the series
df = pd.read_csv('https://query.data.world/s/56i2vkijbvxhtv5gagn7ggk3zw3ksi',
                 sep=';')

data = df['glo_avg'].values[:]

train = data[:7000]
test = data[7000:7500]

fs = Grid.GridPartitioner(data=train, npart=5, func=Membership.trimf)

# NOTE(review): `Activations` is not imported in this snippet — presumably
# it comes from pyFTS.fcm; confirm before running.
model = fcm_fts.FCM_FTS(partitioner=fs,
                        order=2,
                        activation_function=Activations.relu)

# gradient-descent training; NOTE(review): 'iteractions' (sic) — confirm it
# matches the keyword the library expects.
model.fit(train, method='GD', alpha=0.5, momentum=None, iteractions=1)
'''
model.fit(train, method='GA', ngen=15, #number of generations
    mgen=7, # stop after mgen generations without improvement
    npop=15, # number of individuals on population
    pcruz=.5, # crossover percentual of population
    pmut=.3, # mutation percentual of population
    window_size = 7000,
    train_rate = .8,
    increment_rate =.2,
Ejemplo n.º 20
0
warnings.filterwarnings('ignore')

import pandas as pd
from pyFTS.partitioners import Grid
from pyFTS.models import chen
from pyFTS.common import FLR
from pyFTS.common import Util
import numpy as np
from flask import Flask
from flask import render_template
from flask import request

# load the series from the '4h' column of a local CSV
data = pd.read_csv('4.csv')
data = data['4h'].values

# 11 grid partitions; fuzzyfication shown for demonstration
fuzzy = Grid.GridPartitioner(data = data, npart = 11)
fuzzyfied = fuzzy.fuzzyfy(data, method = 'maximum', mode = 'sets')

# Chen's conventional FTS trained on the full series
model = chen.ConventionalFTS(partitioner = fuzzy)
model.fit(data)

app = Flask(__name__)

@app.route('/')
def home():
    # landing page template
    return render_template('index.php')

@app.route('/predict', methods=['POST'])
def predict():
    features = [float(x) for x in request.form.values()]
    final_features = [np.array(features)]
Ejemplo n.º 21
0
dados_treino = dados[:qtde_dt_tr]

#print(dados_treino)

ttr = list(range(len(dados_treino)))

ordem = 1  # model order: how many of the most recent values are used

# test slice begins `ordem` points before the split point
dados_teste = dados[qtde_dt_tr - ordem:250]
tts = list(
    range(
        len(dados_treino) - ordem,
        len(dados_treino) + len(dados_teste) - ordem))

# 30 partitions with triangular membership functions
particionador = Grid.GridPartitioner(data=dados_treino,
                                     npart=30,
                                     func=Membership.trimf)

modelo = pwfts.ProbabilisticWeightedFTS(partitioner=particionador, order=ordem)

modelo.fit(dados_treino)

print(modelo)

# The whole inference procedure is performed by the predict method
predicoes = modelo.predict(dados_teste[38:40])

print(predicoes)
'''
from pyFTS.data import TAIEX, NASDAQ, SP500
from pyFTS.common import Util
Ejemplo n.º 22
0
#data.index = np.arange(0,len(data.index))

#data = data["a"].tolist()

from pyFTS.models.seasonal import sfts, cmsfts, SeasonalIndexer, common

# ix = SeasonalIndexer.LinearSeasonalIndexer([7],[1])

# seasonal indexer built from the 'date' field with a day-of-week season;
# presumably 'a' names the data column — confirm against the dataset schema
ix = SeasonalIndexer.DateTimeSeasonalIndexer("date",
                                             [common.DateTime.day_of_week],
                                             [None, None],
                                             'a',
                                             name="weekday")

from pyFTS.partitioners import Grid

fs = Grid.GridPartitioner(data=data, npart=10, indexer=ix)

#model = sfts.SeasonalFTS(indexer=ix, partitioner=fs)
model = cmsfts.ContextualMultiSeasonalFTS(indexer=ix, partitioner=fs)

model.fit(data)

print(model)

print(model.predict(data))

from pyFTS.benchmarks import Measures

Measures.get_point_statistics(data, model)
Ejemplo n.º 23
0
# NOTE(fix): `partitioners` and `honsfts` were used below without being
# imported; both live in pyFTS.models.nonstationary.
from pyFTS.models.nonstationary import nsfts, honsfts, partitioners
from pyFTS.partitioners import Grid
import matplotlib.pyplot as plt
from pyFTS.common import Util as cUtil
import pandas as pd

from pyFTS.data import artificial

# synthetic series produced by pyFTS's gaussian-linear generator
lmv1 = artificial.generate_gaussian_linear(1, 0.2, 0.2, 0.05)

ts = 200  # train/test split point
ws = 35   # window size for the non-stationary partitioner
train1 = lmv1[:ts]
test1 = lmv1[ts:]

# base grid partitioning fitted on the first 50 training samples
tmp_fs1 = Grid.GridPartitioner(data=train1[:50], npart=10)

fs1 = partitioners.PolynomialNonStationaryPartitioner(train1,
                                                      tmp_fs1,
                                                      window_size=ws,
                                                      degree=1)

nsfts1 = honsfts.HighOrderNonStationaryFTS("", partitioner=fs1)

nsfts1.fit(train1, order=2, parameters=ws)

print(fs1)

print(nsfts1.predict(test1))

print(nsfts1)
Ejemplo n.º 24
0
models = []
fig, ax = plt.subplots(nrows=2, ncols=1, figsize=[20, 5])

# visual inspection: first 240 points, then the whole training series
ax[0].plot(train_uv[:240])
ax[1].plot(train_uv)

from statsmodels.tsa.stattools import acf

fig, ax = plt.subplots(nrows=1, ncols=1, figsize=[15, 5])

# autocorrelation up to 48 lags
ax.plot(acf(train_uv, nlags=48))
ax.set_title("Autocorrelation")
ax.set_ylabel("ACF")
ax.set_xlabel("LAG")

from itertools import product

# 35 linguistic labels: five levels x seven sublevels (VL0 .. VH6)
levels = ['VL', 'L', 'M', 'H', 'VH']
sublevels = [str(k) for k in np.arange(0, 7)]
names = []
for combination in product(*[levels, sublevels]):
    names.append(combination[0] + combination[1])

print(names)

fig, ax = plt.subplots(nrows=1, ncols=1, figsize=[15, 3])

# one named fuzzy set per generated label (npart must equal len(names))
part = Grid.GridPartitioner(data=train_uv, npart=35, names=names)

part.plot(ax)
Ejemplo n.º 25
0
from pyFTS.benchmarks import Measures
from pyFTS.partitioners import Grid, Entropy
from pyFTS.models import hofts
from pyFTS.common import Membership

# sine wave sampled over [-2*pi, 2*pi) at 0.1 rad steps
x = [k for k in np.arange(-2 * np.pi, 2 * np.pi, 0.1)]
y = [np.sin(k) for k in x]

rows = []

fig, ax = plt.subplots(nrows=1, ncols=1, figsize=[15, 5])

ax.plot(y, label='Original', color='black')

# sweep the partition count and collect accuracy statistics per model
for npart in np.arange(5, 35, 5):
    part = Grid.GridPartitioner(data=y, npart=npart)
    model = hofts.HighOrderFTS(order=1, partitioner=part)
    model.fit(y)
    forecasts = model.predict(y)

    ax.plot(forecasts[:-1], label=str(npart) + " partitions")

    rmse, mape, u = Measures.get_point_statistics(y, model)

    rows.append([npart, rmse, mape, u])

handles, labels = ax.get_legend_handles_labels()
lgd = ax.legend(handles, labels, loc=2, bbox_to_anchor=(1, 1))

# summary table of accuracy by partition count
df = pd.DataFrame(rows, columns=['Partitions', 'RMSE', 'MAPE', 'U'])
Ejemplo n.º 26
0
import pandas as pd
from pyFTS.common import Transformations

# first-order differential transformation (commented out in the calls below)
tdiff = Transformations.Differential(1)

from pyFTS.data import TAIEX, SP500, NASDAQ

dataset = TAIEX.get_data()
#dataset = SP500.get_data()[11500:16000]
#dataset = NASDAQ.get_data()
#print(len(dataset))

from pyFTS.partitioners import Grid, Util as pUtil

# 20 equal partitions fitted on the first 2000 points
partitioner = Grid.GridPartitioner(data=dataset[:2000],
                                   npart=20)  #, transformation=tdiff)

from pyFTS.common import Util as cUtil
from pyFTS.benchmarks import benchmarks as bchmk, Util as bUtil, Measures, knn, quantreg, arima, naive

from pyFTS.models import pwfts, song, chen, ifts, hofts
from pyFTS.models.ensemble import ensemble

print(partitioner)

#model = chen.ConventionalFTS(partitioner=partitioner)
model = hofts.HighOrderFTS(partitioner=partitioner, order=2)
#model.append_transformation(tdiff)
model.fit(dataset[:2000])

print(model)
Ejemplo n.º 27
0
from pyFTS.partitioners import Grid, Entropy, Util as pUtil, Simple
from pyFTS.benchmarks import benchmarks as bchmk, Measures
from pyFTS.models import chen, yu, cheng, ismailefendi, hofts, pwfts, tsaur, song, sadaei
from pyFTS.common import Transformations, Membership

from pyFTS.fcm import fts, common, GA

from pyFTS.data import Enrollments, TAIEX

import pandas as pd
# remote CSV; the 'glo_avg' column is used as the series
df = pd.read_csv('https://query.data.world/s/7zfy4d5uep7wbgf56k4uu5g52dmvap',
                 sep=';')

data = df['glo_avg'].values[:12000]

fs = Grid.GridPartitioner(data=data, npart=35, func=Membership.trimf)

# genetic-algorithm hyperparameters for the FCM-FTS search
GA.parameters['num_concepts'] = 35
GA.parameters['order'] = 2
GA.parameters['partitioner'] = fs

GA.execute('TAIEX', data)
'''
model = fts.FCM_FTS(partitioner=fs, order=1)

model.fcm.weights = np.array([
    [1, 1, 0, -1, -1],
    [1, 1, 1, 0, -1],
    [0, 1, 1, 1, 0],
    [-1, 0, 1, 1, 1],
    [-1, -1, 0, 1, 1]
Ejemplo n.º 28
0
# load the training set (path given by `treino`)
df = pd.read_csv(treino)
fig, ax = plt.subplots(nrows=1, ncols=1, figsize=[10, 5])
plt.plot(df['Adj Close'])
data = df['Adj Close'].values
new_data = []
for i in range(len(data)):
    # NaN != NaN, so this keeps only non-NaN entries
    if data[i] == data[i]:
        new_data.append(data[i])
train_data = np.array(new_data)
data_max = max(train_data)
data_min = min(train_data)
# min-max normalization to [0, 1]
norm_train_data = (train_data - data_min) / (data_max - data_min)

from pyFTS.partitioners import Grid
fs = Grid.GridPartitioner(data=norm_train_data, npart=fzz)
fig, ax = plt.subplots(nrows=1, ncols=1, figsize=[15, 5])
fs.plot(ax)

# build the fuzzy sets (fuzzyfication of the normalized series)
fuzzyfied = fs.fuzzyfy(norm_train_data, method='fuzzy', mode='sets')
print(fuzzyfied)

# extract the non-recurrent fuzzy logical relationships
from pyFTS.common import FLR
patterns = FLR.generate_non_recurrent_flrs(fuzzyfied)
print([str(k) for k in patterns])

# train the model with the fuzzy sets
from pyFTS.models import chen
model = chen.ConventionalFTS(partitioner=fs)
Ejemplo n.º 29
0
#from mpl_toolkits.mplot3d import Axes3D

import pandas as pd
from pyFTS.common import Transformations

# first-order differencing applied to both partitioning and the model
tdiff = Transformations.Differential(1)

from pyFTS.data import TAIEX, SP500, NASDAQ

dataset = TAIEX.get_data()
#dataset = SP500.get_data()[11500:16000]
#dataset = NASDAQ.get_data()
#print(len(dataset))

from pyFTS.partitioners import Grid, Util as pUtil
partitioner = Grid.GridPartitioner(data=dataset[:800], npart=10, transformation=tdiff)


from pyFTS.common import Util as cUtil
from pyFTS.benchmarks import benchmarks as bchmk, Util as bUtil, Measures, knn, quantreg, arima, naive

from pyFTS.models import pwfts, song, chen, ifts, hofts
from pyFTS.models.ensemble import ensemble

model = chen.ConventionalFTS(partitioner=partitioner)
#model = hofts.HighOrderFTS(partitioner=partitioner,order=2)
model.append_transformation(tdiff)
model.fit(dataset[:800])

# render the extracted rule base as a figure
cUtil.plot_rules(model, size=[20,20], rules_by_axis=5, columns=1)
Ejemplo n.º 30
0
from pyFTS.common import Transformations

tdiff = Transformations.Differential(1)

boxcox = Transformations.BoxCox(0)

from pyFTS.data import TAIEX, NASDAQ, SP500
from pyFTS.common import Util

train = TAIEX.get_data()[1000:1800]
test = TAIEX.get_data()[1800:2000]

# NOTE(fix): `chen` was used below but never imported in this snippet
from pyFTS.models import pwfts, chen
from pyFTS.partitioners import Grid

fs = Grid.GridPartitioner(data=train, npart=15, transformation=tdiff)

#model = pwfts.ProbabilisticWeightedFTS(partitioner=fs, order=1)

model = chen.ConventionalFTS(partitioner=fs)
model.append_transformation(tdiff)
model.fit(train)

from pyFTS.benchmarks import ResidualAnalysis as ra

# residual diagnostics of the fitted model against the test series
ra.plot_residuals_by_model(test, [model])

horizon = 10
'''
forecasts = model.predict(test[9:20], type='point')
intervals = model.predict(test[9:20], type='interval')