def Computar_solucao_teste(self, dados, lags, enxame): 
     '''
     Este método tem por objetivo computar o fitness de uma particula sentry para um determinado conceito 
     :param dados: dados que serao usados para definir um conceito
     :param lags: quantidade de lags para modelar a entrada da redes
     :param enxame: enxame que possui a particula sentry 
     :return: retorna o fitness para o conjunto de dados passados  
     '''    
     
     # modelando os dados e realizando as previsoes
     particao = Particionar_series(dados, [1, 0, 0], lags)
     [caracteristicas_entrada, caracteristicas_saida] = particao.Part_train()
     previsoes = enxame.Predizer(Entradas=caracteristicas_entrada)
     
     #calculando a estatistica do sensor
     acuracias = []
     for i in range(len(caracteristicas_entrada)):
         erro = mean_absolute_error(caracteristicas_saida[i:i+1], previsoes[i:i+1])
         acuracias.append(erro)
         
     estatistica = [0] * 2
     estatistica[0] = np.mean(acuracias)
     estatistica[1] = np.std(acuracias)
     
     return estatistica
Exemple #2
0
    def Computar_Media(self, vetor_caracteristicas, lags, sensor):
        '''
        Compute the mean and standard deviation of a sensor particle's
        per-sample absolute forecast error on a sample of the series.
        :param vetor_caracteristicas: sample of the time series to evaluate
        :param lags: number of lags used to model the network input
        :param sensor: particle used as the sensor
        :return: [mean, std] of the per-sample absolute errors
        '''

        # model the sample into input/output patterns and forecast it
        janela = Particionar_series(vetor_caracteristicas, [1, 0, 0], lags)
        [entradas, alvos] = janela.Part_train()
        previsoes = sensor.Predizer(entradas)

        # absolute error of each individual observation
        erros = [mean_absolute_error(alvos[i:i+1], previsoes[i:i+1])
                 for i in range(len(entradas))]

        # sensor statistic: [mean, std] of the per-sample errors
        return [np.mean(erros), np.std(erros)]
Exemple #3
0
    def Atualizar_comportamento_media(self, vetor_caracteristicas, lags, enxame):
        '''
        Compute the swarm behaviour (mean and std of the particles' MAE)
        on a sample of the time series.
        :param vetor_caracteristicas: sample of the time series to evaluate
        :param lags: number of lags used to model the network input
        :param enxame: swarm used to check for change
        :return: [mean, std] of the particles' MAE on the sample
        '''

        # model the sample into input/output patterns for training
        janela = Particionar_series(vetor_caracteristicas, [1, 0, 0], lags)
        [entradas, alvos] = janela.Part_train()

        # MAE of each particle's forecast on the sample
        medias = [
            mean_absolute_error(alvos, enxame.sensores[i].Predizer(entradas))
            for i in range(enxame.numero_particulas)
        ]

        # swarm behaviour: [mean, std] of the particle errors
        return [np.mean(medias), np.std(medias)]
Exemple #4
0
def main():
    """Train a random forest on dataset variant 2 and print regression scores."""
    X_train, y_train, X_test, y_test = get_data_without_cols(2)

    # NOTE(review): criterion="mse" was renamed "squared_error" in sklearn >= 1.0
    # — confirm the pinned sklearn version before changing it.
    model = RandomForestRegressor(
        n_estimators=100,
        n_jobs=-1,
        random_state=1,
        criterion="mse",
        max_features="sqrt",
    )
    model.fit(X_train, y_train.iloc[:, 0])
    y_pred = model.predict(X_test).reshape(-1, 1)

    # summary metrics: R2, MAE, and the mean of predictions vs. targets
    r2 = r2_score(y_test, y_pred)
    abs_err = mean_absolute_error(y_test, y_pred)
    target_mean = y_test.mean(axis=0).mean()
    pred_mean = y_pred.mean(axis=0).mean()

    print("{} | mean error : {} | mean {} / {}".format(
        round(r2, 3),
        round(abs_err, 3),
        round(pred_mean, 3),
        round(target_mean, 3),
    ))
Exemple #5
0
def evaluate_model(predicted_values, actual_values):
    """Print R2, mean absolute error and median absolute error for predictions."""
    print('---------------------------------\n')
    # output strings are kept verbatim (including the 'Absoulte' typo) so the
    # reported format does not change for downstream log consumers
    lines = (
        ('R2 Score : %f', regression.r2_score),
        ('Mean Absoulte Error : %f', regression.mean_absolute_error),
        ('Median Absolute Error: %f', regression.median_absolute_error),
    )
    for template, metric in lines:
        print(template % metric(actual_values, predicted_values))
    print('----------------------------------\n')
Exemple #6
0
 def calc_error_metrics(self):
     """Populate self.scores with standard error metrics of y_pred vs y_true."""
     metric_fns = {
         # log loss, aka logistic loss or cross-entropy loss
         'LogLoss': log_loss,
         'Mean Squared Error': mean_squared_error,
         'Mean Absolute Error': mean_absolute_error,
         # coefficient of determination — how well the data fits the model
         'R2 Score': r2_score,
     }
     for key, fn in metric_fns.items():
         self.scores[key] = fn(self.y_true, self.y_pred)

     """TBD compute the log-loss to consider boolean"""
     return
Exemple #7
0
def get_regression_metrics(y_test, y_score, reg_type='lin', res_rmse=True):
    """Return a formatted correlation/RMSE label plus the raw statistics.

    :param y_test: ground-truth values (1-D sequence)
    :param y_score: predicted values (1-D sequence)
    :param reg_type: 'lin' for Pearson r (via linregress), 'rank' for Spearman rho
    :param res_rmse: when True, append the RMSE to the returned label
    :return: (label string, correlation coefficient, RMSE)
    """
    from scipy.stats import linregress, spearmanr
    y_true = np.asarray(y_test, dtype=float)
    y_pred = np.asarray(y_score, dtype=float)
    # BUG FIX: RMSE is sqrt(mean *squared* error); the original computed
    # sqrt(mean_absolute_error), which is not RMSE. Also drops the deprecated
    # sklearn.metrics.regression module (removed in sklearn 0.24).
    rmse = np.sqrt(np.mean((y_true - y_pred) ** 2))
    if reg_type == 'lin':
        slope, intercept, r, p_value, std_err = linregress(y_test, y_score)
        result = "$r$=%0.2f" % (r)
    elif reg_type == 'rank':
        r, p_value = spearmanr(y_test, y_score)
        # BUG FIX: raw string — in a normal literal "\r" is a carriage return,
        # so the label never actually contained "\rho"
        result = r"$\rho$=%0.2f" % (r)
    if res_rmse:
        result = "%s\nRMSE=%0.2f" % (result, rmse)
    return result, r, rmse
Exemple #8
0
def _print_regressionMetrics(_linear, _X, _y, _predict):
	"""Print a table of fit metrics for a fitted linear-regression model."""
	# header row, then one [label, value] row per metric
	metrics = [['Regresión Lineal', 'Datos obtenidos']]
	metrics.append(['Coeficiente', _linear.coef_])
	metrics.append(['Interceptación', _linear.intercept_])
	metrics.append(['Calificación (score)', _linear.score(_X, _y)])
	metrics.append(['Variance Score', r2_score(_y, _predict)])
	metrics.append(['Explained Variance Score', explained_variance_score(_y, _predict)])
	metrics.append(['Mean Squared Error', mean_squared_error(_y, _predict)])
	metrics.append(['Mean Absolute Error', mean_absolute_error(_y, _predict)])

	print('\nMinería de Datos - Regresión Lineal - <VORT>', '\n')
	print(_linear, '\n')
	print(look(metrics))
Exemple #9
0
def ml_scoring(model, testX, testY):
    """Evaluate a fitted model on a test split.

    :param model: fitted estimator exposing .predict
    :param testX: test features
    :param testY: true targets
    :return: dict of scores. NOTE: the historical key "max_abs_err" actually
        holds the *mean* absolute error; it is kept for backward compatibility
        and the same value is also exposed under the accurate name
        "mean_abs_err".
    """
    prediction = model.predict(testX)
    mxerr = max_error(testY, prediction)
    mabserr = mean_absolute_error(testY, prediction)
    r2d2 = r2_score(testY, prediction)
    # mlogerr = mean_squared_log_error(testY, prediction)

    return {
        "max_err": mxerr,
        "max_abs_err": mabserr,  # misnamed legacy key: value is the MEAN abs error
        "mean_abs_err": mabserr,
        "r2": r2d2,
        # "mlog": mlogerr
    }
Exemple #10
0
def main(establishment_number=0, out="results.csv"):
    """Train a random forest for one establishment and dump predictions to CSV.

    :param establishment_number: which establishment's data to load
    :param out: output file name (written under data/)
    """

    # columns excluded from the feature set
    cols_to_drop = [
        "date_timestamp",
        "Vacances_A",
        "Vacances_B",
        "library",
    ]
    cols_weather = [
        'rainfall',
        'temperature',
        'humidity',
        'pressure',
        'pressure_variation',
        'pressure_variation_3h',
    ]

    X_train, y_train, X_test, y_test = get_data(
        columns_to_drop=cols_to_drop + cols_weather, drop_na=True,
        establishment_number=establishment_number,
        drop_value_below_threshold=None,
    )

    # fit the model and predict the test split
    model = RandomForestRegressor(n_estimators=500, min_samples_split=3, max_features="sqrt", random_state=1)
    model.fit(X_train, y_train.iloc[:, 0])
    y_pred = model.predict(X_test).reshape(-1, 1)

    # persist features, targets and predictions side by side
    frame = pd.DataFrame(
        np.concatenate([X_test, y_test, y_pred], axis=1),
        columns=[*list(X_test), "Real visitor number", "Predicted visitor number"],
    )
    frame.to_csv("data/" + out, sep=";", decimal=",")

    # report scores
    r2 = r2_score(y_test, y_pred)
    max_visitors = y_test.max(axis=0).max()
    mean_visitors = y_test.mean(axis=0).mean()
    mean_error = mean_absolute_error(y_test, y_pred)

    print(
        """
        score R2   : {}
        max        : {}
        mean       : {}
        mean error : {}
        """.format(r2, max_visitors, mean_visitors, mean_error)
    )
Exemple #11
0
    def Avaliar_particulas(self, enxame_atual, dados, lags):
        '''
        Evaluate every particle of the swarm on a given concept and return the
        particle with the lowest error.
        :param enxame_atual: current swarm whose particles (sensors) are evaluated
        :param dados: data used to measure the accuracy of the stored models
        :param lags: number of lags used to model the network input
        :return: the best particle (sensor) for the given concept
        '''

        # model the concept data into input/output patterns
        particao = Particionar_series(dados, [1, 0, 0], lags)
        [dados_x, dados_y] = particao.Part_train()

        # MAE of each sensor's forecast on the concept data
        acuracias = []
        for i in range(len(enxame_atual.sensores)):
            previsao = enxame_atual.Predizer(dados_x, i)
            acuracias.append(mean_absolute_error(dados_y, previsao))

        # the particle with the minimum error wins
        # (a large block of dead, stringified matplotlib plotting code and
        # debug prints was removed here — it was never executed)
        j = np.argmin(acuracias)
        return enxame_atual.sensores[j]
Exemple #12
0
def evaluate(ytrue, ypred):
    """Score predictions against ground truth.

    :param ytrue: true value of the dependent variable, numpy array
    :param ypred: predictions for the dependent variable, numpy array
    :return: tuple (R2, MSE, MAE, explained variance, Spearman rho)
    """
    return (
        r2_score(ytrue, ypred),
        mean_squared_error(ytrue, ypred),
        mean_absolute_error(ytrue, ypred),
        explained_variance_score(ytrue, ypred),
        spearmanr(ytrue, ypred)[0],
    )
 def Reavaliar_sentry(self, dados, lags, enxame):
     '''
     Recompute the fitness (MAE) of the swarm's sentry particle on a concept.
     :param dados: data that defines the concept
     :param lags: number of lags used to model the network input
     :param enxame: swarm that owns the sentry particle
     :return: MAE of the swarm forecast on the given data
     '''

     # model the data, forecast with the swarm, and score the forecast
     janela = Particionar_series(dados, [1, 0, 0], lags)
     [entradas, alvos] = janela.Part_train()
     return mean_absolute_error(alvos, enxame.Predizer(Entradas=entradas))
Exemple #14
0
def get_regression_metrics(y_test,y_score,
                            reg_type='lin',
                            res_rmse=True):
    """Return a formatted correlation/RMSE label plus the raw statistics.

    :param y_test: ground-truth values (1-D sequence)
    :param y_score: predicted values (1-D sequence)
    :param reg_type: 'lin' for Pearson r (via linregress), 'rank' for Spearman rho
    :param res_rmse: when True, append the RMSE to the returned label
    :return: (label string, correlation coefficient, RMSE)
    """
    from scipy.stats import linregress, spearmanr
    y_true = np.asarray(y_test, dtype=float)
    y_pred = np.asarray(y_score, dtype=float)
    # BUG FIX: RMSE is sqrt(mean *squared* error); the original computed
    # sqrt(mean_absolute_error), which is not RMSE. Also drops the deprecated
    # sklearn.metrics.regression module (removed in sklearn 0.24).
    rmse = np.sqrt(np.mean((y_true - y_pred) ** 2))
    if reg_type == 'lin':
        slope, intercept, r, p_value, std_err = linregress(y_test, y_score)
        result = "$r$=%0.2f" % (r)
    elif reg_type == 'rank':
        r, p_value = spearmanr(y_test, y_score)
        # BUG FIX: raw string — in a normal literal "\r" is a carriage return,
        # so the label never actually contained "\rho"
        result = r"$\rho$=%0.2f" % (r)
    if res_rmse:
        result = "%s\nRMSE=%0.2f" % (result, rmse)
    return result, r, rmse
Exemple #15
0
def test_multi_output_learner_regressor():
    # Regression test: MultiOutputLearner wrapping an SGDRegressor on a
    # synthetic 2-target stream. The pinned expected_performance below depends
    # on every seed and hyper-parameter here — do not change any of them.

    stream = RegressionGenerator(n_samples=5500,
                                 n_features=10,
                                 n_informative=20,
                                 n_targets=2,
                                 random_state=1)
    stream.prepare_for_use()

    estimator = SGDRegressor(random_state=112,
                             tol=1e-3,
                             max_iter=10,
                             loss='squared_loss')
    learner = MultiOutputLearner(base_estimator=estimator)

    # warm-up fit on the first 150 samples
    X, y = stream.next_sample(150)
    learner.partial_fit(X, y)

    cnt = 0
    max_samples = 5000
    predictions = []
    true_targets = []
    wait_samples = 100
    correct_predictions = 0

    # prequential loop: predict at every checkpoint, then keep training
    while cnt < max_samples:
        X, y = stream.next_sample()
        # Test every n samples
        if (cnt % wait_samples == 0) and (cnt != 0):
            predictions.append(learner.predict(X)[0])
            true_targets.append(y[0])
            # exact multi-target match counts as a correct prediction
            if np.array_equal(y[0], predictions[-1]):
                correct_predictions += 1

        learner.partial_fit(X, y)
        cnt += 1

    # MAE over the checkpoint predictions must match the pinned value
    expected_performance = 2.444365309339395
    performance = mean_absolute_error(true_targets, predictions)
    assert np.isclose(performance, expected_performance)

    assert learner._estimator_type == "regressor"
    assert type(learner.predict(X)) == np.ndarray

    # predict_proba is undefined for regressors
    with pytest.raises(AttributeError):
        learner.predict_proba(X)
Exemple #16
0
def read_input_data():
    """Train a DBN on the normalized dataset and print regression scores.

    Side effects only: prints predictions, target shape and metrics; returns
    None.
    """
    X_train, Y_train, X_test, Y_test = normalize_data()
    rng = numpy.random.RandomState(123)
    dbn = DBN(input=X_train,
              label=Y_train,
              n_ins=X_train.shape[1],
              hidden_layer_sizes=[80] * 10,
              n_outs=1,
              rng=rng)
    dbn.pretrain(lr=0.001, k=1, epochs=1000)
    dbn.finetune(lr=0.001, epochs=200)
    results = dbn.predict(X_test)
    print("results", results, Y_test)
    print(Y_test.shape)
    # BUG FIX: r2_score is asymmetric — the ground truth must be the first
    # argument; the original passed (predictions, truth). MSE/MAE are
    # symmetric, so reordering them only restores the sklearn convention.
    print(r2_score(Y_test, results), mean_squared_error(Y_test, results))
    print(mean_absolute_error(Y_test, results))
Exemple #17
0
def get_regression_metrics(ground_truth_value, predicted_value):
    """Return a dict mapping metric names to sklearn regression scores."""
    metric_fns = [
        ('r2_score', r2_score),
        ('mean_squared_error', mean_squared_error),
        #('mean_squared_log_error', mean_squared_log_error),
        ('mean_absolute_error', mean_absolute_error),
        ('explained_variance_score', explained_variance_score),
        ('median_absolute_error', median_absolute_error),
        ('max_error', max_error),
    ]
    # insertion order matches the original key order
    return {name: fn(ground_truth_value, predicted_value)
            for name, fn in metric_fns}
def get_resutls_column(model, trainfolds_dfs, testfolds_dfs, train_set,
                       test_set, feature_set, target_col_name):
    """Cross-validate `model` on the given folds, then refit on the full
    training set and score the held-out test set.

    :param model: estimator exposing fit/predict
    :param trainfolds_dfs: list of training-fold DataFrames
    :param testfolds_dfs: list of matching test-fold DataFrames
    :param train_set: full training DataFrame
    :param test_set: held-out test DataFrame
    :param feature_set: list of feature column names
    :param target_col_name: name of the target column
    :return: [test MAE, test MSE, test Pearson, test Spearman, CV-avg MAE,
        CV-avg MSE, CV-avg Pearson, CV-avg Spearman, feature_set,
        len(feature_set)] followed by the test-set predictions
    """
    MSEs = [None] * len(testfolds_dfs)
    MAEs = [None] * len(testfolds_dfs)
    SPs = [None] * len(testfolds_dfs)
    PNs = [None] * len(testfolds_dfs)
    for i in range(len(testfolds_dfs)):
        train_X = trainfolds_dfs[i].loc[:, feature_set].values
        train_Y = trainfolds_dfs[i].loc[:, target_col_name].values
        test_X = testfolds_dfs[i].loc[:, feature_set].values
        test_Y = testfolds_dfs[i].loc[:, target_col_name].values
        model.fit(train_X, train_Y)
        test_pred = model.predict(test_X)
        MAEs[i] = MAE(test_pred, test_Y)
        MSEs[i] = MSE(test_pred, test_Y)
        SPs[i] = SPC(test_pred, test_Y)
        PNs[i] = PNC(test_pred, test_Y)

    train_cvavg_MAE = numpy.mean(MAEs)
    train_cvavg_MSE = numpy.mean(MSEs)
    train_cvavg_PN = numpy.mean(PNs)
    train_cvavg_SP = numpy.mean(SPs)

    # BUG FIX: the original referenced an undefined name `target_col` on the
    # next two target lookups; use the `target_col_name` parameter, exactly as
    # the CV loop above does.
    test_Y = test_set.loc[:, target_col_name].values
    test_X = test_set.loc[:, feature_set].values
    train_X = train_set.loc[:, feature_set].values
    train_Y = train_set.loc[:, target_col_name].values

    model.fit(train_X, train_Y)
    test_pred = model.predict(test_X)

    testset_pn, _ = pearsonr(test_Y, test_pred)
    testset_sp, _ = spearmanr(test_Y, test_pred)
    testset_mae = mean_absolute_error(test_Y, test_pred)
    testset_mse = mean_squared_error(test_Y, test_pred)

    column = [
        testset_mae, testset_mse, testset_pn, testset_sp, train_cvavg_MAE,
        train_cvavg_MSE, train_cvavg_PN, train_cvavg_SP, feature_set,
        len(feature_set)
    ]
    column += list(test_pred)
    return column
Exemple #19
0
def test_models(library_name, xy_train_test, models=classes_to_test, print_results=False):
    """Fit and score every (model, name) pair on the given train/test split.

    :param library_name: label recorded with each result
    :param xy_train_test: (X_train, y_train, X_test, y_test) tuple
    :param models: iterable of (estimator, display name) pairs
    :param print_results: when True, print a one-line summary per model
    :return: list of result dicts (scores, parameters, predictions frame)
    """
    X_train, y_train, X_test, y_test = xy_train_test

    results = []
    for model, name in models:
        model.fit(X_train, y_train.iloc[:, 0])
        y_pred = model.predict(X_test).reshape(-1, 1)

        r2 = r2_score(y_test, y_pred)
        n, p = X_test.shape
        r_adj = calculate_r2_adjusted_score(r2, n, p)

        # features, targets and predictions side by side for inspection
        merged = np.concatenate([X_test, y_test, y_pred], axis=1)
        df = pd.DataFrame(merged, columns=[* list(X_test), * list(y_test), "pred"])

        m_e = mean_absolute_error(y_test, y_pred)
        mean = y_test.mean(axis=0).mean()
        mean_pred = y_pred.mean(axis=0).mean()

        results.append({
            "score": r2,
            "score_adjusted": r_adj,
            "parameters": list(X_test),
            "library_name": library_name,
            "model_name": name,
            "data": df,
        })

        if print_results:
            print("{} : {} / {} ||| mean error : {} | mean : {} / {}".format(
                name,
                round(r2, 3),
                round(r_adj, 3),
                round(m_e, 3),
                round(mean_pred, 3),
                round(mean, 3),
            ))

    return results
Exemple #20
0
def score(
        # n_estimators,
        max_depth,
        min_child_weight,
        subsample,
        gamma,
        colsample_bytree):
    """Objective for hyper-parameter optimisation of an XGBoost model.

    Mutates the module-level `params` dict with the candidate values, trains
    one model per (dtrain, dtest) split in the module-level `ttsplits`, and
    returns the negated average MAE across splits (negated because the
    optimiser maximises).
    """
    params['max_depth'] = int(max_depth)
    params['min_child_weight'] = min_child_weight
    params['subsample'] = subsample
    params['gamma'] = gamma
    params['colsample_bytree'] = colsample_bytree
    scores = []
    for (dtrain, dtest) in ttsplits:
        model = xgb.train(params, dtrain, 500)
        y_pred = model.predict(dtest)
        # fold_mae renamed from `score` to stop shadowing this function
        fold_mae = mean_absolute_error(dtest.get_label(), y_pred)
        scores.append(fold_mae)
    # BUG FIX: average the collected per-fold scores; the original returned
    # -np.mean(score) — the last fold's MAE only — leaving `scores` unused.
    return -np.mean(scores)
 def Computar_estatisticas_ECDD(self, vetor_caracteristicas, lags, ELM):
     '''
     Compute the ECDD change-detection statistics of the forecast error.
     :param vetor_caracteristicas: sample of the time series to evaluate
     :param lags: number of lags used to model the network input
     :param ELM: trained ELM used to forecast the sample
     :return: (mean, std) of the per-sample absolute errors
     '''
     # NOTE(review): divisao_dataset is assumed to be a module-level split
     # definition (the sibling methods pass [1, 0, 0]) — confirm where it is
     # defined before reusing this method elsewhere.
     janela = Particionar_series(vetor_caracteristicas, divisao_dataset, lags)
     [entradas, alvos] = janela.Part_train()

     # one-step forecasts for the whole sample
     previsoes = ELM.Predizer(entradas)

     # absolute error of each individual observation
     erros = [mean_absolute_error(alvos[i:i+1], previsoes[i:i+1])
              for i in range(len(alvos))]

     return np.mean(erros), np.std(erros)
Exemple #22
0
    def Computar_comportamento_atual(self, dados, real, enxame):
        '''
        Compute the swarm behaviour for the current data.
        :param dados: data used for the one-step-ahead forecast
        :param real: true value(s) corresponding to the forecast
        :param enxame: swarm used to check for change
        :return: [mean, std] of the particles' MAE at the current instant
        '''

        # MAE of each particle's forecast against the real value(s)
        medias = [
            mean_absolute_error(real, enxame.sensores[i].Predizer(dados))
            for i in range(enxame.numero_particulas)
        ]

        # current behaviour: [mean, std] of the particle errors
        return [np.mean(medias), np.std(medias)]
 def compile_results(self):
     # Aggregate per-dimension results: for every autoencoder latent dimension,
     # load the saved actual/predicted pairs, compute MAE and R^2, and write
     # two summary plots to the user's Desktop. Side effects only.
     avgAbsErrors = []
     r2Values = []
     for dim in MLParameterTuner.dims:
         # file layout: row 0 = actual values, row 1 = predicted values
         actualThenPredicted = np.loadtxt(str(self.get_path(dim) / 'actualThenPredicted.txt'))
         avgAbsErrors.append(mean_absolute_error(actualThenPredicted[0], actualThenPredicted[1]))
         r2Values.append(r2_score(actualThenPredicted[0], actualThenPredicted[1]))
         
     # make plots with matplotlib
     plt.plot(MLParameterTuner.dims, avgAbsErrors)
     plt.ylim(0,20)
     plt.title('ML pipeline tuning: scan over autoencoder dimension')
     plt.xlabel('Latent representation dimension')
     plt.ylabel('SVM cross validation avg absolute error')
     plt.savefig(str(Path.home() / 'Desktop' / 'AvgAbsErrorVsDim.png'))
     # clear the current figure so the second plot starts fresh
     plt.gcf().clear()
     
     plt.plot(MLParameterTuner.dims, r2Values)
     plt.ylim(0,1)
     plt.title('ML pipeline tuning: scan over autoencoder dimension')
     plt.xlabel('Latent representation dimension')
     plt.ylabel('SVM cross validation R^2')
     plt.savefig(str(Path.home() / 'Desktop' / 'R2VsDim.png'))
 def Funcao(self, posicao):
     '''
     Objective function of the IDPSO: the validation error of an ELM whose
     input-layer weights and biases are taken from the particle position.
     :param posicao: particle position = ELM input-layer weights and biases
     :return: MAE of the ELM forecast on the validation set (the old docstring
         said MSE, but the metric computed here is the MAE)
     '''

     # build an ELM and shape the particle into its weight matrix
     modelo = ELMRegressor(self.qtd_neuronios)
     pesos = posicao.reshape(self.linhas, self.qtd_neuronios)

     # train on the training split using the particle's weights
     modelo.Treinar(self.dataset[0], self.dataset[1], pesos)

     # score on the validation split — guards against overfitting the training set
     previsao = modelo.Predizer(self.dataset[2])
     return mean_absolute_error(self.dataset[3], previsao)
 def Predizer(self, Entradas, num_sensor = None, Saidas = None, grafico = None):
     '''
     Forecast with the best particle (ELM) of the swarm, or with a chosen
     sensor, optionally reporting the error and plotting the forecast.
     :param Entradas: input patterns to forecast
     :param num_sensor: index of the sensor to use; when None the gbest ELM is used
     :param Saidas: true outputs; when given, the MAE is computed and returned
     :param grafico: boolean — when True, show a forecast plot (only with Saidas)
     :return: the forecast, or the error when Saidas is provided
     '''

     # BUG FIX: use identity comparison with None — `== None` is ambiguous
     # (and raises) when Saidas/num_sensor is a numpy array, and is
     # non-idiomatic in any case.
     if num_sensor is None:

         if Saidas is None:
             # plain forecast with the global best ELM
             return self.best_elm.Predizer(Entradas)

         prediction = self.best_elm.Predizer(Entradas)
         # NOTE(review): variable and printout say MSE but the metric is MAE
         MSE = mean_absolute_error(Saidas, prediction)
         print('\n MSE: %s' %MSE)

         # optional forecast plot
         if grafico == True:
             plt.plot(Saidas, label = 'Real', color = 'Blue')
             plt.plot(prediction, label = 'Previsão', color = 'Red')
             plt.title('MSE: %s' %MSE)
             plt.legend()
             plt.tight_layout()
             plt.show()

         return MSE

     # forecast with the requested sensor
     return self.sensores[num_sensor].Predizer(Entradas)
Exemple #26
0
def main():
    '''
    Demo script: trains several IDPSO-ELM swarms on segments of a series,
    stores the resulting environments in the long-term memory (LTM), recalls
    a past environment and re-evaluates the particles to update the gbest.
    '''
    
    # loading and normalizing the dataset
    dtst = Datasets('dentro')
    serie = dtst.Leitura_dados(dtst.bases_linear_graduais(3), csv=True)
    particao = Particionar_series(serie, [0.0, 0.0, 0.0], 0)
    serie = particao.Normalizar(serie)

    # instantiating the long-term memory
    memoria = LTM(1, 0.5)

    # creating the first model/environment four times to populate the memory
    # (the original script repeated this block verbatim four times)
    serie1 = serie[:500]
    for _ in range(4):
        enxame1 = IDPSO_ELM(serie1, [0.8, 0.2, 0], 5, 10)
        enxame1.Treinar()
        ambiente1 = Ambiente(enxame1.particulas, enxame1.best_elm)
        memoria.Adicionar_ambiente(ambiente1)

    # creating the second model and its environment
    serie2 = serie[500:1000]
    enxame2 = IDPSO_ELM(serie2, [0.8, 0.2, 0], 5, 10)
    enxame2.Treinar()
    ambiente2 = Ambiente(enxame2.particulas, enxame2.best_elm)
    memoria.Adicionar_ambiente(ambiente2)
    
    # creating the third model and its environment
    serie3 = serie[0:500]
    enxame3 = IDPSO_ELM(serie3, [0.8, 0.2, 0], 5, 10)
    enxame3.Treinar()
    ambiente3 = Ambiente(enxame3.particulas, enxame3.best_elm)
    memoria.Adicionar_ambiente(ambiente3)
    
    # recalling a past model for a new data segment
    serie4 = serie[1500:2000]
    enxame3 = memoria.Relembrar_ambiente(enxame3, serie4, 5)
    
    # creating one more model (not stored in the memory)
    serie5 = serie[2000:2500]
    enxame4 = IDPSO_ELM(serie5, [0.8, 0.2, 0], 5, 10)
    enxame4.Treinar()
        
    # evaluating the particles for a given concept; updates the gbest if a
    # better particle exists
    serie6 = serie[2500:3000]
    particao = Particionar_series(serie6, [1, 0, 0], 5)
    [dados_x, dados_y] = particao.Part_train()
    
    # error with the previous gbest
    memoria.Avaliar_particulas(enxame1, serie6, 5)
    previsao = enxame1.Predizer(dados_x)
    mae = mean_absolute_error(dados_y, previsao)
    print("\ngbest anterior - mae: ", mae)
    
    # error after replacing the gbest by the best evaluated particle
    best_model = memoria.Avaliar_particulas(enxame1, serie6, 5)
    enxame1.Atualizar_bestmodel(best_model)
    previsao = enxame1.Predizer(dados_x)
    mae = mean_absolute_error(dados_y, previsao)
    print("gbest posterior - mae: ", mae)
Exemple #27
0
    def Relembrar_ambiente(self, enxame_atual, dados, lags):
        '''
        Recall a stored environment: compare every stored gbest against the
        current swarm on the given data and return the most accurate model.
        :param enxame_atual: current swarm whose best model may be replaced
        :param dados: data used to evaluate the accuracy of the stored models
        :param lags: number of lags used to build the network inputs
        :return: the best model (ELM) for the given data
        '''
        
        # building the input/output patterns from the raw data
        particao = Particionar_series(dados, [1, 0, 0], lags)
        [dados_x, dados_y] = particao.Part_train()
        
        # empty memory: nothing to compare against, keep the current best model
        # (the original fell through here and implicitly returned None)
        if(self.qtd_memoria == 0):
            return enxame_atual.best_elm
        
        # error of each stored environment's gbest on the new data
        acuracias = []
        for ambiente in self.vetor_ambientes:
            previsao = ambiente.gbest.Predizer(dados_x)
            acuracias.append(mean_absolute_error(dados_y, previsao))
        
        # error of the current swarm's best model
        previsao = enxame_atual.Predizer(dados_x)
        erro_atual = mean_absolute_error(dados_y, previsao)
        
        # index of the most accurate stored model
        j = np.argmin(acuracias)
        
        # return whichever model is more accurate on the new data
        if(acuracias[j] < erro_atual):
            return self.vetor_ambientes[j].gbest
        else:
            return enxame_atual.best_elm
 def Executar(self, grafico = None):
     '''
     Run the full algorithm: an initial static training period followed by a
     dynamic stream with sensor-based drift monitoring, an alert phase that
     buffers data, LTM-based gbest updates and model retraining after a change.
     :param grafico: boolean flag to enable logging and the final plots
     :return: returns 4 values: [falsos_alarmes, atrasos, MAE, tempo_execucao]
     '''
     
     ################################################################################################################################################
     ################################# DATASET CONFIGURATION ########################################################################################
     ################################################################################################################################################
     
     # splitting the dataset into the initial training block and the dynamic stream
     treinamento_inicial = self.dataset[0:self.n]
     stream = self.dataset[self.n:]
 
     ################################################################################################################################################
     ################################# STATIC PERIOD ################################################################################################
     ################################################################################################################################################
     
     # creating and training the current model used for forecasting
     enxame = IDPSO_ELM(treinamento_inicial, divisao_dataset, self.lags, self.qtd_neuronios)
     enxame.Parametros_IDPSO(it, self.numero_particulas, inercia_inicial, inercia_final, c1, c2, xmax, crit_parada)
     enxame.Treinar()  
    
     # seeding the prediction window with the last pattern of the training set
     janela_predicao = Janela()
     janela_predicao.Ajustar(enxame.dataset[0][(len(enxame.dataset[0])-1):])
     predicao = enxame.Predizer(janela_predicao.dados)
     
     # window holding the current concept, also used to store retraining data
     janela_caracteristicas = Janela()
     janela_caracteristicas.Ajustar(treinamento_inicial)
 
     # alert window, filled while the detector is in the warning state
     janela_alerta = Janela()
     janela_alerta.Ajustar(janela_predicao.dados_mais)
     
     # activating the sensors according to the first characteristics window
     s = S(self.qtd_sensores, self.w, self.c)
     s.armazenar_conceito(janela_caracteristicas.dados, self.lags, enxame)
     
     # instantiating the long-term memory (LTM)
     memoria = LTM(0, 0)
     # generate image (debug)
     #memoria.Avaliar_particulas(enxame, janela_caracteristicas.dados, self.lags)
     ################################################################################################################################################
     ################################# DYNAMIC PERIOD ###############################################################################################
     ################################################################################################################################################
     
     # accumulated forecasting error over the stream
     erro_stream = 0
     # indices where a change was detected
     deteccoes = []
     # indices where an alarm (warning) was raised
     alarmes = []
     # start timestamp for the execution-time measurement
     start_time = time.time()
     
     # buffers for the per-step predictions and errors (plotting only)
     if(grafico):
         predicoes_vetor = [None] * len(stream)
         erro_stream_vetor = [None] * len(stream)
         
     # auxiliary state flags
     mudanca_ocorreu = False
     alerta_ocorreu = False
     
     # entering the data stream
     for i in range(1, len(stream)):
         
         # computing the one-step-ahead error
         loss = mean_absolute_error(stream[i:i+1], predicao)
         erro_stream += loss
 
         # adding the new observation to the prediction window
         janela_predicao.Add_janela(stream[i])
             
         # forecasting the next value with the updated window
         predicao = enxame.Predizer(janela_predicao.dados)
         
         # saving the error and the prediction (plotting only)
         if(grafico):                
             # saving the error
             erro_stream_vetor[i] = loss
             # saving the prediction
             predicoes_vetor[i] = predicao
         
         #print("[", i, "]")
         
         # monitoring phase: no change detected yet
         if(mudanca_ocorreu == False):
             
             # checking the sensors
             mudou = s.monitorar(loss, i, True)
             
             # a change was detected
             if(mudou):
                 if(grafico == True):    
                     print("[%d] Mudança" % (i))
                 deteccoes.append(i)
                 
                 # resetting the training window
                 janela_caracteristicas.Zerar_Janela()
             
                 # replacing the best model by the best particle evaluated on the alert data
                 best_model = memoria.Avaliar_particulas(enxame, janela_alerta.dados, self.lags)
                 enxame.Atualizar_bestmodel(best_model)
                 
                 # switching the flow to the retraining phase
                 mudanca_ocorreu = True
                 alerta_ocorreu = False
             
             # while in the warning state, keep collecting data
             if(alerta_ocorreu):
                 # adding the observation to the alert window
                 janela_alerta.Increment_Add(stream[i])
                 
                 # if the alert window holds more than n/2 observations, reset it
                 if(len(janela_alerta.dados) > (self.n/2)):
             
                     # resetting the alert window
                     # NOTE(review): seeded with dados_mais[0] here, but the
                     # initial setup above used dados_mais — confirm which
                     # shape Janela.Ajustar expects
                     janela_alerta.Ajustar(janela_predicao.dados_mais[0])
                   
             # no change detected
             else:
                 
                 # checking whether the detector entered the warning state
                 alerta = s.monitorar_gbest()
                 if(alerta):
                     if(grafico):    
                         print("[%d] Alarme" % (i))
                     alarmes.append(i)
                 
                     # turning on the alert flag to start storing data
                     alerta_ocorreu = True
                     # seeding the alert window with the current pattern
                     janela_alerta.Ajustar(janela_predicao.dados_mais[0])
                 
         else:
             
             #print("[", i, "] - atualizando gbest, dados: ", len(janela_alerta.dados))
             
             # updating the gbest at every time step during retraining
             best_model = memoria.Avaliar_particulas(enxame, janela_alerta.dados, self.lags)
             enxame.Atualizar_bestmodel(best_model)
             
             # collecting data until there is enough to retrain
             if(len(janela_caracteristicas.dados) < self.n):
               
                 # adding the new observation to the characteristics window
                 janela_caracteristicas.Increment_Add(stream[i])
                 
                 # adding the new observation to the alert window
                 janela_alerta.Increment_Add(stream[i])
                 
                 
                 # if the alert window holds n observations, retraining is possible
                 if(len(janela_alerta.dados) >= self.n):
            
                     # refreshing the characteristics window from the alert window
                     janela_caracteristicas.Ajustar(janela_alerta.dados)
                     
                     # resetting the alert window
                     janela_alerta.Ajustar(janela_predicao.dados_mais[0])
                 
             
             # enough retraining data collected
             else:
             
                 # retraining the current predictive model
                 enxame = IDPSO_ELM(janela_caracteristicas.dados, divisao_dataset, self.lags, self.qtd_neuronios)
                 enxame.Parametros_IDPSO(it, self.numero_particulas, inercia_inicial, inercia_final, c1, c2, xmax, crit_parada)
                 enxame.Treinar() 
                 
                 # re-seeding the prediction window with the last training pattern
                 janela_predicao = Janela()
                 janela_predicao.Ajustar(enxame.dataset[0][(len(enxame.dataset[0])-1):])
                 predicao = enxame.Predizer(janela_predicao.dados)
                 
                 # re-activating the sensors on the new concept
                 s = S(self.qtd_sensores, self.w, self.c)
                 s.armazenar_conceito(janela_caracteristicas.dados, self.lags, enxame)
                 
                 # returning to the monitoring phase of the main loop
                 mudanca_ocorreu = False
     
     # end timestamp for the execution-time measurement
     end_time = time.time()
     
     # computing the detection metrics
     mt = Metricas_deteccao()
     [falsos_alarmes, atrasos] = mt.resultados(stream, deteccoes, self.n)
  
     # computing the mean forecasting error over the stream
     MAE = erro_stream/len(stream)
     
     # computing the execution time
     tempo_execucao = (end_time-start_time)
     
     if(grafico == True):
         tecnica = "P-IDPSO-ELM-SV"
         print(tecnica)
         print("Alarmes:")
         print(alarmes)
         print("Deteccoes:")
         print(deteccoes)
         print("Falsos Alarmes: ", falsos_alarmes)
         print("Atrasos: ", atrasos)
         print("MAE: ", MAE)
         print("Tempo de execucao: ", tempo_execucao)
     
     # plotting the error chart
     if(grafico == True):
         g = Grafico()
         g.Plotar_graficos(stream, predicoes_vetor, deteccoes, alarmes, erro_stream_vetor, self.n, atrasos, falsos_alarmes, tempo_execucao, MAE, nome=tecnica)
                        
     # method return
     return falsos_alarmes, atrasos, MAE, tempo_execucao
            MAE_list = []
            
            for z in range(10):
                # instanciando o método IDPSO-ELM
                idpso_elm = IDPSO_ELM(serie, divisao_dataset, janela_tempo, qtd_neuronis)
                idpso_elm.Parametros_IDPSO(it, particulas, inercia_inicial[j], inercia_final[k], c1, c2, xmax[i], crit)
                idpso_elm.Treinar()  
                
                # organizando os dados para comparacao #
                train_x, train_y = idpso_elm.dataset[0], idpso_elm.dataset[1] 
                val_x, val_y = idpso_elm.dataset[2], idpso_elm.dataset[3]
                test_x, test_y = idpso_elm.dataset[4], idpso_elm.dataset[5]
                
                ################################## computando a previsao para o conjunto de validacao ################################
                previsao = idpso_elm.Predizer(val_x)
                MAE = mean_absolute_error(val_y, previsao)
                MAE_list.append(MAE)
            
            cont += 1
            media = np.mean(MAE_list)
            print(cont, 'Media MAE: ', MAE)
            
            if(erro_best > media):
                erro_best = MAE
                xmax_best = i
                inercia_inicial_best = j
                inercia_final_best = k
            ###################################################################################


print("Erro best: ", erro_best)
Exemple #30
0
    def Executar(self, grafico = None):
        '''
        Run the baseline algorithm: static ELM training followed by a plain
        streaming forecast loop (no drift detection or retraining here).
        :param grafico: boolean flag to enable logging and the final plots
        :return: returns 4 values: [falsos_alarmes, atrasos, MAE, tempo_execucao]
        '''

        ################################################################################################################################################
        ################################# DATASET CONFIGURATION ########################################################################################
        ################################################################################################################################################
        
        # splitting the dataset into the initial training block and the dynamic stream
        treinamento_inicial = self.dataset[0:self.n]
        stream = self.dataset[self.n:]
    
        ################################################################################################################################################
        ################################# STATIC PERIOD ################################################################################################
        ################################################################################################################################################
        
        # creating and training the current model used for forecasting
        ELM = ELMRegressor(self.qtd_neuronios)
        ELM.Tratamento_dados(treinamento_inicial, divisao_dataset, self.lags)
        ELM.Treinar(ELM.train_entradas, ELM.train_saidas)
        
        # seeding the prediction window with the last training pattern
        janela_predicao = Janela()
        janela_predicao.Ajustar(ELM.train_entradas[len(ELM.train_entradas)-1:])
        predicao = ELM.Predizer(janela_predicao.dados)
        
        # window holding the current concept (not used again in this baseline)
        janela_caracteristicas = Janela()
        janela_caracteristicas.Ajustar(treinamento_inicial)
        
        ################################################################################################################################################
        ################################# DYNAMIC PERIOD ###############################################################################################
        ################################################################################################################################################
        
        # accumulated forecasting error over the stream
        erro_stream = 0
        # indices where a change was detected (never filled by the loop below)
        deteccoes = []
        # indices where an alarm was raised (never filled by the loop below)
        alarmes = []
        # start timestamp for the execution-time measurement
        start_time = time.time()
        
        # buffers for the per-step predictions and errors (plotting only)
        if(grafico == True):
            predicoes_vetor = [None] * len(stream)
            erro_stream_vetor = [None] * len(stream)
        
        # entering the data stream
        for i in range(1, len(stream)):
            
            # computing the one-step-ahead error
            loss = mean_absolute_error(stream[i:i+1], predicao)
            erro_stream += loss
    
            # adding the new observation to the prediction window
            janela_predicao.Add_janela(stream[i])
                
            # forecasting the next value with the updated window
            predicao = ELM.Predizer(janela_predicao.dados)
            
            # saving the error and the prediction (plotting only)
            if(grafico == True):                
                # saving the error
                erro_stream_vetor[i] = loss
                # saving the prediction
                predicoes_vetor[i] = predicao

                            
        # end timestamp for the execution-time measurement
        end_time = time.time()
        
        # computing the detection metrics
        mt = Metricas_deteccao()
        # NOTE(review): -500 looks like a sentinel so that 'resultados' gets a
        # non-empty detection list — confirm against Metricas_deteccao
        deteccoes.append(-500)
        [falsos_alarmes, atrasos] = mt.resultados(stream, deteccoes, self.n)
        
        # computing the mean forecasting error over the stream
        MAE = erro_stream/len(stream)
        
        # computing the execution time
        tempo_execucao = (end_time-start_time)
        
        if(grafico == True):
            tecnica = "ELM"
            print(tecnica)
            print("Alarmes:")
            print(alarmes)
            print("Deteccoes:")
            print(deteccoes)
            print("Falsos Alarmes: ", falsos_alarmes)
            print("Atrasos: ", atrasos)
            print("MAE: ", MAE)
            print("Tempo de execucao: ", tempo_execucao)
        
        # plotting the error chart
        if(grafico == True):
            g = Grafico()
            # NOTE(review): a second -500 sentinel is appended here, so the
            # plotted list differs from the one used for the metrics — confirm
            deteccoes.append(-500)
            g.Plotar_graficos(stream, predicoes_vetor, deteccoes, alarmes, erro_stream_vetor, self.n, atrasos, falsos_alarmes, tempo_execucao, MAE, nome=tecnica)
                           
        # method return
        return falsos_alarmes, atrasos, MAE, tempo_execucao
    def Executar(self, grafico = None):
        '''
        Run the algorithm: static ELM training followed by the dynamic stream
        monitored with ECDD; when a change is detected the model is retrained
        on a freshly collected window.
        :param grafico: boolean flag to enable logging and the final plots
        :return: returns 4 values: [falsos_alarmes, atrasos, MAE, tempo_execucao]
        '''

        ################################################################################################################################################
        ################################# DATASET CONFIGURATION ########################################################################################
        ################################################################################################################################################
        
        # splitting the dataset into the initial training block and the dynamic stream
        treinamento_inicial = self.dataset[0:self.n]
        stream = self.dataset[self.n:]
    
        ################################################################################################################################################
        ################################# STATIC PERIOD ################################################################################################
        ################################################################################################################################################
        
        # creating and training the current model used for forecasting
        ELM = ELMRegressor(self.qtd_neuronios)
        ELM.Tratamento_dados(treinamento_inicial, divisao_dataset, self.lags)
        ELM.Treinar(ELM.train_entradas, ELM.train_saidas)
        
        # seeding the prediction window with the last training pattern
        janela_predicao = Janela()
        janela_predicao.Ajustar(ELM.train_entradas[len(ELM.train_entradas)-1:])
        predicao = ELM.Predizer(janela_predicao.dados)
        
        # window holding the current concept, also used to store retraining data
        janela_caracteristicas = Janela()
        janela_caracteristicas.Ajustar(treinamento_inicial)
        
        # initializing the ECDD detector with the concept statistics
        [MI0, SIGMA0] = self.Computar_estatisticas_ECDD(janela_caracteristicas.dados, self.lags, ELM)
        ecdd = ECDD(self.Lambda, self.w, self.c)
        ecdd.armazenar_conceito(MI0, SIGMA0)
        
        ################################################################################################################################################
        ################################# DYNAMIC PERIOD ###############################################################################################
        ################################################################################################################################################
        
        # accumulated forecasting error over the stream
        erro_stream = 0
        # indices where a change was detected
        deteccoes = []
        # indices where an alarm was raised
        alarmes = []
        # start timestamp for the execution-time measurement
        start_time = time.time()
        
        # buffers for the per-step predictions and errors (plotting only)
        if(grafico == True):
            predicoes_vetor = [None] * len(stream)
            erro_stream_vetor = [None] * len(stream)
        
        # auxiliary state flag
        mudanca_ocorreu = False
            
        # entering the data stream
        for i in range(1, len(stream)):
            
            # computing the one-step-ahead error
            loss = mean_absolute_error(stream[i:i+1], predicao)
            erro_stream += loss
    
            # adding the new observation to the prediction window
            janela_predicao.Add_janela(stream[i])
                
            # forecasting the next value with the updated window
            predicao = ELM.Predizer(janela_predicao.dados)

            # saving the error and the prediction (plotting only)
            if(grafico == True):                
                # saving the error
                erro_stream_vetor[i] = loss
                # saving the prediction
                predicoes_vetor[i] = predicao

            # monitoring phase: no change detected yet
            if(mudanca_ocorreu == False):
                    
                # updating the EWMA mean and deviation
                ecdd.atualizar_ewma(loss, i)
                
                # monitoring the error
                string_ecdd = ecdd.monitorar()
                
                # checking for a warning
                if(string_ecdd == ecdd.alerta):
                    if(grafico == True):
                        print("[%d] Alarme" % (i))
                        # NOTE(review): the alarm index is only recorded when
                        # grafico is True, unlike 'deteccoes' below — confirm
                        # whether this is intended
                        alarmes.append(i)
                
                # checking for a change
                if(string_ecdd == ecdd.mudanca):
                    if(grafico == True):
                        print("[%d] Detectou uma mudanca" % (i))
                    deteccoes.append(i)
                    
                    # resetting the training window
                    janela_caracteristicas.Zerar_Janela()
                
                    # switching the flow to the data-collection/retraining phase
                    mudanca_ocorreu = True
            
            else:
                
                # collecting data until there is enough to retrain
                if(len(janela_caracteristicas.dados) < self.n):
                    
                    # adding the new observation to the characteristics window
                    janela_caracteristicas.Increment_Add(stream[i])
                    
                else:
                    
                    # retraining the current predictive model
                    ELM = ELMRegressor(self.qtd_neuronios)
                    ELM.Tratamento_dados(janela_caracteristicas.dados, divisao_dataset, self.lags)
                    ELM.Treinar(ELM.train_entradas, ELM.train_saidas)
                    
                    # re-seeding the prediction window
                    janela_predicao = Janela()
                    janela_predicao.Ajustar(ELM.train_entradas[len(ELM.train_entradas)-1:])
                    predicao = ELM.Predizer(janela_predicao.dados)
                        
                    # re-initializing the ECDD detector on the new concept
                    [MI0, SIGMA0] = self.Computar_estatisticas_ECDD(janela_caracteristicas.dados, self.lags, ELM)
                    ecdd = ECDD(self.Lambda, self.w, self.c)
                    ecdd.armazenar_conceito(MI0, SIGMA0)
                    
                    # returning to the monitoring phase of the main loop
                    mudanca_ocorreu = False
                            
        # end timestamp for the execution-time measurement
        end_time = time.time()
        
        # computing the detection metrics
        mt = Metricas_deteccao()
        [falsos_alarmes, atrasos] = mt.resultados(stream, deteccoes, self.n)
        
        # computing the mean forecasting error over the stream
        MAE = erro_stream/len(stream)
        
        # computing the execution time
        tempo_execucao = (end_time-start_time)
        
        if(grafico == True):
            tecnica = "ELM-ECDD"
            print(tecnica)
            print("Alarmes:")
            print(alarmes)
            print("Deteccoes:")
            print(deteccoes)
            print("Falsos Alarmes: ", falsos_alarmes)
            print("Atrasos: ", atrasos)
            print("MAE: ", MAE)
            print("Tempo de execucao: ", tempo_execucao)
        
        # plotting the error chart
        if(grafico == True):
            g = Grafico()
            g.Plotar_graficos(stream, predicoes_vetor, deteccoes, alarmes, erro_stream_vetor, self.n, atrasos, falsos_alarmes, tempo_execucao, MAE, nome=tecnica)
                           
        # method return
        return falsos_alarmes, atrasos, MAE, tempo_execucao
Exemple #32
0
def projectexample_modelling(series, model_name, parameters):
    """Fit a time-series model and report in-sample regression metrics.

        :param series: univariate time series
        :type series: dataframe
        :param model_name: name of the model to fit (currently only 'SARIMAX'
            is supported; any other name leaves the model unfitted)
        :param parameters: model hyper-parameters; for SARIMAX:
            [p, d, q, P, D, Q, S, trend]
        :return:
            - model: the fitted model (None/[] when nothing was fitted)
            - error (int): 0 on success, 1 on modelling error, 2 on metrics error
        """

    # modelling
    error = 0
    # ensure 'model' is always bound: the original raised NameError for any
    # model_name other than 'SARIMAX'
    model = None
    try:
        print("{} time series modelling".format('-' * 20))
        print("{} {} model".format('-' * 20, model_name))

        if model_name=='SARIMAX':

            # unpack the SARIMAX hyper-parameters
            p = parameters[0]
            d = parameters[1]
            q = parameters[2]
            P = parameters[3]
            D = parameters[4]
            Q = parameters[5]
            S = parameters[6]
            t = parameters[7]

            print("{} fitting model".format('-' * 20))
            # fit the model
            model = SARIMAX(series.values,
                             trend = t,
                             order = (p, d, q),
                             seasonal_order = (P, D, Q, S),
                             enforce_stationarity = False,
                             enforce_invertibility = False).fit()


            # Model summary
            print("{} Model summary".format('-' * 20))
            print(model.summary().tables[1])


            # Model diagnostic
            print("{} Model diagnostic".format('-' * 20))
            fig = model.plot_diagnostics(figsize=(20, 12))
            # portable path components instead of a hard-coded Windows '\\'
            fig.savefig(os.path.join(os.getcwd(), 'figures', 'diagnostic_{}.png'.format(model_name)))
            fig.show()

    except Exception as exception_msg:
        print('{} (!) Error in projectexample_modelling: '.format('-' * 20) + str(exception_msg))
        error = 1
        model = []
        return model, error

    # Metrics
    print("{} Metrics".format('-' * 20))
    try:
        # Regression metrics on the in-sample fit
        y_fitted = model.predict()
        R2 = round(r2_score(series, y_fitted), 3)
        MAE = round(mean_absolute_error(series, y_fitted), 3)
        RMSE = round(np.sqrt(mean_squared_error(series, y_fitted)), 3)

        print("{} R2: {}".format('-' * 20, R2))
        print("{} MAE: {}".format('-' * 20, MAE))
        print("{} RMSE: {}".format('-' * 20, RMSE))

    except Exception as exception_msg:
        print('{} (!) Error in projectexample_modelling (metrics): '.format('-' * 20) + str(exception_msg))
        error = 2
        return model, error

    return model, error
Exemple #33
0
def validate(y_true, y_pred):
    """Print regression and binary-classification diagnostics for a set of
    predictions and plot the precision-recall and ROC curves.

    :param y_true: ground-truth values (binary labels for the classification
        metrics below — presumably 0/1; TODO confirm against callers)
    :param y_pred: predicted scores / probabilities
    :return: None (all results are printed or plotted)
    """
    # FIX: converted Python 2 print statements to Python 3 print() calls.
    print('Kolmogorov-Smirnov test = ', ks_2samp(y_true, y_pred))
    print('mean_squared_error = ', mean_squared_error(y_true, y_pred))
    print('mean_absolute_error = ', mean_absolute_error(y_true, y_pred))
    print('r2_score = ', r2_score(y_true, y_pred))

    # TBD: compute the log-loss to consider boolean targets.
    # Log loss, aka logistic loss or cross-entropy loss.
    print("log_loss = " + str(log_loss(y_true, y_pred)))

    # Precision-recall pairs for different probability thresholds.
    precision, recall, thresholds = precision_recall_curve(y_true, y_pred)
    # Average precision (AP) from prediction scores.
    average_precision = average_precision_score(y_true, y_pred)
    print("average_precision_score = ", average_precision)

    ##############################################################################
    # Precision-recall curve (the original comment mislabelled this plot as a
    # ROC curve). FIX: plot recall on x and precision on y, matching the axis
    # labels below — the original passed the arrays in the opposite order.
    plt.figure()
    plt.plot(recall, precision, label='AUC = %0.2f' % average_precision)
    plt.plot([0, 1], [0, 1], 'k--')
    plt.xlim([0.0, 1.0])
    plt.ylim([0.0, 1.05])
    plt.xlabel('Recall')
    plt.ylabel('Precision')
    plt.title('Precision-Recall Curve')
    plt.legend(loc="lower right")
    plt.show()
    ##############################################################################

    # Receiver operating characteristic (ROC).
    fpr, tpr, thresholds = roc_curve(y_true, y_pred)
    print("fpr = " + str(fpr))
    print("tpr = " + str(tpr))
    print("thresholds = " + str(thresholds))
    # Area Under the Curve (AUC) from prediction scores.
    print("roc_auc_score = " + str(roc_auc_score(y_true, y_pred)))
    roc_auc = auc(fpr, tpr)

    ##############################################################################
    # Plot of the ROC curve.
    plt.figure()
    plt.plot(fpr, tpr, label='ROC curve (area = %0.2f)' % roc_auc)
    plt.plot([0, 1], [0, 1], 'k--')
    plt.xlim([0.0, 1.0])
    plt.ylim([0.0, 1.05])
    plt.xlabel('False Positive Rate')
    plt.ylabel('True Positive Rate')
    plt.title('Receiver operating characteristic ')
    plt.legend(loc="lower right")
    plt.show()
    ##############################################################################

    # Additional classification metrics (confusion_matrix, f1_score,
    # matthews_corrcoef, classification_report, hamming_loss, hinge_loss, ...)
    # are available in sklearn.metrics if needed.
    return
# Separate output from inputs
y_train = data_df['time_to_failure']  # regression target
x_train_seg = data_df['segment_id']   # identifiers, kept aside (not a feature)
x_train = data_df.drop(['time_to_failure', 'segment_id'], axis=1)

# Support-vector regressor; the very specific C/gamma values presumably come
# from a prior hyperparameter search — TODO confirm.
svReg = SVR(C=9137.08647605824366,
            cache_size=200,
            coef0=0.0,
            degree=2,
            epsilon=0.001,
            gamma=0.586414861763494,
            kernel='rbf',
            max_iter=-1,
            shrinking=True,
            tol=0.001,
            verbose=True)

svReg.fit(x_train, y_train)
# Persist the fitted model (together with its feature columns), then reload
# it to verify the save/load round-trip.
mh = ModelHolder(svReg, most_dependent_columns)
mh.save(model_name)
svReg = None
mh_new = load_model(model_name)
svReg, most_dependent_columns = mh_new.get()

# NOTE(review): the model is evaluated on its own training data, so this MAE
# is an in-sample (optimistic) estimate.
y_pred = svReg.predict(x_train)

mas = mean_absolute_error(y_train, y_pred)
print('Mean Absolute Error', mas)
    def Executar(self, grafico = None):
        '''
        Run the IDPSO-ELM forecasting procedure with behaviour-based (B)
        concept-drift detection over the data stream.
        :param grafico: boolean flag to enable plots and verbose output
        :return: [falsos_alarmes, atrasos, MAE, tempo_execucao]
        '''

        ################################################################################################################################################
        ################################# DATASET SETUP ################################################################################################
        ################################################################################################################################################

        # split the dataset into the initial (offline) training slice and the dynamic stream
        treinamento_inicial = self.dataset[0:self.n]
        stream = self.dataset[self.n:]

        ################################################################################################################################################
        ################################# STATIC PERIOD ################################################################################################
        ################################################################################################################################################

        # create and train the swarm used for the forecasts
        enxame = IDPSO_ELM(treinamento_inicial, divisao_dataset, self.lags, self.qtd_neuronios)
        enxame.Parametros_IDPSO(it, self.numero_particulas, inercia_inicial, inercia_final, c1, c2, xmax, crit_parada)
        enxame.Treinar()

        # seed the prediction window with the last training pattern
        janela_predicao = Janela()
        janela_predicao.Ajustar(enxame.dataset[0][(len(enxame.dataset[0])-1):])
        predicao = enxame.Predizer(janela_predicao.dados)

        # window holding the current concept; also stores the retraining data
        janela_caracteristicas = Janela()
        janela_caracteristicas.Ajustar(treinamento_inicial)

        # activate the behaviour sensor (mean and standard deviation) on the first feature window
        b = B(self.limite, self.w, self.c)
        b.armazenar_conceito(janela_caracteristicas.dados, self.lags, enxame)

        ################################################################################################################################################
        ################################# DYNAMIC PERIOD ###############################################################################################
        ################################################################################################################################################

        # accumulated stream error
        erro_stream = 0
        # indices where a drift was detected
        deteccoes = []
        # NOTE(review): `alarmes` is never populated below — it is only printed/plotted empty
        alarmes = []
        # start of the timed section
        start_time = time.time()

        # per-instance buffers, only needed when plotting
        if(grafico == True):
            predicoes_vetor = [None] * len(stream)
            erro_stream_vetor = [None] * len(stream)

        # True while the detector is collecting data for retraining
        mudanca_ocorreu = False

        # walk through the data stream
        for i in range(1, len(stream)):

            # one-step-ahead error of the previous forecast
            loss = mean_absolute_error(stream[i:i+1], predicao)
            erro_stream += loss

            # slide the prediction window with the new observation
            janela_predicao.Add_janela(stream[i])

            # forecast the next point with the updated window
            predicao = enxame.Predizer(janela_predicao.dados)

            if(grafico == True):
                # store the error and the prediction for later plotting
                erro_stream_vetor[i] = loss
                predicoes_vetor[i] = predicao

            if(mudanca_ocorreu == False):

                # monitor the behaviour (mean/std) of the current instance
                mudou = b.monitorar(janela_predicao.dados, stream[i:i+1], enxame, i)

                if(mudou == True):
                    if(grafico == True):
                        print("[%d] Detectou uma mudança" % (i))
                    deteccoes.append(i)

                    # reset the retraining window
                    janela_caracteristicas.Zerar_Janela()

                    # switch to the retraining phase
                    mudanca_ocorreu = True

            else:

                if(len(janela_caracteristicas.dados) < self.n):

                    # keep collecting instances of the new concept
                    janela_caracteristicas.Increment_Add(stream[i])

                else:

                    # retrain the forecasting swarm on the new concept
                    enxame = IDPSO_ELM(janela_caracteristicas.dados, divisao_dataset, self.lags, self.qtd_neuronios)
                    enxame.Parametros_IDPSO(it, self.numero_particulas, inercia_inicial, inercia_final, c1, c2, xmax, crit_parada)
                    enxame.Treinar()

                    # re-seed the prediction window with the last training pattern
                    janela_predicao = Janela()
                    janela_predicao.Ajustar(enxame.dataset[0][(len(enxame.dataset[0])-1):])
                    predicao = enxame.Predizer(janela_predicao.dados)

                    # refresh the behaviour sensor for the new concept
                    b = B(self.limite, self.w, self.c)
                    b.armazenar_conceito(janela_caracteristicas.dados, self.lags, enxame)

                    # return to the monitoring loop
                    mudanca_ocorreu = False

        # end of the timed section
        end_time = time.time()

        # detection metrics
        mt = Metricas_deteccao()
        [falsos_alarmes, atrasos] = mt.resultados(stream, deteccoes, self.n)

        # average forecasting error over the stream
        MAE = erro_stream/len(stream)

        # execution time
        tempo_execucao = (end_time-start_time)

        if(grafico == True):
            tecnica = "IDPSO_ELM_B"
            print(tecnica)
            print("Alarmes:")
            print(alarmes)
            print("Deteccoes:")
            print(deteccoes)
            print("Falsos Alarmes: ", falsos_alarmes)
            print("Atrasos: ", atrasos)
            print("MAE: ", MAE)
            print("Tempo de execucao: ", tempo_execucao)

        # error plot
        if(grafico == True):
            g = Grafico()
            g.Plotar_graficos(stream, predicoes_vetor, deteccoes, alarmes, erro_stream_vetor, self.n, atrasos, falsos_alarmes, tempo_execucao, MAE, nome=tecnica)

        # FIX: the original block ended at the "#retorno do metodo" comment with
        # no return statement, although the docstring promises result values;
        # return the computed metrics, consistent with the sibling Executar
        # implementation in this file.
        return falsos_alarmes, atrasos, MAE, tempo_execucao
Exemple #36
0
        # NOTE(review): truncated fragment — the header of the enclosing
        # sliding-window builder (presumably `create_sliding_windows`) is
        # missing above; these two lines are its tail.
        y.append(data[i + lag])
    return np.array(X), np.array(y)


# Build lagged supervised datasets from the train/validation/test splits.
lag = 3
X_train, y_train = create_sliding_windows(train, lag)
y_train = np.reshape(y_train, (len(y_train), 1))  # column vector for the regressor
X_val, y_val = create_sliding_windows(validation, lag)
X_test, y_test = create_sliding_windows(test, lag)

# Training
# Deep-belief-network regressor (project-local SupervisedDBNRegression);
# hyperparameters presumably hand-tuned — TODO confirm.
regressor = SupervisedDBNRegression(hidden_layers_structure=[30, 40],
                                    learning_rate_rbm=0.001,
                                    learning_rate=0.001,
                                    n_epochs_rbm=30,
                                    n_iter_backprop=100,
                                    batch_size=32,
                                    activation_function='relu',
                                    dropout_p=0.2)
regressor.fit(X_train, y_train)

# Save the model
regressor.save('model.pkl')

# Restore it (verifies the save/load round-trip)
regressor = SupervisedDBNRegression.load('model.pkl')

# Test: report MAE on the held-out test windows
y_pred = regressor.predict(X_test)
print('Done.\nMAE: %f' % mean_absolute_error(y_test, y_pred))
Exemple #37
0
                       # NOTE(review): truncated fragment — these keyword
                       # arguments belong to an ellipse-drawing call (likely
                       # cv2.ellipse) whose opening line is missing above.
                       axes=(28, 14),
                       angle=0,
                       startAngle=0,
                       endAngle=360,
                       color=(255, 255, 255),
                       thickness=-1)
    # keep only the pixels inside the elliptical mask
    result = np.bitwise_and(image, mask)
    # crop 14 rows from top and bottom (assumes a 64-row image — TODO confirm)
    result = result[14:64 - 14, :]
    return result


# Evaluate a saved gaze model on the left-eye test set.
testSamples, testLabels = load_images("eye_left")

model = load_model("model.01-45.01.h5")  ### add the correct model name!!!

predictions = model.predict(testSamples)

# blank canvas for drawing ground truth vs. predicted points
results = np.zeros((800, 1500, 3))

# create an image with current predictions
for i in range(testSamples.shape[0]):
    # ground truth in (0,255,0), prediction and connecting line in (255,0,0)
    # (presumably BGR, so green and blue respectively — OpenCV convention)
    cv2.circle(results, (int(testLabels[i, 0]), int(testLabels[i, 1])), 10,
               (0, 255, 0), 2)
    cv2.circle(results, (int(predictions[i, 0]), int(predictions[i, 1])), 10,
               (255, 0, 0), 2)
    cv2.line(results, (int(predictions[i, 0]), int(predictions[i, 1])),
             (int(testLabels[i, 0]), int(testLabels[i, 1])), (255, 0, 0), 2)
cv2.imwrite("test_model.jpg", results)

print("Final MAE: {}".format(mean_absolute_error(testLabels, predictions)))
input("")  # keep the console window open until the user presses Enter
Exemple #38
0
# FIX: import from the public sklearn packages. The private module paths
# `sklearn.ensemble.weight_boosting` and `sklearn.model_selection._split`
# were deprecated and removed in modern scikit-learn releases.
from sklearn.ensemble import AdaBoostRegressor
from sklearn.model_selection import train_test_split

# Load the engineered trend-line training features (project-local loader).
tl = TrendLine(data_type='train')
data_df = tl.get()

# Hold out 20% of the rows for evaluation; the split seed is itself random,
# so results vary between runs.
train_set, test_set = train_test_split(data_df,
                                       test_size=0.2,
                                       random_state=np.random.randint(1, 1000))

y_train = train_set['time_to_failure']   # regression target
x_train_seg = train_set['segment_id']    # identifiers, excluded from features
x_train = train_set.drop(['time_to_failure', 'segment_id'], axis=1)

y_test = test_set['time_to_failure']
x_test_seg = test_set['segment_id']
x_test = test_set.drop(['time_to_failure', 'segment_id'], axis=1)

adbReg = AdaBoostRegressor(n_estimators=50,
                           learning_rate=1.0,
                           loss='linear',
                           random_state=42)

adbReg.fit(x_train, y_train)

y_pred = adbReg.predict(x_test)

# y_pred = x_train.mean(axis=1)

# FIX: corrected the "acerage" typo in the output message.
print('MAE Score for average ', mean_absolute_error(y_test, y_pred))
Exemple #39
0
def mae(x, y):
    """Return the mean absolute error between *x* and *y*.

    Thin convenience alias around ``mean_absolute_error``.
    """
    score = mean_absolute_error(x, y)
    return score
pso_elm.Treinar()

# organise the data splits used for the model comparison #
train_x, train_y = idpso_elm.dataset[0], idpso_elm.dataset[1] 
val_x, val_y = idpso_elm.dataset[2], idpso_elm.dataset[3]
test_x, test_y = idpso_elm.dataset[4], idpso_elm.dataset[5]


############################ computing the forecasts for the training set ##############################

plt.plot(train_y, label = "Real")

print("\n------------------------------------------")

previsao = idpso_elm.Predizer(train_x)
MAE = mean_absolute_error(train_y, previsao)
plt.plot(previsao, label = "Previsao IDPSO-ELM: " + str(MAE))
print('IDPSO_ELM - Train MAE: ', MAE)


# NOTE(review): `elm` and `pso_slfn` are presumably trained earlier in this
# (truncated) script — only `pso_elm.Treinar()` is visible above; confirm.
previsao = elm.Predizer(train_x)
MAE = mean_absolute_error(train_y, previsao)
plt.plot(previsao, label = "Previsao ELM: " + str(MAE))
print('ELM - Train MAE: ', MAE)


previsao = pso_slfn.Predizer(train_x)
MAE = mean_absolute_error(train_y, previsao)
plt.plot(previsao, label = "Previsao PSO_SLFN: " + str(MAE))
print('PSO_SLFN - Train MAE: ', MAE)
def main():
    """Load a real-world series, train IDPSO-ELM and ELM forecasters on it
    and report the train/validation MAE of each model."""
    # load the series and normalise it to the model's working range
    loader = Datasets()
    series = loader.Leitura_dados(loader.bases_reais(3), csv=True)
    partitioner = Particionar_series(series, [0.0, 0.0, 0.0], 0)
    series = partitioner.Normalizar(series)

    '''
    serie1 = serie[10000:10300]
    serie2 = serie[12000:14000]
    
    modelo = IDPSO_ELM(serie1, [1, 0, 0], 5, 10)
    modelo.Parametros_IDPSO(100, 30, 0.8, 0.4, 2, 2, 20)
    modelo.Treinar()  
    
    previsao = modelo.Predizer(modelo.dataset[0])
    MAE = mean_absolute_error(modelo.dataset[1], previsao)
    print('IDPSO_ELM - MAE: ', MAE)
    
    plt.plot(serie1)
    plt.plot(previsao)
    plt.show()

    
    serie2_modelada = modelo.Tratamento_Dados(serie2, [1, 0, 0], 5)
    
    previsao = modelo.Predizer(serie2_modelada[0])
    MAE = mean_absolute_error(serie2_modelada[1], previsao)
    print('IDPSO_ELM - MAE: ', MAE)
    
    plt.plot(serie2)
    plt.plot(previsao)
    plt.show()
    '''

    # shared experiment configuration
    split_ratios = [0.8, 0.2, 0]
    n_neurons = 10
    time_window = 5

    # ------------------------------ IDPSO-ELM ------------------------------
    swarm_model = IDPSO_ELM(series, split_ratios, time_window, n_neurons)
    swarm_model.Parametros_IDPSO(100, 30, 0.8, 0.4, 2, 2, 50)
    swarm_model.Treinar()

    predictions = swarm_model.Predizer(swarm_model.dataset[0])
    mae_score = mean_absolute_error(swarm_model.dataset[1], predictions)
    print('IDPSO_ELM - Train MAE: ', mae_score)
    predictions = swarm_model.Predizer(swarm_model.dataset[2])
    mae_score = mean_absolute_error(swarm_model.dataset[3], predictions)
    print('IDPSO_ELM - Val MAE: ', mae_score)

    ###################################################################################

    # -------------------------------- ELM ----------------------------------
    elm_model = ELMRegressor(n_neurons)
    elm_model.Tratamento_dados(series, split_ratios, time_window)
    elm_model.Treinar(elm_model.train_entradas, elm_model.train_saidas)

    # evaluate on the training split
    predictions = elm_model.Predizer(elm_model.train_entradas)
    mae_score = mean_absolute_error(elm_model.train_saidas, predictions)
    print('ELM - Train MAE: ', mae_score)

    # evaluate on the validation split
    predictions = elm_model.Predizer(elm_model.val_entradas)
    mae_score = mean_absolute_error(elm_model.val_saidas, predictions)
    print('ELM - Val MAE: ', mae_score)
Exemple #42
0
                          # NOTE(review): truncated fragment — these keyword
                          # arguments close a gradient-boosting regressor
                          # constructor (LightGBM-style, judging by the
                          # commented parameter grid below) whose opening
                          # line is missing above.
                          subsample=1.0,
                          subsample_for_bin=200000,
                          subsample_freq=0,
                          verbosity=-1)

model.fit(x_train, y_train, verbose=1000)

# Persist the fitted model together with its feature columns, then reload
# it to verify the save/load round-trip.
mh = ModelHolder(model, most_dependent_columns)
mh.save(model_name)
model = None
mh_new = load_model(model_name)
model, most_dependent_columns = mh_new.get()

# Evaluate on the held-out set.
y_pred = model.predict(x_test)
mas = mean_absolute_error(y_test, y_pred)
print('Mean Absolute Error', mas)

# Alternative raw-LightGBM parameter grid kept for reference:
# params = {'num_leaves': 256,
#          'min_data_in_leaf': 50,
#          'objective': 'regression',
#          'max_depth':-1,
#          'learning_rate': 0.001,
#          "boosting": "gbdt",
#           "feature_fraction": 0.5,
#          "bagging_freq": 2,
#          "bagging_fraction": 0.5,
#          "bagging_seed": 0,
#          "metric": 'mae',
#          "verbosity":-1,
#          'reg_alpha': 0.25,
Exemple #43
0
    # NOTE(review): fragment — the enclosing function (or __main__ guard) is
    # truncated above; X and y come from earlier, unseen code.
    X_train, X_test, y_train, y_test = train_test_split(X,
                                                        y,
                                                        train_size=0.5,
                                                        random_state=2016)

    # Benchmark a single linear-model regression tree (project-local).
    print("Linear RT")
    t0 = time.time()
    lrt = linear_regression_tree(max_depth=3,
                                 max_features="sqrt",
                                 min_samples=20,
                                 kerneltype="poly",
                                 gridsearch=True,
                                 random_state=2016).fit(X_train, y_train)
    y_pred = lrt.predict(X_test)
    print("Time taken: %0.3f" % (time.time() - t0))
    score = mean_absolute_error(y_test, y_pred)
    print("Error: %0.3f" % score)
    print("")

    #printtree(lrt._tree, indent="  ")

    # Benchmark a forest of regression trees (project-local).
    print("Linear RF")
    t0 = time.time()
    lrt = regression_forest(n_estimators=10,
                            max_depth=4,
                            max_features="sqrt",
                            seed=2016,
                            min_samples=100).fit(X_train, y_train)
    y_pred = lrt.predict(X_test)
    print("Time taken: %0.3f" % (time.time() - t0))
    score = mean_absolute_error(y_test, y_pred)
def MAE(Y1, Y2):
    """Return the mean absolute error between *Y1* and *Y2*.

    Thin convenience alias around ``mean_absolute_error``.
    """
    error = mean_absolute_error(Y1, Y2)
    return error
    def Executar(self, grafico = None):
        '''
        Run the PSO-ELM forecasting procedure with sentry-based concept-drift
        detection over the data stream.
        :param grafico: boolean flag to enable plots and verbose output
        :return: [falsos_alarmes, atrasos, MAE, tempo_execucao]
        '''
        
        ################################################################################################################################################
        ################################# DATASET SETUP ################################################################################################
        ################################################################################################################################################
        
        # split the dataset into the initial (offline) training slice and the dynamic stream
        treinamento_inicial = self.dataset[0:self.m]
        stream = self.dataset[self.m:]
    
        ################################################################################################################################################
        ################################# STATIC PERIOD ################################################################################################
        ################################################################################################################################################
        
        # create and train the current forecasting swarm
        enxame = PSO_ELM(treinamento_inicial, divisao_dataset, self.lags, self.qtd_neuronios)
        enxame.Parametros_PSO(it, self.numero_particulas, inercia, inercia, c1, c2, xmax, crit_parada, self.tx)
        enxame.Treinar()  
       
        # seed the prediction window with the last training pattern
        janela_predicao = Janela()
        janela_predicao.Ajustar(enxame.dataset[0][(len(enxame.dataset[0])-1):])
        predicao = enxame.Predizer(janela_predicao.dados)
        
        # window holding the current concept; also stores the retraining data
        janela_caracteristicas = Janela()
        janela_caracteristicas.Ajustar(treinamento_inicial)
    
        # activate the sentry sensors on the first feature window
        solucao = self.Computar_solucao_teste(janela_caracteristicas.dados, self.lags, enxame)
        self.Sentry(solucao)
        ################################################################################################################################################
        ################################# DYNAMIC PERIOD ###############################################################################################
        ################################################################################################################################################
        
        # accumulated stream error
        erro_stream = 0
        # indices where a drift was detected
        deteccoes = []
        # NOTE(review): `alarmes` is never populated below — only printed/plotted empty
        alarmes = []
        # start of the timed section
        start_time = time.time()
        
        # per-instance buffers, only needed when plotting
        if(grafico == True):
            predicoes_vetor = [None] * len(stream)
            erro_stream_vetor = [None] * len(stream)
            
        # True while the detector is in the retraining phase
        mudanca_ocorreu = False
        
        # walk through the data stream
        for i in range(1, len(stream)):
            
            # one-step-ahead error of the previous forecast
            loss = mean_absolute_error(stream[i:i+1], predicao)
            erro_stream += loss
    
            # slide the prediction window with the new observation
            janela_predicao.Add_janela(stream[i])
                
            # forecast the next point with the updated window
            predicao = enxame.Predizer(janela_predicao.dados)
            
            # keep the feature window in sync with the stream
            janela_caracteristicas.Add_janela(stream[i])
            
            
            if(grafico == True):                
                # store the error
                erro_stream_vetor[i] = loss
                # store the prediction
                predicoes_vetor[i] = predicao[0]
                
            #print("[", i, "]")
            
            if(mudanca_ocorreu == False):
                
                # re-evaluate the sentry sensors every S instances
                if((i % self.S) == 0):
                    nova_solucao = self.Reavaliar_sentry(janela_caracteristicas.dados[0], self.lags, enxame)
                    mudou = self.Monitorar_sentry(nova_solucao)

                    if(mudou):
                        if(grafico == True):    
                            print("[%d] Mudança" % (i))
                        deteccoes.append(i)
                        
                        # switch to the retraining phase
                        mudanca_ocorreu = True
                    
            else:
                
                # retrain the current forecasting model on the feature window
                dataset = enxame.Tratamento_Dados(janela_caracteristicas.dados[0], divisao_dataset, self.lags)
                enxame.dataset = dataset
                enxame.Retreinar() 
                    
                # re-seed the prediction window with the last training pattern
                janela_predicao = Janela()
                janela_predicao.Ajustar(enxame.dataset[0][(len(enxame.dataset[0])-1):])
                predicao = enxame.Predizer(janela_predicao.dados)
                    
                # re-activate the sentry sensors on the current feature window
                solucao = self.Computar_solucao_teste(janela_caracteristicas.dados[0], self.lags, enxame)
                self.Sentry(solucao)
                    
                # return to the monitoring loop
                mudanca_ocorreu = False
        
        # end of the timed section
        end_time = time.time()
        
        # detection metrics
        mt = Metricas_deteccao()
        [falsos_alarmes, atrasos] = mt.resultados(stream, deteccoes, self.m)
        
        # average forecasting error over the stream
        MAE = erro_stream/len(stream)
        
        # execution time
        tempo_execucao = (end_time-start_time)
        
        if(grafico == True):
            tecnica = "RPSO-ELM"
            print(tecnica)
            print("Alarmes:")
            print(alarmes)
            print("Deteccoes:")
            print(deteccoes)
            print("Falsos Alarmes: ", falsos_alarmes)
            print("Atrasos: ", atrasos)
            print("MAE: ", MAE)
            print("Tempo de execucao: ", tempo_execucao)
        
        # error plot
        if(grafico == True):
            g = Grafico()
            g.Plotar_graficos(stream, predicoes_vetor, deteccoes, alarmes, erro_stream_vetor, self.m, atrasos, falsos_alarmes, tempo_execucao, MAE, nome=tecnica)
                           
        # method return
        return falsos_alarmes, atrasos, MAE, tempo_execucao