Example #1
    def test_holt_damp(self):
        fit1 = SimpleExpSmoothing(self.livestock2_livestock).fit()
        mod4 = Holt(self.livestock2_livestock, damped=True)
        fit4 = mod4.fit(damping_slope=0.98)
        mod5 = Holt(self.livestock2_livestock, exponential=True, damped=True)
        fit5 = mod5.fit()
        # We accept the values below since we get a better SSE than the textbook
        assert_almost_equal(fit1.params['smoothing_level'], 1.00, 2)
        assert_almost_equal(fit1.params['smoothing_slope'], np.NaN, 2)
        assert_almost_equal(fit1.params['damping_slope'], np.NaN, 2)
        assert_almost_equal(fit1.params['initial_level'], 263.92, 2)
        assert_almost_equal(fit1.params['initial_slope'], np.NaN, 2)
        assert_almost_equal(fit1.sse, 6761.35, 2)  # 6080.26

        assert_almost_equal(fit4.params['smoothing_level'], 0.98, 2)
        assert_almost_equal(fit4.params['smoothing_slope'], 0.00, 2)
        assert_almost_equal(fit4.params['damping_slope'], 0.98, 2)
        assert_almost_equal(fit4.params['initial_level'], 257.36, 2)
        assert_almost_equal(fit4.params['initial_slope'], 6.51, 2)
        assert_almost_equal(fit4.sse, 6036.56, 2)  # 6080.26

        assert_almost_equal(fit5.params['smoothing_level'], 0.97, 2)
        assert_almost_equal(fit5.params['smoothing_slope'], 0.00, 2)
        assert_almost_equal(fit5.params['damping_slope'], 0.98, 2)
        assert_almost_equal(fit5.params['initial_level'], 258.95, 2)
        assert_almost_equal(fit5.params['initial_slope'], 1.02, 2)
        assert_almost_equal(fit5.sse, 6082.00, 2)  # 6100.11
Example #2
import pandas as pd
import matplotlib.pyplot as plt
from statsmodels.tsa.holtwinters import Holt


def holt(alpha=0.2):
    df = pd.read_csv('airline_passengers.csv',
                     index_col='Month',
                     parse_dates=True)
    df['EWMA'] = df['Passengers'].ewm(alpha=alpha, adjust=False).mean()
    df.index.freq = 'MS'

    N_test = 12
    train = df.iloc[:-N_test]
    test = df.iloc[-N_test:]

    train_idx = df.index <= train.index[-1]
    test_idx = df.index > train.index[-1]

    holt = Holt(df['Passengers'])
    res_h = holt.fit()
    df['Holt'] = res_h.fittedvalues
    df[['Passengers', 'Holt']].plot()
    plt.show()

    holt = Holt(train['Passengers'])
    res_h = holt.fit()
    df.loc[train_idx, 'Holt'] = res_h.fittedvalues
    df.loc[test_idx, 'Holt'] = res_h.forecast(N_test)

    df[['Passengers', 'Holt']].plot()
    plt.show()
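    # Hedged addition (not in the original snippet): score the holdout
    # forecast numerically as well as visually.
    rmse = ((test['Passengers'] - res_h.forecast(N_test)) ** 2).mean() ** 0.5
    print('Holt holdout RMSE: %.2f' % rmse)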
Example #3
    def test_holt_damp_fit(self):
        # Smoke test for parameter estimation
        fit1 = SimpleExpSmoothing(self.livestock2_livestock).fit()
        mod4 = Holt(self.livestock2_livestock, damped=True)
        fit4 = mod4.fit(damping_slope=0.98)
        mod5 = Holt(self.livestock2_livestock, exponential=True, damped=True)
        fit5 = mod5.fit()
        # We accept the values below since we get a better SSE than the textbook
        assert_almost_equal(fit1.params['smoothing_level'], 1.00, 2)
        assert_almost_equal(fit1.params['smoothing_slope'], np.NaN, 2)
        assert_almost_equal(fit1.params['damping_slope'], np.NaN, 2)
        assert_almost_equal(fit1.params['initial_level'], 263.92, 2)
        assert_almost_equal(fit1.params['initial_slope'], np.NaN, 2)
        assert_almost_equal(fit1.sse, 6761.35, 2)  # 6080.26

        assert_almost_equal(fit4.params['smoothing_level'], 0.98, 2)
        assert_almost_equal(fit4.params['smoothing_slope'], 0.00, 2)
        assert_almost_equal(fit4.params['damping_slope'], 0.98, 2)
        assert_almost_equal(fit4.params['initial_level'], 257.36, 2)
        assert_almost_equal(fit4.params['initial_slope'], 6.64, 2)
        assert_almost_equal(fit4.sse, 6036.56, 2)  # 6080.26

        assert_almost_equal(fit5.params['smoothing_level'], 0.97, 2)
        assert_almost_equal(fit5.params['smoothing_slope'], 0.00, 2)
        assert_almost_equal(fit5.params['damping_slope'], 0.98, 2)
        assert_almost_equal(fit5.params['initial_level'], 258.95, 2)
        assert_almost_equal(fit5.params['initial_slope'], 1.04, 2)
        assert_almost_equal(fit5.sse, 6082.00, 2)  # 6100.11
Example #4
    def Holt(self):
        # model = Holt(self._train_data, damped=True)
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            model = Holt(self._train_data.values)
            model_fit = model.fit()
            output = model_fit.forecast(self._h)
        # print(output)
        return output

    def update(self):
        begin = max(0, self.index - self.window)
        data = self.arrivals[begin:self.index]
        # fit model on the most recent window of arrivals
        model = Holt(data)
        model_fit = model.fit()
        self.model = model_fit
        # make prediction: predict(start=len(data), end=len(data)) is the
        # one-step-ahead forecast for the next period
        self.prediction = model_fit.predict(len(data), len(data))[0]
Example #6
def update_graph(selected_dropdown1, selected_dropdown2):
    df = values["Gold"]

    trace1 = []
    trace1.append(
        go.Scatter(x=values.date,
                   y=df,
                   mode='lines',
                   opacity=0.6,
                   name='Actual values',
                   textposition='bottom center'))

    if selected_dropdown1 is not None and selected_dropdown2 is not None:
        alpha = float(selected_dropdown1)
        beta = float(selected_dropdown2)

        model = Holt(np.asarray(df))
        fit = model.fit(smoothing_level=alpha, smoothing_slope=beta)
        result = fit.fittedvalues

        name = 'Exponential Smoothing {0:.2f},{1:.2f}'.format(alpha, beta)
        trace1.append(
            go.Scatter(x=values.date,
                       y=result,
                       mode='lines',
                       opacity=0.6,
                       name=name,
                       textposition='bottom center'))

    title = ""
    traces = [trace1]
    data = [val for sublist in traces for val in sublist]
    figure = {
        'data':
        data,
        'layout':
        go.Layout(colorway=[
            "#5E0DAC", '#FF4F00', '#375CB1', '#FF7400', '#FFF400', '#FF0056'
        ],
                  height=600,
                  title=title,
                  xaxis={
                      "title": "Date",
                      'rangeslider': {
                          'visible': True
                      },
                  },
                  yaxis={"title": "Price (USD)"},
                  paper_bgcolor='rgba(0,0,0,0)',
                  plot_bgcolor='rgba(0,0,0,0)')
    }

    return figure
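Note: statsmodels 0.12 renamed these keyword arguments (smoothing_slope -> smoothing_trend, damping_slope -> damping_trend, damped -> damped_trend), so on current versions the fit call above becomes, as a hedged sketch:

fit = model.fit(smoothing_level=alpha, smoothing_trend=beta)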
Example #7
File: tmp.py  Project: ElMora96/stage_3
    def daily_trend_pred(self, now, delta=pd.Timedelta(3, "W")):
        train_time = pd.date_range(start=now - delta,
                                   end=now - pd.Timedelta(1, "H"),
                                   freq="H")
        model_series = self.trend[train_time]
        h_model = Holt(model_series, exponential=True, damped_trend=True)
        # Fit
        fitted_h = h_model.fit(optimized=True)
        # Predict tomorrow
        pred = fitted_h.predict(start=now, end=now + pd.Timedelta(23, "H"))
        return pred
def forecast(data, train_hours, test_hours):
    # Train on the first 6 days to predict the 7th day
    train = data.iloc[25:train_hours]

    # Create the damped Holt model
    model = Holt(np.asarray(train['seasonally differenced']), damped=True)
    model._index = pd.to_datetime(train.index)

    # Fit the model, and forecast
    fit = model.fit()
    pred = fit.forecast(test_hours)
    data['holtDamped'] = 0
    data.loc[data.index[25:], 'holtDamped'] = list(fit.fittedvalues) + list(pred)
def forecast(data, train_hours, test_hours):
    # Split data into training and test data
    train = data.iloc[25:train_hours]

    # Create the Holt Model
    model = Holt(np.asarray(train['seasonally differenced']))
    model._index = pd.to_datetime(train.index)

    # Fit the model, and forecast
    fit = model.fit()
    pred = fit.forecast(test_hours)
    data['holt'] = 0
    data.loc[data.index[25:], 'holt'] = list(fit.fittedvalues) + list(pred)
def forecast(data, train_hours, test_hours, in_place=True):
    train = data.iloc[25:train_hours]
    model = Holt(np.asarray(train['seasonally adjusted']), damped=True)
    fit = model.fit()
    pred = fit.forecast(test_hours)

    fcst = pd.concat([
        pd.Series([0] * 25),
        data['seasonal indices'][25:].multiply(list(fit.fittedvalues) + list(pred))
    ])

    if in_place:
        data['holtDamped adjusted'] = fcst
    else:
        return fcst
Example #11
class HoltWintersPredictor(object):
    def __init__(self, data_in, num_prediction_periods):
        self.__history = data_in
        self.__num_prediction_periods = num_prediction_periods

        y = np.array(data_in).reshape(-1, 1)
        y = np.clip(y, 0.00001, None)  # HoltWinters doesn't like zeros

        self.__model = Holt(y, exponential=True, damped=True)
        self.__results = self.__model.fit(optimized=True)

    @property
    def configuration(self):
        return ""

    def predict_counts(self):
        start_index = len(self.__history)
        y = self.__model.predict(self.__results.params,
                                 start=start_index + 1,
                                 end=start_index + self.__num_prediction_periods)
        y_list = y.ravel().tolist()
        return clip(y_list, 0, inf)  # assumes: from numpy import clip, inf
Example #12
    def predict_with_exp(self, dataset, code='PT', y='Total_Cases', days=5):
        tmp = dataset[[y+code]]
        history = [x for x in tmp.values]
        news = []
        data_mod = tmp.reset_index()
        data_mod.columns = ["Date",y+code]
        for i in range(days):
            model = Holt(history[i:])
            model_fit = model.fit(smoothing_level=self.level)
            output = model_fit.forecast()
            yhat = output[0]
            history.append(np.array([round(yhat)]))
            
            xn = datetime.datetime.strptime(data_mod.iloc[-1]['Date'], '%m/%d/%y') \
                + datetime.timedelta(days=i+1)
            news.append(
                pd.Series([xn.strftime("%m/%d/%y"), round(yhat)], index=data_mod.columns)
            )
            
        data_mod = pd.DataFrame(news)
        data_mod.set_index('Date', inplace=True, drop=True)
        data_mod.columns = ["EXPO"+code]

        return data_mod
Example #13
def forecasting_model(data_initial,variable_col,date_col,model,independentVariables,test_split):
    
    if date_col == '':
        index = data_initial.index
    else:
        index = pd.DatetimeIndex(data_initial[date_col])
    
    ts = pd.Series(data_initial[variable_col])
    ts.index = index 
    ts = ts.sort_index()
    test_ind = round(len(ts)*test_split)
    ts_train = ts[:len(ts)-test_ind]
    ts_test = ts[-test_ind:]
    no_of_periods_to_forecast = len(ts_test)
    
    #ts.plot()
    
    ########## resampling with time frame monthly, quarterly and annual          
    try:
        # Resampling to monthly frequency
        ts_monthly = ts_train.resample('M').mean()
        # Resampling to quarterly frequency
        ts_quarterly = ts_train.resample('Q-DEC').mean()
        #Resampling to annual frequency
        ts_annual = ts_train.resample('A-DEC').mean()
        
    #    ts_monthly.plot()
    #    ts_quarterly.plot()
    #    ts_annual.plot()
    
    except Exception:
        print('You need to specify the date column')
    
    days_available = len(data_initial.dropna()) 
    months_available = len(ts_monthly.dropna())
    years_available= len(ts_annual.dropna())
    
    if days_available + months_available + years_available == 0:
        error = ("Insufficient data for time series analysis with "
                 + str(days_available) + " Days, " + str(months_available)
                 + " Months and " + str(years_available) + " Years")
        print(error)
    #    return 1
    else:
        error = ''
    
    try:
        result = seasonal_decompose(ts_train, model='additive', period=1)
        #result.plot()
        decomp_overall = pd.DataFrame([list(result.observed), list(result.trend), list(result.seasonal), list(result.resid), list(ts_train.index)])
        decomp_overall = decomp_overall.transpose()
        decomp_overall.columns = ["actual","trend","seasonality","randomness","date"]
    except Exception:
        error = "Unable to Decompose time series"
        print(error)
        
       
    Percentage_Variance_Explained_by_trend = (decomp_overall['trend'].dropna().var())/(decomp_overall['actual'].dropna().var())*100
    Percentage_Variance_Explained_by_seasonality = (decomp_overall['seasonality'].dropna().var())/(decomp_overall['actual'].dropna().var())*100
    Percentage_Variance_Explained_by_randomness = (decomp_overall['randomness'].dropna().var())/(decomp_overall['actual'].dropna().var())*100
    
    gradient, intercept, r_value, p_value, std_err = stats.linregress(range(len(decomp_overall)), list(decomp_overall['actual']))
    slope_with_time = gradient
    slope_text = "<li class=\"nlg-item\"><span class=\"nlgtext\"> The slope of time for <font style=\"color: #078ea0;\">"+variable_col+" </font> is <b>"+str(round(slope_with_time,4))+"</b> , For every one unit change in time <font style=\"color: #078ea0;\">"+variable_col+" </font> is affected by <b>"+str(round(slope_with_time,4))+"</b> units </span></li>"
    
    # model = 'NNETAR'  # debug override removed so the caller's `model` argument is honored
        
    if (model == "Holtwinters"):
        forecast_model = Holt(np.asarray(decomp_overall['actual'])).fit()
        forecasted = forecast_model.predict(start=len(ts_train)-1, end=len(ts_train)-1+no_of_periods_to_forecast-1)
        forecasted = pd.Series(forecasted)
    elif (model == "ARIMA"):
        forecast_model = ARIMA(ts_train, order=(7,0,1))
        model_fit = forecast_model.fit(disp=0)
        forecasted = model_fit.predict(start=len(ts_train)-1, end=len(ts_train)-1+no_of_periods_to_forecast-1)
    elif (model == "NNETAR"):
        forecasted = nnetar(ts_train,3,no_of_periods_to_forecast)    
        forecasted = pd.Series(forecasted)
    else:
        print('Model method not specified')    
    #### Error metrics evaluate
    forecasted.index = ts_test.index
        
    MAPE = np.mean((np.abs(forecasted - ts_test)/np.abs(ts_test))*100)
    MSE = np.mean((forecasted - ts_test)**2)
    ME = np.mean(forecasted - ts_test)
    MAE = np.mean(np.abs(forecasted - ts_test))
    
    # NLG with model description
    
    Percentage_Variance_Explained_by_trend_text = "<li class=\"nlg-item\"><span class=\"nlgtext\"> Variance Explained By <font style=\"color: #078ea0;\"> Trend </font> is <b>"+str(round(Percentage_Variance_Explained_by_trend,4))+"</b> </span></li>"
    Percentage_Variance_Explained_by_seasonality_text = "<li class=\"nlg-item\"><span class=\"nlgtext\"> Variance Explained By <font style=\"color: #078ea0;\"> Seasonality </font>is <b>"+str(round(Percentage_Variance_Explained_by_seasonality,4))+"</b> </span></li>"
    Percentage_Variance_Explained_by_randomness_text  = "<li class=\"nlg-item\"><span class=\"nlgtext\"> Variance Explained By <font style=\"color: #078ea0;\"> Randomness </font> is <b>"+str(round(Percentage_Variance_Explained_by_randomness,4))+"</b> </span></li>"
    
    seasonal_text = "The amount of "+variable_col+" is affected by seasonality over time"
    seasonal_component = pd.concat([decomp_overall["date"],decomp_overall["seasonality"]], axis=1)
    model_text = "Model has Forecasted the "+ variable_col+" for next "+str(no_of_periods_to_forecast)+" periods"
    
    output = [decomp_overall,slope_text,Percentage_Variance_Explained_by_trend_text,Percentage_Variance_Explained_by_seasonality_text,Percentage_Variance_Explained_by_randomness_text,seasonal_text,seasonal_component,model_text,forecasted,ts_test,MAPE,MSE,ME,MAE]  
    
    return output
Example #14
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#  
# http://www.apache.org/licenses/LICENSE-2.0
#  
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

from statsmodels.tsa.holtwinters import Holt

x2 = np.linspace(0, 99, 100)
y2 = pd.Series(0.1 * x2 + 2 * np.random.randn(100))
ets2 = Holt(y2)
r2 = ets2.fit()
pred2 = r2.predict(start=len(y2), end=len(y2) + len(y2)//2)

pd.DataFrame({
    'origin': y2,
    'fitted': r2.fittedvalues,
    'pred': pred2
}).plot(legend=True)
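# Hedged addition (not in the original script): inspect the estimated
# coefficients and render the plot.
print(r2.params)
plt.show()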
# Fit Models

# These 4 hyperparameters will be automatically tuned if optimized=True:
#    smoothing_level (alpha): the smoothing coefficient for the level.
#    smoothing_slope (beta): the smoothing coefficient for the trend.
#    smoothing_seasonal (gamma): the smoothing coefficient for the seasonal component.
#    damping_slope (phi): the coefficient for the damped trend.
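# A hedged illustration (not part of the original script): to pin these
# hyperparameters instead of letting fit() tune them, pass them explicitly
# and disable the optimizer. The values below are placeholders.
holt_fixed_897 = Holt(train_store897_y_ets, damped_trend=True,
                      initialization_method='estimated')
holt_fixed_897 = holt_fixed_897.fit(smoothing_level=0.8, smoothing_trend=0.2,
                                    damping_trend=0.95, optimized=False)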

# Simple Exponential Smoothing
ses_model_897 = SimpleExpSmoothing(train_store897_y_ets,
                                   initialization_method='estimated')
ses_model_897 = ses_model_897.fit(optimized=True)

# Holt (double)
holt_model_897 = Holt(train_store897_y_ets, initialization_method='estimated')
holt_model_897 = holt_model_897.fit(optimized=True)

# Holt-Winters (triple)
holt_winters_model_897 = ExponentialSmoothing(
    train_store897_y_ets,
    trend="add",
    seasonal="add",
    seasonal_periods=7,
    initialization_method='estimated')
holt_winters_model_897 = holt_winters_model_897.fit(optimized=True)

# Save and store training data
train_store897_y_ets.to_pickle(
    '../../../../data/rossmann/intermediate/03_SalesModelingBase/04_Store897/train_store897_y_ets.pkl'
)
Example #16
# In[1]: Train the model

import numpy as np
import pandas as pd
from statsmodels.tsa.holtwinters import Holt

# load dataset
Y = pd.read_csv('Y stocks.csv').iloc[:, [2]]

# split up the data into training and testing
X = Y.values
size = int(len(X) * 0.8)
train, test = X[0:size], X[size:len(X)]

# train and forecast one step ahead for the entire test set
# keep history as plain floats so Holt receives a 1-D series
history = [float(x) for x in train]
predictions = list()
for t in range(len(test)):
    model = Holt(history)
    model_fit = model.fit()
    output = model_fit.forecast()
    yhat = output[0]
    predictions.append(yhat)
    obs = float(test[t])
    history.append(obs)
    print('predicted=%f, expected=%f' % (yhat, obs))

# collect predictions and actual values
predictions = pd.DataFrame({
    "Actual": test.flatten(),
    "Predict": np.array(predictions).flatten()
})
# predictions.to_csv("holt predictions.csv", index=False)
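# Hedged follow-up (not in the original): score the walk-forward forecasts.
rmse = np.sqrt(np.mean((predictions['Actual'] - predictions['Predict']) ** 2))
print('walk-forward RMSE: %.3f' % rmse)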

# In[2]: Visualize the predictions
Example #17
    def hw(self, hi, lo, input_period, train_period, output_period,
           pct_require):

        input_time = self.extract_time_type(input_period)
        train_time = self.extract_time_type(train_period)

        # extract column from data using column_no
        data = self.data.iloc[:, self.column_no - 1].dropna()
        data = pd.DataFrame(data)
        # convert from month to year
        if self.frequency[input_time[0]] >= self.frequency[train_time[0]]:
            data_check = data.copy()
            if train_period == 'week':
                previous_period = 'week'
            if train_period == 'month':
                previous_period = 'day'
            if train_period == 'quarter':
                previous_period = 'quarter'
            if train_period == 'year':
                previous_period = 'month'
            last_time = getattr(data.index[len(data) - 1], previous_period)
            # cutting data if it's less than 6
            if last_time > self.frequency[input_period] / 2:
                data = self.arima_to_fill(data, last_time,
                                          self.frequency[previous_period],
                                          input_period)
            else:
                data = self.remove(data, last_time)
            data_info = self.data_info(data)
            # get portion
            if train_period == 'month' or train_period == 'year':
                percent_portion = self.percentage_type1(
                    data_info, input_time[0], train_time[0]).reset_index()
            else:
                percent_portion = self.percentage_type2(
                    data_info, input_time[0], train_time[0]).reset_index()
        else:
            print(input_time, ' cannot be converted to ', train_time,
                  ' as it is smaller')

        if train_period == 'week':
            data = self.resemble_week(data_info[self.name])
        data = data[self.name].resample(self.time[train_period]).sum()
        if pct_require:
            data = self.percent_change(data, train_period).dropna()
        train = self.outlier(data, lower=lo, upper=hi)
        train = train.interpolate(method='spline', order=3, s=0).ffill().bfill()
        model = Holt(train)
        model_fit = model.fit()
        future_forecast = model_fit.predict(
            len(train),
            len(train) + self.no_of_forecast_month - 1).reset_index()
        future_forecast = future_forecast.rename(columns={
            'index': 'Date',
            0: self.name
        })
        train = train.reset_index()
        train = pd.concat([train, future_forecast], ignore_index=True)
        # train = self.percent_change(train).reset_index()
        # train = pd.melt(train, id_vars=[train.columns[0]], value_vars=[train.columns[1]])

        train = train[len(train) -
                      self.no_of_forecast_month:len(train)].reset_index(
                          drop=True)
        time_add = train_period + 's'
        if time_add == 'quarters':
            last_date = train['Date'][len(train) - 1] + relativedelta(months=4)
        else:
            last_date = train['Date'][len(train) -
                                      1] + relativedelta(**{time_add: 1})
        dummy_data = pd.DataFrame(columns=['Date', self.name])
        dummy_data.loc[0] = [last_date, 0]
        train = pd.concat([train, dummy_data], ignore_index=True)
        train = train.set_index('Date')
        train_converted = train[self.name].resample(
            self.time[input_period]).ffill()
        train_converted = train_converted[:-1].reset_index()
        portion = percent_portion
        if self.frequency[input_time[0]] == self.frequency[train_time[0]]:
            portion[self.name] = 1
        train_converted['Date'] = pd.to_datetime(train_converted['Date'])
        train_converted['month'] = pd.DatetimeIndex(
            train_converted['Date']).month
        join_data = train_converted.merge(percent_portion,
                                          left_on=input_period,
                                          right_on=input_period)
        join_data[self.name] = join_data[self.name +
                                         '_x'] * join_data[self.name + '_y']
        join_data = join_data.sort_values(by=['Date']).set_index('Date')
        forecast_value = pd.Series(join_data[self.name], index=join_data.index)
        final = forecast_value.resample(
            self.time[output_period]).sum().reset_index()
        final = pd.melt(final,
                        id_vars=[final.columns[0]],
                        value_vars=[final.columns[1]])
        print(final)
Example #18
def get_holt(data):
    model = Holt(data, damped_trend=True, initialization_method="estimated")
    model = model.fit(smoothing_level=0.8, smoothing_trend=0.2)
    preds = model.forecast(DAYS_TO_PREDICT)
    return preds.tolist()
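A hedged usage sketch for the function above; DAYS_TO_PREDICT and the toy series are assumptions, not part of the original snippet:

import pandas as pd
from statsmodels.tsa.holtwinters import Holt

DAYS_TO_PREDICT = 7
series = pd.Series([float(i) for i in range(30)],
                   index=pd.date_range('2021-01-01', periods=30, freq='D'))
print(get_holt(series))  # DAYS_TO_PREDICT forecast values as a plain list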
Example #19
# Fit Models

# These 4 hyperparameters will be automatically tuned if optimized=True:
#    smoothing_level (alpha): the smoothing coefficient for the level.
#    smoothing_slope (beta): the smoothing coefficient for the trend.
#    smoothing_seasonal (gamma): the smoothing coefficient for the seasonal component.
#    damping_slope (phi): the coefficient for the damped trend.

# Simple Exponential Smoothing
ses_model_708 = SimpleExpSmoothing(train_store708_y_ets,
                                   initialization_method='estimated')
ses_model_708 = ses_model_708.fit(optimized=True)

# Holt (double)
holt_model_708 = Holt(train_store708_y_ets, initialization_method='estimated')
holt_model_708 = holt_model_708.fit(optimized=True)

# Holt-Winters (triple)
holt_winters_model_708 = ExponentialSmoothing(
    train_store708_y_ets,
    trend="add",
    seasonal="add",
    seasonal_periods=7,
    initialization_method='estimated')
holt_winters_model_708 = holt_winters_model_708.fit(optimized=True)

# Save and store training data
train_store708_y_ets.to_pickle(
    '../../../../data/rossmann/intermediate/04_SalesModelingReduced/02_Store708/train_store708_y_ets.pkl'
)
# Fit Models

# These 4 hyperparameters will be automatically tuned if optimized=True:
#    smoothing_level (alpha): the smoothing coefficient for the level.
#    smoothing_slope (beta): the smoothing coefficient for the trend.
#    smoothing_seasonal (gamma): the smoothing coefficient for the seasonal component.
#    damping_slope (phi): the coefficient for the damped trend.

# Simple Exponential Smoothing
ses_model_198 = SimpleExpSmoothing(train_store198_y_ets,
                                   initialization_method='estimated')
ses_model_198 = ses_model_198.fit(optimized=True)

# Holt (double)
holt_model_198 = Holt(train_store198_y_ets, initialization_method='estimated')
holt_model_198 = holt_model_198.fit(optimized=True)

# Holt-Winters (triple)
holt_winters_model_198 = ExponentialSmoothing(
    train_store198_y_ets,
    trend="add",
    seasonal="add",
    seasonal_periods=7,
    initialization_method='estimated')
holt_winters_model_198 = holt_winters_model_198.fit(optimized=True)

# Save and store training data
train_store198_y_ets.to_pickle(
    '../../../../data/rossmann/intermediate/03_SalesModelingBase/03_Store198/train_store198_y_ets.pkl'
)
Example #21
    def test_holt_damp_R(self):
        # Test the damping parameters against the R forecast package's `ets`
        # library(forecast)
        # livestock2_livestock <- c(...)
        # res <- ets(livestock2_livestock, model='AAN', damped=TRUE, phi=0.98)
        mod = Holt(self.livestock2_livestock, damped=True)
        params = {
            'smoothing_level': 0.97402626,
            'smoothing_slope': 0.00010006,
            'damping_slope': 0.98,
            'initial_level': 252.59039965,
            'initial_slope': 6.90265918
        }
        fit = mod.fit(optimized=False, **params)

        # Check that we captured the parameters correctly
        for key in params.keys():
            assert_allclose(fit.params[key], params[key])

        # Summary output
        # print(res$mse)
        assert_allclose(fit.sse / mod.nobs, 195.4397924865488, atol=1e-3)
        # print(res$aicc)
        # TODO: this fails - different AICC definition?
        # assert_allclose(fit.aicc, 282.386426659408, atol=1e-3)
        # print(res$bic)
        # TODO: this fails - different BIC definition?
        # assert_allclose(fit.bic, 287.1563626818338)

        # print(res$states[,'l'])
        # note: this array includes the initial level
        desired = [
            252.5903996514365, 263.7992355246843, 268.3623324350207,
            261.0312983437606, 266.6590942700923, 277.3958197247272,
            283.8256217863908, 290.2962560621914, 292.5701438129583,
            300.7655919939834, 309.2118057241649, 318.2377698496536,
            329.2238709362550, 338.7709778307978, 339.3669793596703,
            329.0127022356033, 314.7684267018998, 314.5948077575944,
            321.3612035017972, 329.6924360833211, 346.0712138652086,
            352.2534120008911, 348.5862874190927, 415.8839400693967,
            417.2018843196238, 417.8435306633725, 412.4857261252961,
            412.0647865321129, 395.2500605270393, 401.4367438266322,
            408.1907701386275, 414.1814574903921
        ]
        assert_allclose(np.r_[fit.params['initial_level'], fit.level], desired)

        # print(res$states[,'b'])
        # note: this array includes the initial slope
        desired = [
            6.902659175332394, 6.765062519124909, 6.629548973536494,
            6.495537532917715, 6.365550989616566, 6.238702070454378,
            6.113960476763530, 5.991730467006233, 5.871526257315264,
            5.754346516684953, 5.639547926790058, 5.527116419415724,
            5.417146212898857, 5.309238662451385, 5.202580636191761,
            5.096941655567694, 4.993026494493987, 4.892645486210410,
            4.794995106664251, 4.699468310763351, 4.606688340205792,
            4.514725879754355, 4.423600168391240, 4.341595902295941,
            4.254462303550087, 4.169010676686062, 4.084660399498803,
            4.002512751871354, 3.920332298146730, 3.842166514133902,
            3.765630194200260, 3.690553892582855
        ]
        # TODO: not sure why the precision is so low here...
        assert_allclose(np.r_[fit.params['initial_slope'], fit.slope],
                        desired,
                        atol=1e-3)

        # print(res$fitted)
        desired = [
            259.3550056432622, 270.4289967934267, 274.8592904290865,
            267.3969251260200, 272.8973342399166, 283.5097477537724,
            289.8173030536191, 296.1681519198575, 298.3242395451272,
            306.4048515803347, 314.7385626924191, 323.6543439406810,
            334.5326742248959, 343.9740317200002, 344.4655083831382,
            334.0077050580596, 319.6615926665040, 319.3896003340806,
            326.0602987063282, 334.2979150278692, 350.5857684386102,
            356.6778433630504, 352.9214155841161, 420.1387040536467,
            421.3712573771029, 421.9291611265248, 416.4886933168049,
            415.9872490289468, 399.0919861792231, 405.2020670104834,
            411.8810877289437
        ]
        assert_allclose(fit.fittedvalues, desired, atol=1e-3)

        # print(forecast(res)$mean)
        desired = [
            417.7982003051233, 421.3426082635598, 424.8161280628277,
            428.2201774661102, 431.5561458813270, 434.8253949282395,
            438.0292589942138, 441.1690457788685, 444.2460368278302,
            447.2614880558126
        ]
        assert_allclose(fit.forecast(10), desired, atol=1e-4)
Example #22
# start_params=None,
# initial_level=None,
# initial_slope=None,
# use_brute=True)

######################################################################
########  Part 1: Holt's Linear Trend Method
######################################################################

# 3. Train the model
alpha = 0.8
beta = 0.2

# Linear trend: use fixed alpha and beta
mdl = Holt(ts)
results = mdl.fit(smoothing_level=alpha, smoothing_slope=beta, optimized=False)

# results = Holt(ts).fit(
#                 smoothing_level=alpha,
#                 smoothing_slope=beta,
#                 optimized=False)
# print(results.summary())

print('Parameters:\n', results.params)

# 4. Model evaluation
print(results.aic, results.aicc, results.bic)
# print(results.resid)    # residuals: resid = y - pred
y_pred = results.fittedvalues
y_true = ts
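A hedged continuation (not in the original): score the in-sample fit, assuming numpy is imported as np.

rmse = np.sqrt(np.mean((y_true - y_pred) ** 2))
mape = np.mean(np.abs((y_true - y_pred) / y_true)) * 100
print('RMSE: %.3f, MAPE: %.2f%%' % (rmse, mape))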