Example #1
 def _get_ma(self, data):
     """Fit a local level model and return the smoothed level series."""
     data = np.asarray(data)
     model = pf.LLEV(data)
     x = model.fit()
     mas = model.return_local_level()
     ma = mas['data']
     return ma
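For context, here is a minimal standalone sketch of the same pf.LLEV workflow outside the class; the synthetic random-walk data, seed, and variable names are illustrative assumptions, and it only uses calls that appear in these examples or in the PyFlux documentation (LLEV, fit, summary, plot_fit).

import numpy as np
import pyflux as pf

# Assumed synthetic data: a latent random walk observed with noise
np.random.seed(0)
level = np.cumsum(np.random.normal(0, 0.5, 200))
data = level + np.random.normal(0, 1.0, 200)

model = pf.LLEV(data)
results = model.fit()            # maximum likelihood fit by default
results.summary()                # prints the estimated latent variables
model.plot_fit(figsize=(15, 5))  # observed series with the smoothed local level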
Example #2
 def durbin_koopman_simulation(self, n=10, plot=True):
     """Draw n simulation-smoother (Durbin-Koopman) samples from a fitted local level model."""
     df = pd.DataFrame(self.full_data)
     model = pf.LLEV(df)
     x = model.fit()
     if plot:
         plt.figure(figsize=(15, 5))
         for i in range(n):
             print(model.latent_variables.get_z_values())
             plt.plot(
                 model.index,
                 model.simulation_smoother(
                     model.latent_variables.get_z_values())[0]
                 [0:model.index.shape[0]])
         plt.show()
     else:
         data = []
         for i in range(n):
             data.append(
                 model.simulation_smoother(
                     model.latent_variables.get_z_values())[0]
                 [0:model.index.shape[0]])
         return model.index, data
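The same simulation-smoother call can be exercised outside the class; the sketch below reuses only calls already present in Example #2 (pf.LLEV, fit, latent_variables.get_z_values, simulation_smoother, model.index), while the synthetic series and the number of draws are assumptions for illustration.

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import pyflux as pf

# Assumed synthetic data: a plain random walk
np.random.seed(1)
series = pd.DataFrame(np.cumsum(np.random.normal(0, 1, 300)))

model = pf.LLEV(series)
model.fit()

plt.figure(figsize=(15, 5))
for _ in range(5):
    # each call draws one smoothed state path given the fitted latent variables
    draw = model.simulation_smoother(model.latent_variables.get_z_values())[0]
    plt.plot(model.index, draw[0:model.index.shape[0]])
plt.title("Simulation smoother draws")
plt.show()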
Example #3
# Filtered data
# print(cpu_data)
fig2 = plt.figure(figsize=(15, 5))
plt.plot(date, tempf, 'r-', linewidth=2)
plt.xlabel("Timestamp")
plt.ylabel("Filtered CPU Usage (%)")
plt.legend(['Filtered'])
plt.title("Filtered CPU usage")
plt.show()

cpu_data = tempf
print("start ensemble")
model1 = pf.ARIMA(data=cpu_data, ar=4, ma=0)
model2 = pf.ARIMA(data=cpu_data, ar=8, ma=0)
model3 = pf.LLEV(data=cpu_data)
#model4 = pf.GASLLEV(data=cpu_data, family=pf.GASt())
model4 = pf.GASLLEV(data=cpu_data, family=pf.Poisson())
model5 = pf.GPNARX(data=cpu_data, ar=1, kernel=pf.SquaredExponential())
model6 = pf.GPNARX(data=cpu_data, ar=2, kernel=pf.SquaredExponential())
model7 = pf.DynReg('CPUusage', data=dataframe)

mix = pf.Aggregate(learning_rate=1.0, loss_type='squared')
mix.add_model(model1)
mix.add_model(model2)
mix.add_model(model3)
#mix.add_model(model4)
#mix.add_model(model5)
#mix.add_model(model6)
mix.add_model(model7)
Example #4
import matplotlib.pyplot as plt
#%matplotlib inline

growthdata = pd.read_csv('http://www.pyflux.com/notebooks/GDPC1.csv')
USgrowth = pd.DataFrame(np.diff(np.log(growthdata['VALUE']))[149:len(growthdata['VALUE'])])
USgrowth.index = pd.to_datetime(growthdata['DATE'].values[1+149:len(growthdata)])
USgrowth.columns = ['US Real GDP Growth']
plt.figure(figsize=(15,5))
plt.plot(USgrowth)
plt.ylabel('Real GDP Growth')
plt.title('US Real GDP Growth')
plt.show()

model1 = pf.ARIMA(data=USgrowth, ar=4, ma=0)
model2 = pf.ARIMA(data=USgrowth, ar=8, ma=0)
model3 = pf.LLEV(data=USgrowth)
#model4 = pf.GASLLEV(data=USgrowth, family=pf.GASt())
model5 = pf.GPNARX(data=USgrowth, ar=1, kernel=pf.SquaredExponential())
model6 = pf.GPNARX(data=USgrowth, ar=2, kernel=pf.SquaredExponential())

mix = pf.Aggregate(learning_rate=1.0, loss_type='squared')
mix.add_model(model1)
mix.add_model(model2)
mix.add_model(model3)
#mix.add_model(model4)
mix.add_model(model5)
mix.add_model(model6)

mix.tune_learning_rate(40)
print(mix.learning_rate)  # inspect the tuned ensemble learning rate
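As a self-contained version of the ensemble step above, the sketch below uses only calls that already appear in these examples (pf.ARIMA, pf.LLEV, pf.Aggregate, add_model, tune_learning_rate, learning_rate); the synthetic series and the model choices are illustrative assumptions.

import numpy as np
import pyflux as pf

# Assumed synthetic series to ensemble over
np.random.seed(2)
y = np.cumsum(np.random.normal(0, 1, 250))

model_a = pf.ARIMA(data=y, ar=4, ma=0)
model_b = pf.ARIMA(data=y, ar=8, ma=0)
model_c = pf.LLEV(data=y)

# Aggregate combines the models' predictions under a squared loss
mix = pf.Aggregate(learning_rate=1.0, loss_type='squared')
mix.add_model(model_a)
mix.add_model(model_b)
mix.add_model(model_c)

mix.tune_learning_rate(40)   # search over candidate learning rates
print(mix.learning_rate)     # the learning rate selected by tuning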
Example #5
 def plot_llm(self):
     """Fit a local level model to the full data and plot the fit."""
     model = pf.LLEV(self.full_data)
     x = model.fit()
     model.plot_fit()
Example #6
 def _get_margin(self):
     """Fit a local level model and return the 'margin' series it exposes."""
     model = pf.LLEV(self.full_data)
     x = model.fit()
     data = model.return_local_level()
     margin = data['margin']
     return margin
Example #7
 def _get_noise(self):
     """Fit a local level model on the full data and return its noise component."""
     model = pf.LLEV(self.full_data)
     x = model.fit()
     noise = model.return_noise()
     return noise