import matplotlib.pyplot as plt
from stldecompose import decompose, forecast
from stldecompose.forecast_funcs import seasonal_naive


def stl_seasonal_decomposition(self):
    if self.has_validation_error:
        return

    # Decomposition based on STL - Package: stldecompose
    org_unit_group_stl = decompose(self.series, period=12)
    fig, (ax1, ax2, ax3, ax4) = plt.subplots(4, 1, figsize=(14, 9))
    self.series.plot(ax=ax1)
    org_unit_group_stl.trend.plot(ax=ax2)
    org_unit_group_stl.seasonal.plot(ax=ax3)
    org_unit_group_stl.resid.plot(ax=ax4)
    ax1.set_title("Vaccine Demand for {} in {}".format(
        self.vaccine, self.health_facility))
    ax2.set_title("Trend")
    ax3.set_title("Seasonality")
    ax4.set_title("Residuals")
    plt.tight_layout()
    plt.show()

    # Eliminating the seasonal component
    org_unit_group_adjusted = self.series - org_unit_group_stl.seasonal
    plt.figure(figsize=(12, 8))
    org_unit_group_adjusted.plot()
    plt.title(
        "Plot of Vaccine Demand of {} in {} without Seasonal Component".format(
            self.vaccine, self.health_facility))
    plt.show()

    # Getting the seasonal component only
    # Seasonality gives structure to the data
    plt.figure(figsize=(12, 8))
    org_unit_group_stl.seasonal.plot()
    plt.title(
        "Plot of Seasonal Component of Vaccine Demand of {} in {}".format(
            self.vaccine, self.health_facility))
    plt.show()

    # Creating a 12-month forecast based on the STL fit
    stl_fcast = forecast(org_unit_group_stl,
                         steps=12,
                         fc_func=seasonal_naive,
                         seasonal=True)

    # Plot of the forecast and the original data
    plt.figure(figsize=(12, 8))
    plt.plot(self.series, label="{} Demand".format(self.vaccine))
    plt.plot(stl_fcast, label=stl_fcast.columns[0])
    plt.title(
        "Plot of Vaccine Demand of {} in {} Next Year Forecast".format(
            self.vaccine, self.health_facility))
    plt.legend()
    plt.show()
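# Usage sketch (assumption, not from the source): a minimal object exposing
# the attributes the method above reads (has_validation_error, series,
# vaccine, health_facility), driven with synthetic monthly demand. The demo
# class and all of its values are hypothetical.
import numpy as np
import pandas as pd


class _DemoVaccineSeries:
    # reuse the module-level function above as a bound method
    stl_seasonal_decomposition = stl_seasonal_decomposition

    def __init__(self):
        idx = pd.date_range("2014-01-01", periods=48, freq="MS")
        self.series = pd.Series(
            200 + 40 * np.sin(2 * np.pi * (np.arange(48) % 12) / 12)
            + 5 * np.random.randn(48),
            index=idx)
        self.vaccine = "BCG"
        self.health_facility = "Demo Facility"
        self.has_validation_error = False


_DemoVaccineSeries().stl_seasonal_decomposition()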
from sklearn.metrics import mean_squared_error
from stldecompose import decompose, forecast
from stldecompose.forecast_funcs import naive


def stl(X, ts):
    """Fit an STL decomposition on a 90/10 split and score a naive forecast."""
    print("Entering STL")
    train_size = int(len(X) * 0.90)
    test_size = len(X) - train_size
    train, test = ts[0:train_size], ts[train_size:len(X)]
    # one seasonal cycle every 7 observations
    decomp = decompose(train, period=7)
    fcast = forecast(decomp, steps=test_size, fc_func=naive, seasonal=True)
    # error calculation on the hold-out period
    y_pred = [row[0] for row in fcast.values]
    y_true = [row[0] for row in test.values]
    Ferror = mean_squared_error(y_true, y_pred)
    print("Leaving STL")
    return decomp, Ferror
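# Usage sketch (illustrative; not from the original source). Drives the stl()
# helper above with a synthetic daily series so the decompose/forecast round
# trip is visible end to end. The synthetic demand values and column name
# "value" are assumptions; only the stl(X, ts) -> (decomposition, MSE)
# contract comes from the code above.
import numpy as np
import pandas as pd

idx = pd.date_range("2017-01-01", periods=364, freq="D")
demo_ts = pd.DataFrame(
    {"value": 100 + 10 * np.sin(2 * np.pi * np.arange(364) / 7)
              + np.random.randn(364)},
    index=idx)
decomp_demo, mse_demo = stl(demo_ts.values, demo_ts)
print("STL hold-out MSE on synthetic data:", mse_demo)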
import os

import numpy as np
import pandas as pd
from stldecompose import decompose, forecast
from stldecompose.forecast_funcs import drift

# `args` (argparse namespace) and `run_prophet` (Prophet fitting helper)
# are defined elsewhere in this script.


def main():
    '''
    Main function that generates the result.
    '''
    # load data
    data = pd.read_csv(args.excep_train, parse_dates=["SHIFT_DATE"])

    # create train and validation windows
    train = data[(data["SHIFT_DATE"] > "2012-12-31")
                 & (data["SHIFT_DATE"] < "2018-01-01")]
    val = data[(data["SHIFT_DATE"] > "2017-12-31")
               & (data["SHIFT_DATE"] < "2019-01-01")]

    # keep only a portion of the sites and job families
    sites_kept = [
        "St Paul's Hospital", "Mt St Joseph", "Holy Family", "SVH Langara",
        "Brock Fahrni", "Youville Residence"
    ]
    job_families_kept = ["DC1000", "DC2A00", "DC2B00"]
    train_clean = train[train["SITE"].isin(sites_kept)]
    train_clean = train_clean[train_clean["JOB_FAMILY"].isin(job_families_kept)]
    val_clean = val[val["SITE"].isin(sites_kept)]
    val_clean = val_clean[val_clean["JOB_FAMILY"].isin(job_families_kept)]

    # create training dataframes (daily shift counts per combination)
    splitting_train = train_clean.groupby(
        ["JOB_FAMILY", "SITE", "SUB_PROGRAM",
         "SHIFT_DATE"]).size().reset_index()
    splitting_train = splitting_train.rename({"SHIFT_DATE": "ds", 0: "y"}, axis=1)

    # create validation dataframes
    splitting_val = val_clean.groupby(
        ["JOB_FAMILY", "SITE", "SUB_PROGRAM",
         "SHIFT_DATE"]).size().reset_index()
    splitting_val = splitting_val.rename({"SHIFT_DATE": "ds", 0: "y"}, axis=1)

    # create timeframe data for prediction
    total_timeframe = pd.DataFrame(
        pd.date_range(start='2013-01-01', end='2017-12-31',
                      freq="D")).rename({0: "ds"}, axis=1)
    timeframe = pd.DataFrame(
        pd.date_range(start='2018-01-01', end='2018-12-31',
                      freq="D")).rename({0: "ds"}, axis=1)

    # unique combinations
    sites = train_clean["SITE"].unique()
    job_families = train_clean["JOB_FAMILY"].unique()
    sub_programs = train_clean["SUB_PROGRAM"].unique()

    # create and store predictions and true results
    models = {}
    split_data = {}
    pred_results_past = {}
    pred_results_future = {}
    true_results = {}
    for i in sites:
        for j in job_families:
            for k in sub_programs:
                temp_data_train = splitting_train[
                    (splitting_train["SITE"] == i)
                    & (splitting_train["JOB_FAMILY"] == j)
                    & (splitting_train["SUB_PROGRAM"] == k)].reset_index()
                temp_data_train = pd.merge(total_timeframe,
                                           temp_data_train,
                                           on="ds",
                                           how="outer")
                temp_data_train["y"] = temp_data_train["y"].fillna(0)
                temp_data_val = splitting_val[
                    (splitting_val["SITE"] == i)
                    & (splitting_val["JOB_FAMILY"] == j)
                    & (splitting_val["SUB_PROGRAM"] == k)].reset_index(drop=True)
                temp_data_val = pd.merge(timeframe,
                                         temp_data_val,
                                         on="ds",
                                         how="outer")
                temp_data_val["y"] = temp_data_val["y"].fillna(0)
                split_data[(i, j, k)] = temp_data_train
                true_results[(i, j, k)] = temp_data_val
                # only fit combinations with enough validation volume
                if temp_data_val["y"].sum() >= 300.0:
                    pred_results_past[(i, j, k)], models[(
                        i, j, k)] = run_prophet(temp_data_train,
                                                total_timeframe)
                    pred_results_future[(i, j, k)] = models[(
                        i, j, k)].predict(timeframe)
                    print("Fitting -", i, j, k, ": Done")

    # combine predictions and true results
    combined = {}
    for i in pred_results_future:
        combined[i] = pd.merge(
            true_results[i], pred_results_future[i], on="ds",
            how="outer")[["ds", "y", "yhat", "yhat_lower", "yhat_upper"]]

    # convert to weekly totals and aggregate errors by week
    weekly = {}
    for i in combined:
        # bucket dates into weeks anchored on Monday
        combined[i]["ds"] = combined[i]["ds"] - pd.DateOffset(weekday=0, weeks=1)
        combined[i]["week"] = combined[i]["ds"].dt.week
        # store y, yhat, yhat_lower, yhat_upper
        weekly_y = combined[i].groupby("ds").y.sum().reset_index()
        weekly_yhat = combined[i].groupby(
            "ds").yhat.sum().astype(int).reset_index()
        weekly_yhat_lower = combined[i].groupby(
            "ds").yhat_lower.sum().astype(int).reset_index()
        weekly_yhat_upper = combined[i].groupby(
            "ds").yhat_upper.sum().astype(int).reset_index()
        # replace negative prediction values with 0
        weekly_yhat = weekly_yhat.where(weekly_yhat["yhat"] >= 0, 0)
        weekly_yhat_lower = weekly_yhat_lower.where(
            weekly_yhat_lower["yhat_lower"] >= 0, 0)
        weekly_yhat_upper = weekly_yhat_upper.where(
            weekly_yhat_upper["yhat_upper"] >= 0, 0)
        # merge weekly results
        weekly[i] = pd.concat([
            weekly_y, weekly_yhat["yhat"], weekly_yhat_lower["yhat_lower"],
            weekly_yhat_upper["yhat_upper"]
        ], axis=1)
        # create columns "week", "site", "job_family", "sub_program"
        length = weekly[i].shape[0]
        weekly[i]["week"] = weekly[i]["ds"].dt.weekofyear
        weekly[i]["site"] = np.repeat(i[0], length)
        weekly[i]["job_family"] = np.repeat(i[1], length)
        weekly[i]["sub_program"] = np.repeat(i[2], length)

    # model residuals: STL-decompose the Prophet errors and add a
    # drift+seasonal forecast of them back onto the weekly predictions
    for i in weekly:
        forecasted = pred_results_past[i]
        actual = split_data[i]
        error = actual["y"] - forecasted["yhat"]
        obs = total_timeframe.copy()
        obs["error"] = error
        obs = obs.set_index("ds")
        decomp = decompose(obs, period=365)
        weekly_fcast = forecast(decomp, steps=365, fc_func=drift, seasonal=True)
        weekly_fcast["week"] = weekly_fcast.index - pd.DateOffset(weekday=0,
                                                                  weeks=1)
        weekly_fcast = weekly_fcast.groupby("week").sum()
        resid_fcast = weekly_fcast.reset_index()["drift+seasonal"]
        weekly_yhat = (weekly[i]["yhat"] + resid_fcast).round(0)
        weekly_yhat_lower = (weekly[i]["yhat_lower"] + resid_fcast).round(0)
        weekly_yhat_upper = (weekly[i]["yhat_upper"] + resid_fcast).round(0)
        weekly[i]["yhat"] = weekly_yhat.where(weekly_yhat >= 0, 0)
        weekly[i]["yhat_lower"] = weekly_yhat_lower.where(
            weekly_yhat_lower >= 0, 0)
        weekly[i]["yhat_upper"] = weekly_yhat_upper.where(
            weekly_yhat_upper >= 0, 0)

    # create data/predictions folder if it doesn't exist
    predictions_path = "../data/predictions/"
    if not os.path.exists(predictions_path):
        os.mkdir(predictions_path)

    # export to "data/predictions/" directory
    total_data = pd.DataFrame()
    for i in weekly:
        total_data = pd.concat([total_data, weekly[i]], axis=0)
    total_data.to_csv(predictions_path + "exception_predictions.csv")
# Relies on module-level globals (Meals, totalMeals, RawNames, Quantity,
# STL, ETS) and imports (numpy as np, joblib as jl, statistics.mean,
# stldecompose.forecast_funcs.naive) defined elsewhere in the app.
def ValuePredictor(to_predict_list):
    to_predict = np.array(to_predict_list).reshape(1, 2)
    meal_name = to_predict[0][0]
    # map the meal name to its numeric meal id
    for i in range(len(Meals)):
        if Meals[i] == meal_name:
            break
    Mid = int(totalMeals[i])
    week = int(to_predict[0][1])
    Ingredients = RawNames[0].unique().tolist()
    Raw = []
    Pred = []
    SafetyStock = []
    ReorderPoint = []
    try:
        # If the STL model is better for this meal
        for s in STL:
            if Mid == s:
                from stldecompose import forecast
                FName = "flaskinventory/models/STL" + str(Mid) + ".xml"
                model = jl.load(FName)
                fore = forecast(model, steps=week, fc_func=naive, seasonal=True)
                Pred = [j[0] for j in fore.values]
                # per-week raw-material demand = predicted meals * quantity per meal
                RawMat = Quantity.loc[Mid]
                for p in range(0, len(Pred)):
                    qt = [int(round(Pred[p] * RawMat[q]))
                          for q in range(1, len(RawMat) + 1)]
                    Raw.append(qt)
                break
        # If the ETS model is better for this meal
        for e in ETS:
            if Mid == e:
                FName = "flaskinventory/models/ETS" + str(Mid) + ".xml"
                model = jl.load(FName)
                Pred = model.forecast(week).tolist()
                RawMat = Quantity.loc[Mid]
                for p in range(0, len(Pred)):
                    qt = [round(Pred[p] * RawMat[q])
                          for q in range(1, len(RawMat) + 1)]
                    Raw.append(qt)
                break
    except Exception as e:
        print("Exception", e)
        Prediction = "No prediction"
        RawMaterials = "No raw materials prediction"
    else:
        # Calculation of cycle stock, safety stock and reorder point
        Prediction = int(round(sum(Pred)))
        Raw = np.array(Raw)
        RawMaterials = np.sum(Raw, 0)
        leadTime = [1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 1, 1, 4, 3, 3, 1, 1]
        maxlead = max(leadTime)
        avglead = round(mean(leadTime), 1)
        RawSafe = Raw.transpose()
        for j in range(0, len(RawSafe)):
            t = RawSafe[j]
            maxt = max(t)
            avgt = round(mean(t), 2)
            # safety stock = max demand * max lead - avg demand * avg lead
            Safety = round((maxt * maxlead) - (avgt * avglead), 2)
            SafetyStock.append(Safety)
            # reorder point = expected lead-time demand + safety stock
            ld = round(leadTime[j] * avgt, 2)
            ReorderPoint.append(round(ld + Safety, 2))
    return (Prediction, RawMaterials, SafetyStock, ReorderPoint, Ingredients,
            Pred, Mid, week)
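# Worked micro-example (illustrative, made-up numbers): the safety-stock /
# reorder-point rule used above, applied to a single ingredient by hand.
from statistics import mean

usage = [12, 15, 9, 14]     # units of one ingredient consumed per week
lead_times = [1, 1, 2, 1]   # supplier lead times (weeks) across ingredients
max_lead, avg_lead = max(lead_times), round(mean(lead_times), 1)   # 2, 1.2
max_use, avg_use = max(usage), round(mean(usage), 2)               # 15, 12.5
safety_stock = round(max_use * max_lead - avg_use * avg_lead, 2)   # 30 - 15.0 = 15.0
reorder_point = round(lead_times[0] * avg_use + safety_stock, 2)   # 12.5 + 15.0 = 27.5
print(safety_stock, reorder_point)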
# Relies on module-level objects (new_tab, pandas as pd,
# sklearn.metrics.mean_squared_error, stldecompose.forecast_funcs.naive)
# defined elsewhere in the notebook.
def stl(k):
    from stldecompose import decompose, forecast
    # weekly order counts for meal k
    ar = new_tab.loc[k].values
    a = [row[0] for row in ar]

    def timeseries_df():
        index = pd.date_range(start="01-01-2017", periods=len(a), freq='W-SAT')
        return pd.DataFrame(a, index=index, columns=['num_orders'])

    ts = timeseries_df()
    X = ts.values
    # 60/40 train/test split
    train_size = int(len(X) * 0.60)
    test_size = len(X) - train_size
    train, test = ts[0:train_size], ts[train_size:len(X)]
    # one seasonal cycle every 7 observations (7 weeks here)
    decomp = decompose(train, period=7)
    fcast = forecast(decomp, steps=test_size, fc_func=naive, seasonal=True)
    y_pred = [row[0] for row in fcast.values]
    y_true = [row[0] for row in test.values]
    Ferror = mean_squared_error(y_true, y_pred)
    return decomp, Ferror, test_size
print("Finished STL") modelETS,errorETS,testlenETS=ets(i) error=min(errorSTL,errorETS) if(error==errorSTL): FinalModel=modelSTL FModel='STL' print("STL") elif(error==errorETS): FinalModel=modelETS FModel='ETS' print("ETS") from stldecompose import decompose, forecast Pred=[] if(FModel=='STL'): forecast=forecast(FinalModel, steps=testlenSTL, fc_func=naive, seasonal=True) globals()['STL%s' % i] = FinalModel print('STL%s' % i) STL.append(i) joblib.dump(FinalModel, 'STL'+ str(i) +'.xml', compress=1) files.download('STL'+ str(i) +'.xml') for j in forecast.values: Pred.append(j[0]) elif(FModel=='ETS'): Pred=FinalModel.forecast(testlenETS) globals()['ETS%s' % i] = FinalModel print('ETS%s' % i) ETS.append(i) joblib.dump(FinalModel, 'ETS'+ str(i) +'.xml', compress=1)
# Relies on module-level imports (flask.request, flask.render_template,
# pandas as pd, numpy as np, joblib as jl, stldecompose.forecast_funcs.naive)
# defined elsewhere in the app.
def ValuePredictor():
    if request.method == "POST":
        Mid = int(request.form["Meal_ID"])
        week = int(request.form["Week"])
        meal_info = pd.read_csv(
            r'C:/Users/jtani/inventory/flask-inventory/static/meal_info.csv')
        Quantity = pd.read_csv(
            r'C:/Users/jtani/inventory/flask-inventory/static/QuantityRequired - Sheet1.csv'
        )
        totalMeals = meal_info['meal_id'].unique()
        # meal ids whose best model was STL and ETS respectively
        STL = [
            1885, 1993, 2139, 2631, 1248, 1778, 1062, 2707, 2640, 2306, 2826,
            1754, 1902, 1311, 1803, 1525, 2304, 1878, 1216, 1247, 1770, 1198,
            1438, 2494, 1847, 2760, 2492, 1543, 2664, 2569, 1571, 2956
        ]
        ETS = [
            2539, 1207, 1230, 2322, 2290, 1727, 1109, 2126, 1971, 1558, 2581,
            1962, 1445, 2444, 2867, 2704, 2577, 2490, 2104
        ]
        Quantity = Quantity.set_index('meal_id')
        Raw = []
        try:
            for s in STL:
                if Mid == s:
                    from stldecompose import forecast
                    FName = ("C:/Users/jtani/inventory/flask-inventory/models/STL"
                             + str(Mid) + ".xml")
                    model = jl.load(FName)
                    fore = forecast(model, steps=week, fc_func=naive,
                                    seasonal=True)
                    Pred = [j[0] for j in fore.values]
                    RawMat = Quantity.loc[Mid]
                    for p in range(0, len(Pred)):
                        qt = [int(round(Pred[p] * RawMat[q]))
                              for q in range(0, len(RawMat))]
                        Raw.append(qt)
                    break
            for e in ETS:
                if Mid == e:
                    FName = ("C:/Users/jtani/inventory/flask-inventory/models/ETS"
                             + str(Mid) + ".xml")
                    model = jl.load(FName)
                    Pred = model.forecast(week).tolist()
                    RawMat = Quantity.loc[Mid]
                    for p in range(0, len(Pred)):
                        qt = [int(round(Pred[p] * RawMat[q]))
                              for q in range(0, len(RawMat))]
                        Raw.append(qt)
                    break
        except Exception as e:
            print("Exception", e)
            Prediction = "No prediction"
            RawMaterials = "No raw materials prediction"
        else:
            # total predicted demand over the horizon
            sumi = 0
            for i in range(0, len(Pred)):
                sumi = Pred[i] + sumi
            Prediction = int(round(sumi))
            Raw = np.array(Raw)
            RawMaterials = np.sum(Raw, 0)
        return Prediction, RawMaterials
    return render_template("MealPrediction.html")
# Relies on module-level objects (db connection, pandas as pd, numpy as np,
# joblib as jl, statistics.mean, stldecompose.forecast_funcs.naive) defined
# elsewhere in the app.
def home():
    cur = db.connection.cursor()
    cur.execute("SELECT * FROM quant")
    Quantity = pd.DataFrame(cur)
    cur.execute("SELECT * FROM meal_info")
    mealInfo = pd.DataFrame(cur)
    cur.execute("SELECT * FROM raw_materials")
    RawNames = pd.DataFrame(cur)
    cur.execute("select meal_id from meal_info where model='ETS'")
    ETS = pd.DataFrame(cur)[0].unique().tolist()
    cur.execute("select meal_id from meal_info where model='STL'")
    STL = pd.DataFrame(cur)[0].unique().tolist()
    print("ETS", ETS)
    print("STL", STL)
    totalMeals = mealInfo[0].unique()
    Quantity = Quantity.set_index(0)
    for meal in totalMeals:
        Mid = int(meal)
        week = 10  # forecast horizon in weeks
        Ingredients = RawNames[0].unique().tolist()
        Raw = []
        try:
            for s in STL:
                if Mid == s:
                    from stldecompose import forecast
                    FName = "flaskinventory/models/STL" + str(Mid) + ".xml"
                    model = jl.load(FName)
                    fore = forecast(model, steps=week, fc_func=naive,
                                    seasonal=True)
                    Pred = [j[0] for j in fore.values]
                    RawMat = Quantity.loc[Mid]
                    for p in range(0, len(Pred)):
                        qt = [int(round(Pred[p] * RawMat[q]))
                              for q in range(1, len(RawMat) + 1)]
                        Raw.append(qt)
                    break
            for e in ETS:
                if Mid == e:
                    FName = "flaskinventory/models/ETS" + str(Mid) + ".xml"
                    model = jl.load(FName)
                    Pred = model.forecast(week).tolist()
                    RawMat = Quantity.loc[Mid]
                    for p in range(0, len(Pred)):
                        qt = [int(round(Pred[p] * RawMat[q]))
                              for q in range(1, len(RawMat) + 1)]
                        Raw.append(qt)
                    break
        except Exception as e:
            print("Exception", e)
            Prediction = "No prediction"
            RawMaterials = "No raw materials prediction"
        else:
            sumi = 0
            for i in range(0, len(Pred)):
                sumi = Pred[i] + sumi
            Prediction = int(round(sumi))
            Raw = np.array(Raw)
            RawMaterials = np.sum(Raw, 0).tolist()
            leadTime = [1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 1, 1, 4, 3, 3, 1, 1]
            maxlead = max(leadTime)
            avglead = round(mean(leadTime), 1)
            RawSafe = Raw.transpose()
            SafetyStock = []
            ReorderPoint = []
            for j in range(0, len(RawSafe)):
                t = RawSafe[j]
                maxt = max(t)
                avgt = round(mean(t), 2)
                # safety stock = max demand * max lead - avg demand * avg lead
                Safety = round((maxt * maxlead) - (avgt * avglead), 2)
                SafetyStock.append(Safety)
                # reorder point = expected lead-time demand + safety stock
                ld = round(leadTime[j] * avgt, 2)
                ReorderPoint.append(round(ld + Safety, 2))
            print('Done')

            # Adding cycle stock to DB; ingredient column names are
            # interpolated into the SQL, values are passed as parameters
            # cur.execute("call sysproc.admin_cmd('reorg table QKX97621.CYCLESTOCK')")
            cur.execute("SELECT * FROM cyclestock WHERE meal_id = ?", (Mid, ))
            df = pd.DataFrame(cur)
            if df.empty:
                print("Does not exist")
                cur.execute("insert into cyclestock (meal_id) values (?)",
                            (Mid, ))
            else:
                print('Meal ID already added!')
            for i in range(len(Ingredients)):
                try:
                    q = ("update cyclestock set " + Ingredients[i]
                         + " = ? where meal_id=?")
                    cur.execute(q, (RawMaterials[i], Mid, ))
                except:
                    # column missing: add it, then retry the update
                    q1 = ("ALTER TABLE cyclestock ADD " + Ingredients[i]
                          + " INTEGER")
                    cur.execute(q1)
                    q2 = ("update cyclestock set " + Ingredients[i]
                          + " = ? where meal_id=?")
                    cur.execute(q2, (RawMaterials[i], Mid, ))
            cur.execute("SELECT * FROM cyclestock WHERE meal_id = ?", (Mid, ))
            df2 = pd.DataFrame(cur)

            # Adding safety stock
            # cur.execute("call sysproc.admin_cmd('reorg table QKX97621.SAFETYSTOCK')")
            cur.execute("SELECT * FROM safetystock WHERE meal_id = ?", (Mid, ))
            df = pd.DataFrame(cur)
            if df.empty:
                print("Does not exist")
                cur.execute("insert into safetystock (meal_id) values (?)",
                            (Mid, ))
            else:
                print('Meal ID already added!')
            for i in range(len(Ingredients)):
                try:
                    q = ("update safetystock set " + Ingredients[i]
                         + " = ? where meal_id=?")
                    cur.execute(q, (SafetyStock[i], Mid, ))
                except:
                    q1 = ("ALTER TABLE safetystock ADD " + Ingredients[i]
                          + " INTEGER")
                    cur.execute(q1)
                    q2 = ("update safetystock set " + Ingredients[i]
                          + " = ? where meal_id=?")
                    cur.execute(q2, (SafetyStock[i], Mid, ))
            cur.execute("SELECT * FROM safetystock WHERE meal_id = ?", (Mid, ))
            df2 = pd.DataFrame(cur)

            # Adding reorder point
            # cur.execute("call sysproc.admin_cmd('reorg table QKX97621.REORDERPOINT')")
            cur.execute("SELECT * FROM reorderpoint WHERE meal_id = ?", (Mid, ))
            df = pd.DataFrame(cur)
            if df.empty:
                print("Does not exist")
                cur.execute("insert into reorderpoint (meal_id) values (?)",
                            (Mid, ))
            else:
                print('Meal ID already added!')
            for i in range(len(Ingredients)):
                try:
                    q = ("update reorderpoint set " + Ingredients[i]
                         + " = ? where meal_id=?")
                    cur.execute(q, (ReorderPoint[i], Mid, ))
                except:
                    q1 = ("ALTER TABLE reorderpoint ADD " + Ingredients[i]
                          + " INTEGER")
                    cur.execute(q1)
                    q2 = ("update reorderpoint set " + Ingredients[i]
                          + " = ? where meal_id=?")
                    cur.execute(q2, (ReorderPoint[i], Mid, ))
            cur.execute("SELECT * FROM reorderpoint WHERE meal_id = ?", (Mid, ))
            df2 = pd.DataFrame(cur)
    return 'Stock prediction Done!'
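# Context sketch (assumption, not from the original source): the fragment
# below expects `nottemts` (the classic Nottingham monthly temperature
# series) and `nottem_stl` (its STL fit) from earlier, elided code. A
# synthetic stand-in with the same shape keeps the fragment runnable:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from stldecompose import decompose, forecast
from stldecompose.forecast_funcs import seasonal_naive

months = pd.date_range("1920-01-01", periods=240, freq="MS")
nottemts = pd.DataFrame(
    {"temp": 49 + 15 * np.sin(2 * np.pi * (np.arange(240) % 12) / 12)
             + np.random.randn(240)},
    index=months)
nottem_stl = decompose(nottemts, period=12)  # 12-month seasonal cycle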
# nottem_stl.resid.plot(ax=ax4)
# ax1.set_title("Nottem")
# ax2.set_title("Trend")
# ax3.set_title("Seasonality")
# ax4.set_title("Residuals")
# plt.tight_layout()

# Eliminating the seasonal component
# nottem_adjusted = nottemts - nottem_stl.seasonal
# plt.figure(figsize=(12, 8))
# nottem_adjusted.plot()

# Getting the seasonal component only
# Seasonality gives structure to the data
# plt.figure(figsize=(12, 8))
# nottem_stl.seasonal.plot()

# Forecast the next 12 months with a seasonal-naive rule on the STL fit
stl_fcast = forecast(nottem_stl,
                     steps=12,
                     fc_func=seasonal_naive,
                     seasonal=True)
stl_fcast.head()

# Plot of the forecast and the original data
plt.figure(figsize=(12, 8))
plt.plot(nottemts, label='Nottem')
plt.plot(stl_fcast, label=stl_fcast.columns[0])
plt.legend()
plt.show()  # render before blocking on input
input("Press enter to exit ;)")
# code for minimizing errors (model residuals)
forecasted = models[i].predict(timeframe_future)
actual = data_individual[i]

# get residuals
error = actual["y"] - forecasted["yhat"]
obs = timeframe_past.copy()
obs["error"] = error
obs = obs.set_index("ds")

# model residuals
period = int((np.max(timeframe_future) - np.min(timeframe_future)).dt.days) + 1
decomp = decompose(obs, period=period)
weekly_fcast = forecast(decomp, steps=period, fc_func=drift, seasonal=True)
weekly_fcast["week"] = weekly_fcast.index - pd.DateOffset(weekday=0, weeks=1)
weekly_fcast = weekly_fcast.groupby("week").sum()

# replace weekly data
resid_fcast = weekly_fcast.reset_index()["drift+seasonal"]
weekly_yhat = (weekly[i]["yhat"] + resid_fcast).round(0)
weekly_yhat_lower = (weekly[i]["yhat_lower"] + resid_fcast).round(0)
weekly_yhat_upper = (weekly[i]["yhat_upper"] + resid_fcast).round(0)

# replace negatives with 0s
weekly[i]["yhat"] = weekly_yhat.where(weekly_yhat >= 0, 0)
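# Standalone sketch (illustrative; the synthetic data is an assumption): the
# same decompose -> forecast(fc_func=drift, seasonal=True) pattern used above,
# on a self-contained daily "residual" series, showing where the
# 'drift+seasonal' column name comes from.
import numpy as np
import pandas as pd
from stldecompose import decompose, forecast
from stldecompose.forecast_funcs import drift

days = pd.date_range("2013-01-01", periods=730, freq="D")
resid_demo = pd.DataFrame(
    {"error": 0.05 * np.arange(730)                           # slow drift
              + 5 * np.sin(2 * np.pi * np.arange(730) / 365)  # yearly cycle
              + np.random.randn(730)},                        # noise
    index=days)
decomp_demo = decompose(resid_demo, period=365)
fcast_demo = forecast(decomp_demo, steps=365, fc_func=drift, seasonal=True)
print(fcast_demo.columns[0])  # -> 'drift+seasonal'
print(fcast_demo.head())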