def fit(self, X: dt.Frame, y: np.array = None, **kwargs):
    """
    Fits ARIMA models (1 per time group) using historical target values contained in y.
    Model fitting is distributed over a pool of processes and uses file storage
    to share the data with the workers.
    :param X: Datatable frame containing the features
    :param y: numpy array containing the historical values of the target
    :return: self
    """
    # Get the logger if it exists
    logger = None
    if self.context and self.context.experiment_id:
        logger = make_experiment_logger(
            experiment_id=self.context.experiment_id,
            tmp_dir=self.context.tmp_dir,
            experiment_tmp_dir=self.context.experiment_tmp_dir)

    tmp_folder = self._create_tmp_folder(logger)
    n_jobs = self._get_n_jobs(logger, **kwargs)

    # Import the ARIMA python module (fails fast here if pmdarima is missing;
    # the actual fitting happens in the worker processes)
    pm = importlib.import_module('pmdarima')

    # Init models
    self.models = {}

    # Convert to pandas
    X = X.to_pandas()
    XX = X[self.tgc].copy()
    XX['y'] = np.array(y)
    self.nan_value = np.mean(y)
    self.ntrain = X.shape[0]

    # Group the input by TGC (time group columns), excluding the time column itself
    tgc_wo_time = list(np.setdiff1d(self.tgc, self.time_column))
    if len(tgc_wo_time) > 0:
        XX_grp = XX.groupby(tgc_wo_time)
    else:
        XX_grp = [([None], XX)]

    # Prepare for multiprocessing
    num_tasks = len(XX_grp)

    def processor(out, res):
        out[res[0]] = res[1]

    pool_to_use = small_job_pool
    loggerinfo(logger, "Arima will use {} workers for parallel processing".format(n_jobs))
    pool = pool_to_use(logger=None, processor=processor,
                       num_tasks=num_tasks, max_workers=n_jobs)

    # Build 1 ARIMA model per time group
    nb_groups = len(XX_grp)
    for _i_g, (key, X) in enumerate(XX_grp):
        # Just say where we are in the fitting process
        if (_i_g + 1) % max(1, nb_groups // 20) == 0:
            loggerinfo(logger, "Auto ARIMA : %d%% of groups fitted" % (100 * (_i_g + 1) // nb_groups))

        X_path = os.path.join(tmp_folder, "autoarima_X" + str(uuid.uuid4()))
        X = X.reset_index(drop=True)
        save_obj(X, X_path)
        key = key if isinstance(key, list) else [key]
        grp_hash = '_'.join(map(str, key))

        args = (X_path, grp_hash, self.time_column, tmp_folder)
        kwargs = {}
        pool.submit_tryget(None, MyParallelAutoArimaTransformer_fit_async,
                           args=args, kwargs=kwargs, out=self.models)
    pool.finish()

    for k, v in self.models.items():
        self.models[k] = load_obj(v) if v is not None else None
        remove(v)

    self._clean_tmp_folder(logger, tmp_folder)

    return self
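# --- Illustrative sketch, not part of the recipe ---
# MyParallelAutoArimaTransformer_fit_async is defined elsewhere in the recipe.
# The hypothetical example_ worker below only sketches the shape such a worker
# could have, assuming save_obj/load_obj/remove are the pickle-based file
# helpers used above and that pmdarima is installed.
def example_arima_fit_async(X_path, grp_hash, time_column, tmp_folder):
    pm = importlib.import_module('pmdarima')
    X = load_obj(X_path)  # group data written by the parent process
    try:
        # error_action='ignore' lets the order search skip failing candidates
        model = pm.auto_arima(X.sort_values(time_column)['y'].values,
                              error_action='ignore')
    except Exception:
        model = None  # the parent maps None to mean-target predictions
    model_path = os.path.join(tmp_folder, "autoarima_model" + str(uuid.uuid4()))
    save_obj(model, model_path)
    remove(X_path)  # free the file used to ship data to this worker
    return grp_hash, model_path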
def transform(self, X: dt.Frame, **kwargs):
    """
    Uses the fitted models (1 per time group) to predict the target.
    If self.is_train exists, we are doing in-sample predictions;
    if it does not, ARIMA is used to predict the future.
    :param X: Datatable Frame containing the features
    :return: ARIMA predictions
    """
    # Get the logger if it exists
    logger = None
    if self.context and self.context.experiment_id:
        logger = make_experiment_logger(
            experiment_id=self.context.experiment_id,
            tmp_dir=self.context.tmp_dir,
            experiment_tmp_dir=self.context.experiment_tmp_dir)

    tmp_folder = self._create_tmp_folder(logger)
    n_jobs = self._get_n_jobs(logger, **kwargs)

    X = X.to_pandas()
    XX = X[self.tgc].copy()
    tgc_wo_time = list(np.setdiff1d(self.tgc, self.time_column))
    if len(tgc_wo_time) > 0:
        XX_grp = XX.groupby(tgc_wo_time)
    else:
        XX_grp = [([None], XX)]

    assert len(XX_grp) > 0
    num_tasks = len(XX_grp)

    def processor(out, res):
        out.append(res)

    pool_to_use = small_job_pool
    loggerinfo(logger, "Arima will use {} workers for transform".format(n_jobs))
    pool = pool_to_use(logger=None, processor=processor,
                       num_tasks=num_tasks, max_workers=n_jobs)

    XX_paths = []
    model_paths = []
    nb_groups = len(XX_grp)
    for _i_g, (key, X) in enumerate(XX_grp):
        # Just say where we are in the transform process
        if (_i_g + 1) % max(1, nb_groups // 20) == 0:
            loggerinfo(logger, "Auto ARIMA : %d%% of groups transformed" % (100 * (_i_g + 1) // nb_groups))

        # Create time group key to store and retrieve fitted models
        key = key if isinstance(key, list) else [key]
        grp_hash = '_'.join(map(str, key))

        # Create file path to store data and pass it to the transform pool
        X_path = os.path.join(tmp_folder, "autoarima_Xt" + str(uuid.uuid4()))

        # Commented for performance, uncomment for debug
        # print("ARIMA - transforming data of shape: %s for group: %s" % (str(X.shape), grp_hash))
        if grp_hash in self.models:
            model = self.models[grp_hash]
            model_path = os.path.join(tmp_folder, "autoarima_modelt" + str(uuid.uuid4()))
            save_obj(model, model_path)
            save_obj(X, X_path)
            model_paths.append(model_path)

            args = (model_path, X_path, self.nan_value, hasattr(self, 'is_train'),
                    self.time_column, self.pred_gap, tmp_folder)
            kwargs = {}
            pool.submit_tryget(None, MyParallelAutoArimaTransformer_transform_async,
                               args=args, kwargs=kwargs, out=XX_paths)
        else:
            # Unseen groups: don't go through the pool, predict the mean target instead
            XX = pd.DataFrame(np.full((X.shape[0], 1), self.nan_value), columns=['yhat'])
            # Sync indices
            XX.index = X.index
            save_obj(XX, X_path)
            XX_paths.append(X_path)
    pool.finish()
    XX = pd.concat((load_obj(XX_path) for XX_path in XX_paths), axis=0).sort_index()
    for p in XX_paths + model_paths:
        remove(p)

    self._clean_tmp_folder(logger, tmp_folder)

    return XX
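# --- Illustrative sketch, not part of the recipe ---
# MyParallelAutoArimaTransformer_transform_async is defined elsewhere; this
# hypothetical example_ worker sketches the in-sample vs. future split the
# docstring above describes, using pmdarima's predict_in_sample()/predict()
# API and the same save_obj/load_obj/remove file helpers.
def example_arima_transform_async(model_path, X_path, nan_value, is_train,
                                  time_column, pred_gap, tmp_folder):
    model = load_obj(model_path)
    X = load_obj(X_path)
    if model is None:
        yhat = np.full(X.shape[0], nan_value)  # group could not be fitted
    elif is_train:
        yhat = model.predict_in_sample()  # score the periods seen at fit time
    else:
        # skip pred_gap periods between the end of train and the start of test
        yhat = model.predict(n_periods=pred_gap + X.shape[0])[pred_gap:]
    XX = pd.DataFrame(yhat, columns=['yhat'], index=X.index)
    XX_path = os.path.join(tmp_folder, "autoarima_XXt" + str(uuid.uuid4()))
    save_obj(XX, XX_path)
    remove(model_path)
    return XX_path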
def fit(self, X, y, sample_weight=None, eval_set=None, sample_weight_eval_set=None, **kwargs):
    # Get TGC and time column
    self.tgc = self.params_base.get('tgc', None)
    self.time_column = self.params_base.get('time_column', None)
    self.nan_value = np.mean(y)
    # TODO: compute the cap from average yearly growth rather than a fixed 1.5 x max
    self.cap = np.max(y) * 1.5
    self.prior = np.mean(y)
    if self.time_column is None:
        self.time_column = self.tgc[0]

    # Get the logger if it exists
    logger = None
    if self.context and self.context.experiment_id:
        logger = make_experiment_logger(
            experiment_id=self.context.experiment_id,
            tmp_dir=self.context.tmp_dir,
            experiment_tmp_dir=self.context.experiment_tmp_dir)

    loggerinfo(logger, "Start Fitting Prophet Model with params : {}".format(self.params))

    # Get temporary folders for multi process communication
    tmp_folder = self._create_tmp_folder(logger)
    n_jobs = self._get_n_jobs(logger, **kwargs)

    # Convert to pandas
    XX = X[:, self.tgc].to_pandas()
    XX = XX.replace([None, np.nan], 0)
    XX.rename(columns={self.time_column: "ds"}, inplace=True)
    # Make target available in the Frame
    XX['y'] = np.array(y)
    # Set target prior
    self.nan_value = np.mean(y)

    # Group the input by TGC (time group columns), excluding the time column itself
    tgc_wo_time = list(np.setdiff1d(self.tgc, self.time_column))
    if len(tgc_wo_time) > 0:
        XX_grp = XX.groupby(tgc_wo_time)
    else:
        XX_grp = [([None], XX)]

    self.models = {}
    self.priors = {}

    # Prepare for multiprocessing
    num_tasks = len(XX_grp)

    def processor(out, res):
        out[res[0]] = res[1]

    pool_to_use = small_job_pool
    loggerdebug(logger, "Prophet will use {} workers for fitting".format(n_jobs))
    pool = pool_to_use(logger=None, processor=processor,
                       num_tasks=num_tasks, max_workers=n_jobs)

    # Fit 1 FB Prophet model per time group
    nb_groups = len(XX_grp)
    for _i_g, (key, X) in enumerate(XX_grp):
        # Just log where we are in the fitting process
        if (_i_g + 1) % max(1, nb_groups // 20) == 0:
            loggerinfo(logger, "FB Prophet : %d%% of groups fitted" % (100 * (_i_g + 1) // nb_groups))

        X_path = os.path.join(tmp_folder, "fbprophet_X" + str(uuid.uuid4()))
        X = X.reset_index(drop=True)
        save_obj(X, X_path)
        key = key if isinstance(key, list) else [key]
        grp_hash = '_'.join(map(str, key))
        self.priors[grp_hash] = X['y'].mean()

        args = (X_path, grp_hash, tmp_folder, self.params, self.cap)
        kwargs = {}
        pool.submit_tryget(None, MyParallelProphetTransformer_fit_async,
                           args=args, kwargs=kwargs, out=self.models)
    pool.finish()

    for k, v in self.models.items():
        self.models[k] = load_obj(v) if v is not None else None
        remove(v)

    self._clean_tmp_folder(logger, tmp_folder)

    return None
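# --- Illustrative sketch, not part of the recipe ---
# MyParallelProphetTransformer_fit_async is defined elsewhere; the hypothetical
# example_ worker below sketches how such a worker could use the (X_path,
# grp_hash, tmp_folder, params, cap) arguments passed above, assuming the
# fbprophet package and the same pickle file helpers.
def example_prophet_fit_async(X_path, grp_hash, tmp_folder, params, cap):
    Prophet = getattr(importlib.import_module('fbprophet'), 'Prophet')
    X = load_obj(X_path)
    model = Prophet(growth=params.get("growth", "linear"))
    if params.get("growth") == "logistic":
        X['cap'] = cap  # logistic growth requires a saturating capacity column
    try:
        with suppress_stdout_stderr():
            model.fit(X[['ds', 'y'] + (['cap'] if 'cap' in X else [])])
    except Exception:
        model = None  # e.g. not enough rows in this group to fit
    model_path = os.path.join(tmp_folder, "fbprophet_model" + str(uuid.uuid4()))
    save_obj(model, model_path)
    remove(X_path)
    return grp_hash, model_path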
def predict(self, X: dt.Frame, **kwargs):
    """
    Uses fitted models (1 per time group) to predict the target.
    :param X: Datatable Frame containing the features
    :return: FB Prophet predictions
    """
    # Get the logger if it exists
    logger = None
    if self.context and self.context.experiment_id:
        logger = make_experiment_logger(
            experiment_id=self.context.experiment_id,
            tmp_dir=self.context.tmp_dir,
            experiment_tmp_dir=self.context.experiment_tmp_dir)

    if self.tgc is None or not all([x in X.names for x in self.tgc]):
        loggerdebug(logger, "Returning average target value predictions")
        return np.ones(X.shape[0]) * self.nan_value

    tmp_folder = self._create_tmp_folder(logger)
    n_jobs = self._get_n_jobs(logger, **kwargs)

    XX = X[:, self.tgc].to_pandas()
    XX = XX.replace([None, np.nan], 0)
    XX.rename(columns={self.time_column: "ds"}, inplace=True)
    if self.params["growth"] == "logistic":
        XX["cap"] = self.cap

    tgc_wo_time = list(np.setdiff1d(self.tgc, self.time_column))
    if len(tgc_wo_time) > 0:
        XX_grp = XX.groupby(tgc_wo_time)
    else:
        XX_grp = [([None], XX)]

    assert len(XX_grp) > 0
    num_tasks = len(XX_grp)

    def processor(out, res):
        out.append(res)

    pool_to_use = small_job_pool
    loggerdebug(logger, "Prophet will use {} workers for transform".format(n_jobs))
    pool = pool_to_use(logger=None, processor=processor,
                       num_tasks=num_tasks, max_workers=n_jobs)

    XX_paths = []
    model_paths = []
    nb_groups = len(XX_grp)
    print("Nb Groups = ", nb_groups)
    for _i_g, (key, X) in enumerate(XX_grp):
        # Log where we are in the transformation of the dataset
        if (_i_g + 1) % max(1, nb_groups // 20) == 0:
            loggerinfo(logger, "FB Prophet : %d%% of groups transformed" % (100 * (_i_g + 1) // nb_groups))

        key = key if isinstance(key, list) else [key]
        grp_hash = '_'.join(map(str, key))
        X_path = os.path.join(tmp_folder, "fbprophet_Xt" + str(uuid.uuid4()))

        # Commented for performance, uncomment for debug
        # print("prophet - transforming data of shape: %s for group: %s" % (str(X.shape), grp_hash))
        if grp_hash in self.models:
            model = self.models[grp_hash]
            model_path = os.path.join(tmp_folder, "fbprophet_modelt" + str(uuid.uuid4()))
            save_obj(model, model_path)
            save_obj(X, X_path)
            model_paths.append(model_path)

            args = (model_path, X_path, self.priors[grp_hash], tmp_folder)
            kwargs = {}
            pool.submit_tryget(None, MyParallelProphetTransformer_transform_async,
                               args=args, kwargs=kwargs, out=XX_paths)
        else:
            # Unseen groups receive the mean target as prediction
            XX = pd.DataFrame(np.full((X.shape[0], 1), self.nan_value), columns=['yhat'])
            XX.index = X.index
            save_obj(XX, X_path)
            XX_paths.append(X_path)
    pool.finish()
    XX = pd.concat((load_obj(XX_path) for XX_path in XX_paths), axis=0).sort_index()
    for p in XX_paths + model_paths:
        remove(p)

    self._clean_tmp_folder(logger, tmp_folder)

    return XX['yhat'].values
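# --- Illustrative sketch, not part of the recipe ---
# MyParallelProphetTransformer_transform_async is defined elsewhere; the
# hypothetical example_ worker below shows the natural counterpart of the
# submit above: predict 'yhat' for the group's rows, falling back to the
# group prior when no model could be fitted.
def example_prophet_transform_async(model_path, X_path, prior, tmp_folder):
    model = load_obj(model_path)
    X = load_obj(X_path)
    if model is not None:
        with suppress_stdout_stderr():
            # Prophet expects a frame with a 'ds' column (plus 'cap' for
            # logistic growth, already added by the caller) and returns 'yhat'
            XX = model.predict(X)[['yhat']]
    else:
        XX = pd.DataFrame(np.full((X.shape[0], 1), prior), columns=['yhat'])
    XX.index = X.index  # keep the original row order for the final sort_index()
    XX_path = os.path.join(tmp_folder, "fbprophet_XXt" + str(uuid.uuid4()))
    save_obj(XX, XX_path)
    remove(model_path)
    return XX_path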
def fit(self, X: dt.Frame, y: np.array = None, **kwargs):
    """
    Fits FB Prophet models (1 per time group) using historical target values contained in y.
    Model fitting is distributed over a pool of processes and uses file storage
    to share the data with the workers.
    :param X: Datatable frame containing the features
    :param y: numpy array containing the historical values of the target
    :return: self
    """
    # Get the logger if it exists
    logger = None
    if self.context and self.context.experiment_id:
        logger = make_experiment_logger(
            experiment_id=self.context.experiment_id,
            tmp_dir=self.context.tmp_dir,
            experiment_tmp_dir=self.context.experiment_tmp_dir,
            username=self.context.username,
        )

    try:
        # Set prophet_top_n via the recipe_dict variable in the config.toml file
        # eg1: recipe_dict="{'prophet_top_n': 200}"
        # eg2: recipe_dict="{'prophet_top_n':10}"
        self.top_n = config.recipe_dict['prophet_top_n']
    except KeyError:
        self.top_n = 50

    loggerinfo(logger, f"Prophet will use {self.top_n} groups as well as average target data.")

    tmp_folder = self._create_tmp_folder(logger)
    n_jobs = self._get_n_jobs(logger, **kwargs)

    # Reduce X to TGC
    tgc_wo_time = list(np.setdiff1d(self.tgc, self.time_column))
    X = X[:, self.tgc].to_pandas()

    # Fill NaNs or None
    X = X.replace([None, np.nan], 0)

    # Add the target. The label encoder is only used for classification, which we don't support...
    if self.labels is not None:
        y = LabelEncoder().fit(self.labels).transform(y)
    X['y'] = np.array(y)

    self.nan_value = X['y'].mean()

    # Change date feature name to match Prophet requirements
    X.rename(columns={self.time_column: "ds"}, inplace=True)

    # Create a general scaler now that will be used for unknown groups at prediction time
    # Can we do smarter than that?
    self.general_scaler = MinMaxScaler().fit(X[['y', 'ds']].groupby('ds').median().values)

    # Go through the groups and scale each of them
    if len(tgc_wo_time) > 0:
        X_groups = X.groupby(tgc_wo_time)
    else:
        X_groups = [([None], X)]

    self.scalers = {}
    scaled_ys = []
    print(f'{datetime.now()} Start of group scaling')

    for key, X_grp in X_groups:
        # Create dict key to store the min max scaler
        grp_hash = self.get_hash(key)
        # Scale target for current group
        self.scalers[grp_hash] = MinMaxScaler()
        y_skl = self.scalers[grp_hash].fit_transform(X_grp[['y']].values)
        # Put back in a DataFrame to keep track of original index
        y_skl_df = pd.DataFrame(y_skl, columns=['y'])
        # Debug example output: (0, 'A') (1, 4) (100, 1) (100, 1)
        # print(grp_hash, X_grp.shape, y_skl.shape, y_skl_df.shape)
        y_skl_df.index = X_grp.index
        scaled_ys.append(y_skl_df)

    print(f'{datetime.now()} End of group scaling')

    # Set the scaled target back in the original frame but keep the original values
    X['y_orig'] = X['y']
    X['y'] = pd.concat(tuple(scaled_ys), axis=0)

    # Now average the groups
    X_avg = X[['ds', 'y']].groupby('ds').mean().reset_index()

    # Send that to Prophet
    params = {
        "country_holidays": self.country_holidays,
        "monthly_seasonality": self.monthly_seasonality
    }
    mod = importlib.import_module('fbprophet')
    Prophet = getattr(mod, "Prophet")
    self.model = Prophet(yearly_seasonality=True, weekly_seasonality=True,
                         daily_seasonality=True)

    if params["country_holidays"] is not None:
        self.model.add_country_holidays(country_name=params["country_holidays"])
    if params["monthly_seasonality"]:
        self.model.add_seasonality(name='monthly', period=30.5, fourier_order=5)

    with suppress_stdout_stderr():
        self.model.fit(X_avg[['ds', 'y']])
    print(f'{datetime.now()} General Model Fitted')

    self.top_groups = None
    if len(tgc_wo_time) > 0:
        if self.top_n > 0:
            top_n_grp = X.groupby(tgc_wo_time).size().sort_values().reset_index()[tgc_wo_time].iloc[-self.top_n:].values
            self.top_groups = ['_'.join(map(str, key)) for key in top_n_grp]

    if self.top_groups:
        self.grp_models = {}
        self.priors = {}

        # Prepare for multiprocessing
        num_tasks = len(self.top_groups)

        def processor(out, res):
            out[res[0]] = res[1]

        pool_to_use = small_job_pool
        loggerinfo(logger, f"Prophet will use {n_jobs} workers for fitting.")
        loggerinfo(logger, "Prophet parameters holidays {} / monthly {}".format(
            self.country_holidays, self.monthly_seasonality))
        pool = pool_to_use(logger=None, processor=processor,
                           num_tasks=num_tasks, max_workers=n_jobs)

        # Fit 1 FB Prophet model per time group
        nb_groups = len(X_groups)

        # Put y back to its unscaled value for the top groups
        X['y'] = X['y_orig']

        for _i_g, (key, X) in enumerate(X_groups):
            # Just log where we are in the fitting process
            if (_i_g + 1) % max(1, nb_groups // 20) == 0:
                loggerinfo(logger, "FB Prophet : %d%% of groups fitted" % (100 * (_i_g + 1) // nb_groups))

            X_path = os.path.join(tmp_folder, "fbprophet_X" + str(uuid.uuid4()))
            X = X.reset_index(drop=True)
            save_obj(X, X_path)

            grp_hash = self.get_hash(key)
            if grp_hash not in self.top_groups:
                continue

            self.priors[grp_hash] = X['y'].mean()

            params = {
                "country_holidays": self.country_holidays,
                "monthly_seasonality": self.monthly_seasonality
            }
            args = (X_path, grp_hash, tmp_folder, params)
            kwargs = {}
            pool.submit_tryget(None, MyParallelProphetTransformer_fit_async,
                               args=args, kwargs=kwargs, out=self.grp_models)
        pool.finish()

        for k, v in self.grp_models.items():
            self.grp_models[k] = load_obj(v) if v is not None else None
            remove(v)

    self._clean_tmp_folder(logger, tmp_folder)

    return self
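# --- Illustrative sketch, not part of the recipe ---
# The top-n selection above keeps the n groups with the most rows. A tiny,
# self-contained illustration with hypothetical data:
def example_top_n_selection():
    # 'C' has 3 rows, 'A' has 2, 'B' has 1
    df = pd.DataFrame({'store': ['A', 'A', 'B', 'C', 'C', 'C']})
    top_n = 2
    top = df.groupby(['store']).size().sort_values().reset_index()[['store']].iloc[-top_n:].values
    # top -> array([['A'], ['C']]): the two biggest groups are kept, 'B' is dropped
    return ['_'.join(map(str, key)) for key in top]  # ['A', 'C']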
def transform(self, X: dt.Frame, **kwargs):
    """
    Uses fitted models (1 per time group) to predict the target.
    :param X: Datatable Frame containing the features
    :return: FB Prophet predictions
    """
    # Get the logger if it exists
    logger = None
    if self.context and self.context.experiment_id:
        logger = make_experiment_logger(
            experiment_id=self.context.experiment_id,
            tmp_dir=self.context.tmp_dir,
            experiment_tmp_dir=self.context.experiment_tmp_dir)

    tmp_folder = self._create_tmp_folder(logger)
    n_jobs = self._get_n_jobs(logger, **kwargs)

    # Reduce X to TGC
    tgc_wo_time = list(np.setdiff1d(self.tgc, self.time_column))
    X = X[:, self.tgc].to_pandas()

    # Fill NaNs or None
    X = X.replace([None, np.nan], 0)

    # Change date feature name to match Prophet requirements
    X.rename(columns={self.time_column: "ds"}, inplace=True)

    # Predict y using unique dates
    X_time = X[['ds']].groupby('ds').first().reset_index()
    with suppress_stdout_stderr():
        y_avg = self.model.predict(X_time)[['ds', 'yhat']]

    # Prophet converts the date column to datetime, so align on sorted dates to merge back
    X_time.sort_values('ds', inplace=True)
    X_time['yhat'] = y_avg['yhat']
    X_time.sort_index(inplace=True)

    # Merge back into the original frame on 'ds'
    # pd.merge wipes the index, so keep it and restore it afterwards
    indices = X.index
    X = pd.merge(left=X, right=X_time[['ds', 'yhat']], on='ds', how='left')
    X.index = indices

    # Go through the groups and recover the scaled target for known groups
    if len(tgc_wo_time) > 0:
        X_groups = X.groupby(tgc_wo_time)
    else:
        X_groups = [([None], X)]

    inverted_ys = []
    for key, X_grp in X_groups:
        grp_hash = self.get_hash(key)
        # Invert the target scaling for the current group
        if grp_hash in self.scalers.keys():
            inverted_y = self.scalers[grp_hash].inverse_transform(X_grp[['yhat']])
        else:
            inverted_y = self.general_scaler.inverse_transform(X_grp[['yhat']])
        # Put back in a DataFrame to keep track of original index
        inverted_df = pd.DataFrame(inverted_y, columns=['yhat'])
        inverted_df.index = X_grp.index
        inverted_ys.append(inverted_df)

    XX_general = pd.concat(tuple(inverted_ys), axis=0).sort_index()

    if self.top_groups:
        # Go through the groups and predict only the top ones
        XX_paths = []
        model_paths = []

        def processor(out, res):
            out.append(res)

        num_tasks = len(self.top_groups)
        pool_to_use = small_job_pool
        pool = pool_to_use(logger=None, processor=processor,
                           num_tasks=num_tasks, max_workers=n_jobs)

        nb_groups = len(X_groups)
        for _i_g, (key, X_grp) in enumerate(X_groups):
            # Just log where we are in the prediction process
            if (_i_g + 1) % max(1, nb_groups // 20) == 0:
                loggerinfo(logger, "FB Prophet : %d%% of groups predicted" % (100 * (_i_g + 1) // nb_groups))

            # Create dict key to retrieve the fitted models
            grp_hash = self.get_hash(key)
            X_path = os.path.join(tmp_folder, "fbprophet_Xt" + str(uuid.uuid4()))

            if grp_hash not in self.top_groups:
                # Unseen groups
                XX = pd.DataFrame(np.full((X_grp.shape[0], 1), np.nan), columns=['yhat'])
                XX.index = X_grp.index
                save_obj(XX, X_path)
                XX_paths.append(X_path)
                continue

            if self.grp_models[grp_hash] is None:
                # Known groups that did not have enough train data to be fitted
                XX = pd.DataFrame(np.full((X_grp.shape[0], 1), np.nan), columns=['yhat'])
                XX.index = X_grp.index
                save_obj(XX, X_path)
                XX_paths.append(X_path)
                continue

            model = self.grp_models[grp_hash]
            model_path = os.path.join(tmp_folder, "fbprophet_modelt" + str(uuid.uuid4()))
            save_obj(model, model_path)
            save_obj(X_grp, X_path)
            model_paths.append(model_path)

            args = (model_path, X_path, self.priors[grp_hash], tmp_folder)
            kwargs = {}
            pool.submit_tryget(None, MyParallelProphetTransformer_transform_async,
                               args=args, kwargs=kwargs, out=XX_paths)
        pool.finish()

        XX_top_groups = pd.concat((load_obj(XX_path) for XX_path in XX_paths), axis=0).sort_index()
        for p in XX_paths + model_paths:
            remove(p)

    self._clean_tmp_folder(logger, tmp_folder)

    features_df = pd.DataFrame()
    features_df[self.display_name + '_GrpAvg'] = XX_general['yhat']

    if self.top_groups:
        features_df[self.display_name + f'_Top{self.top_n}Grp'] = XX_top_groups['yhat']

    self._output_feature_names = list(features_df.columns)
    self._feature_desc = list(features_df.columns)

    return features_df
def transform(self, X: dt.Frame, **kwargs):
    """
    Uses fitted models (1 per time group) to predict the target.
    :param X: Datatable Frame containing the features
    :return: FB Prophet predictions
    """
    # Get the logger if it exists
    logger = self.get_experiment_logger()

    tmp_folder = self._create_tmp_folder(logger)
    n_jobs = self._get_n_jobs(logger, **kwargs)

    # Reduce X to TGC
    tgc_wo_time = list(np.setdiff1d(self.tgc, self.time_column))

    # Change date feature name to match Prophet requirements
    X = self.convert_to_prophet(X)

    y_predictions = self.predict_with_average_model(X, tgc_wo_time)
    y_predictions.columns = ['average_pred']

    # Go through the time group columns, one at a time
    for grp_col in tgc_wo_time:
        # Get the dates to be predicted for each group in this column
        X_groups = X[['ds', grp_col]].groupby(grp_col)

        # Go through the groups and predict them
        XX_paths = []
        model_paths = []

        def processor(out, res):
            out.append(res)

        num_tasks = len(X_groups)
        pool_to_use = small_job_pool
        pool = pool_to_use(logger=None, processor=processor,
                           num_tasks=num_tasks, max_workers=n_jobs)

        for _i_g, (key, X_grp) in enumerate(X_groups):
            # Just log where we are in the prediction process
            if (_i_g + 1) % max(1, num_tasks // 20) == 0:
                loggerinfo(logger, "FB Prophet : %d%% of groups predicted" % (100 * (_i_g + 1) // num_tasks))

            # Create dict key to retrieve the fitted models
            grp_hash = self.get_hash(key)
            X_path = os.path.join(tmp_folder, "fbprophet_Xt" + str(uuid.uuid4()))

            if grp_hash not in self.grp_models[grp_col]:
                # Unseen groups
                XX = pd.DataFrame(np.full((X_grp.shape[0], 1), np.nan), columns=['yhat'])
                XX.index = X_grp.index
                save_obj(XX, X_path)
                XX_paths.append(X_path)
                continue

            if self.grp_models[grp_col][grp_hash] is None:
                # Known groups but not enough train data to fit a model
                XX = pd.DataFrame(np.full((X_grp.shape[0], 1), np.nan), columns=['yhat'])
                XX.index = X_grp.index
                save_obj(XX, X_path)
                XX_paths.append(X_path)
                continue

            model = self.grp_models[grp_col][grp_hash]
            model_path = os.path.join(tmp_folder, "fbprophet_modelt" + str(uuid.uuid4()))
            save_obj(model, model_path)
            save_obj(X_grp, X_path)
            model_paths.append(model_path)

            args = (model_path, X_path, self.priors[grp_col][grp_hash], tmp_folder)
            kwargs = {}
            pool.submit_tryget(None, MyProphetOnSingleGroupsTransformer_transform_async,
                               args=args, kwargs=kwargs, out=XX_paths)
        pool.finish()

        y_predictions[f'{grp_col}_pred'] = pd.concat(
            (load_obj(XX_path) for XX_path in XX_paths), axis=0).sort_index()

        for p in XX_paths + model_paths:
            remove(p)

    # Now we can invert the scaling, but first get rid of the NaNs
    for grp_col in tgc_wo_time:
        # Add the time group to the predictions, it will be used to invert the scaling
        y_predictions[grp_col] = X[grp_col]
        # Fill NaNs with the average model predictions
        y_predictions[f'{grp_col}_pred'] = y_predictions[f'{grp_col}_pred'].fillna(y_predictions['average_pred'])

    # Go through the groups and recover the scaled target for known groups
    if len(tgc_wo_time) > 0:
        X_groups = y_predictions.groupby(tgc_wo_time)
    else:
        X_groups = [([None], y_predictions)]

    for _f in [f'{grp_col}_pred' for grp_col in tgc_wo_time] + ['average_pred']:
        inverted_ys = []
        for key, X_grp in X_groups:
            grp_hash = self.get_hash(key)
            # Invert the target scaling for the current group
            if grp_hash in self.scalers.keys():
                inverted_y = self.scalers[grp_hash].inverse_transform(X_grp[[_f]])
            else:
                inverted_y = self.general_scaler.inverse_transform(X_grp[[_f]])
            # Put back in a DataFrame to keep track of original index
            inverted_df = pd.DataFrame(inverted_y, columns=[_f])
            inverted_df.index = X_grp.index
            inverted_ys.append(inverted_df)
        y_predictions[_f] = pd.concat(tuple(inverted_ys), axis=0).sort_index()[_f]

    self._clean_tmp_folder(logger, tmp_folder)

    y_predictions.drop(tgc_wo_time, axis=1, inplace=True)

    self._output_feature_names = [f'{self.display_name}_{_f}' for _f in y_predictions]
    self._feature_desc = [f'{self.display_name}_{_f}' for _f in y_predictions]

    return y_predictions
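# --- Illustrative sketch, not part of the recipe ---
# The per-group scaling relies on MinMaxScaler being invertible: targets are
# squeezed into [0, 1] at fit time and mapped back after prediction. A tiny,
# self-contained round trip:
def example_scaling_round_trip():
    from sklearn.preprocessing import MinMaxScaler
    y = np.array([[10.0], [20.0], [40.0]])
    skl = MinMaxScaler()
    y_scaled = skl.fit_transform(y)           # [[0.], [0.333...], [1.]]
    y_back = skl.inverse_transform(y_scaled)  # recovers [[10.], [20.], [40.]]
    return y_back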
def fit(self, X, y, sample_weight=None, eval_set=None, sample_weight_eval_set=None, **kwargs):
    # Get TGC and time column
    self.tgc = self.params_base.get('tgc', None)
    self.time_column = self.params_base.get('time_column', None)
    self.nan_value = np.mean(y)
    # TODO: compute the cap from average yearly growth rather than a fixed 1.5 x max
    self.cap = np.max(y) * 1.5
    self.prior = np.mean(y)
    if self.time_column is None:
        self.time_column = self.tgc[0]

    # Get the logger if it exists
    logger = None
    if self.context and self.context.experiment_id:
        logger = make_experiment_logger(
            experiment_id=self.context.experiment_id,
            tmp_dir=self.context.tmp_dir,
            experiment_tmp_dir=self.context.experiment_tmp_dir)

    loggerinfo(logger, "Start Fitting Prophet Model with params : {}".format(self.params))

    try:
        # Set prophet_top_n via the recipe_dict variable in the config.toml file
        # eg1: recipe_dict="{'prophet_top_n': 200}"
        # eg2: recipe_dict="{'prophet_top_n':10}"
        self.top_n = config.recipe_dict['prophet_top_n']
    except KeyError:
        self.top_n = 50

    loggerinfo(logger, f"Prophet will use {self.top_n} groups as well as average target data.")

    # Get temporary folders for multi process communication
    tmp_folder = self._create_tmp_folder(logger)
    n_jobs = self._get_n_jobs(logger, **kwargs)

    # Reduce X to TGC
    tgc_wo_time = list(np.setdiff1d(self.tgc, self.time_column))
    X = X[:, self.tgc].to_pandas()

    # Fill NaNs or None
    X = X.replace([None, np.nan], 0)

    # Add the target. The label encoder is only used for classification, which we don't support...
    if self.labels is not None:
        y = LabelEncoder().fit(self.labels).transform(y)
    X['y'] = np.array(y)

    self.nan_value = X['y'].mean()

    # Change date feature name to match Prophet requirements
    X.rename(columns={self.time_column: "ds"}, inplace=True)

    # Create a general scaler now that will be used for unknown groups at prediction time
    # Can we do smarter than that?
    general_scaler = MinMaxScaler().fit(X[['y', 'ds']].groupby('ds').median().values)

    # Go through the groups and scale each of them
    if len(tgc_wo_time) > 0:
        X_groups = X.groupby(tgc_wo_time)
    else:
        X_groups = [([None], X)]

    scalers = {}
    scaled_ys = []

    print('Number of groups : ', len(X_groups))
    for g in tgc_wo_time:
        print(f'Number of groups in {g} groups : {X[g].unique().shape}')

    for key, X_grp in X_groups:
        # Create dict key to store the min max scaler
        grp_hash = self.get_hash(key)
        # Scale target for current group
        scalers[grp_hash] = MinMaxScaler()
        y_skl = scalers[grp_hash].fit_transform(X_grp[['y']].values)
        # Put back in a DataFrame to keep track of original index
        y_skl_df = pd.DataFrame(y_skl, columns=['y'])
        y_skl_df.index = X_grp.index
        scaled_ys.append(y_skl_df)

    # Set the scaled target back in the original frame but keep the original values
    X['y_orig'] = X['y']
    X['y'] = pd.concat(tuple(scaled_ys), axis=0)

    # Now average the groups
    X_avg = X[['ds', 'y']].groupby('ds').mean().reset_index()

    # Send that to Prophet
    mod = importlib.import_module('fbprophet')
    Prophet = getattr(mod, "Prophet")
    nrows = X[['ds', 'y']].shape[0]
    n_changepoints = max(1, int(nrows * (2 / 3)))
    if n_changepoints < 25:
        model = Prophet(yearly_seasonality=True, weekly_seasonality=True,
                        daily_seasonality=True, n_changepoints=n_changepoints)
    else:
        model = Prophet(yearly_seasonality=True, weekly_seasonality=True,
                        daily_seasonality=True)

    if self.params["country_holidays"] is not None:
        model.add_country_holidays(country_name=self.params["country_holidays"])
    if self.params["monthly_seasonality"]:
        model.add_seasonality(name='monthly', period=30.5,
                              fourier_order=self.params["monthly_seasonality"])
    if self.params["quarterly_seasonality"]:
        model.add_seasonality(name='quarterly', period=92,
                              fourier_order=self.params["quarterly_seasonality"])

    with suppress_stdout_stderr():
        model.fit(X_avg[['ds', 'y']])

    top_groups = None
    if len(tgc_wo_time) > 0:
        if self.top_n > 0:
            top_n_grp = X.groupby(tgc_wo_time).size().sort_values().reset_index()[tgc_wo_time].iloc[-self.top_n:].values
            top_groups = ['_'.join(map(str, key)) for key in top_n_grp]

    grp_models = {}
    priors = {}
    if top_groups:
        # Prepare for multiprocessing
        num_tasks = len(top_groups)

        def processor(out, res):
            out[res[0]] = res[1]

        pool_to_use = small_job_pool
        loggerinfo(logger, f"Prophet will use {n_jobs} workers for fitting.")
        pool = pool_to_use(logger=None, processor=processor,
                           num_tasks=num_tasks, max_workers=n_jobs)

        # Fit 1 FB Prophet model per time group
        nb_groups = len(X_groups)

        # Put y back to its unscaled value for the top groups
        X['y'] = X['y_orig']

        for _i_g, (key, X) in enumerate(X_groups):
            # Just log where we are in the fitting process
            if (_i_g + 1) % max(1, nb_groups // 20) == 0:
                loggerinfo(logger, "FB Prophet : %d%% of groups fitted" % (100 * (_i_g + 1) // nb_groups))

            X_path = os.path.join(tmp_folder, "fbprophet_X" + str(uuid.uuid4()))
            X = X.reset_index(drop=True)
            save_obj(X, X_path)

            grp_hash = self.get_hash(key)
            if grp_hash not in top_groups:
                continue

            priors[grp_hash] = X['y'].mean()

            args = (X_path, grp_hash, tmp_folder, self.params, self.cap)
            kwargs = {}
            pool.submit_tryget(None, MyParallelProphetTransformer_fit_async,
                               args=args, kwargs=kwargs, out=grp_models)
        pool.finish()

        for k, v in grp_models.items():
            grp_models[k] = load_obj(v) if v is not None else None
            remove(v)

    self._clean_tmp_folder(logger, tmp_folder)

    self.set_model_properties(
        model={
            'avg': model,
            'group': grp_models,
            'priors': priors,
            'topgroups': top_groups,
            'skl': scalers,
            'gen_scaler': general_scaler
        },
        features=self.tgc,  # Prophet uses time and time groups
        importances=np.ones(len(self.tgc)),
        iterations=-1  # Does not have iterations
    )

    return None
def fit(self, X: dt.Frame, y: np.array = None, **kwargs):
    """
    Fits FB Prophet models (1 per time group) using historical target values contained in y.
    Model fitting is distributed over a pool of processes and uses file storage
    to share the data with the workers.
    :param X: Datatable frame containing the features
    :param y: numpy array containing the historical values of the target
    :return: self
    """
    # Get the logger if it exists
    logger = self.get_experiment_logger()

    loggerinfo(logger, "Prophet will use individual groups as well as average target data.")

    tmp_folder = self._create_tmp_folder(logger)
    n_jobs = self._get_n_jobs(logger, **kwargs)

    # Reduce X to TGC
    tgc_wo_time = list(np.setdiff1d(self.tgc, self.time_column))
    X = self.convert_to_prophet(X)

    # Add the target. The label encoder is only used for classification, which we don't support...
    if self.labels is not None:
        y = LabelEncoder().fit(self.labels).transform(y)
    X['y'] = np.array(y)

    self.prior_value = X['y'].mean()

    self.general_scaler = self.fit_scaler_to_median_target(X)

    X = self.scale_target_for_each_time_group(X, tgc_wo_time)

    self.avg_model = self.fit_prophet_model_on_average_target(X)

    # Go through the individual time group columns and create avg models
    self.grp_models = {}
    self.priors = {}
    for grp_col in tgc_wo_time:
        self.grp_models[grp_col] = {}
        self.priors[grp_col] = {}

        X_groups = X[['ds', 'y', grp_col]].groupby(grp_col)

        nb_groups = len(X_groups)

        def processor(out, res):
            out[res[0]] = res[1]

        pool_to_use = small_job_pool
        loggerinfo(logger, f"Prophet will use {n_jobs} workers for fitting.")
        loggerinfo(logger, "Prophet parameters holidays {} / monthly {}".format(
            self.country_holidays, self.monthly_seasonality))
        pool = pool_to_use(logger=None, processor=processor,
                           num_tasks=nb_groups, max_workers=n_jobs)

        for _i_g, (key, X_grp) in enumerate(X_groups):
            # Just log where we are in the fitting process
            if (_i_g + 1) % max(1, nb_groups // 20) == 0:
                loggerinfo(logger, "FB Prophet : %d%% of groups fitted" % (100 * (_i_g + 1) // nb_groups))

            X_path = os.path.join(tmp_folder, "fbprophet_X" + str(uuid.uuid4()))

            # Save target average for current group
            grp_hash = self.get_hash(key)
            self.priors[grp_col][grp_hash] = X_grp['y'].mean()

            # Average by date
            X_grp_avg = X_grp.groupby('ds')['y'].mean().reset_index()

            save_obj(X_grp_avg, X_path)

            params = {
                "country_holidays": self.country_holidays,
                "monthly_seasonality": self.monthly_seasonality
            }
            args = (X_path, grp_hash, tmp_folder, params)
            kwargs = {}
            pool.submit_tryget(None, MyProphetOnSingleGroupsTransformer_fit_async,
                               args=args, kwargs=kwargs, out=self.grp_models[grp_col])
        pool.finish()

        for k, v in self.grp_models[grp_col].items():
            self.grp_models[grp_col][k] = load_obj(v) if v is not None else None
            remove(v)

    self._clean_tmp_folder(logger, tmp_folder)

    return self
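# --- Illustrative sketch, not part of the recipe ---
# Each per-column group above is collapsed to one row per date before being
# shipped to Prophet. A tiny, self-contained illustration with hypothetical data:
def example_average_by_date():
    df = pd.DataFrame({'ds': ['2020-01-01', '2020-01-01', '2020-01-02'],
                       'y': [1.0, 3.0, 5.0]})
    # 2020-01-01 -> y=2.0 (mean of 1.0 and 3.0), 2020-01-02 -> y=5.0
    return df.groupby('ds')['y'].mean().reset_index()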
def fit(self, X: dt.Frame, y: np.array = None):
    """
    Fits ARIMA models (1 per time group) using historical target values contained in y.
    Model fitting is distributed over a pool of processes and uses file storage
    to share the data with the workers.
    :param X: Datatable frame containing the features
    :param y: numpy array containing the historical values of the target
    :return: self
    """
    # Get the logger if it exists
    logger = None
    tmp_folder = str(uuid.uuid4()) + "_arima_folder/"
    if self.context and self.context.experiment_id:
        logger = make_experiment_logger(
            experiment_id=self.context.experiment_id,
            tmp_dir=self.context.tmp_dir,
            experiment_tmp_dir=self.context.experiment_tmp_dir
        )
        tmp_folder = self.context.experiment_tmp_dir + "/" + str(uuid.uuid4()) + "_arima_folder/"

    # Create a temp folder to store the files used during the multiprocessing experiment.
    # This temp folder will be removed at the end of the process.
    loggerinfo(logger, "Arima temp folder {}".format(tmp_folder))
    try:
        os.mkdir(tmp_folder)
    except PermissionError:
        # This should not occur, so log a warning
        loggerwarning(logger, "Arima was denied temp folder creation rights")
        tmp_folder = temporary_files_path + "/" + str(uuid.uuid4()) + "_arima_folder/"
        os.mkdir(tmp_folder)
    except FileExistsError:
        # We should never be here since the temp dir name is expected to be unique
        loggerwarning(logger, "Arima temp folder already exists")
        tmp_folder = self.context.experiment_tmp_dir + "/" + str(uuid.uuid4()) + "_arima_folder/"
        os.mkdir(tmp_folder)
    except:
        # Revert to the temporary files path
        tmp_folder = temporary_files_path + "/" + str(uuid.uuid4()) + "_arima_folder/"
        os.mkdir(tmp_folder)

    # Import the ARIMA python module (fails fast here if pmdarima is missing;
    # the actual fitting happens in the worker processes)
    pm = importlib.import_module('pmdarima')

    # Init models
    self.models = {}

    # Convert to pandas
    X = X.to_pandas()
    XX = X[self.tgc].copy()
    XX['y'] = np.array(y)
    self.nan_value = np.mean(y)
    self.ntrain = X.shape[0]

    # Group the input by TGC (time group columns), excluding the time column itself
    tgc_wo_time = list(np.setdiff1d(self.tgc, self.time_column))
    if len(tgc_wo_time) > 0:
        XX_grp = XX.groupby(tgc_wo_time)
    else:
        XX_grp = [([None], XX)]

    # Prepare for multiprocessing
    num_tasks = len(XX_grp)

    def processor(out, res):
        out[res[0]] = res[1]

    pool_to_use = small_job_pool

    if hasattr(self, "params_base"):
        max_workers = self.params_base['n_jobs']
    else:
        loggerinfo(logger, "Custom Recipe does not have a params_base attribute")
        # Beware not to use the disable_gpus keyword here, it looks like cython does not like it
        # max_workers = get_max_workers(True)
        # Just set the default to 2
        max_workers = 2

    loggerinfo(logger, "Arima will use {} workers for parallel processing".format(max_workers))
    pool = pool_to_use(
        logger=None,
        processor=processor,
        num_tasks=num_tasks,
        max_workers=max_workers
    )

    # Build 1 ARIMA model per time group
    nb_groups = len(XX_grp)
    for _i_g, (key, X) in enumerate(XX_grp):
        # Just say where we are in the fitting process
        if (_i_g + 1) % max(1, nb_groups // 20) == 0:
            loggerinfo(logger, "Auto ARIMA : %d%% of groups fitted" % (100 * (_i_g + 1) // nb_groups))

        X_path = os.path.join(tmp_folder, "autoarima_X" + str(uuid.uuid4()))
        X = X.reset_index(drop=True)
        save_obj(X, X_path)
        key = key if isinstance(key, list) else [key]
        grp_hash = '_'.join(map(str, key))

        args = (X_path, grp_hash, self.time_column, tmp_folder)
        kwargs = {}
        pool.submit_tryget(None, MyParallelAutoArimaTransformer_fit_async,
                           args=args, kwargs=kwargs, out=self.models)
    pool.finish()

    for k, v in self.models.items():
        self.models[k] = load_obj(v) if v is not None else None
        remove(v)

    try:
        shutil.rmtree(tmp_folder)
        loggerinfo(logger, "Arima cleaned up temporary file folder.")
    except:
        loggerwarning(logger, "Arima could not delete the temporary file folder.")

    return self
def transform(self, X: dt.Frame):
    """
    Uses the fitted models (1 per time group) to predict the target.
    If self.is_train exists, we are doing in-sample predictions;
    if it does not, ARIMA is used to predict the future.
    :param X: Datatable Frame containing the features
    :return: ARIMA predictions
    """
    # Get the logger if it exists
    logger = None
    tmp_folder = str(uuid.uuid4()) + "_arima_folder/"
    if self.context and self.context.experiment_id:
        logger = make_experiment_logger(
            experiment_id=self.context.experiment_id,
            tmp_dir=self.context.tmp_dir,
            experiment_tmp_dir=self.context.experiment_tmp_dir
        )
        tmp_folder = self.context.experiment_tmp_dir + "/" + str(uuid.uuid4()) + "_arima_folder/"

    # Create a temp folder to store the files used during the multiprocessing experiment.
    # This temp folder will be removed at the end of the process.
    loggerinfo(logger, "Arima temp folder {}".format(tmp_folder))
    try:
        os.mkdir(tmp_folder)
    except PermissionError:
        # This should not occur, so log a warning
        loggerwarning(logger, "Arima was denied temp folder creation rights")
        tmp_folder = temporary_files_path + "/" + str(uuid.uuid4()) + "_arima_folder/"
        os.mkdir(tmp_folder)
    except FileExistsError:
        # We should never be here since the temp dir name is expected to be unique
        loggerwarning(logger, "Arima temp folder already exists")
        tmp_folder = self.context.experiment_tmp_dir + "/" + str(uuid.uuid4()) + "_arima_folder/"
        os.mkdir(tmp_folder)
    except:
        # Revert to the temporary files path
        loggerwarning(logger, "Arima defaulted to create folder inside tmp directory.")
        tmp_folder = temporary_files_path + "/" + str(uuid.uuid4()) + "_arima_folder/"
        os.mkdir(tmp_folder)

    X = X.to_pandas()
    XX = X[self.tgc].copy()
    tgc_wo_time = list(np.setdiff1d(self.tgc, self.time_column))
    if len(tgc_wo_time) > 0:
        XX_grp = XX.groupby(tgc_wo_time)
    else:
        XX_grp = [([None], XX)]

    assert len(XX_grp) > 0
    num_tasks = len(XX_grp)

    def processor(out, res):
        out.append(res)

    pool_to_use = small_job_pool
    pool = pool_to_use(logger=None, processor=processor, num_tasks=num_tasks)

    XX_paths = []
    model_paths = []
    nb_groups = len(XX_grp)
    for _i_g, (key, X) in enumerate(XX_grp):
        # Just say where we are in the transform process
        if (_i_g + 1) % max(1, nb_groups // 20) == 0:
            loggerinfo(logger, "Auto ARIMA : %d%% of groups transformed" % (100 * (_i_g + 1) // nb_groups))

        # Create time group key to store and retrieve fitted models
        key = key if isinstance(key, list) else [key]
        grp_hash = '_'.join(map(str, key))

        # Create file path to store data and pass it to the transform pool
        X_path = os.path.join(tmp_folder, "autoarima_Xt" + str(uuid.uuid4()))

        # Commented for performance, uncomment for debug
        # print("ARIMA - transforming data of shape: %s for group: %s" % (str(X.shape), grp_hash))
        if grp_hash in self.models:
            model = self.models[grp_hash]
            model_path = os.path.join(tmp_folder, "autoarima_modelt" + str(uuid.uuid4()))
            save_obj(model, model_path)
            save_obj(X, X_path)
            model_paths.append(model_path)

            args = (model_path, X_path, self.nan_value, hasattr(self, 'is_train'),
                    self.time_column, tmp_folder)
            kwargs = {}
            pool.submit_tryget(None, MyParallelAutoArimaTransformer_transform_async,
                               args=args, kwargs=kwargs, out=XX_paths)
        else:
            # Unseen groups: don't go through the pool, predict the mean target instead
            XX = pd.DataFrame(np.full((X.shape[0], 1), self.nan_value), columns=['yhat'])
            # Sync indices
            XX.index = X.index
            save_obj(XX, X_path)
            XX_paths.append(X_path)
    pool.finish()
    XX = pd.concat((load_obj(XX_path) for XX_path in XX_paths), axis=0).sort_index()
    for p in XX_paths + model_paths:
        remove(p)

    try:
        shutil.rmtree(tmp_folder)
        loggerinfo(logger, "Arima cleaned up temporary file folder.")
    except:
        loggerwarning(logger, "Arima could not delete the temporary file folder.")

    return XX
def fit(self, X: dt.Frame, y: np.array = None, **kwargs):
    """
    Fits FB Prophet models (1 per time group) using historical target values contained in y.
    Model fitting is distributed over a pool of processes and uses file storage
    to share the data with the workers.
    :param X: Datatable frame containing the features
    :param y: numpy array containing the historical values of the target
    :return: self
    """
    # Get the logger if it exists
    logger = None
    if self.context and self.context.experiment_id:
        logger = make_experiment_logger(
            experiment_id=self.context.experiment_id,
            tmp_dir=self.context.tmp_dir,
            experiment_tmp_dir=self.context.experiment_tmp_dir
        )

    tmp_folder = self._create_tmp_folder(logger)
    n_jobs = self._get_n_jobs(logger, **kwargs)

    # Convert to pandas
    XX = X[:, self.tgc].to_pandas()
    XX = XX.replace([None, np.nan], 0)
    XX.rename(columns={self.time_column: "ds"}, inplace=True)

    # Make sure labels are numeric
    if self.labels is not None:
        y = LabelEncoder().fit(self.labels).transform(y)
    XX['y'] = np.array(y)

    # Set target prior
    self.nan_value = np.mean(y)

    # Group the input by TGC (time group columns), excluding the time column itself
    tgc_wo_time = list(np.setdiff1d(self.tgc, self.time_column))
    if len(tgc_wo_time) > 0:
        XX_grp = XX.groupby(tgc_wo_time)
    else:
        XX_grp = [([None], XX)]

    self.models = {}
    self.priors = {}

    # Prepare for multiprocessing
    num_tasks = len(XX_grp)

    def processor(out, res):
        out[res[0]] = res[1]

    pool_to_use = small_job_pool
    loggerinfo(logger, "Prophet will use {} workers for fitting".format(n_jobs))
    pool = pool_to_use(
        logger=None,
        processor=processor,
        num_tasks=num_tasks,
        max_workers=n_jobs
    )

    # Fit 1 FB Prophet model per time group
    nb_groups = len(XX_grp)
    for _i_g, (key, X) in enumerate(XX_grp):
        # Just log where we are in the fitting process
        if (_i_g + 1) % max(1, nb_groups // 20) == 0:
            loggerinfo(logger, "FB Prophet : %d%% of groups fitted" % (100 * (_i_g + 1) // nb_groups))

        X_path = os.path.join(tmp_folder, "fbprophet_X" + str(uuid.uuid4()))
        X = X.reset_index(drop=True)
        save_obj(X, X_path)
        key = key if isinstance(key, list) else [key]
        grp_hash = '_'.join(map(str, key))
        self.priors[grp_hash] = X['y'].mean()

        args = (X_path, grp_hash, tmp_folder)
        kwargs = {}
        pool.submit_tryget(None, MyParallelProphetTransformer_fit_async,
                           args=args, kwargs=kwargs, out=self.models)
    pool.finish()

    for k, v in self.models.items():
        self.models[k] = load_obj(v) if v is not None else None
        remove(v)

    self._clean_tmp_folder(logger, tmp_folder)

    return self
def transform(self, X: dt.Frame):
    """
    Uses the fitted models (1 per time group) to predict the target.
    :param X: Datatable Frame containing the features
    :return: FB Prophet predictions
    """
    XX = X[:, self.tgc].to_pandas()
    XX = XX.replace([None, np.nan], 0)
    XX.rename(columns={self.time_column: "ds"}, inplace=True)
    tgc_wo_time = list(np.setdiff1d(self.tgc, self.time_column))
    if len(tgc_wo_time) > 0:
        XX_grp = XX.groupby(tgc_wo_time)
    else:
        XX_grp = [([None], XX)]

    assert len(XX_grp) > 0
    num_tasks = len(XX_grp)

    def processor(out, res):
        out.append(res)

    pool_to_use = small_job_pool
    pool = pool_to_use(logger=None, processor=processor, num_tasks=num_tasks)

    XX_paths = []
    model_paths = []
    nb_groups = len(XX_grp)
    print("Nb Groups = ", nb_groups)
    for _i_g, (key, X) in enumerate(XX_grp):
        if (_i_g + 1) % max(1, nb_groups // 20) == 0:
            print(100 * (_i_g + 1) // nb_groups, "% of Groups Transformed")

        key = key if isinstance(key, list) else [key]
        grp_hash = '_'.join(map(str, key))
        X_path = os.path.join(temporary_files_path, "fbprophet_Xt" + str(uuid.uuid4()))

        # Commented for performance, uncomment for debug
        # print("prophet - transforming data of shape: %s for group: %s" % (str(X.shape), grp_hash))
        if grp_hash in self.models:
            model = self.models[grp_hash]
            model_path = os.path.join(temporary_files_path, "fbprophet_modelt" + str(uuid.uuid4()))
            save_obj(model, model_path)
            save_obj(X, X_path)
            model_paths.append(model_path)

            args = (model_path, X_path, self.nan_value)
            kwargs = {}
            pool.submit_tryget(None, MyParallelProphetTransformer_transform_async,
                               args=args, kwargs=kwargs, out=XX_paths)
        else:
            # Unseen groups receive the mean target as prediction
            XX = pd.DataFrame(np.full((X.shape[0], 1), self.nan_value), columns=['yhat'])
            XX.index = X.index
            save_obj(XX, X_path)
            XX_paths.append(X_path)
    pool.finish()
    XX = pd.concat((load_obj(XX_path) for XX_path in XX_paths), axis=0).sort_index()
    for p in XX_paths + model_paths:
        remove(p)

    return XX