def _create_tmp_folder(self, logger):
    # Create a temp folder to store files used during the multiprocessing experiment
    # This temp folder will be removed at the end of the process
    # Set the default value without context available (required to pass acceptance test)
    tmp_folder = str(uuid.uuid4()) + "_prophet_folder/"
    # Make a real tmp folder when experiment is available
    if self.context and self.context.experiment_id:
        tmp_folder = self.context.experiment_tmp_dir + "/" + str(uuid.uuid4()) + "_prophet_folder/"

    # Now let's try to create that folder
    try:
        os.mkdir(tmp_folder)
    except PermissionError:
        # This should not occur, so log a warning
        loggerwarning(logger, "Prophet was denied temp folder creation rights")
        tmp_folder = temporary_files_path + "/" + str(uuid.uuid4()) + "_prophet_folder/"
        os.mkdir(tmp_folder)
    except FileExistsError:
        # We should never be here since the temp dir name is expected to be unique
        loggerwarning(logger, "Prophet temp folder already exists")
        tmp_folder = self.context.experiment_tmp_dir + "/" + str(uuid.uuid4()) + "_prophet_folder/"
        os.mkdir(tmp_folder)
    except:
        # Revert to temporary file path
        tmp_folder = temporary_files_path + "/" + str(uuid.uuid4()) + "_prophet_folder/"
        os.mkdir(tmp_folder)

    loggerinfo(logger, "Prophet temp folder {}".format(tmp_folder))
    return tmp_folder
def _create_tmp_folder(self, logger):
    # Create a temp folder to store files
    # Set the default value without context available (required to pass acceptance test)
    tmp_folder = os.path.join(user_dir(), "%s_GAM_model_folder" % uuid.uuid4())
    # Make a real tmp folder when experiment is available
    if self.context and self.context.experiment_id:
        tmp_folder = os.path.join(self.context.experiment_tmp_dir, "%s_GAM_model_folder" % uuid.uuid4())

    # Now let's try to create that folder
    try:
        os.mkdir(tmp_folder)
    except PermissionError:
        # This should not occur, so log a warning
        loggerwarning(logger, "GAM was denied temp folder creation rights")
        tmp_folder = os.path.join(user_dir(), "%s_GAM_model_folder" % uuid.uuid4())
        os.mkdir(tmp_folder)
    except FileExistsError:
        # We should never be here since the temp dir name is expected to be unique
        loggerwarning(logger, "GAM temp folder already exists")
        tmp_folder = os.path.join(self.context.experiment_tmp_dir, "%s_GAM_model_folder" % uuid.uuid4())
        os.mkdir(tmp_folder)
    except:
        # Revert to temporary file path
        tmp_folder = os.path.join(user_dir(), "%s_GAM_model_folder" % uuid.uuid4())
        os.mkdir(tmp_folder)

    loggerinfo(logger, "GAM temp folder {}".format(tmp_folder))
    return tmp_folder
def _clean_tmp_folder(self, logger, tmp_folder):
    try:
        shutil.rmtree(tmp_folder)
        loggerinfo(logger, "Prophet cleaned up temporary file folder.")
    except:
        loggerwarning(logger, "Prophet could not delete the temporary file folder.")
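# Minimal standalone sketch (not part of the recipes above) of the create/clean temp-folder
# pattern these helpers implement: a uniquely named working directory used while fitting in
# parallel, removed once the work is done. The base directory and names here are assumptions
# for illustration only.
import os
import shutil
import tempfile
import uuid


def create_tmp_folder(base_dir=None):
    # Unique folder name so concurrent fits never collide
    base_dir = base_dir or tempfile.gettempdir()
    tmp_folder = os.path.join(base_dir, "%s_example_folder" % uuid.uuid4())
    os.makedirs(tmp_folder, exist_ok=True)
    return tmp_folder


def clean_tmp_folder(tmp_folder):
    # Best-effort cleanup; ignore errors if the folder is already gone
    shutil.rmtree(tmp_folder, ignore_errors=True)


if __name__ == "__main__":
    folder = create_tmp_folder()
    try:
        pass  # ... save intermediate model/data files here ...
    finally:
        clean_tmp_folder(folder)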
def fit(self, X, y, sample_weight=None, eval_set=None, sample_weight_eval_set=None, **kwargs):
    # system thing, doesn't need to be set in default or mutate,
    # just set at runtime in fit, into self.params so it is visible
    self.params["n_jobs"] = self.params_base.get('n_jobs', max(1, physical_cores_count))

    params = self.params.copy()
    params = self.transcribe_params(params, train_shape=X.shape)
    loggerinfo(self.get_logger(**kwargs), "%s fit params: %s" % (self.display_name, dict(params)))
    loggerinfo(self.get_logger(**kwargs), "%s data: %s %s" % (self.display_name, X.shape, y.shape))

    X = dt.Frame(X)
    orig_cols = list(X.names)
    if self.num_classes >= 2:
        model = KNeighborsClassifier(**params)
        lb = LabelEncoder()
        lb.fit(self.labels)
        y = lb.transform(y)
    else:
        model = KNeighborsRegressor(**params)

    X = self.basic_impute(X)
    X = X.to_numpy()

    if self.params.get('standardize', False):  # self.params since params has it popped out
        standard_scaler = StandardScaler()
        X = standard_scaler.fit_transform(X)
    else:
        standard_scaler = None

    model.fit(X, y)
    importances = self.get_basic_importances(X, y)

    self.set_model_properties(model=(model, standard_scaler, self.min),
                              features=orig_cols,
                              importances=importances.tolist(),  # abs(model.coef_[0])
                              iterations=0)
def fit(self, X: dt.Frame, y: np.array = None): """ Fits ARIMA models (1 per time group) using historical target values contained in y :param X: Datatable frame containing the features :param y: numpy array containing the historical values of the target :return: self """ # Import the ARIMA python module pm = importlib.import_module('pmdarima') # Create dictionary that will link models to groups self.models = {} # Convert to pandas X = X.to_pandas() # Keep the Time Group Columns XX = X[self.tgc].copy() # Add the target XX['y'] = np.array(y) self.mean_value = np.mean(y) self.ntrain = X.shape[0] # Get the logger if it exists logger = self._get_logger() # Group the input by TGC (Time group column) excluding the time column itself # What we want is being able to access the time series related to each group # So that we can predict future sales for each store/department independently tgc_wo_time = list(np.setdiff1d(self.tgc, self.time_column)) if len(tgc_wo_time) > 0: XX_grp = XX.groupby(tgc_wo_time) else: XX_grp = [([None], XX)] # Build 1 ARIMA model per time group columns nb_groups = len(XX_grp) for _i_g, (key, X) in enumerate(XX_grp): # Just say where we are in the fitting process if (_i_g + 1) % max(1, nb_groups // 20) == 0: loggerinfo( logger, "Auto ARIMA : %d%% of groups fitted" % (100 * (_i_g + 1) // nb_groups)) key = key if isinstance(key, list) else [key] grp_hash = '_'.join(map(str, key)) # print("auto arima - fitting on data of shape: %s for group: %s" % (str(X.shape), grp_hash)) order = np.argsort(X[self.time_column]) try: model = pm.auto_arima(X['y'].values[order], error_action='ignore') except Exception as e: loggerinfo(logger, "Auto ARIMA warning: {}".format(e)) model = None self.models[grp_hash] = model return self
def transform(self, X: dt.Frame): """ Uses fitted models (1 per time group) to predict the target If self.is_train exists, it means we are doing in-sample predictions if it does not then we Arima is used to predict the future :param X: Datatable Frame containing the features :return: ARIMA predictions """ X = X.to_pandas() XX = X[self.tgc].copy() tgc_wo_time = list(np.setdiff1d(self.tgc, self.time_column)) if len(tgc_wo_time) > 0: XX_grp = XX.groupby(tgc_wo_time) else: XX_grp = [([None], XX)] # Get the logger if it exists logger = None if self.context and self.context.experiment_id: logger = make_experiment_logger( experiment_id=self.context.experiment_id, tmp_dir=self.context.tmp_dir, experiment_tmp_dir=self.context.experiment_tmp_dir) nb_groups = len(XX_grp) preds = [] for _i_g, (key, X) in enumerate(XX_grp): # Just say where we are in the fitting process if (_i_g + 1) % max(1, nb_groups // 20) == 0: loggerinfo( logger, "Auto ARIMA : %d%% of groups transformed" % (100 * (_i_g + 1) // nb_groups)) key = key if isinstance(key, list) else [key] grp_hash = '_'.join(map(str, key)) # print("auto arima - transforming data of shape: %s for group: %s" % (str(X.shape), grp_hash)) order = np.argsort(X[self.time_column]) if grp_hash in self.models: model = self.models[grp_hash] if model is not None: yhat = model.predict_in_sample() \ if hasattr(self, 'is_train') else model.predict(n_periods=X.shape[0]) yhat = yhat[order] XX = pd.DataFrame(yhat, columns=['yhat']) else: XX = pd.DataFrame(np.full((X.shape[0], 1), self.nan_value), columns=['yhat']) # invalid model else: XX = pd.DataFrame(np.full((X.shape[0], 1), self.nan_value), columns=['yhat']) # unseen groups XX.index = X.index preds.append(XX) XX = pd.concat(tuple(preds), axis=0).sort_index() return XX
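# Minimal standalone sketch (assumes pmdarima is installed) of the pattern used by the
# fit/transform pair above: fit one auto_arima model per time group, then use in-sample
# predictions for training rows and predict(n_periods=...) for future rows. The group names
# and toy series below are illustrative, not taken from the recipe.
import numpy as np
import pmdarima as pm

series_per_group = {
    "store_1": np.sin(np.linspace(0, 8, 60)) + np.random.normal(0, 0.1, 60),
    "store_2": np.cos(np.linspace(0, 8, 60)) + np.random.normal(0, 0.1, 60),
}

models = {}
for grp, y in series_per_group.items():
    try:
        models[grp] = pm.auto_arima(y, error_action='ignore')
    except Exception:
        models[grp] = None  # fall back to a constant prediction for this group

for grp, model in models.items():
    if model is None:
        continue
    in_sample = model.predict_in_sample()   # aligns with the training rows
    future = model.predict(n_periods=7)     # next 7 periods beyond training
    print(grp, in_sample[-3:], future[:3])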
def _get_n_jobs(self, logger, **kwargs):
    try:
        if config.fixed_num_folds == 0:
            n_jobs = max(1, int(int(max_threads() / min(config.num_folds, kwargs['max_workers']))))
        else:
            n_jobs = max(1, int(int(max_threads() / min(config.fixed_num_folds, config.num_folds, kwargs['max_workers']))))
    except KeyError:
        loggerinfo(logger, "Prophet No Max Worker in kwargs. Set n_jobs to 1")
        n_jobs = 1

    return n_jobs
def fit(self, X: dt.Frame, y: np.array = None): """ Fits ARIMA models (1 per time group) using historical target values contained in y :param X: Datatable frame containing the features :param y: numpy array containing the historical values of the target :return: self """ # Import the ARIMA python module pm = importlib.import_module('pmdarima') # Init models self.models = {} # Convert to pandas X = X.to_pandas() XX = X[self.tgc].copy() XX['y'] = np.array(y) self.nan_value = np.mean(y) self.ntrain = X.shape[0] # Group the input by TGC (Time group column) excluding the time column itself tgc_wo_time = list(np.setdiff1d(self.tgc, self.time_column)) if len(tgc_wo_time) > 0: XX_grp = XX.groupby(tgc_wo_time) else: XX_grp = [([None], XX)] # Get the logger if it exists logger = None if self.context and self.context.experiment_id: logger = make_experiment_logger( experiment_id=self.context.experiment_id, tmp_dir=self.context.tmp_dir, experiment_tmp_dir=self.context.experiment_tmp_dir) # Build 1 ARIMA model per time group columns nb_groups = len(XX_grp) for _i_g, (key, X) in enumerate(XX_grp): # Just say where we are in the fitting process if (_i_g + 1) % max(1, nb_groups // 20) == 0: loggerinfo( logger, "Auto ARIMA : %d%% of groups fitted" % (100 * (_i_g + 1) // nb_groups)) key = key if isinstance(key, list) else [key] grp_hash = '_'.join(map(str, key)) # print("auto arima - fitting on data of shape: %s for group: %s" % (str(X.shape), grp_hash)) order = np.argsort(X[self.time_column]) try: model = pm.auto_arima(X['y'].values[order], error_action='ignore') except: model = None self.models[grp_hash] = model return self
def _get_n_jobs(logger, **kwargs):
    if 'n_jobs_prophet' in config.recipe_dict:
        return min(config.recipe_dict['n_jobs_prophet'], max_threads())

    try:
        if config.fixed_num_folds <= 0:
            n_jobs = max(1, int(int(max_threads() / min(config.num_folds, kwargs['max_workers']))))
        else:
            n_jobs = max(1, int(int(max_threads() / min(config.fixed_num_folds, config.num_folds, kwargs['max_workers']))))
    except KeyError:
        loggerinfo(logger, "Prophet No Max Worker in kwargs. Set n_jobs to 1")
        n_jobs = 1

    return n_jobs if n_jobs > 1 else 1
def maybe_download(url, dest, logger=None):
    if not is_url(url):
        loggerinfo(logger, f"{url} is not a valid URL.")
        return

    dest_tmp = dest + ".tmp"
    if os.path.exists(dest):
        loggerinfo(logger, f"already downloaded {url} -> {dest}")
        return

    if os.path.exists(dest_tmp):
        loggerinfo(
            logger,
            f"Download has already started {url} -> {dest_tmp}. "
            f"Delete {dest_tmp} to download the file once more.")
        return

    loggerinfo(logger, f"Downloading {url} -> {dest}")
    url_data = requests.get(url, stream=True)
    if url_data.status_code != requests.codes.ok:
        msg = "Cannot get url %s, code: %s, reason: %s" % (
            str(url), str(url_data.status_code), str(url_data.reason))
        raise requests.exceptions.RequestException(msg)
    url_data.raw.decode_content = True

    if not os.path.isdir(os.path.dirname(dest)):
        os.makedirs(os.path.dirname(dest), exist_ok=True)
    with open(dest_tmp, 'wb') as f:
        shutil.copyfileobj(url_data.raw, f)
    atomic_move(dest_tmp, dest)
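# Minimal standalone sketch of the download pattern above: stream to a ".tmp" file first,
# then atomically rename so a partially written file can never be mistaken for a finished
# download. The URL and destination path are placeholders, and os.replace stands in for the
# recipe's atomic_move helper.
import os
import shutil
import requests


def download_atomically(url, dest):
    dest_tmp = dest + ".tmp"
    if os.path.exists(dest):
        return dest  # already downloaded
    resp = requests.get(url, stream=True)
    resp.raise_for_status()
    resp.raw.decode_content = True
    os.makedirs(os.path.dirname(dest) or ".", exist_ok=True)
    with open(dest_tmp, "wb") as f:
        shutil.copyfileobj(resp.raw, f)
    os.replace(dest_tmp, dest)  # atomic on the same filesystem
    return dest


# download_atomically("https://example.com/data.csv", "./downloads/data.csv")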
def fit(self, X, y, sample_weight=None, eval_set=None, sample_weight_eval_set=None, **kwargs): # if config.hard_asserts: # from h2oaicore.utils import kwargs_has_stage, kwargs_missing_stage # assert kwargs_has_stage(kwargs), kwargs_missing_stage(kwargs) # system thing, doesn't need to be set in default or mutate, just at runtime in fit, into self.params so can see self.params["n_jobs"] = self.params_base.get( 'n_jobs', max(1, physical_cores_count)) params = self.params.copy() params = self.transcribe_params(params) if self._force_final_n_estimators > 0 and kwargs.get( 'IS_FINAL', False): self.params['n_estimators'] = params[ 'n_estimators'] = self._force_final_n_estimators loggerinfo(self.get_logger(**kwargs), "%s fit params: %s" % (self.display_name, dict(params))) loggerinfo(self.get_logger(**kwargs), "%s data: %s %s" % (self.display_name, X.shape, y.shape)) orig_cols = list(X.names) if self.num_classes >= 2: lb = LabelEncoder() lb.fit(self.labels) y = lb.transform(y) model = ExtraTreesClassifier(**params) else: params.pop('class_weight', None) model = ExtraTreesRegressor(**params) X = self.basic_impute(X) X = X.to_numpy() model.fit(X, y, sample_weight=sample_weight) importances = np.array(model.feature_importances_) self.set_model_properties(model=(model, self.min), features=orig_cols, importances=importances.tolist(), iterations=params['n_estimators'])
def xnn_initialize(features, ridge_functions=3, arch=[20,12], learning_rate=0.01, bg_samples=100, beta1=0.9, beta2=0.999, dec=0.0, ams=True, bseed=None, is_categorical=False): # # Prepare model architecture # # Input to the network, our observation containing all the features input = keras.layers.Input(shape=(features,), name='main_input') # Record current column names loggerinfo(logger, "XNN LOG") loggerdata(logger, "Feature list:") loggerdata(logger, str(orig_cols)) # Input to ridge function number i is the dot product of our original input vector times coefficients ridge_input = keras.layers.Dense(ridge_functions, name="projection_layer", activation='linear')(input) ridge_networks = [] # Each subnetwork uses only 1 neuron from the projection layer as input so we need to split it ridge_inputs = SplitLayer(ridge_functions)(ridge_input) for i, ridge_input in enumerate(ridge_inputs): # Generate subnetwork i mlp = _mlp(ridge_input, i, arch) ridge_networks.append(mlp) added = keras.layers.Concatenate(name='concatenate_1')(ridge_networks) # Add the correct output layer for the problem if is_categorical: out = keras.layers.Dense(1, activation='sigmoid', input_shape= (ridge_functions, ), name='main_output')(added) else: out = keras.layers.Dense(1, activation='linear', input_shape= (ridge_functions, ), name='main_output')(added) model = keras.models.Model(inputs=input, outputs=out) optimizer = keras.optimizers.Adam(lr=learning_rate, beta_1=beta1, beta_2=beta2, decay=dec, amsgrad=ams) # Use the correct loss for the problem if is_categorical: model.compile(loss={'main_output': 'binary_crossentropy'}, optimizer=optimizer) else: model.compile(loss={'main_output': 'mean_squared_error'}, optimizer=optimizer) return model
def transform(self, X: dt.Frame): logger = None if self.context and self.context.experiment_id: logger = make_experiment_logger( experiment_id=self.context.experiment_id, tmp_dir=self.context.tmp_dir, experiment_tmp_dir=self.context.experiment_tmp_dir) X = dt.Frame(X) original_zip_column_name = X.names[0] X = X[:, dt.str64(dt.f[0])] X.names = ['zip_key'] try: zip_list = dt.unique(X[~dt.isna(dt.f.zip_key), 0]).to_list()[0] + ['79936'] zip_features = [self.get_zipcode_features(x) for x in zip_list] X_g = dt.Frame({"zip_key": zip_list}) X_g.cbind(dt.Frame(zip_features)) X_g.key = 'zip_key' X_result = X[:, :, dt.join(X_g)] self._output_feature_names = [ "{}:{}.{}".format(self.transformer_name, original_zip_column_name, self.replaceBannedCharacters(f)) for f in list(X_result[:, 1:].names) ] self._feature_desc = [ "Property '{}' of zipcode column ['{}'] from US zipcode database (recipe '{}')" .format(f, original_zip_column_name, self.transformer_name) for f in list(X_result[:, 1:].names) ] return X_result[:, 1:] except ValueError as ve: loggerinfo( logger, "Column '{}' is not a zipcode: {}".format( original_zip_column_name, str(ve))) return self.get_zipcode_null_result(X, original_zip_column_name) except TypeError as te: loggerwarning( logger, "Column '{}' triggered TypeError: {}".format( original_zip_column_name, str(te))) raise te
def scale_target_per_time_group(self, X, tgc_wo_time, logger):
    loggerinfo(logger, 'Start of group scaling')

    if len(tgc_wo_time) > 0:
        X_groups = X.groupby(tgc_wo_time)
    else:
        X_groups = [([None], X)]

    if self.scalers is None:
        self.scalers = {}

        scaled_ys = []
        for key, X_grp in X_groups:
            # Create dict key to store the min max scaler
            grp_hash = self.get_hash(key)
            # Scale target for current group
            self.scalers[grp_hash] = MinMaxScaler(feature_range=(1, 2))
            y_skl = self.scalers[grp_hash].fit_transform(X_grp[['y']].values)
            # Put back in a DataFrame to keep track of original index
            y_skl_df = pd.DataFrame(y_skl, columns=['y'])
            # (0, 'A') (1, 4) (100, 1) (100, 1)
            # print(grp_hash, X_grp.shape, y_skl.shape, y_skl_df.shape)
            y_skl_df.index = X_grp.index
            scaled_ys.append(y_skl_df)
    else:
        scaled_ys = []
        for key, X_grp in X_groups:
            # Create dict key to store the min max scaler
            grp_hash = self.get_hash(key)
            # Scale target for current group
            y_skl = self.scalers[grp_hash].transform(X_grp[['y']].values)
            # Put back in a DataFrame to keep track of original index
            y_skl_df = pd.DataFrame(y_skl, columns=['y'])
            # (0, 'A') (1, 4) (100, 1) (100, 1)
            # print(grp_hash, X_grp.shape, y_skl.shape, y_skl_df.shape)
            y_skl_df.index = X_grp.index
            scaled_ys.append(y_skl_df)

    loggerinfo(logger, 'End of group scaling')
    return pd.concat(tuple(scaled_ys), axis=0)
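# Minimal standalone sketch of per-group target scaling as used above: one MinMaxScaler per
# time group, fit on first use and reused later (with inverse_transform at prediction time).
# Column and group names here are illustrative, not from the recipe.
import pandas as pd
from sklearn.preprocessing import MinMaxScaler

df = pd.DataFrame({
    "store": ["A", "A", "A", "B", "B", "B"],
    "y": [10.0, 12.0, 14.0, 100.0, 120.0, 140.0],
})

scalers = {}
scaled_parts = []
for key, grp in df.groupby("store"):
    scalers[key] = MinMaxScaler(feature_range=(1, 2))
    scaled = scalers[key].fit_transform(grp[["y"]].values)
    # Keep the original index so rows can be re-assembled in order
    scaled_parts.append(pd.DataFrame(scaled, columns=["y"], index=grp.index))

df["y_scaled"] = pd.concat(scaled_parts).sort_index()["y"]

# Later, predictions made in scaled space can be mapped back per group:
recovered = scalers["A"].inverse_transform(df.loc[df.store == "A", ["y_scaled"]].values)
print(recovered.ravel())  # ~ [10, 12, 14]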
def mutate_params(self, accuracy=10, **kwargs): logger = None if self.context and self.context.experiment_id: logger = make_experiment_logger( experiment_id=self.context.experiment_id, tmp_dir=self.context.tmp_dir, experiment_tmp_dir=self.context.experiment_tmp_dir) loggerinfo(logger, "Mutate is called") # Default version is do no mutation # Otherwise, change self.params for this model holiday_choice = [None, "US", "UK", "DE", "FRA"] if accuracy >= 8: weekly_choice = [False, 'auto', 5, 7, 10, 15] yearly_choice = [False, 'auto', 5, 10, 15, 20, 30] monthly_choice = [False, 3, 5, 7, 10] quarterly_choice = [False, 3, 5, 7, 10] elif accuracy >= 5: weekly_choice = [False, 'auto', 10, 20] yearly_choice = [False, 'auto', 10, 20] monthly_choice = [False, 5] quarterly_choice = [False, 5] else: # No alternative seasonality, and no seasonality override for weekly and yearly weekly_choice = [False, 'auto'] yearly_choice = [False, 'auto'] monthly_choice = [False] quarterly_choice = [False] self.params["country_holidays"] = np.random.choice(holiday_choice) self.params["seasonality_mode"] = np.random.choice( ["additive", "multiplicative"]) self.params["weekly_seasonality"] = np.random.choice(weekly_choice) self.params["monthly_seasonality"] = np.random.choice(monthly_choice) self.params["quarterly_seasonality"] = np.random.choice( quarterly_choice) self.params["yearly_seasonality"] = np.random.choice(yearly_choice) self.params["growth"] = np.random.choice(["linear", "logistic"])
def predict_in_batch(self, func, X, **kwargs):
    # sklearn not very good at handling frames, no internal batching for row-by-row operations,
    # yet predict can use much more memory than fit for same frame size
    assert X is not None
    assert isinstance(X, np.ndarray)
    nrows = X.shape[0]

    # see what shape would be
    idx = X.shape[0] - 1
    Xslice = X[idx:, :]
    preds_1 = func(Xslice)
    pred_cols = int(np.prod(preds_1.shape[1:]))

    # make empty numpy frame
    preds = np.ones((nrows, pred_cols)) * np.nan

    mem_used_per_row = 100E9 * (self.params['n_estimators'] * X.shape[1]) / (2000 * 100000 * 289)
    mem_max = 1E9
    batch_size = max(1, int(mem_max / mem_used_per_row))
    loggerinfo(self.get_logger(**kwargs),
               "%s predict using batch_size %d with %d batches" %
               (self.display_name, min(nrows, batch_size), max(1, ceil(nrows / batch_size))))

    start = 0
    while start < preds.shape[0]:
        end = min(start + batch_size, preds.shape[0])
        Xslice = X[start:end, :]
        p = func(Xslice)
        preds[start:end, :] = p.reshape(end - start, pred_cols)
        start = end
    return preds
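# Minimal standalone sketch of the batched-prediction loop above: allocate the output once,
# then fill it slice by slice so peak memory stays bounded. The batch size and the trivial
# "model" below are placeholders for illustration.
import numpy as np


def predict_in_batches(predict_fn, X, batch_size=10000):
    nrows = X.shape[0]
    first = predict_fn(X[:1, :])            # probe output width on a single row
    pred_cols = int(np.prod(first.shape[1:]))
    preds = np.full((nrows, pred_cols), np.nan)
    start = 0
    while start < nrows:
        end = min(start + batch_size, nrows)
        preds[start:end, :] = predict_fn(X[start:end, :]).reshape(end - start, pred_cols)
        start = end
    return preds


X_demo = np.random.rand(25000, 3)
out = predict_in_batches(lambda a: a.sum(axis=1, keepdims=True), X_demo, batch_size=8000)
print(out.shape)  # (25000, 1)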
def transform(self, X: dt.Frame, **kwargs): """ Uses fitted models (1 per time group) to predict the target If self.is_train exists, it means we are doing in-sample predictions if it does not then we Arima is used to predict the future :param X: Datatable Frame containing the features :return: ARIMA predictions """ # Get the logger if it exists logger = None if self.context and self.context.experiment_id: logger = make_experiment_logger( experiment_id=self.context.experiment_id, tmp_dir=self.context.tmp_dir, experiment_tmp_dir=self.context.experiment_tmp_dir) tmp_folder = self._create_tmp_folder(logger) n_jobs = self._get_n_jobs(logger, **kwargs) X = X.to_pandas() XX = X[self.tgc].copy() tgc_wo_time = list(np.setdiff1d(self.tgc, self.time_column)) if len(tgc_wo_time) > 0: XX_grp = XX.groupby(tgc_wo_time) else: XX_grp = [([None], XX)] assert len(XX_grp) > 0 num_tasks = len(XX_grp) def processor(out, res): out.append(res) pool_to_use = small_job_pool loggerinfo(logger, "Arima will use {} workers for transform".format(n_jobs)) pool = pool_to_use(logger=None, processor=processor, num_tasks=num_tasks, max_workers=n_jobs) XX_paths = [] model_paths = [] nb_groups = len(XX_grp) for _i_g, (key, X) in enumerate(XX_grp): # Just print where we are in the process of fitting models if (_i_g + 1) % max(1, nb_groups // 20) == 0: loggerinfo( logger, "Auto ARIMA : %d%% of groups transformed" % (100 * (_i_g + 1) // nb_groups)) # Create time group key to store and retrieve fitted models key = key if isinstance(key, list) else [key] grp_hash = '_'.join(map(str, key)) # Create file path to store data and pass it to the fitting pool X_path = os.path.join(tmp_folder, "autoarima_Xt" + str(uuid.uuid4())) # Commented for performance, uncomment for debug # print("ARIMA - transforming data of shape: %s for group: %s" % (str(X.shape), grp_hash)) if grp_hash in self.models: model = self.models[grp_hash] model_path = os.path.join( tmp_folder, "autoarima_modelt" + str(uuid.uuid4())) save_obj(model, model_path) save_obj(X, X_path) model_paths.append(model_path) args = (model_path, X_path, self.nan_value, hasattr(self, 'is_train'), self.time_column, self.pred_gap, tmp_folder) kwargs = {} pool.submit_tryget( None, MyParallelAutoArimaTransformer_transform_async, args=args, kwargs=kwargs, out=XX_paths) else: # Don't go through pools XX = pd.DataFrame(np.full((X.shape[0], 1), self.nan_value), columns=['yhat']) # unseen groups # Sync indices XX.index = X.index save_obj(XX, X_path) XX_paths.append(X_path) pool.finish() XX = pd.concat((load_obj(XX_path) for XX_path in XX_paths), axis=0).sort_index() for p in XX_paths + model_paths: remove(p) self._clean_tmp_folder(logger, tmp_folder) return XX
def fit(self, X: dt.Frame, y: np.array = None, **kwargs): """ Fits ARIMA models (1 per time group) using historical target values contained in y Model fitting is distributed over a pool of processes and uses file storage to share the data with workers :param X: Datatable frame containing the features :param y: numpy array containing the historical values of the target :return: self """ # Get the logger if it exists logger = None tmp_folder = str(uuid.uuid4()) + "_arima_folder/" if self.context and self.context.experiment_id: logger = make_experiment_logger( experiment_id=self.context.experiment_id, tmp_dir=self.context.tmp_dir, experiment_tmp_dir=self.context.experiment_tmp_dir) tmp_folder = self._create_tmp_folder(logger) n_jobs = self._get_n_jobs(logger, **kwargs) # Import the ARIMA python module pm = importlib.import_module('pmdarima') # Init models self.models = {} # Convert to pandas X = X.to_pandas() XX = X[self.tgc].copy() XX['y'] = np.array(y) self.nan_value = np.mean(y) self.ntrain = X.shape[0] # Group the input by TGC (Time group column) excluding the time column itself tgc_wo_time = list(np.setdiff1d(self.tgc, self.time_column)) if len(tgc_wo_time) > 0: XX_grp = XX.groupby(tgc_wo_time) else: XX_grp = [([None], XX)] # Prepare for multi processing num_tasks = len(XX_grp) def processor(out, res): out[res[0]] = res[1] pool_to_use = small_job_pool loggerinfo( logger, "Arima will use {} workers for parallel processing".format(n_jobs)) pool = pool_to_use(logger=None, processor=processor, num_tasks=num_tasks, max_workers=n_jobs) # Build 1 ARIMA model per time group columns nb_groups = len(XX_grp) for _i_g, (key, X) in enumerate(XX_grp): # Just say where we are in the fitting process if (_i_g + 1) % max(1, nb_groups // 20) == 0: loggerinfo( logger, "Auto ARIMA : %d%% of groups fitted" % (100 * (_i_g + 1) // nb_groups)) X_path = os.path.join(tmp_folder, "autoarima_X" + str(uuid.uuid4())) X = X.reset_index(drop=True) save_obj(X, X_path) key = key if isinstance(key, list) else [key] grp_hash = '_'.join(map(str, key)) args = (X_path, grp_hash, self.time_column, tmp_folder) kwargs = {} pool.submit_tryget(None, MyParallelAutoArimaTransformer_fit_async, args=args, kwargs=kwargs, out=self.models) pool.finish() for k, v in self.models.items(): self.models[k] = load_obj(v) if v is not None else None remove(v) self._clean_tmp_folder(logger, tmp_folder) return self
def post_fit(self, X, y, sample_weight=None, eval_set=None, sample_weight_eval_set=None, **kwargs): # determine the largest number of trees (from 1 to N, where N is what DAI would normally do) that # abs(training_score - valid_score) <= abs-threshold IF abs-threshold > 0 ELSE true # AND # abs(training_score - valid_score) <= rel-threshold * abs(training_score) IF rel-threshold > 0 ELSE true # To enable, set at least one of the two configurations by pasting the following (with modifications) into # "Add to config.toml via toml string" under Expert Settings -> Experiment: # ##### # recipe_dict="{'max_rel_score_delta_train_valid': 0.1, 'max_abs_score_delta_train_valid': 0.01}" # ##### max_abs_deviation = config.recipe_dict.get( 'max_abs_score_delta_train_valid', 0.0) # set to > 0.0 to enable max_rel_deviation = config.recipe_dict.get( 'max_rel_score_delta_train_valid', 0.0) # set to > 0.0 to enable logger = self.get_logger(**kwargs) if max_abs_deviation > 0 or max_rel_deviation > 0: if not (self._predict_by_iteration and eval_set and self.best_iterations): # LightGBM/XGB/CatBoost only return if "IS_SHIFT" in kwargs or "IS_LEAKAGE" in kwargs: # don't change leakage/shift detection logic return # goal is to find the new best_iterations, from 1...self.best_iterations max_n = max(self.best_iterations, 1) min_n = 1 step_n = max(1, (max_n - min_n) // 20) # try up to 20 steps from 1 to N trees iter_range = range(min_n, max_n, step_n) if len(iter_range) == 0: loggerinfo( logger, "No steps to take, so no score to optimize between train and valid data" ) return mykwargs = {'output_margin': False, 'pred_contribs': False} self._predict_by_iteration = False # allow override below self.model = self.get_model() valid_X = eval_set[0][0] valid_y = eval_set[0][1] valid_w = sample_weight_eval_set[ 0] if sample_weight_eval_set else None best_n = None best_train_score = None best_valid_score = None scorer = self.get_score_f( ) # use the same scorer as the experiment for n in iter_range: mykwargs[ self. _predict_iteration_name] = n # fix number of trees for predict train_pred = self.predict_model_wrapper(X, **mykwargs) score_train = scorer(actual=y, predicted=train_pred, sample_weight=sample_weight, labels=self.labels) valid_pred = self.predict_model_wrapper(valid_X, **mykwargs) score_valid = scorer(actual=valid_y, predicted=valid_pred, sample_weight=valid_w, labels=self.labels) first_time = n == min_n abs_ok = max_abs_deviation <= 0 or \ np.abs(score_train - score_valid) <= max_abs_deviation rel_ok = max_rel_deviation <= 0 or \ np.abs(score_train - score_valid) <= max_rel_deviation * np.abs(score_train) if first_time or abs_ok and rel_ok: # use the largest number n that satisfies this condition best_n = n best_train_score = score_train best_valid_score = score_valid else: # optimization: assume monotonic cross-over break loggerinfo( logger, "Changing optimal iterations from %d to %d to " "keep train/valid %s gap below abs=%f, rel=%f: train: %f, valid: %f" % (max_n, best_n, scorer.__self__.display_name, max_abs_deviation, max_rel_deviation, best_train_score, best_valid_score)) self._predict_by_iteration = True # restore default behavior self.best_iterations = best_n # update best iters <- this is the only effect of this method else: loggerinfo( logger, "Train/valid gap control disabled - Must set at least one of the two settings to a value > 0.0, e.g.: " "recipe_dict=\"{'max_rel_score_delta_train_valid': 0.1, 'max_abs_score_delta_train_valid': 0.01}\"" )
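# Minimal standalone sketch of the train/valid gap rule implemented in post_fit above: a
# candidate iteration count is acceptable when the absolute train/valid score gap is within
# the absolute threshold (if set) and within the relative threshold times the training score
# (if set). A threshold of 0.0 disables the corresponding check. Values are illustrative.
def gap_ok(score_train, score_valid, max_abs_deviation=0.01, max_rel_deviation=0.1):
    abs_ok = max_abs_deviation <= 0 or abs(score_train - score_valid) <= max_abs_deviation
    rel_ok = max_rel_deviation <= 0 or abs(score_train - score_valid) <= max_rel_deviation * abs(score_train)
    return abs_ok and rel_ok


print(gap_ok(0.80, 0.79))  # True: gap of 0.01 passes both checks
print(gap_ok(0.80, 0.70))  # False: gap of 0.10 exceeds the absolute threshold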
def transform(self, X: dt.Frame, **kwargs): """ Uses fitted models (1 per time group) to predict the target :param X: Datatable Frame containing the features :return: FB Prophet predictions """ # Get the logger if it exists logger = None if self.context and self.context.experiment_id: logger = make_experiment_logger( experiment_id=self.context.experiment_id, tmp_dir=self.context.tmp_dir, experiment_tmp_dir=self.context.experiment_tmp_dir ) tmp_folder = self._create_tmp_folder(logger) n_jobs = self._get_n_jobs(logger, **kwargs) # Reduce X to TGC tgc_wo_time = list(np.setdiff1d(self.tgc, self.time_column)) X = X[:, self.tgc].to_pandas() # Fill NaNs or None X = X.replace([None, np.nan], 0) # Change date feature name to match Prophet requirements X.rename(columns={self.time_column: "ds"}, inplace=True) # Predict y using unique dates X_time = X[['ds']].groupby('ds').first().reset_index() with suppress_stdout_stderr(): y_avg = self.model.predict(X_time)[['ds', 'yhat']] # Prophet transforms the date column to datetime so we need to transfrom that to merge back X_time.sort_values('ds', inplace=True) X_time['yhat'] = y_avg['yhat'] X_time.sort_index(inplace=True) # Merge back into original frame on 'ds' # pd.merge wipes the index ... so keep it to provide it again indices = X.index X = pd.merge( left=X, right=X_time[['ds', 'yhat']], on='ds', how='left' ) X.index = indices # Go through groups and recover the scaled target for knowed groups if len(tgc_wo_time) > 0: X_groups = X.groupby(tgc_wo_time) else: X_groups = [([None], X)] inverted_ys = [] for key, X_grp in X_groups: grp_hash = self.get_hash(key) # Scale target for current group if grp_hash in self.scalers.keys(): inverted_y = self.scalers[grp_hash].inverse_transform(X_grp[['yhat']]) else: inverted_y = self.general_scaler.inverse_transform(X_grp[['yhat']]) # Put back in a DataFrame to keep track of original index inverted_df = pd.DataFrame(inverted_y, columns=['yhat']) inverted_df.index = X_grp.index inverted_ys.append(inverted_df) XX_general = pd.concat(tuple(inverted_ys), axis=0).sort_index() if self.top_groups: # Go though the groups and predict only top XX_paths = [] model_paths = [] def processor(out, res): out.append(res) num_tasks = len(self.top_groups) pool_to_use = small_job_pool pool = pool_to_use(logger=None, processor=processor, num_tasks=num_tasks, max_workers=n_jobs) nb_groups = len(X_groups) for _i_g, (key, X_grp) in enumerate(X_groups): # Just log where we are in the fitting process if (_i_g + 1) % max(1, nb_groups // 20) == 0: loggerinfo(logger, "FB Prophet : %d%% of groups predicted" % (100 * (_i_g + 1) // nb_groups)) # Create dict key to store the min max scaler grp_hash = self.get_hash(key) X_path = os.path.join(tmp_folder, "fbprophet_Xt" + str(uuid.uuid4())) if grp_hash not in self.top_groups: XX = pd.DataFrame(np.full((X_grp.shape[0], 1), np.nan), columns=['yhat']) # unseen groups XX.index = X_grp.index save_obj(XX, X_path) XX_paths.append(X_path) continue if self.grp_models[grp_hash] is None: XX = pd.DataFrame(np.full((X_grp.shape[0], 1), np.nan), columns=['yhat']) # unseen groups XX.index = X_grp.index save_obj(XX, X_path) XX_paths.append(X_path) continue model = self.grp_models[grp_hash] model_path = os.path.join(tmp_folder, "fbprophet_modelt" + str(uuid.uuid4())) save_obj(model, model_path) save_obj(X_grp, X_path) model_paths.append(model_path) args = (model_path, X_path, self.priors[grp_hash], tmp_folder) kwargs = {} pool.submit_tryget(None, MyParallelProphetTransformer_transform_async, args=args, kwargs=kwargs, 
out=XX_paths) pool.finish() XX_top_groups = pd.concat((load_obj(XX_path) for XX_path in XX_paths), axis=0).sort_index() for p in XX_paths + model_paths: remove(p) self._clean_tmp_folder(logger, tmp_folder) features_df = pd.DataFrame() features_df[self.display_name + '_GrpAvg'] = XX_general['yhat'] if self.top_groups: features_df[self.display_name + f'_Top{self.top_n}Grp'] = XX_top_groups['yhat'] self._output_feature_names = list(features_df.columns) self._feature_desc = list(features_df.columns) return features_df
def fit(self, X: dt.Frame, y: np.array = None, **kwargs): """ Fits FB Prophet models (1 per time group) using historical target values contained in y Model fitting is distributed over a pool of processes and uses file storage to share the data with workers :param X: Datatable frame containing the features :param y: numpy array containing the historical values of the target :return: self """ # Get the logger if it exists logger = None if self.context and self.context.experiment_id: logger = make_experiment_logger( experiment_id=self.context.experiment_id, tmp_dir=self.context.tmp_dir, experiment_tmp_dir=self.context.experiment_tmp_dir ) try: # Add value of prophet_top_n in recipe_dict variable inside of config.toml file # eg1: recipe_dict="{'prophet_top_n': 200}" # eg2: recipe_dict="{'prophet_top_n':10}" self.top_n = config.recipe_dict['prophet_top_n'] except KeyError: self.top_n = 50 loggerinfo(logger, f"Prophet will use {self.top_n} groups as well as average target data.") tmp_folder = self._create_tmp_folder(logger) n_jobs = self._get_n_jobs(logger, **kwargs) # Reduce X to TGC tgc_wo_time = list(np.setdiff1d(self.tgc, self.time_column)) X = X[:, self.tgc].to_pandas() # Fill NaNs or None X = X.replace([None, np.nan], 0) # Add target, Label encoder is only used for Classif. which we don't support... if self.labels is not None: y = LabelEncoder().fit(self.labels).transform(y) X['y'] = np.array(y) self.nan_value = X['y'].mean() # Change date feature name to match Prophet requirements X.rename(columns={self.time_column: "ds"}, inplace=True) # Create a general scale now that will be used for unknown groups at prediction time # Can we do smarter than that ? self.general_scaler = MinMaxScaler().fit(X[['y', 'ds']].groupby('ds').median().values) # Go through groups and standard scale them if len(tgc_wo_time) > 0: X_groups = X.groupby(tgc_wo_time) else: X_groups = [([None], X)] self.scalers = {} scaled_ys = [] print(f'{datetime.now()} Start of group scaling') for key, X_grp in X_groups: # Create dict key to store the min max scaler grp_hash = self.get_hash(key) # Scale target for current group self.scalers[grp_hash] = MinMaxScaler() y_skl = self.scalers[grp_hash].fit_transform(X_grp[['y']].values) # Put back in a DataFrame to keep track of original index y_skl_df = pd.DataFrame(y_skl, columns=['y']) # (0, 'A') (1, 4) (100, 1) (100, 1) # print(grp_hash, X_grp.shape, y_skl.shape, y_skl_df.shape) y_skl_df.index = X_grp.index scaled_ys.append(y_skl_df) print(f'{datetime.now()} End of group scaling') # Set target back in original frame but keep original X['y_orig'] = X['y'] X['y'] = pd.concat(tuple(scaled_ys), axis=0) # Now Average groups X_avg = X[['ds', 'y']].groupby('ds').mean().reset_index() # Send that to Prophet params = { "country_holidays": self.country_holidays, "monthly_seasonality": self.monthly_seasonality } mod = importlib.import_module('fbprophet') Prophet = getattr(mod, "Prophet") self.model = Prophet(yearly_seasonality=True, weekly_seasonality=True, daily_seasonality=True) if params["country_holidays"] is not None: self.model.add_country_holidays(country_name=params["country_holidays"]) if params["monthly_seasonality"]: self.model.add_seasonality(name='monthly', period=30.5, fourier_order=5) with suppress_stdout_stderr(): self.model.fit(X[['ds', 'y']]) print(f'{datetime.now()} General Model Fitted') self.top_groups = None if len(tgc_wo_time) > 0: if self.top_n > 0: top_n_grp = X.groupby(tgc_wo_time).size().sort_values().reset_index()[tgc_wo_time].iloc[-self.top_n:].values self.top_groups 
= [ '_'.join(map(str, key)) for key in top_n_grp ] if self.top_groups: self.grp_models = {} self.priors = {} # Prepare for multi processing num_tasks = len(self.top_groups) def processor(out, res): out[res[0]] = res[1] pool_to_use = small_job_pool loggerinfo(logger, f"Prophet will use {n_jobs} workers for fitting.") loggerinfo(logger, "Prophet parameters holidays {} / monthly {}".format(self.country_holidays, self.monthly_seasonality)) pool = pool_to_use( logger=None, processor=processor, num_tasks=num_tasks, max_workers=n_jobs ) # # Fit 1 FB Prophet model per time group columns nb_groups = len(X_groups) # Put y back to its unscaled value for top groups X['y'] = X['y_orig'] for _i_g, (key, X) in enumerate(X_groups): # Just log where we are in the fitting process if (_i_g + 1) % max(1, nb_groups // 20) == 0: loggerinfo(logger, "FB Prophet : %d%% of groups fitted" % (100 * (_i_g + 1) // nb_groups)) X_path = os.path.join(tmp_folder, "fbprophet_X" + str(uuid.uuid4())) X = X.reset_index(drop=True) save_obj(X, X_path) grp_hash = self.get_hash(key) if grp_hash not in self.top_groups: continue self.priors[grp_hash] = X['y'].mean() params = { "country_holidays": self.country_holidays, "monthly_seasonality": self.monthly_seasonality } args = (X_path, grp_hash, tmp_folder, params) kwargs = {} pool.submit_tryget(None, MyParallelProphetTransformer_fit_async, args=args, kwargs=kwargs, out=self.grp_models) pool.finish() for k, v in self.grp_models.items(): self.grp_models[k] = load_obj(v) if v is not None else None remove(v) self._clean_tmp_folder(logger, tmp_folder) return self
def fit(self, X, y, sample_weight=None, eval_set=None, sample_weight_eval_set=None, **kwargs): # Example use of logger, with required import of: # from h2oaicore.systemutils import make_experiment_logger, loggerinfo # Can use loggerwarning, loggererror, etc. for different levels logger = None if self.context and self.context.experiment_id: logger = make_experiment_logger(experiment_id=self.context.experiment_id, tmp_dir=self.context.tmp_dir, experiment_tmp_dir=self.context.experiment_tmp_dir) loggerinfo(logger, "TestLOGGER: Fit CatBoost") # Example task sync operations if hasattr(self, 'testcount'): self.test_count += 1 else: self.test_count = 0 # The below generates a message in the GUI notifications panel if self.test_count == 0 and self.context and self.context.experiment_id: warning = "TestWarning: First CatBoost fit for this model instance" loggerwarning(logger, warning) task = kwargs.get('task') if task: task.sync(key=self.context.experiment_id, progress=dict(type='warning', data=warning)) task.flush() # The below generates a message in the GUI top-middle panel above the progress wheel if self.test_count == 0 and self.context and self.context.experiment_id: message = "TestMessage: CatBoost" loggerinfo(logger, message) task = kwargs.get('task') if task: task.sync(key=self.context.experiment_id, progress=dict(type='update', message=message)) task.flush() from catboost import CatBoostClassifier, CatBoostRegressor, EFstrType lb = LabelEncoder() if self.num_classes >= 2: lb.fit(self.labels) y = lb.transform(y) if isinstance(X, dt.Frame): orig_cols = list(X.names) # dt -> lightgbm internally using buffer leaks, so convert here # assume predict is after pipeline collection or in subprocess so needs no protection X = X.to_numpy() # don't assign back to X so don't damage during predict X = np.ascontiguousarray(X, dtype=np.float32 if config.data_precision == "float32" else np.float64) if eval_set is not None: valid_X = eval_set[0][0].to_numpy() # don't assign back to X so don't damage during predict valid_X = np.ascontiguousarray(valid_X, dtype=np.float32 if config.data_precision == "float32" else np.float64) valid_y = eval_set[0][1] if self.num_classes >= 2: valid_y = lb.transform(valid_y) eval_set[0] = (valid_X, valid_y) else: orig_cols = list(X.columns) if self.num_classes == 1: model = CatBoostRegressor(**self.params) else: model = CatBoostClassifier(**self.params) # Hit sometimes: Exception: catboost/libs/data_new/quantization.cpp:779: All features are either constant or ignored. if self.num_classes == 1: # assume not mae, which would use median # baseline = [np.mean(y)] * len(y) baseline = None else: baseline = None model.fit(X, y=y, sample_weight=sample_weight, baseline=baseline, eval_set=eval_set, early_stopping_rounds=kwargs.get('early_stopping_rounds', None), verbose=self.params.get('verbose', False) ) # need to move to wrapper if model.get_best_iteration() is not None: iterations = model.get_best_iteration() + 1 else: iterations = self.params['iterations'] + 1 # must always set best_iterations self.set_model_properties(model=model, features=orig_cols, importances=model.feature_importances_, iterations=iterations)
def fit(self, X, y, sample_weight=None, eval_set=None, sample_weight_eval_set=None, **kwargs): # Get column names orig_cols = list(X.names) from h2oaicore.tensorflow_dynamic import got_cpu_tf, got_gpu_tf import tensorflow as tf import shap import scipy import pandas as pd self.setup_keras_session() import h2oaicore.keras as keras import matplotlib.pyplot as plt if not hasattr(self, 'save_model_path'): model_id = str(uuid.uuid4())[:8] self.save_model_path = os.path.join(user_dir(), "custom_xnn_model.hdf5") np.random.seed(self.random_state) my_init = keras.initializers.RandomUniform(seed=self.random_state) # Get the logger if it exists logger = None if self.context and self.context.experiment_id: logger = make_experiment_logger( experiment_id=self.context.experiment_id, tmp_dir=self.context.tmp_dir, experiment_tmp_dir=self.context.experiment_tmp_dir) # Set up temp folter tmp_folder = self._create_tmp_folder(logger) # define base model def xnn_initialize(features, ridge_functions=3, arch=[20, 12], learning_rate=0.01, bg_samples=100, beta1=0.9, beta2=0.999, dec=0.0, ams=True, bseed=None, is_categorical=False): # # Prepare model architecture # # Input to the network, our observation containing all the features input = keras.layers.Input(shape=(features, ), name='main_input') # Record current column names loggerinfo(logger, "XNN LOG") loggerdata(logger, "Feature list:") loggerdata(logger, str(orig_cols)) # Input to ridge function number i is the dot product of our original input vector times coefficients ridge_input = keras.layers.Dense(ridge_functions, name="projection_layer", activation='linear')(input) ridge_networks = [] # Each subnetwork uses only 1 neuron from the projection layer as input so we need to split it ridge_inputs = SplitLayer(ridge_functions)(ridge_input) for i, ridge_input in enumerate(ridge_inputs): # Generate subnetwork i mlp = _mlp(ridge_input, i, arch) ridge_networks.append(mlp) added = keras.layers.Concatenate( name='concatenate_1')(ridge_networks) # Add the correct output layer for the problem if is_categorical: out = keras.layers.Dense(1, activation='sigmoid', input_shape=(ridge_functions, ), name='main_output')(added) else: out = keras.layers.Dense(1, activation='linear', input_shape=(ridge_functions, ), name='main_output')(added) model = keras.models.Model(inputs=input, outputs=out) optimizer = keras.optimizers.Adam(lr=learning_rate, beta_1=beta1, beta_2=beta2, decay=dec, amsgrad=ams) # Use the correct loss for the problem if is_categorical: model.compile(loss={'main_output': 'binary_crossentropy'}, optimizer=optimizer) else: model.compile(loss={'main_output': 'mean_squared_error'}, optimizer=optimizer) return model def _mlp(input, idx, arch=[20, 12], activation='relu'): # Set up a submetwork # Hidden layers mlp = keras.layers.Dense(arch[0], activation=activation, name='mlp_{}_dense_0'.format(idx), kernel_initializer=my_init)(input) for i, layer in enumerate(arch[1:]): mlp = keras.layers.Dense(layer, activation=activation, name='mlp_{}_dense_{}'.format( idx, i + 1), kernel_initializer=my_init)(mlp) # Output of the MLP mlp = keras.layers.Dense( 1, activation='linear', name='mlp_{}_dense_last'.format(idx), kernel_regularizer=keras.regularizers.l1(1e-3), kernel_initializer=my_init)(mlp) return mlp def get_shap(X, model): # Calculate the Shap values np.random.seed(24) bg_samples = min(X.shape[0], 1000) if isinstance(X, pd.DataFrame): background = X.iloc[np.random.choice(X.shape[0], bg_samples, replace=False)] else: background = X[np.random.choice(X.shape[0], bg_samples, 
replace=False)] # Explain predictions of the model on the subset explainer = shap.DeepExplainer(model, background) shap_values = explainer.shap_values(X) # Return the mean absolute value of each shap value for each dataset xnn_shap = np.abs(shap_values[0]).mean(axis=0) return xnn_shap # Initialize the xnn's features = X.shape[1] orig_cols = list(X.names) if self.num_classes >= 2: lb = LabelEncoder() lb.fit(self.labels) y = lb.transform(y) self.is_cat = True xnn1 = xnn_initialize(features=features, ridge_functions=features, arch=self.params["arch"], learning_rate=self.params["lr"], beta1=self.params["beta_1"], beta2=self.params["beta_1"], dec=self.params["decay"], ams=self.params["amsgrad"], is_categorical=self.is_cat) xnn = xnn_initialize(features=features, ridge_functions=features, arch=self.params["arch"], learning_rate=self.params["lr"], beta1=self.params["beta_1"], beta2=self.params["beta_1"], dec=self.params["decay"], ams=self.params["amsgrad"], is_categorical=self.is_cat) else: self.is_cat = False xnn1 = xnn_initialize(features=features, ridge_functions=features, arch=self.params["arch"], learning_rate=self.params["lr"], beta1=self.params["beta_1"], beta2=self.params["beta_1"], dec=self.params["decay"], ams=self.params["amsgrad"], is_categorical=self.is_cat) xnn = xnn_initialize(features=features, ridge_functions=features, arch=self.params["arch"], learning_rate=self.params["lr"], beta1=self.params["beta_1"], beta2=self.params["beta_1"], dec=self.params["decay"], ams=self.params["amsgrad"], is_categorical=self.is_cat) # Replace missing values with a value smaller than all observed values self.min = dict() for col in X.names: XX = X[:, col] self.min[col] = XX.min1() if self.min[col] is None or np.isnan(self.min[col]): self.min[col] = -1e10 else: self.min[col] -= 1 XX.replace(None, self.min[col]) X[:, col] = XX assert X[dt.isna(dt.f[col]), col].nrows == 0 X = X.to_numpy() inputs = {'main_input': X} validation_set = 0 verbose = 0 # Train the neural network once with early stopping and a validation set history = keras.callbacks.History() es = keras.callbacks.EarlyStopping(monitor='val_loss', mode='min') history = xnn1.fit(inputs, y, epochs=self.params["n_estimators"], batch_size=self.params["batch_size"], validation_split=0.3, verbose=verbose, callbacks=[history, es]) # Train again on the full data number_of_epochs_it_ran = len(history.history['loss']) xnn.fit(inputs, y, epochs=number_of_epochs_it_ran, batch_size=self.params["batch_size"], validation_split=0.0, verbose=verbose) # Get the mean absolute Shapley values importances = np.array(get_shap(X, xnn)) int_output = {} int_weights = {} int_bias = {} int_input = {} original_activations = {} x_labels = list(map(lambda x: 'x' + str(x), range(features))) intermediate_output = [] # Record and plot the projection weights # weight_list = [] for layer in xnn.layers: layer_name = layer.get_config()['name'] if layer_name != "main_input": print(layer_name) weights = layer.get_weights() # Record the biases try: bias = layer.get_weights()[1] int_bias[layer_name] = bias except: print("No Bias") # Record outputs for the test set intermediate_layer_model = keras.models.Model( inputs=xnn.input, outputs=xnn.get_layer(layer_name).output) # Record the outputs from the training set if self.is_cat and (layer_name == 'main_output'): original_activations[layer_name] = scipy.special.logit( intermediate_layer_model.predict(X)) original_activations[ layer_name + "_p"] = intermediate_layer_model.predict(X) else: original_activations[ layer_name] = 
intermediate_layer_model.predict(X) # Record other weights, inputs, and outputs int_weights[layer_name] = weights int_input[layer_name] = layer.input int_output[layer_name] = layer.output # Plot the projection layers if "projection_layer" in layer.get_config()['name']: # print(layer.get_config()['name']) # Record the weights for each projection layer weights = [np.transpose(layer.get_weights()[0])] weight_list2 = [] for i, weight in enumerate(weights[0]): weight_list.append(weight) weight_list2.append( list(np.reshape(weight, (1, features))[0])) # Plot weights plt.bar(orig_cols, abs(np.reshape(weight, (1, features))[0]), 1, color="blue") plt.ylabel("Coefficient value") plt.title("Projection Layer Weights {}".format(i), fontdict={'fontsize': 10}) plt.xticks(rotation=90) plt.show() plt.savefig(os.path.join( tmp_folder, 'projection_layer_' + str(i) + '.png'), bbox_inches="tight") plt.clf() if "main_output" in layer.get_config()['name']: weights_main = layer.get_weights() print(weights_main) pd.DataFrame(weight_list2).to_csv(os.path.join(tmp_folder, "projection_data.csv"), index=False) intermediate_output = [] for feature_num in range(features): intermediate_layer_model = keras.models.Model( inputs=xnn.input, outputs=xnn.get_layer('mlp_' + str(feature_num) + '_dense_last').output) intermediate_output.append(intermediate_layer_model.predict(X)) # Record and plot the ridge functions ridge_x = [] ridge_y = [] for weight_number in range(len(weight_list)): ridge_x.append( list( sum(X[:, ii] * weight_list[weight_number][ii] for ii in range(features)))) ridge_y.append(list(intermediate_output[weight_number])) plt.plot( sum(X[:, ii] * weight_list[weight_number][ii] for ii in range(features)), intermediate_output[weight_number], 'o') plt.xlabel("Input") plt.ylabel("Subnetwork " + str(weight_number)) plt.title("Ridge Function {}".format(i), fontdict={'fontsize': 10}) plt.show() plt.savefig( os.path.join(tmp_folder, 'ridge_' + str(weight_number) + '.png')) plt.clf() # Output the ridge function importance weights2 = np.array([item[0] for item in list(weights)[0]]) output_activations = np.abs( np.array([ item * weights2 for item in list(original_activations["concatenate_1"]) ])).mean(axis=0) loggerinfo(logger, str(output_activations)) pd.DataFrame(output_activations).to_csv(os.path.join( tmp_folder, "ridge_weights.csv"), index=False) plt.bar(x_labels, output_activations, 1, color="blue") plt.xlabel("Ridge function number") plt.ylabel("Feature importance") plt.title("Ridge function importance", fontdict={'fontsize': 10}) plt.show() plt.savefig(os.path.join(tmp_folder, 'Ridge_function_importance.png')) pd.DataFrame(ridge_y).applymap(lambda x: x[0]).to_csv(os.path.join( tmp_folder, "ridge_y.csv"), index=False) pd.DataFrame(ridge_x).to_csv(os.path.join(tmp_folder, "ridge_x.csv"), index=False) pd.DataFrame(orig_cols).to_csv(os.path.join(tmp_folder, "input_columns.csv"), index=False) self.set_model_properties(model=xnn, features=orig_cols, importances=importances.tolist(), iterations=self.params['n_estimators'])
def transform(self, X: dt.Frame, **kwargs): """ Uses fitted models (1 per time group) to predict the target :param X: Datatable Frame containing the features :return: FB Prophet predictions """ # Get the logger if it exists logger = self.get_experiment_logger() tmp_folder = self._create_tmp_folder(logger) n_jobs = self._get_n_jobs(logger, **kwargs) # Reduce X to TGC tgc_wo_time = list(np.setdiff1d(self.tgc, self.time_column)) # Change date feature name to match Prophet requirements X = self.convert_to_prophet(X) y_predictions = self.predict_with_average_model(X, tgc_wo_time) y_predictions.columns = ['average_pred'] # Go through groups for grp_col in tgc_wo_time: # Get the unique dates to be predicted X_groups = X[['ds', grp_col]].groupby(grp_col) # Go though the groups and predict only top XX_paths = [] model_paths = [] def processor(out, res): out.append(res) num_tasks = len(X_groups) pool_to_use = small_job_pool pool = pool_to_use(logger=None, processor=processor, num_tasks=num_tasks, max_workers=n_jobs) for _i_g, (key, X_grp) in enumerate(X_groups): # Just log where we are in the fitting process if (_i_g + 1) % max(1, num_tasks // 20) == 0: loggerinfo( logger, "FB Prophet : %d%% of groups predicted" % (100 * (_i_g + 1) // num_tasks)) # Create dict key to store the min max scaler grp_hash = self.get_hash(key) X_path = os.path.join(tmp_folder, "fbprophet_Xt" + str(uuid.uuid4())) if grp_hash not in self.grp_models[grp_col]: # unseen groups XX = pd.DataFrame(np.full((X_grp.shape[0], 1), np.nan), columns=['yhat']) XX.index = X_grp.index save_obj(XX, X_path) XX_paths.append(X_path) continue if self.grp_models[grp_col][grp_hash] is None: # known groups but not enough train data XX = pd.DataFrame(np.full((X_grp.shape[0], 1), np.nan), columns=['yhat']) XX.index = X_grp.index save_obj(XX, X_path) XX_paths.append(X_path) continue model = self.grp_models[grp_col][grp_hash] model_path = os.path.join( tmp_folder, "fbprophet_modelt" + str(uuid.uuid4())) save_obj(model, model_path) save_obj(X_grp, X_path) model_paths.append(model_path) args = (model_path, X_path, self.priors[grp_col][grp_hash], tmp_folder) kwargs = {} pool.submit_tryget( None, MyProphetOnSingleGroupsTransformer_transform_async, args=args, kwargs=kwargs, out=XX_paths) pool.finish() y_predictions[f'{grp_col}_pred'] = pd.concat( (load_obj(XX_path) for XX_path in XX_paths), axis=0).sort_index() for p in XX_paths + model_paths: remove(p) # Now we can invert scale # But first get rid of NaNs for grp_col in tgc_wo_time: # Add time group to the predictions, will be used to invert scaling y_predictions[grp_col] = X[grp_col] # Fill NaN y_predictions[f'{grp_col}_pred'] = y_predictions[ f'{grp_col}_pred'].fillna(y_predictions['average_pred']) # Go through groups and recover the scaled target for knowed groups if len(tgc_wo_time) > 0: X_groups = y_predictions.groupby(tgc_wo_time) else: X_groups = [([None], y_predictions)] for _f in [f'{grp_col}_pred' for grp_col in tgc_wo_time] + ['average_pred']: inverted_ys = [] for key, X_grp in X_groups: grp_hash = self.get_hash(key) # Scale target for current group if grp_hash in self.scalers.keys(): inverted_y = self.scalers[grp_hash].inverse_transform( X_grp[[_f]]) else: inverted_y = self.general_scaler.inverse_transform( X_grp[[_f]]) # Put back in a DataFrame to keep track of original index inverted_df = pd.DataFrame(inverted_y, columns=[_f]) inverted_df.index = X_grp.index inverted_ys.append(inverted_df) y_predictions[_f] = pd.concat(tuple(inverted_ys), axis=0).sort_index()[_f] 
self._clean_tmp_folder(logger, tmp_folder) y_predictions.drop(tgc_wo_time, axis=1, inplace=True) self._output_feature_names = [ f'{self.display_name}_{_f}' for _f in y_predictions ] self._feature_desc = [ f'{self.display_name}_{_f}' for _f in y_predictions ] return y_predictions
def fit(self, X: dt.Frame, y: np.array = None, **kwargs): """ Fits FB Prophet models (1 per time group) using historical target values contained in y Model fitting is distributed over a pool of processes and uses file storage to share the data with workers :param X: Datatable frame containing the features :param y: numpy array containing the historical values of the target :return: self """ # Get the logger if it exists logger = self.get_experiment_logger() loggerinfo( logger, f"Prophet will use individual groups as well as average target data." ) tmp_folder = self._create_tmp_folder(logger) n_jobs = self._get_n_jobs(logger, **kwargs) # Reduce X to TGC tgc_wo_time = list(np.setdiff1d(self.tgc, self.time_column)) X = self.convert_to_prophet(X) # Add target, Label encoder is only used for Classif. which we don't support... if self.labels is not None: y = LabelEncoder().fit(self.labels).transform(y) X['y'] = np.array(y) self.prior_value = X['y'].mean() self.general_scaler = self.fit_scaler_to_median_target(X) X = self.scale_target_for_each_time_group(X, tgc_wo_time) self.avg_model = self.fit_prophet_model_on_average_target(X) # Go through individual time group columns and create avg models self.grp_models = {} self.priors = {} for grp_col in tgc_wo_time: self.grp_models[grp_col] = {} self.priors[grp_col] = {} X_groups = X[['ds', 'y', grp_col]].groupby(grp_col) nb_groups = len(X_groups) def processor(out, res): out[res[0]] = res[1] pool_to_use = small_job_pool loggerinfo(logger, f"Prophet will use {n_jobs} workers for fitting.") loggerinfo( logger, "Prophet parameters holidays {} / monthly {}".format( self.country_holidays, self.monthly_seasonality)) pool = pool_to_use(logger=None, processor=processor, num_tasks=nb_groups, max_workers=n_jobs) for _i_g, (key, X_grp) in enumerate(X_groups): # Just log where we are in the fitting process if (_i_g + 1) % max(1, nb_groups // 20) == 0: loggerinfo( logger, "FB Prophet : %d%% of groups fitted" % (100 * (_i_g + 1) // nb_groups)) X_path = os.path.join(tmp_folder, "fbprophet_X" + str(uuid.uuid4())) # Save target average for current group grp_hash = self.get_hash(key) self.priors[grp_col][grp_hash] = X_grp['y'].mean() # Average by date X_grp_avg = X_grp.groupby('ds')['y'].mean().reset_index() save_obj(X_grp_avg, X_path) params = { "country_holidays": self.country_holidays, "monthly_seasonality": self.monthly_seasonality } args = (X_path, grp_hash, tmp_folder, params) kwargs = {} pool.submit_tryget( None, MyProphetOnSingleGroupsTransformer_fit_async, args=args, kwargs=kwargs, out=self.grp_models[grp_col]) pool.finish() for k, v in self.grp_models[grp_col].items(): self.grp_models[grp_col][k] = load_obj( v) if v is not None else None remove(v) self._clean_tmp_folder(logger, tmp_folder) return self
def transform(self, X: dt.Frame, **kwargs):
    """
    Uses fitted models (1 per time group) to predict the target
    :param X: Datatable Frame containing the features
    :return: FB Prophet predictions
    """
    # Get the logger if it exists
    logger = None
    if self.context and self.context.experiment_id:
        logger = make_experiment_logger(
            experiment_id=self.context.experiment_id,
            tmp_dir=self.context.tmp_dir,
            experiment_tmp_dir=self.context.experiment_tmp_dir)

    tmp_folder = self._create_tmp_folder(logger)

    n_jobs = self._get_n_jobs(logger, **kwargs)

    XX = X[:, self.tgc].to_pandas()
    XX = XX.replace([None, np.nan], 0)
    XX.rename(columns={self.time_column: "ds"}, inplace=True)

    tgc_wo_time = list(np.setdiff1d(self.tgc, self.time_column))
    if len(tgc_wo_time) > 0:
        XX_grp = XX.groupby(tgc_wo_time)
    else:
        XX_grp = [([None], XX)]

    assert len(XX_grp) > 0

    num_tasks = len(XX_grp)

    def processor(out, res):
        out.append(res)

    pool_to_use = small_job_pool
    loggerinfo(logger, "Prophet will use {} workers for transform".format(n_jobs))
    pool = pool_to_use(logger=None, processor=processor,
                       num_tasks=num_tasks, max_workers=n_jobs)

    XX_paths = []
    model_paths = []

    nb_groups = len(XX_grp)
    print("Nb Groups = ", nb_groups)
    for _i_g, (key, X) in enumerate(XX_grp):
        # Log where we are in the transformation of the dataset
        if (_i_g + 1) % max(1, nb_groups // 20) == 0:
            loggerinfo(logger, "FB Prophet : %d%% of groups transformed" % (100 * (_i_g + 1) // nb_groups))

        key = key if isinstance(key, list) else [key]
        grp_hash = '_'.join(map(str, key))
        X_path = os.path.join(tmp_folder, "fbprophet_Xt" + str(uuid.uuid4()))

        # Commented for performance, uncomment for debug
        # print("prophet - transforming data of shape: %s for group: %s" % (str(X.shape), grp_hash))
        if grp_hash in self.models:
            model = self.models[grp_hash]
            model_path = os.path.join(tmp_folder, "fbprophet_modelt" + str(uuid.uuid4()))
            save_obj(model, model_path)
            save_obj(X, X_path)
            model_paths.append(model_path)

            args = (model_path, X_path, self.priors[grp_hash], tmp_folder)
            kwargs = {}
            pool.submit_tryget(None, MyParallelProphetTransformer_transform_async,
                               args=args, kwargs=kwargs, out=XX_paths)
        else:
            # Unseen groups
            XX = pd.DataFrame(np.full((X.shape[0], 1), self.nan_value), columns=['yhat'])
            XX.index = X.index
            save_obj(XX, X_path)
            XX_paths.append(X_path)

    pool.finish()

    XX = pd.concat((load_obj(XX_path) for XX_path in XX_paths), axis=0).sort_index()

    for p in XX_paths + model_paths:
        remove(p)

    self._clean_tmp_folder(logger, tmp_folder)

    return XX
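# MyParallelProphetTransformer_transform_async is likewise defined elsewhere. A minimal sketch,
# assuming it mirrors the fit worker: load the fitted model and the group frame, ask Prophet for
# predictions on the group's dates, keep the caller's row index so the results can be stitched
# back together with sort_index(), and return the file path that now holds the predictions.
# Illustrative only; the prior fallback is an assumption, not the recipe's confirmed behavior.
def prophet_group_predict_sketch(model_path, X_path, prior, tmp_folder):
    model = load_obj(model_path)               # fitted Prophet model for this group
    X = load_obj(X_path)                       # group rows, including a 'ds' column
    forecast = model.predict(X[['ds']])        # Prophet returns 'yhat' among other columns
    XX = forecast[['yhat']].copy()
    XX['yhat'] = XX['yhat'].fillna(prior)      # fall back to the group prior where needed (assumption)
    XX.index = X.index                         # preserve the original index for later sort_index()
    save_obj(XX, X_path)                       # overwrite the input file with the predictions
    return X_path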
def fit(self, X: dt.Frame, y: np.array = None, **kwargs):
    """
    Fits FB Prophet models (1 per time group) using historical target values contained in y
    Model fitting is distributed over a pool of processes and uses file storage to share the data with workers
    :param X: Datatable frame containing the features
    :param y: numpy array containing the historical values of the target
    :return: self
    """
    # Get the logger if it exists
    logger = None
    if self.context and self.context.experiment_id:
        logger = make_experiment_logger(
            experiment_id=self.context.experiment_id,
            tmp_dir=self.context.tmp_dir,
            experiment_tmp_dir=self.context.experiment_tmp_dir)

    tmp_folder = self._create_tmp_folder(logger)

    n_jobs = self._get_n_jobs(logger, **kwargs)

    # Convert to pandas
    XX = X[:, self.tgc].to_pandas()
    XX = XX.replace([None, np.nan], 0)
    XX.rename(columns={self.time_column: "ds"}, inplace=True)

    # Make sure labels are numeric
    if self.labels is not None:
        y = LabelEncoder().fit(self.labels).transform(y)
    XX['y'] = np.array(y)

    # Set target prior
    self.nan_value = np.mean(y)

    # Group the input by TGC (Time group column) excluding the time column itself
    tgc_wo_time = list(np.setdiff1d(self.tgc, self.time_column))
    if len(tgc_wo_time) > 0:
        XX_grp = XX.groupby(tgc_wo_time)
    else:
        XX_grp = [([None], XX)]

    self.models = {}
    self.priors = {}

    # Prepare for multi processing
    num_tasks = len(XX_grp)

    def processor(out, res):
        out[res[0]] = res[1]

    pool_to_use = small_job_pool
    loggerinfo(logger, "Prophet will use {} workers for fitting".format(n_jobs))
    loggerinfo(logger, "Prophet parameters holidays {} / monthly {}".format(
        self.country_holidays, self.monthly_seasonality))
    pool = pool_to_use(logger=None, processor=processor,
                       num_tasks=num_tasks, max_workers=n_jobs)

    # Fit 1 FB Prophet model per time group columns
    nb_groups = len(XX_grp)
    for _i_g, (key, X) in enumerate(XX_grp):
        # Just log where we are in the fitting process
        if (_i_g + 1) % max(1, nb_groups // 20) == 0:
            loggerinfo(logger, "FB Prophet : %d%% of groups fitted" % (100 * (_i_g + 1) // nb_groups))

        X_path = os.path.join(tmp_folder, "fbprophet_X" + str(uuid.uuid4()))
        X = X.reset_index(drop=True)
        save_obj(X, X_path)

        key = key if isinstance(key, list) else [key]
        grp_hash = '_'.join(map(str, key))
        self.priors[grp_hash] = X['y'].mean()

        params = {
            "country_holidays": self.country_holidays,
            "monthly_seasonality": self.monthly_seasonality
        }

        args = (X_path, grp_hash, tmp_folder, params)
        kwargs = {}
        pool.submit_tryget(None, MyParallelProphetTransformer_fit_async,
                           args=args, kwargs=kwargs, out=self.models)

    pool.finish()

    for k, v in self.models.items():
        self.models[k] = load_obj(v) if v is not None else None
        remove(v)

    self._clean_tmp_folder(logger, tmp_folder)

    return self
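# small_job_pool, submit_tryget and finish are Driverless AI internals, so the fan-out/fold-in
# protocol used above cannot be reproduced verbatim here. Conceptually the fit loop sends one job
# per time group to worker processes and folds each (grp_hash, model_path) result into self.models.
# A rough standard-library analogue of that pattern, for illustration only:
from concurrent.futures import ProcessPoolExecutor, as_completed


def fit_groups_in_parallel(group_paths, fit_fn, max_workers=4):
    # group_paths: dict mapping grp_hash -> path of the pickled group frame
    # fit_fn: a callable(X_path, grp_hash) that fits one group and returns (grp_hash, model_path)
    models = {}
    with ProcessPoolExecutor(max_workers=max_workers) as pool:
        futures = [pool.submit(fit_fn, path, grp_hash) for grp_hash, path in group_paths.items()]
        for fut in as_completed(futures):
            grp_hash, model_path = fut.result()
            models[grp_hash] = model_path      # same bookkeeping the processor callback performs
    return models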
def transform(self, X: dt.Frame):
    """
    Uses fitted models (1 per time group) to predict the target
    If self.is_train exists, it means we are doing in-sample predictions;
    if it does not, then ARIMA is used to predict the future
    :param X: Datatable Frame containing the features
    :return: ARIMA predictions
    """
    logger = self._get_experiment_logger()

    # 0. Preliminary steps
    tgc_wo_time = list(np.setdiff1d(self.tgc, self.time_column))
    X = X[:, self.tgc].to_pandas()

    # Fill NaNs or None
    X = X.replace([None, np.nan], 0)

    X.rename(columns={self.time_column: "ds"}, inplace=True)
    X['ds'] = pd.to_datetime(X['ds'], format=self.datetime_formats[self.time_column])

    # 1. Predict with average model
    if self.avg_model is not None:
        X_time = X[['ds']].groupby('ds').first().reset_index()
        if hasattr(self, 'is_train'):
            yhat = self.avg_model.predict_in_sample()
        else:
            yhat = self.avg_model.predict(n_periods=self.pred_gap + X_time.shape[0])
            yhat = yhat[self.pred_gap:]

        # Assign predictions the same order the dates had
        X_time.sort_values('ds', inplace=True)
        X_time['yhat'] = yhat
        X_time.sort_index(inplace=True)

        # Merge back the average prediction to all similar timestamps
        indices = X.index
        X = pd.merge(left=X, right=X_time[['ds', 'yhat']], on='ds', how='left')
        X.index = indices
    else:
        X['yhat'] = np.nan

    y_avg_model = X['yhat'].values
    y_predictions = pd.DataFrame(y_avg_model, columns=['average_pred'])

    # 2. Predict for individual groups
    # Go through groups
    for i_tgc, grp_col in enumerate(tgc_wo_time):
        y_hat_tgc = np.zeros(X.shape[0])

        # Get the unique dates to be predicted
        X_groups = X[['ds', grp_col]].groupby(grp_col)

        nb_groups = len(X_groups)
        dfs = []
        for _i_g, (key, X_grp) in enumerate(X_groups):
            # Just log where we are in the transformation process
            if (_i_g + 1) % max(1, nb_groups // 20) == 0:
                loggerinfo(logger, "Auto ARIMA : %d%% of groups transformed" % (100 * (_i_g + 1) // nb_groups))

            grp_hash = self.get_hash(grp_col, key)
            try:
                model = self.models[grp_hash]
            except KeyError:
                model = None

            # Find unique datetime
            X_time = X_grp[['ds']].groupby('ds').first().reset_index()
            X_time['ds'] = pd.to_datetime(X_time['ds'], format=self.datetime_formats[self.time_column])
            X_time = X_time.sort_values('ds')

            if model is not None:
                # Get predictions from ARIMA model, make sure we include prediction gaps
                if hasattr(self, 'is_train'):
                    print(X_grp.shape, model.predict_in_sample().shape)
                    # It can happen that in_sample predictions are smaller than the training set used
                    pred = model.predict_in_sample()
                    tmp = np.zeros(X_time.shape[0])
                    tmp[:len(pred)] = pred
                    X_time['yhat'] = tmp
                else:
                    # In ARIMA, you provide the number of periods you predict on,
                    # so the prediction gap has to be requested as well and then dropped
                    yhat = model.predict(n_periods=self.pred_gap + X_time.shape[0])
                    X_time['yhat'] = yhat[self.pred_gap:]

                # Now merge back the predictions into X_grp
                indices = X_grp.index
                X_grp = pd.merge(left=X_grp, right=X_time[['ds', 'yhat']], on='ds', how='left')
                X_grp.index = indices
            else:
                X_grp = X_grp.copy()
                X_grp['yhat'] = np.nan

            dfs.append(X_grp['yhat'])

        y_predictions[f'{grp_col}_pred'] = pd.concat(dfs, axis=0)

    # Now we have to invert scale all this
    for grp_col in tgc_wo_time:
        # Add time group to the predictions, will be used to invert scaling
        y_predictions[grp_col] = X[grp_col].copy()
        # Fill NaN
        y_predictions[f'{grp_col}_pred'] = y_predictions[f'{grp_col}_pred'].fillna(y_predictions['average_pred'])

    # Go through groups and recover the scaled target for known groups
    if len(tgc_wo_time) > 0:
        X_groups = y_predictions.groupby(tgc_wo_time)
    else:
        X_groups = [([None], y_predictions)]

    for _f in [f'{grp_col}_pred' for grp_col in tgc_wo_time] + ['average_pred']:
        inverted_ys = []
        for key, X_grp in X_groups:
            grp_hash = self.get_hash(key)

            # Scale target for current group
            if grp_hash in self.scalers.keys():
                inverted_y = self.scalers[grp_hash].inverse_transform(X_grp[[_f]])
            else:
                inverted_y = self.general_scaler.inverse_transform(X_grp[[_f]])

            # Put back in a DataFrame to keep track of original index
            inverted_df = pd.DataFrame(inverted_y, columns=[_f])
            inverted_df.index = X_grp.index
            inverted_ys.append(inverted_df)

        y_predictions[_f] = pd.concat(tuple(inverted_ys), axis=0).sort_index()[_f]

    y_predictions.drop(tgc_wo_time, axis=1, inplace=True)

    self._output_feature_names = [
        f'{self.display_name}{orig_feat_prefix}{self.time_column}{extra_prefix}{_f}'
        for _f in y_predictions
    ]
    self._feature_desc = self._output_feature_names

    return y_predictions
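# The ARIMA branch above has to bridge the gap between the end of the training period and the
# first date it is asked to predict: it requests pred_gap + horizon periods and drops the first
# pred_gap values before aligning them with the sorted test dates. A self-contained sketch of
# that alignment using pmdarima (the same library the fit method imports as `pm`); the data and
# the pred_gap value are made up for illustration.
import numpy as np
import pandas as pd
import pmdarima as pm

y_train = np.sin(np.arange(60) / 5.0) + np.random.RandomState(0).normal(0, 0.1, 60)
model = pm.auto_arima(y_train, seasonal=False, error_action="ignore")

pred_gap = 3                                     # periods between train end and first test date
X_time = pd.DataFrame({"ds": pd.date_range("2020-03-05", periods=7, freq="D")})
yhat = model.predict(n_periods=pred_gap + X_time.shape[0])
X_time = X_time.sort_values("ds")
X_time["yhat"] = np.asarray(yhat)[pred_gap:]     # keep only the horizon that maps to test dates
X_time = X_time.sort_index()                     # restore the original row order
print(X_time)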
def fit(self, X: dt.Frame, y: np.array = None):
    """
    Fits ARIMA models (1 per time group) using historical target values contained in y
    :param X: Datatable frame containing the features
    :param y: numpy array containing the historical values of the target
    :return: self
    """
    # Import the ARIMA python module
    pm = importlib.import_module('pmdarima')

    self.scalers = None

    logger = self._get_experiment_logger()

    # 0. Preliminary steps
    tgc_wo_time = list(np.setdiff1d(self.tgc, self.time_column))
    X = X[:, self.tgc].to_pandas()

    # Fill NaNs or None
    X = X.replace([None, np.nan], 0)

    # Add target, Label encoder is only used for Classif. which we don't support...
    if self.labels is not None:
        y = LabelEncoder().fit(self.labels).transform(y)
    X['y'] = np.array(y)

    # 0. Fit general scaler to make predictions for unknown groups
    X.rename(columns={self.time_column: "ds"}, inplace=True)
    self.general_scaler = MinMaxScaler(feature_range=(1, 2)).fit(
        X[['y', 'ds']].groupby('ds').median().values)

    # 1. Scale target for each individual group
    # Go through groups and standard scale them
    X['y_skl'] = self.scale_target_per_time_group(X, tgc_wo_time, logger)

    # 2. Make time a pandas datetime series so that we can order it
    X['ds'] = pd.to_datetime(X['ds'], format=self.datetime_formats[self.time_column])

    # 3. Fit a model on averages
    X_avg = X[['ds', 'y_skl']].groupby('ds').mean().reset_index()
    order = np.argsort(X_avg['ds'])
    try:
        self.avg_model = pm.auto_arima(X_avg['y_skl'].values[order], error_action='ignore', seasonal=False)
    except Exception as e:
        loggerinfo(logger, "ARIMA: Average model error : {}".format(e))
        self.avg_model = None

    # 4. Fit one model per time group
    self.models = {}
    # Go through groups
    for grp_col in tgc_wo_time:
        print(f'fitting {grp_col}')
        # Get the unique dates to be predicted
        X_groups = X[['ds', 'y_skl', grp_col]].groupby(grp_col)
        print(X.shape)

        nb_groups = len(X_groups)
        for _i_g, (key, X_grp) in enumerate(X_groups):
            # Just log where we are in the fitting process
            if (_i_g + 1) % max(1, nb_groups // 20) == 0:
                loggerinfo(logger, "Auto ARIMA : %d%% of groups fitted" % (100 * (_i_g + 1) // nb_groups))

            # Average over dates
            X_grp = X_grp.groupby('ds')['y_skl'].mean().reset_index()

            grp_hash = self.get_hash(grp_col, key)
            # print("auto arima - fitting on data of shape: %s for group: %s" % (str(X.shape), grp_hash))

            X_grp['ds'] = pd.to_datetime(X_grp['ds'], format=self.datetime_formats[self.time_column])
            order = np.argsort(X_grp['ds'])

            try:
                model = pm.auto_arima(X_grp['y_skl'].values[order], error_action='ignore', seasonal=False)
            except Exception as e:
                loggerinfo(logger, "Auto ARIMA warning: {}".format(e))
                model = None

            self.models[grp_hash] = model

    return self
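# Fitting the "general scaler" on the per-date median of the target gives the transformer a way
# to map predictions back to the original scale for groups it has never seen. A minimal sketch
# of that round trip on toy data, illustrative only:
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler

df = pd.DataFrame({
    "ds": pd.to_datetime(["2020-01-01", "2020-01-01", "2020-01-02", "2020-01-02"]),
    "y": [10.0, 14.0, 20.0, 22.0],
})
# Median target per timestamp, the same single-column shape the fit method feeds to the scaler
per_date_median = df[["y", "ds"]].groupby("ds").median().values
general_scaler = MinMaxScaler(feature_range=(1, 2)).fit(per_date_median)

scaled = general_scaler.transform(per_date_median)
recovered = general_scaler.inverse_transform(scaled)   # round-trips back to the medians
print(scaled.ravel(), recovered.ravel())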
def fit(self, X, y, sample_weight=None, eval_set=None, sample_weight_eval_set=None, **kwargs):
    orig_cols = list(X.names)

    import pandas as pd
    import numpy as np
    from skrules import SkopeRules
    from sklearn.preprocessing import OneHotEncoder
    from collections import Counter

    # Get the logger if it exists
    logger = None
    if self.context and self.context.experiment_id:
        logger = make_experiment_logger(
            experiment_id=self.context.experiment_id,
            tmp_dir=self.context.tmp_dir,
            experiment_tmp_dir=self.context.experiment_tmp_dir)

    # Set up temp folder
    tmp_folder = self._create_tmp_folder(logger)

    # Set up model
    if self.num_classes >= 2:
        lb = LabelEncoder()
        lb.fit(self.labels)
        y = lb.transform(y)

        model = SkopeRules(max_depth_duplication=self.params["max_depth_duplication"],
                           n_estimators=self.params["n_estimators"],
                           precision_min=self.params["precision_min"],
                           recall_min=self.params["recall_min"],
                           max_samples=self.params["max_samples"],
                           max_samples_features=self.params["max_samples_features"],
                           max_depth=self.params["max_depth"],
                           max_features=self.params["max_features"],
                           min_samples_split=self.params["min_samples_split"],
                           bootstrap=self.params["bootstrap"],
                           bootstrap_features=self.params["bootstrap_features"],
                           random_state=self.params["random_state"],
                           feature_names=orig_cols)
    else:
        # SkopeRules does not support regression
        loggerinfo(logger, "PASS, no skope-rules model")
        pass

    # Record the data types
    X = X.to_pandas()
    X.columns = orig_cols
    X_datatypes = [str(item) for item in list(X.dtypes)]

    # Change all float32 values to float64
    for ii in range(len(X_datatypes)):
        if X_datatypes[ii] == 'float32':
            X = X.astype({orig_cols[ii]: np.float64})

    X_datatypes = [str(item) for item in list(X.dtypes)]

    # List the categorical and numerical features
    self.X_categorical = [orig_cols[col_count] for col_count in range(len(orig_cols))
                          if (X_datatypes[col_count] == 'category') or (X_datatypes[col_count] == 'object')]
    self.X_numeric = [item for item in orig_cols if item not in self.X_categorical]

    # Find the levels and mode for each categorical feature
    # for use in the test set
    self.train_levels = {}
    self.train_mode = {}
    for item in self.X_categorical:
        self.train_levels[item] = list(set(X[item]))
        self.train_mode[item] = Counter(X[item]).most_common(1)[0][0]

    # One hot encode the categorical features
    # And replace missing values with a Missing category
    if len(self.X_categorical) > 0:
        loggerinfo(logger, "Categorical encode")

        for colname in self.X_categorical:
            X[colname] = list(X[colname].fillna("Missing"))
        self.enc = OneHotEncoder(handle_unknown='ignore')
        self.enc.fit(X[self.X_categorical])
        self.encoded_categories = list(self.enc.get_feature_names(input_features=self.X_categorical))

        X_enc = self.enc.transform(X[self.X_categorical]).toarray()
        X = pd.concat([X[self.X_numeric], pd.DataFrame(X_enc, columns=self.encoded_categories)], axis=1)

    # Replace missing values with a missing value code
    if len(self.X_numeric) > 0:
        for colname in self.X_numeric:
            X[colname] = list(X[colname].fillna(-999))

    model.fit(np.array(X), np.array(y))

    # Find the rule list
    self.rule_list = model.rules_

    # Calculate feature importances
    var_imp = []
    for var in orig_cols:
        var_imp.append(sum(int(var in item[0]) for item in self.rule_list))

    if max(var_imp) != 0:
        importances = list(np.array(var_imp) / max(var_imp))
    else:
        importances = [1] * len(var_imp)

    pd.DataFrame(model.rules_, columns=['Rule', '(Precision, Recall, nb)']).to_csv(
        os.path.join(tmp_folder, 'Skope_rules.csv'), index=False)

    self.mean_target = np.array(sum(y) / len(y))

    # Set model properties
    self.set_model_properties(model=model,
                              features=list(X.columns),
                              importances=importances,
                              iterations=self.params['n_estimators'])
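# The importance heuristic above counts, for every original column, how many SkopeRules rules
# mention that column, then rescales by the maximum count. A self-contained sketch with a toy
# rule list in the (rule, (precision, recall, nb)) layout the code above assumes for
# model.rules_; note that plain substring matching can over-count columns whose names are
# substrings of other column names.
import numpy as np

orig_cols = ["age", "income", "tenure"]
rule_list = [
    ("age > 30 and income <= 50000", (0.9, 0.4, 12)),
    ("income > 50000", (0.8, 0.6, 7)),
    ("age <= 30 and tenure > 2", (0.7, 0.3, 5)),
]

var_imp = [sum(int(col in rule) for rule, _stats in rule_list) for col in orig_cols]
if max(var_imp) != 0:
    importances = list(np.array(var_imp) / max(var_imp))
else:
    importances = [1] * len(var_imp)
print(dict(zip(orig_cols, importances)))   # age and income appear in 2 rules each, tenure in 1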