def _gen_xgb_model(model_type: str, xgb_params: dict):
    """generate an xgboost model

    Multiple model types can be estimated using the XGBoost Scikit-Learn API.
    Input can either be a predefined json model configuration or one of the
    five xgboost model types: "classifier", "regressor", "ranker",
    "rf_classifier", or "rf_regressor". In either case a params dict can be
    passed in to modify default values.

    Based on `mlutils.models.gen_sklearn_model`; see the function
    `sklearn_classifier` in this repository.

    :param model_type: one of "classifier", "regressor", "ranker",
                       "rf_classifier", or "rf_regressor"
    :param xgb_params: class init parameters
    """
    mtypes = {
        "classifier": "xgboost.XGBClassifier",
        "regressor": "xgboost.XGBRegressor",
        "ranker": "xgboost.XGBRanker",
        "rf_classifier": "xgboost.XGBRFClassifier",
        "rf_regressor": "xgboost.XGBRFRegressor",
    }
    if model_type.endswith("json"):
        model_config = model_type
    elif model_type in mtypes:
        model_config = mtypes[model_type]
    else:
        raise ValueError("unrecognized model type, see help documentation")
    return gen_sklearn_model(model_config, xgb_params)
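For illustration only, a hedged usage sketch of the helper above. The "CLASS_"-prefixed parameter convention is an assumption carried over from how `train_model` forwards `context.parameters.items()` to `gen_sklearn_model`, and the parameter values are placeholders; `create_class` is assumed to be available from this module's imports.

# Hedged usage sketch, not part of the function above. Assumes
# gen_sklearn_model consumes (key, value) pairs where keys prefixed with
# "CLASS_" become estimator init arguments, mirroring how train_model
# passes context.parameters.items().
def _example_xgb_usage():
    xgb_params = {"CLASS_max_depth": 3, "CLASS_n_estimators": 100}.items()
    model_config = _gen_xgb_model("classifier", xgb_params)
    ClassifierClass = create_class(model_config["META"]["class"])  # xgboost.XGBClassifier
    return ClassifierClass(**model_config["CLASS"])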
def train_model(
    context: MLClientCtx,
    model_pkg_class: str,
    dataset: DataItem,
    label_column: str = "labels",
    encode_cols: dict = {},
    sample: int = -1,
    test_size: float = 0.30,
    train_val_split: float = 0.70,
    test_set_key: str = "test_set",
    model_evaluator=None,
    models_dest: str = "",
    plots_dest: str = "plots",
    file_ext: str = "parquet",
    model_pkg_file: str = "",
    random_state: int = 1,
) -> None:
    """train a classifier

    An optional custom model evaluator can be supplied that should have the
    signature `my_custom_evaluator(context, xvalid, yvalid, model)` and return
    a dictionary of scalar "results", a "plots" key with a list of
    PlotArtifacts, and a "tables" key containing a list of TableArtifacts.

    :param context:         the function context
    :param model_pkg_class: the model to train, e.g,
                            "sklearn.neural_networks.MLPClassifier",
                            or json model config
    :param dataset:         ("data") name of raw data file
    :param label_column:    ground-truth (y) labels
    :param encode_cols:     dictionary of names and prefixes for columns that
                            are to be one-hot encoded
    :param sample:          selects the first n rows, or a contiguous sample
                            starting from the first row; if negative (< -1),
                            selects a random sample
    :param test_size:       (0.30) test set size
    :param train_val_split: (0.70) once the test set has been removed, the
                            training set gets this proportion
    :param test_set_key:    key of held out data in artifact store
    :param model_evaluator: (None) a custom model evaluator can be specified
    :param models_dest:     ("") models subfolder on artifact path
    :param plots_dest:      plot subfolder on artifact path
    :param file_ext:        ("parquet") format for test_set_key hold out data
    :param model_pkg_file:  ("") optional json model config file
    :param random_state:    (1) sklearn rng seed
    """
    models_dest = models_dest or "model"

    raw, labels, header = get_sample(dataset, sample, label_column)

    if encode_cols:
        raw = pd.get_dummies(raw,
                             columns=list(encode_cols.keys()),
                             prefix=list(encode_cols.values()),
                             drop_first=True)

    (xtrain, ytrain), (xvalid, yvalid), (xtest, ytest) = \
        get_splits(raw, labels, 3, test_size, 1 - train_val_split, random_state)

    context.log_dataset(test_set_key,
                        df=pd.concat([xtest, ytest.to_frame()], axis=1),
                        format=file_ext,
                        index=False,
                        labels={"data-type": "held-out"},
                        artifact_path=context.artifact_subpath("data"))

    model_config = gen_sklearn_model(model_pkg_class, context.parameters.items())

    model_config["FIT"].update({"X": xtrain, "y": ytrain.values})

    ClassifierClass = create_class(model_config["META"]["class"])
    model = ClassifierClass(**model_config["CLASS"])
    model.fit(**model_config["FIT"])

    artifact_path = context.artifact_subpath(models_dest)
    plots_path = context.artifact_subpath(models_dest, plots_dest)

    if model_evaluator:
        eval_metrics = model_evaluator(context, xvalid, yvalid, model,
                                       plots_artifact_path=plots_path)
    else:
        eval_metrics = eval_model_v2(context, xvalid, yvalid, model,
                                     plots_artifact_path=plots_path)

    context.set_label("class", model_pkg_class)

    context.log_model("model", body=dumps(model),
                      artifact_path=artifact_path,
                      extra_data=eval_metrics,
                      model_file="model.pkl",
                      metrics=context.results,
                      labels={"class": model_pkg_class})
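As a reference for the `model_evaluator` hook, a minimal hedged sketch of a custom evaluator. The accuracy metric is a placeholder, the extra `plots_artifact_path` keyword mirrors how `train_model` actually invokes the evaluator, and the empty return dict simply feeds `log_model(extra_data=...)` with no additional artifacts.

# Minimal custom evaluator sketch matching the call
# model_evaluator(context, xvalid, yvalid, model, plots_artifact_path=...).
# The accuracy metric is illustrative only.
from sklearn.metrics import accuracy_score

def my_custom_evaluator(context, xvalid, yvalid, model, plots_artifact_path=""):
    ypred = model.predict(xvalid)
    context.log_result("accuracy", float(accuracy_score(yvalid, ypred)))
    # train_model passes this dict to log_model(extra_data=...), so return
    # any additional artifacts keyed by name (none here).
    return {}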
def train_model(context: MLClientCtx,
                dataset: DataItem,
                model_pkg_class: str,
                label_column: str = "label",
                train_validation_size: float = 0.75,
                sample: float = 1.0,
                models_dest: str = "models",
                test_set_key: str = "test_set",
                plots_dest: str = "plots",
                dask_key: str = "dask_key",
                dask_persist: bool = False,
                scheduler_key: str = "",
                file_ext: str = "parquet",
                random_state: int = 42) -> None:
    """
    Train a sklearn classifier with Dask

    :param context:               Function context.
    :param dataset:               Raw data file.
    :param model_pkg_class:       Model to train, e.g,
                                  "sklearn.ensemble.RandomForestClassifier",
                                  or json model config.
    :param label_column:          (label) Ground-truth y labels.
    :param train_validation_size: (0.75) Train validation set proportion out
                                  of the full dataset.
    :param sample:                (1.0) Select sample from dataset
                                  (n-rows/% of total); rows are randomized by default.
    :param models_dest:           (models) Models subfolder on artifact path.
    :param test_set_key:          (test_set) Mlrun db key of held out data in
                                  artifact store.
    :param plots_dest:            (plots) Plot subfolder on artifact path.
    :param dask_key:              (dask_key) Key of dataframe in dask client
                                  "datasets" attribute.
    :param dask_persist:          (False) Should the data be persisted
                                  (through `client.persist`).
    :param scheduler_key:         (scheduler) Dask scheduler configuration,
                                  json also logged as an artifact.
    :param file_ext:              (parquet) Format for test_set_key hold out data.
    :param random_state:          (42) sklearn seed.
    """
    if scheduler_key:
        client = Client(scheduler_key)
    else:
        client = Client()

    context.logger.info("Read Data")
    df = dataset.as_df(df_module=dd)

    context.logger.info("Prep Data")
    numerics = ["int16", "int32", "int64", "float16", "float32", "float64"]
    df = df.select_dtypes(include=numerics)

    if df.isna().any().any().compute():
        raise Exception("NA values found")

    df_header = df.columns

    df = df.sample(frac=sample).reset_index(drop=True)
    encoder = LabelEncoder()
    encoder = encoder.fit(df[label_column])
    X = df.drop(label_column, axis=1).to_dask_array(lengths=True)
    y = encoder.transform(df[label_column])

    classes = df[label_column].drop_duplicates()  # distinct label values (dask)
    classes = [str(i) for i in classes]

    context.logger.info("Split and Train")
    X_train, X_test, y_train, y_test = model_selection.train_test_split(
        X, y, train_size=train_validation_size, random_state=random_state)

    scaler = StandardScaler()
    scaler = scaler.fit(X_train)
    X_train_transformed = scaler.transform(X_train)
    X_test_transformed = scaler.transform(X_test)

    model_config = gen_sklearn_model(model_pkg_class, context.parameters.items())

    model_config["FIT"].update({"X": X_train_transformed, "y": y_train})

    ClassifierClass = create_class(model_config["META"]["class"])
    model = ClassifierClass(**model_config["CLASS"])

    with joblib.parallel_backend("dask"):
        model = model.fit(**model_config["FIT"])

    artifact_path = context.artifact_subpath(models_dest)
    plots_path = context.artifact_subpath(models_dest, plots_dest)

    context.logger.info("Evaluate")
    extra_data_dict = {}
    for report in (ROCAUC, ClassificationReport, ConfusionMatrix):

        report_name = str(report.__name__)
        plt.cla()
        plt.clf()
        plt.close()

        viz = report(model, classes=classes, per_class=True, is_fitted=True)
        viz.fit(X_train_transformed, y_train)            # fit the training data to the visualizer
        viz.score(X_test_transformed, y_test.compute())  # evaluate the model on the test data

        plot = context.log_artifact(PlotArtifact(report_name,
                                                 body=viz.fig,
                                                 title=report_name),
                                    db_key=False)
        extra_data_dict[str(report)] = plot

        if report_name == "ROCAUC":
            context.log_results({
                "micro": viz.roc_auc.get("micro"),
                "macro": viz.roc_auc.get("macro")
            })

        elif report_name == "ClassificationReport":
            for score_name in viz.scores_:
                for score_class in viz.scores_[score_name]:
                    context.log_results({
                        score_name + "-" + score_class:
                            viz.scores_[score_name].get(score_class)
                    })

    viz = FeatureImportances(model,
                             classes=classes,
                             per_class=True,
                             is_fitted=True,
                             labels=df_header.delete(
                                 df_header.get_loc(label_column)))
    viz.fit(X_train_transformed, y_train)
    viz.score(X_test_transformed, y_test)

    plot = context.log_artifact(PlotArtifact("FeatureImportances",
                                             body=viz.fig,
                                             title="FeatureImportances"),
                                db_key=False)
    extra_data_dict[str("FeatureImportances")] = plot

    plt.cla()
    plt.clf()
    plt.close()

    context.logger.info("Log artifacts")
    artifact_path = context.artifact_subpath(models_dest)
    plots_path = context.artifact_subpath(models_dest, plots_dest)

    context.set_label("class", model_pkg_class)

    context.log_model("model",
                      body=dumps(model),
                      artifact_path=artifact_path,
                      model_file="model.pkl",
                      extra_data=extra_data_dict,
                      metrics=context.results,
                      labels={"class": model_pkg_class})

    context.log_artifact("standard_scaler",
                         body=dumps(scaler),
                         artifact_path=artifact_path,
                         model_file="scaler.gz",
                         label="standard_scaler")

    context.log_artifact("label_encoder",
                         body=dumps(encoder),
                         artifact_path=artifact_path,
                         model_file="encoder.gz",
                         label="label_encoder")

    df_to_save = delayed(np.column_stack)((X_test, y_test)).compute()
    context.log_dataset(test_set_key,
                        df=pd.DataFrame(df_to_save,
                                        columns=df_header),  # improve log dataset ability
                        format=file_ext,
                        index=False,
                        labels={"data-type": "held-out"},
                        artifact_path=context.artifact_subpath("data"))

    context.logger.info("Done!")