def training(context: MLClientCtx, p1: int = 1, p2: int = 2) -> None:
    """Train a model.

    :param context: The runtime context object.
    :param p1:      A model parameter.
    :param p2:      Another model parameter.
    """
    # access input metadata, values, and inputs
    print(f"Run: {context.name} (uid={context.uid})")
    print(f"Params: p1={p1}, p2={p2}")
    context.logger.info("started training")

    # <insert training code here>

    # log the run results (scalar values)
    context.log_result("accuracy", p1 * 2)
    context.log_result("loss", p1 * 3)

    # add a label/tag to this run
    context.set_label("category", "tests")

    # log a simple artifact + label the artifact
    # (to upload a local file to the artifact repo, pass local_path=<local-path>)
    context.log_artifact("somefile", body=b"abc is 123", local_path="myfile.txt")

    # create a dataframe artifact
    df = pd.DataFrame([{"A": 10, "B": 100}, {"A": 11, "B": 110}, {"A": 12, "B": 120}])
    context.log_dataset("mydf", df=df)

    # log an ML model artifact, add metrics, params, and labels to it
    # and place it in a subdir ('models') under the artifacts path
    context.log_model(
        "mymodel",
        body=b"abc is 123",
        model_file="model.txt",
        metrics={"accuracy": 0.85},
        parameters={"xx": "abc"},
        labels={"framework": "xgboost"},
        artifact_path=context.artifact_subpath("models"),
    )
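A minimal usage sketch follows, assuming the handler above is saved in a file named trainer.py and MLRun is installed; the function name, image, and parameter values are illustrative and not part of the original example.

import mlrun

# package the handler as an MLRun job function (name, file, and image are assumptions)
fn = mlrun.code_to_function(
    name="trainer",
    filename="trainer.py",   # hypothetical file holding the training handler
    kind="job",
    image="mlrun/mlrun",
    handler="training",
)

# run locally and inspect the logged results and artifacts
run = fn.run(params={"p1": 5, "p2": 10}, local=True)
print(run.outputs)  # scalar results plus artifact references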
def sql_to_file(
    context: MLClientCtx,
    sql_query: str,
    database_url: str,
    file_ext: str = "parquet",
) -> None:
    """SQL Ingest - ingest data using a SQL query.

    :param context:      the function context
    :param sql_query:    the SQL query used to retrieve the data
    :param database_url: database connection URL
    :param file_ext:     ("parquet") format for the result file
    """
    engine = create_engine(database_url)
    df = pd.read_sql(sql_query, engine)
    context.log_dataset(
        "query result",
        df=df,
        format=file_ext,
        artifact_path=context.artifact_subpath("data"),
    )
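A possible invocation sketch, assuming the handler is saved as sql_ingest.py and SQLAlchemy plus a matching database driver are installed; the query and connection URL below are placeholders, not part of the original function.

import mlrun

fn = mlrun.code_to_function(
    name="sql-to-file",
    filename="sql_ingest.py",   # hypothetical file holding sql_to_file
    kind="job",
    image="mlrun/mlrun",
    handler="sql_to_file",
)

run = fn.run(
    params={
        "sql_query": "SELECT * FROM transactions LIMIT 1000",  # placeholder query
        "database_url": "sqlite:///example.db",                # placeholder connection URL
        "file_ext": "parquet",
    },
    local=True,
)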
def train_model(
    context: MLClientCtx,
    model_pkg_class: str,
    dataset: DataItem,
    label_column: str = "labels",
    encode_cols: dict = {},
    sample: int = -1,
    test_size: float = 0.30,
    train_val_split: float = 0.75,
    test_set_key: str = "test_set",
    model_evaluator=None,
    models_dest: str = "",
    plots_dest: str = "plots",
    file_ext: str = "parquet",
    model_pkg_file: str = "",
    random_state: int = 1,
) -> None:
    """Train a classifier.

    An optional custom model evaluator can be supplied; it should have the
    signature `my_custom_evaluator(context, xvalid, yvalid, model)` and return
    a dictionary of scalar "results", a "plots" key with a list of
    PlotArtifacts, and a "tables" key with a list of TableArtifacts.

    :param context:         the function context
    :param model_pkg_class: the model to train, e.g.,
                            "sklearn.neural_network.MLPClassifier", or a json model config
    :param dataset:         ("data") name of the raw data file
    :param label_column:    ground-truth (y) labels
    :param encode_cols:     dictionary of column names and prefixes for columns
                            that are to be one-hot encoded
    :param sample:          selects the first n rows; if negative, selects a
                            random sample instead
    :param test_size:       (0.30) test set size
    :param train_val_split: (0.75) once the test set has been removed, the
                            training set gets this proportion
    :param test_set_key:    key of the held-out data in the artifact store
    :param model_evaluator: (None) a custom model evaluator can be specified
    :param models_dest:     ("") models subfolder on the artifact path
    :param plots_dest:      plots subfolder on the artifact path
    :param file_ext:        ("parquet") format for the test_set_key hold-out data
    :param random_state:    (1) sklearn rng seed
    """
    models_dest = models_dest or "model"

    raw, labels, header = get_sample(dataset, sample, label_column)
    if encode_cols:
        raw = pd.get_dummies(
            raw,
            columns=list(encode_cols.keys()),
            prefix=list(encode_cols.values()),
            drop_first=True,
        )

    (xtrain, ytrain), (xvalid, yvalid), (xtest, ytest) = get_splits(
        raw, labels, 3, test_size, 1 - train_val_split, random_state
    )

    context.log_dataset(
        test_set_key,
        df=pd.concat([xtest, ytest.to_frame()], axis=1),
        format=file_ext,
        index=False,
        labels={"data-type": "held-out"},
        artifact_path=context.artifact_subpath("data"),
    )

    model_config = gen_sklearn_model(model_pkg_class, context.parameters.items())
    model_config["FIT"].update({"X": xtrain, "y": ytrain.values})

    ClassifierClass = create_class(model_config["META"]["class"])
    model = ClassifierClass(**model_config["CLASS"])
    model.fit(**model_config["FIT"])

    artifact_path = context.artifact_subpath(models_dest)
    plots_path = context.artifact_subpath(models_dest, plots_dest)
    if model_evaluator:
        eval_metrics = model_evaluator(
            context, xvalid, yvalid, model, plots_artifact_path=plots_path
        )
    else:
        eval_metrics = eval_model_v2(
            context, xvalid, yvalid, model, plots_artifact_path=plots_path
        )

    context.set_label("class", model_pkg_class)
    context.log_model(
        "model",
        body=dumps(model),
        artifact_path=artifact_path,
        extra_data=eval_metrics,
        model_file="model.pkl",
        metrics=context.results,
        labels={"class": model_pkg_class},
    )
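A possible invocation sketch, assuming the function above is saved as sklearn_classifier.py together with its helpers (get_sample, get_splits, gen_sklearn_model, eval_model_v2); the dataset path and parameter values are illustrative only.

import mlrun

fn = mlrun.code_to_function(
    name="sklearn-classifier",
    filename="sklearn_classifier.py",  # hypothetical file with the handler and its helpers
    kind="job",
    image="mlrun/mlrun",
    handler="train_model",
)

run = fn.run(
    inputs={"dataset": "./data/training_set.parquet"},  # illustrative path
    params={
        "model_pkg_class": "sklearn.ensemble.RandomForestClassifier",
        "label_column": "labels",
        "test_size": 0.2,
    },
    local=True,
)
print(run.outputs)  # evaluation metrics plus model and held-out set artifacts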
def describe_spark(
    context: MLClientCtx,
    dataset: DataItem,
    artifact_path,
    bins: int = 30,
    describe_extended: bool = True,
):
    location = dataset.local()

    spark = SparkSession.builder.appName("Spark job").getOrCreate()
    df = spark.read.csv(location, header=True, inferSchema=True)

    kwargs = []
    float_cols = [
        item[0]
        for item in df.dtypes
        if item[1].startswith("float") or item[1].startswith("double")
    ]

    if describe_extended:
        table, variables, freq = describe(df, bins, float_cols, kwargs)
        tbl_1 = variables.reset_index()
        if len(freq) != 0:
            tbl_2 = (
                pd.DataFrame.from_dict(freq, orient="index")
                .sort_index()
                .stack()
                .reset_index()
            )
            tbl_2.columns = ["col", "key", "val"]
            tbl_2["Merged"] = [{key: val} for key, val in zip(tbl_2.key, tbl_2.val)]
            tbl_2 = tbl_2.groupby("col", as_index=False).agg(lambda x: tuple(x))[
                ["col", "Merged"]
            ]
            summary = pd.merge(tbl_1, tbl_2, how="left", left_on="index", right_on="col")
        else:
            summary = tbl_1

        context.log_dataset(
            "summary_stats",
            df=summary,
            format="csv",
            index=False,
            artifact_path=context.artifact_subpath("data"),
        )
        context.log_results(table)
    else:
        tbl_1 = df.describe().toPandas()
        summary = tbl_1.T
        context.log_dataset(
            "summary_stats",
            df=summary,
            format="csv",
            index=False,
            artifact_path=context.artifact_subpath("data"),
        )

    spark.stop()
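A sketch of how this handler might be run, assuming pyspark is available in the job image so a local SparkSession can start; note that the handler declares artifact_path as a required parameter even though it writes through context.artifact_subpath, so a value is passed explicitly. The file name and dataset path are assumptions.

import mlrun

fn = mlrun.code_to_function(
    name="describe-spark",
    filename="spark_describe.py",   # hypothetical file holding describe_spark and describe()
    kind="job",
    image="mlrun/mlrun",            # assumed to also contain pyspark
    handler="describe_spark",
)

run = fn.run(
    inputs={"dataset": "./data/raw.csv"},  # illustrative CSV path
    params={
        "artifact_path": "",               # required by the signature, unused in the body
        "bins": 30,
        "describe_extended": True,
    },
    local=True,
)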
def train_model(
    context: MLClientCtx,
    dataset: DataItem,
    model_pkg_class: str,
    label_column: str = "label",
    train_validation_size: float = 0.75,
    sample: float = 1.0,
    models_dest: str = "models",
    test_set_key: str = "test_set",
    plots_dest: str = "plots",
    dask_key: str = "dask_key",
    dask_persist: bool = False,
    scheduler_key: str = "",
    file_ext: str = "parquet",
    random_state: int = 42,
) -> None:
    """Train a sklearn classifier with Dask.

    :param context:               Function context.
    :param dataset:               Raw data file.
    :param model_pkg_class:       Model to train, e.g.,
                                  "sklearn.ensemble.RandomForestClassifier",
                                  or json model config.
    :param label_column:          (label) Ground-truth y labels.
    :param train_validation_size: (0.75) Train/validation set proportion out of the
                                  full dataset.
    :param sample:                (1.0) Select a sample from the dataset (n rows / % of
                                  total); rows are randomized by default.
    :param models_dest:           (models) Models subfolder on the artifact path.
    :param test_set_key:          (test_set) MLRun db key of the held-out data in the
                                  artifact store.
    :param plots_dest:            (plots) Plots subfolder on the artifact path.
    :param dask_key:              (dask_key) Key of the dataframe in the dask client
                                  "datasets" attribute.
    :param dask_persist:          (False) Should the data be persisted (through
                                  `client.persist`).
    :param scheduler_key:         ('') Dask scheduler configuration; json also logged
                                  as an artifact.
    :param file_ext:              (parquet) Format for the test_set_key hold-out data.
    :param random_state:          (42) sklearn seed.
    """
    if scheduler_key:
        client = Client(scheduler_key)
    else:
        client = Client()

    context.logger.info("Read Data")
    df = dataset.as_df(df_module=dd)

    context.logger.info("Prep Data")
    numerics = ["int16", "int32", "int64", "float16", "float32", "float64"]
    df = df.select_dtypes(include=numerics)

    if df.isna().any().any().compute():
        raise Exception("NA values found")

    df_header = df.columns

    df = df.sample(frac=sample).reset_index(drop=True)
    encoder = LabelEncoder()
    encoder = encoder.fit(df[label_column])
    X = df.drop(label_column, axis=1).to_dask_array(lengths=True)
    y = encoder.transform(df[label_column])

    classes = df[label_column].drop_duplicates()  # no unique values in dask
    classes = [str(i) for i in classes]

    context.logger.info("Split and Train")
    X_train, X_test, y_train, y_test = model_selection.train_test_split(
        X, y, train_size=train_validation_size, random_state=random_state
    )

    scaler = StandardScaler()
    scaler = scaler.fit(X_train)
    X_train_transformed = scaler.transform(X_train)
    X_test_transformed = scaler.transform(X_test)

    model_config = gen_sklearn_model(model_pkg_class, context.parameters.items())
    model_config["FIT"].update({"X": X_train_transformed, "y": y_train})

    ClassifierClass = create_class(model_config["META"]["class"])
    model = ClassifierClass(**model_config["CLASS"])

    with joblib.parallel_backend("dask"):
        model = model.fit(**model_config["FIT"])

    artifact_path = context.artifact_subpath(models_dest)
    plots_path = context.artifact_subpath(models_dest, plots_dest)

    context.logger.info("Evaluate")
    extra_data_dict = {}
    for report in (ROCAUC, ClassificationReport, ConfusionMatrix):
        report_name = str(report.__name__)
        plt.cla()
        plt.clf()
        plt.close()

        viz = report(model, classes=classes, per_class=True, is_fitted=True)
        viz.fit(X_train_transformed, y_train)  # fit the training data to the visualizer
        viz.score(X_test_transformed, y_test.compute())  # evaluate the model on the test data

        plot = context.log_artifact(
            PlotArtifact(report_name, body=viz.fig, title=report_name), db_key=False
        )
        extra_data_dict[str(report)] = plot

        if report_name == "ROCAUC":
            context.log_results(
                {"micro": viz.roc_auc.get("micro"), "macro": viz.roc_auc.get("macro")}
            )
        elif report_name == "ClassificationReport":
            for score_name in viz.scores_:
                for score_class in viz.scores_[score_name]:
                    context.log_results(
                        {
                            score_name + "-" + score_class:
                                viz.scores_[score_name].get(score_class)
                        }
                    )

    viz = FeatureImportances(
        model,
        classes=classes,
        per_class=True,
        is_fitted=True,
        labels=df_header.delete(df_header.get_loc(label_column)),
    )
    viz.fit(X_train_transformed, y_train)
    viz.score(X_test_transformed, y_test)

    plot = context.log_artifact(
        PlotArtifact("FeatureImportances", body=viz.fig, title="FeatureImportances"),
        db_key=False,
    )
    extra_data_dict[str("FeatureImportances")] = plot

    plt.cla()
    plt.clf()
    plt.close()

    context.logger.info("Log artifacts")
    artifact_path = context.artifact_subpath(models_dest)
    plots_path = context.artifact_subpath(models_dest, plots_dest)

    context.set_label("class", model_pkg_class)

    context.log_model(
        "model",
        body=dumps(model),
        artifact_path=artifact_path,
        model_file="model.pkl",
        extra_data=extra_data_dict,
        metrics=context.results,
        labels={"class": model_pkg_class},
    )

    context.log_artifact(
        "standard_scaler",
        body=dumps(scaler),
        artifact_path=artifact_path,
        model_file="scaler.gz",
        label="standard_scaler",
    )

    context.log_artifact(
        "label_encoder",
        body=dumps(encoder),
        artifact_path=artifact_path,
        model_file="encoder.gz",
        label="label_encoder",
    )

    df_to_save = delayed(np.column_stack)((X_test, y_test)).compute()
    context.log_dataset(
        test_set_key,
        df=pd.DataFrame(df_to_save, columns=df_header),  # improve log dataset ability
        format=file_ext,
        index=False,
        labels={"data-type": "held-out"},
        artifact_path=context.artifact_subpath("data"),
    )

    context.logger.info("Done!")
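One way this Dask handler might be invoked, assuming dask, dask_ml, yellowbrick, and the sklearn helper utilities are available in the image; the file name, image, and dataset path are placeholders rather than part of the original example.

import mlrun

fn = mlrun.code_to_function(
    name="dask-classifier",
    filename="dask_classifier.py",  # hypothetical file with the handler and its imports
    kind="job",
    image="mlrun/ml-models",        # assumed image with dask/dask_ml/yellowbrick preinstalled
    handler="train_model",
)

run = fn.run(
    inputs={"dataset": "./data/training_set.parquet"},  # illustrative path
    params={
        "model_pkg_class": "sklearn.ensemble.RandomForestClassifier",
        "label_column": "label",
        "sample": 1.0,
        "scheduler_key": "",  # empty -> start a local Dask client
    },
    local=True,
)
print(run.outputs)  # metrics plus model, scaler, encoder, and test-set artifacts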