def train_pipeline(params: PipelineParams):
    logger.info(f"Start train with params {params}.")
    data = read_data(params.train_data_path)
    logger.info(f"Data shape is {data.shape}")

    data_train, data_val = split_train_val_data(data, params.split_params)
    logger.info(f"Train data shape is {data_train.shape}")
    logger.info(f"Validation data shape is {data_val.shape}")

    target_train = extract_target(data_train, params.features_params)
    data_train = data_train.drop(columns=['target'])
    transformer = build_transformer(params.features_params)
    transformer.fit(data_train)
    features_train = make_features(transformer, data_train)
    logger.info(f"Train features shape is {features_train.shape}")

    target_val = extract_target(data_val, params.features_params)
    data_val = data_val.drop(columns=['target'])
    features_val = make_features(transformer, data_val)
    logger.info(f"Validation features shape is {features_val.shape}")

    model = train_model(features_train, target_train, params.train_params)
    predicts = predict_model(model, features_val)
    metrics = evaluate_model(predicts, target_val)
    with open(params.metric_path, "w") as metric_file:
        json.dump(metrics, metric_file)
    logger.info(f"Metrics are: {metrics}")

    path_to_model = dump_model(model, params.model_path)
    logger.info(f"Model saved at {params.model_path}")
    with open(params.transformer_path, "wb") as tr:
        pickle.dump(transformer, tr)
    logger.info(f"Feature transformer saved at {params.transformer_path}")

    logger.info("Finished.")
    return path_to_model, metrics
def train_pipeline(cfg):
    # 1. read data
    logger.info(f"start train pipeline with config:\n\n{OmegaConf.to_yaml(cfg)}\n")
    data = read_data(cfg.input_data_path)
    logger.info(f"data.shape is {data.shape}")

    # 2. split strategy
    X_train, X_val = split_train_val_data(data, cfg.splitting_strategy,
                                          cfg.splitting_params)
    logger.info(f"X_train.shape is {X_train.shape} X_val.shape is {X_val.shape}")

    # 3. preprocess data
    logger.info("preprocess data...")
    transformer = RawDataPreprocessor()
    X_train = transformer.fit_transform(X_train)
    X_val = transformer.transform(X_val)
    selected_features = select_features(X_train, strategy='default')

    # 4. train
    if cfg.model.name == "rf":
        model = RandomForestClassifier(**cfg.model.train_params.model_params)
    elif cfg.model.name == "lr":
        model = LogisticRegression(**cfg.model.train_params.model_params)
    else:
        raise NotImplementedError(f"Unknown model name: {cfg.model.name}")
    if cfg.fit_model:
        logger.info("fit model")
        model.fit(X_train[selected_features], X_train['target'])

    # 5. save model
    if cfg.serialize_model and not cfg.fit_model:
        raise ValueError("You are trying to save a model without fitting it!")
    if cfg.serialize_model:
        serialize_model(cfg.model_path, model, transformer, selected_features)

    # 6. validate
    logger.info("load model for validation")
    model, transformer, selected_features = load_model(cfg.model_path)
    train_preds = model.predict_proba(X_train[selected_features])[:, 1]
    train_score = roc_auc_score(X_train['target'], train_preds)
    if len(X_val) == 0:
        val_preds = None
        val_score = np.nan
    else:
        val_preds = model.predict_proba(X_val[selected_features])[:, 1]
        val_score = roc_auc_score(X_val['target'], val_preds)
    logger.info(f'ROC AUC train: {train_score:.5f} val: {val_score:.5f}')
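# The pipeline above calls serialize_model()/load_model(), project helpers that
# are not shown here. A minimal pickle-based sketch, assuming the model, the
# transformer, and the selected feature list are bundled into a single artifact
# (the actual implementation may differ):
import pickle
from typing import Any, List, Tuple


def serialize_model(path: str, model: Any, transformer: Any,
                    selected_features: List[str]) -> str:
    # Bundle everything needed for inference into one pickle file.
    with open(path, "wb") as f:
        pickle.dump((model, transformer, selected_features), f)
    return path


def load_model(path: str) -> Tuple[Any, Any, List[str]]:
    # Restore the (model, transformer, selected_features) bundle.
    with open(path, "rb") as f:
        return pickle.load(f)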
def train_pipeline(cfg: Config) -> None:
    logger.info("Started train pipeline")
    logger.debug(f"App config: \n{OmegaConf.to_yaml(cfg)}")

    data = read_data(get_path_from_root(cfg.main.input_data_path))
    logger.debug(f"Data shape is {data.shape}")

    if cfg.split.name == "simple_split":
        train_data, val_data = split_train_val_data(
            data, typing.cast(SimpleSplitConfig, cfg.split)
        )
    else:
        error_msg = f"Wrong split strategy {cfg.split.name}"
        logger.error(error_msg)
        raise ValueError(error_msg)

    train_features, train_target = separate_target(train_data, cfg.main.target_name)
    val_features, val_target = separate_target(val_data, cfg.main.target_name)

    logger.info("Started transforming data")
    transformer = HeartDatasetTransformer(cfg=cfg.transformer).fit(
        train_features, train_target
    )
    train_features, train_target = transformer.transform(train_features, train_target)
    val_features, val_target = transformer.transform(val_features, val_target)
    logger.debug(
        "Transformed data shape\n"
        f"train_features: {train_features.shape}\n"
        f"train_target: {train_target.shape}\n"
        f"val_features: {val_features.shape}\n"
        f"val_target: {val_target.shape}"
    )
    logger.info("Finished transforming data")

    logger.info("Started training a classifier")
    classifier = hydra.utils.instantiate(cfg.model).fit(train_features, train_target)
    logger.info("Finished training a classifier")

    logger.info("Started evaluating the classifier")
    val_predictions = classifier.predict(val_features)
    metrics = classification_report(val_target, val_predictions, output_dict=True)
    logger.debug(f"Metrics: \n{yaml.dump(metrics)}")
    logger.info("Finished evaluating the classifier")

    model = {"classifier": classifier, "transformer": transformer}
    if cfg.main.track.track_experiment:
        logger.info("Start saving experiment info")
        track_experiment(model, cfg, metrics)
        logger.info("Finished saving experiment info")

    if cfg.main.save_model.overwrite_main_model:
        logger.info("Start saving model")
        save_model(model, cfg.main.save_model)
        logger.info("Finished saving model")

    logger.info("Finished train pipeline")
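# A Hydra-based pipeline like the one above is typically wired up through a
# small entry point. A minimal sketch, assuming a `configs/` directory with a
# `config.yaml` whose schema matches the Config dataclass used above (the
# directory layout and config name are assumptions):
import hydra
from omegaconf import DictConfig


@hydra.main(config_path="configs", config_name="config")
def main(cfg: DictConfig) -> None:
    train_pipeline(cfg)


if __name__ == "__main__":
    main()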
def model_pipeline(params: TrainingConfigParams):
    # Run the full pipeline to train and serialize the model.
    logger.info(f"start train pipeline with params {params}")
    model_folder = os.path.join(os.getcwd(), MODELS_DIR, params.model_folder)
    if not os.path.exists(model_folder):
        os.mkdir(model_folder)

    data = pd.read_csv(os.path.join(DATA_DIR, DATA_RAW_DIR, params.input_data_file))
    data_no_target = data.drop(columns=[params.feature_params.target])
    logger.debug(f"data.shape is {data.shape}")

    logger.info(f"transform the features {params.feature_params}")
    transformer = TransformerClass()
    data_processed = transformer.fit_transform(data_no_target, params.feature_params)
    transformer.save(os.path.join(model_folder, params.transformer_params.file))

    target = create_target(data, params.feature_params)
    logger.info(f"split data {params.splitting_params}")
    train_data, val_data, y_train, y_test = split_train_val_data(
        data_processed, target, params.splitting_params)
    logger.debug(f"train_data.shape is {train_data.shape}")
    logger.debug(f"val_data.shape is {val_data.shape}")

    logger.info(f"created model {params.model_params}")
    model = ModelClass()
    logger.info("train model")
    model.train(train_data, y_train, params.model_params, params.metric_params)

    logger.info("predict values")
    predicts = model.predict(val_data)
    metrics = model.evaluate(predicts, y_test)
    logger.info(f"metrics are {metrics}")
    with open(os.path.join(model_folder, params.metric_file), "w") as metric_file:
        json.dump(metrics, metric_file)

    model.serialize_model(os.path.join(model_folder, params.model_file))
    return model
def train_pipeline(training_pipeline_params: TrainingPipelineParams,
                   model: SklearnClassifierModel):
    logger.info(f"start train pipeline with params {training_pipeline_params}")
    data = read_data(training_pipeline_params.input_data_path)
    logger.info(f"data.shape is {data.shape}")

    data = drop_columns(data, training_pipeline_params.feature_params)
    logger.info(f"data.shape after dropping some columns is {data.shape}")

    train_df, val_df = split_train_val_data(
        data, training_pipeline_params.splitting_params
    )
    logger.info(f"train_df.shape is {train_df.shape}")
    logger.info(f"val_df.shape is {val_df.shape}")
    if train_df.shape[0] < NOT_ENOUGH_DATA_THRESHOLD:
        msg = "Not enough data to build a good model"
        logger.warning(msg)
        warning_logger.warning(msg)

    transformer = build_transformer(training_pipeline_params.feature_params)
    transformer.fit(train_df)
    train_features = make_features(transformer, train_df)
    train_target = extract_target(train_df, training_pipeline_params.feature_params)
    logger.info(f"train_features.shape is {train_features.shape}")

    model = train_model(train_features, train_target, model)

    val_features = make_features(transformer, val_df)
    val_target = extract_target(val_df, training_pipeline_params.feature_params)
    logger.info(f"val_features.shape is {val_features.shape}")

    predicts = predict_model(
        model,
        val_features,
        training_pipeline_params.feature_params.use_log_trick,
    )
    metrics = evaluate_model(
        predicts,
        val_target,
        use_log_trick=training_pipeline_params.feature_params.use_log_trick,
    )
    with open(training_pipeline_params.metric_path, "w") as metric_file:
        json.dump(metrics, metric_file)
    logger.info(f"metrics are {metrics}")

    path_to_model = serialize_model(model, training_pipeline_params.output_model_path)
    return path_to_model, metrics
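# The use_log_trick flag above suggests the target is modeled on a log scale.
# A sketch of evaluate_model under that assumption; the project's real metric
# set and inverse transform may differ:
import numpy as np
from sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score


def evaluate_model(predicts, target, use_log_trick: bool = False) -> dict:
    if use_log_trick:
        # Assumed: the target was log1p-transformed for training, so undo it
        # here before comparing against predictions on the original scale.
        target = np.expm1(target)
    return {
        "r2_score": r2_score(target, predicts),
        "rmse": float(np.sqrt(mean_squared_error(target, predicts))),
        "mae": mean_absolute_error(target, predicts),
    }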
def train_pipeline(params: TrainingPipelineParams) -> float:
    logger.info("start train pipeline")
    df = read_data(params.input_data_path)
    logger.info(f"load data, shape: {df.shape}")

    logger.info("train/test split")
    train_df, test_df = split_train_val_data(df, params.split_params)
    logger.debug(f"train shape: {train_df.shape}")
    logger.debug(f"test shape: {test_df.shape}")

    logger.info("feature engineering")
    transformer = build_transformer(params.feature_params)
    transformer.fit(train_df.drop(columns=['target']))

    logger.info("create train features and target")
    train_features = make_features(transformer, train_df.drop(columns=['target']))
    train_target = extract_target(train_df, params.feature_params)

    logger.info("fit model")
    model = Classifier(params.model_params)
    model.fit(train_features, train_target)
    logger.info("model is fitted")

    logger.info("create test features and target")
    test_features = make_features(transformer, test_df.drop(columns=['target']))
    test_target = extract_target(test_df, params.feature_params)

    logger.info("make predictions")
    pred = model.predict(test_features)
    score = get_score(test_target, pred)
    logger.debug(f"ROC-AUC: {score}")

    logger.info("save model")
    model.dump(params.output_model_path)
    logger.info("save transformer")
    with open(params.output_transformer_path, "wb") as f:
        pickle.dump(transformer, f)

    logger.info("train pipeline is finished")
    return score
def test_split_train_val_data(dataset: pd.DataFrame, split_config: SimpleSplitConfig):
    train_data, val_data = split_train_val_data(dataset, split_config)
    assert len(train_data) > 0
    assert len(val_data) > 0
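# The test above relies on `dataset` and `split_config` fixtures being provided
# elsewhere (e.g. in conftest.py). A minimal sketch of such fixtures, using a
# synthetic frame and an assumed SimpleSplitConfig signature (the field names
# here are illustrative, not the project's actual schema):
import numpy as np
import pandas as pd
import pytest


@pytest.fixture
def dataset() -> pd.DataFrame:
    # Small synthetic dataset; the real fixture may read a sample file instead.
    rng = np.random.default_rng(42)
    return pd.DataFrame({
        "feature": rng.normal(size=100),
        "target": rng.integers(0, 2, size=100),
    })


@pytest.fixture
def split_config() -> SimpleSplitConfig:
    # Assumed constructor arguments; adjust to the project's config schema.
    return SimpleSplitConfig(name="simple_split", val_size=0.2, random_state=42)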