def __init__(
    self,
    feature_start_time,
    feature_end_time,
    label_start_time,
    label_end_time,
    model_update_frequency,
    training_as_of_date_frequencies,
    max_training_histories,
    training_label_timespans,
    test_as_of_date_frequencies,
    test_durations,
    test_label_timespans,
):
    self.feature_start_time = dt_from_str(
        feature_start_time
    )  # earliest time included in any feature
    self.feature_end_time = dt_from_str(
        feature_end_time
    )  # all data included in features are < this time
    if self.feature_start_time > self.feature_end_time:
        raise ValueError("Feature start time after feature end time.")

    self.label_start_time = dt_from_str(
        label_start_time
    )  # earliest time included in any label
    self.label_end_time = dt_from_str(
        label_end_time
    )  # all data in any label are < this time
    if self.label_start_time > self.label_end_time:
        raise ValueError("Label start time after label end time.")

    # how frequently to retrain models
    self.model_update_frequency = convert_str_to_relativedelta(model_update_frequency)

    # time between rows for same entity in train matrix
    self.training_as_of_date_frequencies = utils.convert_to_list(
        training_as_of_date_frequencies
    )

    # time between rows for same entity in test matrix
    self.test_as_of_date_frequencies = utils.convert_to_list(
        test_as_of_date_frequencies
    )

    # how much history for each entity to train on
    self.max_training_histories = utils.convert_to_list(max_training_histories)

    # how long into the future to make predictions for each entity
    self.test_durations = utils.convert_to_list(test_durations)

    # how much time is included in a label in the train matrix
    self.training_label_timespans = utils.convert_to_list(training_label_timespans)

    # how much time is included in a label in the test matrix
    self.test_label_timespans = utils.convert_to_list(test_label_timespans)
def get_temporal_config_for_retrain(self, prediction_date):
    temporal_config = self.experiment_config['temporal_config'].copy()
    temporal_config['feature_end_time'] = datetime.strftime(prediction_date, "%Y-%m-%d")
    temporal_config['label_end_time'] = datetime.strftime(
        prediction_date + convert_str_to_relativedelta(self.test_label_timespan),
        "%Y-%m-%d",
    )
    # just needs to be bigger than the gap between the label start and end times
    # to ensure we only get one time split for the retraining
    temporal_config['model_update_frequency'] = '%syears' % (
        dt_from_str(temporal_config['label_end_time']).year
        - dt_from_str(temporal_config['label_start_time']).year
        + 10
    )
    return temporal_config
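# --- Hedged usage sketch (illustrative, not part of the source) ---
# Shows why inflating model_update_frequency forces Timechop to produce a
# single split for retraining. The dates below are hypothetical examples.
#
#   temporal_config = retrainer.get_temporal_config_for_retrain(dt_from_str("2021-06-01"))
#   # with label_start_time='2015-01-01' and a label_end_time in 2021, the
#   # computed model_update_frequency becomes '16years', far larger than the
#   # label window, so Timechop(**temporal_config).chop_time() returns exactly
#   # one split (retrain() asserts len(chops) == 1 on this basis).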
def generate_entity_date_table(self, as_of_date, entity_date_table_name):
    cohort_table_generator = EntityDateTableGenerator(
        db_engine=self.db_engine,
        query=self.experiment_config['cohort_config']['query'],
        entity_date_table_name=entity_date_table_name,
    )
    cohort_table_generator.generate_entity_date_table(
        as_of_dates=[dt_from_str(as_of_date)]
    )
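# --- Hedged usage sketch (illustrative, not part of the source) ---
# Builds a one-date cohort table for a production run; the table name and
# date below are hypothetical.
#
#   retrainer.generate_entity_date_table(
#       as_of_date="2021-06-01",
#       entity_date_table_name="triage_production.cohort_example_predict",
#   )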
def initialize_components(self): split_config = self.config["temporal_config"] self.chopper = Timechop(**split_config) cohort_config = self.config.get("cohort_config", {}) if "query" in cohort_config: self.cohort_table_name = "cohort_{}_{}".format( cohort_config.get('name', 'default'), self.cohort_hash) self.cohort_table_generator = EntityDateTableGenerator( entity_date_table_name=self.cohort_table_name, db_engine=self.db_engine, query=cohort_config["query"], replace=self.replace) else: logging.warning( "cohort_config missing or unrecognized. Without a cohort, " "you will not be able to make matrices, perform feature imputation, " "or save time by only computing features for that cohort.") self.features_ignore_cohort = True self.cohort_table_name = "cohort_{}".format(self.experiment_hash) self.cohort_table_generator = EntityDateTableGeneratorNoOp() self.subsets = [None] + self.config.get("scoring", {}).get( "subsets", []) if "label_config" in self.config: label_config = self.config["label_config"] self.labels_table_name = "labels_{}_{}".format( label_config.get('name', 'default'), filename_friendly_hash(label_config['query'])) self.label_generator = LabelGenerator( label_name=label_config.get("name", None), query=label_config["query"], replace=self.replace, db_engine=self.db_engine, ) else: self.labels_table_name = "labels_{}".format(self.experiment_hash) self.label_generator = LabelGeneratorNoOp() logging.warning( "label_config missing or unrecognized. Without labels, " "you will not be able to make matrices.") if "bias_audit_config" in self.config: bias_config = self.config["bias_audit_config"] self.bias_hash = filename_friendly_hash(bias_config) self.protected_groups_table_name = f"protected_groups_{self.bias_hash}" self.protected_groups_generator = ProtectedGroupsGenerator( db_engine=self.db_engine, from_obj=parse_from_obj(bias_config, 'bias_from_obj'), attribute_columns=bias_config.get("attribute_columns", None), entity_id_column=bias_config.get("entity_id_column", None), knowledge_date_column=bias_config.get("knowledge_date_column", None), protected_groups_table_name=self.protected_groups_table_name, replace=self.replace) else: self.protected_groups_generator = ProtectedGroupsGeneratorNoOp() logging.warning( "bias_audit_config missing or unrecognized. 
Without protected groups, " "you will not audit your models for bias and fairness.") self.feature_dictionary_creator = FeatureDictionaryCreator( features_schema_name=self.features_schema_name, db_engine=self.db_engine) self.feature_generator = FeatureGenerator( features_schema_name=self.features_schema_name, replace=self.replace, db_engine=self.db_engine, feature_start_time=split_config["feature_start_time"], materialize_subquery_fromobjs=self.materialize_subquery_fromobjs, features_ignore_cohort=self.features_ignore_cohort) self.feature_group_creator = FeatureGroupCreator( self.config.get("feature_group_definition", {"all": [True]})) self.feature_group_mixer = FeatureGroupMixer( self.config.get("feature_group_strategies", ["all"])) self.planner = Planner( feature_start_time=dt_from_str(split_config["feature_start_time"]), label_names=[ self.config.get("label_config", {}).get("name", DEFAULT_LABEL_NAME) ], label_types=["binary"], cohort_names=[ self.config.get("cohort_config", {}).get("name", None) ], user_metadata=self.config.get("user_metadata", {}), ) self.matrix_builder = MatrixBuilder( db_config={ "features_schema_name": self.features_schema_name, "labels_schema_name": "public", "labels_table_name": self.labels_table_name, "cohort_table_name": self.cohort_table_name, }, matrix_storage_engine=self.matrix_storage_engine, experiment_hash=self.experiment_hash, include_missing_labels_in_train_as=self.config.get( "label_config", {}).get("include_missing_labels_in_train_as", None), engine=self.db_engine, replace=self.replace, run_id=self.run_id, ) self.subsetter = Subsetter(db_engine=self.db_engine, replace=self.replace, as_of_times=self.all_as_of_times) self.trainer = ModelTrainer( experiment_hash=self.experiment_hash, model_storage_engine=self.model_storage_engine, model_grouper=ModelGrouper(self.config.get("model_group_keys", [])), db_engine=self.db_engine, replace=self.replace, run_id=self.run_id, ) self.predictor = Predictor( db_engine=self.db_engine, model_storage_engine=self.model_storage_engine, save_predictions=self.save_predictions, replace=self.replace, rank_order=self.config.get("prediction", {}).get("rank_tiebreaker", "worst"), ) self.individual_importance_calculator = IndividualImportanceCalculator( db_engine=self.db_engine, n_ranks=self.config.get("individual_importance", {}).get("n_ranks", 5), methods=self.config.get("individual_importance", {}).get("methods", ["uniform"]), replace=self.replace, ) self.evaluator = ModelEvaluator( db_engine=self.db_engine, testing_metric_groups=self.config.get("scoring", {}).get( "testing_metric_groups", []), training_metric_groups=self.config.get("scoring", {}).get( "training_metric_groups", []), bias_config=self.config.get("bias_audit_config", {})) self.model_train_tester = ModelTrainTester( matrix_storage_engine=self.matrix_storage_engine, model_evaluator=self.evaluator, model_trainer=self.trainer, individual_importance_calculator=self. individual_importance_calculator, predictor=self.predictor, subsets=self.subsets, protected_groups_generator=self.protected_groups_generator, cohort_hash=self.cohort_hash)
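# --- Hedged sketch (illustrative, not part of the source) ---
# The experiment config keys this initialize_components() reads; values shown
# are hypothetical placeholders, not project defaults.
#
#   config = {
#       "temporal_config": {...},                      # passed to Timechop(**...)
#       "cohort_config": {"name": "...", "query": "..."},
#       "label_config": {"name": "...", "query": "...",
#                        "include_missing_labels_in_train_as": False},
#       "bias_audit_config": {...},                    # optional; enables ProtectedGroupsGenerator
#       "scoring": {"subsets": [...],
#                   "testing_metric_groups": [...],
#                   "training_metric_groups": [...]},
#       "feature_group_definition": {"all": [True]},
#       "feature_group_strategies": ["all"],
#       "model_group_keys": [...],
#       "individual_importance": {"n_ranks": 5, "methods": ["uniform"]},
#       "prediction": {"rank_tiebreaker": "worst"},
#       "user_metadata": {},
#   }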
def initialize_components(self):
    split_config = self.config['temporal_config']
    self.chopper = Timechop(**split_config)

    cohort_config = self.config.get('cohort_config', {})
    if 'query' in cohort_config:
        self.state_table_generator = StateTableGeneratorFromQuery(
            experiment_hash=self.experiment_hash,
            db_engine=self.db_engine,
            query=cohort_config['query'],
        )
    elif 'entities_table' in cohort_config:
        self.state_table_generator = StateTableGeneratorFromEntities(
            experiment_hash=self.experiment_hash,
            db_engine=self.db_engine,
            entities_table=cohort_config['entities_table'],
        )
    elif 'dense_states' in cohort_config:
        self.state_table_generator = StateTableGeneratorFromDense(
            experiment_hash=self.experiment_hash,
            db_engine=self.db_engine,
            dense_state_table=cohort_config['dense_states']['table_name'],
        )
    else:
        logging.warning(
            'cohort_config missing or unrecognized. Without a cohort, you will '
            'not be able to make matrices or perform feature imputation.'
        )
        self.state_table_generator = StateTableGeneratorNoOp()

    if 'label_config' in self.config:
        self.label_generator = LabelGenerator(
            label_name=self.config['label_config'].get('name', None),
            query=self.config['label_config']['query'],
            db_engine=self.db_engine,
        )
    else:
        self.label_generator = LabelGeneratorNoOp()
        logging.warning(
            'label_config missing or unrecognized. Without labels, you will '
            'not be able to make matrices.'
        )

    self.feature_dictionary_creator = FeatureDictionaryCreator(
        features_schema_name=self.features_schema_name,
        db_engine=self.db_engine,
    )
    self.feature_generator = FeatureGenerator(
        features_schema_name=self.features_schema_name,
        replace=self.replace,
        db_engine=self.db_engine,
        feature_start_time=split_config['feature_start_time'],
    )
    self.feature_group_creator = FeatureGroupCreator(
        self.config.get('feature_group_definition', {'all': [True]})
    )
    self.feature_group_mixer = FeatureGroupMixer(
        self.config.get('feature_group_strategies', ['all'])
    )
    self.planner = Planner(
        feature_start_time=dt_from_str(split_config['feature_start_time']),
        label_names=[self.config.get('label_config', {}).get('name', DEFAULT_LABEL_NAME)],
        label_types=['binary'],
        cohort_name=self.config.get('cohort_config', {}).get('name', None),
        states=self.config.get('cohort_config', {})
        .get('dense_states', {})
        .get('state_filters', []),
        user_metadata=self.config.get('user_metadata', {}),
    )
    self.matrix_builder = MatrixBuilder(
        db_config={
            'features_schema_name': self.features_schema_name,
            'labels_schema_name': 'public',
            'labels_table_name': self.labels_table_name,
            # TODO: have planner/builder take state table later on, so we
            # can grab it from the StateTableGenerator instead of
            # duplicating it here
            'sparse_state_table_name': self.sparse_states_table_name,
        },
        matrix_storage_engine=self.matrix_storage_engine,
        include_missing_labels_in_train_as=self.config.get('label_config', {})
        .get('include_missing_labels_in_train_as', None),
        engine=self.db_engine,
        replace=self.replace,
    )
    self.trainer = ModelTrainer(
        experiment_hash=self.experiment_hash,
        model_storage_engine=self.model_storage_engine,
        model_grouper=ModelGrouper(self.config.get('model_group_keys', [])),
        db_engine=self.db_engine,
        replace=self.replace,
    )
    self.tester = ModelTester(
        model_storage_engine=self.model_storage_engine,
        matrix_storage_engine=self.matrix_storage_engine,
        replace=self.replace,
        db_engine=self.db_engine,
        individual_importance_config=self.config.get('individual_importance', {}),
        evaluator_config=self.config.get('scoring', {}),
    )
def initialize_components(self): split_config = self.config["temporal_config"] self.chopper = Timechop(**split_config) cohort_config = self.config.get("cohort_config", {}) if "query" in cohort_config: self.cohort_table_generator = CohortTableGenerator( cohort_table_name=self.cohort_table_name, db_engine=self.db_engine, query=cohort_config["query"], replace=self.replace ) else: logging.warning( "cohort_config missing or unrecognized. Without a cohort, " "you will not be able to make matrices or perform feature imputation." ) self.cohort_table_generator = CohortTableGeneratorNoOp() if "label_config" in self.config: self.label_generator = LabelGenerator( label_name=self.config["label_config"].get("name", None), query=self.config["label_config"]["query"], replace=self.replace, db_engine=self.db_engine, ) else: self.label_generator = LabelGeneratorNoOp() logging.warning( "label_config missing or unrecognized. Without labels, " "you will not be able to make matrices." ) self.feature_dictionary_creator = FeatureDictionaryCreator( features_schema_name=self.features_schema_name, db_engine=self.db_engine ) self.feature_generator = FeatureGenerator( features_schema_name=self.features_schema_name, replace=self.replace, db_engine=self.db_engine, feature_start_time=split_config["feature_start_time"], materialize_subquery_fromobjs=self.materialize_subquery_fromobjs ) self.feature_group_creator = FeatureGroupCreator( self.config.get("feature_group_definition", {"all": [True]}) ) self.feature_group_mixer = FeatureGroupMixer( self.config.get("feature_group_strategies", ["all"]) ) self.planner = Planner( feature_start_time=dt_from_str(split_config["feature_start_time"]), label_names=[ self.config.get("label_config", {}).get("name", DEFAULT_LABEL_NAME) ], label_types=["binary"], cohort_names=[self.config.get("cohort_config", {}).get("name", None)], user_metadata=self.config.get("user_metadata", {}), ) self.matrix_builder = MatrixBuilder( db_config={ "features_schema_name": self.features_schema_name, "labels_schema_name": "public", "labels_table_name": self.labels_table_name, "cohort_table_name": self.cohort_table_name, }, matrix_storage_engine=self.matrix_storage_engine, experiment_hash=self.experiment_hash, include_missing_labels_in_train_as=self.config.get("label_config", {}).get( "include_missing_labels_in_train_as", None ), engine=self.db_engine, replace=self.replace, ) self.trainer = ModelTrainer( experiment_hash=self.experiment_hash, model_storage_engine=self.model_storage_engine, model_grouper=ModelGrouper(self.config.get("model_group_keys", [])), db_engine=self.db_engine, replace=self.replace, ) self.tester = ModelTester( model_storage_engine=self.model_storage_engine, matrix_storage_engine=self.matrix_storage_engine, replace=self.replace, db_engine=self.db_engine, individual_importance_config=self.config.get("individual_importance", {}), evaluator_config=self.config.get("scoring", {}), )
def __init__(
    self,
    feature_start_time,
    feature_end_time,
    label_start_time,
    label_end_time,
    model_update_frequency,
    training_as_of_date_frequencies,
    max_training_histories,
    training_label_timespans,
    test_as_of_date_frequencies,
    test_durations,
    test_label_timespans,
):
    '''
    Date strings should follow the format `YYYY-MM-DD`. Date intervals should
    be strings in the Postgres interval input format.

    This class is often used within the Triage experiment pipeline, and
    initialized using parameters from a Triage
    [experiment config](../../../experiments/experiment-config/#time-splitting)

    Arguments:
        feature_start_time (str): Earliest date included in any feature
        feature_end_time (str): Day after the last feature date (all data
            included in features is before this date)
        label_start_time (str): Earliest date for which labels are available
        label_end_time (str): Day AFTER the last label date (all data in any
            label is before this date)
        model_update_frequency (str): How frequently to retrain models
        training_as_of_date_frequencies (str): Time between rows for the same
            entity in the train matrix
        max_training_histories (str): Interval specifying how much history
            for each entity to train on
        training_label_timespans (str): How much time is included in a label
            in the train matrix
        test_as_of_date_frequencies (str): Time between rows for the same
            entity in the test matrix
        test_durations (str): How long into the future to make predictions
            for each entity; controls the length of time included in a test
            matrix
        test_label_timespans (str): How much time is included in a label in
            the test matrix
    '''
    self.feature_start_time = dt_from_str(feature_start_time)
    self.feature_end_time = dt_from_str(feature_end_time)
    if self.feature_start_time > self.feature_end_time:
        raise ValueError("Feature start time after feature end time.")

    self.label_start_time = dt_from_str(label_start_time)
    self.label_end_time = dt_from_str(label_end_time)
    if self.label_start_time > self.label_end_time:
        raise ValueError("Label start time after label end time.")

    self.model_update_frequency = convert_str_to_relativedelta(model_update_frequency)
    self.training_as_of_date_frequencies = utils.convert_to_list(
        training_as_of_date_frequencies
    )
    self.test_as_of_date_frequencies = utils.convert_to_list(
        test_as_of_date_frequencies
    )
    self.max_training_histories = utils.convert_to_list(max_training_histories)
    self.test_durations = utils.convert_to_list(test_durations)
    self.training_label_timespans = utils.convert_to_list(training_label_timespans)
    self.test_label_timespans = utils.convert_to_list(test_label_timespans)
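# --- Hedged usage sketch (illustrative, not part of the source) ---
# A plausible Timechop configuration for yearly retraining with one-year
# labels; the dates and intervals below are hypothetical examples, and the
# import path is assumed from the Triage package layout.
#
#   from triage.component.timechop import Timechop
#
#   chopper = Timechop(
#       feature_start_time="2010-01-01",
#       feature_end_time="2018-01-01",
#       label_start_time="2012-01-01",
#       label_end_time="2018-01-01",
#       model_update_frequency="1year",
#       training_as_of_date_frequencies="1month",
#       max_training_histories="5years",
#       training_label_timespans="1year",
#       test_as_of_date_frequencies="1month",
#       test_durations="3months",
#       test_label_timespans="1year",
#   )
#   splits = chopper.chop_time()  # list of train/test split definitions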
def predict_forward_with_existed_model(db_engine, project_path, model_id, as_of_date):
    """Predict forward for a given model_id and as_of_date and store the
    predictions in the database.

    Args:
        db_engine (sqlalchemy.engine.Engine)
        project_path (str) path to the project storage (matrices and models)
        model_id (int) The id of a given model in the database
        as_of_date (str) a date string like "YYYY-MM-DD"
    """
    logger.spam("In PREDICT LIST................")
    upgrade_db(db_engine=db_engine)
    project_storage = ProjectStorage(project_path)
    matrix_storage_engine = project_storage.matrix_storage_engine()

    # 1. Get feature and cohort config from database
    (train_matrix_uuid, matrix_metadata) = train_matrix_info_from_model_id(db_engine, model_id)
    experiment_config = experiment_config_from_model_id(db_engine, model_id)

    # 2. Generate cohort
    cohort_table_name = f"triage_production.cohort_{experiment_config['cohort_config']['name']}"
    cohort_table_generator = EntityDateTableGenerator(
        db_engine=db_engine,
        query=experiment_config['cohort_config']['query'],
        entity_date_table_name=cohort_table_name,
    )
    cohort_table_generator.generate_entity_date_table(
        as_of_dates=[dt_from_str(as_of_date)]
    )

    # 3. Generate feature aggregations
    feature_generator = FeatureGenerator(
        db_engine=db_engine,
        features_schema_name="triage_production",
        feature_start_time=experiment_config['temporal_config']['feature_start_time'],
    )
    collate_aggregations = feature_generator.aggregations(
        feature_aggregation_config=experiment_config['feature_aggregations'],
        feature_dates=[as_of_date],
        state_table=cohort_table_name,
    )
    feature_generator.process_table_tasks(
        feature_generator.generate_all_table_tasks(
            collate_aggregations, task_type='aggregation'
        )
    )

    # 4. Reconstruct feature dictionary from feature_names and generate imputation
    reconstructed_feature_dict = FeatureGroup()
    imputation_table_tasks = OrderedDict()
    for aggregation in collate_aggregations:
        feature_group, feature_names = get_feature_names(aggregation, matrix_metadata)
        reconstructed_feature_dict[feature_group] = feature_names

        # make sure that features imputed in training are also imputed in production
        features_imputed_in_train = get_feature_needs_imputation_in_train(
            aggregation, feature_names
        )
        features_imputed_in_production = get_feature_needs_imputation_in_production(
            aggregation, db_engine
        )
        total_impute_cols = set(features_imputed_in_production) | set(
            features_imputed_in_train
        )
        total_nonimpute_cols = (
            set(f for f in set(feature_names) if '_imp' not in f) - total_impute_cols
        )
        task_generator = feature_generator._generate_imp_table_tasks_for
        imputation_table_tasks.update(
            task_generator(
                aggregation,
                impute_cols=list(total_impute_cols),
                nonimpute_cols=list(total_nonimpute_cols),
            )
        )
    feature_generator.process_table_tasks(imputation_table_tasks)

    # 5. Build matrix
    db_config = {
        "features_schema_name": "triage_production",
        "labels_schema_name": "public",
        "cohort_table_name": cohort_table_name,
    }
    matrix_builder = MatrixBuilder(
        db_config=db_config,
        matrix_storage_engine=matrix_storage_engine,
        engine=db_engine,
        experiment_hash=None,
        replace=True,
    )
    feature_start_time = experiment_config['temporal_config']['feature_start_time']
    label_name = experiment_config['label_config']['name']
    label_type = 'binary'
    cohort_name = experiment_config['cohort_config']['name']
    user_metadata = experiment_config['user_metadata']

    # use timechop to get the time definition for production
    temporal_config = experiment_config["temporal_config"]
    temporal_config.update(temporal_params_from_matrix_metadata(db_engine, model_id))
    timechopper = Timechop(**temporal_config)
    prod_definitions = timechopper.define_test_matrices(
        train_test_split_time=dt_from_str(as_of_date),
        test_duration=temporal_config['test_durations'][0],
        test_label_timespan=temporal_config['test_label_timespans'][0],
    )
    matrix_metadata = Planner.make_metadata(
        prod_definitions[-1],
        reconstructed_feature_dict,
        label_name,
        label_type,
        cohort_name,
        'production',
        feature_start_time,
        user_metadata,
    )
    matrix_metadata['matrix_id'] = str(as_of_date) + f'_model_id_{model_id}' + '_risklist'
    matrix_uuid = filename_friendly_hash(matrix_metadata)
    matrix_builder.build_matrix(
        as_of_times=[as_of_date],
        label_name=label_name,
        label_type=label_type,
        feature_dictionary=reconstructed_feature_dict,
        matrix_metadata=matrix_metadata,
        matrix_uuid=matrix_uuid,
        matrix_type="production",
    )

    # 6. Predict the risk score for production
    predictor = Predictor(
        model_storage_engine=project_storage.model_storage_engine(),
        db_engine=db_engine,
        rank_order='best',
    )
    predictor.predict(
        model_id=model_id,
        matrix_store=matrix_storage_engine.get_store(matrix_uuid),
        misc_db_parameters={},
        train_matrix_columns=matrix_storage_engine.get_store(train_matrix_uuid).columns(),
    )
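# --- Hedged usage sketch (illustrative, not part of the source) ---
# Scores a cohort as of a new date with an already-trained model; the engine
# URL, project path, model_id, and import path are hypothetical assumptions.
#
#   from sqlalchemy import create_engine
#   # from triage.predictlist import predict_forward_with_existed_model  # assumed import path
#
#   db_engine = create_engine("postgresql://user:pass@host/dbname")
#   predict_forward_with_existed_model(
#       db_engine=db_engine,
#       project_path="s3://my-bucket/triage-project",
#       model_id=1234,
#       as_of_date="2021-06-01",
#   )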
def predict(self, prediction_date):
    """Predict forward by creating a matrix with as_of_date = prediction_date
    and applying the retrained model to it.

    Args:
        prediction_date (str)
    """
    cohort_table_name = f"triage_production.cohort_{self.experiment_config['cohort_config']['name']}_predict"

    # 1. Generate cohort
    self.generate_entity_date_table(prediction_date, cohort_table_name)

    # 2. Generate feature aggregations
    collate_aggregations = self.get_collate_aggregations(prediction_date, cohort_table_name)
    self.feature_generator.process_table_tasks(
        self.feature_generator.generate_all_table_tasks(
            collate_aggregations, task_type='aggregation'
        )
    )

    # 3. Reconstruct feature dictionary from feature_names and generate imputation
    reconstructed_feature_dict, imputation_table_tasks = self.get_feature_dict_and_imputation_task(
        collate_aggregations, self.retrain_model_id
    )
    self.feature_generator.process_table_tasks(imputation_table_tasks)

    # 4. Build matrix
    db_config = {
        "features_schema_name": "triage_production",
        "labels_schema_name": "public",
        "cohort_table_name": cohort_table_name,
    }
    matrix_builder = MatrixBuilder(
        db_config=db_config,
        matrix_storage_engine=self.matrix_storage_engine,
        engine=self.db_engine,
        experiment_hash=None,
        replace=True,
    )
    # use timechop to get the time definition for production
    temporal_config = self.get_temporal_config_for_retrain(dt_from_str(prediction_date))
    timechopper = Timechop(**temporal_config)
    retrain_config = get_retrain_config_from_model_id(self.db_engine, self.retrain_model_id)
    prod_definitions = timechopper.define_test_matrices(
        train_test_split_time=dt_from_str(prediction_date),
        test_duration=retrain_config['test_duration'],
        test_label_timespan=retrain_config['test_label_timespan'],
    )
    last_split_definition = prod_definitions[-1]
    matrix_metadata = Planner.make_metadata(
        matrix_definition=last_split_definition,
        feature_dictionary=reconstructed_feature_dict,
        label_name=self.label_name,
        label_type='binary',
        cohort_name=self.cohort_name,
        matrix_type='production',
        feature_start_time=self.feature_start_time,
        user_metadata=self.user_metadata,
    )
    matrix_metadata['matrix_id'] = str(prediction_date) + f'_model_id_{self.retrain_model_id}' + '_risklist'
    matrix_uuid = filename_friendly_hash(matrix_metadata)
    matrix_builder.build_matrix(
        as_of_times=[prediction_date],
        label_name=self.label_name,
        label_type='binary',
        feature_dictionary=reconstructed_feature_dict,
        matrix_metadata=matrix_metadata,
        matrix_uuid=matrix_uuid,
        matrix_type="production",
    )

    # 5. Predict the risk score for production
    predictor = Predictor(
        model_storage_engine=self.project_storage.model_storage_engine(),
        db_engine=self.db_engine,
        rank_order='best',
    )
    predictor.predict(
        model_id=self.retrain_model_id,
        matrix_store=self.matrix_storage_engine.get_store(matrix_uuid),
        misc_db_parameters={},
        train_matrix_columns=self.matrix_storage_engine.get_store(self.retrain_matrix_uuid).columns(),
    )
    self.predict_matrix_uuid = matrix_uuid
def retrain(self, prediction_date):
    """Retrain a model by going back one split from prediction_date, so the
    as_of_date for training is (prediction_date - training_label_timespan).

    Args:
        prediction_date (str)
    """
    # Retrain config and hash
    retrain_config = {
        "model_group_id": self.model_group_id,
        "prediction_date": prediction_date,
        "test_label_timespan": self.test_label_timespan,
        "test_duration": self.test_duration,
    }
    self.retrain_hash = save_retrain_and_get_hash(retrain_config, self.db_engine)

    with get_for_update(self.db_engine, Retrain, self.retrain_hash) as retrain:
        retrain.prediction_date = prediction_date

    # Timechop
    prediction_date = dt_from_str(prediction_date)
    temporal_config = self.get_temporal_config_for_retrain(prediction_date)
    timechopper = Timechop(**temporal_config)
    chops = timechopper.chop_time()
    assert len(chops) == 1
    chops_train_matrix = chops[0]['train_matrix']
    as_of_date = datetime.strftime(chops_train_matrix['last_as_of_time'], "%Y-%m-%d")
    retrain_definition = {
        'first_as_of_time': chops_train_matrix['first_as_of_time'],
        'last_as_of_time': chops_train_matrix['last_as_of_time'],
        'matrix_info_end_time': chops_train_matrix['matrix_info_end_time'],
        'as_of_times': [as_of_date],
        'training_label_timespan': chops_train_matrix['training_label_timespan'],
        'max_training_history': chops_train_matrix['max_training_history'],
        'training_as_of_date_frequency': chops_train_matrix['training_as_of_date_frequency'],
    }

    # Set up a TriageRun for this retrain
    run = TriageRun(
        start_time=datetime.now(),
        git_hash=infer_git_hash(),
        triage_version=infer_triage_version(),
        python_version=infer_python_version(),
        run_type="retrain",
        run_hash=self.retrain_hash,
        last_updated_time=datetime.now(),
        current_status=TriageRunStatus.started,
        installed_libraries=infer_installed_libraries(),
        platform=platform.platform(),
        os_user=getpass.getuser(),
        working_directory=os.getcwd(),
        ec2_instance_type=infer_ec2_instance_type(),
        log_location=infer_log_location(),
        experiment_class_path=classpath(self.__class__),
        random_seed=retrieve_experiment_seed_from_run_id(self.db_engine, self.triage_run_id),
    )
    run_id = None
    with scoped_session(self.db_engine) as session:
        session.add(run)
        session.commit()
        run_id = run.run_id
    if not run_id:
        raise ValueError("Failed to retrieve run_id from saved row")

    # set ModelTrainer's run_id and experiment_hash for the retrain run
    self.model_trainer.run_id = run_id
    self.model_trainer.experiment_hash = self.retrain_hash

    # 1. Generate all labels
    self.generate_all_labels(as_of_date)
    record_labels_table_name(run_id, self.db_engine, self.labels_table_name)

    # 2. Generate cohort
    cohort_table_name = f"triage_production.cohort_{self.experiment_config['cohort_config']['name']}_retrain"
    self.generate_entity_date_table(as_of_date, cohort_table_name)
    record_cohort_table_name(run_id, self.db_engine, cohort_table_name)

    # 3. Generate feature aggregations
    collate_aggregations = self.get_collate_aggregations(as_of_date, cohort_table_name)
    feature_aggregation_table_tasks = self.feature_generator.generate_all_table_tasks(
        collate_aggregations, task_type='aggregation'
    )
    self.feature_generator.process_table_tasks(feature_aggregation_table_tasks)

    # 4. Reconstruct feature dictionary from feature_names and generate imputation
    reconstructed_feature_dict, imputation_table_tasks = self.get_feature_dict_and_imputation_task(
        collate_aggregations,
        self.model_group_info['model_id_last_split'],
    )
    feature_group_creator = FeatureGroupCreator(self.experiment_config['feature_group_definition'])
    feature_group_mixer = FeatureGroupMixer(["all"])
    feature_group_dict = feature_group_mixer.generate(
        feature_group_creator.subsets(reconstructed_feature_dict)
    )[0]
    self.feature_generator.process_table_tasks(imputation_table_tasks)

    # 5. Build new matrix
    db_config = {
        "features_schema_name": "triage_production",
        "labels_schema_name": "public",
        "cohort_table_name": cohort_table_name,
        "labels_table_name": self.labels_table_name,
    }
    record_matrix_building_started(run_id, self.db_engine)
    matrix_builder = MatrixBuilder(
        db_config=db_config,
        matrix_storage_engine=self.matrix_storage_engine,
        engine=self.db_engine,
        experiment_hash=None,
        replace=True,
    )
    new_matrix_metadata = Planner.make_metadata(
        matrix_definition=retrain_definition,
        feature_dictionary=feature_group_dict,
        label_name=self.label_name,
        label_type='binary',
        cohort_name=self.cohort_name,
        matrix_type='train',
        feature_start_time=dt_from_str(self.feature_start_time),
        user_metadata=self.user_metadata,
    )
    new_matrix_metadata['matrix_id'] = "_".join([
        self.label_name,
        'binary',
        str(as_of_date),
        'retrain',
    ])
    matrix_uuid = filename_friendly_hash(new_matrix_metadata)
    matrix_builder.build_matrix(
        as_of_times=[as_of_date],
        label_name=self.label_name,
        label_type='binary',
        feature_dictionary=feature_group_dict,
        matrix_metadata=new_matrix_metadata,
        matrix_uuid=matrix_uuid,
        matrix_type="train",
    )

    retrain_model_comment = 'retrain_' + str(datetime.now())
    misc_db_parameters = {
        'train_end_time': dt_from_str(as_of_date),
        'test': False,
        'train_matrix_uuid': matrix_uuid,
        'training_label_timespan': self.training_label_timespan,
        'model_comment': retrain_model_comment,
    }

    # get the random seed from the last split
    last_split_train_matrix_uuid, last_split_matrix_metadata = train_matrix_info_from_model_id(
        self.db_engine,
        model_id=self.model_group_info['model_id_last_split'],
    )
    random_seed = self.model_trainer.get_or_generate_random_seed(
        model_group_id=self.model_group_id,
        matrix_metadata=last_split_matrix_metadata,
        train_matrix_uuid=last_split_train_matrix_uuid,
    )

    # create retrain model hash
    retrain_model_hash = self.model_trainer._model_hash(
        self.matrix_storage_engine.get_store(matrix_uuid).metadata,
        class_path=self.model_group_info['model_type'],
        parameters=self.model_group_info['hyperparameters'],
        random_seed=random_seed,
    )

    associate_models_with_retrain(self.retrain_hash, (retrain_model_hash,), self.db_engine)

    record_model_building_started(run_id, self.db_engine)
    retrain_model_id = self.model_trainer.process_train_task(
        matrix_store=self.matrix_storage_engine.get_store(matrix_uuid),
        class_path=self.model_group_info['model_type'],
        parameters=self.model_group_info['hyperparameters'],
        model_hash=retrain_model_hash,
        misc_db_parameters=misc_db_parameters,
        random_seed=random_seed,
        retrain=True,
        model_group_id=self.model_group_id,
    )

    self.retrain_model_hash = retrieve_model_hash_from_id(self.db_engine, retrain_model_id)
    self.retrain_matrix_uuid = matrix_uuid
    self.retrain_model_id = retrain_model_id
    return {
        'retrain_model_comment': retrain_model_comment,
        'retrain_model_id': retrain_model_id,
    }
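# --- Hedged usage sketch (illustrative, not part of the source) ---
# Assumes these methods live on a Retrainer-like object constructed elsewhere
# with a db_engine, a project path, and a model_group_id; argument values are
# hypothetical.
#
#   retrain_info = retrainer.retrain("2021-06-01")   # retrain on the split ending before this date
#   retrainer.predict("2021-06-01")                  # score the cohort as of this date with the retrained model
#   print(retrain_info['retrain_model_id'])          # id of the newly trained model
#   print(retrainer.predict_matrix_uuid)             # uuid of the production matrix that was built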