Example #1
def test_fill_timechop_config_missing():
    remove_keys = [
        'model_update_frequency',
        'training_as_of_date_frequencies',
        'test_as_of_date_frequencies',
        'max_training_histories',
        'test_durations',
        'feature_start_time',
        'feature_end_time',
        'label_start_time',
        'label_end_time',
        'training_label_timespans',
        'test_label_timespans'
    ]

    # ensure redundant keys properly raise errors
    config = sample_config()
    config['temporal_config']['label_timespans'] = '1y'
    with pytest.raises(KeyError):
        timechop_config = fill_timechop_config_missing(config, None)

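    # with every temporal key removed, sane defaults should be filled in,
    # and the date bounds should be inferred from the source data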
    with testing.postgresql.Postgresql() as postgresql:
        db_engine = create_engine(postgresql.url())
        ensure_db(db_engine)
        populate_source_data(db_engine)
        config = sample_config()

        for key in remove_keys:
            config['temporal_config'].pop(key)
        config['temporal_config']['label_timespans'] = '1y'

        timechop_config = fill_timechop_config_missing(config, db_engine)

        assert timechop_config['model_update_frequency'] == '100y'
        assert timechop_config['training_as_of_date_frequencies'] == '100y'
        assert timechop_config['test_as_of_date_frequencies'] == '100y'
        assert timechop_config['max_training_histories'] == '0d'
        assert timechop_config['test_durations'] == '0d'
        assert timechop_config['training_label_timespans'] == '1y'
        assert timechop_config['test_label_timespans'] == '1y'
        assert 'label_timespans' not in timechop_config.keys()
        assert timechop_config['feature_start_time'] == '2010-10-01'
        assert timechop_config['feature_end_time'] == '2013-10-01'
        assert timechop_config['label_start_time'] == '2010-10-01'
        assert timechop_config['label_end_time'] == '2013-10-01'
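For reference, the defaulting behavior this test exercises can be sketched as follows. This is a simplified, hypothetical helper (the name fill_timechop_defaults_sketch is not from Triage); the real fill_timechop_config_missing additionally infers the feature/label start and end times from the source data in the database, which is why the test needs a populated Postgres instance.

# A minimal sketch, not the real implementation: fill missing temporal keys
# with the defaults the test above asserts, and expand a bare
# 'label_timespans' into its training/test variants.
def fill_timechop_defaults_sketch(temporal_config):
    defaults = {
        'model_update_frequency': '100y',
        'training_as_of_date_frequencies': '100y',
        'test_as_of_date_frequencies': '100y',
        'max_training_histories': '0d',
        'test_durations': '0d',
    }
    filled = dict(temporal_config)
    for key, value in defaults.items():
        filled.setdefault(key, value)
    if 'label_timespans' in filled:
        timespan = filled.pop('label_timespans')
        filled.setdefault('training_label_timespans', timespan)
        filled.setdefault('test_label_timespans', timespan)
    return filled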
Example #2
    def __init__(
        self,
        config,
        db_engine,
        project_path=None,
        matrix_storage_class=CSVMatrixStore,
        replace=True,
        cleanup=False,
        cleanup_timeout=None,
        materialize_subquery_fromobjs=True,
        features_ignore_cohort=False,
        profile=False,
        save_predictions=True,
        skip_validation=False,
        partial_run=False,
    ):
        # For a partial run, skip validation and avoid cleaning up
        # we'll also skip filling default config values below
        if partial_run:
            cleanup = False
            skip_validation = True

        experiment_kwargs = bind_kwargs(
            self.__class__, **{
                key: value
                for (key, value) in locals().items()
                if key not in {'db_engine', 'config', 'self'}
            })

        self._check_config_version(config)
        self.config = config

        self.config['random_seed'] = self.config.get('random_seed',
                                                     random.randint(1, 10**7))

        random.seed(self.config['random_seed'])

        self.project_storage = ProjectStorage(project_path)
        self.model_storage_engine = ModelStorageEngine(self.project_storage)
        self.matrix_storage_engine = MatrixStorageEngine(
            self.project_storage, matrix_storage_class)
        self.project_path = project_path
        self.replace = replace
        self.save_predictions = save_predictions
        self.skip_validation = skip_validation
        self.db_engine = db_engine
        results_schema.upgrade_if_clean(dburl=self.db_engine.url)

        self.features_schema_name = "features"
        self.materialize_subquery_fromobjs = materialize_subquery_fromobjs
        self.features_ignore_cohort = features_ignore_cohort

        # only fill default values for full runs
        if not partial_run:
            ## Defaults to sane values
            self.config['temporal_config'] = fill_timechop_config_missing(
                self.config, self.db_engine)
            ## Defaults to all the entities found in the feature_aggregation's from_obj
            self.config['cohort_config'] = fill_cohort_config_missing(
                self.config)
            ## Defaults to all the feature_aggregation's prefixes
            self.config[
                'feature_group_definition'] = fill_feature_group_definition(
                    self.config)

        grid_config = fill_model_grid_presets(self.config)
        self.config.pop('model_grid_preset', None)
        if grid_config is not None:
            self.config['grid_config'] = grid_config

        ###################### RUBICON ######################

        self.experiment_hash = save_experiment_and_get_hash(
            self.config, self.db_engine)
        self.run_id = initialize_tracking_and_get_run_id(
            self.experiment_hash,
            experiment_class_path=classpath(self.__class__),
            experiment_kwargs=experiment_kwargs,
            db_engine=self.db_engine)
        self.initialize_components()

        self.cleanup = cleanup
        if self.cleanup:
            logging.info(
                "cleanup is set to True, so intermediate tables (labels and cohort) "
                "will be removed after matrix creation and subset tables will be "
                "removed after model training and testing")
        else:
            logging.info(
                "cleanup is set to False, so intermediate tables (labels, cohort, and subsets) "
                "will not be removed")
        self.cleanup_timeout = (self.cleanup_timeout if cleanup_timeout is None
                                else cleanup_timeout)
        self.profile = profile
        logging.info("Generate profiling stats? (profile option): %s",
                     self.profile)
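The experiment_kwargs capture at the top of this constructor depends on reading locals() before any other local names are bound, so only the keyword arguments (minus self, config and db_engine) get recorded. A minimal, self-contained sketch of that pattern (not Triage's bind_kwargs helper, and with made-up argument names):

# Minimal sketch of the kwargs-capture pattern used above: read locals() at
# the very top of __init__, before any other locals exist, and drop the
# arguments you don't want to record.
class Recorded:
    def __init__(self, size=3, color="red", replace=True, secret=None):
        self.recorded_kwargs = {
            key: value
            for key, value in locals().items()
            if key not in {"self", "secret"}
        }

r = Recorded(color="blue")
print(r.recorded_kwargs)  # {'size': 3, 'color': 'blue', 'replace': True}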
Example #3
    def __init__(
        self,
        config,
        db_engine,
        project_path=None,
        matrix_storage_class=CSVMatrixStore,
        replace=True,
        cleanup=False,
        cleanup_timeout=None,
        materialize_subquery_fromobjs=True,
        features_ignore_cohort=False,
        additional_bigtrain_classnames=None,
        profile=False,
        save_predictions=True,
        skip_validation=False,
        partial_run=False,
    ):
        # For a partial run, skip validation and avoid cleaning up
        # we'll also skip filling default config values below
        if partial_run:
            cleanup = False
            skip_validation = True

        experiment_kwargs = bind_kwargs(
            self.__class__,
            **{
                key: value
                for (key, value) in locals().items()
                if key not in {"db_engine", "config", "self"}
            },
        )

        self._check_config_version(config)
        self.config = config

        if self.config.get("cohort_config") is not None:
            self.config["cohort_config"] = load_query_if_needed(
                self.config["cohort_config"]
            )
        if self.config.get("label_config") is not None:
            self.config["label_config"] = load_query_if_needed(
                self.config["label_config"]
            )

        self.project_storage = ProjectStorage(project_path)
        self.model_storage_engine = ModelStorageEngine(self.project_storage)
        self.matrix_storage_engine = MatrixStorageEngine(
            self.project_storage, matrix_storage_class
        )
        self.project_path = project_path
        logger.verbose(
            f"Matrices and trained models will be saved in {self.project_path}"
        )
        self.replace = replace
        if self.replace:
            logger.notice(
                f"Replace flag is set to true. Matrices, models, "
                "evaluations and predictions (if they exist) will be replaced"
            )

        self.save_predictions = save_predictions
        if not self.save_predictions:
            logger.notice(
                f"Save predictions flag is set to false. "
                "Individual predictions won't be stored in the predictions "
                "table. This will decrease both the running time "
                "of an experiment and also decrease the space needed in the db"
            )

        self.skip_validation = skip_validation
        if self.skip_validation:
            logger.notice(
                f"Warning: Skip validation flag is set to true. "
                "The experiment config file specified won't be validated. "
                "This will reduce (a little) the running time of the experiment, "
                "but has some potential risks, e.g. the experiment could fail"
                "after some time due to some misconfiguration. Proceed with care."
            )

        self.db_engine = db_engine
        results_schema.upgrade_if_clean(dburl=self.db_engine.url)

        self.features_schema_name = "features"

        self.materialize_subquery_fromobjs = materialize_subquery_fromobjs
        if not self.materialize_subquery_fromobjs:
            logger.notice(
                "Materialize from_objs is set to false. "
                "The from_objs will be calculated on the fly every time."
            )

        self.features_ignore_cohort = features_ignore_cohort
        if self.features_ignore_cohort:
            logger.notice(
                "Features will be calculated for all the entities "
                "(i.e. ignoring cohort) this setting will have the effect "
                "that more db space will be used, but potentially could save "
                "time if you are running several similar experiments with "
                "different cohorts."
            )

        self.additional_bigtrain_classnames = additional_bigtrain_classnames
        # only fill default values for full runs
        if not partial_run:
            ## Defaults to sane values
            self.config["temporal_config"] = fill_timechop_config_missing(
                self.config, self.db_engine
            )
            ## Defaults to all the entities found in the feature_aggregation's from_obj
            self.config["cohort_config"] = fill_cohort_config_missing(self.config)
            ## Defaults to all the feature_aggregation's prefixes
            self.config["feature_group_definition"] = fill_feature_group_definition(
                self.config
            )

        grid_config = fill_model_grid_presets(self.config)
        self.config.pop("model_grid_preset", None)
        if grid_config is not None:
            self.config["grid_config"] = grid_config

        if not self.config.get("random_seed", None):
            logger.notice(
                "Random seed not specified. A random seed will be provided. "
                "This could have interesting side effects, "
                "e.g. new models per model group are trained, "
                "tested and evaluated everytime that you run this experiment configuration"
            )

        self.random_seed = self.config.pop("random_seed", random.randint(1, 10**7))

        logger.verbose(
            f"Using random seed [{self.random_seed}] for running the experiment"
        )
        random.seed(self.random_seed)

        ###################### RUBICON ######################

        self.experiment_hash = save_experiment_and_get_hash(self.config, self.db_engine)
        logger.debug(f"Experiment hash [{self.experiment_hash}] assigned")
        self.run_id = initialize_tracking_and_get_run_id(
            self.experiment_hash,
            experiment_class_path=classpath(self.__class__),
            random_seed=self.random_seed,
            experiment_kwargs=experiment_kwargs,
            db_engine=self.db_engine,
        )
        logger.debug(f"Experiment run id [{self.run_id}] assigned")

        self.initialize_components()

        self.cleanup = cleanup
        if self.cleanup:
            logger.notice(
                "Cleanup is set to true, so intermediate tables (labels and cohort) "
                "will be removed after matrix creation and subset tables will be "
                "removed after model training and testing"
            )

        self.cleanup_timeout = (
            self.cleanup_timeout if cleanup_timeout is None else cleanup_timeout
        )

        self.profile = profile
        if self.profile:
            logger.spam("Profiling will be stored using cProfile")
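Note that this version handles the random seed differently from Example #2: the seed is popped out of the config before save_experiment_and_get_hash is called and is kept on the instance instead. A small, self-contained sketch of that pattern, using a hypothetical config dict:

import random

# Minimal sketch of the seed handling above: take random_seed out of the
# config if the user supplied one, otherwise draw a fresh one, then seed the
# global random module so downstream sampling is reproducible.
config = {"model_grid_preset": "quickstart"}  # hypothetical config with no seed
random_seed = config.pop("random_seed", random.randint(1, 10**7))
random.seed(random_seed)
print(f"Using random seed [{random_seed}] for this run")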