Example #1
0
def run_exp(config_file, plot_timechops=True, run_exp=True, n_jobs=1):
    """Set up and optionally run a triage experiment from a config file.

    Args:
        config_file: path to the experiment configuration file.
        plot_timechops: if True, render the timechop visualization first.
        run_exp: if True, actually execute the experiment; otherwise only
            perform setup.
        n_jobs: number of worker processes; values > 1 select the
            multi-core experiment runner (also used for DB processes).
    """
    if plot_timechops:
        visualize_timechop(config_file)

    config, sql_engine, proj_folder = setup_experiment(config_file)

    if run_exp:
        if n_jobs > 1:
            experiment = MultiCoreExperiment(
                config=config,
                db_engine=sql_engine,
                n_processes=n_jobs,
                n_db_processes=n_jobs,
                project_path=proj_folder,
                replace=False,
                cleanup=True
            )
        else:
            experiment = SingleThreadedExperiment(
                config=config,
                db_engine=sql_engine,
                project_path=proj_folder,
                cleanup=True
            )

        st = time.time()
        experiment.run()
        en = time.time()

        # Bug fix: corrected typo 'experiement' -> 'experiment' in output.
        print('Took {} seconds to run the experiment'.format(en - st))
Example #2
0
def test_experiment_tracker(test_engine, project_path):
    """Check ExperimentRun tracking state before and after experiment.run()."""
    experiment = MultiCoreExperiment(
        config=sample_config(),
        db_engine=test_engine,
        project_path=project_path,
        n_processes=4,
    )

    def fetch_run():
        # Re-query so we always observe the latest persisted tracking state.
        return Session(bind=test_engine).query(ExperimentRun).get(experiment.run_id)

    run_record = fetch_run()
    assert run_record.current_status == ExperimentRunStatus.started
    assert run_record.experiment_hash == experiment.experiment_hash
    assert run_record.experiment_class_path == 'triage.experiments.multicore.MultiCoreExperiment'
    assert run_record.platform
    assert run_record.os_user
    assert run_record.installed_libraries
    # Nothing has run yet, so every counter starts at zero.
    for counter in ('matrices_skipped', 'matrices_errored', 'matrices_made',
                    'models_skipped', 'models_errored', 'models_made'):
        assert getattr(run_record, counter) == 0

    experiment.run()
    run_record = fetch_run()
    assert run_record.start_method == "run"
    assert run_record.matrices_made == len(experiment.matrix_build_tasks)
    for counter in ('matrices_skipped', 'matrices_errored',
                    'models_skipped', 'models_errored'):
        assert getattr(run_record, counter) == 0
    expected_model_hashes = [
        task['train_kwargs']['model_hash']
        for batch in experiment._all_train_test_batches()
        for task in batch.tasks
    ]
    assert run_record.models_made == len(expected_model_hashes)
    for timestamp_attr in ('matrix_building_started',
                           'model_building_started',
                           'last_updated_time'):
        assert isinstance(getattr(run_record, timestamp_attr), datetime.datetime)
    assert not run_record.stacktrace
    assert run_record.current_status == ExperimentRunStatus.completed
Example #3
0
 def experiment(self):
     """Build the experiment object (multi-core or single-threaded) from CLI args."""
     self.root.setup()  # Loading configuration (if exists)
     db_url = self.root.db_url
     config = self._load_config()
     db_engine = create_engine(db_url)
     common_kwargs = dict(
         db_engine=db_engine,
         project_path=self.args.project_path,
         config=config,
         replace=self.args.replace,
         materialize_subquery_fromobjs=self.args.materialize_fromobjs,
         features_ignore_cohort=self.args.features_ignore_cohort,
         matrix_storage_class=self.matrix_storage_map[self.args.matrix_format],
         profile=self.args.profile,
         save_predictions=self.args.save_predictions,
         skip_validation=not self.args.validate,
     )
     # Any worker pool larger than one selects the multi-core runner.
     wants_parallelism = self.args.n_db_processes > 1 or self.args.n_processes > 1
     if wants_parallelism:
         return MultiCoreExperiment(
             n_db_processes=self.args.n_db_processes,
             n_processes=self.args.n_processes,
             **common_kwargs,
         )
     return SingleThreadedExperiment(**common_kwargs)
Example #4
0
 def experiment(self):
     """Construct the experiment implementation matching the requested parallelism."""
     self.root.setup()  # Loading configuration (if exists)
     db_url = self.root.db_url
     # NOTE(review): yaml.load without an explicit Loader is deprecated
     # (PyYAML >= 5.1) and unsafe on untrusted input -- confirm whether
     # yaml.safe_load would suffice here.
     config = yaml.load(self.args.config)
     db_engine = create_engine(db_url)
     common_kwargs = {
         "db_engine": db_engine,
         "project_path": self.args.project_path,
         "config": config,
         "replace": self.args.replace,
         "matrix_storage_class": self.matrix_storage_map[self.args.matrix_format],
     }
     multicore = self.args.n_db_processes > 1 or self.args.n_processes > 1
     if multicore:
         return MultiCoreExperiment(
             n_db_processes=self.args.n_db_processes,
             n_processes=self.args.n_processes,
             **common_kwargs,
         )
     return SingleThreadedExperiment(**common_kwargs)
Example #5
0
def test_experiment_tracker(test_engine, project_path):
    """Check TriageRun tracking state before and after experiment.run().

    Relies on the module-level `open_side_effect` fixture to serve config
    files when the experiment reads them through triage.util.conf.open.
    """
    with mock.patch("triage.util.conf.open",
                    side_effect=open_side_effect) as mock_file:
        experiment = MultiCoreExperiment(
            config=sample_config(),
            db_engine=test_engine,
            project_path=project_path,
            n_processes=4,
        )
    # Construction alone should create a tracking row in 'started' state
    # with all progress counters at zero.
    experiment_run = Session(bind=test_engine).query(TriageRun).get(
        experiment.run_id)
    assert experiment_run.current_status == TriageRunStatus.started
    assert experiment_run.run_hash == experiment.experiment_hash
    assert experiment_run.run_type == "experiment"
    assert (experiment_run.experiment_class_path ==
            "triage.experiments.multicore.MultiCoreExperiment")
    assert experiment_run.platform
    assert experiment_run.os_user
    assert experiment_run.installed_libraries
    assert experiment_run.matrices_skipped == 0
    assert experiment_run.matrices_errored == 0
    assert experiment_run.matrices_made == 0
    assert experiment_run.models_skipped == 0
    assert experiment_run.models_errored == 0
    assert experiment_run.models_made == 0

    experiment.run()
    # Re-query to observe the state persisted by the completed run.
    experiment_run = Session(bind=test_engine).query(TriageRun).get(
        experiment.run_id)
    assert experiment_run.start_method == "run"
    assert experiment_run.matrices_made == len(experiment.matrix_build_tasks)
    assert experiment_run.matrices_skipped == 0
    assert experiment_run.matrices_errored == 0
    assert experiment_run.models_skipped == 0
    assert experiment_run.models_errored == 0
    # models_made should equal the total number of train/test tasks batched
    # by the experiment.
    assert experiment_run.models_made == len(
        list(task["train_kwargs"]["model_hash"]
             for batch in experiment._all_train_test_batches()
             for task in batch.tasks))
    assert isinstance(experiment_run.matrix_building_started,
                      datetime.datetime)
    assert isinstance(experiment_run.model_building_started, datetime.datetime)
    assert isinstance(experiment_run.last_updated_time, datetime.datetime)
    assert not experiment_run.stacktrace
    assert experiment_run.current_status == TriageRunStatus.completed
Example #6
0
def test_serializable_engine_check_sqlalchemy_fail():
    """If we pass a vanilla sqlalchemy engine to the experiment we should blow up"""
    with testing.postgresql.Postgresql() as postgresql, TemporaryDirectory() as temp_dir:
        plain_engine = sqlalchemy.create_engine(postgresql.url())
        # The experiment requires a serializable engine wrapper, so handing
        # it a plain sqlalchemy engine must raise TypeError.
        with pytest.raises(TypeError):
            MultiCoreExperiment(
                config=sample_config(),
                db_engine=plain_engine,
                project_path=os.path.join(temp_dir, "inspections"),
            )
Example #7
0
 def experiment(self):
     """Build and return the experiment object selected by the CLI arguments.

     Uses MultiCoreExperiment when more than one worker or DB process is
     requested, otherwise SingleThreadedExperiment.

     Raises:
         Exception: re-raises any error hit while constructing the
             experiment after logging it. (Previously the exception was
             swallowed and the method implicitly returned None, deferring
             the failure to an AttributeError in the caller.)
     """
     self.root.setup()  # Loading configuration (if exists)
     db_url = self.root.db_url
     config = self._load_config()
     db_engine = create_engine(db_url)
     common_kwargs = {
         "db_engine": db_engine,
         "project_path": self.args.project_path,
         "config": config,
         "replace": self.args.replace,
         "materialize_subquery_fromobjs": self.args.materialize_fromobjs,
         "features_ignore_cohort": self.args.features_ignore_cohort,
         "matrix_storage_class": self.matrix_storage_map[self.args.matrix_format],
         "profile": self.args.profile,
         "save_predictions": self.args.save_predictions,
         "skip_validation": not self.args.validate,
     }
     logger.info("Setting up the experiment")
     logger.info(f"Configuration file: {self.args.config}")
     logger.info(f"Results will be stored in DB: {self.root.db_url}")
     logger.info(f"Artifacts will be saved in {self.args.project_path}")
     try:
         if self.args.n_db_processes > 1 or self.args.n_processes > 1:
             experiment = MultiCoreExperiment(
                 n_db_processes=self.args.n_db_processes,
                 n_processes=self.args.n_processes,
                 **common_kwargs,
             )
             logger.info(
                 f"Experiment will run in multi core  mode using {self.args.n_processes} processes and {self.args.n_db_processes} db processes"
             )
         else:
             experiment = SingleThreadedExperiment(**common_kwargs)
             logger.info("Experiment will run in serial fashion")
         return experiment
     except Exception:
         logger.exception("Error occurred while creating the experiment!")
         logger.info(
             f"Experiment [config file: {self.args.config}] failed at creation"
         )
         # Bug fix: propagate the failure instead of silently returning None.
         raise
Example #8
0
 def experiment(self):
     """Return the experiment implementation matching the requested parallelism."""
     db_url = self.root.db_url
     # NOTE(review): yaml.load without an explicit Loader is deprecated and
     # unsafe on untrusted input; confirm whether yaml.safe_load suffices.
     config = yaml.load(self.args.config)
     db_engine = create_engine(db_url)
     common_kwargs = {
         'db_engine': db_engine,
         'project_path': self.args.project_path,
         'config': config,
         'replace': self.args.replace,
     }
     wants_parallelism = self.args.n_db_processes > 1 or self.args.n_processes > 1
     if wants_parallelism:
         return MultiCoreExperiment(
             n_db_processes=self.args.n_db_processes,
             n_processes=self.args.n_processes,
             **common_kwargs
         )
     return SingleThreadedExperiment(**common_kwargs)
Example #9
0
File: run.py  Project: dssg/peeps-chili
def run(config_filename, verbose, replace, predictions, validate_only):
    """Configure logging, assemble the experiment config, and validate/run it."""
    # configure logging
    timestamp = str(datetime.datetime.now()).replace(' ', '_').replace(':', '')
    log_filename = 'logs/modeling_{}'.format(timestamp)
    logging_level = logging.DEBUG if verbose else logging.INFO
    logging.basicConfig(
        format='%(asctime)s %(process)d %(levelname)s: %(message)s',
        level=logging_level,
        handlers=[logging.FileHandler(log_filename),
                  logging.StreamHandler()])

    #    config_filename = 'experiment_config'
    features_directory = 'features'

    # load main experiment config
    # NOTE(review): yaml.load without an explicit Loader is deprecated and
    # unsafe on untrusted input -- consider yaml.safe_load.
    with open('config/{}.yaml'.format(config_filename)) as f:
        experiment_config = yaml.load(f)

    # load feature configs and update experiment config with their contents
    all_feature_aggregations = []
    for filename in os.listdir('config/{}/'.format(features_directory)):
        with open('config/{}/{}'.format(features_directory, filename)) as f:
            all_feature_aggregations.extend(yaml.load(f))
    experiment_config['feature_aggregations'] = all_feature_aggregations

    with open('config/db_default_profile.json') as f:
        DB_CONFIG = json.load(f)

    db_engine = create_engine(
        f"postgresql://{DB_CONFIG['user']}:{DB_CONFIG['pass']}@{DB_CONFIG['host']}/{DB_CONFIG['db']}"
    )

    experiment = MultiCoreExperiment(
        config=experiment_config,
        db_engine=db_engine,
        project_path=PROJECT_PATH,
        replace=replace,
        n_db_processes=4,
        n_processes=40,
        save_predictions=predictions,
    )
    # Always validate; only execute when not in validate-only mode.
    experiment.validate()
    if not validate_only:
        experiment.run()
Example #10
0
def run_experiment(experiment_file, output_path, replace):
    """Load an experiment config from S3, validate it, and run it.

    Args:
        experiment_file: s3:// path of the experiment YAML configuration.
        output_path: project path where matrices and models are stored.
        replace: if True, rebuild matrices/models even when they exist.

    Database connection parameters come from POSTGRES_* environment
    variables; worker counts from NUMBER_OF_PROCESSES / NUMBER_OF_DB_PROCESSES
    (falling back to 12 and 6 when unset or malformed).
    """
    start_time = datetime.datetime.now()
    logging.info(
        f"Reading the file experiment configuration from {experiment_file}")

    # Load the experiment configuration file
    # NOTE(review): yaml.load without an explicit Loader is deprecated and
    # unsafe on untrusted input -- consider yaml.safe_load.
    s3 = s3fs.S3FileSystem()
    with s3.open(experiment_file, 'rb') as f:
        experiment_config = yaml.load(f.read())

    host = os.environ['POSTGRES_HOST']
    user = os.environ['POSTGRES_USER']
    db = os.environ['POSTGRES_DB']
    password = os.environ['POSTGRES_PASSWORD']
    port = os.environ['POSTGRES_PORT']

    db_url = f"postgresql://{user}:{password}@{host}:{port}/{db}"

    # Never log the real password.
    logging.info(
        f"Using the database: postgresql://{user}:XXXXX@{host}:{port}/{db}")

    try:
        n_processes = int(os.environ.get('NUMBER_OF_PROCESSES', 12))
    except ValueError:
        n_processes = 12
    try:
        n_db_processes = int(os.environ.get('NUMBER_OF_DB_PROCESSES', 6))
    except ValueError:
        n_db_processes = 6

    logging.info(f"The experiment will use {n_processes} cores in the host")

    logging.info(
        f"The output (matrices and models) of this experiment will be stored in {output_path}"
    )

    logging.info(
        f"The experiment will utilize any preexisting matrix or model: {not replace}"
    )

    logging.info("Creating experiment object")

    experiment = MultiCoreExperiment(
        n_processes=n_processes,
        n_db_processes=n_db_processes,
        config=experiment_config,
        db_engine=triage.create_engine(db_url),
        project_path=output_path,
        #matrix_storage_class=HDFMatrixStore,
        replace=replace,
        cleanup=True,
        cleanup_timeout=2)

    logging.info(
        "Experiment created: all the file permissions, and db connections are OK"
    )

    logging.info("Validating the experiment")

    experiment.validate()

    logging.info("""
           The experiment configuration doesn't contain any obvious errors.
           Any error that occurs possibly is related to number of columns or collision in
           the column names, both due to PostgreSQL limitations.
    """)

    logging.debug(f"Experiment configuration: {experiment.config}")

    experiment_name = os.path.splitext(os.path.split(experiment_file)[1])[0]

    logging.info(f"Running the experiment: {experiment_name}")

    experiment.run()

    end_time = datetime.datetime.now()

    # Bug fix: end_time - start_time is a timedelta that formats as
    # H:MM:SS.ffffff, so labelling it "seconds" was wrong -- convert
    # explicitly to seconds.
    logging.info(
        f"Experiment {experiment_file} completed in "
        f"{(end_time - start_time).total_seconds()} seconds"
    )

    logging.info("Done!")
Example #11
0
)

# Connect to the results database configured earlier in the script.
db_engine = create_engine(db_url)

# loading config file

with open('donors-choose-config.yaml', 'r') as fin:
    config = yaml.load(fin)

# generating temporal config plot
chopper = Timechop(**config['temporal_config'])

# We aren't interested in seeing the entire feature_start_time represented
# in our timechop plot. That would hide the interesting information. So we
# set it to equal label_start_time for the plot.

chopper.feature_start_time = chopper.label_start_time

visualize_chops(chopper, save_target='triage_output/timechop.png')

# creating experiment object
# 32 worker processes plus 4 DB processes; replace=False reuses any
# preexisting matrices and models.

experiment = MultiCoreExperiment(
    config=config,
    db_engine=db_engine,
    project_path='s3://dsapp-education-migrated/donors-choose',
    n_processes=32,
    n_db_processes=4,
    replace=False)

experiment.run()