Example #1
    def test_record_with_timestamp(self, mock_datetime):
        fake_timestamp(mock_datetime)

        self.std_output = StdOutput(with_timestamp=True)

        # Record an empty message with the prefix 'DOWEL' and capture what
        # StdOutput prints to stdout.
        with redirect_stdout(self.str_out):
            self.std_output.record('', 'DOWEL')

        self.std_output.dump()

        self.str_out.seek(0)
        contents = self.str_out.read()
        assert contents == '{} | DOWEL\n'.format(FAKE_TIMESTAMP_SHORT)
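
For context, the surrounding dowel API is small: a StdOutput is attached to the global logger, messages go through logger.log, and outputs are flushed and detached when done. A minimal standalone sketch, put together from the dowel calls used in these examples rather than taken from this test file:

from dowel import logger, StdOutput

logger.add_output(StdOutput(with_timestamp=True))
logger.log('Hello from dowel')  # printed as '<timestamp> | Hello from dowel'
logger.dump_all()               # flush all attached outputs
logger.remove_all()             # detach outputs when done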
Example #2
    def read_cmd(cls, env_cls):
        logger.add_output(StdOutput())

        parser = argparse.ArgumentParser()
        parser.add_argument("folder", nargs="+")
        # Adaptation parameters
        parser.add_argument("--adapt-rollouts",
                            nargs="?",
                            default=10,
                            type=int)
        parser.add_argument("--test-rollouts", nargs="?", default=10, type=int)
        parser.add_argument("--max-path-length",
                            nargs="?",
                            default=100,
                            type=int)
        # Number of workers
        parser.add_argument("--parallel", nargs="?", default=0, type=int)
        # Skip iterations that already have a meta-testing result.
        # (With default=True, this store_true flag is always on.)
        parser.add_argument("--skip-exist", action='store_true', default=True)
        # Merge all meta-testing results into meta-test.csv (also always on).
        parser.add_argument("--merge", action='store_true', default=True)
        # Subsample iterations: e.g. stride=3 keeps 1 iteration out of every 3.
        parser.add_argument("--stride", default=1, type=int)

        args = parser.parse_args()
        meta_train_dirs = args.folder
        workers = args.parallel
        adapt_rollout_per_task = args.adapt_rollouts
        test_rollout_per_task = args.test_rollouts
        max_path_length = args.max_path_length
        skip_existing = args.skip_exist
        to_merge = args.merge
        stride = args.stride

        helper = cls(meta_task_cls=env_cls,
                     max_path_length=max_path_length,
                     adapt_rollout_per_task=adapt_rollout_per_task,
                     test_rollout_per_task=test_rollout_per_task)

        helper.test_many_folders(folders=meta_train_dirs,
                                 workers=workers,
                                 skip_existing=skip_existing,
                                 to_merge=to_merge,
                                 stride=stride)
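
This reads like a @classmethod entry point on a meta-test helper. A hedged sketch of how a script might call it, assuming hypothetical names MetaTestHelper and MyMetaEnv (neither comes from the source):

# Hypothetical wiring; the module, class, and env names are placeholders.
from meta_test_helper import MetaTestHelper
from my_envs import MyMetaEnv

if __name__ == '__main__':
    # read_cmd() parses sys.argv itself, so the caller only supplies the env class.
    MetaTestHelper.read_cmd(MyMetaEnv)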
Example #3
    def setup_method(self):
        self.tabular = TabularInput()
        self.std_output = StdOutput(with_timestamp=False)
        self.str_out = io.StringIO()
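
Examples #1 and #3 appear to be excerpts from the same pytest class: setup_method builds the fixtures (self.std_output, self.str_out) that test_record_with_timestamp uses. A hedged reconstruction of the surrounding frame; the patch target, the fake_timestamp helper, and FAKE_TIMESTAMP_SHORT are assumptions for illustration, not taken from the source:

import io
from contextlib import redirect_stdout
from unittest import mock

from dowel import StdOutput, TabularInput

FAKE_TIMESTAMP_SHORT = '2019-01-01 00:00:00'  # assumed value, for illustration only


def fake_timestamp(mock_datetime):
    # Assumed helper: make the patched datetime return a fixed timestamp string.
    mock_datetime.now.return_value.strftime.return_value = FAKE_TIMESTAMP_SHORT


class TestStdOutput:

    def setup_method(self):
        ...  # body as in Example #3 above

    @mock.patch('dowel.simple_outputs.datetime.datetime')  # assumed patch target
    def test_record_with_timestamp(self, mock_datetime):
        ...  # body as in Example #1 above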
Example #4
def main():
    # Create the output folders (logs, hyperparameters, predictions) if they do not exist yet
    make_path(ABS_ALGO_EXP_LOGS_PATH)
    make_path(ABS_ALGO_HYPERPARAMS_PATH)
    make_path(ABS_ALGO_PREDICTIONS_PATH)

    # Parse the command-line arguments
    args = get_args()

    experiment_name = (args.model_type + '_' + args.problem_type + '_' +
                       datetime.now().strftime(DATE_NAME_FORMAT))

    # Init Loggers
    log_path = join(ABS_ALGO_EXP_LOGS_PATH, experiment_name + '.log')

    logger.add_output(StdOutput())
    logger.add_output(TextOutput(log_path))
    logger.log("Running Project Varro")
    logger.log("Purpose: " + args.purpose)

    if args.verbose:
        logger.set_timer(True)

    if args.hyper_opt is not None:
        if args.hyper_opt == 'grid_search':
            from varro.algo.hyperparam_opt.grid_search import grid_search
            checkpoint_dir = join(GRID_SEARCH_CHECKPOINTS_PATH, 'tmp')
            make_path(checkpoint_dir)
            grid_search()
        elif args.hyper_opt == 'bayesian_opt':
            raise NotImplementedError
        else:
            raise ValueError("Unknown hyperparameter optimization method.")
        return
    else:
        checkpoint_dir = join(EXPERIMENT_CHECKPOINTS_PATH, experiment_name)
        make_path(checkpoint_dir)

    # Check if we're fitting or predicting
    if args.purpose == 'fit':
        # Start Optimization

        logger.start_timer()
        fit(model_type=args.model_type,
            problem_type=args.problem_type,
            strategy=args.strategy,
            cxpb=args.cxpb,
            mutpb=args.mutpb,
            imutpb=args.imutpb,
            imutmu=args.imutmu,
            imutsigma=args.imutsigma,
            sample_size=args.samplesize,
            popsize=args.popsize,
            elitesize=args.elitesize,
            ngen=args.ngen,
            ckpt=args.ckpt,
            ckpt_freq=args.ckpt_freq,
            novelty_metric=args.novelty_metric,
            halloffamesize=args.halloffamesize,
            earlystop=args.earlystop,
            ckpt_dir=checkpoint_dir)
        logger.stop_timer('EXPERIMENT.PY Fitting complete')

    else:
        if args.ckptfolder:
            # Make predictions using the best individual from each generation in ckptfolder

            logger.start_timer()
            save_dir = join(ABS_ALGO_PREDICTIONS_PATH,
                            args.ckptfolder.split('/')[-1])
            make_path(save_dir)
            ckpt_files = [
                join(args.ckptfolder, f) for f in listdir(args.ckptfolder)
                if isfile(join(args.ckptfolder, f))
            ]
            for ckpt in ckpt_files:
                predict(model_type=args.model_type,
                        problem_type=args.problem_type,
                        strategy=args.strategy,
                        input_data=args.input_data,
                        ckpt=ckpt,
                        save_dir=save_dir)

            logger.stop_timer(
                'EXPERIMENT.PY Making predictions using the best individual from each generation'
            )

        else:
            # Make a single prediction

            logger.start_timer()
            save_dir = join(ABS_ALGO_PREDICTIONS_PATH,
                            args.ckpt.split('/')[-2])
            make_path(save_dir)
            predict(model_type=args.model_type,
                    problem_type=args.problem_type,
                    strategy=args.strategy,
                    input_data=args.input_data,
                    ckpt=args.ckpt,
                    save_dir=save_dir)

            logger.stop_timer('EXPERIMENT.PY Making a single prediction')
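
One small gap worth noting: main() attaches StdOutput and TextOutput but never detaches them. If the function were ever called more than once in the same process, a teardown along these lines would keep outputs from stacking up; the wrapper below is a sketch using dowel's remove_all, not code from the source:

from dowel import logger

if __name__ == '__main__':
    try:
        main()
    finally:
        logger.remove_all()  # detach StdOutput/TextOutput so a second run does not double-log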