Example #1
 def restore_state(self, args_, scen, root_logger):
     # Check for folder and files
     rh_path = os.path.join(args_.restore_state, "runhistory.json")
     stats_path = os.path.join(args_.restore_state, "stats.json")
     traj_path = os.path.join(args_.restore_state, "traj_aclib2.json")
     scen_path = os.path.join(args_.restore_state, "scenario.txt")
     if not os.path.isdir(args_.restore_state):
         raise FileNotFoundError(
             "Could not find folder from which to restore.")
     # Load runhistory and stats
     rh = RunHistory(aggregate_func=None)
     rh.load_json(rh_path, scen.cs)
     root_logger.debug("Restored runhistory from %s", rh_path)
     stats = Stats(scen)
     stats.load(stats_path)
     root_logger.debug("Restored stats from %s", stats_path)
     trajectory = TrajLogger.read_traj_aclib_format(fn=traj_path,
                                                    cs=scen.cs)
     incumbent = trajectory[-1]["incumbent"]
     root_logger.debug("Restored incumbent %s from %s", incumbent,
                       traj_path)
     # Copy traj if output_dir of specified scenario-file is different than
     # the output_dir of the scenario-file in the folder from which to restore.
     if scen.output_dir != InputReader().read_scenario_file(
             scen_path)['output_dir']:
         new_traj_path = os.path.join(scen.output_dir, "traj_aclib2.json")
         shutil.copy(traj_path, new_traj_path)
         root_logger.debug("Copied traj %s", traj_path)
     return rh, stats, incumbent
Example #2
    def test_multi_config_design(self):
        stats = Stats(scenario=self.scenario)
        stats.start_timing()
        self.ta.stats = stats
        tj = TrajLogger(output_dir=None, stats=stats)
        rh = RunHistory(aggregate_func=average_cost)
        self.ta.runhistory = rh
        rng = np.random.RandomState(seed=12345)

        intensifier = Intensifier(tae_runner=self.ta,
                                  stats=stats,
                                  traj_logger=tj,
                                  rng=rng,
                                  instances=[None],
                                  run_obj_time=False)

        configs = [
            Configuration(configuration_space=self.cs, values={"x1": 4}),
            Configuration(configuration_space=self.cs, values={"x1": 2})
        ]
        dc = MultiConfigInitialDesign(tae_runner=self.ta,
                                      scenario=self.scenario,
                                      stats=stats,
                                      traj_logger=tj,
                                      runhistory=rh,
                                      rng=rng,
                                      configs=configs,
                                      intensifier=intensifier,
                                      aggregate_func=average_cost)

        inc = dc.run()
        self.assertTrue(stats.ta_runs == 2)
        self.assertTrue(len(rh.data) == 2)
        self.assertTrue(rh.get_cost(inc) == 4)
Example #3
    def setUp(self):
        unittest.TestCase.setUp(self)

        self.rh = RunHistory()
        self.cs = get_config_space()
        self.config1 = Configuration(self.cs,
                                     values={'a': 7, 'b': 11})
        self.config2 = Configuration(self.cs,
                                     values={'a': 13, 'b': 17})
        self.config3 = Configuration(self.cs,
                                     values={'a': 0, 'b': 7})
        self.config4 = Configuration(self.cs,
                                     values={'a': 29, 'b': 31})

        self.scen = Scenario({"cutoff_time": 2, 'cs': self.cs,
                              "run_obj": 'runtime',
                              "output_dir": ''})
        self.stats = Stats(scenario=self.scen)
        self.stats.start_timing()

        # Create the base object
        self.intensifier = SimpleIntensifier(
            stats=self.stats,
            traj_logger=TrajLogger(output_dir=None, stats=self.stats),
            rng=np.random.RandomState(12345),
            deterministic=True,
            run_obj_time=False,
            instances=[1],
        )
Example #4
def main():
    # Initialize scenario, using runcount_limit as budget.
    orig_scen_dict = {
        'algo': 'python cmdline_wrapper.py',
        'paramfile': 'param_config_space.pcs',
        'run_obj': 'quality',
        'runcount_limit': 25,
        'deterministic': True,
        'output_dir': 'restore_me'
    }
    original_scenario = Scenario(orig_scen_dict)
    smac = SMAC(scenario=original_scenario)
    smac.optimize()

    print(
        "\n########## BUDGET EXHAUSTED! Restoring optimization: ##########\n")

    # Now the output is in the folder 'restore_me'
    #
    # We could simply modify the scenario-object, stored in
    # 'smac.solver.scenario' and start optimization again:

    #smac.solver.scenario.ta_run_limit = 50
    #smac.optimize()

    # Or, to show the whole process of recovering a SMAC-run from the output
    # directory, create a new scenario with an extended budget:
    new_scenario = Scenario(
        orig_scen_dict,
        cmd_args={
            'runcount_limit': 50,  # overwrite these args
            'output_dir': 'restored'
        })

    # We load the runhistory, ...
    rh_path = os.path.join(original_scenario.output_dir, "runhistory.json")
    runhistory = RunHistory(aggregate_func=None)
    runhistory.load_json(rh_path, new_scenario.cs)
    # ... stats, ...
    stats_path = os.path.join(original_scenario.output_dir, "stats.json")
    stats = Stats(new_scenario)
    stats.load(stats_path)
    # ... and trajectory.
    traj_path = os.path.join(original_scenario.output_dir, "traj_aclib2.json")
    trajectory = TrajLogger.read_traj_aclib_format(fn=traj_path,
                                                   cs=new_scenario.cs)
    incumbent = trajectory[-1]["incumbent"]
    # Because we changed the output_dir, we might want to copy the old
    # trajectory-file (runhistory and stats will be complete)
    new_traj_path = os.path.join(new_scenario.output_dir, "traj_aclib2.json")
    shutil.copy(traj_path, new_traj_path)

    # Now we can initialize SMAC with the recovered objects and restore the
    # state where we left off. By providing stats and a restore_incumbent, SMAC
    # automatically detects the intention of restoring a state.
    smac = SMAC(scenario=new_scenario,
                runhistory=runhistory,
                stats=stats,
                restore_incumbent=incumbent)
    smac.optimize()
Example #5
    def test_crashed_cost_value(self, test_run):
        '''
            test cost on crashed runs
        '''
        # Patch run-function for custom-return
        scen = Scenario(scenario={
            'cs': ConfigurationSpace(),
            'run_obj': 'quality'
        },
                        cmd_options=None)
        stats = Stats(scen)
        stats.start_timing()
        stats.ta_runs += 1

        # Check quality
        test_run.return_value = StatusType.CRASHED, np.nan, np.nan, {}
        eta = ExecuteTARun(ta=lambda *args: None,
                           stats=stats,
                           run_obj='quality',
                           cost_for_crash=100)
        self.assertEqual(100, eta.start(config={}, instance=1)[1])

        # Check runtime
        eta = ExecuteTARun(ta=lambda *args: None,
                           stats=stats,
                           run_obj='runtime',
                           cost_for_crash=10.7)
        self.assertEqual(20.0, eta.start(config={}, instance=1, cutoff=20)[1])
Example #6
 def restore_state_before_scen(self, args_):
     """Read in files for state-restoration: runhistory, stats, trajectory.
     """
     # Construct dummy-scenario for object-creation (mainly cs is needed)
     tmp_scen = InputReader().read_scenario_file(args_.scenario_file)
     tmp_scen = Scenario(tmp_scen, cmd_args={'output_dir': ''})
     # Check for folder and files
     rh_path = os.path.join(args_.restore_state, "runhistory.json")
     stats_path = os.path.join(args_.restore_state, "stats.json")
     traj_path_aclib = os.path.join(args_.restore_state, "traj_aclib2.json")
     traj_path_old = os.path.join(args_.restore_state, "traj_old.csv")
     scen_path = os.path.join(args_.restore_state, "scenario.txt")
     if not os.path.isdir(args_.restore_state):
         raise FileNotFoundError(
             "Could not find folder from which to restore.")
     # Load runhistory and stats
     rh = RunHistory(aggregate_func=None)
     rh.load_json(rh_path, tmp_scen.cs)
     self.logger.debug("Restored runhistory from %s", rh_path)
     stats = Stats(
         tmp_scen)  # Need to inject actual scenario later for output_dir!
     stats.load(stats_path)
     self.logger.debug("Restored stats from %s", stats_path)
     with open(traj_path_aclib, 'r') as traj_fn:
         traj_list_aclib = traj_fn.readlines()
     with open(traj_path_old, 'r') as traj_fn:
         traj_list_old = traj_fn.readlines()
     return rh, stats, traj_list_aclib, traj_list_old
Example #7
 def restore_state(
     self,
     scen: Scenario,
     restore_state: str,
 ) -> typing.Tuple[RunHistory, Stats, typing.List, typing.List]:
     """Read in files for state-restoration: runhistory, stats, trajectory.
     """
     # Check for folder and files
     rh_path = os.path.join(restore_state, "runhistory.json")
     stats_path = os.path.join(restore_state, "stats.json")
     traj_path_aclib = os.path.join(restore_state, "traj_aclib2.json")
     traj_path_old = os.path.join(restore_state, "traj_old.csv")
     _ = os.path.join(restore_state, "scenario.txt")
     if not os.path.isdir(restore_state):
         raise FileNotFoundError(
             "Could not find folder from which to restore.")
     # Load runhistory and stats
     rh = RunHistory()
     rh.load_json(rh_path,
                  scen.cs)  # type: ignore[attr-defined] # noqa F821
     self.logger.debug("Restored runhistory from %s", rh_path)
     stats = Stats(scen)
     stats.load(stats_path)
     self.logger.debug("Restored stats from %s", stats_path)
     with open(traj_path_aclib, 'r') as traj_fn:
         traj_list_aclib = traj_fn.readlines()
     with open(traj_path_old, 'r') as traj_fn:
         traj_list_old = traj_fn.readlines()
     return rh, stats, traj_list_aclib, traj_list_old
Example #8
    def test_start_tae_return_abort(self, test_run):
        '''
            testing abort
        '''
        # Patch run-function for custom-return
        test_run.return_value = StatusType.ABORT, 12345.0, 1.2345, {}

        scen = Scenario(
            scenario={
                'cs': ConfigurationSpace(),
                'run_obj': 'quality',
                'output_dir': '',
            },
            cmd_options=None,
        )
        stats = Stats(scen)
        stats.start_timing()
        eta = SerialRunner(ta=lambda *args: None, stats=stats)

        _, run_value = eta.run_wrapper(
            RunInfo(config=None,
                    instance=1,
                    instance_specific=None,
                    cutoff=30,
                    seed=None,
                    capped=False,
                    budget=0.0))
        self.assertEqual(run_value.status, StatusType.ABORT)
Example #9
    def setUp(self):
        unittest.TestCase.setUp(self)

        self.rh = RunHistory()
        self.cs = get_config_space()
        self.config1 = Configuration(self.cs, values={'a': 7, 'b': 11})
        self.config2 = Configuration(self.cs, values={'a': 13, 'b': 17})
        self.config3 = Configuration(self.cs, values={'a': 0, 'b': 7})
        self.config4 = Configuration(self.cs, values={'a': 29, 'b': 31})
        self.config5 = Configuration(self.cs, values={'a': 31, 'b': 33})

        self.scen = Scenario({
            "cutoff_time": 2,
            'cs': self.cs,
            "run_obj": 'runtime',
            "output_dir": ''
        })
        self.stats = Stats(scenario=self.scen)
        self.stats.start_timing()

        # Create the base object
        self.HB = Hyperband(
            stats=self.stats,
            traj_logger=TrajLogger(output_dir=None, stats=self.stats),
            rng=np.random.RandomState(12345),
            deterministic=False,
            run_obj_time=False,
            instances=[1, 2, 3, 4, 5],
            n_seeds=2,
            initial_budget=2,
            max_budget=5,
            eta=2,
        )
Example #10
def restore_state(scenario: typing.Union[Scenario, ScenarioProperties]):
    r"""Read in files for state-restoration: runhistory, stats, trajectory.

    :param scenario: Scenario whose state shall be loaded.
    :return: (RunHistory, Stats, dict)-tuple
    """
    # Check for folder and files
    rh_path = os.path.join(scenario.output_dir_for_this_run, 'runhistory.json')
    stats_path = os.path.join(scenario.output_dir_for_this_run, 'stats.json')
    traj_path_aclib = os.path.join(scenario.output_dir_for_this_run, 'traj_aclib2.json')
    if not os.path.isdir(scenario.output_dir_for_this_run):
        raise FileNotFoundError('Could not find folder from which to restore.')

    # Load runhistory and stats
    rh = RunHistory(aggregate_func=None)
    rh.load_json(rh_path, scenario.cs)
    log.debug('Restored runhistory from %s', rh_path)

    stats = Stats(scenario)
    stats.load(stats_path)
    log.debug('Restored stats from %s', stats_path)

    trajectory = TrajLogger.read_traj_aclib_format(fn=traj_path_aclib, cs=scenario.cs)
    incumbent = trajectory[-1]['incumbent']
    log.debug('Restored incumbent %s from %s', incumbent, traj_path_aclib)
    return rh, stats, incumbent
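
A natural follow-up, shown here only as a sketch, is to feed the restored objects back into a fresh SMAC facade to continue the run. This assumes the facade accepts runhistory, stats and restore_incumbent exactly as in Example #4, and `scenario` stands for the (possibly extended) Scenario to continue with:

# Hypothetical continuation of Example #10: warm-start SMAC from the restored state.
rh, stats, incumbent = restore_state(scenario)
smac = SMAC(scenario=scenario,
            runhistory=rh,
            stats=stats,
            restore_incumbent=incumbent)
smac.optimize()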
Example #11
    def test_parallel_same_as_serial_HB(self):
        """Makes sure we behave the same as a serial run at the end"""

        # Get the run_history for a _HB (serial Hyperband) run:
        rh = RunHistory()
        stats = Stats(scenario=self.scen)
        stats.start_timing()
        _HB = _Hyperband(
            stats=stats,
            traj_logger=TrajLogger(output_dir=None, stats=stats),
            rng=np.random.RandomState(12345),
            deterministic=True,
            run_obj_time=False,
            instances=[1, 2, 3, 4, 5],
            initial_budget=2,
            max_budget=5,
            eta=2,
        )
        incumbent, inc_perf = self._exhaust_run_and_get_incumbent(
            _HB, rh, num_workers=1)

        # Just to make sure nothing has changed on the _HB side to make
        # this check invalid:
        # We add config values, so config 3 with 0 and 7 should have the lowest cost
        self.assertEqual(incumbent, self.config3)
        self.assertEqual(inc_perf, 7.0)

        # Do the same for HB, but have multiple _HB instances in there.
        # These will be created via num_workers==2
        # in self._exhaust_run_and_get_incumbent
        HB = Hyperband(
            stats=self.stats,
            traj_logger=TrajLogger(output_dir=None, stats=self.stats),
            rng=np.random.RandomState(12345),
            deterministic=True,
            run_obj_time=False,
            instances=[1, 2, 3, 4, 5],
            initial_budget=2,
            max_budget=5,
            eta=2,
        )
        incumbent_phb, inc_perf_phb = self._exhaust_run_and_get_incumbent(
            HB, self.rh)
        self.assertEqual(incumbent, incumbent_phb)

        # This makes sure there is a single incumbent in HB
        self.assertEqual(inc_perf, inc_perf_phb)

        # We don't want to lose any configuration, and in particular
        # we want to make sure the values of _HB and HB match
        self.assertEqual(len(self.rh.data), len(rh.data))

        # Because it is a deterministic run, the run histories must be the
        # same on exhaustion
        self.assertDictEqual(self.rh.data, rh.data)
Example #12
 def get_tae(obj):
     """ Create SerialRunner-object for testing. """
     scen = Scenario(scenario={
         'cs': ConfigurationSpace(),
         'run_obj': obj,
         'cutoff_time': '10'
     },
                     cmd_options=None)
     stats = Stats(scen)
     stats.start_timing()
     # Add first run to not trigger FirstRunCrashedException
     stats.submitted_ta_runs += 1
     eta = SerialRunner(ta=lambda *args: None, stats=stats, run_obj=obj)
     return eta
Example #13
 def setUp(self):
     self.cs = ConfigurationSpace()
     self.cs.add_hyperparameter(UniformFloatHyperparameter(
         name="x1", lower=1, upper=10, default_value=1)
     )
     self.scenario = Scenario({
         'cs': self.cs,
         'run_obj': 'quality',
         'output_dir': '',
         'ta_run_limit': 100,
     })
     self.stats = Stats(scenario=self.scenario)
     self.rh = RunHistory()
     self.ta = ExecuteTAFuncDict(lambda x: x["x1"]**2, stats=self.stats, runhistory=self.rh)
Example #14
    def test_single_default_config_design(self):
        stats = Stats(scenario=self.scenario)
        stats.start_timing()
        self.ta.stats = stats
        tj = TrajLogger(output_dir=None, stats=stats)
        rh = RunHistory(aggregate_func=average_cost)

        dc = DefaultConfiguration(tae_runner=self.ta, scenario=self.scenario,
                                  stats=stats, traj_logger=tj,
                                  rng=np.random.RandomState(seed=12345))

        inc = dc.run()
        self.assertTrue(stats.ta_runs == 1)
        self.assertTrue(len(rh.data) == 0)
Example #15
def eval_challenger(
    run_info: RunInfo,
    taf: ExecuteTAFuncDict,
    stats: Stats,
    runhistory: RunHistory,
):
    """
    Wrapper over challenger evaluation

    SMBO objects handles run history now, but to keep
    same testing functionality this function is a small
    wrapper to launch the taf and add it to the history
    """
    # evaluating configuration
    run_info, result = taf.run_wrapper(
        run_info=run_info,
    )
    stats.ta_time_used += float(result.time)
    runhistory.add(
        config=run_info.config,
        cost=result.cost,
        time=result.time,
        status=result.status,
        instance_id=run_info.instance,
        seed=run_info.seed,
        budget=run_info.budget,
    )
    stats.n_configs = len(runhistory.config_ids)
    return result
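
For context, a minimal call site for this wrapper might look as follows. This is a sketch only, assuming a RunInfo built with the same fields as in Example #8 and `cs`, `taf`, `stats` and `runhistory` objects prepared as in the other examples:

# Hypothetical usage of eval_challenger (all variable names are assumptions).
run_info = RunInfo(config=cs.get_default_configuration(),
                   instance=None,
                   instance_specific=None,
                   cutoff=None,
                   seed=0,
                   capped=False,
                   budget=0.0)
result = eval_challenger(run_info, taf, stats, runhistory)
print(result.status, result.cost)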
Example #16
    def _set_param_space(self, param_space):
        self.param_space = param_space

        from ConfigSpace.hyperparameters import UniformFloatHyperparameter
        from smac.configspace import ConfigurationSpace
        from smac.optimizer.objective import average_cost
        from smac.runhistory.runhistory import RunHistory
        from smac.scenario.scenario import Scenario
        from smac.stats.stats import Stats
        from smac.utils.io.traj_logging import TrajLogger

        self.cs = ConfigurationSpace()
        for param in param_space:
            if param.type == 'continuous':
                var = UniformFloatHyperparameter(param.name, param.low,
                                                 param.high)
                self.cs.add_hyperparameter(var)
        self.runhistory = RunHistory(aggregate_func=average_cost)
        self.scenario = Scenario({
            'run_obj': 'quality',
            'runcount-limit': self.budget,
            'cs': self.cs
        })
        self.stats = Stats(self.scenario)
        self.traj_logger = TrajLogger(output_dir=__scratch__, stats=self.stats)
Example #17
 def test_init(self):
     scen = Scenario(scenario={'run_obj': 'quality', 'cs': self.cs, 'output_dir': ''})
     stats = Stats(scen)
     with tempfile.TemporaryDirectory() as tmpdir:
         path = os.path.join(tmpdir, 'tmp_test_folder')
         TrajLogger(output_dir=path, stats=stats)
         self.assertTrue(os.path.exists(path))
Example #18
    def setUp(self):
        base_directory = os.path.split(__file__)[0]
        base_directory = os.path.abspath(
            os.path.join(base_directory, '..', '..'))
        self.current_dir = os.getcwd()
        os.chdir(base_directory)

        logging.basicConfig()
        self.logger = logging.getLogger(self.__module__ + "." + self.__class__.__name__)
        self.logger.setLevel(logging.DEBUG)
        self.rng = np.random.RandomState(seed=42)
        self.scen_fn = 'test/test_files/validation/scenario.txt'
        self.train_insts = ['0', '1', '2']
        self.test_insts = ['3', '4', '5']
        self.inst_specs = {'0': 'null', '1': 'one', '2': 'two',
                           '3': 'three', '4': 'four', '5': 'five'}
        self.feature_dict = {'0': np.array((1, 2, 3)),
                             '1': np.array((1, 2, 3)),
                             '2': np.array((1, 2, 3)),
                             '3': np.array((1, 2, 3)),
                             '4': np.array((1, 2, 3)),
                             '5': np.array((1, 2, 3))}
        self.output_rh = 'test/test_files/validation/'
        scen = Scenario(self.scen_fn, cmd_options={'run_obj': 'quality'})
        self.stats = Stats(scen)
        self.trajectory = TrajLogger.read_traj_aclib_format(
            fn='test/test_files/validation/test_validation_traj.json', cs=scen.cs)
        self.output_dirs = [self.output_rh + 'test']
        self.output_files = [self.output_rh + 'validated_runhistory_EPM.json',
                             self.output_rh + 'validated_runhistory.json']

        self.maxDiff = None
Example #19
def optimize(optimizer, scenario, trajectory=None):
    then = time.time()
    best_conf = optimizer.optimize()
    print(best_conf)
    print('training   time:', time.time() - then)

    traj_logger = TrajLogger(None, Stats(scenario))
    trajectory = trajectory or traj_logger.read_traj_aclib_format(
        "smac-output/run_1/traj_aclib2.json", scenario.cs)
    validator = Validator(scenario, trajectory, rng=np.random.RandomState(42))

    # evaluate on test instances and calculate cpu time
    then = time.time()
    runhis_dev = validator.validate(config_mode="def", instance_mode="test")
    runhis_inc = validator.validate(config_mode="inc", instance_mode="test")
    print('validating time:', time.time() - then)

    default_conf = runhis_dev.ids_config[1]
    incumbent_conf = runhis_inc.ids_config[1]
    dev_vals = get_instance_costs_for_config(runhis_dev, default_conf)
    inc_vals = get_instance_costs_for_config(runhis_inc, incumbent_conf)

    # ###### Filter runs for plotting #######
    dev_x = []
    inc_x = []
    for key in set(dev_vals.keys()) & set(inc_vals.keys()):
        dev_x.append(dev_vals[key])
        inc_x.append(inc_vals[key])

    # print(dev_vals)
    # print(inc_vals)
    print(dev_x)
    print(inc_x)

    print('PAR10:', np.mean(inc_x), '/', np.mean(dev_x))
    max_x = 1000.0
    par1er = lambda xx: np.mean([(x / 10 if x == max_x else x) for x in xx])
    print('PAR1 :', par1er(inc_x), '/', par1er(dev_x))
    to_counter = lambda xx: len([x for x in xx if x == max_x])
    print('TOs  :', to_counter(inc_x), '/', to_counter(dev_x))
    print('wins :', len([i for i in range(len(dev_x)) if dev_x[i] > inc_x[i]]),
          '/', len(dev_x))

    fig, ax = plt.subplots()
    ax.scatter(dev_x, inc_x, marker="x")
    ax.set_xlabel("Default Configuration")
    ax.set_ylabel("Incumbent Configuration")

    lims = [
        np.min([ax.get_xlim(), ax.get_ylim()]),
        np.max([ax.get_xlim(), ax.get_ylim()])
    ]
    ax.plot(lims, lims, 'k-', alpha=0.75, zorder=0)
    # ax.set_xlim(lims)
    # ax.set_ylim(lims)

    ax.set_xscale('log')
    ax.set_yscale('log')

    fig.savefig("fig-smac.png")
Example #20
    def _do_dummy_prediction(self, datamanager, num_run):

        # When using partial-cv it makes no sense to do dummy predictions
        if self._resampling_strategy in ['partial-cv',
                                         'partial-cv-iterative-fit']:
            return num_run

        self._logger.info("Starting to create dummy predictions.")
        memory_limit = int(self._ml_memory_limit)
        scenario_mock = unittest.mock.Mock()
        scenario_mock.wallclock_limit = self._time_for_task
        # This stats object is a hack - maybe the SMAC stats object should
        # already be generated here!
        stats = Stats(scenario_mock)
        stats.start_timing()
        ta = ExecuteTaFuncWithQueue(backend=self._backend,
                                    autosklearn_seed=self._seed,
                                    resampling_strategy=self._resampling_strategy,
                                    initial_num_run=num_run,
                                    logger=self._logger,
                                    stats=stats,
                                    metric=self._metric,
                                    memory_limit=memory_limit,
                                    disable_file_output=self._disable_evaluator_output,
                                    **self._resampling_strategy_arguments)

        status, cost, runtime, additional_info = \
            ta.run(1, cutoff=self._time_for_task)
        if status == StatusType.SUCCESS:
            self._logger.info("Finished creating dummy predictions.")
        else:
            self._logger.error('Error creating dummy predictions: %s ',
                               str(additional_info))

        return ta.num_run
Example #21
 def test_oserror(self):
     scen = Scenario(scenario={'run_obj': 'quality', 'cs': self.cs, 'output_dir': ''})
     stats = Stats(scen)
     # test OSError
     with patch('os.makedirs') as osMock:
         osMock.side_effect = OSError()
         self.assertRaises(OSError, TrajLogger, output_dir='random_directory', stats=stats)
Example #22
    def test_status(self):

        scen = Scenario(scenario={
            'cs': ConfigurationSpace(),
            'run_obj': 'quality',
            'output_dir': ''
        },
                        cmd_args=None)
        stats = Stats(scen)

        eta = ExecuteTARunOld(ta=shlex.split(""), stats=stats)

        def test_success(**kwargs):
            return "Result of this algorithm run: SUCCESS,1,1,1,12354", ""

        eta._call_ta = test_success
        status, cost, runtime, ar_info = eta.run(config={}, )
        self.assertEqual(status, StatusType.SUCCESS)

        def test_success(**kwargs):
            return "Result of this algorithm run: SUCESS,1,1,1,12354", ""

        eta._call_ta = test_success
        status, cost, runtime, ar_info = eta.run(config={}, )
        self.assertEqual(status, StatusType.CRASHED)

        def test_success(**kwargs):
            return "Result of this algorithm run: success,1,1,1,12354", ""

        eta._call_ta = test_success
        status, cost, runtime, ar_info = eta.run(config={}, )
        self.assertEqual(status, StatusType.SUCCESS)
Example #23
    def test_status(self):

        scen = Scenario(scenario={
            'cs': ConfigurationSpace(),
            'run_obj': 'quality',
            'output_dir': ''
        },
                        cmd_options=None)
        stats = Stats(scen)

        eta = ExecuteTARunAClib(ta=shlex.split(""), stats=stats)

        def test_success(**kwargs):
            results = {"status": "SUCCESS", "cost": 1234567890}
            return results, "", ""

        eta._call_ta = test_success
        status, cost, runtime, ar_info = eta.run(config={}, instance='0')
        self.assertEqual(status, StatusType.SUCCESS)

        def test_success(**kwargs):
            results = {"status": "SUCESS", "cost": 1234567890}
            return results, "", ""

        eta._call_ta = test_success
        status, cost, runtime, ar_info = eta.run(config={}, instance='0')
        self.assertEqual(status, StatusType.CRASHED)

        def test_success(**kwargs):
            results = {"status": "success", "cost": 1234567890}
            return results, "", ""

        eta._call_ta = test_success
        status, cost, runtime, ar_info = eta.run(config={}, instance='0')
        self.assertEqual(status, StatusType.CRASHED)
Example #24
 def test_illegal_input(self):
     """
     Testing illegal input in smbo
     """
     cs = ConfigurationSpace()
     cs.add_hyperparameter(UniformFloatHyperparameter('test', 1, 10, 5))
     scen = Scenario({'run_obj': 'quality', 'cs': cs})
     stats = Stats(scen)
     # Recorded runs but no incumbent.
     stats.ta_runs = 10
     smac = SMAC(scen, stats=stats, rng=np.random.RandomState(42))
     self.assertRaises(ValueError, smac.optimize)
     # Incumbent but no recorded runs.
     incumbent = cs.get_default_configuration()
     smac = SMAC(scen, restore_incumbent=incumbent,
                 rng=np.random.RandomState(42))
     self.assertRaises(ValueError, smac.optimize)
Example #25
    def test_start_tae_return_abort(self, test_run):
        '''
            testing abort
        '''
        # Patch run-function for custom-return
        test_run.return_value = StatusType.ABORT, 12345.0, 1.2345, {}

        scen = Scenario(scenario={
            'cs': ConfigurationSpace(),
            'output_dir': ''
        },
                        cmd_args=None)
        stats = Stats(scen)
        stats.start_timing()
        eta = ExecuteTARun(ta=lambda *args: None, stats=stats)

        self.assertRaises(TAEAbortException, eta.start, config={}, instance=1)
Example #26
    def setUp(self):
        self.datamanager = get_multiclass_classification_datamanager()
        self.tmp = os.path.join(os.getcwd(), '.test_evaluation')
        self.logger = logging.getLogger()
        scenario_mock = unittest.mock.Mock()
        scenario_mock.wallclock_limit = 10
        scenario_mock.algo_runs_timelimit = 1000
        scenario_mock.ta_run_limit = 100
        self.scenario = scenario_mock
        stats = Stats(scenario_mock)
        stats.start_timing()
        self.stats = stats

        try:
            shutil.rmtree(self.tmp)
        except:
            pass
Example #27
    def test_memory_limit_usage(self, run_mock):
        run_mock.return_value = StatusType.SUCCESS, 12345.0, 1.2345, {}

        stats = Stats(Scenario({}))
        stats.start_timing()
        tae = ExecuteTARun(lambda x: x**2, stats)

        self.assertRaisesRegex(
            ValueError, 'Target algorithm executor '
            'ExecuteTARun does not support restricting the memory usage.',
            tae.start, {'x': 2},
            'a',
            memory_limit=123)
        tae._supports_memory_limit = True

        rval = tae.start({'x': 2}, 'a', memory_limit=10)
        self.assertEqual(rval, run_mock.return_value)
Example #28
    def setUp(self):
        self.datamanager = get_multiclass_classification_datamanager()
        self.tmp = os.path.join(os.getcwd(), '.test_evaluation')
        self.logger = logging.getLogger()
        scenario_mock = unittest.mock.Mock()
        scenario_mock.wallclock_limit = 10
        scenario_mock.algo_runs_timelimit = 1000
        scenario_mock.ta_run_limit = 100
        self.scenario = scenario_mock
        stats = Stats(scenario_mock)
        stats.start_timing()
        self.stats = stats

        try:
            shutil.rmtree(self.tmp)
        except:
            pass
Example #29
 def setUp(self):
     self.cs = ConfigurationSpace()
     self.scenario = Scenario({
         'cs': self.cs,
         'run_obj': 'quality',
         'output_dir': ''
     })
     self.stats = Stats(scenario=self.scenario)
Example #30
    def setUp(self):
        self.datamanager = get_multiclass_classification_datamanager()
        self.tmp = os.path.join(os.getcwd(), '.test_evaluation')
        os.mkdir(self.tmp)
        self.logger_port = logging.handlers.DEFAULT_TCP_LOGGING_PORT
        scenario_mock = unittest.mock.Mock()
        scenario_mock.wallclock_limit = 10
        scenario_mock.algo_runs_timelimit = 1000
        scenario_mock.ta_run_limit = 100
        self.scenario = scenario_mock
        stats = Stats(scenario_mock)
        stats.start_timing()
        self.stats = stats

        try:
            shutil.rmtree(self.tmp)
        except Exception:
            pass
Example #31
    def setUp(self):
        unittest.TestCase.setUp(self)

        self.rh = RunHistory(aggregate_func=average_cost)
        self.cs = get_config_space()
        self.config1 = Configuration(self.cs, values={'a': 0, 'b': 100})
        self.config2 = Configuration(self.cs, values={'a': 100, 'b': 0})
        self.config3 = Configuration(self.cs, values={'a': 100, 'b': 100})

        self.scen = Scenario({
            "cutoff_time": 2,
            'cs': self.cs,
            "output_dir": ""
        })
        self.stats = Stats(scenario=self.scen)
        self.stats.start_timing()

        self.logger = logging.getLogger("Test")
Example #32
    def get_tuned_config(self, scenario: ASlibScenario):
        '''
            uses SMAC3 to determine a well-performing configuration in the configuration space self.cs on the given scenario

            Arguments
            ---------
            scenario: ASlibScenario
                ASlib Scenario at hand

            Returns
            -------
            Configuration
                best incumbent configuration found by SMAC
        '''

        taf = ExecuteTAFunc(functools.partial(self.run_cv, scenario=scenario))

        ac_scenario = Scenario({"run_obj": "quality",  # we optimize quality
                                # at most 10 function evaluations
                                "runcount-limit": 10,
                                "cs": self.cs,  # configuration space
                                "deterministic": "true"
                                })

        # necessary to use stats options related to scenario information
        AC_Stats.scenario = ac_scenario

        # Optimize
        self.logger.info(
            ">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>")
        self.logger.info("Start Configuration")
        self.logger.info(
            ">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>")
        smbo = SMBO(scenario=ac_scenario, tae_runner=taf,
                    rng=np.random.RandomState(42))
        smbo.run(max_iters=999)

        AC_Stats.print_stats()
        self.logger.info("Final Incumbent: %s" % (smbo.incumbent))

        return smbo.incumbent
Example #33
if is_test:
    memory_limit_factor = 1
else:
    memory_limit_factor = 2

for entry in trajectory:
    incumbent_id = entry.incumbent_id
    train_performance = entry.train_perf
    if incumbent_id not in incumbent_id_to_model:
        config = entry.incumbent

        logger = logging.getLogger('Testing:)')
        stats = Stats(
            Scenario({
                'cutoff_time': per_run_time_limit * 2,
                'run_obj': 'quality',
            })
        )
        stats.start_timing()
        # To avoid the output "first run crashed"...
        stats.ta_runs += 1
        ta = ExecuteTaFuncWithQueue(backend=automl._automl._backend,
                                    autosklearn_seed=seed,
                                    resampling_strategy='test',
                                    memory_limit=memory_limit_factor * automl_arguments['ml_memory_limit'],
                                    disable_file_output=True,
                                    logger=logger,
                                    stats=stats,
                                    all_scoring_functions=True,
                                    metric=metric)
        status, cost, runtime, additional_run_info = ta.start(