Example #1
    def test_memory_limit_usage(self, run_mock):
        run_mock.return_value = StatusType.SUCCESS, 12345.0, 1.2345, {}

        stats = Stats(Scenario({}))
        stats.start_timing()
        tae = ExecuteTARun(lambda x: x**2, stats)

        self.assertRaisesRegex(
            ValueError, 'Target algorithm executor '
            'ExecuteTARun does not support restricting the memory usage.',
            tae.start, {'x': 2},
            'a',
            memory_limit=123)
        tae._supports_memory_limit = True

        rval = tae.start({'x': 2}, 'a', memory_limit=10)
        self.assertEqual(rval, run_mock.return_value)
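The `run_mock` argument above is injected by a `mock.patch` decorator that is not part of this snippet; a plausible reconstruction of that setup (the exact patch target is an assumption, not taken from the source) could look like the following.

import unittest
from unittest import mock

class ExecuteTARunTest(unittest.TestCase):

    # Assumed setup: patch ExecuteTARun.run so no real target algorithm runs;
    # the decorator passes the created mock as the `run_mock` argument.
    @mock.patch.object(ExecuteTARun, 'run')
    def test_memory_limit_usage(self, run_mock):
        ...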
Example #2
    def test_crashed_cost_value(self, test_run):
        '''
            test cost on crashed runs
        '''
        # Patch run-function for custom-return
        scen = Scenario(scenario={
            'cs': ConfigurationSpace(),
            'run_obj': 'quality'
        },
                        cmd_options=None)
        stats = Stats(scen)
        stats.start_timing()
        stats.ta_runs += 1

        # Check quality
        test_run.return_value = StatusType.CRASHED, np.nan, np.nan, {}
        eta = ExecuteTARun(ta=lambda *args: None,
                           stats=stats,
                           run_obj='quality',
                           cost_for_crash=100)
        self.assertEqual(100, eta.start(config={}, instance=1)[1])

        # Check runtime
        eta = ExecuteTARun(ta=lambda *args: None,
                           stats=stats,
                           run_obj='runtime',
                           cost_for_crash=10.7)
        self.assertEqual(20.0, eta.start(config={}, instance=1, cutoff=20)[1])
Example #3
def get_tae(obj):
    """ Create ExecuteTARun-object for testing. """
    scen = Scenario(scenario={
        'cs': ConfigurationSpace(),
        'run_obj': obj,
        'cutoff_time': '10'
    },
                    cmd_options=None)
    stats = Stats(scen)
    stats.start_timing()
    # Add first run to not trigger FirstRunCrashedException
    stats.ta_runs += 1
    eta = ExecuteTARun(ta=lambda *args: None, stats=stats, run_obj=obj)
    return eta
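For orientation, a minimal sketch of how a helper like `get_tae` is typically combined with a patched `ExecuteTARun.run`, following the pattern of the test examples above; the patch target and the mocked return values are assumptions, not part of this snippet.

from unittest import mock

with mock.patch.object(ExecuteTARun, 'run') as run_mock:
    # Pretend the target algorithm succeeded with cost 0.5 and runtime 1.0
    run_mock.return_value = StatusType.SUCCESS, 0.5, 1.0, {}
    tae = get_tae('quality')
    status, cost, runtime, info = tae.start(config={}, instance=1)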
Example #4
    def test_start_tae_return_abort(self, test_run):
        '''
            testing abort
        '''
        # Patch run-function for custom-return
        test_run.return_value = StatusType.ABORT, 12345.0, 1.2345, {}

        scen = Scenario(scenario={
            'cs': ConfigurationSpace(),
            'output_dir': ''
        },
                        cmd_args=None)
        stats = Stats(scen)
        stats.start_timing()
        eta = ExecuteTARun(ta=lambda *args: None, stats=stats)

        self.assertRaises(TAEAbortException, eta.start, config={}, instance=1)
Example #5
def _unbound_tae_starter(
    tae: ExecuteTARun, *args: typing.Any, **kwargs: typing.Any
) -> typing.Tuple[StatusType, float, float, typing.Dict]:
    """
    Unbound function to be used by joblib's Parallel, since directly passing the
    TAE results in pickling problems.

    Parameters
    ----------
    tae: ExecuteTARun
        tae to be used
    *args, **kwargs: various
        arguments to the tae

    Returns
    -------
    tae_results: tuple
        return from tae.start
    """
    return tae.start(*args, **kwargs)
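A short, hedged sketch of how such an unbound starter is usually dispatched with joblib; the shape of `runs` (objects with `config`, `inst`, and `seed` attributes, as in Example #9) and the chosen backend are assumptions rather than part of this snippet.

from joblib import Parallel, delayed

# Each call pickles only the arguments, not a bound method of the TAE.
run_results = Parallel(n_jobs=2, backend='threading')(
    delayed(_unbound_tae_starter)(tae, run.config, run.inst, seed=run.seed)
    for run in runs
)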
Example #6
    def test_start_crash_first_run(self, test_run):
        '''
            testing crash-on-first-run
        '''
        # Patch run-function for custom-return
        test_run.return_value = StatusType.CRASHED, 12345.0, 1.2345, {}

        scen = Scenario(scenario={
            'cs': ConfigurationSpace(),
            'output_dir': ''
        },
                        cmd_args=None)
        stats = Stats(scen)
        stats.start_timing()
        eta = ExecuteTARun(ta=lambda *args: None, stats=stats)

        self.assertRaises(FirstRunCrashedException,
                          eta.start,
                          config={},
                          instance=1)
Example #7
    def test_start_exhausted_budget(self):
        '''
            testing exhausted budget
        '''
        # Set time-limit negative in scenario-options to trigger exception
        scen = Scenario(scenario={
            'wallclock_limit': -1,
            'cs': ConfigurationSpace(),
            'output_dir': ''
        },
                        cmd_args=None)
        stats = Stats(scen)
        stats.start_timing()
        eta = ExecuteTARun(
            ta=lambda *args: None,  # Dummy-function
            stats=stats)

        self.assertRaises(BudgetExhaustedException,
                          eta.start,
                          config={},
                          instance=1)
Example #8
    def validate(self,
                 config_mode: str = 'def',
                 instance_mode: str = 'test',
                 repetitions: int = 1,
                 n_jobs: int = 1,
                 backend: str = 'threading',
                 runhistory: RunHistory = None,
                 tae: ExecuteTARun = None):
        """
        Validate configs on instances and save result in runhistory.

        Parameters
        ----------
        config_mode: string
            what configurations to validate
            from [def, inc, def+inc, time, all], time means evaluation at
            timesteps 2^-4, 2^-3, 2^-2, 2^-1, 2^0, 2^1, ...
        instance_mode: string
            what instances to use for validation, from [train, test, train+test]
        repetitions: int
            number of repetitions in nondeterministic algorithms
        n_jobs: int
            number of parallel processes used by joblib
        backend: str
            what backend joblib should use for parallel runs
        runhistory: RunHistory or string or None
            runhistory to take data from
        tae: ExecuteTARun
            tae to be used. If None, will initialize ExecuteTARunOld

        Returns
        -------
        runhistory: RunHistory
            runhistory with validated runs
        """
        self.logger.debug(
            "Validating configs '%s' on instances '%s', repeating %d times"
            " with %d parallel runs on backend '%s'.", config_mode,
            instance_mode, repetitions, n_jobs, backend)
        # Reset runhistory
        self.rh = RunHistory(average_cost)

        # Get relevant configurations and instances
        configs = self._get_configs(config_mode)
        instances = self._get_instances(instance_mode)

        # If runhistory is given as string, load into memory
        if isinstance(runhistory, str):
            fn = runhistory
            runhistory = RunHistory(average_cost)
            runhistory.load_json(fn, self.scen.cs)

        # Get all runs needed as list
        runs = self.get_runs(configs,
                             instances,
                             repetitions=repetitions,
                             runhistory=runhistory)

        # Create new Stats without limits
        inf_scen = Scenario({
            'run_obj': self.scen.run_obj,
            'cutoff_time': self.scen.cutoff,
            'output_dir': None
        })
        inf_stats = Stats(inf_scen)
        inf_stats.start_timing()

        # Create TAE
        if not tae:
            tae = ExecuteTARunOld(ta=self.scen.ta,
                                  stats=inf_stats,
                                  run_obj=self.scen.run_obj,
                                  par_factor=self.scen.par_factor,
                                  cost_for_crash=self.scen.cost_for_crash)
        else:
            # Inject endless-stats
            tae.stats = inf_stats

        # Validate!
        run_results = self._validate_parallel(tae, runs, n_jobs, backend)

        # tae returns (status, cost, runtime, additional_info)
        # Add runs to RunHistory
        idx = 0
        for result in run_results:
            self.rh.add(config=runs[idx]['config'],
                        cost=result[1],
                        time=result[2],
                        status=result[0],
                        instance_id=runs[idx]['inst'],
                        seed=runs[idx]['seed'],
                        additional_info=result[3])
            idx += 1

        # Save runhistory
        if not self.output.endswith('.json'):
            old = self.output
            self.output = os.path.join(self.output,
                                       'validated_runhistory.json')
            self.logger.debug("Output is \"%s\", changing to \"%s\"!", old,
                              self.output)
        base = os.path.split(self.output)[0]
        if not os.path.exists(base):
            self.logger.debug("Folder (\"%s\") doesn't exist, creating.", base)
            os.makedirs(base)
        self.logger.info("Saving validation-results in %s", self.output)
        self.rh.save_json(self.output)
        return self.rh
Example #9
    def validate(
        self,
        config_mode: Union[str, typing.List[Configuration]] = 'def',
        instance_mode: Union[str, typing.List[str]] = 'test',
        repetitions: int = 1,
        n_jobs: int = 1,
        backend: str = 'threading',
        runhistory: RunHistory = None,
        tae: ExecuteTARun = None,
        output_fn: str = "",
    ) -> RunHistory:
        """
        Validate configs on instances and save result in runhistory.
        If a runhistory is provided as input, it is important to run the validation on the same or comparable hardware.

        side effect: if output is specified, saves runhistory to specified
        output directory.

        Parameters
        ----------
        config_mode: str or list<Configuration>
            string or directly a list of Configuration.
            string from [def, inc, def+inc, wallclock_time, cpu_time, all].
            time evaluates at cpu- or wallclock-timesteps of:
            [max_time/2^0, max_time/2^1, max_time/2^2, ..., default]
            with max_time being the highest recorded time
        instance_mode: str or list<str>
            what instances to use for validation, either from
            [train, test, train+test] or directly a list of instances
        repetitions: int
            number of repetitions in nondeterministic algorithms
        n_jobs: int
            number of parallel processes used by joblib
        backend: str
            what backend joblib should use for parallel runs
        runhistory: RunHistory
            optional, RunHistory-object to reuse runs
        tae: ExecuteTARun
            tae to be used. if None, will initialize ExecuteTARunOld
        output_fn: str
            path to runhistory to be saved. if the suffix is not '.json', will
            be interpreted as directory and filename will be
            'validated_runhistory.json'

        Returns
        -------
        runhistory: RunHistory
            runhistory with validated runs
        """
        self.logger.debug(
            "Validating configs '%s' on instances '%s', repeating %d times"
            " with %d parallel runs on backend '%s'.", config_mode,
            instance_mode, repetitions, n_jobs, backend)

        # Get all runs to be evaluated as list
        runs, validated_rh = self._get_runs(config_mode, instance_mode,
                                            repetitions, runhistory)

        # Create new Stats without limits
        inf_scen = Scenario({
            'run_obj': self.scen.run_obj,
            'cutoff_time': self.scen.cutoff,
            'output_dir': ""
        })
        inf_stats = Stats(inf_scen)
        inf_stats.start_timing()

        # Create TAE
        if not tae:
            tae = ExecuteTARunOld(ta=self.scen.ta,
                                  stats=inf_stats,
                                  run_obj=self.scen.run_obj,
                                  par_factor=self.scen.par_factor,
                                  cost_for_crash=self.scen.cost_for_crash)
        else:
            # Inject endless-stats
            tae.stats = inf_stats

        # Validate!
        run_results = self._validate_parallel(tae, runs, n_jobs, backend)

        # tae returns (status, cost, runtime, additional_info)
        # Add runs to RunHistory
        idx = 0
        for result in run_results:
            validated_rh.add(config=runs[idx].config,
                             cost=result[1],
                             time=result[2],
                             status=result[0],
                             instance_id=runs[idx].inst,
                             seed=runs[idx].seed,
                             additional_info=result[3])
            idx += 1

        if output_fn:
            self._save_results(validated_rh,
                               output_fn,
                               backup_fn="validated_runhistory.json")
        return validated_rh
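For completeness, a hedged usage sketch of this method, assuming SMAC's `Validator` wrapper (built from an existing `Scenario` and trajectory) exposes `validate()` as shown above; the argument values and the trajectory object are illustrative assumptions only.

validator = Validator(scenario, trajectory, rng=np.random.RandomState(42))
validated_rh = validator.validate(config_mode='def+inc',
                                  instance_mode='test',
                                  repetitions=1,
                                  n_jobs=2,
                                  output_fn='validated_runhistory.json')
print("validated %d runs" % len(validated_rh.data))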
Example #10
def runhistory_builder(ta, scenario_dic, rng):

    tae_runner = ExecuteTARun(ta=ta)
    scenario = Scenario(scenario_dic)
    stats = Stats(scenario=scenario)
    traj_logger = TrajLogger(stats=stats, output_dir="/home/dfki/Desktop/temp")

    # if tae_runner.stats is None:
    #     new_smac = SMAC(scenario=scenario, tae_runner=tae_runner)
    #     tae_runner.stats = new_smac.stats

    stats.start_timing()
    default_config_builder = DefaultConfiguration(tae_runner, scenario, stats, traj_logger, rng)
    config_milad = default_config_builder._select_configuration()
    config_milad._values = None
    config_milad._values = {
        'balancing:strategy': 'none',
        'categorical_encoding:__choice__': 'one_hot_encoding',
        'classifier:__choice__': 'random_forest',
        'imputation:strategy': 'mean',
        'preprocessor:__choice__': 'no_preprocessing',
        'rescaling:__choice__': 'standardize',
        'categorical_encoding:one_hot_encoding:use_minimum_fraction': 'True',
        'classifier:random_forest:bootstrap': 'True',
        'classifier:random_forest:criterion': 'gini',
        'classifier:random_forest:max_depth': 10,
        'classifier:random_forest:max_features': 0.5,
        'classifier:random_forest:max_leaf_nodes': 'None',
        'classifier:random_forest:min_impurity_decrease': 0.0,
        'classifier:random_forest:min_samples_leaf': 1,
        'classifier:random_forest:min_samples_split': 2,
        'classifier:random_forest:min_weight_fraction_leaf': 0.0,
        'classifier:random_forest:n_estimators': 100,
        'categorical_encoding:one_hot_encoding:minimum_fraction': 0.01,
    }
    # config_milad._values = {'balancing:strategy': 'none',
    #  'categorical_encoding:__choice__': 'no_encoding',
    #  'classifier:__choice__': 'random_forest',
    #  'imputation:strategy': 'mean',
    #  'preprocessor:__choice__': 'pca',
    #  'preprocessor:copy':True,
    #  'preprocessor:iterated_power':'auto',
    #  'preprocessor:n_components':'None',
    #  'preprocessor:random_state':'None',
    #  'preprocessor:svd_solver':'auto',
    #  'preprocessor:tol':0.0,
    #  'preprocessor:whiten':'False',
    #  'rescaling:__choice__': 'None',
    #  'classifier:random_forest:bootstrap': 'True',
    #  'classifier:random_forest:class_weight': 'None',
    #  'classifier:random_forest:criterion': 'gini',
    #  'classifier:random_forest:max_depth': 'None',
    #  'classifier:random_forest:max_features': 'auto',
    #  'classifier:random_forest:max_leaf_nodes': 'None',
    #  'classifier:random_forest:min_impurity_decrease': 0.0,
    #  'classifier:random_forest:min_impurity_split': '1e-07',
    #  'classifier:random_forest:min_samples_leaf': 1,
    #  'classifier:random_forest:min_samples_split': 2,
    #  'classifier:random_forest:min_weight_fraction_leaf': 0.0,
    #  'classifier:random_forest:n_estimators': 10,
    #  'classifier:random_forest:n_jobs': 1,
    #  'classifier:random_forest:oob_score': 'False',
    #  'classifier:random_forest:random_state': 'None',
    #  'classifier:random_forest:verbose': 0,
    #  'classifier:random_forest:warm_start': 'False',
    # }
    # config_milad._vector =None


    status, cost, runtime, additional_info = tae_runner.start(config=config_milad, instance=None)

    print(status, cost, runtime, additional_info)

    runhistory = RunHistory(aggregate_func=average_cost)
    runhistory.add(config=config_milad,
                   cost=cost,
                   time=runtime,
                   status=status,
                   instance_id=None,
                   additional_info=additional_info)

    return runhistory