Example 1
    def test_crashed_cost_value(self, test_run):
        """Test that crashed runs are charged the configured crash cost."""
        # Patch run-function for custom-return
        scen = Scenario(scenario={
            'cs': ConfigurationSpace(),
            'run_obj': 'quality'
        },
                        cmd_options=None)
        stats = Stats(scen)
        stats.start_timing()
        stats.ta_runs += 1

        # Check quality objective: a crashed run is charged cost_for_crash
        test_run.return_value = StatusType.CRASHED, np.nan, np.nan, {}
        eta = ExecuteTARun(ta=lambda *args: None,
                           stats=stats,
                           run_obj='quality',
                           cost_for_crash=100)
        self.assertEqual(100, eta.start(config={}, instance=1)[1])

        # Check runtime objective: a crashed run is charged the cutoff, not cost_for_crash
        eta = ExecuteTARun(ta=lambda *args: None,
                           stats=stats,
                           run_obj='runtime',
                           cost_for_crash=10.7)
        self.assertEqual(20.0, eta.start(config={}, instance=1, cutoff=20)[1])
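Note the asymmetry between the two objectives: with run_obj='quality' the crashed run is charged the configured cost_for_crash (100), while with run_obj='runtime' it is charged the cutoff (20.0) and cost_for_crash=10.7 is ignored.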
Example 2
def get_tae(obj):
    """Create an ExecuteTARun object for testing."""
    scen = Scenario(scenario={
        'cs': ConfigurationSpace(),
        'run_obj': obj,
        'cutoff_time': '10'
    },
                    cmd_options=None)
    stats = Stats(scen)
    stats.start_timing()
    # Add a first run so FirstRunCrashedException is not triggered
    stats.ta_runs += 1
    eta = ExecuteTARun(ta=lambda *args: None, stats=stats, run_obj=obj)
    return eta
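For context, here is a minimal sketch of how such a helper can be exercised, assuming ExecuteTARun.run is patched the same way as in the surrounding tests; the patched return tuple and its values are illustrative only:

from unittest import mock

# Patch the run method, as the tests above do, and feed it a made-up
# (status, cost, runtime, additional_info) tuple.
with mock.patch.object(ExecuteTARun, 'run') as run_mock:
    run_mock.return_value = StatusType.SUCCESS, 1.0, 0.5, {}
    tae = get_tae('quality')
    status, cost, runtime, additional_info = tae.start(config={}, instance=1)
    print(status, cost, runtime, additional_info)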
Example 3
    def test_memory_limit_usage(self, run_mock):
        """Test that a memory limit is rejected unless the executor supports it."""
        run_mock.return_value = StatusType.SUCCESS, 12345.0, 1.2345, {}

        stats = Stats(Scenario({}))
        stats.start_timing()
        tae = ExecuteTARun(lambda x: x**2, stats)

        self.assertRaisesRegex(
            ValueError, 'Target algorithm executor '
            'ExecuteTARun does not support restricting the memory usage.',
            tae.start, {'x': 2},
            'a',
            memory_limit=123)
        # Flip the private support flag so the memory limit is accepted
        tae._supports_memory_limit = True

        rval = tae.start({'x': 2}, 'a', memory_limit=10)
        self.assertEqual(rval, run_mock.return_value)
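The test first confirms that start raises a ValueError when a memory_limit is passed to an executor that does not support it, then flips the private _supports_memory_limit flag and checks that the mocked run result is passed through unchanged.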
Example 4
    def test_start_tae_return_abort(self, test_run):
        """Test that StatusType.ABORT from the target algorithm raises TAEAbortException."""
        # Patch run-function for custom-return
        test_run.return_value = StatusType.ABORT, 12345.0, 1.2345, {}

        scen = Scenario(scenario={
            'cs': ConfigurationSpace(),
            'output_dir': ''
        },
                        cmd_args=None)
        stats = Stats(scen)
        stats.start_timing()
        eta = ExecuteTARun(ta=lambda *args: None, stats=stats)

        self.assertRaises(TAEAbortException, eta.start, config={}, instance=1)
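Unlike the crash case in the next example, no stats.ta_runs increment is needed here: a StatusType.ABORT result makes start raise TAEAbortException directly.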
Example 5
    def test_start_crash_first_run(self, test_run):
        """Test that a crash on the very first run raises FirstRunCrashedException."""
        # Patch run-function for custom-return
        test_run.return_value = StatusType.CRASHED, 12345.0, 1.2345, {}

        scen = Scenario(scenario={
            'cs': ConfigurationSpace(),
            'output_dir': ''
        },
                        cmd_args=None)
        stats = Stats(scen)
        stats.start_timing()
        eta = ExecuteTARun(ta=lambda *args: None, stats=stats)

        self.assertRaises(FirstRunCrashedException,
                          eta.start,
                          config={},
                          instance=1)
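In contrast to Examples 1 and 2, stats.ta_runs is not incremented here, so the mocked crash happens on the very first run and start raises FirstRunCrashedException instead of returning a crash cost.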
Example 6
    def test_start_exhausted_budget(self):
        """Test that an exhausted wallclock budget raises BudgetExhaustedException."""
        # Set time-limit negative in scenario-options to trigger exception
        scen = Scenario(scenario={
            'wallclock_limit': -1,
            'cs': ConfigurationSpace(),
            'output_dir': ''
        },
                        cmd_args=None)
        stats = Stats(scen)
        stats.start_timing()
        eta = ExecuteTARun(
            ta=lambda *args: None,  # Dummy-function
            stats=stats)

        self.assertRaises(BudgetExhaustedException,
                          eta.start,
                          config={},
                          instance=1)
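Since wallclock_limit is -1, the budget is already exhausted by the time start is called, so BudgetExhaustedException is raised before the dummy target algorithm would ever run.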
Example 7
def runhistory_builder(ta, scenario_dic, rng):
    """Build a RunHistory containing a single run of the given target algorithm."""
    tae_runner = ExecuteTARun(ta=ta)
    scenario = Scenario(scenario_dic)
    stats = Stats(scenario=scenario)
    traj_logger = TrajLogger(stats=stats, output_dir="/home/dfki/Desktop/temp")

    stats.start_timing()
    default_config_builder = DefaultConfiguration(tae_runner, scenario, stats, traj_logger, rng)
    config = default_config_builder._select_configuration()
    # Overwrite the configuration values directly; this bypasses ConfigSpace
    # validation, so the keys must match the scenario's configuration space.
    config._values = {
        'balancing:strategy': 'none',
        'categorical_encoding:__choice__': 'one_hot_encoding',
        'categorical_encoding:one_hot_encoding:use_minimum_fraction': 'True',
        'categorical_encoding:one_hot_encoding:minimum_fraction': 0.01,
        'classifier:__choice__': 'random_forest',
        'classifier:random_forest:bootstrap': 'True',
        'classifier:random_forest:criterion': 'gini',
        'classifier:random_forest:max_depth': 10,
        'classifier:random_forest:max_features': 0.5,
        'classifier:random_forest:max_leaf_nodes': 'None',
        'classifier:random_forest:min_impurity_decrease': 0.0,
        'classifier:random_forest:min_samples_leaf': 1,
        'classifier:random_forest:min_samples_split': 2,
        'classifier:random_forest:min_weight_fraction_leaf': 0.0,
        'classifier:random_forest:n_estimators': 100,
        'imputation:strategy': 'mean',
        'preprocessor:__choice__': 'no_preprocessing',
        'rescaling:__choice__': 'standardize',
    }

    status, cost, runtime, additional_info = tae_runner.start(config=config, instance=None)
    print(status, cost, runtime, additional_info)

    runhistory = RunHistory(aggregate_func=average_cost)
    runhistory.add(config=config,
                   cost=cost,
                   time=runtime,
                   status=status,
                   instance_id=None,
                   additional_info=additional_info)

    return runhistory
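The aggregate_func=average_cost argument tells the RunHistory how to combine costs when the same configuration is evaluated more than once, e.g. on several instances or seeds.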