Example 1
    def test_eval_with_limits_holdout_fail_silent(self, pynisher_mock):
        pynisher_mock.return_value = None
        ta = ExecuteTaFuncWithQueue(
            backend=BackendMock(),
            autosklearn_seed=1,
            resampling_strategy='holdout',
            logger=self.logger,
            stats=self.stats,
            memory_limit=3072,
            metric=accuracy,
            cost_for_crash=get_cost_of_crash(accuracy),
            abort_on_first_run_crash=False,
        )

        # The following should not raise because abort_on_first_run_crash is False
        info = ta.start(config=None, instance=None, cutoff=60)
        self.assertEqual(info[0], StatusType.CRASHED)
        self.assertEqual(info[1], 1.0)
        self.assertIsInstance(info[2], float)
        self.assertEqual(info[3], {
            'configuration_origin': 'UNKNOWN',
            'error': "Result queue is empty"
        })

        self.stats.ta_runs += 1
        info = ta.start(config=None, instance=None, cutoff=30)
        self.assertEqual(info[0], StatusType.CRASHED)
        self.assertEqual(info[1], 1.0)
        self.assertIsInstance(info[2], float)
        self.assertEqual(info[3], {
            'configuration_origin': 'UNKNOWN',
            'error': "Result queue is empty"
        })
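
Note: every example in this collection references self.logger, self.stats and,
in some cases, self.scenario without showing their setup. A minimal setUp
sketch, consistent with how Example 21 builds its Stats object from a scenario
mock (the limit values here are placeholders, not the suite's real ones):

import logging
import unittest.mock

from smac.stats.stats import Stats

def setUp(self):
    self.logger = logging.getLogger(self.__class__.__name__)
    # Stats only needs a scenario-like object carrying the limits SMAC reads.
    self.scenario = unittest.mock.Mock()
    self.scenario.wallclock_limit = 100     # placeholder budget, in seconds
    self.scenario.algo_runs_timelimit = 100
    self.scenario.ta_run_limit = 100
    self.stats = Stats(self.scenario)
    self.stats.start_timing()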
Example 2
    def test_eval_with_limits_holdout_timeout_with_results_in_queue(
            self, pynisher_mock):
        def side_effect(**kwargs):
            queue = kwargs['queue']
            queue.put({
                'status': StatusType.SUCCESS,
                'loss': 0.5,
                'additional_run_info': {}
            })

        m1 = unittest.mock.Mock()
        m2 = unittest.mock.Mock()
        m1.return_value = m2
        pynisher_mock.return_value = m1
        m2.side_effect = side_effect
        m2.exit_status = pynisher.TimeoutException
        m2.wall_clock_time = 30

        # Test for a successful run
        ta = ExecuteTaFuncWithQueue(
            backend=BackendMock(),
            autosklearn_seed=1,
            resampling_strategy='holdout',
            logger=self.logger,
            stats=self.stats,
            memory_limit=3072,
            metric=accuracy,
            cost_for_crash=get_cost_of_crash(accuracy),
            abort_on_first_run_crash=False,
        )
        info = ta.start(None, instance=None, cutoff=30)
        self.assertEqual(info[0], StatusType.SUCCESS)
        self.assertEqual(info[1], 0.5)
        self.assertIsInstance(info[2], float)

        # And a crashed run whose result is already in the queue
        def side_effect(**kwargs):
            queue = kwargs['queue']
            queue.put({
                'status': StatusType.CRASHED,
                'loss': 2.0,
                'additional_run_info': {}
            })

        m2.side_effect = side_effect
        ta = ExecuteTaFuncWithQueue(
            backend=BackendMock(),
            autosklearn_seed=1,
            resampling_strategy='holdout',
            logger=self.logger,
            stats=self.stats,
            memory_limit=3072,
            metric=accuracy,
            cost_for_crash=get_cost_of_crash(accuracy),
            abort_on_first_run_crash=False,
        )
        info = ta.start(None, instance=None, cutoff=30)
        self.assertEqual(info[0], StatusType.CRASHED)
        self.assertEqual(info[1], 1.0)
        self.assertIsInstance(info[2], float)
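
The m1/m2 pair in this test mirrors the two call levels of the older pynisher
enforce_limits API used by the TAE: enforce_limits(...) returns a wrapper
object, applying it to the target function yields the limited callable, and
exit_status/wall_clock_time are read off that callable after the run. A
hedged sketch of the real pattern being mocked:

import pynisher

def run_under_limits(ta_function, **queue_kwargs):
    # pynisher.enforce_limits is what pynisher_mock patches (it returns m1).
    wrapper = pynisher.enforce_limits(wall_time_in_s=30, mem_in_mb=3072)
    safe_ta = wrapper(ta_function)   # the limited callable (m2)
    safe_ta(**queue_kwargs)          # m2.side_effect fires here
    # Afterwards the TAE inspects these attributes on the callable:
    return safe_ta.exit_status, safe_ta.wall_clock_time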
Example 3
 def test_cutoff_lower_than_remaining_time(self, pynisher_mock):
     ta = ExecuteTaFuncWithQueue(backend=BackendMock(), autosklearn_seed=1,
                                 resampling_strategy='holdout',
                                 logger=self.logger,
                                 stats=self.stats)
     self.stats.ta_runs = 1
     ta.start(None, cutoff=30, instance=None)
     self.assertEqual(pynisher_mock.call_args[1]['wall_time_in_s'], 4)
     self.assertIsInstance(pynisher_mock.call_args[1]['wall_time_in_s'], int)
Example 4
 def test_cutoff_lower_than_remaining_time(self, pynisher_mock):
     ta = ExecuteTaFuncWithQueue(backend=BackendMock(), autosklearn_seed=1,
                                 resampling_strategy='holdout',
                                 logger=self.logger,
                                 stats=self.stats,
                                 metric=accuracy)
     self.stats.ta_runs = 1
     ta.start(None, cutoff=30, instance=None)
     self.assertEqual(pynisher_mock.call_args[1]['wall_time_in_s'], 4)
     self.assertIsInstance(pynisher_mock.call_args[1]['wall_time_in_s'], int)
Example 5
 def test_cutoff_lower_than_remaining_time(self, pynisher_mock):
     ta = ExecuteTaFuncWithQueue(
         backend=BackendMock(),
         autosklearn_seed=1,
         resampling_strategy='holdout',
         logger=self.logger,
         stats=self.stats,
         metric=accuracy,
         cost_for_crash=get_cost_of_crash(accuracy),
         abort_on_first_run_crash=False,
     )
     self.stats.ta_runs = 1
     ta.start(None, cutoff=30, instance=None)
     self.assertEqual(pynisher_mock.call_args[1]['wall_time_in_s'], 4)
     self.assertIsInstance(pynisher_mock.call_args[1]['wall_time_in_s'],
                           int)
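
Examples 3-5 are successive versions of the same test and assert the same
behaviour: when the remaining wallclock budget is smaller than the requested
cutoff, the TAE clips the cutoff to the integer time left (4 seconds under
this fixture's budget). A rough sketch of the clipping, with illustrative
names rather than the real implementation:

def effective_cutoff(cutoff, wallclock_limit, time_already_used):
    remaining = wallclock_limit - time_already_used
    # The test checks both the value and that it is an int, since pynisher
    # expects an integer wall_time_in_s.
    return int(min(cutoff, remaining))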
Example 6
    def test_eval_with_limits_holdout_fail_silent(self, pynisher_mock):
        pynisher_mock.return_value = None
        ta = ExecuteTaFuncWithQueue(backend=BackendMock(),
                                    autosklearn_seed=1,
                                    resampling_strategy='holdout',
                                    logger=self.logger,
                                    stats=self.stats,
                                    memory_limit=3072,
                                    metric=accuracy)

        self.assertRaisesRegex(FirstRunCrashedException,
                               "First run crashed, abort. \(To prevent this, "
                               "toggle the "
                               "'abort_on_first_run_crash'-option!\)",
                               ta.start,
                               config=None,
                               instance=None,
                               cutoff=30)

        self.stats.ta_runs += 1
        info = ta.start(config=None, instance=None, cutoff=30)
        self.assertEqual(info[0], StatusType.CRASHED)
        self.assertEqual(info[1], 1.0)
        self.assertIsInstance(info[2], float)
        self.assertEqual(info[3], {'error': "Result queue is empty"})
Example 7
    def test_eval_with_limits_holdout_2(self, eval_holdout_mock):
        def side_effect(*args, **kwargs):
            queue = kwargs['queue']
            queue.put({
                'status': StatusType.SUCCESS,
                'loss': 0.5,
                'additional_run_info': kwargs['instance']
            })

        eval_holdout_mock.side_effect = side_effect
        ta = ExecuteTaFuncWithQueue(backend=BackendMock(),
                                    autosklearn_seed=1,
                                    resampling_strategy='holdout',
                                    logger=self.logger,
                                    stats=self.stats,
                                    memory_limit=3072,
                                    metric=accuracy)
        self.scenario.wallclock_limit = 180
        instance = "{'subsample': 30}"
        info = ta.start(None, cutoff=30, instance=instance)
        self.assertEqual(info[0], StatusType.SUCCESS)
        self.assertEqual(info[-1], {
            'message': "{'subsample': 30}",
            'configuration_origin': 'UNKNOWN'
        })
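
The final assertion implies that the TAE normalizes a plain-string
additional_run_info coming back from the evaluator into a dict and stamps a
configuration origin on it. A sketch of that normalization, inferred from the
expected value (names are illustrative):

def normalize_run_info(additional_run_info, origin='UNKNOWN'):
    # A bare string from the evaluator is wrapped under 'message' ...
    if isinstance(additional_run_info, str):
        additional_run_info = {'message': additional_run_info}
    # ... and the configuration origin is recorded (config=None -> 'UNKNOWN').
    additional_run_info['configuration_origin'] = origin
    return additional_run_info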
Example 8
    def test_eval_with_limits_holdout_fail_silent(self, pynisher_mock):
        pynisher_mock.return_value = None
        ta = ExecuteTaFuncWithQueue(backend=BackendMock(),
                                    autosklearn_seed=1,
                                    resampling_strategy='holdout',
                                    logger=self.logger,
                                    stats=self.stats,
                                    memory_limit=3072,
                                    metric=accuracy)

        self.assertRaisesRegex(FirstRunCrashedException,
                               "First run crashed, abort. Please check your "
                               "setup -- we assume that your "
                               "defaultconfiguration does not crashes. \(To "
                               "deactivate this exception, use the SMAC "
                               "scenario option 'abort_on_first_run_crash'\)",
                               ta.start,
                               config=None,
                               instance=None,
                               cutoff=30)

        self.stats.ta_runs += 1
        info = ta.start(config=None, instance=None, cutoff=30)
        self.assertEqual(info[0], StatusType.CRASHED)
        self.assertEqual(info[1], 1.0)
        self.assertIsInstance(info[2], float)
        self.assertEqual(info[3], {
            'configuration_origin': 'UNKNOWN',
            'error': "Result queue is empty"
        })
Example 9
 def test_zero_or_negative_cutoff(self, pynisher_mock):
     ta = ExecuteTaFuncWithQueue(backend=BackendMock(), autosklearn_seed=1,
                                 resampling_strategy='holdout',
                                 logger=self.logger,
                                 stats=self.stats)
     self.scenario.wallclock_limit = 5
     info = ta.start(None, instance=None, cutoff=10)
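     # "bugdet" [sic]: the fixture presumably mirrors a typo in the message
     # produced by ExecuteTaFuncWithQueue itself, so it is left as-is here.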
     fixture = (StatusType.ABORT, np.nan, 0, {"misc": "exhausted bugdet -- ABORT"})
     self.assertEqual(info, fixture)
Example 10
    def test_eval_with_limits_holdout_timeout_with_results_in_queue(self, pynisher_mock):
        def side_effect(**kwargs):
            queue = kwargs['queue']
            queue.put({'status': StatusType.SUCCESS,
                       'loss': 0.5,
                       'additional_run_info': {}})
        m1 = unittest.mock.Mock()
        m2 = unittest.mock.Mock()
        m1.return_value = m2
        pynisher_mock.return_value = m1
        m2.side_effect = side_effect
        m2.exit_status = pynisher.TimeoutException
        m2.wall_clock_time = 30

        # Test for a successful run
        ta = ExecuteTaFuncWithQueue(backend=BackendMock(), autosklearn_seed=1,
                                    resampling_strategy='holdout',
                                    logger=self.logger,
                                    stats=self.stats,
                                    memory_limit=3072,
                                    metric=accuracy)
        info = ta.start(None, instance=None, cutoff=30)
        self.assertEqual(info[0], StatusType.SUCCESS)
        self.assertEqual(info[1], 0.5)
        self.assertIsInstance(info[2], float)

        # And a crashed run whose result is already in the queue
        def side_effect(**kwargs):
            queue = kwargs['queue']
            queue.put({'status': StatusType.CRASHED,
                       'loss': 2.0,
                       'additional_run_info': {}})
        m2.side_effect = side_effect
        ta = ExecuteTaFuncWithQueue(backend=BackendMock(), autosklearn_seed=1,
                                    resampling_strategy='holdout',
                                    logger=self.logger,
                                    stats=self.stats,
                                    memory_limit=3072,
                                    metric=accuracy)
        info = ta.start(None, instance=None, cutoff=30)
        self.assertEqual(info[0], StatusType.CRASHED)
        self.assertEqual(info[1], 1.0)
        self.assertIsInstance(info[2], float)
Example 11
 def test_eval_with_limits_holdout(self, pynisher_mock):
     pynisher_mock.side_effect = safe_eval_success_mock
     ta = ExecuteTaFuncWithQueue(backend=BackendMock(), autosklearn_seed=1,
                                 resampling_strategy='holdout',
                                 logger=self.logger,
                                 stats=self.stats,
                                 memory_limit=3072,
                                 metric=accuracy)
     info = ta.start(None, instance=None, cutoff=30)
     self.assertEqual(info[0], StatusType.SUCCESS)
     self.assertEqual(info[1], 0.5)
     self.assertIsInstance(info[2], float)
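
safe_eval_success_mock is not shown in this collection; given the assertions
(SUCCESS with loss 0.5), it presumably puts a canned result on the queue,
mirroring the side_effect helpers of Examples 2 and 10:

def safe_eval_success_mock(*args, **kwargs):
    # Assumed helper: emulate a successful evaluation run.
    queue = kwargs['queue']
    queue.put({'status': StatusType.SUCCESS,
               'loss': 0.5,
               'additional_run_info': {}})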
Example 12
 def test_eval_with_limits_holdout(self, pynisher_mock):
     pynisher_mock.side_effect = safe_eval_success_mock
     ta = ExecuteTaFuncWithQueue(backend=BackendMock(), autosklearn_seed=1,
                                 resampling_strategy='holdout',
                                 logger=self.logger,
                                 stats=self.stats,
                                 memory_limit=3072,
                                 metric=accuracy)
     info = ta.start(None, instance=None, cutoff=30)
     self.assertEqual(info[0], StatusType.SUCCESS)
     self.assertEqual(info[1], 0.5)
     self.assertIsInstance(info[2], float)
Example 13
 def test_eval_with_limits_holdout(self, eval_holdout_mock):
     def side_effect(*args, **kwargs):
         queue = kwargs['queue']
         queue.put((StatusType.SUCCESS, 0.5, 0.12345, kwargs['subsample']))
     eval_holdout_mock.side_effect = side_effect
     ta = ExecuteTaFuncWithQueue(backend=BackendMock(), autosklearn_seed=1,
                                 resampling_strategy='holdout',
                                 logger=self.logger,
                                 stats=self.stats,
                                 memory_limit=3072)
     self.scenario.wallclock_limit = 180
     info = ta.start(None, cutoff=30, instance=None,
                     instance_specific='subsample=30')
     self.assertEqual(info[0], StatusType.SUCCESS)
     self.assertEqual(info[-1], 30)
Example 14
 def test_exception_in_target_function(self, eval_holdout_mock):
     eval_holdout_mock.side_effect = ValueError
     ta = ExecuteTaFuncWithQueue(backend=BackendMock(), autosklearn_seed=1,
                                 resampling_strategy='holdout',
                                 logger=self.logger,
                                 stats=self.stats,
                                 memory_limit=3072,
                                 metric=accuracy)
     self.stats.ta_runs += 1
     info = ta.start(None, instance=None, cutoff=30)
     self.assertEqual(info[0], StatusType.CRASHED)
     self.assertEqual(info[1], 1.0)
     self.assertIsInstance(info[2], float)
     self.assertEqual(info[3]['error'], 'ValueError()')
     self.assertIn('traceback', info[3])
Example 15
    def test_eval_with_limits_holdout_fail_memory_error(self, pynisher_mock):
        pynisher_mock.side_effect = MemoryError
        ta = ExecuteTaFuncWithQueue(backend=BackendMock(), autosklearn_seed=1,
                                    resampling_strategy='holdout',
                                    logger=self.logger,
                                    stats=self.stats,
                                    memory_limit=3072,
                                    metric=log_loss)
        info = ta.start(None, instance=None, cutoff=30)
        self.assertEqual(info[0], StatusType.MEMOUT)

        # For logloss, worst possible result is MAXINT
        worst_possible_result = MAXINT
        self.assertEqual(info[1], worst_possible_result)
        self.assertIsInstance(info[2], float)
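
The MAXINT fixture reflects that the crash/worst cost depends on the metric:
bounded metrics such as accuracy yield a finite worst loss (1.0 in the other
examples), while unbounded ones such as log_loss fall back to a large
constant. A rough sketch of that rule; the attribute names are assumptions,
not auto-sklearn's real API:

from smac.utils.constants import MAXINT  # the constant asserted above

def worst_possible_cost(metric):
    # Bounded metric (e.g. accuracy): optimum 1.0 minus worst value 0.0 = 1.0.
    # Unbounded metric (e.g. log_loss): no finite bound, fall back to MAXINT.
    if metric.is_unbounded:  # hypothetical attribute
        return MAXINT
    return metric.optimum - metric.worst_possible_result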
Example 16
 def test_exception_in_target_function(self, eval_holdout_mock):
     eval_holdout_mock.side_effect = ValueError
     ta = ExecuteTaFuncWithQueue(backend=BackendMock(), autosklearn_seed=1,
                                 resampling_strategy='holdout',
                                 logger=self.logger,
                                 stats=self.stats,
                                 memory_limit=3072,
                                 metric=accuracy)
     self.stats.ta_runs += 1
     info = ta.start(None, instance=None, cutoff=30)
     self.assertEqual(info[0], StatusType.CRASHED)
     self.assertEqual(info[1], 1.0)
     self.assertIsInstance(info[2], float)
     self.assertEqual(info[3]['error'], 'ValueError()')
     self.assertIn('traceback', info[3])
Example 17
 def test_eval_with_limits_holdout_fail_timeout(self, pynisher_mock):
     m1 = unittest.mock.Mock()
     m2 = unittest.mock.Mock()
     m1.return_value = m2
     pynisher_mock.return_value = m1
     m2.exit_status = pynisher.TimeoutException
     m2.wall_clock_time = 30
     ta = ExecuteTaFuncWithQueue(backend=BackendMock(), autosklearn_seed=1,
                                 resampling_strategy='holdout',
                                 logger=self.logger,
                                 stats=self.stats,
                                 memory_limit=3072,
                                 metric=accuracy)
     info = ta.start(config=None, instance=None, cutoff=30)
     self.assertEqual(info[0], StatusType.TIMEOUT)
     self.assertEqual(info[1], 1.0)
     self.assertIsInstance(info[2], float)
Example 18
 def test_eval_with_limits_holdout_fail_timeout(self, pynisher_mock):
     m1 = unittest.mock.Mock()
     m2 = unittest.mock.Mock()
     m1.return_value = m2
     pynisher_mock.return_value = m1
     m2.exit_status = pynisher.TimeoutException
     m2.wall_clock_time = 30
     ta = ExecuteTaFuncWithQueue(backend=BackendMock(), autosklearn_seed=1,
                                 resampling_strategy='holdout',
                                 logger=self.logger,
                                 stats=self.stats,
                                 memory_limit=3072,
                                 metric=accuracy)
     info = ta.start(config=None, instance=None, cutoff=30)
     self.assertEqual(info[0], StatusType.TIMEOUT)
     self.assertEqual(info[1], 1.0)
     self.assertIsInstance(info[2], float)
Example 19
 def test_eval_with_limits_holdout_2(self, eval_holdout_mock):
     def side_effect(*args, **kwargs):
         queue = kwargs['queue']
         queue.put({'status': StatusType.SUCCESS,
                    'loss': 0.5,
                    'additional_run_info': kwargs['instance']})
     eval_holdout_mock.side_effect = side_effect
     ta = ExecuteTaFuncWithQueue(backend=BackendMock(), autosklearn_seed=1,
                                 resampling_strategy='holdout',
                                 logger=self.logger,
                                 stats=self.stats,
                                 memory_limit=3072,
                                 metric=accuracy)
     self.scenario.wallclock_limit = 180
     instance = "{'subsample': 30}"
     info = ta.start(None, cutoff=30, instance=instance)
     self.assertEqual(info[0], StatusType.SUCCESS)
     self.assertEqual(info[-1], {'message': "{'subsample': 30}",
                                 'configuration_origin': 'UNKNOWN'})
Example 20
    def test_eval_with_limits_holdout_fail_silent(self, pynisher_mock):
        pynisher_mock.return_value = None
        ta = ExecuteTaFuncWithQueue(backend=BackendMock(), autosklearn_seed=1,
                                    resampling_strategy='holdout',
                                    logger=self.logger,
                                    stats=self.stats,
                                    memory_limit=3072,
                                    metric=accuracy)

        self.assertRaisesRegex(FirstRunCrashedException,
                               "First run crashed, abort. Please check your "
                               "setup -- we assume that your "
                               "defaultconfiguration does not crashes. \(To "
                               "deactivate this exception, use the SMAC "
                               "scenario option 'abort_on_first_run_crash'\)",
                               ta.start, config=None, instance=None, cutoff=30)

        self.stats.ta_runs += 1
        info = ta.start(config=None, instance=None, cutoff=30)
        self.assertEqual(info[0], StatusType.CRASHED)
        self.assertEqual(info[1], 1.0)
        self.assertIsInstance(info[2], float)
        self.assertEqual(info[3], {'configuration_origin': 'UNKNOWN',
                                   'error': "Result queue is empty"})
Example 21
def run_configuration(backend, config_id, task_id, configuration, run_args,
                      memory_limit, per_run_time_limit):
    evaluation, iterative_fit, early_stopping, N_FOLDS, searchspace = run_args

    # TODO make this an argument from the command line!
    scenario_mock = unittest.mock.Mock()
    scenario_mock.wallclock_limit = per_run_time_limit * 100
    scenario_mock.algo_runs_timelimit = per_run_time_limit * 100
    scenario_mock.ta_run_limit = np.inf
    stats = Stats(scenario_mock)
    stats.ta_runs = 2

    # Resampling strategies
    kwargs = {}
    if evaluation == "holdout" and iterative_fit:
        resampling_strategy = 'holdout-iterative-fit'
    elif evaluation == "holdout" and not iterative_fit:
        resampling_strategy = 'holdout'
    elif evaluation == "CV" and not iterative_fit:
        resampling_strategy = 'cv'
        kwargs = {'folds': N_FOLDS}
    elif evaluation == "CV" and iterative_fit:
        resampling_strategy = 'cv-iterative-fit'
        kwargs = {'folds': N_FOLDS}
    else:
        raise ValueError("Unknown resampling strategy", evaluation)

    iterative_wo_early_stopping = [
        'extra_trees', 'PassiveAggressiveWOEarlyStopping', 'random_forest',
        'SGDWOEarlyStopping', 'GradientBoostingClassifierWOEarlyStopping'
    ]
    iterative_w_early_stopping = [
        'extra_trees', 'passive_aggressive', 'random_forest', 'sgd',
        'gradient_boosting'
    ]

    if not early_stopping:
        add_classifier_wo_early_stopping()

    if searchspace == "iterative":
        include_estimator = (iterative_w_early_stopping if early_stopping
                             else iterative_wo_early_stopping)
        include_preprocessor = [
            "no_preprocessing",
        ]
    elif searchspace == "full":
        assert early_stopping is True
        include_estimator = None
        include_preprocessor = None
    # elif searchspace == 'only-iterative-nopreproc':
    #    include_estimator = iterative_w_early_stopping if early_stopping else iterative_wo_early_stopping
    #    include_preprocessor = ["no_preprocessing", ]
    # elif searchspace == 'only-iterative-cheappreproc':
    #    include_estimator = iterative_w_early_stopping if early_stopping else iterative_wo_early_stopping
    #    include_preprocessor = ["no_preprocessing", 'kitchen_sinks', 'polynomial', 'select_percentile_classification', 'select_rates']
    # elif searchspace == 'only-iterative':
    #    include_estimator = iterative_w_early_stopping if early_stopping else iterative_wo_early_stopping
    #    include_preprocessor = None
    # elif searchspace == "gb":
    #    include_estimator = ['GradientBoostingClassifierWOEarlyStopping'] if early_stopping else ['GradientBoostingClassifierWEarlyStopping']
    #    include_preprocessor = None
    else:
        raise ValueError(searchspace)

    stats.start_timing()
    tae = ExecuteTaFuncWithQueue(
        backend=backend,
        autosklearn_seed=3,
        resampling_strategy=resampling_strategy,
        metric=balanced_accuracy,
        logger=logging.getLogger(name="%s_%s" % (task_id, config_id)),
        initial_num_run=2,
        stats=stats,
        runhistory=None,
        run_obj='quality',
        par_factor=1,
        all_scoring_functions=False,
        output_y_hat_optimization=True,
        include={
            "classifier": include_estimator,
            "feature_preprocessor": include_preprocessor
        },
        exclude=None,
        memory_limit=memory_limit,
        disable_file_output=True,
        init_params=None,
        **kwargs)

    # Finally run configuration
    status, cost, runtime, additional_run_info = tae.start(
        config=configuration,
        instance=None,
        cutoff=per_run_time_limit,
        instance_specific=None,
        capped=False,
    )

    return status, cost, runtime, additional_run_info
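
A hedged invocation sketch for run_configuration; backend and configuration
are placeholders the caller must supply, and run_args follows the unpacking
at the top of the function:

# run_args = (evaluation, iterative_fit, early_stopping, N_FOLDS, searchspace)
run_args = ('holdout', True, True, 10, 'iterative')
status, cost, runtime, additional_run_info = run_configuration(
    backend=backend,                # an auto-sklearn Backend (placeholder)
    config_id=0,
    task_id=31,
    configuration=configuration,    # a ConfigSpace Configuration (placeholder)
    run_args=run_args,
    memory_limit=3072,              # MB
    per_run_time_limit=360,         # seconds
)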
Example 22
                'run_obj': 'quality',
            }))
        stats.start_timing()
        # To avoid the output "first run crashed"...
        stats.ta_runs += 1
        ta = ExecuteTaFuncWithQueue(backend=automl._automl._backend,
                                    autosklearn_seed=seed,
                                    resampling_strategy='test',
                                    memory_limit=memory_limit_factor *
                                    automl_arguments['ml_memory_limit'],
                                    disable_file_output=True,
                                    logger=logger,
                                    stats=stats,
                                    all_scoring_functions=True,
                                    metric=metric)
        status, cost, runtime, additional_run_info = ta.start(
            config=config, instance=None, cutoff=per_run_time_limit * 3)

        if status == StatusType.SUCCESS:
            assert len(additional_run_info) > 1, additional_run_info

        # print(additional_run_info)

        validated_trajectory.append(
            list(entry) + [task_id] + [additional_run_info])

validated_trajectory = [
    entry[:2] + [entry[2].get_dictionary()] + entry[3:]
    for entry in validated_trajectory
]
validated_trajectory_file = os.path.join(tmp_dir, 'smac3-output',
                                         'run_%d' % seed,
                                         'validation_trajectory.json')