Exemplo n.º 1
0
 def test_eval_with_limits_holdout(self, pynisher_mock):
     """A successful holdout evaluation is reported as SUCCESS with the mocked loss."""
     # The patched pynisher call runs safe_eval_success_mock, which reports success.
     pynisher_mock.side_effect = safe_eval_success_mock
     config = unittest.mock.Mock()
     config.config_id = 198
     ta = ExecuteTaFuncWithQueue(
         backend=BackendMock(),
         autosklearn_seed=1,
         resampling_strategy='holdout',
         logger=self.logger,
         stats=self.stats,
         memory_limit=3072,
         metric=accuracy,
         cost_for_crash=get_cost_of_crash(accuracy),
         abort_on_first_run_crash=False,
     )
     info = ta.run_wrapper(
         RunInfo(config=config,
                 cutoff=30,
                 instance=None,
                 instance_specific=None,
                 seed=1,
                 capped=False))
     # info is a (RunInfo, RunValue)-style pair: [0] echoes the run request,
     # [1] carries status/cost/time of the evaluation.
     self.assertEqual(info[0].config.config_id, 198)
     self.assertEqual(info[1].status, StatusType.SUCCESS)
     self.assertEqual(info[1].cost, 0.5)
     self.assertIsInstance(info[1].time, float)
Exemplo n.º 2
0
    def test_eval_with_limits_holdout_2(self, eval_houldout_mock):
        """The instance string passed to start() surfaces in the additional run info."""
        # NOTE(review): 'houldout' in the parameter name is a typo for 'holdout';
        # kept as-is because the mock is injected positionally by the patch decorator.
        def side_effect(*args, **kwargs):
            # Emulate a successful evaluation that echoes the 'instance' argument
            # back through 'additional_run_info'.
            queue = kwargs['queue']
            queue.put({
                'status': StatusType.SUCCESS,
                'loss': 0.5,
                'additional_run_info': kwargs['instance']
            })

        eval_houldout_mock.side_effect = side_effect
        ta = ExecuteTaFuncWithQueue(backend=BackendMock(),
                                    autosklearn_seed=1,
                                    resampling_strategy='holdout',
                                    logger=self.logger,
                                    stats=self.stats,
                                    memory_limit=3072,
                                    metric=accuracy)
        self.scenario.wallclock_limit = 180
        instance = "{'subsample': 30}"
        info = ta.start(None, cutoff=30, instance=instance)
        self.assertEqual(info[0], StatusType.SUCCESS)
        # The instance string reappears as the 'message' of the additional info.
        self.assertEqual(info[-1], {
            'message': "{'subsample': 30}",
            'configuration_origin': 'UNKNOWN'
        })
Exemplo n.º 3
0
    def test_eval_with_limits_holdout_fail_timeout(self, pynisher_mock):
        """A pynisher timeout is reported as TIMEOUT with worst-case accuracy cost."""
        config = unittest.mock.Mock()
        config.config_id = 198

        # Build a fake pynisher wrapper: calling it returns an object whose
        # exit_status signals a timeout after 30s of wall-clock time.
        m1 = unittest.mock.Mock()
        m2 = unittest.mock.Mock()
        m1.return_value = m2
        pynisher_mock.return_value = m1
        m2.exit_status = pynisher.TimeoutException
        m2.wall_clock_time = 30
        ta = ExecuteTaFuncWithQueue(
            backend=BackendMock(),
            autosklearn_seed=1,
            resampling_strategy='holdout',
            logger=self.logger,
            stats=self.stats,
            memory_limit=3072,
            metric=accuracy,
            cost_for_crash=get_cost_of_crash(accuracy),
            abort_on_first_run_crash=False,
        )
        info = ta.run_wrapper(
            RunInfo(config=config,
                    cutoff=30,
                    instance=None,
                    instance_specific=None,
                    seed=1,
                    capped=False))
        # Timeout maps to cost 1.0 (worst possible value for accuracy).
        self.assertEqual(info[1].status, StatusType.TIMEOUT)
        self.assertEqual(info[1].cost, 1.0)
        self.assertIsInstance(info[1].time, float)
Exemplo n.º 4
0
    def test_exception_in_target_function(self, eval_holdout_mock):
        """An exception raised in the target function yields CRASHED plus error details."""
        config = unittest.mock.Mock()
        config.config_id = 198

        # Make the evaluation function raise as soon as it is called.
        eval_holdout_mock.side_effect = ValueError
        ta = ExecuteTaFuncWithQueue(
            backend=BackendMock(),
            autosklearn_seed=1,
            resampling_strategy='holdout',
            logger=self.logger,
            stats=self.stats,
            memory_limit=3072,
            metric=accuracy,
            cost_for_crash=get_cost_of_crash(accuracy),
            abort_on_first_run_crash=False,
        )
        self.stats.submitted_ta_runs += 1
        info = ta.run_wrapper(
            RunInfo(config=config,
                    cutoff=30,
                    instance=None,
                    instance_specific=None,
                    seed=1,
                    capped=False))
        self.assertEqual(info[1].status, StatusType.CRASHED)
        self.assertEqual(info[1].cost, 1.0)
        self.assertIsInstance(info[1].time, float)
        # 'error' carries the repr of the raised exception; a traceback is attached.
        self.assertEqual(info[1].additional_info['error'], 'ValueError()')
        self.assertIn('traceback', info[1].additional_info)
Exemplo n.º 5
0
    def test_eval_with_limits_holdout_fail_silent(self, pynisher_mock):
        """A run leaving the result queue empty is reported as CRASHED, not raised."""
        # The pynisher call produces nothing, so no result ever reaches the queue.
        pynisher_mock.return_value = None
        ta = ExecuteTaFuncWithQueue(
            backend=BackendMock(),
            autosklearn_seed=1,
            resampling_strategy='holdout',
            logger=self.logger,
            stats=self.stats,
            memory_limit=3072,
            metric=accuracy,
            cost_for_crash=get_cost_of_crash(accuracy),
            abort_on_first_run_crash=False,
        )

        # The following should not fail because abort on first config crashed is false
        info = ta.start(config=None, instance=None, cutoff=60)
        self.assertEqual(info[0], StatusType.CRASHED)
        self.assertEqual(info[1], 1.0)
        self.assertIsInstance(info[2], float)
        self.assertEqual(info[3], {
            'configuration_origin': 'UNKNOWN',
            'error': "Result queue is empty"
        })

        # A later crash (after a completed run) is reported the same way.
        self.stats.ta_runs += 1
        info = ta.start(config=None, instance=None, cutoff=30)
        self.assertEqual(info[0], StatusType.CRASHED)
        self.assertEqual(info[1], 1.0)
        self.assertIsInstance(info[2], float)
        self.assertEqual(info[3], {
            'configuration_origin': 'UNKNOWN',
            'error': "Result queue is empty"
        })
Exemplo n.º 6
0
    def _do_dummy_prediction(self, datamanager, num_run):
        """Run one dummy-predictor evaluation and return the next run number.

        `datamanager` is not used in this method; it is kept for interface
        compatibility with the caller.
        """

        # When using partial-cv it makes no sense to do dummy predictions
        if self._resampling_strategy in ['partial-cv',
                                         'partial-cv-iterative-fit']:
            return num_run

        self._logger.info("Starting to create dummy predictions.")
        memory_limit = int(self._ml_memory_limit)
        scenario_mock = unittest.mock.Mock()
        scenario_mock.wallclock_limit = self._time_for_task
        # This stats object is a hack - maybe the SMAC stats object should
        # already be generated here!
        stats = Stats(scenario_mock)
        stats.start_timing()
        ta = ExecuteTaFuncWithQueue(backend=self._backend,
                                    autosklearn_seed=self._seed,
                                    resampling_strategy=self._resampling_strategy,
                                    initial_num_run=num_run,
                                    logger=self._logger,
                                    stats=stats,
                                    metric=self._metric,
                                    memory_limit=memory_limit,
                                    disable_file_output=self._disable_evaluator_output,
                                    **self._resampling_strategy_arguments)

        # Run number 1 is reserved for the dummy predictor.
        status, cost, runtime, additional_info = \
            ta.run(1, cutoff=self._time_for_task)
        if status == StatusType.SUCCESS:
            self._logger.info("Finished creating dummy predictions.")
        else:
            self._logger.error('Error creating dummy predictions: %s ',
                               str(additional_info))

        return ta.num_run
Exemplo n.º 7
0
    def test_eval_with_limits_holdout_fail_silent(self, pynisher_mock):
        """First crash raises FirstRunCrashedException; later crashes return CRASHED."""
        # No result ever reaches the queue.
        pynisher_mock.return_value = None
        ta = ExecuteTaFuncWithQueue(backend=BackendMock(),
                                    autosklearn_seed=1,
                                    resampling_strategy='holdout',
                                    logger=self.logger,
                                    stats=self.stats,
                                    memory_limit=3072,
                                    metric=accuracy)

        # The regex matches the upstream SMAC message verbatim (including its
        # grammatical errors), so it must not be "fixed" here.
        self.assertRaisesRegex(FirstRunCrashedException,
                               "First run crashed, abort. Please check your "
                               "setup -- we assume that your "
                               "defaultconfiguration does not crashes. \(To "
                               "deactivate this exception, use the SMAC "
                               "scenario option 'abort_on_first_run_crash'\)",
                               ta.start,
                               config=None,
                               instance=None,
                               cutoff=30)

        # After at least one completed run, a crash is reported instead of raised.
        self.stats.ta_runs += 1
        info = ta.start(config=None, instance=None, cutoff=30)
        self.assertEqual(info[0], StatusType.CRASHED)
        self.assertEqual(info[1], 1.0)
        self.assertIsInstance(info[2], float)
        self.assertEqual(info[3], {
            'configuration_origin': 'UNKNOWN',
            'error': "Result queue is empty"
        })
Exemplo n.º 8
0
    def _do_dummy_prediction(self, datamanager, num_run):
        """Run one dummy-predictor evaluation and return the next run number.

        `datamanager` is not used in this method; it is kept for interface
        compatibility with the caller.
        """

        # When using partial-cv it makes no sense to do dummy predictions
        if self._resampling_strategy in ['partial-cv',
                                         'partial-cv-iterative-fit']:
            return num_run

        self._logger.info("Starting to create dummy predictions.")
        memory_limit = int(self._ml_memory_limit)
        scenario_mock = unittest.mock.Mock()
        scenario_mock.wallclock_limit = self._time_for_task
        # This stats object is a hack - maybe the SMAC stats object should
        # already be generated here!
        stats = Stats(scenario_mock)
        stats.start_timing()
        ta = ExecuteTaFuncWithQueue(backend=self._backend,
                                    autosklearn_seed=self._seed,
                                    resampling_strategy=self._resampling_strategy,
                                    initial_num_run=num_run,
                                    logger=self._logger,
                                    stats=stats,
                                    metric=self._metric,
                                    memory_limit=memory_limit,
                                    disable_file_output=self._disable_evaluator_output,
                                    **self._resampling_strategy_arguments)

        # Run number 1 is reserved for the dummy predictor.
        status, cost, runtime, additional_info = \
            ta.run(1, cutoff=self._time_for_task)
        if status == StatusType.SUCCESS:
            self._logger.info("Finished creating dummy predictions.")
        else:
            self._logger.error('Error creating dummy predictions: %s ',
                               str(additional_info))

        return ta.num_run
 def test_cutoff_lower_than_remaining_time(self, pynisher_mock):
     """wall_time_in_s is capped by the remaining budget (4s), not the 30s cutoff."""
     config = unittest.mock.Mock()
     config.config_id = 198
     ta = ExecuteTaFuncWithQueue(
         backend=self.backend,
         autosklearn_seed=1,
         port=self.logger_port,
         resampling_strategy='holdout',
         stats=self.stats,
         metric=accuracy,
         cost_for_crash=get_cost_of_crash(accuracy),
         abort_on_first_run_crash=False,
         pynisher_context='forkserver',
     )
     self.stats.ta_runs = 1
     ta.run_wrapper(
         RunInfo(config=config,
                 cutoff=30,
                 instance=None,
                 instance_specific=None,
                 seed=1,
                 capped=False))
     # The executor must pass an int wall-clock limit of 4s to pynisher
     # (the remaining budget, presumably from self.stats -- confirm in setUp).
     self.assertEqual(pynisher_mock.call_args[1]['wall_time_in_s'], 4)
     self.assertIsInstance(pynisher_mock.call_args[1]['wall_time_in_s'],
                           int)
Exemplo n.º 10
0
    def _do_dummy_prediction(self, datamanager, num_run):
        """Run one dummy-predictor evaluation and return the next run number.

        `datamanager` is not used in this method; it is kept for interface
        compatibility with the caller.
        """
        self._logger.info("Starting to create dummy predictions.")
        memory_limit = int(self._ml_memory_limit)
        ta = ExecuteTaFuncWithQueue(
            backend=self._backend,
            autosklearn_seed=self._seed,
            resampling_strategy=self._resampling_strategy,
            initial_num_run=num_run,
            logger=self._logger,
            **self._resampling_strategy_arguments)

        # Run number 1 is reserved for the dummy predictor.
        status, cost, runtime, additional_info = \
            ta.run(1, cutoff=self._time_for_task, memory_limit=memory_limit)
        if status == StatusType.SUCCESS:
            self._logger.info("Finished creating dummy predictions.")
        else:
            # Fixed format string: space after the colon, no stray trailing space.
            self._logger.error('Error creating dummy predictions: %s',
                               additional_info)

        return ta.num_run
Exemplo n.º 11
0
    def test_eval_with_limits_holdout_fail_silent(self, pynisher_mock):
        """First crash raises FirstRunCrashedException; later crashes return CRASHED."""
        # No result ever reaches the queue.
        pynisher_mock.return_value = None
        ta = ExecuteTaFuncWithQueue(backend=BackendMock(),
                                    autosklearn_seed=1,
                                    resampling_strategy='holdout',
                                    logger=self.logger,
                                    stats=self.stats,
                                    memory_limit=3072,
                                    metric=accuracy)

        # The regex matches the upstream SMAC message verbatim.
        self.assertRaisesRegex(FirstRunCrashedException,
                               "First run crashed, abort. \(To prevent this, "
                               "toggle the "
                               "'abort_on_first_run_crash'-option!\)",
                               ta.start,
                               config=None,
                               instance=None,
                               cutoff=30)

        # After at least one completed run, a crash is reported instead of raised.
        self.stats.ta_runs += 1
        info = ta.start(config=None, instance=None, cutoff=30)
        self.assertEqual(info[0], StatusType.CRASHED)
        self.assertEqual(info[1], 1.0)
        self.assertIsInstance(info[2], float)
        self.assertEqual(info[3], {'error': "Result queue is empty"})
Exemplo n.º 12
0
    def _do_dummy_prediction(self, datamanager, num_run):
        """Run one dummy-predictor evaluation and return the next run number.

        `datamanager` is not used in this method; it is kept for interface
        compatibility with the caller.
        """
        self._logger.info("Starting to create dummy predictions.")
        memory_limit = int(self._ml_memory_limit)
        ta = ExecuteTaFuncWithQueue(backend=self._backend,
                                    autosklearn_seed=self._seed,
                                    resampling_strategy=self._resampling_strategy,
                                    initial_num_run=num_run,
                                    logger=self._logger,
                                    **self._resampling_strategy_arguments)

        # Run number 1 is reserved for the dummy predictor.
        status, cost, runtime, additional_info = \
            ta.run(1, cutoff=self._time_for_task, memory_limit=memory_limit)
        if status == StatusType.SUCCESS:
            self._logger.info("Finished creating dummy predictions.")
        else:
            # Fixed format string: space after the colon, no stray trailing space.
            self._logger.error('Error creating dummy predictions: %s',
                               additional_info)

        return ta.num_run
Exemplo n.º 13
0
    def test_eval_with_limits_holdout_2(self, eval_houldout_mock):
        """The instance string given to run_wrapper surfaces in additional_info."""
        # NOTE(review): 'houldout' in the parameter name is a typo for 'holdout';
        # kept as-is because the mock is injected positionally by the patch decorator.
        config = unittest.mock.Mock()
        config.config_id = 198

        def side_effect(*args, **kwargs):
            # Emulate a successful evaluation that echoes the 'instance' argument
            # back through 'additional_run_info'.
            queue = kwargs['queue']
            queue.put({'status': StatusType.SUCCESS,
                       'loss': 0.5,
                       'additional_run_info': kwargs['instance']})
        eval_houldout_mock.side_effect = side_effect
        ta = ExecuteTaFuncWithQueue(backend=self.backend, autosklearn_seed=1,
                                    port=self.logger_port,
                                    resampling_strategy='holdout',
                                    stats=self.stats,
                                    memory_limit=3072,
                                    metric=accuracy,
                                    cost_for_crash=get_cost_of_crash(accuracy),
                                    abort_on_first_run_crash=False,
                                    pynisher_context='fork',
                                    )
        self.scenario.wallclock_limit = 180
        instance = "{'subsample': 30}"
        info = ta.run_wrapper(RunInfo(config=config, cutoff=30, instance=instance,
                                      instance_specific=None, seed=1, capped=False))
        self.assertEqual(info[1].status, StatusType.SUCCESS)
        # Exactly two keys: the echoed message and the configuration origin.
        self.assertEqual(len(info[1].additional_info), 2)
        self.assertIn('configuration_origin', info[1].additional_info)
        self.assertEqual(info[1].additional_info['message'], "{'subsample': 30}")
Exemplo n.º 14
0
    def test_eval_with_limits_holdout_fail_memory_error(self, pynisher_mock):
        """A MemoryError is reported as MEMOUT with the worst cost for log_loss."""
        pynisher_mock.side_effect = MemoryError
        config = unittest.mock.Mock()
        config.config_id = 198
        ta = ExecuteTaFuncWithQueue(
            backend=BackendMock(),
            autosklearn_seed=1,
            resampling_strategy='holdout',
            logger=self.logger,
            stats=self.stats,
            memory_limit=3072,
            metric=log_loss,
            cost_for_crash=get_cost_of_crash(log_loss),
            abort_on_first_run_crash=False,
        )
        info = ta.run_wrapper(
            RunInfo(config=config,
                    cutoff=30,
                    instance=None,
                    instance_specific=None,
                    seed=1,
                    capped=False))
        self.assertEqual(info[1].status, StatusType.MEMOUT)

        # For logloss, worst possible result is MAXINT
        worst_possible_result = MAXINT
        self.assertEqual(info[1].cost, worst_possible_result)
        self.assertIsInstance(info[1].time, float)
Exemplo n.º 15
0
    def test_eval_with_limits_holdout_fail_silent(self, pynisher_mock):
        """An empty result queue is reported as CRASHED (twice) instead of raising."""
        # No result ever reaches the queue.
        pynisher_mock.return_value = None
        config = unittest.mock.Mock()
        config.origin = 'MOCK'
        config.config_id = 198
        ta = ExecuteTaFuncWithQueue(
            backend=self.backend,
            autosklearn_seed=1,
            port=self.logger_port,
            resampling_strategy='holdout',
            stats=self.stats,
            memory_limit=3072,
            metric=accuracy,
            cost_for_crash=get_cost_of_crash(accuracy),
            abort_on_first_run_crash=False,
            pynisher_context='fork',
        )

        # The following should not fail because abort on first config crashed is false
        info = ta.run_wrapper(
            RunInfo(config=config,
                    cutoff=60,
                    instance=None,
                    instance_specific=None,
                    seed=1,
                    capped=False))
        self.assertEqual(info[1].status, StatusType.CRASHED)
        self.assertEqual(info[1].cost, 1.0)
        self.assertIsInstance(info[1].time, float)
        # The crash report includes subprocess diagnostics and the config origin.
        self.assertEqual(
            info[1].additional_info, {
                'configuration_origin': 'MOCK',
                'error': "Result queue is empty",
                'exit_status': 0,
                'exitcode': 0,
                'subprocess_stdout': '',
                'subprocess_stderr': ''
            })

        # A later crash (after a submitted run) is reported identically.
        self.stats.submitted_ta_runs += 1
        info = ta.run_wrapper(
            RunInfo(config=config,
                    cutoff=30,
                    instance=None,
                    instance_specific=None,
                    seed=1,
                    capped=False))
        self.assertEqual(info[1].status, StatusType.CRASHED)
        self.assertEqual(info[1].cost, 1.0)
        self.assertIsInstance(info[1].time, float)
        self.assertEqual(
            info[1].additional_info, {
                'configuration_origin': 'MOCK',
                'error': "Result queue is empty",
                'exit_status': 0,
                'exitcode': 0,
                'subprocess_stdout': '',
                'subprocess_stderr': ''
            })
Exemplo n.º 16
0
 def test_eval_with_limits_holdout_fail_silent(self, pynisher_mock):
     """A run that leaves the result queue empty is reported as CRASHED, cost 2.0."""
     pynisher_mock.return_value = None
     executor = ExecuteTaFuncWithQueue(
         backend=BackendMock(),
         autosklearn_seed=1,
         resampling_strategy='holdout',
         logger=self.logger,
     )
     result = executor.run(None, cutoff=30, memory_limit=3000)
     self.assertEqual(result[0], StatusType.CRASHED)
     self.assertEqual(result[1], 2.0)
     self.assertIsInstance(result[2], float)
Exemplo n.º 17
0
 def test_eval_with_limits_holdout(self, pynisher_mock):
     """A successful evaluation is reported as SUCCESS with the mocked loss 0.5."""
     pynisher_mock.side_effect = safe_eval_success_mock
     executor = ExecuteTaFuncWithQueue(
         backend=BackendMock(),
         autosklearn_seed=1,
         resampling_strategy='holdout',
         logger=self.logger,
     )
     result = executor.run(None, cutoff=30, memory_limit=3000)
     self.assertEqual(result[0], StatusType.SUCCESS)
     self.assertEqual(result[1], 0.5)
     self.assertIsInstance(result[2], float)
Exemplo n.º 18
0
 def test_eval_with_limits_holdout_fail_timeout(self, pynisher_mock):
     """A pynisher timeout is reported as TIMEOUT with cost 2.0."""
     pynisher_mock.side_effect = pynisher.TimeoutException
     executor = ExecuteTaFuncWithQueue(
         backend=BackendMock(),
         autosklearn_seed=1,
         resampling_strategy='holdout',
         logger=self.logger,
     )
     result = executor.run(None, cutoff=30, memory_limit=3000)
     self.assertEqual(result[0], StatusType.TIMEOUT)
     self.assertEqual(result[1], 2.0)
     self.assertIsInstance(result[2], float)
Exemplo n.º 19
0
 def test_cutoff_lower_than_remaining_time(self, pynisher_mock):
     """The remaining budget (4s), not the requested cutoff, caps wall_time_in_s."""
     executor = ExecuteTaFuncWithQueue(
         backend=BackendMock(),
         autosklearn_seed=1,
         resampling_strategy='holdout',
         logger=self.logger,
         stats=self.stats,
         memory_limit=3072,
     )
     executor.run(None, cutoff=30)
     wall_time = pynisher_mock.call_args[1]['wall_time_in_s']
     self.assertEqual(wall_time, 4)
     self.assertIsInstance(wall_time, int)
Exemplo n.º 20
0
 def test_zero_or_negative_cutoff(self, pynisher_mock):
     """An exhausted wallclock budget aborts the run before execution."""
     executor = ExecuteTaFuncWithQueue(
         backend=BackendMock(),
         autosklearn_seed=1,
         resampling_strategy='holdout',
         logger=self.logger,
         stats=self.stats,
     )
     self.scenario.wallclock_limit = 5
     result = executor.start(None, instance=None, cutoff=10)
     # The fixture string (including the upstream 'bugdet' typo) is matched verbatim.
     expected = (StatusType.ABORT, np.nan, 0, {"misc": "exhausted bugdet -- ABORT"})
     self.assertEqual(result, expected)
Exemplo n.º 21
0
 def test_eval_with_limits_holdout(self, pynisher_mock):
     """A successful holdout evaluation reports SUCCESS with the mocked loss."""
     pynisher_mock.side_effect = safe_eval_success_mock
     executor_kwargs = dict(
         backend=BackendMock(),
         autosklearn_seed=1,
         resampling_strategy='holdout',
         logger=self.logger,
     )
     executor = ExecuteTaFuncWithQueue(**executor_kwargs)
     result = executor.run(None, cutoff=30, memory_limit=3000)
     self.assertEqual(result[0], StatusType.SUCCESS)
     self.assertEqual(result[1], 0.5)
     self.assertIsInstance(result[2], float)
Exemplo n.º 22
0
 def test_cutoff_lower_than_remaining_time(self, pynisher_mock):
     """Remaining wallclock time (4s), not the 30s cutoff, bounds wall_time_in_s."""
     executor = ExecuteTaFuncWithQueue(
         backend=BackendMock(),
         autosklearn_seed=1,
         resampling_strategy='holdout',
         logger=self.logger,
         stats=self.stats,
         metric=accuracy,
     )
     self.stats.ta_runs = 1
     executor.start(None, cutoff=30, instance=None)
     wall_time = pynisher_mock.call_args[1]['wall_time_in_s']
     self.assertEqual(wall_time, 4)
     self.assertIsInstance(wall_time, int)
Exemplo n.º 23
0
 def test_eval_with_limits_holdout_fail_silent(self, pynisher_mock):
     """An empty result queue is reported as CRASHED with cost 1.0."""
     pynisher_mock.return_value = None
     executor = ExecuteTaFuncWithQueue(
         backend=BackendMock(),
         autosklearn_seed=1,
         resampling_strategy='holdout',
         logger=self.logger,
     )
     result = executor.run(None, cutoff=30, memory_limit=3000)
     self.assertEqual(result[0], StatusType.CRASHED)
     self.assertEqual(result[1], 1.0)
     self.assertIsInstance(result[2], float)
Exemplo n.º 24
0
 def test_eval_with_limits_holdout_fail_memory_error(self, pynisher_mock):
     """A MemoryError in the target is reported as MEMOUT with cost 1.0."""
     pynisher_mock.side_effect = MemoryError
     executor = ExecuteTaFuncWithQueue(
         backend=BackendMock(),
         autosklearn_seed=1,
         resampling_strategy='holdout',
         logger=self.logger,
     )
     result = executor.run(None, cutoff=30, memory_limit=3000)
     self.assertEqual(result[0], StatusType.MEMOUT)
     self.assertEqual(result[1], 1.0)
     self.assertIsInstance(result[2], float)
Exemplo n.º 25
0
 def test_cutoff_lower_than_remaining_time(self, pynisher_mock):
     """Remaining wallclock time (4s), not the 30s cutoff, bounds wall_time_in_s."""
     executor = ExecuteTaFuncWithQueue(
         backend=BackendMock(),
         autosklearn_seed=1,
         resampling_strategy='holdout',
         logger=self.logger,
         stats=self.stats,
         metric=accuracy,
     )
     self.stats.ta_runs = 1
     executor.start(None, cutoff=30, instance=None)
     wall_time = pynisher_mock.call_args[1]['wall_time_in_s']
     self.assertEqual(wall_time, 4)
     self.assertIsInstance(wall_time, int)
Exemplo n.º 26
0
 def test_eval_with_limits_holdout_fail_timeout(self, pynisher_mock):
     """A pynisher timeout is reported as TIMEOUT with cost 1.0."""
     pynisher_mock.side_effect = pynisher.TimeoutException
     executor = ExecuteTaFuncWithQueue(
         backend=BackendMock(),
         autosklearn_seed=1,
         resampling_strategy='holdout',
         logger=self.logger,
         stats=self.stats,
         memory_limit=3072,
     )
     result = executor.run(None, cutoff=30)
     self.assertEqual(result[0], StatusType.TIMEOUT)
     self.assertEqual(result[1], 1.0)
     self.assertIsInstance(result[2], float)
Exemplo n.º 27
0
 def test_eval_with_limits_holdout_fail_memory_error(self, pynisher_mock):
     """A MemoryError is reported as MEMOUT with the worst accuracy cost (1.0)."""
     pynisher_mock.side_effect = MemoryError
     executor = ExecuteTaFuncWithQueue(
         backend=BackendMock(),
         autosklearn_seed=1,
         resampling_strategy='holdout',
         logger=self.logger,
         stats=self.stats,
         memory_limit=3072,
         metric=accuracy,
     )
     result = executor.start(None, instance=None, cutoff=30)
     self.assertEqual(result[0], StatusType.MEMOUT)
     self.assertEqual(result[1], 1.0)
     self.assertIsInstance(result[2], float)
Exemplo n.º 28
0
 def test_eval_with_limits_holdout_timeout_with_results_in_queue(self, pynisher_mock):
     """A result already sitting in the queue is used even if the run times out."""
     def put_success(**kwargs):
         # Deposit a finished result before the (mocked) run ends.
         kwargs['queue'].put((StatusType.SUCCESS, 0.5, 0.12345, ''))

     pynisher_mock.side_effect = put_success
     executor = ExecuteTaFuncWithQueue(
         backend=BackendMock(),
         autosklearn_seed=1,
         resampling_strategy='holdout',
         logger=self.logger,
         stats=self.stats,
         memory_limit=3072,
     )
     result = executor.run(None, cutoff=30)
     self.assertEqual(result[0], StatusType.SUCCESS)
     self.assertEqual(result[1], 0.5)
     self.assertIsInstance(result[2], float)
Exemplo n.º 29
0
    def test_eval_with_limits_holdout_fail_memory_error(self, pynisher_mock):
        """A MemoryError is reported as MEMOUT with the worst cost for log_loss."""
        pynisher_mock.side_effect = MemoryError
        ta = ExecuteTaFuncWithQueue(backend=BackendMock(), autosklearn_seed=1,
                                    resampling_strategy='holdout',
                                    logger=self.logger,
                                    stats=self.stats,
                                    memory_limit=3072,
                                    metric=log_loss)
        info = ta.start(None, instance=None, cutoff=30)
        self.assertEqual(info[0], StatusType.MEMOUT)

        # For logloss, worst possible result is MAXINT
        worst_possible_result = MAXINT
        self.assertEqual(info[1], worst_possible_result)
        self.assertIsInstance(info[2], float)
Exemplo n.º 30
0
 def test_exception_in_target_function(self, eval_holdout_mock):
     """A raising target function is reported as CRASHED with traceback details."""
     eval_holdout_mock.side_effect = ValueError
     executor = ExecuteTaFuncWithQueue(
         backend=BackendMock(),
         autosklearn_seed=1,
         resampling_strategy='holdout',
         logger=self.logger,
         stats=self.stats,
         memory_limit=3072,
         metric=accuracy,
     )
     self.stats.ta_runs += 1
     result = executor.start(None, instance=None, cutoff=30)
     self.assertEqual(result[0], StatusType.CRASHED)
     self.assertEqual(result[1], 1.0)
     self.assertIsInstance(result[2], float)
     self.assertEqual(result[3]['error'], 'ValueError()')
     self.assertIn('traceback', result[3])
Exemplo n.º 31
0
 def test_exception_in_target_function(self, eval_holdout_mock):
     """A raising target function is reported as CRASHED with traceback details."""
     eval_holdout_mock.side_effect = ValueError
     executor = ExecuteTaFuncWithQueue(
         backend=BackendMock(),
         autosklearn_seed=1,
         resampling_strategy='holdout',
         logger=self.logger,
         stats=self.stats,
         memory_limit=3072,
         metric=accuracy,
     )
     self.stats.ta_runs += 1
     result = executor.start(None, instance=None, cutoff=30)
     self.assertEqual(result[0], StatusType.CRASHED)
     self.assertEqual(result[1], 1.0)
     self.assertIsInstance(result[2], float)
     self.assertEqual(result[3]['error'], 'ValueError()')
     self.assertIn('traceback', result[3])
Exemplo n.º 32
0
 def test_eval_with_limits_holdout(self, eval_houldout_mock):
     """instance_specific 'subsample=30' is parsed and forwarded to the evaluator."""
     # NOTE(review): 'houldout' in the parameter name is a typo for 'holdout';
     # kept because the mock is injected positionally by the patch decorator.
     def side_effect(*args, **kwargs):
         # Echo the parsed 'subsample' value back as additional run info.
         queue = kwargs['queue']
         queue.put((StatusType.SUCCESS, 0.5, 0.12345, kwargs['subsample']))
     eval_houldout_mock.side_effect = side_effect
     ta = ExecuteTaFuncWithQueue(backend=BackendMock(), autosklearn_seed=1,
                                 resampling_strategy='holdout',
                                 logger=self.logger,
                                 stats=self.stats,
                                 memory_limit=3072)
     self.scenario.wallclock_limit = 180
     info = ta.start(None, cutoff=30, instance=None,
                     instance_specific='subsample=30')
     self.assertEqual(info[0], StatusType.SUCCESS)
     # The parsed subsample value (int 30) comes back as the last element.
     self.assertEqual(info[-1], 30)
Exemplo n.º 33
0
 def test_zero_or_negative_cutoff(self, pynisher_mock):
     """A cutoff exceeding the remaining wallclock budget yields a STOP status."""
     config = unittest.mock.Mock()
     config.config_id = 198
     ta = ExecuteTaFuncWithQueue(backend=self.backend, autosklearn_seed=1,
                                 port=self.logger_port,
                                 resampling_strategy='holdout',
                                 stats=self.stats,
                                 metric=accuracy,
                                 cost_for_crash=get_cost_of_crash(accuracy),
                                 abort_on_first_run_crash=False,
                                 )
     # Only 5s of budget, but a 9s cutoff is requested.
     self.scenario.wallclock_limit = 5
     self.stats.submitted_ta_runs += 1
     run_info, run_value = ta.run_wrapper(RunInfo(config=config, cutoff=9, instance=None,
                                          instance_specific=None, seed=1, capped=False))
     self.assertEqual(run_value.status, StatusType.STOP)
Exemplo n.º 34
0
 def test_eval_with_limits_holdout_fail_timeout(self, pynisher_mock):
     """A pynisher timeout surfaces as TIMEOUT with the crash cost of 1.0."""
     timed_out = unittest.mock.Mock()
     timed_out.exit_status = pynisher.TimeoutException
     timed_out.wall_clock_time = 30
     # pynisher_mock(...) returns a callable whose call yields timed_out.
     pynisher_mock.return_value = unittest.mock.Mock(return_value=timed_out)
     executor = ExecuteTaFuncWithQueue(
         backend=BackendMock(),
         autosklearn_seed=1,
         resampling_strategy='holdout',
         logger=self.logger,
         stats=self.stats,
         memory_limit=3072,
         metric=accuracy,
     )
     outcome = executor.start(config=None, instance=None, cutoff=30)
     self.assertEqual(outcome[0], StatusType.TIMEOUT)
     self.assertEqual(outcome[1], 1.0)
     self.assertIsInstance(outcome[2], float)
Exemplo n.º 35
0
 def test_eval_with_limits_holdout(self, pynisher_mock):
     """A normal holdout evaluation reports SUCCESS and the mocked 0.5 loss."""
     pynisher_mock.side_effect = safe_eval_success_mock
     executor = ExecuteTaFuncWithQueue(backend=BackendMock(),
                                       autosklearn_seed=1,
                                       resampling_strategy='holdout',
                                       logger=self.logger,
                                       stats=self.stats,
                                       memory_limit=3072,
                                       metric=accuracy,
                                       cost_for_crash=get_cost_of_crash(accuracy),
                                       abort_on_first_run_crash=False)
     outcome = executor.start(None, instance=None, cutoff=30)
     self.assertEqual(outcome[0], StatusType.SUCCESS)
     self.assertEqual(outcome[1], 0.5)
     self.assertIsInstance(outcome[2], float)
Exemplo n.º 36
0
 def test_eval_with_limits_holdout_fail_timeout(self, pynisher_mock):
     """A wall-clock timeout inside pynisher maps to StatusType.TIMEOUT."""
     run_result = unittest.mock.Mock()
     run_result.exit_status = pynisher.TimeoutException
     run_result.wall_clock_time = 30
     wrapper = unittest.mock.Mock()
     wrapper.return_value = run_result
     pynisher_mock.return_value = wrapper
     executor = ExecuteTaFuncWithQueue(backend=BackendMock(),
                                       autosklearn_seed=1,
                                       resampling_strategy='holdout',
                                       logger=self.logger,
                                       stats=self.stats,
                                       memory_limit=3072,
                                       metric=accuracy)
     outcome = executor.start(config=None, instance=None, cutoff=30)
     # Timed-out runs are charged the worst-case cost.
     self.assertEqual(outcome[0], StatusType.TIMEOUT)
     self.assertEqual(outcome[1], 1.0)
     self.assertIsInstance(outcome[2], float)
Exemplo n.º 37
0
    def test_eval_with_limits_holdout_timeout_with_results_in_queue(self, pynisher_mock):
        """Results already deposited in the queue win over a pynisher timeout."""
        def make_queue_filler(status, loss):
            # Returns a side effect mimicking an evaluator that managed to put
            # one result in the multiprocessing queue before being killed.
            def filler(**kwargs):
                kwargs['queue'].put({'status': status,
                                     'loss': loss,
                                     'additional_run_info': {}})
            return filler

        inner_run = unittest.mock.Mock()
        inner_run.exit_status = pynisher.TimeoutException
        inner_run.wall_clock_time = 30
        pynisher_mock.return_value = unittest.mock.Mock(return_value=inner_run)

        def fresh_executor():
            # Each scenario gets its own executor, as in the original test.
            return ExecuteTaFuncWithQueue(backend=BackendMock(),
                                          autosklearn_seed=1,
                                          resampling_strategy='holdout',
                                          logger=self.logger,
                                          stats=self.stats,
                                          memory_limit=3072,
                                          metric=accuracy)

        # Successful result sitting in the queue -> SUCCESS, not TIMEOUT.
        inner_run.side_effect = make_queue_filler(StatusType.SUCCESS, 0.5)
        outcome = fresh_executor().start(None, instance=None, cutoff=30)
        self.assertEqual(outcome[0], StatusType.SUCCESS)
        self.assertEqual(outcome[1], 0.5)
        self.assertIsInstance(outcome[2], float)

        # Crashed result in the queue -> CRASHED with the crash cost (1.0).
        inner_run.side_effect = make_queue_filler(StatusType.CRASHED, 2.0)
        outcome = fresh_executor().start(None, instance=None, cutoff=30)
        self.assertEqual(outcome[0], StatusType.CRASHED)
        self.assertEqual(outcome[1], 1.0)
        self.assertIsInstance(outcome[2], float)
Exemplo n.º 38
0
 def test_eval_with_limits_holdout_2(self, eval_houldout_mock):
     """The instance string is passed through into additional_run_info."""
     def fake_eval(*args, **kwargs):
         # Report back the instance argument so the test can verify that
         # ta.start forwarded it unchanged.
         kwargs['queue'].put({'status': StatusType.SUCCESS,
                              'loss': 0.5,
                              'additional_run_info': kwargs['instance']})

     eval_houldout_mock.side_effect = fake_eval
     executor = ExecuteTaFuncWithQueue(
         backend=BackendMock(),
         autosklearn_seed=1,
         resampling_strategy='holdout',
         logger=self.logger,
         stats=self.stats,
         memory_limit=3072,
         metric=accuracy,
     )
     self.scenario.wallclock_limit = 180
     instance = "{'subsample': 30}"
     outcome = executor.start(None, cutoff=30, instance=instance)
     self.assertEqual(outcome[0], StatusType.SUCCESS)
     self.assertEqual(outcome[-1], {'message': "{'subsample': 30}",
                                    'configuration_origin': 'UNKNOWN'})
Exemplo n.º 39
0
 def test_zero_or_negative_cutoff(self, pynisher_mock):
     """A cutoff above the remaining wallclock budget must raise."""
     executor = ExecuteTaFuncWithQueue(
         backend=BackendMock(),
         autosklearn_seed=1,
         resampling_strategy='holdout',
         logger=self.logger,
         stats=self.stats,
         metric=accuracy,
     )
     self.scenario.wallclock_limit = 5
     # Pretend a run already happened so the budget check is exercised.
     self.stats.ta_runs += 1
     with self.assertRaises(BudgetExhaustedException):
         executor.start(None, instance=None, cutoff=9)
Exemplo n.º 40
0
    def test_silent_exception_in_target_function(self):
        """An exception inside the target function (triggered here by removing
        ``save_targets_ensemble`` from the backend) is reported as CRASHED with
        the error string captured but no exitcode/exit_status/traceback."""
        config = unittest.mock.Mock()
        config.config_id = 198

        # Break the backend so the evaluator raises an AttributeError.
        delattr(self.backend, 'save_targets_ensemble')
        executor = ExecuteTaFuncWithQueue(
            backend=self.backend,
            port=self.logger_port,
            autosklearn_seed=1,
            resampling_strategy='holdout',
            stats=self.stats,
            memory_limit=3072,
            metric=accuracy,
            cost_for_crash=get_cost_of_crash(accuracy),
            abort_on_first_run_crash=False,
            iterative=False,
            pynisher_context='fork',
        )
        executor.pynisher_logger = unittest.mock.Mock()
        self.stats.submitted_ta_runs += 1
        run_info = RunInfo(config=config, cutoff=3000, instance=None,
                           instance_specific=None, seed=1, capped=False)
        result = executor.run_wrapper(run_info)
        run_value = result[1]
        self.assertEqual(run_value.status, StatusType.CRASHED,
                         msg=str(run_value.additional_info))
        self.assertEqual(run_value.cost, 1.0)
        self.assertIsInstance(run_value.time, float)
        # The repr of an AttributeError differs across Python versions; all
        # three historical spellings are accepted.
        self.assertIn(
            run_value.additional_info['error'],
            ("""AttributeError("'BackendMock' object has no attribute """
             """'save_targets_ensemble'",)""",
             """AttributeError("'BackendMock' object has no attribute """
             """'save_targets_ensemble'")""",
             """AttributeError('save_targets_ensemble')"""))
        self.assertNotIn('exitcode', run_value.additional_info)
        self.assertNotIn('exit_status', run_value.additional_info)
        self.assertNotIn('traceback', run_value)
Exemplo n.º 41
0
    def test_eval_with_limits_holdout_timeout_with_results_in_queue(
            self, pynisher_mock):
        """Queued evaluator results take precedence over a pynisher timeout."""
        def queue_filler(status, loss):
            # Build a side effect depositing one result dict in the queue,
            # simulating an evaluator that finished just before being killed.
            def _fill(**kwargs):
                kwargs['queue'].put({
                    'status': status,
                    'loss': loss,
                    'additional_run_info': {}
                })
            return _fill

        run_mock = unittest.mock.Mock()
        run_mock.exit_status = pynisher.TimeoutException
        run_mock.wall_clock_time = 30
        pynisher_mock.return_value = unittest.mock.Mock(return_value=run_mock)

        def fresh_executor():
            # Each scenario uses its own executor, mirroring the original.
            return ExecuteTaFuncWithQueue(
                backend=BackendMock(),
                autosklearn_seed=1,
                resampling_strategy='holdout',
                logger=self.logger,
                stats=self.stats,
                memory_limit=3072,
                metric=accuracy,
                cost_for_crash=get_cost_of_crash(accuracy),
                abort_on_first_run_crash=False,
            )

        # Successful result in the queue -> SUCCESS, not TIMEOUT.
        run_mock.side_effect = queue_filler(StatusType.SUCCESS, 0.5)
        outcome = fresh_executor().start(None, instance=None, cutoff=30)
        self.assertEqual(outcome[0], StatusType.SUCCESS)
        self.assertEqual(outcome[1], 0.5)
        self.assertIsInstance(outcome[2], float)

        # Crashed result in the queue -> CRASHED with the crash cost (1.0).
        run_mock.side_effect = queue_filler(StatusType.CRASHED, 2.0)
        outcome = fresh_executor().start(None, instance=None, cutoff=30)
        self.assertEqual(outcome[0], StatusType.CRASHED)
        self.assertEqual(outcome[1], 1.0)
        self.assertIsInstance(outcome[2], float)
Exemplo n.º 42
0
    def test_eval_with_limits_holdout_fail_silent(self, pynisher_mock):
        """If pynisher returns nothing and the result queue stays empty, the
        first run raises FirstRunCrashedException; later runs report CRASHED
        with an explanatory additional_run_info instead."""
        pynisher_mock.return_value = None
        executor = ExecuteTaFuncWithQueue(backend=BackendMock(),
                                          autosklearn_seed=1,
                                          resampling_strategy='holdout',
                                          logger=self.logger,
                                          stats=self.stats,
                                          memory_limit=3072,
                                          metric=accuracy)

        # NOTE: the pattern mirrors SMAC's message verbatim, typos included.
        expected_message = ("First run crashed, abort. Please check your "
                            "setup -- we assume that your "
                            "defaultconfiguration does not crashes. \(To "
                            "deactivate this exception, use the SMAC "
                            "scenario option 'abort_on_first_run_crash'\)")
        with self.assertRaisesRegex(FirstRunCrashedException, expected_message):
            executor.start(config=None, instance=None, cutoff=30)

        # Once a run has been counted, the crash is reported, not raised.
        self.stats.ta_runs += 1
        outcome = executor.start(config=None, instance=None, cutoff=30)
        self.assertEqual(outcome[0], StatusType.CRASHED)
        self.assertEqual(outcome[1], 1.0)
        self.assertIsInstance(outcome[2], float)
        self.assertEqual(outcome[3], {'configuration_origin': 'UNKNOWN',
                                      'error': "Result queue is empty"})
        # NOTE(review): this fragment references names defined outside this
        # excerpt (per_run_time_limit, automl, seed, memory_limit_factor,
        # automl_arguments, metric, config, entry, task_id,
        # validated_trajectory) -- presumably an enclosing trajectory
        # validation loop; confirm against the full file.
        logger = logging.getLogger('Testing:)')
        # Fresh SMAC Stats with double the per-run limit as wallclock budget.
        stats = Stats(
            Scenario({
                'cutoff_time': per_run_time_limit * 2,
                'run_obj': 'quality',
            })
        )
        stats.start_timing()
        # To avoid the output "first run crashed"...
        stats.ta_runs += 1
        # Re-evaluate the configuration on the 'test' resampling strategy;
        # file output is disabled since only the returned info is needed.
        ta = ExecuteTaFuncWithQueue(backend=automl._automl._backend,
                                    autosklearn_seed=seed,
                                    resampling_strategy='test',
                                    memory_limit=memory_limit_factor * automl_arguments['ml_memory_limit'],
                                    disable_file_output=True,
                                    logger=logger,
                                    stats=stats,
                                    all_scoring_functions=True,
                                    metric=metric)
        status, cost, runtime, additional_run_info = ta.start(
            config=config, instance=None, cutoff=per_run_time_limit*3)

        # With all_scoring_functions=True a successful run should carry more
        # than one score in additional_run_info.
        if status == StatusType.SUCCESS:
            assert len(additional_run_info) > 1, additional_run_info

        # print(additional_run_info)

        validated_trajectory.append(list(entry) + [task_id] +
                                    [additional_run_info])