Example #1
    def test_get_results(self):
        # Fill a multiprocessing queue with five result tuples and check that
        # read_queue drains all of them in order.
        queue_ = multiprocessing.Queue()
        for i in range(5):
            queue_.put((i * 1, 1 - (i * 0.2), 0, "", StatusType.SUCCESS))
        result = read_queue(queue_)
        self.assertEqual(len(result), 5)
        self.assertEqual(result[0][0], 0)
        self.assertAlmostEqual(result[0][1], 1.0)
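
For context, the behaviour this test exercises is simply draining everything currently in the queue into a list. The sketch below is only an assumption based on the assertions above, not the library's actual implementation of read_queue:

import multiprocessing
import queue
from typing import Any, List

def read_queue(queue_: multiprocessing.queues.Queue) -> List[Any]:
    # Drain all items currently in the queue into a list (assumed behaviour).
    items: List[Any] = []
    while True:
        try:
            # A short timeout gives a slow producer time to finish its last put().
            items.append(queue_.get(timeout=1))
        except queue.Empty:
            break
    return items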
Example #2
    def test_cv(self, pipeline_mock):
        D = get_binary_classification_datamanager(
            resampling_strategy=CrossValTypes.k_fold_cross_validation)

        pipeline_mock.predict_proba.side_effect = \
            lambda X, batch_size=None: np.tile([0.6, 0.4], (len(X), 1))
        pipeline_mock.side_effect = lambda **kwargs: pipeline_mock
        pipeline_mock.get_additional_run_info.return_value = None

        configuration = unittest.mock.Mock(spec=Configuration)
        backend_api = create(self.tmp_dir,
                             self.output_dir,
                             prefix='autoPyTorch')
        backend_api.load_datamanager = lambda: D
        queue_ = multiprocessing.Queue()

        evaluator = TrainEvaluator(backend_api,
                                   queue_,
                                   configuration=configuration,
                                   metric=accuracy,
                                   budget=0,
                                   pipeline_config={
                                       'budget_type': 'epochs',
                                       'epochs': 50
                                   })
        evaluator.file_output = unittest.mock.Mock(spec=evaluator.file_output)
        evaluator.file_output.return_value = (None, {})

        evaluator.fit_predict_and_loss()

        rval = read_queue(evaluator.queue)
        self.assertEqual(len(rval), 1)
        result = rval[0]['loss']
        self.assertEqual(len(rval[0]), 3)
        self.assertRaises(queue.Empty, evaluator.queue.get, timeout=1)

        self.assertEqual(evaluator.file_output.call_count, 1)
        self.assertEqual(result, 0.46235467431119603)
        self.assertEqual(pipeline_mock.fit.call_count, 5)
        # 15 calls: predict_proba is called on the train, holdout and
        # test set in each fold (3 sets x 5 folds = 15)
        self.assertEqual(pipeline_mock.predict_proba.call_count, 15)
        # The optimisation predictions in CV are the concatenation of the
        # validation predictions of the 5 folds, so the row count is roughly
        # 5 * the size of one validation split.
        self.assertEqual(
            evaluator.file_output.call_args[0][0].shape[0],
            # Note the - 1: D has 69 training samples, which is not divisible
            # by 5, so the last fold is one sample smaller than the first.
            5 * len(D.splits[0][1]) - 1,
            evaluator.file_output.call_args)
        self.assertIsNone(evaluator.file_output.call_args[0][1])
        self.assertEqual(evaluator.file_output.call_args[0][2].shape[0],
                         D.test_tensors[1].shape[0])
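
As a quick sanity check on the 5 * len(D.splits[0][1]) - 1 figure above, the fold-size arithmetic for 69 training samples and 5 folds works out as follows (the assumption being that the splitter hands the remainder to the first folds, one extra sample each):

n_samples, n_folds = 69, 5
# 69 % 5 == 4, so the first four folds get 14 samples and the last gets 13.
fold_sizes = [n_samples // n_folds + (1 if i < n_samples % n_folds else 0)
              for i in range(n_folds)]
print(fold_sizes)       # [14, 14, 14, 14, 13]
print(sum(fold_sizes))  # 69 == 5 * 14 - 1, the row count asserted above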
Example #3
    def test_holdout(self, pipeline_mock):
        pipeline_mock.fit_dictionary = {'budget_type': 'epochs', 'epochs': 50}
        # Binary iris, contains 69 train samples, 31 test samples
        D = get_binary_classification_datamanager()
        pipeline_mock.predict_proba.side_effect = \
            lambda X, batch_size=None: np.tile([0.6, 0.4], (len(X), 1))
        pipeline_mock.side_effect = lambda **kwargs: pipeline_mock
        pipeline_mock.get_additional_run_info.return_value = None

        configuration = unittest.mock.Mock(spec=Configuration)
        backend_api = create(self.tmp_dir,
                             self.output_dir,
                             prefix='autoPyTorch')
        backend_api.load_datamanager = lambda: D
        queue_ = multiprocessing.Queue()

        evaluator = TrainEvaluator(backend_api,
                                   queue_,
                                   configuration=configuration,
                                   metric=accuracy,
                                   budget=0,
                                   pipeline_config={
                                       'budget_type': 'epochs',
                                       'epochs': 50
                                   })
        evaluator.file_output = unittest.mock.Mock(spec=evaluator.file_output)
        evaluator.file_output.return_value = (None, {})

        evaluator.fit_predict_and_loss()

        rval = read_queue(evaluator.queue)
        self.assertEqual(len(rval), 1)
        result = rval[0]['loss']
        self.assertEqual(len(rval[0]), 3)
        self.assertRaises(queue.Empty, evaluator.queue.get, timeout=1)

        self.assertEqual(evaluator.file_output.call_count, 1)
        self.assertEqual(result, 0.5652173913043479)
        self.assertEqual(pipeline_mock.fit.call_count, 1)
        # 3 calls because of train, holdout and test set
        self.assertEqual(pipeline_mock.predict_proba.call_count, 3)
        self.assertEqual(evaluator.file_output.call_count, 1)
        self.assertEqual(evaluator.file_output.call_args[0][0].shape[0],
                         len(D.splits[0][1]))
        self.assertIsNone(evaluator.file_output.call_args[0][1])
        self.assertEqual(evaluator.file_output.call_args[0][2].shape[0],
                         D.test_tensors[1].shape[0])
        self.assertEqual(evaluator.pipeline.fit.call_count, 1)
Example #4
    def test_additional_metrics_during_training(self, pipeline_mock):
        pipeline_mock.fit_dictionary = {'budget_type': 'epochs', 'epochs': 50}
        # Binary iris, contains 69 train samples, 31 test samples
        D = get_binary_classification_datamanager()
        pipeline_mock.predict_proba.side_effect = \
            lambda X, batch_size=None: np.tile([0.6, 0.4], (len(X), 1))
        pipeline_mock.side_effect = lambda **kwargs: pipeline_mock
        pipeline_mock.get_additional_run_info.return_value = None

        configuration = unittest.mock.Mock(spec=Configuration)
        backend_api = create(self.tmp_dir,
                             self.output_dir,
                             prefix='autoPyTorch')
        backend_api.load_datamanager = lambda: D
        queue_ = multiprocessing.Queue()

        evaluator = TrainEvaluator(backend_api,
                                   queue_,
                                   configuration=configuration,
                                   metric=accuracy,
                                   budget=0,
                                   pipeline_config={
                                       'budget_type': 'epochs',
                                       'epochs': 50
                                   },
                                   all_supported_metrics=True)
        evaluator.file_output = unittest.mock.Mock(spec=evaluator.file_output)
        evaluator.file_output.return_value = (None, {})

        evaluator.fit_predict_and_loss()

        rval = read_queue(evaluator.queue)
        self.assertEqual(len(rval), 1)
        result = rval[0]
        self.assertIn('additional_run_info', result)
        self.assertIn('opt_loss', result['additional_run_info'])
        self.assertGreater(
            len(result['additional_run_info']['opt_loss'].keys()), 1)
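
Based on the assertions above, when all_supported_metrics=True the per-metric losses are reported under additional_run_info['opt_loss']. A hypothetical way to list them from the queued result (the variable names are illustrative):

result = read_queue(evaluator.queue)[0]
for metric_name, loss in result['additional_run_info']['opt_loss'].items():
    print(metric_name, loss)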
Example #5
File: tae.py  Project: automl/Auto-PyTorch
    def run(
        self,
        config: Configuration,
        instance: Optional[str] = None,
        cutoff: Optional[float] = None,
        seed: int = 12345,
        budget: float = 0.0,
        instance_specific: Optional[str] = None,
    ) -> Tuple[StatusType, float, float, Dict[str, Any]]:

        context = multiprocessing.get_context(self.pynisher_context)
        preload_modules(context)
        queue: multiprocessing.queues.Queue = context.Queue()

        if not (instance_specific is None or instance_specific == '0'):
            raise ValueError(instance_specific)
        init_params = {'instance': instance}
        if self.init_params is not None:
            init_params.update(self.init_params)

        if self.logger_port is None:
            logger: Union[logging.Logger,
                          PicklableClientLogger] = logging.getLogger(
                              "pynisher")
        else:
            logger = get_named_client_logger(
                name="pynisher",
                port=self.logger_port,
            )

        pynisher_arguments = dict(
            logger=logger,
            # Pynisher expects the wall-clock time limit in seconds
            wall_time_in_s=int(cutoff) if cutoff is not None else None,
            mem_in_mb=self.memory_limit,
            capture_output=True,
            context=context,
        )

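        # Dummy configurations arrive as ints and traditional pipelines as
        # strings (see the origin mapping near the end of this method); both
        # keep the initial num_run, while real Configuration objects are
        # offset by their config_id.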
        if isinstance(config, (int, str)):
            num_run = self.initial_num_run
        else:
            num_run = config.config_id + self.initial_num_run

        self.logger.debug("Search space updates for {}: {}".format(
            num_run, self.search_space_updates))
        obj_kwargs = dict(
            queue=queue,
            config=config,
            backend=self.backend,
            metric=self.metric,
            seed=self.seed,
            num_run=num_run,
            output_y_hat_optimization=self.output_y_hat_optimization,
            include=self.include,
            exclude=self.exclude,
            disable_file_output=self.disable_file_output,
            instance=instance,
            init_params=init_params,
            budget=budget,
            budget_type=self.budget_type,
            pipeline_config=self.pipeline_config,
            logger_port=self.logger_port,
            all_supported_metrics=self.all_supported_metrics,
            search_space_updates=self.search_space_updates)

        info: Optional[List[RunValue]]
        additional_run_info: Dict[str, Any]
        try:
            obj = pynisher.enforce_limits(**pynisher_arguments)(self.ta)
            obj(**obj_kwargs)
        except Exception as e:
            exception_traceback = traceback.format_exc()
            error_message = repr(e)
            additional_run_info = {
                'traceback': exception_traceback,
                'error': error_message
            }
            return StatusType.CRASHED, self.cost_for_crash, 0.0, additional_run_info

        if obj.exit_status in (pynisher.TimeoutException,
                               pynisher.MemorylimitException):
            # Even if pynisher reports a timeout or memout, the target
            # algorithm may still have written a result into the queue,
            # in which case we treat it as a successful run.
            try:
                info = read_queue(queue)  # type: ignore
                result = info[-1]['loss']  # type: ignore
                status = info[-1]['status']  # type: ignore
                additional_run_info = info[-1][
                    'additional_run_info']  # type: ignore

                if obj.stdout:
                    additional_run_info['subprocess_stdout'] = obj.stdout
                if obj.stderr:
                    additional_run_info['subprocess_stderr'] = obj.stderr

                if obj.exit_status is pynisher.TimeoutException:
                    additional_run_info[
                        'info'] = 'Run stopped because of timeout.'
                elif obj.exit_status is pynisher.MemorylimitException:
                    additional_run_info[
                        'info'] = 'Run stopped because of memout.'

                if status in [StatusType.SUCCESS, StatusType.DONOTADVANCE]:
                    cost = result
                else:
                    cost = self.worst_possible_result

            except Empty:
                info = None
                if obj.exit_status is pynisher.TimeoutException:
                    status = StatusType.TIMEOUT
                    additional_run_info = {'error': 'Timeout'}
                elif obj.exit_status is pynisher.MemorylimitException:
                    status = StatusType.MEMOUT
                    additional_run_info = {
                        'error':
                        'Memout (used more than {} MB).'.format(
                            self.memory_limit)
                    }
                else:
                    raise ValueError(obj.exit_status)
                cost = self.worst_possible_result

        elif obj.exit_status is TAEAbortException:
            info = None
            status = StatusType.ABORT
            cost = self.worst_possible_result
            additional_run_info = {
                'error': 'Your configuration of '
                'autoPyTorch does not work!',
                'exit_status': _encode_exit_status(obj.exit_status),
                'subprocess_stdout': obj.stdout,
                'subprocess_stderr': obj.stderr,
            }

        else:
            try:
                info = read_queue(queue)  # type: ignore
                result = info[-1]['loss']  # type: ignore
                status = info[-1]['status']  # type: ignore
                additional_run_info = info[-1][
                    'additional_run_info']  # type: ignore

                if obj.exit_status == 0:
                    cost = result
                else:
                    status = StatusType.CRASHED
                    cost = self.worst_possible_result
                    additional_run_info['info'] = 'Run treated as crashed ' \
                                                  'because the pynisher exit ' \
                                                  'status %s is unknown.' % \
                                                  str(obj.exit_status)
                    additional_run_info['exit_status'] = _encode_exit_status(
                        obj.exit_status)
                    additional_run_info['subprocess_stdout'] = obj.stdout
                    additional_run_info['subprocess_stderr'] = obj.stderr
            except Empty:
                info = None
                additional_run_info = {
                    'error': 'Result queue is empty',
                    'exit_status': _encode_exit_status(obj.exit_status),
                    'subprocess_stdout': obj.stdout,
                    'subprocess_stderr': obj.stderr,
                    'exitcode': obj.exitcode
                }
                status = StatusType.CRASHED
                cost = self.worst_possible_result

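        # With no budget type (or a zero budget) there is no fidelity to
        # advance on, so a DONOTADVANCE result is recorded as a success.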
        if ((self.budget_type is None or budget == 0)
                and status == StatusType.DONOTADVANCE):
            status = StatusType.SUCCESS

        if not isinstance(additional_run_info, dict):
            additional_run_info = {'message': additional_run_info}

        if (info is not None and self.resampling_strategy
                in ['holdout-iterative-fit', 'cv-iterative-fit']
                and status != StatusType.CRASHED):
            learning_curve = extract_learning_curve(info)
            learning_curve_runtime = extract_learning_curve(info, 'duration')
            if len(learning_curve) > 1:
                additional_run_info['learning_curve'] = learning_curve
                additional_run_info[
                    'learning_curve_runtime'] = learning_curve_runtime

            train_learning_curve = extract_learning_curve(info, 'train_loss')
            if len(train_learning_curve) > 1:
                additional_run_info[
                    'train_learning_curve'] = train_learning_curve
                additional_run_info[
                    'learning_curve_runtime'] = learning_curve_runtime

            if self._get_validation_loss:
                validation_learning_curve = extract_learning_curve(
                    info, 'validation_loss')
                if len(validation_learning_curve) > 1:
                    additional_run_info['validation_learning_curve'] = \
                        validation_learning_curve
                    additional_run_info[
                        'learning_curve_runtime'] = learning_curve_runtime

            if self._get_test_loss:
                test_learning_curve = extract_learning_curve(info, 'test_loss')
                if len(test_learning_curve) > 1:
                    additional_run_info[
                        'test_learning_curve'] = test_learning_curve
                    additional_run_info[
                        'learning_curve_runtime'] = learning_curve_runtime

        if isinstance(config, int):
            origin = 'DUMMY'
        elif isinstance(config, str):
            origin = 'traditional'
        else:
            origin = getattr(config, 'origin', 'UNKNOWN')
        additional_run_info['configuration_origin'] = origin

        runtime = float(obj.wall_clock_time)

        empty_queue(queue)
        self.logger.debug("Finish function evaluation {}.\n"
                          "Status: {}, Cost: {}, Runtime: {},\n"
                          "Additional information:\n{}".format(
                              str(num_run), status, cost, runtime,
                              dict_repr(additional_run_info)))
        return status, cost, runtime, additional_run_info
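
For orientation, run() hands back a (status, cost, runtime, additional_run_info) tuple for the optimiser to consume. A hypothetical caller-side sketch, where tae is assumed to be an instance of the class defining this method and config a sampled Configuration:

status, cost, runtime, additional_run_info = tae.run(config, budget=5.0)
if status == StatusType.SUCCESS:
    print("loss={:.4f}, runtime={:.1f}s".format(cost, runtime))
else:
    print("run ended with status {}: {}".format(status, dict_repr(additional_run_info)))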