Example #1
0
    def test_budgets(self):
        """Multi-objective runhistory across budgets: bounds are tracked per
        objective, and average_cost reflects only the highest observed budget
        per config (later entries with the same key are not overwritten)."""
        rh = RunHistory()
        cs = get_config_space()
        config1 = Configuration(cs, values={"a": 1, "b": 2})
        config2 = Configuration(cs, values={"a": 2, "b": 3})

        # (config, cost, time, budget) — added in this exact order.
        # The third entry repeats config1/budget=15 on purpose:
        # SMAC does not overwrite by default.
        trials = [
            (config1, [0, 50], 5, 5),
            (config1, [40, 100], 10, 15),
            (config1, [50, 100], 10, 15),
            (config2, [0, 150], 15, 5),
        ]
        for config, cost, runtime, budget in trials:
            rh.add(
                config=config,
                cost=cost,
                time=runtime,
                status=StatusType.SUCCESS,
                instance_id=1,
                seed=1,
                budget=budget,
            )

        self.assertEqual(rh.objective_bounds[0], (0, 40))
        self.assertEqual(rh.objective_bounds[1], (50, 150))

        # Average cost returns us the cost of the latest budget
        self.assertEqual(rh.average_cost(config1), 0.75)
        self.assertEqual(rh.average_cost(config2), 0.5)
Example #2
0
    def _compare_configs(self,
                         incumbent: Configuration,
                         challenger: Configuration,
                         run_history: RunHistory,
                         log_traj: bool = True) -> typing.Optional[Configuration]:
        """
        Decide which of two configurations performs better on the runhistory.

        The comparison uses the intersection of the instance-seed pairs both
        configurations were run on. A configuration x is declared better
        than y when:
            1. x has at least as many runs as y, and
            2. x performs better than y on the runs they share.

        Implicit assumption:
            Challenger was evaluated on the same instance-seed pairs as
            incumbent

        Parameters
        ----------
        incumbent: Configuration
            Current incumbent
        challenger: Configuration
            Challenger configuration
        run_history: smac.runhistory.runhistory.RunHistory
            Stores all runs we ran so far
        log_traj: bool
            Whether to log changes of incumbents in trajectory

        Returns
        -------
        None or better of the two configurations x,y
        """
        inc_runs = run_history.get_runs_for_config(incumbent, only_max_observed_budget=True)
        chall_runs = run_history.get_runs_for_config(challenger, only_max_observed_budget=True)
        common_runs = set(inc_runs).intersection(chall_runs)

        # Costs averaged over the shared instance-seed pairs only.
        chal_perf = run_history.average_cost(challenger, common_runs)
        inc_perf = run_history.average_cost(incumbent, common_runs)

        # Line 15: the challenger has enough runs and is strictly worse —
        # keep the incumbent.
        if len(chall_runs) >= self.minR and chal_perf > inc_perf:
            self.logger.debug("Incumbent (%.4f) is better than challenger (%.4f) on %d runs." %
                              (inc_perf, chal_perf, len(chall_runs)))
            return incumbent

        # Line 16: the challenger covers every run of the incumbent.
        if set(inc_runs).issubset(chall_runs):

            # no plateau walks
            if chal_perf >= inc_perf:
                self.logger.debug("Incumbent (%.4f) is at least as good as the challenger (%.4f) on %d runs." %
                                  (inc_perf, chal_perf, len(chall_runs)))
                return incumbent

            # Challenger is strictly better and has at least the incumbent's
            # runs -> promote it to incumbent.
            run_count = len(chall_runs)
            self.logger.info("Challenger (%.4f) is better than incumbent (%.4f) on %d runs." %
                             (chal_perf, inc_perf, run_count))
            self._log_incumbent_changes(incumbent, challenger)

            if log_traj:
                self.stats.inc_changed += 1
                self.traj_logger.add_entry(train_perf=chal_perf,
                                           incumbent_id=self.stats.inc_changed,
                                           incumbent=challenger)
            return challenger

        # Not enough shared evidence either way.
        return None
Example #3
0
    def test_instances(self):
        """Multi-objective runhistory across instances: bounds are tracked per
        objective, and average_cost averages the normalized costs over all
        instances a config was run on."""
        rh = RunHistory()
        cs = get_config_space()
        config1 = Configuration(cs, values={"a": 1, "b": 2})
        config2 = Configuration(cs, values={"a": 2, "b": 3})

        # (config, cost, time, instance_id) — all at budget 0, seed 1.
        trials = [
            (config1, [0, 10], 5, 1),
            (config1, [50, 20], 10, 2),
            (config1, [75, 20], 10, 3),
            (config2, [100, 30], 15, 1),
            (config2, [0, 30], 15, 2),
        ]
        for config, cost, runtime, instance in trials:
            rh.add(
                config=config,
                cost=cost,
                time=runtime,
                status=StatusType.SUCCESS,
                instance_id=instance,
                seed=1,
                budget=0,
            )

        self.assertEqual(rh.objective_bounds[0], (0, 100))
        self.assertEqual(rh.objective_bounds[1], (10, 30))

        # Average cost returns us the cost of the latest budget
        self.assertEqual(rh.average_cost(config1), 0.375)
        self.assertEqual(rh.average_cost(config2), 0.75)