Example no. 1
    def _log_incumbent_changes(
        self,
        incumbent: Configuration,
        challenger: Configuration,
    ) -> None:
        # Compare incumbent and challenger parameter by parameter and log the diff.
        params = sorted([(param, incumbent[param], challenger[param])
                         for param in challenger.keys()])
        self.logger.info("Changes in incumbent:")
        for param in params:
            if param[1] != param[2]:
                self.logger.info("  %s : %r -> %r" % param)
            else:
                self.logger.debug("  %s remains unchanged: %r", param[0], param[1])
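A minimal standalone sketch of the same diff-logging idea, using plain dicts in place of ConfigSpace Configuration objects; the parameter names and values below are illustrative assumptions, not taken from the source:

    import logging

    logging.basicConfig(level=logging.DEBUG, format="%(message)s")
    logger = logging.getLogger("intensifier")

    # Hypothetical configurations; plain dicts stand in for Configuration objects.
    incumbent = {"learning_rate": 0.1, "max_depth": 3}
    challenger = {"learning_rate": 0.05, "max_depth": 3}

    params = sorted((p, incumbent[p], challenger[p]) for p in challenger)
    logger.info("Changes in incumbent:")
    for name, old, new in params:
        if old != new:
            logger.info("  %s : %r -> %r", name, old, new)
        else:
            logger.debug("  %s remains unchanged: %r", name, old)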
Example no. 2
    def config_to_html(self, default: Configuration, incumbent: Configuration) -> str:
        """Create an HTML table comparing two Configurations.
        Parameters that are inactive in both configurations are removed.

        Parameters
        ----------
        default, incumbent: Configuration
            configurations to be compared

        Returns
        -------
        table: str
            HTML table comparing default and incumbent
        """
        # Remove parameters that are inactive (None) in both configurations
        keys = [k for k in default.keys()
                if default[k] is not None or incumbent[k] is not None]
        default = [
            default[k] if default[k] is not None else "inactive" for k in keys
        ]
        incumbent = [
            incumbent[k] if incumbent[k] is not None else "inactive" for k in keys
        ]
        table = list(zip(keys, default, incumbent))
        # Show parameters that changed first
        same = [x for x in table if x[1] == x[2]]
        diff = [x for x in table if x[1] != x[2]]
        table = []
        if len(diff) > 0:
            table.append(("-------------- Changed parameters: "
                          "--------------", "-----", "-----"))
            table.extend(diff)
        if len(same) > 0:
            table.append(("-------------- Unchanged parameters: "
                          "--------------", "-----", "-----"))
            table.extend(same)
        keys, table = [k[0] for k in table], [k[1:] for k in table]
        # DataFrame comes from pandas (from pandas import DataFrame at module level)
        df = DataFrame(data=table,
                       columns=["Default", "Incumbent"],
                       index=keys)
        table = df.to_html()
        return table
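A minimal sketch of the same table-building idea outside the class, assuming pandas is available; the dicts below stand in for ConfigSpace Configuration objects and the parameter values are invented for illustration:

    from pandas import DataFrame

    # Hypothetical configurations; None marks an inactive parameter.
    default = {"learning_rate": 0.1, "max_depth": 3, "booster": None}
    incumbent = {"learning_rate": 0.05, "max_depth": 3, "booster": None}

    keys = [k for k in default if default[k] is not None or incumbent[k] is not None]
    rows = [(k, default[k], incumbent[k]) for k in keys]
    diff = [r for r in rows if r[1] != r[2]]  # changed parameters shown first
    same = [r for r in rows if r[1] == r[2]]

    ordered = diff + same
    df = DataFrame([r[1:] for r in ordered],
                   columns=["Default", "Incumbent"],
                   index=[r[0] for r in ordered])
    print(df.to_html())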
Example no. 3
    def _race_challenger(self, challenger: Configuration,
                         incumbent: Configuration, run_history: RunHistory,
                         aggregate_func: typing.Callable):
        '''
            Aggressively race the challenger against the incumbent.

            Parameters
            ----------
            challenger : Configuration
                configuration which challenges incumbent
            incumbent : Configuration
                best configuration so far
            run_history : RunHistory
                stores all runs we ran so far
            aggregate_func: typing.Callable
                aggregate performance across instances

            Returns
            -------
            new_incumbent: Configuration
                either challenger or incumbent
        '''
        # at least one run of challenger
        # to increase chall_indx counter
        first_run = False
        inc_perf = run_history.get_cost(incumbent)

        learning_curve = []

        self._num_run += 1
        self._chall_indx += 1

        pc = None  # model state carried over between epochs via the additional run info
        for epoch in range(self.max_epochs):
            status, cost, time, add_info = self.tae_runner.start(
                config=challenger,
                instance=None,
                seed=0,
                cutoff=2**32 - 1,
                instance_specific=None,
                pc=pc)
            try:
                pc = add_info["model"]
            except KeyError:  # model building failed, e.g. because of nan
                break

            learning_curve.append(cost)

            if len(self.learning_curves) > 10 and epoch > self.max_epochs / 4:
                seen_curves = np.array(self.learning_curves)[:, epoch]
                if cost > np.median(seen_curves):
                    self.logger.info("Abort run (%f vs %f)" %
                                     (cost, np.median(seen_curves)))
                    break

        # delete model in runhistory to be more memory efficient
        chall_id = run_history.config_ids[challenger]
        runkey = RunKey(chall_id, None, 0)
        runvalue = run_history.data[runkey]
        try:
            del runvalue.additional_info["model"]
        except KeyError:
            pass

        if epoch == self.max_epochs - 1:
            self.learning_curves.append(learning_curve)

        chal_perf = cost

        if chal_perf < inc_perf:
            self.logger.info(
                "Challenger (%.4f) is better than incumbent (%.4f)" %
                (chal_perf, inc_perf))
            # Show changes in the configuration
            params = sorted([(param, incumbent[param], challenger[param])
                             for param in challenger.keys()])
            self.logger.info("Changes in incumbent:")
            for param in params:
                if param[1] != param[2]:
                    self.logger.info("  %s : %r -> %r" % (param))
                else:
                    self.logger.debug("  %s remains unchanged: %r" %
                                      (param[0], param[1]))
            incumbent = challenger
            self.stats.inc_changed += 1
            self.traj_logger.add_entry(train_perf=chal_perf,
                                       incumbent_id=self.stats.inc_changed,
                                       incumbent=challenger)
        else:
            self.logger.debug(
                "Incumbent (%.4f) is better than challenger (%.4f)" %
                (inc_perf, chal_perf))

        return incumbent
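A small sketch isolating the median-based abort test used inside the epoch loop above: once more than ten full learning curves have been recorded and at least a quarter of the epochs have passed, a challenger whose current cost exceeds the median of the previously seen curves at that epoch is stopped early. The stored curves and the current cost below are invented example values:

    import numpy as np

    max_epochs = 20
    # Hypothetical learning curves from previously, fully evaluated challengers.
    learning_curves = [list(np.linspace(1.0, 0.2, max_epochs)) for _ in range(12)]

    epoch = 8
    cost = 0.9  # challenger's cost at the current epoch (invented)

    if len(learning_curves) > 10 and epoch > max_epochs / 4:
        seen_curves = np.array(learning_curves)[:, epoch]
        if cost > np.median(seen_curves):
            print("Abort run (%f vs %f)" % (cost, np.median(seen_curves)))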