Example #1
    def _maximize(self,
                  runhistory: RunHistory,
                  stats: Stats,
                  num_points: int,
                  _sorted: bool = False,
                  **kwargs) -> List[Tuple[float, Configuration]]:
        """DifferentialEvolutionSolver

        Parameters
        ----------
        runhistory: ~smac.runhistory.runhistory.RunHistory
            runhistory object
        stats: ~smac.stats.stats.Stats
            current stats object
        num_points: int
            number of points to be sampled
        _sorted: bool
            whether random configurations are sorted according to acquisition function
        **kwargs
            not used

        Returns
        -------
        iterable
            An iterable consisting of
            tuple(acquisition_value, :class:`smac.configspace.Configuration`).
        """

        from scipy.optimize._differentialevolution import DifferentialEvolutionSolver
        configs = []

        def func(x):
            return -self.acquisition_function(
                [Configuration(self.config_space, vector=x)])

        ds = DifferentialEvolutionSolver(func,
                                         bounds=[[0, 1], [0, 1]],
                                         args=(),
                                         strategy='best1bin',
                                         maxiter=1000,
                                         popsize=50,
                                         tol=0.01,
                                         mutation=(0.5, 1),
                                         recombination=0.7,
                                         seed=self.rng.randint(1000),
                                         polish=True,
                                         callback=None,
                                         disp=False,
                                         init='latinhypercube',
                                         atol=0)

        rval = ds.solve()
        for pop, val in zip(ds.population, ds.population_energies):
            rc = Configuration(self.config_space, vector=pop)
            rc.origin = 'DifferentialEvolution'
            configs.append((-val, rc))

        configs.sort(key=lambda t: t[0])
        configs.reverse()
        return configs
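Note: the objective `func` above relies on building a `Configuration` directly from a point in the unit hypercube. A minimal sketch of that round-trip (the two-hyperparameter space is an assumption for illustration, not part of the example):

# Minimal sketch of the vector <-> Configuration round-trip used by `func`
# above; the two-hyperparameter space is illustrative only.
from ConfigSpace import ConfigurationSpace, Configuration
from ConfigSpace.hyperparameters import UniformFloatHyperparameter

cs = ConfigurationSpace()
cs.add_hyperparameters([UniformFloatHyperparameter('x0', 0.0, 1.0),
                        UniformFloatHyperparameter('x1', 0.0, 1.0)])

config = cs.sample_configuration()
vec = config.get_array()                 # normalized representation in [0, 1]^d
rebuilt = Configuration(cs, vector=vec)  # what func() does with each candidate
assert rebuilt == config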
Example #2
    def load_json(self, fn, cs):
        """Load and runhistory in json representation from disk.

        Overwrites current runthistory!

        Parameters
        ----------
        fn : str
            file name to load from
        cs : ConfigSpace
            instance of configuration space
        """

        with open(fn) as fp:
            all_data = json.load(fp)

        self.ids_config = {
            int(id_): Configuration(cs, values=values)
            for id_, values in all_data["configs"].items()
        }

        self.config_ids = {
            Configuration(cs, values=values): int(id_)
            for id_, values in all_data["configs"].items()
        }

        self._n_id = len(self.config_ids)

        self.data = {
            self.RunKey(int(k[0]), k[1], int(k[2])):
            self.RunValue(float(v[0]), float(v[1]), v[2], v[3])
            for k, v in all_data["data"]
        }
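The comprehensions above imply a specific on-disk layout: each entry of all_data["data"] is a [key, value] pair with key = [config_id, instance_id, seed] and value = [cost, time, status, additional_info]. A sketch of that layout, with field meanings inferred from the comprehensions (illustrative values, not an official schema):

# Illustrative shape of the JSON this loader expects; values are made up.
all_data = {
    "configs": {
        "1": {"x1": 4},                        # config id -> parameter values
    },
    "data": [
        # [config_id, instance_id, seed], [cost, time, status, additional_info]
        [["1", "inst0", "0"], [0.5, 1.2, "SUCCESS", {}]],
    ],
}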
Example #3
    def test_multi_config_design(self):
        stats = Stats(scenario=self.scenario)
        stats.start_timing()
        self.ta.stats = stats
        tj = TrajLogger(output_dir=None, stats=stats)
        rh = RunHistory(aggregate_func=average_cost)
        self.ta.runhistory = rh
        rng = np.random.RandomState(seed=12345)

        intensifier = Intensifier(tae_runner=self.ta,
                                  stats=stats,
                                  traj_logger=tj,
                                  rng=rng,
                                  instances=[None],
                                  run_obj_time=False)

        configs = [
            Configuration(configuration_space=self.cs, values={"x1": 4}),
            Configuration(configuration_space=self.cs, values={"x1": 2})
        ]
        dc = MultiConfigInitialDesign(tae_runner=self.ta,
                                      scenario=self.scenario,
                                      stats=stats,
                                      traj_logger=tj,
                                      runhistory=rh,
                                      rng=rng,
                                      configs=configs,
                                      intensifier=intensifier,
                                      aggregate_func=average_cost)

        inc = dc.run()
        self.assertEqual(stats.ta_runs, 2)
        self.assertEqual(len(rh.data), 2)
        self.assertEqual(rh.get_cost(inc), 4)
Example #4
    def _call_ta(self, config: Configuration, instance: str,
                 instance_specific: str, cutoff: float, seed: int):

        # TODO: maybe replace fixed instance specific and cutoff_length (0) to
        # other value
        cmd = []
        cmd.extend(self.ta)
        cmd.extend([
            "--instance", instance, "--cutoff",
            str(cutoff), "--seed",
            str(seed), "--config"
        ])

        for p in config:
            if config.get(p) is not None:
                cmd.extend(["-" + str(p), str(config[p])])

        self.logger.debug("Calling: %s" % (" ".join(cmd)))
        p = Popen(cmd,
                  shell=False,
                  stdout=PIPE,
                  stderr=PIPE,
                  universal_newlines=True)
        stdout_, stderr_ = p.communicate()

        self.logger.debug("Stdout: %s" % (stdout_))
        self.logger.debug("Stderr: %s" % (stderr_))

        results = {"status": "CRASHED", "cost": 1234567890}
        for line in stdout_.split("\n"):
            if line.startswith("Result of this algorithm run:"):
                fields = ":".join(line.split(":")[1:])
                results = json.loads(fields)

        return results, stdout_, stderr_
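The stdout protocol here is worth spelling out: the wrapper falls back to a CRASHED result unless it finds the marker line, and everything after the marker's first colon must be valid JSON. A standalone sketch of just that parsing step:

import json

# Minimal sketch of the stdout protocol parsed above: everything after the
# first colon on the marker line is treated as a JSON object.
stdout_ = 'some log line\nResult of this algorithm run: {"status": "SUCCESS", "cost": 0.25}\n'

results = {"status": "CRASHED", "cost": 1234567890}  # fallback, as in the example
for line in stdout_.split("\n"):
    if line.startswith("Result of this algorithm run:"):
        results = json.loads(":".join(line.split(":")[1:]))

assert results["status"] == "SUCCESS"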
Example #5
    def _maximize(
            self,
            runhistory: RunHistory,
            stats: Stats,
            num_points: int,
            _sorted: bool = False,
            **kwargs
    ) -> List[Tuple[float, Configuration]]:
        size = int(np.floor(num_points * (1.0 - self.ratio)))
        if size != 1:
            rand_configs = self.config_space.sample_configuration(size=size)
        else:
            rand_configs = [self.config_space.sample_configuration(size=size)]

        for i in range(len(rand_configs)):
            rand_configs[i].origin = 'Forest Search (random)'

        (lows, highs), value = self.acquisition_function.model.get_minimum()
        points = np.array(
            list(self.rng.uniform(max(low, 0), min(high, 1), int(np.ceil(num_points * self.ratio)))
                 for low, high in zip(lows, highs))
        ).transpose()
        for point in points:
            rand_configs.append(Configuration(self.config_space, vector=point, origin="Forest Search (forest)"))
        self.rng.shuffle(rand_configs)

        if _sorted:
            return self._sort_configs_by_acq_value(rand_configs)
        else:
            return [(0, rand_configs[i]) for i in range(len(rand_configs))]
Example #6
 def test_run_execute_func_for_fmin(self, mock):
     mock.return_value = {'x1': 2, 'x2': 1}
     c = Configuration(configuration_space=self.cs, values={})
     target = lambda x: x[0]**2 + x[1]
     taf = ExecuteTAFuncArray(target, stats=self.stats)
     rval = taf._call_ta(target, c)
     self.assertEqual(rval, 5)
Example #7
    def _call_ta(
        self,
        config: Configuration,
        instance: str,
        instance_specific: str,
        cutoff: float,
        seed: int,
    ) -> typing.Tuple[str, str]:

        # TODO: maybe replace fixed instance specific and cutoff_length (0) to other value
        cmd = []  # type: typing.List[str]
        if not isinstance(self.ta, (list, tuple)):
            raise TypeError(
                "self.ta needs to be of type list or tuple, but is %s" % type(self.ta)
            )
        cmd.extend(self.ta)
        cmd.extend([instance, instance_specific, str(cutoff), "0", str(seed)])
        for p in config:
            if config.get(p) is not None:
                cmd.extend(["-" + str(p), str(config[p])])

        self.logger.debug("Calling: %s" % (" ".join(cmd)))
        p = Popen(cmd, shell=False, stdout=PIPE, stderr=PIPE, universal_newlines=True)
        stdout_, stderr_ = p.communicate()

        self.logger.debug("Stdout: %s" % stdout_)
        self.logger.debug("Stderr: %s" % stderr_)

        return stdout_, stderr_
Example #8
    def test_ambigious_categoricals(self, mock_stats):
        mock_stats.ta_time_used = 0.5
        mock_stats.get_used_wallclock_time = self.mocked_get_used_wallclock_time
        mock_stats.ta_runs = 1

        with tempfile.TemporaryDirectory() as tmpdir:
            tl = TrajLogger(output_dir=tmpdir, stats=mock_stats)

            problem_config = Configuration(
                self.cs, {
                    'param_a': 0.0,
                    'param_b': 2,
                    'param_c': 'value',
                    'ambigous_categorical': True
                })  # not recoverable without json
            tl.add_entry(0.9, 1, problem_config)

            from_aclib2 = tl.read_traj_aclib_format(
                os.path.join(tmpdir, 'traj_aclib2.json'), self.cs)
            from_alljson = tl.read_traj_alljson_format(
                os.path.join(tmpdir, 'traj.json'), self.cs)

        # Wrong! but passes:
        self.assertIsInstance(
            from_aclib2[0]['incumbent']['ambigous_categorical'], str)
        # Works correctly for alljson:
        self.assertIsInstance(
            from_alljson[0]['incumbent']['ambigous_categorical'], bool)
Example #9
    def get_default_incumbent(self):
        instantiated_hyperparameters = {}
        for hp in self.space.get_hyperparameters():
            conditions = self.space._get_parent_conditions_of(hp.name)
            active = True
            for condition in conditions:
                parent_names = [
                    c.parent.name for c in condition.get_descendant_literal_conditions()
                ]

                parents = {
                    parent_name: instantiated_hyperparameters[parent_name]
                    for parent_name in parent_names
                }

                if not condition.evaluate(parents):
                    # TODO find out why a configuration is illegal!
                    active = False

            if not active:
                instantiated_hyperparameters[hp.name] = None
            elif isinstance(hp, Constant):
                instantiated_hyperparameters[hp.name] = hp.value
            else:
                instantiated_hyperparameters[hp.name] = hp.default_value

        config = Configuration(self.space, instantiated_hyperparameters)
        return config
Example #10
    def cross(self, parent1, parent2, percent):
        """Crosses two configuration and creates a new one.
        With probability percent choose value from parent1 for each
        hyperparameter that is not dependent on any other,
        then also include children of this hp from same parent.
        """
        cs = self.scenario.cs
        hyp_names = cs.get_hyperparameter_names()

        new_values = dict()

        for name in hyp_names:
            # print("Hyperparameter: %s" % (name))
            if name not in new_values.keys() and cs.get_parents_of(name) == []:
                ran = np.random.randint(100)
                if ran % math.ceil(100 / percent) == 0:
                    parent = parent1.get_dictionary()
                else:
                    parent = parent2.get_dictionary()

                # print("Chosen parent: %s" % (ran % 2))
                val = parent[name]
                new_values[name] = val

                childs = cs.get_children_of(name)
                # print("Children of %s: %s" % (name, childs))
                for child in childs:
                    # this condition can be excluded
                    if child.name in parent.keys():
                        new_values[child.name] = parent[child.name]

        config = Configuration(cs, values=new_values)
        return config
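The modulo trick in cross() is easy to misread: ran % math.ceil(100 / percent) == 0 selects parent1 on roughly percent% of draws (exactly, when percent divides 100). A quick illustrative check:

import math

# Illustrative check of the selection trick in cross(): for percent = 25,
# ran % ceil(100 / 25) == 0 holds for ran in {0, 4, 8, ...}, i.e. 25 of the
# 100 possible values of ran.
percent = 25
hits = sum(1 for ran in range(100) if ran % math.ceil(100 / percent) == 0)
print(hits / 100)  # 0.25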
Example #11
    def test_get_runs_capped(self):
        ''' test if capped, crashed and aborted runs are ignored
            during rh-recovery '''
        scen = Scenario(self.scen_fn,
                        cmd_options={'run_obj': 'quality',
                                     'instances': ['0']})

        validator = Validator(scen, self.trajectory, self.rng)

        # Get runhistory
        old_configs = [Configuration(scen.cs, values={'x1': i, 'x2': i}) for i in range(1, 7)]
        old_rh = RunHistory()
        old_rh.add(old_configs[0], 1, 1, StatusType.SUCCESS, instance_id='0', seed=0)
        old_rh.add(old_configs[1], 1, 1, StatusType.TIMEOUT, instance_id='0', seed=0)
        old_rh.add(old_configs[2], 1, 1, StatusType.CRASHED, instance_id='0', seed=0)
        old_rh.add(old_configs[3], 1, 1, StatusType.ABORT, instance_id='0', seed=0)
        old_rh.add(old_configs[4], 1, 1, StatusType.MEMOUT, instance_id='0', seed=0)
        old_rh.add(old_configs[5], 1, 1, StatusType.CAPPED, instance_id='0', seed=0)

        # Get multiple configs
        expected = [_Run(inst_specs='0', seed=0, inst='0', config=old_configs[2]),
                    _Run(inst_specs='0', seed=0, inst='0', config=old_configs[3]),
                    _Run(inst_specs='0', seed=0, inst='0', config=old_configs[5])]

        runs = validator._get_runs(old_configs, ['0'], repetitions=1, runhistory=old_rh)
        self.assertEqual(runs[0], expected)
Example #12
    def load_json(self, fn: str, cs: ConfigurationSpace):
        """Load and runhistory in json representation from disk.

        Overwrites current runhistory!

        Parameters
        ----------
        fn : str
            file name to load from
        cs : ConfigSpace
            instance of configuration space
        """
        with open(fn) as fp:
            all_data = json.load(fp, object_hook=StatusType.enum_hook)

        self.ids_config = {int(id_): Configuration(cs, values=values)
                           for id_, values in all_data["configs"].items()}

        self.config_ids = {config: id_ for id_, config in
                           self.ids_config.items()}

        self._n_id = len(self.config_ids)

        # important to use add method to use all data structure correctly
        for k, v in all_data["data"]:
            self.add(config=self.ids_config[int(k[0])],
                     cost=float(v[0]),
                     time=float(v[1]),
                     status=StatusType(v[2]),
                     instance_id=k[1],
                     seed=int(k[2]),
                     additional_info=v[3])
Example #13
 def setUp(self):
     logging.basicConfig()
     self.logger = logging.getLogger(self.__module__ + "." + self.__class__.__name__)
     self.logger.setLevel(logging.DEBUG)
     self.value = 0
     self.cs = ConfigurationSpace()
     self.cs.add_hyperparameters([
         UniformFloatHyperparameter('param_a', -0.2, 1.77, 1.1),
         UniformIntegerHyperparameter('param_b', -3, 10, 1),
         Constant('param_c', 'value'),
         CategoricalHyperparameter('ambigous_categorical', choices=['True', True, 5]),  # the string 'True' and the bool True are ambiguous here
     ])
     self.test_config = Configuration(self.cs, {'param_a': 0.5,
                                                'param_b': 1,
                                                'param_c': 'value',
                                                'ambigous_categorical': 5})
Example #14
    def _call_ta(
        self,
        obj: typing.Callable,
        config: Configuration,
        obj_kwargs: typing.Dict[str, typing.Union[int, str, float, None]],
    ) -> typing.Union[float, typing.Tuple[float, typing.Dict]]:

        x = np.array([val for _, val in sorted(config.get_dictionary().items())],
                     dtype=float)
        return obj(x, **obj_kwargs)
Example #15
    def crossover(self, a: Configuration, b: Configuration) -> List[Configuration]:
        a_array = a.get_array()
        b_array = b.get_array()

        c_point = int(self.rng.uniform(0, 1) * len(a_array))

        for i in range(c_point, len(a_array)):
            t = a_array[i]
            a_array[i] = b_array[i]
            b_array[i] = t

        # new configurations with new arrays
        new_a = Configuration(configuration_space=a.configuration_space,
                              vector=a_array,
                              origin="Crossover")
        new_b = Configuration(configuration_space=b.configuration_space,
                              vector=b_array,
                              origin="Crossover")

        return [new_a, new_b]
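Because get_array() returns the configuration in its normalized vector encoding, the one-point crossover above works uniformly across numerical and categorical hyperparameters. A standalone restatement with a usage example (the free function and the 4-float space are assumptions for illustration):

import numpy as np
from ConfigSpace import ConfigurationSpace, Configuration
from ConfigSpace.hyperparameters import UniformFloatHyperparameter

def crossover(a, b, rng):
    # Same tail swap as above, done on copies of the normalized arrays.
    a_array = a.get_array().copy()
    b_array = b.get_array().copy()
    c_point = int(rng.uniform(0, 1) * len(a_array))
    tail = a_array[c_point:].copy()
    a_array[c_point:] = b_array[c_point:]
    b_array[c_point:] = tail
    new_a = Configuration(a.configuration_space, vector=a_array, origin="Crossover")
    new_b = Configuration(b.configuration_space, vector=b_array, origin="Crossover")
    return [new_a, new_b]

cs = ConfigurationSpace()
cs.add_hyperparameters(
    [UniformFloatHyperparameter('x%d' % i, 0.0, 1.0) for i in range(4)])
rng = np.random.RandomState(42)
child_a, child_b = crossover(cs.sample_configuration(),
                             cs.sample_configuration(), rng)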
Example #16
    def load_json(self, fn: str, cs: ConfigurationSpace, id_set=set()):
        """Load and runhistory in json representation from disk.

        Overwrites current runhistory!

        Parameters
        ----------
        fn : str
            file name to load from
        cs : ConfigSpace
            instance of configuration space
        id_set : set
            ids of configurations that were already loaded; these are skipped,
            and the updated id set is returned
        """
        try:
            txt = self.file_system.read_txt(fn)
            all_data = json.loads(txt, object_hook=StatusType.enum_hook)
        except Exception as e:
            self.logger.warning(
                'Encountered exception %s while reading runhistory from %s. '
                'Not adding any runs!',
                e,
                fn,
            )
            return

        config_origins = all_data.get("config_origins", {})
        self.ids_config = {}
        updated_id_set = deepcopy(id_set)
        for i, (id_, values) in enumerate(all_data["configs"].items()):
            id_ = int(id_)
            if id_ not in id_set:
                self.ids_config[id_] = Configuration(cs,
                                                     values=values,
                                                     origin=config_origins.get(
                                                         str(id_), None))
                updated_id_set.add(id_)

        self.config_ids = {
            config: id_
            for id_, config in self.ids_config.items()
        }

        self._n_id = len(self.config_ids)
        # important to use add method to use all data structure correctly
        for k, v in all_data["data"]:
            id_ = int(k[0])
            if id_ in self.ids_config:
                self.add(config=self.ids_config[id_],
                         cost=float(v[0]),
                         time=float(v[1]),
                         status=StatusType(v[2]),
                         instance_id=k[1],
                         seed=int(k[2]),
                         additional_info=v[3])
        return updated_id_set
Example #17
 def _log_incumbent_changes(
     self,
     incumbent: Configuration,
     challenger: Configuration,
 ) -> None:
     params = sorted([(param, incumbent[param], challenger[param]) for param in challenger.keys()])
     self.logger.info("Changes in incumbent:")
     for param in params:
         if param[1] != param[2]:
             self.logger.info("  %s : %r -> %r" % param)
         else:
             self.logger.debug("  %s remains unchanged: %r", param[0], param[1])
Example #18
    def load_json(self, fn: str, cs: ConfigurationSpace) -> None:
        """Load and runhistory in json representation from disk.

        Overwrites current runhistory!

        Parameters
        ----------
        fn : str
            file name to load from
        cs : ConfigSpace
            instance of configuration space
        """
        try:
            with open(fn) as fp:
                all_data = json.load(fp, object_hook=StatusType.enum_hook)
        except Exception as e:
            self.logger.warning(
                'Encountered exception %s while reading runhistory from %s. '
                'Not adding any runs!',
                e,
                fn,
            )
            return

        config_origins = all_data.get("config_origins", {})

        self.ids_config = {
            int(id_): Configuration(cs,
                                    values=values,
                                    origin=config_origins.get(id_, None))
            for id_, values in all_data["configs"].items()
        }

        self.config_ids = {
            config: id_
            for id_, config in self.ids_config.items()
        }

        self._n_id = len(self.config_ids)

        # important to use add method to use all data structure correctly
        for k, v in all_data["data"]:
            self.add(config=self.ids_config[int(k[0])],
                     cost=float(v[0]),
                     time=float(v[1]),
                     status=StatusType(v[2]),
                     instance_id=k[1],
                     seed=int(k[2]),
                     budget=float(k[3]) if len(k) == 4 else 0,
                     starttime=v[3],
                     endtime=v[4],
                     additional_info=v[5])
Example #19
    def tell(self, config, performance):

        if not config:
            return

        config = copy.copy(config)

        for key in self.switch_optiones.keys():
            config[key] = [
                x for x in self.switch_optiones[key]
                if any(x in val for val in config.keys())
            ][0]

        if self.maximize_metric:
            performance = 1 - performance[1]

        config = Configuration(self.cspace, values=config)

        self.optimizer.stats.ta_runs += 1

        # first incumbent setting
        if self.runtime == 0:
            self.optimizer.incumbent = Configuration(
                self.cspace, values=config)  #self.challengers[0])
            self.optimizer.start()
        else:
            if self.optimizer.stats.is_budget_exhausted():
                self.budget_exhausted = True

        self.optimizer.runhistory.add(config=config,
                                      cost=performance,
                                      time=0,
                                      status=StatusType.SUCCESS,
                                      seed=0)

        self.runtime += 1
Example #20
def build_config_obj(config_space: ConfigurationSpace, values_dict: dict):
    unconditional_parameters = config_space.get_all_unconditional_hyperparameters()

    for hyperparam in unconditional_parameters:
        choice = values_dict.get(hyperparam, None)
        if choice is None:
            continue

        algorithm = Mapper.get_class(choice)
        for param in algorithm.params:
            param_name = encode_parameter(param.name, algorithm.name)
            if param_name not in values_dict:
                values_dict[param_name] = param.default_value

    return Configuration(configuration_space=config_space, values=values_dict)
Example #21
def maxsat(n_eval, n_variables, random_seed):
    assert n_variables in [28, 43, 60]
    if n_variables == 28:
        evaluator = MaxSAT28(random_seed)
    elif n_variables == 43:
        evaluator = MaxSAT43(random_seed)
    elif n_variables == 60:
        evaluator = MaxSAT60(random_seed)
    name_tag = 'maxsat' + str(n_variables) + '_' + datetime.now().strftime(
        "%Y-%m-%d-%H:%M:%S:%f")
    cs = ConfigurationSpace()
    for i in range(n_variables):
        car_var = CategoricalHyperparameter('x' + str(i + 1).zfill(2),
                                            [str(elm) for elm in range(2)],
                                            default_value='0')
        cs.add_hyperparameter(car_var)
    init_points_numpy = evaluator.suggested_init.long().numpy()
    init_points = []
    for i in range(init_points_numpy.shape[0]):
        init_points.append(
            Configuration(
                cs, {
                    'x' + str(j + 1).zfill(2): str(init_points_numpy[i][j])
                    for j in range(n_variables)
                }))

    def evaluate(x):
        x_tensor = torch.LongTensor(
            [int(x['x' + str(j + 1).zfill(2)]) for j in range(n_variables)])
        return evaluator.evaluate(x_tensor).item()

    print('Began    at ' + datetime.now().strftime("%H:%M:%S"))
    scenario = Scenario({
        "run_obj": "quality",
        "runcount-limit": n_eval,
        "cs": cs,
        "deterministic": "true",
        'output_dir': os.path.join(EXP_DIR, name_tag)
    })
    smac = SMAC(scenario=scenario,
                tae_runner=evaluate,
                initial_configurations=init_points)
    smac.optimize()

    evaluations, optimum = evaluations_from_smac(smac)
    print('Finished at ' + datetime.now().strftime("%H:%M:%S"))
    return optimum
Example #22
def pest_control(n_eval, random_seed):
    evaluator = PestControl(random_seed)
    name_tag = 'pestcontrol_' + datetime.now().strftime("%Y-%m-%d-%H:%M:%S:%f")
    cs = ConfigurationSpace()
    for i in range(PESTCONTROL_N_STAGES):
        car_var = CategoricalHyperparameter(
            'x' + str(i + 1).zfill(2),
            [str(elm) for elm in range(PESTCONTROL_N_CHOICE)],
            default_value='0')
        cs.add_hyperparameter(car_var)

    init_points_numpy = sample_init_points([PESTCONTROL_N_CHOICE] *
                                           PESTCONTROL_N_STAGES, 20,
                                           random_seed).long().numpy()
    init_points = []
    for i in range(init_points_numpy.shape[0]):
        init_points.append(
            Configuration(
                cs, {
                    'x' + str(j + 1).zfill(2): str(init_points_numpy[i][j])
                    for j in range(PESTCONTROL_N_STAGES)
                }))

    def evaluate(x):
        x_tensor = torch.LongTensor([
            int(x['x' + str(j + 1).zfill(2)])
            for j in range(PESTCONTROL_N_STAGES)
        ])
        return evaluator.evaluate(x_tensor).item()

    print('Began    at ' + datetime.now().strftime("%H:%M:%S"))
    scenario = Scenario({
        "run_obj": "quality",
        "runcount-limit": n_eval,
        "cs": cs,
        "deterministic": "true",
        'output_dir': os.path.join(EXP_DIR, name_tag)
    })
    smac = SMAC(scenario=scenario,
                tae_runner=evaluate,
                initial_configurations=init_points)
    smac.optimize()

    evaluations, optimum = evaluations_from_smac(smac)
    print('Finished at ' + datetime.now().strftime("%H:%M:%S"))
    return optimum
Example #23
def branin(n_eval):
    evaluator = Branin()
    name_tag = '_'.join(
        ['branin', datetime.now().strftime("%Y-%m-%d-%H:%M:%S:%f")])
    cs = ConfigurationSpace()
    for i in range(len(evaluator.n_vertices)):
        car_var = UniformIntegerHyperparameter('x' + str(i + 1).zfill(2),
                                               0,
                                               int(evaluator.n_vertices[i]) - 1,
                                               default_value=25)
        cs.add_hyperparameter(car_var)

    init_points_numpy = evaluator.suggested_init.long().numpy()
    init_points = []
    for i in range(init_points_numpy.shape[0]):
        init_points.append(
            Configuration(
                cs, {
                    'x' + str(j + 1).zfill(2): int(init_points_numpy[i][j])
                    for j in range(len(evaluator.n_vertices))
                }))

    def evaluate(x):
        x_tensor = torch.LongTensor([
            int(x['x' + str(j + 1).zfill(2)])
            for j in range(len(evaluator.n_vertices))
        ])
        return evaluator.evaluate(x_tensor).item()

    print('Began    at ' + datetime.now().strftime("%H:%M:%S"))
    scenario = Scenario({
        "run_obj": "quality",
        "runcount-limit": n_eval,
        "cs": cs,
        "deterministic": "true",
        'output_dir': os.path.join(EXP_DIR, name_tag)
    })
    smac = SMAC(scenario=scenario,
                tae_runner=evaluate,
                initial_configurations=init_points)
    smac.optimize()

    evaluations, optimum = evaluations_from_smac(smac)
    print('Finished at ' + datetime.now().strftime("%H:%M:%S"))
    return optimum
Example #24
def contamination(n_eval, lamda, random_seed_pair):
    evaluator = Contamination(lamda, random_seed_pair)
    name_tag = '_'.join([
        'contamination', ('%.2E' % lamda),
        datetime.now().strftime("%Y-%m-%d-%H:%M:%S:%f")
    ])
    cs = ConfigurationSpace()
    for i in range(CONTAMINATION_N_STAGES):
        car_var = CategoricalHyperparameter('x' + str(i + 1).zfill(2),
                                            [str(elm) for elm in range(2)],
                                            default_value='0')
        cs.add_hyperparameter(car_var)

    init_points_numpy = evaluator.suggested_init.long().numpy()
    init_points = []
    for i in range(init_points_numpy.shape[0]):
        init_points.append(
            Configuration(
                cs, {
                    'x' + str(j + 1).zfill(2): str(init_points_numpy[i][j])
                    for j in range(CONTAMINATION_N_STAGES)
                }))

    def evaluate(x):
        x_tensor = torch.LongTensor([
            int(x['x' + str(j + 1).zfill(2)])
            for j in range(CONTAMINATION_N_STAGES)
        ])
        return evaluator.evaluate(x_tensor).item()

    print('Began    at ' + datetime.now().strftime("%H:%M:%S"))
    scenario = Scenario({
        "run_obj": "quality",
        "runcount-limit": n_eval,
        "cs": cs,
        "deterministic": "true",
        'output_dir': os.path.join(EXP_DIR, name_tag)
    })
    smac = SMAC(scenario=scenario,
                tae_runner=evaluate,
                initial_configurations=init_points)
    smac.optimize()

    evaluations, optimum = evaluations_from_smac(smac)
    print('Finished at ' + datetime.now().strftime("%H:%M:%S"))
    return optimum
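The four benchmark wrappers above (maxsat, pest_control, branin, contamination) repeat one pattern: variables named 'x01'..'xNN', string-encoded categorical choices, and initial points converted element-wise into Configuration objects. A hypothetical helper capturing the shared space construction:

from ConfigSpace import ConfigurationSpace
from ConfigSpace.hyperparameters import CategoricalHyperparameter

def make_categorical_space(n_variables, n_choices):
    # Hypothetical refactoring of the space construction repeated above.
    cs = ConfigurationSpace()
    for i in range(n_variables):
        cs.add_hyperparameter(
            CategoricalHyperparameter('x' + str(i + 1).zfill(2),
                                      [str(elm) for elm in range(n_choices)],
                                      default_value='0'))
    return cs

# e.g. the contamination space: make_categorical_space(CONTAMINATION_N_STAGES, 2)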
Example #25
    def read_str(data: str,
                 cs: ConfigurationSpace,
                 aggregate_func: callable = average_cost):
        """Read a string line and transform it to a ConfigHistory. The input
        should be valid. For example, "0.8(config) 1(#runhistory) 0.6 1.2 1234"

        Parameters
        ----------
        data : str
            A string containing config and runhistory info.
        cs : ConfigurationSpace
            The ConfigurationSpace.
        aggregate_func : callable, default = average_cost
            The aggregate function.

        Returns
        -------
        ConfigHistory
            The parsed ConfigHistory.
        """
        # First, split the line and read in the Configuration
        line = data.split()
        # Use the ConfigSpace to get the number of hyperparameters
        num_config = len(cs.get_hyperparameters())
        config = Configuration(cs, vector=np.array(
            [float(param) for param in line[:num_config]]))

        # Initialization: each config corresponds to one runhistory
        runhistory = RunHistory(aggregate_func=aggregate_func)
        # Read the number of runhistory entries
        num_runhistory = int(float(line[num_config]))
        counter = num_config + 1
        # Then read each triple of numbers as one runhistory entry
        for i in range(num_runhistory):
            cost = float(line[counter])
            time = float(line[counter + 1])
            seed = int(float(line[counter + 2]))
            counter += 3
            # Add to the runhistory
            runhistory.add(config, cost, time, StatusType.SUCCESS, seed=seed)

        # Return the resulting ConfigHistory
        config_history = ConfigHistory(config, cs, runhistory=runhistory,
                                       aggregate_func=aggregate_func)
        return config_history
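The line format parsed by read_str is, per the docstring example: num_config floats for the configuration vector, one run count, then that many (cost, time, seed) triples. A standalone sketch of the tokenization for a one-hyperparameter space:

# Standalone sketch of the tokenization in read_str, for a space with a
# single hyperparameter (so num_config == 1); line taken from the docstring.
line = "0.8 1 0.6 1.2 1234"
tokens = line.split()
num_config = 1                                    # len(cs.get_hyperparameters())
vector = [float(t) for t in tokens[:num_config]]  # [0.8]
num_runhistory = int(float(tokens[num_config]))   # 1 run recorded
cost = float(tokens[num_config + 1])              # 0.6
time = float(tokens[num_config + 2])              # 1.2
seed = int(float(tokens[num_config + 3]))         # 1234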
Example #26
    def tell(self, config: dict, performance: float, runtime: float = 2.0):
        if self.flag0:
            config[self.temp[0]] = self.temp[1]

        performance = performance[1]
        self.optimizer.stats.ta_runs += 1
        self.optimizer.stats.ta_time_used += runtime

        hash_config = self.hashable_dict(config)
        self.memory[hash_config] = performance

        # convert the dictionary into a Configuration
        config = Configuration(self.space, values=config)
        if self.run_obj == "runtime":
            performance = -runtime
            if runtime > self.cutoff_time:
                runtime = self.cutoff_time
                self.optimizer.runhistory.add(
                    config=config,
                    cost=performance,
                    time=runtime,
                    status=StatusType.TIMEOUT,
                )
            else:
                self.optimizer.runhistory.add(
                    config=config,
                    cost=performance,
                    time=runtime,
                    status=StatusType.SUCCESS,
                )
        else:
            self.optimizer.runhistory.add(
                config=config, cost=performance, time=runtime, status=StatusType.SUCCESS
            )

        if not self.flag:
            self.optimizer.incumbent = self.compare_configs(
                self.optimizer.incumbent, config
            )
Example #27
    def config_to_html(self, default: Configuration, incumbent: Configuration):
        """Create HTML-table to compare Configurations.
        Removes unused parameters.

        Parameters
        ----------
        default, incumbent: Configurations
            configurations to be converted

        Returns
        -------
        table: str
            HTML-table comparing default and incumbent
        """
        # Remove unused parameters
        keys = [k for k in default.keys() if default[k] or incumbent[k]]
        default = [
            default[k] if default[k] is not None else "inactive" for k in keys
        ]
        incumbent = [
            incumbent[k] if incumbent[k] is not None else "inactive" for k in keys
        ]
        table = list(zip(keys, default, incumbent))
        # Show changed parameters first
        same = [x for x in table if x[1] == x[2]]
        diff = [x for x in table if x[1] != x[2]]
        table = []
        if len(diff) > 0:
            table.extend([("-------------- Changed parameters: "\
                           "--------------", "-----", "-----")])
            table.extend(diff)
        if len(same) > 0:
            table.extend([("-------------- Unchanged parameters: "\
                           "--------------", "-----", "-----")])
            table.extend(same)
        keys, table = [k[0] for k in table], [k[1:] for k in table]
        df = DataFrame(data=table,
                       columns=["Default", "Incumbent"],
                       index=keys)
        table = df.to_html()
        return table
Example #28
 def _create_run_history(self):
     runhistory = RunHistory()
     candidates = []
     candidates.extend(
         self.parameter_domain.get_top_results(
             int(self.candidates_for_warmstart_history * 0.5)))
     if len(candidates) == int(self.candidates_for_warmstart_history * 0.5):
         for _ in range(int(self.candidates_for_warmstart_history * 0.5)):
             candidates.append(self.parameter_domain.get_random_result())
     for score, candidate in candidates:
         runhistory.add(
             config=Configuration(
                 self.configuration_space,
                 values=self._vector_to_smac_dict(candidate),
             ),
             cost=(1 - score),
             time=self.pipeline_evaluation_timeout,
             status=StatusType.SUCCESS,
             seed=self.seed,
         )
     return runhistory
Example #29
 def generate_random_configuration(self):
     """
     The function generates random configurations by randomizing each
     hyparameters in the configuration until a valid configuration is
     returned.
     """
     hyper_params = self.scenario.cs.get_hyperparameters()
     candidate_conf = None
     found = False
     while not found:
         values = dict()
         for param in hyper_params:
             key, val = self.random_parameter(param)
             values[key] = val
         try:
             candidate_conf = Configuration(configuration_space=self.scenario.cs,
                                            values=values,
                                            allow_inactive_with_values=True)
             found = True
         except ValueError:
             continue
     return candidate_conf
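The rejection loop above leans on Configuration's constructor raising ValueError for illegal values. A minimal demonstration of that behavior (the single-hyperparameter space is illustrative):

from ConfigSpace import ConfigurationSpace, Configuration
from ConfigSpace.hyperparameters import UniformIntegerHyperparameter

# Illustrative space; Configuration validates values eagerly and raises
# ValueError for anything outside the hyperparameter's domain.
cs = ConfigurationSpace()
cs.add_hyperparameter(UniformIntegerHyperparameter('param_b', -3, 10))

try:
    Configuration(cs, values={'param_b': 99})  # outside [-3, 10]
except ValueError:
    print('rejected, resampling...')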
Example #30
    def _call_ta(self, config: Configuration, instance: str,
                 instance_specific: str, cutoff: float, seed: int):

        # TODO: maybe replace fixed instance specific and cutoff_length (0) to
        # other value
        cmd = []
        cmd.extend(self.ta)
        cmd.extend([instance, instance_specific, str(cutoff), "0", str(seed)])
        for p in config:
            if config.get(p) is not None:
                cmd.extend(["-" + str(p), str(config[p])])

        self.logger.debug("Calling: %s" % (" ".join(cmd)))
        p = Popen(cmd,
                  shell=False,
                  stdout=PIPE,
                  stderr=PIPE,
                  universal_newlines=True)
        stdout_, stderr_ = p.communicate()

        self.logger.debug("Stdout: %s" % (stdout_))
        self.logger.debug("Stderr: %s" % (stderr_))

        return stdout_, stderr_