Code example #1
    def get_benchmark(self, L=8, fuzziness=1.5, seed=0):
        """
        Get Benchmark from DAC paper

        Parameters
        -------
        L : int
            Minimum sequence length, was 8, 16 or 32 in the paper
        fuzziness : float
            Amount of noise applied. Was 1.5 for most of the experiments
        seed : int
            Environment seed

        Returns
        -------
        env : LubyEnv
            Luby environment
        """
        self.config = objdict(LUBY_DEFAULTS.copy())
        self.config.min_steps = L
        self.config.seed = seed
        self.config.instance_set = [[0, 0]]
        self.config.reward_range = (-10, 10)
        env = LubyEnv(self.config)
        rng = np.random.RandomState(self.config.seed)

        def fuzz():
            return rng.normal(-1, fuzziness)

        fuzzy_env = RewardNoiseWrapper(env, noise_function=fuzz)
        return fuzzy_env
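A minimal usage sketch for the snippet above; the import path and the Gym-style 4-tuple step API are assumptions based on the rest of this listing, not part of the example itself:

# Usage sketch (assumes the usual DACBench import path)
from dacbench.benchmarks import LubyBenchmark

bench = LubyBenchmark()
env = bench.get_benchmark(L=8, fuzziness=1.5, seed=42)  # Luby env with noisy rewards

state = env.reset()
done = False
while not done:
    action = env.action_space.sample()  # random policy, only to exercise the env
    state, reward, done, info = env.step(action)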
Code example #2
    def test_from_and_to_json(self):
        bench1 = AbstractBenchmark(config_path="tests/test_config.json")
        json1 = bench1.serialize_config()
        bench2 = AbstractBenchmark(config=objdict(json1))
        json2 = bench2.serialize_config()

        print(json1)
        print(json2)
        self.assertEqual(json1, json2)
Code example #3
File: test_onell.py  Project: automl/DACBench
 def make_env(self, config):
     config["instance_set"] = {
         0: objdict({
             "size": 2000,
             "max_evals": 30000
         })
     }
     env = OneLLEnv(config)
     return env
Code example #4
File: onell_benchmark.py  Project: automl/DACBench
    def read_instance_set(self):
        """Read instance set from file"""
        path = (os.path.dirname(os.path.abspath(__file__)) + "/" +
                self.config.instance_set_path)
        self.config["instance_set"] = pd.read_csv(path,
                                                  index_col=0).to_dict("id")

        for key, val in self.config["instance_set"].items():
            self.config["instance_set"][key] = objdict(val)
Code example #5
    def test_config_file_management(self):
        bench = AbstractBenchmark()

        bench.config = objdict({"seed": 0})
        test_config = objdict({"seed": 10})
        with open("test_conf.json", "w+") as fp:
            json.dump(test_config, fp)
        self.assertTrue(bench.config.seed == 0)
        bench.read_config_file("test_conf.json")
        self.assertTrue(bench.config.seed == 10)
        self.assertTrue(len(bench.config.keys()) == 1)
        os.remove("test_conf.json")

        bench.save_config("test_conf2.json")
        with open("test_conf2.json", "r") as fp:
            recovered = json.load(fp)
        self.assertTrue(recovered["seed"] == 10)
        self.assertTrue(len(recovered.keys()) == 2)
        os.remove("test_conf2.json")
Code example #6
 def make_env(self):
     config = objdict({})
     config.budget = 20
     config.datapath = "."
     config.threshold = 1e-8
     config.instance_set = [[10, 12, 0, np.ones(11)]]
     config.cutoff = 10
     config.action_space = spaces.MultiDiscrete(
         [2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3])
     config.observation_space = spaces.Box(low=-np.inf * np.ones(5),
                                           high=np.inf * np.ones(5))
     config.reward_range = (-(10**12), 0)
     env = ModeaEnv(config)
     return env
Code example #7
    def __init__(self, config_path=None):
        """
        Initialize Sigmoid Benchmark

        Parameters
        -------
        config_path : str
            Path to config file (optional)
        """
        super(SigmoidBenchmark, self).__init__(config_path)
        if not self.config:
            self.config = objdict(SIGMOID_DEFAULTS.copy())

        for key in SIGMOID_DEFAULTS:
            if key not in self.config:
                self.config[key] = SIGMOID_DEFAULTS[key]
Code example #8
File: cma_benchmark.py  Project: automl/DACBench
    def __init__(self, config_path=None, config=None):
        """
        Initialize CMA Benchmark

        Parameters
        -------
        config_path : str
            Path to config file (optional)
        config : objdict
            Benchmark config (optional)
        """
        super(CMAESBenchmark, self).__init__(config_path, config)
        if not self.config:
            self.config = objdict(CMAES_DEFAULTS.copy())

        for key in CMAES_DEFAULTS:
            if key not in self.config:
                self.config[key] = CMAES_DEFAULTS[key]
Code example #9
File: modea_benchmark.py  Project: mwever/DACBench
    def __init__(self, config_path=None):
        """
        Initialize Modea Benchmark

        Parameters
        -------
        config_path : str
            Path to config file (optional)
        """
        super(ModeaBenchmark, self).__init__(config_path)
        if not self.config:
            self.config = objdict(MODEA_DEFAULTS.copy())

        for key in MODEA_DEFAULTS:
            if key not in self.config:
                self.config[key] = MODEA_DEFAULTS[key]
Code example #10
    def __init__(self, config_path=None):
        """
        Initialize FD Benchmark

        Parameters
        -------
        config_path : str
            Path to config file (optional)
        """
        super(FastDownwardBenchmark, self).__init__(config_path)
        if not self.config:
            self.config = objdict(FD_DEFAULTS.copy())

        for key in FD_DEFAULTS:
            if key not in self.config:
                self.config[key] = FD_DEFAULTS[key]
Code example #11
File: test_sgd.py  Project: automl/DACBench
    def test_step(self):
        benchmark = SGDBenchmark()
        benchmark.config = objdict(SGD_DEFAULTS.copy())
        benchmark.read_instance_set()

        for reward_type in Reward:
            benchmark.config.reward_type = reward_type
            env = SGDEnv(benchmark.config)
            env = ObservationWrapper(env)
            self.assertTrue(env.reward_range == reward_type.func.frange)

            env.reset()
            state, reward, done, meta = env.step(1.0)
            self.assertTrue(reward >= env.reward_range[0])
            self.assertTrue(reward <= env.reward_range[1])
            self.assertFalse(done)
            self.assertTrue(len(meta.keys()) == 0)
Code example #12
    def test_getters_and_setters(self):
        bench = AbstractBenchmark()
        bench.config = objdict({"seed": 0})
        config = bench.get_config()
        self.assertTrue(issubclass(type(config), dict))

        bench.set_seed(100)
        self.assertTrue(bench.config.seed == 100)

        bench.set_action_space("Discrete", [4])
        self.assertTrue(bench.config.action_space == "Discrete")
        self.assertTrue(bench.config.action_space_args == [4])

        bench.set_observation_space("Box", [[1], [0]], float)
        self.assertTrue(bench.config.observation_space == "Box")
        self.assertTrue(bench.config.observation_space_args[0] == [1])
        self.assertTrue(bench.config.observation_space_type == float)
Code example #13
    def get_benchmark(self, dimension=None, seed=0):
        """
        Get Benchmark from DAC paper

        Parameters
        -------
        dimension : int
            Sigmoid dimension, was 1, 2, 3 or 5 in the paper
        seed : int
            Environment seed

        Returns
        -------
        env : SigmoidEnv
            Sigmoid environment
        """
        self.config = objdict(SIGMOID_DEFAULTS.copy())
        if dimension == 1:
            self.set_action_values([3])
        if dimension == 2:
            self.set_action_values([3, 3])
        if dimension == 3:
            self.set_action_values((3, 3, 3))
        if dimension == 5:
            self.set_action_values((3, 3, 3, 3, 3))
        self.config.seed = seed
        self.config.instance_set = [0]
        env = SigmoidEnv(self.config)

        def sample_sigmoid():
            rng = np.random.default_rng()
            shifts = rng.normal(
                self.config.cutoff / 2,
                self.config.cutoff / 4,
                self.config.action_space_args[0],
            )
            slopes = (
                rng.choice([-1, 1], self.config.action_space_args[0])
                * rng.uniform(size=self.config.action_space_args[0])
                * self.config.slope_multiplier
            )
            return np.concatenate((shifts, slopes))

        sampling_env = InstanceSamplingWrapper(env, sampling_function=sample_sigmoid)
        return sampling_env
Code example #14
File: modea_benchmark.py  Project: mwever/DACBench
    def get_benchmark(self, seed=0):
        """
        Get benchmark

        Parameters
        -------
        seed : int
            Environment seed

        Returns
        -------
        env : ModeaEnv
            Modea environment
        """
        self.config = objdict(MODEA_DEFAULTS.copy())
        self.config.seed = seed
        self.read_instance_set()
        return ModeaEnv(self.config)
Code example #15
    def test_objdict(self):
        d = objdict({"dummy": 0})

        self.assertTrue(d["dummy"] == d.dummy)
        with pytest.raises(KeyError):
            d["error"]
        with pytest.raises(AttributeError):
            d.error

        d["error"] = 12
        self.assertTrue(d.error == 12)
        del d.error
        self.assertFalse("error" in d.keys())

        with pytest.raises(KeyError):
            del d["error"]
        with pytest.raises(AttributeError):
            del d.error
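The test above pins down the objdict contract: item access and attribute access are interchangeable, a missing key raises KeyError, and a missing attribute raises AttributeError. A minimal illustrative sketch of a dict with that behavior (DACBench ships its own objdict in dacbench.abstract_benchmark; the class below is hypothetical):

class AttrDict(dict):
    """Illustrative stand-in for objdict: a dict with attribute-style access."""

    def __getattr__(self, name):
        try:
            return self[name]
        except KeyError:
            # missing key -> KeyError for d["x"], AttributeError for d.x
            raise AttributeError(name)

    def __setattr__(self, name, value):
        self[name] = value

    def __delattr__(self, name):
        try:
            del self[name]
        except KeyError:
            raise AttributeError(name)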
Code example #16
File: cma_benchmark.py  Project: mwever/DACBench
    def get_benchmark(self, seed=0):
        """
        Get benchmark from the LTO paper

        Parameters
        -------
        seed : int
            Environment seed

        Returns
        -------
        env : CMAESEnv
            CMAES environment
        """
        self.config = objdict(CMAES_DEFAULTS.copy())
        self.config.seed = seed
        self.read_instance_set()
        return CMAESEnv(self.config)
Code example #17
    def get_benchmark(self, seed=0):
        """
        Get published benchmark

        Parameters
        -------
        seed : int
            Environment seed

        Returns
        -------
        env : FastDownwardEnv
            FD environment
        """
        self.config = objdict(FD_DEFAULTS.copy())
        self.read_instance_set()
        self.config.seed = seed
        env = FastDownwardEnv(self.config)
        return env
Code example #18
File: test_sgd.py  Project: automl/DACBench
    def test_reward_type(self):
        benchmark = SGDBenchmark()
        benchmark.config = objdict(SGD_DEFAULTS.copy())
        benchmark.read_instance_set()

        env = SGDEnv(benchmark.config)
        self.assertEqual(env.reward_type, SGD_DEFAULTS.reward_type)

        benchmark.config.reward_type = SGD_DEFAULTS.reward_type.name
        env = SGDEnv(benchmark.config)
        self.assertEqual(env.reward_type, SGD_DEFAULTS.reward_type)

        benchmark.config.reward_type = 'invalid_reward'
        with self.assertRaises(ValueError):
            env = SGDEnv(benchmark.config)

        benchmark.config.reward_type = 0
        with self.assertRaises(ValueError):
            env = SGDEnv(benchmark.config)
Code example #19
    def __init__(self, config_path=None, **kwargs):
        """
        Initialize SGD Benchmark

        Parameters
        -------
        config_path : str
            Path to config file (optional)
        **kwargs
            Keyword arguments that overwrite individual config entries
        """
        super(SGDBenchmark, self).__init__(config_path)
        if not self.config:
            self.config = objdict(SGD_DEFAULTS.copy())

        for key in SGD_DEFAULTS:
            if key not in self.config:
                self.config[key] = SGD_DEFAULTS[key]

        for k in kwargs:
            self.config[k] = kwargs[k]
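Since the constructor copies any extra keyword arguments straight into the config, individual defaults can be overridden at construction time. A short sketch (key names taken from SGD_DEFAULTS in code example #30; the import path is assumed):

# Override selected SGD defaults via keyword arguments (sketch)
from dacbench.benchmarks import SGDBenchmark

bench = SGDBenchmark(lr=1e-2, training_batch_size=128, seed=3)
assert bench.config.lr == 1e-2
assert bench.config.training_batch_size == 128
assert bench.config.seed == 3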
Code example #20
    def get_benchmark(self, instance_set_path=None, seed=0):
        """
        Get benchmark from the LTO paper

        Parameters
        -------
        instance_set_path : str
            Path to the instance set (optional)
        seed : int
            Environment seed

        Returns
        -------
        env : SGDEnv
            SGD environment
        """
        self.config = objdict(SGD_DEFAULTS.copy())
        if instance_set_path is not None:
            self.config["instance_set_path"] = instance_set_path
        self.config.seed = seed
        self.read_instance_set()
        return SGDEnv(self.config)
Code example #21
    def get_benchmark(self, dimension=None, seed=0):
        """
        Get Benchmark from DAC paper

        Parameters
        -------
        dimension : int
            Sigmoid dimension, was 1, 2, 3 or 5 in the paper
        seed : int
            Environment seed

        Returns
        -------
        env : SigmoidEnv
            Sigmoid environment
        """
        self.config = objdict(SIGMOID_DEFAULTS.copy())
        if dimension == 1:
            self.set_action_values([3])
            self.config.instance_set_path = (
                "../instance_sets/sigmoid/sigmoid_1D3M_train.csv")
            self.config.benchmark_info["state_description"] = [
                "Remaining Budget",
                "Shift (dimension 1)",
                "Slope (dimension 1)",
                "Action",
            ]
        if dimension == 2:
            self.set_action_values([3, 3])
        if dimension == 3:
            self.set_action_values((3, 3, 3))
            self.config.instance_set_path = (
                "../instance_sets/sigmoid/sigmoid_3D3M_train.csv")
            self.config.benchmark_info["state_description"] = [
                "Remaining Budget",
                "Shift (dimension 1)",
                "Slope (dimension 1)",
                "Shift (dimension 2)",
                "Slope (dimension 2)",
                "Shift (dimension 3)",
                "Slope (dimension 3)",
                "Action 1",
                "Action 2",
                "Action 3",
            ]
        if dimension == 5:
            self.set_action_values((3, 3, 3, 3, 3))
            self.config.instance_set_path = (
                "../instance_sets/sigmoid/sigmoid_5D3M_train.csv")
            self.config.benchmark_info["state_description"] = [
                "Remaining Budget",
                "Shift (dimension 1)",
                "Slope (dimension 1)",
                "Shift (dimension 2)",
                "Slope (dimension 2)",
                "Shift (dimension 3)",
                "Slope (dimension 3)",
                "Shift (dimension 4)",
                "Slope (dimension 4)",
                "Shift (dimension 5)",
                "Slope (dimension 5)",
                "Action 1",
                "Action 2",
                "Action 3",
                "Action 4",
                "Action 5",
            ]
        self.config.seed = seed
        self.read_instance_set()
        env = SigmoidEnv(self.config)
        return env
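A short usage sketch for the dimension-specific benchmark above (import path and the random-action step are assumptions of the sketch):

# Usage sketch (assumes the usual DACBench import path)
from dacbench.benchmarks import SigmoidBenchmark

bench = SigmoidBenchmark()
env = bench.get_benchmark(dimension=3, seed=0)  # 3D sigmoid, 3 action values per dimension

state = env.reset()
state, reward, done, info = env.step(env.action_space.sample())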
Code example #22
    ],
}

SIGMOID_DEFAULTS = objdict({
    "action_space_class": "Discrete",
    "action_space_args": [int(np.prod(ACTION_VALUES))],
    "observation_space_class": "Box",
    "observation_space_type": np.float32,
    "observation_space_args": [
        np.array([-np.inf for _ in range(1 + len(ACTION_VALUES) * 3)]),
        np.array([np.inf for _ in range(1 + len(ACTION_VALUES) * 3)]),
    ],
    "reward_range": (0, 1),
    "cutoff": 10,
    "action_values": ACTION_VALUES,
    "slope_multiplier": 2.0,
    "seed": 0,
    "instance_set_path": "../instance_sets/sigmoid/sigmoid_2D3M_train.csv",
    "benchmark_info": INFO,
})


class SigmoidBenchmark(AbstractBenchmark):
Code example #23
File: modea_benchmark.py  Project: mwever/DACBench
        "Generation Size", "Sigma", "Remaining Budget", "Function ID",
        "Instance ID"
    ]
}

MODEA_DEFAULTS = objdict({
    "action_space_class": "MultiDiscrete",
    "action_space_args": [[2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3]],
    "observation_space_class": "Box",
    "observation_space_args": [-np.inf * np.ones(5), np.inf * np.ones(5)],
    "observation_space_type": np.float32,
    "reward_range": (-(10**12), 0),
    "budget": 100,
    "cutoff": 1e6,
    "seed": 0,
    "instance_set_path": "../instance_sets/modea/modea_train.csv",
    "benchmark_info": INFO,
})


class ModeaBenchmark(AbstractBenchmark):
    """
    Benchmark with default configuration & relevant functions for Modea
Code example #24
FD_DEFAULTS = objdict(
    {
        "heuristics": HEURISTICS,
        "action_space_class": "Discrete",
        "action_space_args": [len(HEURISTICS)],
        "observation_space_class": "Box",
        "observation_space_type": np.float32,
        "observation_space_args": [
            np.array([-np.inf for _ in range(5 * len(HEURISTICS))]),
            np.array([np.inf for _ in range(5 * len(HEURISTICS))]),
        ],
        "reward_range": (-np.inf, 0),
        "cutoff": 1e6,
        "use_general_state_info": True,
        "host": "",
        "port": 54322,
        "control_interval": 0,
        "fd_seed": 0,
        "num_steps": None,
        "state_type": 2,
        "config_dir": ".",
        "port_file_id": None,
        "seed": 0,
        "max_rand_steps": 0,
        "instance_set_path": "../instance_sets/fast_downward/train",
        "fd_path": os.path.dirname(os.path.abspath(__file__))
        + "/../envs/rl-plan/fast-downward/fast-downward.py",
        "parallel": True,
        "fd_logs": None,
    }
)
Code example #25
File: cma_benchmark.py  Project: automl/DACBench
CMAES_DEFAULTS = objdict({
    "action_space_class": "Box",
    "action_space_args": [np.array([0]), np.array([10])],
    "config_space": DEFAULT_CFG_SPACE,
    "observation_space_class": "Dict",
    "observation_space_type": None,
    "observation_space_args": [
        {
            "current_loc": spaces.Box(
                low=-np.inf, high=np.inf, shape=np.arange(INPUT_DIM).shape
            ),
            "past_deltas": spaces.Box(
                low=-np.inf, high=np.inf, shape=np.arange(HISTORY_LENGTH).shape
            ),
            "current_ps": spaces.Box(low=-np.inf, high=np.inf, shape=(1,)),
            "current_sigma": spaces.Box(low=-np.inf, high=np.inf, shape=(1,)),
            "history_deltas": spaces.Box(
                low=-np.inf, high=np.inf, shape=np.arange(HISTORY_LENGTH * 2).shape
            ),
            "past_sigma_deltas": spaces.Box(
                low=-np.inf, high=np.inf, shape=np.arange(HISTORY_LENGTH).shape
            ),
        }
    ],
    "reward_range": (-(10**9), 0),
    "cutoff": 1e6,
    "hist_length": HISTORY_LENGTH,
    "popsize": 10,
    "seed": 0,
    "instance_set_path": "../instance_sets/cma/cma_train.csv",
    "test_set_path": "../instance_sets/cma/cma_test.csv",
    "benchmark_info": INFO,
})
Code example #26
File: configs.py  Project: automl/DACBench
onell_lbd_theory = objdict({
    "name": "lbd_theory",
    "action_space_class": "Box",
    "action_space_args": [np.array([1]), np.array([np.inf])],
    "action_description": "lbd",
    "observation_space_class": "Box",
    "observation_space_type": np.int32,
    "observation_space_args": [np.array([1, 0]), np.array([np.inf, np.inf])],
    "observation_description": "n, f(x)",
    # the true reward range is instance dependent
    "reward_range": [-np.inf, np.inf],
    # we don't really use this; the real cutoff is in instance_set_path
    # and is instance dependent
    "cutoff": 1e9,
    # if True, xprime is included in the selection after the crossover phase
    "include_xprime": True,
    # if True, only count an evaluation of a child if it is different
    # from both of its parents
    "count_different_inds_only": True,
    "seed": 0,
    "problem": "OneMax",
    "instance_set_path": "../instance_sets/onell/onemax_2000.csv",
    "benchmark_info": INFO,
})
Code example #27
 def test_attributes(self):
     bench = AbstractBenchmark()
     bench.config = objdict({"seed": 0})
     self.assertTrue(bench.config.seed == bench.config["seed"])
     bench.config.seed = 42
     self.assertTrue(bench.config["seed"] == 42)
Code example #28
File: sigmoid.py  Project: automl/DACBench
if __name__ == '__main__':
    # np and ContinuousSigmoidEnv come from earlier in this file
    # (not shown in this excerpt)
    from dacbench.abstract_benchmark import objdict
    config = objdict(
        {
            "action_space_class": "Box",
            "action_space_args": [
                np.array([-np.inf for _ in range(1 + 2 * 3)]),
                np.array([np.inf for _ in range(1 + 2 * 3)]),
            ],
            "observation_space_class": "Box",
            "observation_space_type": np.float32,
            "observation_space_args": [
                np.array([-np.inf for _ in range(1 + 2 * 3)]),
                np.array([np.inf for _ in range(1 + 2 * 3)]),
            ],
            "reward_range": (0, 1),
            "cutoff": 10,
            "action_values": (2, 2),
            "slope_multiplier": 2.0,
            "seed": 0,
            "instance_set_path": "../instance_sets/sigmoid/sigmoid_2D3M_train.csv",
            "benchmark_info": None,
            'instance_set': {0:[5.847291747472278,6.063505157165379,5.356361033331866,8.473324526654427],
                             1:[5.699459023308639,0.17993881762205755,3.4218338308013356,8.486280024502191],
                             2:[5.410536230957515,5.700091608324946,-5.3540400976249165,2.76787147719077],
                             3:[1.5799464875295817,6.374885201056433,1.0378986341827443,4.219330699379608],
                             4:[2.61235568666599,6.478051235772757,7.622760392199338,-3.0898869570275167]},
        }
    )
    env = ContinuousSigmoidEnv(config)
    done = False
Code example #29
import os
import csv

import numpy as np

# objdict, AbstractBenchmark and luby_gen come from the DACBench package;
# their imports are not shown in this excerpt

MAX_STEPS = 2 ** 6
LUBY_SEQUENCE = np.log2([next(luby_gen(i)) for i in range(1, 2 * MAX_STEPS + 2)])
HISTORY_LENGTH = 5

LUBY_DEFAULTS = objdict(
    {
        "action_space_class": "Discrete",
        "action_space_args": [int(np.log2(MAX_STEPS))],
        "observation_space_class": "Box",
        "observation_space_type": np.float32,
        "observation_space_args": [
            np.array([-1 for _ in range(HISTORY_LENGTH + 1)]),
            np.array([2 ** max(LUBY_SEQUENCE + 1) for _ in range(HISTORY_LENGTH + 1)]),
        ],
        "reward_range": (-1, 0),
        "cutoff": MAX_STEPS,
        "hist_length": HISTORY_LENGTH,
        "min_steps": 2 ** 3,
        "seed": 0,
        "instance_set_path": "../instance_sets/luby/luby_default.csv",
    }
)


class LubyBenchmark(AbstractBenchmark):
    """
    Benchmark with default configuration & relevant functions for Luby
    """
Code example #30
SGD_DEFAULTS = objdict(
    {
        "config_space": DEFAULT_CFG_SPACE,
        "action_space_class": "Box",
        "action_space_args": [np.array([0]), np.array([10])],
        "observation_space_class": "Dict",
        "observation_space_type": None,
        "observation_space_args": [
            {
                "predictiveChangeVarDiscountedAverage": spaces.Box(
                    low=-np.inf, high=np.inf, shape=(1,)
                ),
                "predictiveChangeVarUncertainty": spaces.Box(
                    low=0, high=np.inf, shape=(1,)
                ),
                "lossVarDiscountedAverage": spaces.Box(
                    low=-np.inf, high=np.inf, shape=(1,)
                ),
                "lossVarUncertainty": spaces.Box(low=0, high=np.inf, shape=(1,)),
                "currentLR": spaces.Box(low=0, high=1, shape=(1,)),
                "trainingLoss": spaces.Box(low=0, high=np.inf, shape=(1,)),
                "validationLoss": spaces.Box(low=0, high=np.inf, shape=(1,)),
                "step": spaces.Box(low=0, high=np.inf, shape=(1,)),
                "alignment": spaces.Box(low=0, high=1, shape=(1,)),
                "crashed": spaces.Discrete(2),
            }
        ],
        "reward_type": Reward.LogDiffTraining,
        "cutoff": 1e3,
        "lr": 1e-3,
        "discount_factor": 0.9,
        "optimizer": "rmsprop",
        "loss_function": __default_loss_function,
        "loss_function_kwargs": {},
        "val_loss_function": __default_loss_function,
        "val_loss_function_kwargs": {},
        "training_batch_size": 64,
        "validation_batch_size": 64,
        "train_validation_ratio": 0.8,
        "dataloader_shuffle": True,
        "no_cuda": False,
        "beta1": 0.9,
        "beta2": 0.9,
        "epsilon": 1.0e-06,
        "clip_grad": (-1.0, 1.0),
        "seed": 0,
        "cd_paper_reconstruction": False,
        "cd_bias_correction": True,
        "terminate_on_crash": False,
        "crash_penalty": 0.0,
        "instance_set_path": "../instance_sets/sgd/sgd_train_100instances.csv",
        "benchmark_info": INFO,
        "features": [
            "predictiveChangeVarDiscountedAverage",
            "predictiveChangeVarUncertainty",
            "lossVarDiscountedAverage",
            "lossVarUncertainty",
            "currentLR",
            "trainingLoss",
            "validationLoss",
            "step",
            "alignment",
            "crashed"
        ],
    }
)