def test_everything_with_rules(self):
    """Run MCTS on a two-branch choice scenario constrained by child/value rules.

    Builds two unordered workflow branches, a sampler covering every
    hyper-parameter, and a rule set (one ChildRule plus two ValueRules),
    then drives 100 MCTS iterations with a random evaluation function —
    only the search mechanics are exercised, not model quality.
    """

    def choice_a():
        return 0

    def choice_b():
        return 0

    def choice_c():
        return 0

    branch_one = WorkflowListTask(is_ordered=False, name="x1",
                                  tasks=["x1__p1", "x1__p2", "x1__p3"])
    branch_two = WorkflowListTask(is_ordered=False, name="x2",
                                  tasks=["x2__p1", "x2__p2", "x2__p3"])
    root = WorkflowChoiceScenario(name="root", scenarios=[branch_one, branch_two])

    sampler = {
        "x1__p1": Parameter("x1__p1", [0, 1], "uniform", "float"),
        "x1__p2": Parameter("x1__p2", [1, 2], "choice", "int"),
        "x1__p3": Parameter("x1__p3", ["v", "w", "x", "y", "z"], "choice", "string"),
        "x2__p1": Parameter("x2__p1", ["a", "b", "c"], "choice", "string"),
        "x2__p2": Parameter("x2__p2", [choice_a, choice_b, choice_c], "choice", "int"),
        "x2__p3": Parameter("x2__p3", "lol", "constant", "string"),
    }

    rules = [
        ChildRule(applied_to=["x2__p2"], parent="x2__p1", value=["a", "c"]),
        ValueRule(constraints=[("x1__p2", 1), ("x1__p3", "v")]),
        ValueRule(constraints=[("x1__p2", 2), ("x1__p3", "w")]),
    ]

    def evaluate(config, bestconfig):
        # Random score: the test only checks that the search runs cleanly.
        return random.uniform(0, 1)

    env = Env(evaluate, scenario=root, sampler=sampler, rules=rules)
    mcts = MCTS(env=env)
    mcts.run(n=100)
class Search:
    """Search optimal pipeline using Monte-Carlo Tree Search.

    Parameters
    ----------
    environment: object
        environment class extending AbstractEnvironment
    time_budget: int
        overall time budget
    seed: int
        random seed
    bandit_policy: dict
        bandit policy used in MCTS. Available choices are uct, besa, puct.
        Example {"policy_name": "uct", "c_ub": 1.41}, {"policy_name": "besa"}
    exec_dir: str
        directory to store tmp files

    Attributes
    ----------
    logger: class <logging>
        Logger used
    mcts: class <mosaic.MCTS>
        object that runs the MCTS algorithm
    """

    def __init__(self, environment, time_budget=3600, verbose=False,
                 exec_dir=None, bandit_policy=None, seed=1,
                 coef_progressive_widening=0.6):
        """Init method."""
        # config logger
        self.logger = logging.getLogger('mcts')
        self.logger.setLevel(logging.DEBUG)

        # Default bandit policy: UCT with the classic sqrt(2) exploration constant.
        if bandit_policy is None:
            bandit_policy = {"policy_name": "uct", "c_uct": np.sqrt(2)}

        # Execution directory: fresh temp dir when none is given.
        if exec_dir is None:
            exec_dir = tempfile.mkdtemp()
        else:
            # exist_ok=True so a pre-existing directory is reused instead of
            # raising FileExistsError (the original crashed on reruns).
            os.makedirs(exec_dir, exist_ok=True)

        hdlr = logging.FileHandler(os.path.join(exec_dir, "mcts.log"), mode='w')
        formatter = logging.Formatter(
            '%(asctime)s :: %(levelname)s :: %(funcName)s :: %(message)s')
        hdlr.setFormatter(formatter)
        self.logger.addHandler(hdlr)

        if verbose:
            # Mirror log records to stdout when verbose is requested.
            handler = logging.StreamHandler(sys.stdout)
            handler.setLevel(logging.DEBUG)
            handler.setFormatter(formatter)
            self.logger.addHandler(handler)

        self.mcts = MCTS(env=environment,
                         time_budget=time_budget,
                         exec_dir=exec_dir,
                         bandit_policy=bandit_policy,
                         coef_progressive_widening=coef_progressive_widening)

        np.random.seed(seed)

    def run(self, nb_simulation=10, initial_configurations=None,
            step_to_generate_img=-1):
        """Run MCTS algorithm.

        Parameters
        ----------
        nb_simulation: int
            number of MCTS simulations to run (default is 10)
        initial_configurations: list of object, optional
            set of configurations to start with (default is an empty list)
        step_to_generate_img: int or None
            generate an image every this many MCTS iterations (default -1,
            generate an image for each iteration). Do not generate images
            if None.

        Returns
        -------
        configuration: object
            best configuration found and its score
        """
        # None sentinel instead of a mutable [] default: a shared list default
        # would accumulate state across successive run() calls.
        if initial_configurations is None:
            initial_configurations = []
        self.logger.info("# Run {0} iterations of MCTS".format(nb_simulation))
        self.mcts.run(nb_simulation, initial_configurations,
                      step_to_generate_img)
        return self.mcts.best_config, self.mcts.best_score
class Search:
    """Main class to tune pipeline using Monte-Carlo Tree Search.

    NOTE(review): this file defines a second class with the same name;
    whichever is defined last shadows the other at import time — confirm
    which version callers expect.

    Attributes
    ----------
    mcts : class <mosaic.MCTS>
        object that runs the MCTS algorithm

    Methods
    -------
    run(nb_simulation=10, initial_configurations=None, nb_iter_to_generate_img=1)
        Run nb_simulation iterations of MCTS, seeded with initial_configurations
    """

    def __init__(self, environment, time_budget=3600, seed=1, policy_arg={},
                 exec_dir=None, verbose=False):
        """Initialization algorithm.

        :param environment: environment class extending AbstractEnvironment
        :param time_budget: overall time budget
        :param seed: random seed
        :param policy_arg: specific option for MCTS policy
        :param exec_dir: directory to store tmp files
        :param verbose: also mirror log records to stdout
        """
        # config logger
        self.logger = logging.getLogger('mcts')
        self.logger.setLevel(logging.DEBUG)

        # Execution directory: fresh temp dir when none is given.
        if exec_dir is None:
            exec_dir = tempfile.mkdtemp()
        else:
            # exist_ok=True so a pre-existing directory is reused instead of
            # raising FileExistsError.
            os.makedirs(exec_dir, exist_ok=True)

        hdlr = logging.FileHandler(os.path.join(exec_dir, "mcts.log"), mode='w')
        formatter = logging.Formatter(
            '%(asctime)s :: %(levelname)s :: %(funcName)s :: %(message)s')
        hdlr.setFormatter(formatter)
        self.logger.addHandler(hdlr)

        if verbose:
            handler = logging.StreamHandler(sys.stdout)
            handler.setLevel(logging.DEBUG)
            handler.setFormatter(formatter)
            self.logger.addHandler(handler)

        env = environment
        self.mcts = MCTS(env=env,
                         time_budget=time_budget,
                         policy_arg=policy_arg,
                         exec_dir=exec_dir)

        np.random.seed(seed)

    def run(self, nb_simulation=10, initial_configurations=None,
            nb_iter_to_generate_img=1, pb=None, status_txt=None,
            chart=None, status_txt2=None):
        """Run MCTS algorithm.

        :param nb_simulation: number of MCTS simulations to run
        :param initial_configurations: set of initial configurations, optional
        :param nb_iter_to_generate_img: generate an image every this many
            iterations, optional
        :param pb: opaque object forwarded to MCTS.run (presumably a
            progress bar — confirm against MCTS), optional
        :param status_txt: opaque object forwarded to MCTS.run, optional
        :param chart: opaque object forwarded to MCTS.run, optional
        :param status_txt2: opaque object forwarded to MCTS.run, optional
        :return: best configuration found and its score
        """
        # None sentinel instead of a mutable [] default: a shared list default
        # would accumulate state across successive run() calls.
        if initial_configurations is None:
            initial_configurations = []
        self.logger.info("# Run {0} iterations of MCTS".format(nb_simulation))
        self.mcts.run(nb_simulation, initial_configurations,
                      nb_iter_to_generate_img, pb, status_txt, chart,
                      status_txt2)
        # Original had a bare `return` with the result orphaned on the next
        # line (so callers got None); rejoined so the result is returned.
        return self.mcts.bestconfig, self.mcts.bestscore