def __init__(self, problem, run, evaluator, **kwargs):
    """Initialize the search.

    :param problem: module path to a ``Problem`` instance.
    :param run: module path to the ``run`` callable executed per evaluation.
    :param evaluator: name of the Evaluator backend to create.
    :param kwargs: extra options merged into the parsed defaults; may include
        ``cache_key`` to enable evaluator-side result caching.
    """
    _args = vars(self.parse_args(''))
    kwargs['problem'] = problem
    kwargs['run'] = run
    kwargs['evaluator'] = evaluator
    # kwargs already carries 'problem' and 'run', so a single update is enough
    # (the original re-assigned both into _args redundantly afterwards).
    _args.update(kwargs)
    self.args = Namespace(**_args)

    self.problem = util.generic_loader(problem, 'Problem')
    self.run_func = util.generic_loader(run, 'run')
    logger.info('Evaluator will execute the function: ' + run)

    # Only forward cache_key when the caller explicitly provided one.
    if kwargs.get('cache_key') is None:
        self.evaluator = Evaluator.create(self.run_func, method=evaluator)
    else:
        self.evaluator = Evaluator.create(
            self.run_func, method=evaluator, cache_key=kwargs['cache_key'])
    self.num_workers = self.evaluator.num_workers

    logger.info('Options: ' + pformat(self.args.__dict__, indent=4))
    logger.info('Hyperparameter space definition: '
                + pformat(self.problem.space, indent=4))
    logger.info(f'Created {self.args.evaluator} evaluator')
    logger.info(f'Evaluator: num_workers is {self.num_workers}')
def __init__(self, comm, problem, run, evaluator, **kwargs):
    """Set up the MPI context, load the problem and run function, and build the evaluator."""
    # Remember the communicator and this process's rank within it.
    self.comm = comm
    self.rank = comm.Get_rank()
    # Resolve module paths to the actual objects.
    load = util.generic_loader
    self.problem = load(problem, "Problem")
    self.run_func = load(run, "run")
    # Wrap the run function in the requested evaluation backend.
    self.evaluator = Evaluator.create(self.run_func, method=evaluator)
def __init__(
    self,
    problem: str,
    run: str,
    evaluator: str,
    max_evals: int = 1000000,
    seed: int = None,
    num_nodes_master: int = 1,
    num_workers: int = None,
    # BUGFIX: log_dir is a path string, not an int — annotation corrected.
    log_dir: str = None,
    **kwargs,
):
    """Initialize the search.

    :param problem: module path to a ``Problem`` instance.
    :param run: module path to the ``run`` callable to maximize.
    :param evaluator: name of the Evaluator backend to create.
    :param max_evals: maximum number of evaluations to perform.
    :param seed: random seed; overridden by the problem's own seed when set.
    :param num_nodes_master: number of nodes reserved for the master.
    :param num_workers: number of evaluation workers.
    :param log_dir: directory for logs; defaults to the current directory.
    """
    kwargs["problem"] = problem
    kwargs["run"] = run
    kwargs["evaluator"] = evaluator
    kwargs["max_evals"] = max_evals  # * For retro compatibility
    kwargs["seed"] = seed

    # Loading problem instance and run function
    self.problem = util.generic_loader(problem, "Problem")
    # BUGFIX: compare against None with ``is``, not ``==``.
    if self.problem.seed is None:
        self.problem.seed = seed
    else:
        # The problem's own seed takes precedence over the argument.
        kwargs["seed"] = self.problem.seed
    self.run_func = util.generic_loader(run, "run")

    notice = f"Maximizing the return value of function: {run}"
    logger.info(notice)
    util.banner(notice)

    self.evaluator = Evaluator.create(
        self.run_func,
        method=evaluator,
        num_nodes_master=num_nodes_master,
        num_workers=num_workers,
        **kwargs,
    )
    self.num_workers = self.evaluator.num_workers
    self.max_evals = max_evals
    self.log_dir = os.getcwd() if log_dir is None else log_dir

    # set the random seed
    np.random.seed(self.problem.seed)

    logger.info("Options: " + pformat(kwargs, indent=4))
    logger.info("Hyperparameter space definition: "
                + pformat(self.problem.space, indent=4))
    logger.info(f"Created {evaluator} evaluator")
    logger.info(f"Evaluator: num_workers is {self.num_workers}")
def validate(problem, run, workflow):
    """Validate problem, run, and workflow.

    Exits the process (status 1) when Balsam jobs already exist for the
    given workflow name.
    """
    from balsam.core.models import BalsamJob

    print("Validating Problem...", end="", flush=True)
    prob = generic_loader(problem, "Problem")
    assert isinstance(prob, (Problem, BaseProblem)), f"{prob} is not a Problem instance"
    print("OK", flush=True)

    print("Validating run...", end="", flush=True)
    run = generic_loader(run, "run")
    # BUGFIX: message typo "a a callable" -> "a callable".
    assert callable(run), f"{run} must be a callable"
    print("OK", flush=True)

    # Workflow names must be unique among existing Balsam jobs.
    qs = BalsamJob.objects.filter(workflow=workflow)
    if qs.exists():
        print(f"There are already jobs matching workflow {workflow}")
        print("Please remove these, or use a unique workflow name")
        sys.exit(1)
def validate(problem, run, workflow):
    """Validate problem, run, and workflow.

    Exits the process (status 1) when Balsam jobs already exist for the
    given workflow name.
    """
    from balsam.core.models import BalsamJob

    print("Validating Problem...", end='', flush=True)
    prob = generic_loader(problem, 'Problem')
    assert isinstance(prob, Problem), f'{prob} is not a Problem instance'
    print("OK", flush=True)

    print("Validating run...", end='', flush=True)
    run = generic_loader(run, 'run')
    # BUGFIX: message typo "a a callable" -> "a callable".
    assert callable(run), f'{run} must be a callable'
    print("OK", flush=True)

    # Workflow names must be unique among existing Balsam jobs.
    qs = BalsamJob.objects.filter(workflow=workflow)
    if qs.exists():
        print(f'There are already jobs matching workflow {workflow}')
        print('Please remove these, or use a unique workflow name')
        sys.exit(1)
def __init__(self, problem: str, run: str, evaluator: str, max_evals: int = 100, seed: int = None, **kwargs):
    """Initialize the search.

    :param problem: module path to a ``Problem`` instance.
    :param run: module path to the ``run`` callable to maximize.
    :param evaluator: name of the Evaluator backend to create.
    :param max_evals: maximum number of evaluations to perform.
    :param seed: random seed; overridden by the problem's own seed when set.
    """
    kwargs['problem'] = problem
    kwargs['run'] = run
    kwargs['evaluator'] = evaluator
    kwargs['max_evals'] = max_evals  # * For retro compatibility
    kwargs['seed'] = seed

    self.problem = util.generic_loader(problem, 'Problem')
    # BUGFIX: compare against None with ``is``, not ``==``.
    if self.problem.seed is None:
        self.problem.seed = seed
    else:
        # The problem's own seed takes precedence over the argument.
        kwargs['seed'] = self.problem.seed
    self.run_func = util.generic_loader(run, 'run')

    notice = f'Maximizing the return value of function: {run}'
    logger.info(notice)
    util.banner(notice)

    self.evaluator = Evaluator.create(self.run_func, method=evaluator, **kwargs)
    self.num_workers = self.evaluator.num_workers
    self.max_evals = max_evals

    # set the random seed
    np.random.seed(self.problem.seed)

    logger.info('Options: ' + pformat(kwargs, indent=4))
    logger.info('Hyperparameter space definition: '
                + pformat(self.problem.space, indent=4))
    logger.info(f'Created {evaluator} evaluator')
    logger.info(f'Evaluator: num_workers is {self.num_workers}')
def validate(problem, run, workflow):
    """Validate problem, run, and workflow"""
    current_dir = os.getcwd()

    # The workflow name must not collide with an existing entry here.
    print("Validating Workflow...", end="", flush=True)
    assert workflow not in os.listdir(current_dir), \
        f"{workflow} already exist in current directory"
    print("OK", flush=True)

    # The problem module must expose a Problem instance.
    print("Validating Problem...", end="", flush=True)
    prob = generic_loader(problem, "Problem")
    assert isinstance(prob, (Problem, BaseProblem)), \
        f"{prob} is not a Problem instance"
    print("OK", flush=True)

    #! issue if some packages can't be imported from login nodes...
    # print("Validating run...", end="", flush=True)
    # run = generic_loader(run, "run")
    # assert callable(run), f"{run} must be a a callable"
    print("OK", flush=True)
def __init__(self, problem, fbest, evaluator, **kwargs):
    """Initialize the search: compute free workers, merge options, and build the evaluator."""
    # Free workers exist only in the MPI + balsam configuration, where
    # launcher nodes not occupied by MPI ranks can run evaluations.
    if MPI is not None and evaluator == 'balsam':
        nranks = MPI.COMM_WORLD.Get_size()
        launcher_nodes = int(os.environ.get('BALSAM_LAUNCHER_NODES', 1))
        workers_per_node = int(os.environ.get('DEEPHYPER_WORKERS_PER_NODE', 1))
        n_free_nodes = launcher_nodes - nranks  # Number of free nodes
        self.free_workers = n_free_nodes * workers_per_node  # Number of free workers
    else:
        self.free_workers = 1

    # Merge parsed defaults with the caller's options.
    options = vars(self.parse_args(''))
    kwargs['problem'] = problem
    kwargs['p_f_best'] = fbest
    kwargs['evaluator'] = evaluator
    options.update(kwargs)
    self.args = Namespace(**options)

    self.problem = util.generic_loader(problem, 'Problem')

    # Only forward cache_key when the caller explicitly provided one.
    cache_key = kwargs.get('cache_key')
    if cache_key is None:
        self.evaluator = Evaluator.create(run_function=train, method=evaluator)
    else:
        self.evaluator = Evaluator.create(
            run_function=train, method=evaluator, cache_key=cache_key)
    self.num_workers = self.evaluator.num_workers

    logger.info('Options: ' + pformat(self.args.__dict__, indent=4))
    logger.info('Problem definition: ' + pformat(self.problem.space, indent=4))
    logger.info(f'Created {self.args.evaluator} evaluator')
    logger.info(f'Evaluator: num_workers is {self.num_workers}')
def __init__(self, problem, fbest, evaluator, **kwargs):
    """Initialize the search: compute free workers, merge options, and build the evaluator."""
    # Default: a single worker, unless running under MPI with balsam.
    self.free_workers = 1
    if MPI is not None:
        nranks = MPI.COMM_WORLD.Get_size()
        if evaluator == "balsam":
            # Launcher nodes not occupied by MPI ranks can run evaluations.
            launcher_nodes = int(os.environ.get("BALSAM_LAUNCHER_NODES", 1))
            per_node = int(os.environ.get("DEEPHYPER_WORKERS_PER_NODE", 1))
            n_free_nodes = launcher_nodes - nranks  # Number of free nodes
            self.free_workers = n_free_nodes * per_node  # Number of free workers

    # Merge parsed defaults with the caller's options.
    merged = vars(self.parse_args(""))
    kwargs["problem"] = problem
    kwargs["p_f_best"] = fbest
    kwargs["evaluator"] = evaluator
    merged.update(kwargs)
    self.args = Namespace(**merged)

    self.problem = util.generic_loader(problem, "Problem")

    # Only forward cache_key when the caller explicitly provided one.
    if kwargs.get("cache_key") is not None:
        self.evaluator = Evaluator.create(
            run_function=train, method=evaluator, cache_key=kwargs["cache_key"])
    else:
        self.evaluator = Evaluator.create(run_function=train, method=evaluator)
    self.num_workers = self.evaluator.num_workers

    logger.info("Options: " + pformat(self.args.__dict__, indent=4))
    logger.info("Problem definition: " + pformat(self.problem.space, indent=4))
    logger.info(f"Created {self.args.evaluator} evaluator")
    logger.info(f"Evaluator: num_workers is {self.num_workers}")