def test_srbf_async():
    """Run the SRBF strategy asynchronously on Ackley and check the result.

    Uses a thread controller with ``num_threads`` workers (module-level
    fixture) and validates the finished run via ``check_strategy``.
    """
    max_evals = 200
    # FIX: pass lb/ub so the interpolant can scale points to the unit box;
    # every other test in this file constructs its surrogate this way.
    rbf = RBFInterpolant(
        dim=ackley.dim, lb=ackley.lb, ub=ackley.ub,
        kernel=CubicKernel(), tail=LinearTail(ackley.dim))
    slhd = SymmetricLatinHypercube(
        dim=ackley.dim, num_pts=2 * (ackley.dim + 1))

    # Create a strategy and a controller; batch_size=None means fully
    # asynchronous proposal generation.
    controller = ThreadController()
    controller.strategy = SRBFStrategy(
        max_evals=max_evals, opt_prob=ackley, exp_design=slhd,
        surrogate=rbf, asynchronous=True, batch_size=None)

    # Launch the threads and give them access to the objective function
    for _ in range(num_threads):
        worker = BasicWorkerThread(controller, ackley.eval)
        controller.launch_worker(worker)
    controller.run()

    check_strategy(controller)
def init_serial():
    """Start a serial DYCORS run on Ackley under a checkpoint controller."""
    surrogate = RBFInterpolant(dim=ackley.dim, lb=ackley.lb, ub=ackley.ub,
                               kernel=CubicKernel(),
                               tail=LinearTail(ackley.dim))
    design = SymmetricLatinHypercube(dim=ackley.dim,
                                     num_pts=2 * (ackley.dim + 1))

    # Single-process controller driving an asynchronous DYCORS strategy
    serial = SerialController(ackley.eval)
    serial.strategy = DYCORSStrategy(max_evals=max_evals, opt_prob=ackley,
                                     exp_design=design, surrogate=surrogate,
                                     asynchronous=True)

    # Wrap controller in checkpoint object so the run can be resumed
    controller = CheckpointController(serial, fname=fname)
    controller.run()
def test_sop_serial():
    """Run the SOP strategy serially on Ackley and check the result."""
    budget = 200
    surrogate = RBFInterpolant(dim=ackley.dim, lb=ackley.lb, ub=ackley.ub,
                               kernel=CubicKernel(),
                               tail=LinearTail(ackley.dim))
    design = SymmetricLatinHypercube(dim=ackley.dim,
                                     num_pts=2 * (ackley.dim + 1))

    # Serial controller with a 4-center SOP strategy
    controller = SerialController(ackley.eval)
    controller.strategy = SOPStrategy(max_evals=budget, opt_prob=ackley,
                                      exp_design=design, surrogate=surrogate,
                                      asynchronous=True, ncenters=4)
    controller.run()

    check_strategy(controller)
def init():
    """Start a fresh threaded SRBF run on Ackley and report the best point."""
    print("\nInitializing run...")

    surrogate = RBFInterpolant(dim=ackley.dim, lb=ackley.lb, ub=ackley.ub,
                               kernel=CubicKernel(),
                               tail=LinearTail(ackley.dim))
    design = SymmetricLatinHypercube(dim=ackley.dim,
                                     num_pts=2 * (ackley.dim + 1))

    # Thread controller with one proposal batch per worker pool
    controller = ThreadController()
    controller.strategy = SRBFStrategy(max_evals=max_evals, opt_prob=ackley,
                                       exp_design=design, surrogate=surrogate,
                                       asynchronous=True,
                                       batch_size=num_threads)

    print("Number of threads: {}".format(num_threads))
    print("Maximum number of evaluations: {}".format(max_evals))
    print("Strategy: {}".format(controller.strategy.__class__.__name__))
    print("Experimental design: {}".format(design.__class__.__name__))
    print("Surrogate: {}".format(surrogate.__class__.__name__))

    # Launch the threads and give them access to the objective function
    for _ in range(num_threads):
        controller.launch_worker(BasicWorkerThread(controller, ackley.eval))

    # Wrap controller in checkpoint object
    controller = CheckpointController(controller, fname=fname)
    result = controller.run()
    print("Best value found: {0}".format(result.value))
    print("Best solution found: {0}\n".format(
        np.array_str(result.params[0], max_line_width=np.inf,
                     precision=5, suppress_small=True)))
def start(self, max_evals):
    """Starts a new pySOT run.

    Resets the proposal/history bookkeeping and installs a fresh
    experimental design, surrogate, and DYCORS strategy on this instance.

    :param max_evals: evaluation budget for the run.
        NOTE(review): this parameter is currently unused -- the strategy is
        built from ``self.max_evals`` instead; confirm callers keep the two
        in sync before removing either.
    """
    self.history = []
    self.proposals = []

    # Symmetric Latin hypercube design: at least one batch worth of points,
    # and never fewer than the 2*(dim+1) points the design requires.
    des_pts = max([self.batch_size, 2 * (self.opt.dim + 1)])
    slhd = SymmetricLatinHypercube(dim=self.opt.dim, num_pts=des_pts)

    # Warped RBF interpolant; eta regularizes the interpolation system.
    rbf = RBFInterpolant(dim=self.opt.dim, lb=self.opt.lb, ub=self.opt.ub,
                         kernel=CubicKernel(), tail=LinearTail(self.opt.dim),
                         eta=1e-4)

    # Optimization strategy. (A previously-tried SRBFStrategy alternative
    # was kept here as a dead string literal; removed as dead code.)
    self.strategy = DYCORSStrategy(
        max_evals=self.max_evals,
        opt_prob=self.opt,
        exp_design=slhd,
        surrogate=rbf,
        asynchronous=True,
        batch_size=1,
        use_restarts=True,
    )
def pysot_cube(objective, scale, n_trials, n_dim, with_count=False):
    """Minimize *objective* over the cube [-scale, scale]^n_dim with pySOT.

    :param objective: callable to minimize
    :param scale: half-width of the symmetric search cube
    :param n_trials: evaluation budget
    :param n_dim: problem dimension
    :param with_count: if True, also return the number of evaluations
    :return: best value found, or (best value, feval_count) if with_count
    """
    # FIX: removed an `if False:` logging-setup block that could never run.
    num_threads = 2
    max_evals = n_trials
    gp = GenericProblem(dim=n_dim, objective=objective, scale=scale)

    bound = np.array([scale] * n_dim)
    rbf = RBFInterpolant(dim=n_dim, lb=-bound, ub=bound,
                         kernel=CubicKernel(), tail=LinearTail(n_dim))
    slhd = SymmetricLatinHypercube(dim=n_dim, num_pts=2 * (n_dim + 1))

    # Create a strategy and a controller
    controller = ThreadController()
    controller.strategy = SRBFStrategy(max_evals=max_evals, opt_prob=gp,
                                       exp_design=slhd, surrogate=rbf,
                                       asynchronous=True)

    # Launch the threads and give them access to the objective function
    for _ in range(num_threads):
        worker = BasicWorkerThread(controller, gp.eval)
        controller.launch_worker(worker)

    # Run the optimization strategy
    result = controller.run()
    return (result.value, gp.feval_count) if with_count else result.value
def pysot_cube(objective, n_trials, n_dim, with_count=False, method=None,
               design=None):
    """Minimize *objective* over the unit cube [0, 1]^n_dim with pySOT.

    :param objective: callable to minimize
    :param n_trials: evaluation budget
    :param n_dim: problem dimension
    :param with_count: if True, also return the evaluation count
    :param method: one of 'srbf', 'ei', 'dycors', 'lcb', 'random'
    :param design: one of 'latin', 'symmetric', 'factorial'
    :return: (best value, best point[, feval count])
    :raises ValueError: on an unrecognized method or design
    """
    logging.getLogger('pySOT').setLevel(logging.ERROR)

    num_threads = 1
    asynchronous = True
    max_evals = n_trials
    gp = GenericProblem(dim=n_dim, objective=objective)

    if design == 'latin':
        exp_design = LatinHypercube(dim=n_dim, num_pts=2 * (n_dim + 1))
    elif design == 'symmetric':
        exp_design = SymmetricLatinHypercube(dim=n_dim,
                                             num_pts=2 * (n_dim + 1))
    elif design == 'factorial':
        exp_design = TwoFactorial(dim=n_dim)
    else:
        raise ValueError('design should be latin, symmetric or factorial')

    # BUG FIX: method defaults to None, and None.lower() raised an opaque
    # AttributeError; fail with the same explicit error as an unknown method.
    if method is None:
        raise ValueError("Didn't recognize method passed to pysot")

    lb = np.array([0.0] * n_dim)
    ub = np.array([1.0] * n_dim)

    def _cubic_rbf():
        # Cubic RBF with linear tail, shared by the srbf/dycors branches.
        return RBFInterpolant(dim=n_dim, lb=lb, ub=ub,
                              kernel=CubicKernel(), tail=LinearTail(n_dim))

    # Create a strategy and a controller
    # SRBFStrategy, EIStrategy, DYCORSStrategy, RandomStrategy, LCBStrategy
    controller = ThreadController()
    method = method.lower()
    if method == 'srbf':
        controller.strategy = SRBFStrategy(
            max_evals=max_evals, opt_prob=gp, exp_design=exp_design,
            surrogate=_cubic_rbf(), asynchronous=asynchronous)
    elif method == 'ei':
        surrogate = GPRegressor(dim=n_dim, lb=lb, ub=ub)
        controller.strategy = EIStrategy(
            max_evals=max_evals, opt_prob=gp, exp_design=exp_design,
            surrogate=surrogate, asynchronous=asynchronous)
    elif method == 'dycors':
        controller.strategy = DYCORSStrategy(
            max_evals=max_evals, opt_prob=gp, exp_design=exp_design,
            surrogate=_cubic_rbf(), asynchronous=asynchronous)
    elif method == 'lcb':
        surrogate = GPRegressor(dim=n_dim, lb=lb, ub=ub)
        controller.strategy = LCBStrategy(
            max_evals=max_evals, opt_prob=gp, exp_design=exp_design,
            surrogate=surrogate, asynchronous=asynchronous)
    elif method == 'random':
        # RandomStrategy ignores the surrogate and experimental design.
        controller.strategy = RandomStrategy(max_evals=max_evals, opt_prob=gp)
    else:
        raise ValueError("Didn't recognize method passed to pysot")

    # Launch the threads and give them access to the objective function
    for _ in range(num_threads):
        worker = BasicWorkerThread(controller, gp.eval)
        controller.launch_worker(worker)

    # Run the optimization strategy
    result = controller.run()
    best_x = result.params[0].tolist()
    return (result.value, best_x, gp.feval_count) if with_count \
        else (result.value, best_x)
def main_master(num_workers):
    """Drive an MPI-controlled SRBF optimization of the 10-d sphere problem."""
    if not os.path.exists("./logfiles"):
        os.makedirs("logfiles")
    if os.path.exists("./logfiles/test_subprocess_mpi.log"):
        os.remove("./logfiles/test_subprocess_mpi.log")
    logging.basicConfig(filename="./logfiles/test_subprocess_mpi.log",
                        level=logging.INFO)

    print("\nTesting the POAP MPI controller with {0} workers".format(num_workers))
    print("Maximum number of evaluations: 200")
    print("Search strategy: Candidate DYCORS")
    print("Experimental design: Symmetric Latin Hypercube")
    print("Surrogate: Cubic RBF")

    # The external evaluator binary must exist before we start
    assert os.path.isfile(path), "You need to build sphere_ext"

    budget = 200
    problem = Sphere(dim=10)
    surrogate = RBFInterpolant(dim=problem.dim, lb=problem.lb, ub=problem.ub,
                               kernel=CubicKernel(),
                               tail=LinearTail(problem.dim))
    design = SymmetricLatinHypercube(dim=problem.dim,
                                     num_pts=2 * (problem.dim + 1))

    # Build the strategy first, then hand it to the MPI controller
    strategy = SRBFStrategy(max_evals=budget, opt_prob=problem,
                            exp_design=design, surrogate=surrogate,
                            asynchronous=True, batch_size=num_workers)
    controller = MPIController(strategy)

    print("Number of threads: {}".format(num_workers))
    print("Maximum number of evaluations: {}".format(budget))
    print("Strategy: {}".format(controller.strategy.__class__.__name__))
    print("Experimental design: {}".format(design.__class__.__name__))
    print("Surrogate: {}".format(surrogate.__class__.__name__))

    # Run the optimization strategy
    result = controller.run()
    print("Best value found: {0}".format(result.value))
    print("Best solution found: {0}\n".format(
        np.array_str(result.params[0], max_line_width=np.inf,
                     precision=5, suppress_small=True)))
def example_extra_vals():
    """SRBF on Ackley seeded with extra points, half of them pre-evaluated."""
    if not os.path.exists("./logfiles"):
        os.makedirs("logfiles")
    if os.path.exists("./logfiles/example_extra_vals.log"):
        os.remove("./logfiles/example_extra_vals.log")
    logging.basicConfig(filename="./logfiles/example_extra_vals.log",
                        level=logging.INFO)

    num_threads = 4
    max_evals = 500

    ackley = Ackley(dim=10)

    num_extra = 10
    extra = np.random.uniform(ackley.lb, ackley.ub, (num_extra, ackley.dim))
    extra_vals = np.nan * np.ones((num_extra, 1))
    # Evaluate every second point up front; the rest stay NaN (unknown)
    for idx in range(0, num_extra, 2):
        extra_vals[idx] = ackley.eval(extra[idx, :])

    surrogate = RBFInterpolant(dim=ackley.dim, lb=ackley.lb, ub=ackley.ub,
                               kernel=CubicKernel(),
                               tail=LinearTail(ackley.dim))
    design = SymmetricLatinHypercube(dim=ackley.dim,
                                     num_pts=2 * (ackley.dim + 1))

    # Create a strategy and a controller, seeding the extra points
    controller = ThreadController()
    controller.strategy = SRBFStrategy(
        max_evals=max_evals, opt_prob=ackley, exp_design=design,
        surrogate=surrogate, asynchronous=True, batch_size=num_threads,
        extra_points=extra, extra_vals=extra_vals)

    print("Number of threads: {}".format(num_threads))
    print("Maximum number of evaluations: {}".format(max_evals))
    print("Strategy: {}".format(controller.strategy.__class__.__name__))
    print("Experimental design: {}".format(design.__class__.__name__))
    print("Surrogate: {}".format(surrogate.__class__.__name__))

    # Append the known function values to the POAP database since
    # POAP won't evaluate these points
    for point, val in zip(extra, extra_vals):
        if not np.isnan(val):
            rec = EvalRecord(params=(np.ravel(point), ), status="completed")
            rec.value = val
            rec.feasible = True
            controller.fevals.append(rec)

    # Launch the threads and give them access to the objective function
    for _ in range(num_threads):
        controller.launch_worker(BasicWorkerThread(controller, ackley.eval))

    # Run the optimization strategy
    result = controller.run()
    print("Best value found: {0}".format(result.value))
    print("Best solution found: {0}\n".format(
        np.array_str(result.params[0], max_line_width=np.inf,
                     precision=5, suppress_small=True)))
def fit(self, X, y=None, **kwargs):
    """Run training with cross validation.

    Tunes ``self.estimator`` hyperparameters over ``self.param_def`` via
    pySOT surrogate optimization; each candidate is scored with a one-point
    GridSearchCV. Side effects on self: updates ``best_score_``,
    ``best_params_``, ``params_history_``, ``score_history_``.

    :param X: training data
    :param **: parameters to be passed to GridSearchCV
    """
    # wrap for pySOT: adapt param_def into a pySOT OptimizationProblem
    class Target(OptimizationProblem):
        def __init__(self, outer):
            self.outer = outer
            param_def = outer.param_def
            # Box bounds and dimension come straight from param_def entries
            self.lb = np.array([param['lb'] for param in param_def])
            self.ub = np.array([param['ub'] for param in param_def])
            self.dim = len(param_def)
            # Indices of integer-valued variables; the remainder are continuous
            self.int_var = np.array([
                idx for idx, param in enumerate(param_def) if param['integer']
            ])
            self.cont_var = np.array([
                idx for idx, param in enumerate(param_def)
                if idx not in self.int_var
            ])

        def eval_(self, x):
            # Score one candidate point x; pySOT minimizes, so negate below.
            print('Eval {0} ...'.format(x))
            param_def = self.outer.param_def
            outer = self.outer
            # prepare parameters grid for gridsearchcv
            # (single-value lists -> the grid contains exactly this point;
            # integer variables are rounded down via int())
            param_grid = ({
                param['name']: [int(x[idx]) if param['integer'] else x[idx]]
                for idx, param in enumerate(param_def)
            })
            # create gridsearchcv to evaluate the cv
            gs = GridSearchCV(outer.estimator, param_grid, refit=False,
                              **outer.kwargs)
            # never refit during iteration, refit at the end
            gs.fit(X, y=y, **kwargs)
            gs_score = gs.best_score_
            # gridsearchcv score is better when greater
            # NOTE(review): `not outer.best_score_` also treats a legitimate
            # score of 0.0 as "unset" and overwrites it -- confirm intended.
            if not outer.best_score_ or gs_score > outer.best_score_:
                outer.best_score_ = gs_score
                outer.best_params_ = gs.best_params_
            # also record history
            outer.params_history_.append(x)
            outer.score_history_.append(gs_score)
            print('Eval {0} => {1}'.format(x, gs_score))
            # pySOT score is the lower the better, so return the negated
            return -gs_score

    # pySOT routine
    # TODO: make this configurable
    target = Target(self)
    # RBF surrogate scaled to the unit box spanned by the parameter bounds
    rbf = SurrogateUnitBox(
        RBFInterpolant(dim=target.dim, kernel=CubicKernel(),
                       tail=LinearTail(target.dim)),
        lb=target.lb, ub=target.ub)
    slhd = SymmetricLatinHypercube(dim=target.dim,
                                   num_pts=2 * (target.dim + 1))

    # Create a strategy and a controller (synchronous, one eval per batch)
    controller = SerialController(objective=target.eval_)
    controller.strategy = SRBFStrategy(
        max_evals=self.n_iter, batch_size=1, opt_prob=target,
        exp_design=slhd, surrogate=rbf, asynchronous=False)

    print('Maximum number of evaluations: {0}'.format(self.n_iter))
    print('Strategy: {0}'.format(controller.strategy.__class__.__name__))
    print('Experimental design: {0}'.format(slhd.__class__.__name__))
    print('Surrogate: {0}'.format(rbf.__class__.__name__))

    # Run the optimization strategy
    result = controller.run()
    print('Best value found: {0}'.format(result.value))
    print('Best solution found: {0}\n'.format(
        np.array_str(result.params[0], max_line_width=np.inf, precision=5,
                     suppress_small=True)))