from __future__ import print_function

import logging
import math

import numpy as np

# NOTE: these imports assume the pySOT 0.1.x / POAP module layout; adjust the
# paths if your installation organizes these classes differently.
from poap.strategy import BaseStrategy, RetryStrategy
from pySOT.experimental_design import LatinHypercube, SymmetricLatinHypercube
from pySOT.rbf_interpolant import RBFInterpolant
from pySOT.rbf_surfaces import CubicRBFSurface
from pySOT.search_procedure import CandidateDYCORS, round_vars

logger = logging.getLogger(__name__)

class SyncStrategyNoConstraints(BaseStrategy):
    """Parallel synchronous optimization strategy without non-bound constraints.

    This class implements the parallel synchronous SRBF strategy described by
    Regis and Shoemaker. After the initial experimental design (which is
    embarrassingly parallel), the optimization proceeds in phases. During each
    phase, we allow nsamples simultaneous function evaluations. We insist that
    these evaluations run to completion -- if one fails for whatever reason, we
    resubmit it. Samples are drawn randomly from around the current best point
    and are sorted according to a merit function based on distance to other
    sample points and predicted function values according to the response
    surface. After several successive significant improvements, we increase the
    sampling radius; after several failures to improve the function value, we
    decrease the sampling radius. We restart once the sampling radius decreases
    below a threshold.
    """

    def __init__(self, worker_id, data, response_surface, maxeval, nsamples,
                 exp_design=None, search_procedure=None, extra=None):
        """Initialize the optimization strategy.

        :param worker_id: Start ID in a multistart setting
        :param data: Problem parameter data structure
        :param response_surface: Surrogate model object
        :param maxeval: Function evaluation budget
        :param nsamples: Number of simultaneous fevals allowed
        :param exp_design: Experimental design
        :param search_procedure: Search procedure for finding points to evaluate
        :param extra: Points to be added to the experimental design
        """

        if self.__class__.__name__ == "SyncStrategyNoConstraints":
            assert not hasattr(data, "eval_ineq_constraints"), \
                "Objective function has constraints,\n" \
                "SyncStrategyNoConstraints can't handle constraints"

        self.worker_id = worker_id
        self.data = data

        # Default to a cubic RBF surrogate if no response surface is supplied
        self.fhat = response_surface
        if self.fhat is None:
            self.fhat = RBFInterpolant(surftype=CubicRBFSurface,
                                       eta=1e-8, maxp=maxeval)

        self.maxeval = maxeval
        self.nsamples = nsamples
        self.extra = extra

        # Default to generating sampling points using a symmetric Latin
        # hypercube (plain Latin hypercube in high dimensions)
        self.design = exp_design
        if self.design is None:
            if self.data.dim > 50:
                self.design = LatinHypercube(data.dim, data.dim + 1)
            else:
                self.design = SymmetricLatinHypercube(data.dim, 2 * (data.dim + 1))

        self.xrange = np.asarray(data.xup - data.xlow)

        # Algorithm parameters
        self.sigma_min = 0.005
        self.sigma_max = 0.2
        self.sigma_init = 0.2
        self.failtol = max(5, data.dim)
        self.succtol = 3

        self.numeval = 0
        self.status = 0
        self.sigma = 0
        self.resubmitter = RetryStrategy()
        self.xbest = None
        self.fbest = np.inf
        self.fbest_old = None

        # Set up the search procedure and initialize
        self.search = search_procedure
        if self.search is None:
            self.search = CandidateDYCORS(data)

        # Start with the first experimental design
        self.sample_initial()

    def to_unit_box(self, x):
        """Map a point from the problem domain to the unit box."""
        return (np.copy(x) - self.data.xlow) / (self.data.xup - self.data.xlow)

    def from_unit_box(self, x):
        """Map a point from the unit box back to the problem domain."""
        return self.data.xlow + (self.data.xup - self.data.xlow) * np.copy(x)

    def log_completion(self, record):
        """Record a completed evaluation to the log.

        :param record: Record of the function evaluation
        """
        xstr = np.array_str(record.params[0], max_line_width=np.inf,
                            precision=5, suppress_small=True)
        logger.info("Feasible {:.3e} @ {}".format(record.value, xstr))

    def adjust_step(self):
        """Adjust the sampling radius sigma.

        After succtol successful steps, we double the sampling radius; after
        failtol failed steps, we halve the sampling radius.
        :ivar fbest: Best function value found in the new batch
        :ivar fbest_old: Previous best function value
        """

        # Initialize if this is the first adaptive step
        if self.fbest_old is None:
            self.fbest_old = self.fbest
            return

        # Check if we succeeded at a significant improvement
        if self.fbest < self.fbest_old - 1e-3 * math.fabs(self.fbest_old):
            self.status = max(1, self.status + 1)
        else:
            self.status = min(-1, self.status - 1)
        self.fbest_old = self.fbest

        # Check if the step needs adjusting
        if self.status <= -self.failtol:
            self.status = 0
            self.sigma /= 2
            logger.info("Reducing sigma")
        if self.status >= self.succtol:
            self.status = 0
            self.sigma = min([2.0 * self.sigma, self.sigma_max])
            logger.info("Increasing sigma")

    def sample_initial(self):
        """Generate and queue an initial experimental design."""

        if self.numeval == 0:
            logger.info("=== Start ===")
        else:
            logger.info("=== Restart ===")

        # Reset the surrogate and the adaptive parameters
        self.fhat.reset()
        self.sigma = self.sigma_init
        self.status = 0
        self.xbest = None
        self.fbest_old = None
        self.fbest = np.inf

        start_sample = self.design.generate_points()

        # Add extra evaluation points provided by the user
        if self.extra is not None:
            start_sample = np.vstack((start_sample, self.to_unit_box(self.extra)))

        start_sample = round_vars(self.data, start_sample)
        for j in range(min(start_sample.shape[0], self.maxeval - self.numeval)):
            proposal = self.propose_eval(self.from_unit_box(start_sample[j, :]))
            self.resubmitter.rput(proposal)

        self.search.init(start_sample, self.maxeval - self.numeval, True, self.fhat)

    def evals(self, xx, d=None, scaling=False):
        """Predict function values.

        As a measure of promising function values we let all infeasible points
        have the value of the feasible candidate point with the worst function
        value, since large penalties make it impossible to distinguish between
        feasible points.

        :param xx: Data points
        :return: Predicted function values
        """
        return self.fhat.evals(xx, d)

    def derivs(self, xx, d=None):
        """Predict derivatives of the surrogate at the given points."""
        return np.atleast_2d(self.fhat.deriv(xx, d))

    def sample_adapt(self):
        """Generate and queue samples from the search strategy."""
        self.adjust_step()
        nsamples = min(self.nsamples, self.maxeval - self.numeval)
        self.search.make_points(self.xbest, self.sigma, self.evals, self.derivs)
        for _ in range(nsamples):
            proposal = self.propose_eval(
                np.ravel(self.from_unit_box(self.search.next())))
            self.resubmitter.rput(proposal)

    def start_batch(self):
        """Generate and queue a new batch of points."""
        if self.sigma < self.sigma_min:
            # The sampling radius has collapsed; restart from a fresh design
            self.sample_initial()
        else:
            self.sample_adapt()

    def propose_action(self):
        """Propose an action."""
        if self.numeval == self.maxeval:
            return self.propose_terminate()
        elif self.resubmitter.num_eval_outstanding == 0:
            self.start_batch()
        return self.resubmitter.get()

    def on_complete(self, record):
        """Handle a completed function evaluation.

        When a function evaluation is completed we need to ask the constraint
        handler if the function value should be modified, which is the case for,
        say, a penalty method. We also need to print the information to the
        logfile, update the best value found so far, and notify the GUI that an
        evaluation has completed.

        :param record: Evaluation record
        """
        self.log_completion(record)
        self.numeval += 1
        record.worker_id = self.worker_id
        record.worker_numeval = self.numeval
        self.fhat.add_point(self.to_unit_box(record.params[0]), record.value)
        if record.value < self.fbest:
            self.xbest = self.to_unit_box(record.params[0])
            self.fbest = record.value
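

# The sketch below shows one way this strategy is typically wired into a POAP
# controller: the strategy proposes evaluations and a ThreadController hands
# them to worker threads that run the objective function. The test problem
# (Ackley), its import path, and the budget/thread counts are assumptions based
# on the pySOT 0.1.x / POAP API and may need adjusting for your installation;
# treat it as an illustrative sketch rather than the canonical driver script.
if __name__ == "__main__":
    from poap.controller import ThreadController, BasicWorkerThread
    from pySOT.test_problems import Ackley  # assumed test-problem location

    nthreads = 4   # number of simultaneous evaluations (nsamples)
    maxeval = 200  # total evaluation budget
    data = Ackley(dim=10)

    # The strategy drives the optimization; the controller schedules the work.
    controller = ThreadController()
    controller.strategy = SyncStrategyNoConstraints(
        worker_id=0, data=data, response_surface=None,
        maxeval=maxeval, nsamples=nthreads)

    # Launch one worker thread per simultaneous evaluation slot.
    for _ in range(nthreads):
        controller.launch_worker(BasicWorkerThread(controller, data.objfunction))

    # Run until the strategy proposes termination and report the best record.
    result = controller.run()
    print("Best value: {:.4e}".format(result.value))
    print("Best point: {}".format(
        np.array_str(result.params[0], max_line_width=np.inf, precision=5)))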