def pysot_suggest(self, n_suggestions=1):
    """Get `n_suggestions` suggestions from the pySOT strategy."""
    if self.pysot_batch_size is None:  # First call to suggest
        self.pysot_batch_size = n_suggestions
        self.start()

    # Set the tolerances pretending like we are running batch
    d, p = float(self.opt.dim), float(n_suggestions)
    self.strategy.failtol = p * int(max(np.ceil(d / p), np.ceil(4 / p)))

    # Now we can make suggestions
    x_w = []
    self.proposals = []
    for _ in range(n_suggestions):
        proposal = self.strategy.propose_action()
        record = EvalRecord(proposal.args, status="pending")
        proposal.record = record
        proposal.accept()  # This triggers all the callbacks

        # It is possible that pySOT proposes a previously evaluated point
        # when all variables are integers, so we just abort in this case
        # since we have likely converged anyway. See PySOT issue #30.
        x = list(proposal.record.params)  # From tuple to list
        x_unwarped, = self.space_x.unwarp(x)
        if x_unwarped in self.history:
            warnings.warn("pySOT proposed the same point twice")
            self.start()
            return self.suggest(n_suggestions=n_suggestions)

        # NOTE: Append unwarped to avoid rounding issues
        self.history.append(copy(x_unwarped))
        self.proposals.append(proposal)
        x_w.append(copy(x_unwarped))

    return x_w
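# Worked example of the failtol scaling above (a sketch with made-up numbers,
# not taken from the original code): for d = 8 dimensions and p = 1 suggestion
# the formula gives 1 * max(ceil(8), ceil(4)) = 8 consecutive failures; with
# p = 2 it gives 2 * max(ceil(8 / 2), ceil(4 / 2)) = 2 * 4 = 8, so the
# tolerance in total evaluations stays roughly the same, just rounded up to a
# multiple of the batch size p.
import numpy as np

d, p = 8.0, 2.0
failtol = p * int(max(np.ceil(d / p), np.ceil(4 / p)))
assert failtol == 8.0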
def pysot_get_suggest(self, suggests):
    """Register externally generated suggestions with the pySOT strategy."""
    turbo_suggest_warps = self.space_x.warp(suggests)
    for i, warps in enumerate(turbo_suggest_warps):
        proposal = self.strategy.make_proposal(warps)
        proposal.add_callback(self.strategy.on_initial_proposal)
        record = EvalRecord(proposal.args, status="pending")
        proposal.record = record
        proposal.accept()
        self.history.append(copy(suggests[i]))
        self.proposals.append(proposal)
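# A minimal usage sketch (hypothetical parameter names; `opt` is assumed to be
# an instance of the optimizer class these methods belong to): points produced
# elsewhere, e.g. by TuRBO, are handed to pySOT's bookkeeping as unwarped dicts.
external_suggests = [
    {"lr": 0.05, "max_depth": 4},
    {"lr": 0.10, "max_depth": 6},
]
opt.pysot_get_suggest(external_suggests)  # registers a pending proposal per point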
def suggest(self, n_suggestions=1):
    """Get a suggestion from the optimizer.

    Parameters
    ----------
    n_suggestions : int
        Desired number of parallel suggestions in the output

    Returns
    -------
    next_guess : list of dict
        List of `n_suggestions` suggestions to evaluate the objective
        function. Each suggestion is a dictionary where each key corresponds
        to a parameter being optimized.
    """
    if self.batch_size is None:  # First call to suggest
        self.batch_size = n_suggestions
        self.start(self.max_evals)

    # Set the tolerances pretending like we are running batch
    d, p = float(self.opt.dim), float(n_suggestions)
    self.strategy.failtol = p * int(max(np.ceil(d / p), np.ceil(4 / p)))

    # Now we can make suggestions
    x_w = []
    self.proposals = []
    for _ in range(n_suggestions):
        proposal = self.strategy.propose_action()
        record = EvalRecord(proposal.args, status="pending")
        proposal.record = record
        proposal.accept()  # This triggers all the callbacks

        # It is possible that pySOT proposes a previously evaluated point
        # when all variables are integers, so we just abort in this case
        # since we have likely converged anyway. See PySOT issue #30.
        x = list(proposal.record.params)  # From tuple to list
        x_unwarped, = self.space_x.unwarp(x)
        if x_unwarped in self.history:
            warnings.warn("pySOT proposed the same point twice")
            self.start(self.max_evals)
            return self.suggest(n_suggestions=n_suggestions)

        # NOTE: Append unwarped to avoid rounding issues
        self.history.append(copy(x_unwarped))
        self.proposals.append(proposal)
        x_w.append(copy(x_unwarped))

    return x_w
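# Why `x_unwarped in self.history` works as a duplicate check (a standalone
# illustration with hypothetical parameter names, not part of the optimizer):
# unwarped suggestions are plain dicts, and Python's `in` on a list of dicts
# compares by value, not by identity.
previous = [{"lr": 0.1, "max_depth": 3}]
candidate = {"lr": 0.1, "max_depth": 3}
assert candidate in previous  # True: equal contents count as the same point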
def new_feval(self, params, extra_args=None):
    """Add a function evaluation record to the database.

    In addition to adding the record with status 'pending', we run the
    feval_callbacks on the new record.

    Args:
        params: Parameters to the objective function

    Returns:
        New EvalRecord object
    """
    record = EvalRecord(params, extra_args=extra_args, status='pending')
    self.fevals.append(record)
    logger.debug("Call new feval callbacks")
    for callback in self.feval_callbacks:
        callback(record)
    return record
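# A minimal, self-contained mimic of the callback pattern used by new_feval
# above (a sketch only; these stripped-down classes are not the real POAP
# Controller or EvalRecord): each new pending record is handed to every
# registered feval callback.
class _Record:
    def __init__(self, params, status):
        self.params, self.status = params, status


class _MiniController:
    def __init__(self):
        self.fevals, self.feval_callbacks = [], []

    def add_feval_callback(self, callback):
        self.feval_callbacks.append(callback)

    def new_feval(self, params):
        record = _Record(params, status="pending")
        self.fevals.append(record)
        for callback in self.feval_callbacks:
            callback(record)
        return record


ctrl = _MiniController()
ctrl.add_feval_callback(lambda rec: print("new record:", rec.params, rec.status))
ctrl.new_feval((0.1, 3))  # prints: new record: (0.1, 3) pending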