def _minimize_qpso(fun, x0, confunc=None, g=.96, max_iter=1000,
                   stable_iter=40, ptol=1e-6, ctol=1e-6, levy_rate=0,
                   decay_rate=0, reduction_rate=0.5, callback=None,
                   verbose=False, savefile=None):
    """Internal implementation for ``psopy.minimize_qpso``.

    See Also
    --------
    psopy.minimize_qpso : The SciPy compatible interface to this function.
        Refer to its documentation for an explanation of the parameters.
    psopy.gen_confunc : Utility function to convert SciPy style constraints
        to the form required by this function.

    Parameters
    ----------
    x0 : array_like of shape (N, D)
        Initial position to begin QPSO from, where ``N`` is the number of
        points and ``D`` the dimensionality of each point. For the
        constrained case these points should satisfy all constraints.
    fun : callable
        The objective function to be minimized. Must be in the form
        ``fun(pos, *args)``. The argument ``pos`` is a 2-D array for initial
        positions, where each row specifies the position of a different
        particle, and ``args`` is a tuple of any additional fixed parameters
        needed to completely specify the function.
    confunc : callable
        The function that describes constraints. Must be of the form
        ``confunc(pos)`` that returns the constraint matrix.
    levy_rate : float
        Whether to run the Levy-walk variant of QPSO. A value > 0 turns the
        Levy walk on.
    decay_rate : float
        Whether to apply the decay factor to the position update. A value > 0
        turns the decay on.

    Notes
    -----
    Chaotic Quantum PSO.

    Using this function directly allows for a slightly faster implementation
    that does away with the need for the additional recursive calls needed to
    wrap the constraint and objective functions for compatibility with SciPy.
    """
    if verbose:
        message = setup_print(x0.shape[1], max_iter, confunc is not None)
    if savefile:
        iterinfo = []

    position = np.copy(x0)
    nparam = len(position)
    pbest = np.copy(position)
    gbest = pbest[np.argmin(fun(pbest))]
    oldfit = fun(gbest[None])[0]
    stable_count = 0
    dimension = len(position[0])

    # Simple Levy walk: ``sigma`` is the scale of the heavy-tailed step
    # distribution (Mantegna's algorithm). The decay factor, when enabled,
    # perturbs particles less and less as the iteration count grows, pushing
    # them away from pbest early on and settling them later.
    beta = 3 / 2
    sigma = (gamma(1 + beta) * sin(pi * beta / 2) /
             (gamma((1 + beta) / 2) * beta * 2**((beta - 1) / 2)))**(1 / beta)
    decay = 1
    stepsize = 1.0

    for ii in range(max_iter):
        # Mean of the personal best positions.
        mbest = np.sum(pbest, axis=0) / pbest.shape[0]

        # Heavy-tailed Levy step.
        u = np.random.normal(0, 1, size=dimension) * sigma
        v = np.random.normal(0, 1, size=dimension)
        step = u / abs(v)**(1 / beta)

        # Local attractor built from the global and local bests.
        psi_1 = uniform(0, 1)
        psi_2 = uniform(0, 1)
        dv_g = psi_1 * gbest

        if confunc is not None:
            leaders = np.argmin(
                distance.cdist(position, pbest, 'sqeuclidean'), axis=1)
            dv_l = psi_2 * pbest[leaders]
        else:
            dv_l = psi_2 * pbest

        P = (dv_g + dv_l) / (psi_1 + psi_2)

        # Fresh uniform sample per particle for the contraction-expansion term.
        u = uniform(0, 1, nparam)
        stepsize = 1.0
        for i in range(0, nparam):
            if levy_rate > 0:
                stepsize = 0.01 * step * (1 / (0.0000001 + position[i] - gbest))
            if decay_rate > 0:
                decay = stepsize * 5 * (0.001)**(ii / (max_iter * 0.05)) + 1

            if uniform(0, 1) > 0.5:
                position[i] = P[i] - mbest * np.log(1 / u[i]) * decay
            else:
                position[i] = P[i] + mbest * np.log(1 / u[i]) * decay

        to_update = (fun(position) < fun(pbest))
        if confunc is not None:
            to_update &= (confunc(position).sum(axis=1) < ctol)

        if to_update.any():
            pbest[to_update] = position[to_update]
            gbest = pbest[np.argmin(fun(pbest))]

        # Termination criteria.
        fval = fun(gbest[None])[0]
        if np.abs(oldfit - fval) < ptol:
            stable_count += 1
            if stable_count == stable_iter:
                break
        else:
            stable_count = 0
        oldfit = fval

        if verbose or savefile:
            info = [ii, gbest, fval]
            if confunc is not None:
                cv = np.max(confunc(gbest[None]))
                info.append(cv)

            if verbose:
                print(message.format(*info))
            if savefile:
                iterinfo.append(info)

    # Final callback.
    if callback is not None:
        position = callback(position)

    if savefile:
        save_info(savefile, iterinfo, constraints=confunc is not None)

    result = OptimizeResult(
        x=gbest, fun=fun(gbest[None])[0], nit=ii, nsit=stable_count)

    violation = False
    if confunc is not None:
        convec = confunc(gbest[None])
        result.maxcv = np.max(convec)
        result.cvec = convec
        if convec.sum() > ctol:
            violation = True

    if violation:
        result.status = 2
    elif ii == max_iter - 1:
        result.status = 1
    else:
        result.status = 0

    result.success = not result.status
    return result
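
# The following is a minimal usage sketch for ``_minimize_qpso`` above, added
# for illustration: it mirrors the unconstrained Rosenbrock example given in
# the ``_minimize_pso`` docstring below. The helper name
# ``_example_qpso_rosenbrock`` is hypothetical and not part of the psopy API;
# the sketch assumes ``numpy`` and ``scipy`` are available.
def _example_qpso_rosenbrock():
    """Sketch: minimize the 5-D Rosenbrock function with 1000 QPSO particles."""
    import numpy as np
    from scipy.optimize import rosen

    # Vectorize the objective so it accepts an (N, D) array of particle
    # positions, one row per particle.
    fun = lambda x: np.apply_along_axis(rosen, 1, x)

    # 1000 particles in 5 dimensions, initialized uniformly over [0, 2).
    x0 = np.random.uniform(0, 2, (1000, 5))

    res = _minimize_qpso(fun, x0, stable_iter=50)
    return res.x  # Expected to lie close to np.ones(5).
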
def _minimize_pso(fun, x0, confunc=None, friction=.8, max_velocity=5.,
                  g_rate=.8, l_rate=.5, max_iter=1000, stable_iter=100,
                  ptol=1e-6, ctol=1e-6, callback=None, verbose=False,
                  savefile=None):
    """Internal implementation for ``psopy.minimize``.

    See Also
    --------
    psopy.minimize : The SciPy compatible interface to this function. Refer
        to its documentation for an explanation of the parameters.
    psopy.gen_confunc : Utility function to convert SciPy style constraints
        to the form required by this function.

    Parameters
    ----------
    x0 : array_like of shape (N, D)
        Initial position to begin PSO from, where ``N`` is the number of
        points and ``D`` the dimensionality of each point. For the
        constrained case these points should satisfy all constraints.
    fun : callable
        The objective function to be minimized. Must be in the form
        ``fun(pos, *args)``. The argument ``pos`` is a 2-D array for initial
        positions, where each row specifies the position of a different
        particle, and ``args`` is a tuple of any additional fixed parameters
        needed to completely specify the function.
    confunc : callable
        The function that describes constraints. Must be of the form
        ``confunc(pos)`` that returns the constraint matrix.

    Notes
    -----
    Using this function directly allows for a slightly faster implementation
    that does away with the need for the additional recursive calls needed to
    wrap the constraint and objective functions for compatibility with SciPy.

    Examples
    --------
    These examples are identical to those laid out in ``psopy.minimize`` and
    serve to illustrate the additional overhead in ensuring compatibility.

    >>> import numpy as np
    >>> from psopy import _minimize_pso

    Consider the problem of minimizing the Rosenbrock function implemented as
    ``scipy.optimize.rosen``.

    >>> from scipy.optimize import rosen
    >>> fun = lambda x: np.apply_along_axis(rosen, 1, x)

    Initialize 1000 particles and run the minimization function.

    >>> x0 = np.random.uniform(0, 2, (1000, 5))
    >>> res = _minimize_pso(fun, x0, stable_iter=50)
    >>> res.x
    array([1.00000003, 1.00000017, 1.00000034, 1.0000006 , 1.00000135])

    Consider the constrained optimization problem with the objective function
    defined as:

    >>> fun = lambda x: (x[0] - 1)**2 + (x[1] - 2.5)**2
    >>> fun_ = lambda x: np.apply_along_axis(fun, 1, x)

    and constraints defined as:

    >>> cons = ({'type': 'ineq', 'fun': lambda x: x[0] - 2 * x[1] + 2},
    ...         {'type': 'ineq', 'fun': lambda x: -x[0] - 2 * x[1] + 6},
    ...         {'type': 'ineq', 'fun': lambda x: -x[0] + 2 * x[1] + 2},
    ...         {'type': 'ineq', 'fun': lambda x: x[0]},
    ...         {'type': 'ineq', 'fun': lambda x: x[1]})

    Initializing the constraint function and feasible solutions:

    >>> from psopy import init_feasible, gen_confunc
    >>> x0 = init_feasible(cons, low=0., high=2., shape=(1000, 2))
    >>> confunc = gen_confunc(cons)

    Running the constrained version of the function:

    >>> res = _minimize_pso(fun_, x0, confunc=confunc, g_rate=1., l_rate=1.,
    ...                     max_velocity=4., stable_iter=50)
    >>> res.x
    array([ 1.39985398, 1.69992748])

    """
    if verbose:
        message = setup_print(x0.shape[1], max_iter, confunc is not None)
    if savefile:
        iterinfo = []

    position = np.copy(x0)
    velocity = np.random.uniform(-max_velocity, max_velocity, position.shape)
    pbest = np.copy(position)
    gbest = pbest[np.argmin(fun(pbest))]
    oldfit = fun(gbest[None])[0]
    stable_count = 0

    for ii in range(max_iter):
        # Determine global and local gradient.
        dv_g = g_rate * uniform(0, 1) * (gbest - position)
        if confunc is not None:
            leaders = np.argmin(
                distance.cdist(position, pbest, 'sqeuclidean'), axis=1)
            dv_l = l_rate * uniform(0, 1) * (pbest[leaders] - position)
        else:
            dv_l = l_rate * uniform(0, 1) * (pbest - position)

        # Update velocity and position of particles.
        velocity *= friction
        velocity += (dv_g + dv_l)
        np.clip(velocity, -max_velocity, max_velocity, out=velocity)
        position += velocity

        to_update = (fun(position) < fun(pbest))
        if confunc is not None:
            to_update &= (confunc(position).sum(axis=1) < ctol)

        if to_update.any():
            pbest[to_update] = position[to_update]
            gbest = pbest[np.argmin(fun(pbest))]

        # Termination criteria.
        fval = fun(gbest[None])[0]
        if np.abs(oldfit - fval) < ptol:
            stable_count += 1
            if stable_count == stable_iter:
                break
        else:
            stable_count = 0
        oldfit = fval

        if verbose or savefile:
            info = [ii, gbest, fval]
            if confunc is not None:
                cv = np.max(confunc(gbest[None]))
                info.append(cv)

            if verbose:
                print(message.format(*info))
            if savefile:
                iterinfo.append(info)

    # Final callback.
    if callback is not None:
        position = callback(position)

    if savefile:
        save_info(savefile, iterinfo, constraints=confunc is not None)

    result = OptimizeResult(
        x=gbest, fun=fun(gbest[None])[0], nit=ii, nsit=stable_count)

    violation = False
    if confunc is not None:
        convec = confunc(gbest[None])
        result.maxcv = np.max(convec)
        result.cvec = convec
        if convec.sum() > ctol:
            violation = True

    if violation:
        result.status = 2
    elif ii == max_iter - 1:
        result.status = 1
    else:
        result.status = 0

    result.success = not result.status
    return result
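
# A standalone numeric sketch, added for illustration, of the velocity and
# position update used inside the main loop of ``_minimize_pso`` above. The
# random factors are fixed to 1 so the arithmetic is easy to follow by hand;
# the helper name ``_example_pso_update_step`` and the toy values are
# hypothetical and not part of the psopy API.
def _example_pso_update_step():
    """Sketch: one deterministic PSO update for 2 particles in 2-D."""
    import numpy as np

    friction, g_rate, l_rate, max_velocity = 0.8, 0.8, 0.5, 5.0
    position = np.array([[0.0, 0.0], [2.0, 2.0]])    # current positions
    velocity = np.array([[1.0, -1.0], [0.5, 0.5]])   # current velocities
    pbest = position.copy()                          # personal best positions
    gbest = np.array([1.0, 1.0])                     # global best position

    # v <- friction*v + g_rate*r_g*(gbest - x) + l_rate*r_l*(pbest - x),
    # with the random multipliers r_g = r_l = 1 in this deterministic sketch.
    velocity = friction * velocity + g_rate * (gbest - position) \
        + l_rate * (pbest - position)
    np.clip(velocity, -max_velocity, max_velocity, out=velocity)
    position = position + velocity
    return position
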
def optimize_minimize_mhmcmc_cluster(objective, bounds, args=(), x0=None, T=1, N=3, burnin=100000,
                                     maxiter=1000000, target_ar=0.4, ar_tolerance=0.05,
                                     cluster_eps=DEFAULT_CLUSTER_EPS, rnd_seed=None,
                                     collect_samples=None, logger=None):
    """
    Minimize objective function and return up to N local minima solutions.

    :param objective: Objective function to minimize. Takes unpacked args as function call arguments
        and returns a float.
    :type objective: Callable(\*args) -> float
    :param bounds: Bounds of the parameter space.
    :type bounds: scipy.optimize.Bounds
    :param args: Any additional fixed parameters needed to completely specify the objective function.
    :type args: tuple or list
    :param x0: Initial guess. If None, will be selected randomly and uniformly within the parameter bounds.
    :type x0: numpy.array with same shape as elements of bounds
    :param T: The "temperature" parameter for the accept or reject criterion. To sample the domain well,
        it should be of the order of the typical difference in local minima objective valuations.
    :type T: float
    :param N: Maximum number of minima to return.
    :type N: int
    :param burnin: Number of random steps to discard before starting to accumulate statistics.
    :type burnin: int
    :param maxiter: Maximum number of steps to take (including burnin).
    :type maxiter: int
    :param target_ar: Target acceptance rate of point samples generated by stepping.
    :type target_ar: float between 0 and 1
    :param ar_tolerance: Tolerance on the acceptance rate before actively adapting the step size.
    :type ar_tolerance: float
    :param cluster_eps: Point proximity tolerance for DBSCAN clustering, in normalized bounds coordinates.
    :type cluster_eps: float
    :param rnd_seed: Random seed to force deterministic behaviour.
    :type rnd_seed: int
    :param collect_samples: If not None and of integral type, collect ``collect_samples`` samples at
        regular intervals and return them as part of the solution.
    :type collect_samples: int or NoneType
    :param logger: Logger instance for outputting log messages.
    :return: OptimizeResult containing solution(s) and solver data.
    :rtype: scipy.optimize.OptimizeResult with additional attributes
    """
    @call_counter
    def obj_counted(*args):
        return objective(*args)
    # end func

    assert maxiter >= 2 * burnin, "maxiter {} should be at least twice burnin steps {}".format(
        maxiter, burnin)
    main_iter = maxiter - burnin

    if collect_samples is not None:
        assert isinstance(collect_samples, int), "collect_samples expected to be integral type"
        assert collect_samples > 0, "collect_samples expected to be positive"
    # end if

    beta = 1.0 / T

    if rnd_seed is None:
        rnd_seed = int(time.time() * 1000) % (1 << 31)
    # end if
    np.random.seed(rnd_seed)
    if logger:
        logger.info('Using random seed {}'.format(rnd_seed))
    # end if

    if x0 is None:
        x0 = np.random.uniform(bounds.lb, bounds.ub)
    # end if
    assert np.all((x0 >= bounds.lb) & (x0 <= bounds.ub))
    x = x0.copy()
    funval = obj_counted(x, *args)

    # Set up stepper with adaptive acceptance rate.
    stepper = BoundedRandNStepper(bounds)
    stepper = AdaptiveStepsize(stepper, accept_rate=target_ar, ar_tolerance=ar_tolerance, interval=50)

    # -------------------------------
    # DO BURN-IN
    rejected_randomly = 0
    accepted_burnin = 0
    tracked_range = tqdm(range(burnin), total=burnin, desc='BURN-IN')
    if logger:
        stepper.logger = lambda msg: tracked_range.write(logger.name + ':' + msg)
    else:
        stepper.logger = tracked_range.write
    # end if
    for _ in tracked_range:
        x_new = stepper(x)
        funval_new = obj_counted(x_new, *args)
        log_alpha = -(funval_new - funval) * beta
        if log_alpha > 0 or np.log(np.random.rand()) <= log_alpha:
            x = x_new
            funval = funval_new
            stepper.notify_accept()
            accepted_burnin += 1
        elif log_alpha <= 0:
            rejected_randomly += 1
        # end if
    # end for
    ar = float(accepted_burnin) / burnin
    if logger:
        logger.info("Burn-in acceptance rate: {}".format(ar))
    # end if

    # -------------------------------
    # DO MAIN LOOP
    if collect_samples is not None:
        nsamples = min(collect_samples, main_iter)
        sample_cadence = main_iter / nsamples
        samples = np.zeros((nsamples, len(x)))
        samples_fval = np.zeros(nsamples)
    # end if
    accepted = 0
    rejected_randomly = 0

    minima_sorted = SortedList(key=lambda rec: rec[1])  # Sort by objective function value
    hist = HistogramIncremental(bounds, nbins=100)
    # Cache a generous number of candidate minima, since they need to be clustered
    # before the best N results are returned.
    N_cached = int(np.ceil(N * main_iter / 500))
    next_sample = 0.0
    sample_count = 0
    tracked_range = tqdm(range(main_iter), total=main_iter, desc='MAIN')
    if logger:
        stepper.logger = lambda msg: tracked_range.write(logger.name + ':' + msg)
    else:
        stepper.logger = tracked_range.write
    # end if
    for i in tracked_range:
        if collect_samples and i >= next_sample:
            assert sample_count < collect_samples
            samples[sample_count] = x
            samples_fval[sample_count] = funval
            sample_count += 1
            next_sample += sample_cadence
        # end if
        x_new = stepper(x)
        funval_new = obj_counted(x_new, *args)
        log_alpha = -(funval_new - funval) * beta
        if log_alpha > 0 or np.log(np.random.rand()) <= log_alpha:
            x = x_new
            funval = funval_new
            minima_sorted.add((x, funval))
            if len(minima_sorted) > N_cached:
                minima_sorted.pop()
            # end if
            stepper.notify_accept()
            hist += x
            accepted += 1
        elif log_alpha <= 0:
            rejected_randomly += 1
        # end if
    # end for
    stepper.logger = None

    ar = float(accepted) / main_iter
    if logger:
        logger.info("Acceptance rate: {}".format(ar))
        logger.info("Best minima (before clustering):\n{}".format(
            np.array([_mx[0] for _mx in minima_sorted[:10]])))
    # end if

    # -------------------------------
    # Cluster minima and associate each cluster with a local minimum.
    # Using a normalized coordinate space for cluster detection.
    x_range = bounds.ub - bounds.lb
    pts = np.array([rec[0] for rec in minima_sorted])
    fvals = np.array([rec[1] for rec in minima_sorted])
    pts_norm = (pts - bounds.lb) / x_range
    _, labels = dbscan(pts_norm, eps=cluster_eps, min_samples=21, n_jobs=-1)

    # Compute mean of each cluster and evaluate objective function at cluster mean locations.
    minima_candidates = []
    for grp in range(max(labels) + 1):
        mask = (labels == grp)
        mean_loc = np.mean(pts[mask, :], axis=0)
        # Evaluate objective function precisely at the mean location of each cluster.
        fval = obj_counted(mean_loc, *args)
        minima_candidates.append((mean_loc, grp, fval))
    # end for

    # Rank minima locations by objective function.
    minima_candidates.sort(key=lambda c: c[2])

    # Pick up to N solutions.
    solutions = minima_candidates[:N]

    # Put results into OptimizeResult container.
    # Add histograms to output result (in form of scipy.stats.rv_histogram).
    solution = OptimizeResult()
    solution.x = np.array([s[0] for s in solutions])
    solution.clusters = [pts[(labels == s[1])] for s in solutions]
    solution.cluster_funvals = [fvals[(labels == s[1])] for s in solutions]
    solution.bins = hist.bins
    solution.distribution = hist.histograms
    solution.acceptance_rate = ar
    solution.success = True
    solution.status = 0
    if len(solutions) > 0:
        solution.message = 'SUCCESS: Found {} local minima'.format(len(solutions))
    else:
        solution.message = 'WARNING: Found no clusters within tolerance {}'.format(cluster_eps)
    # end if
    solution.fun = np.array([s[2] for s in solutions])
    solution.jac = None
    solution.nfev = obj_counted.counter
    solution.njev = 0
    solution.nit = main_iter
    solution.maxcv = None
    solution.samples = samples if collect_samples else None
    solution.sample_funvals = samples_fval if collect_samples else None
    solution.bounds = bounds
    solution.version = 's0.3'  # Solution version for future traceability
    solution.rnd_seed = rnd_seed

    return solution
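
# A minimal usage sketch, added for illustration, for
# ``optimize_minimize_mhmcmc_cluster`` above. The two-well objective, the
# small ``burnin``/``maxiter`` values (chosen only so the sketch runs quickly)
# and the helper name ``_example_mhmcmc_two_wells`` are hypothetical and not
# part of this module; a real run would use far more iterations.
def _example_mhmcmc_two_wells():
    """Sketch: recover multiple local minima of a simple two-well surface."""
    import numpy as np
    from scipy.optimize import Bounds

    def objective(xy):
        # Four symmetric minima near (+/-1, +/-1), separated by barriers of height ~1.
        x, y = xy
        return float((x**2 - 1.0)**2 + (y**2 - 1.0)**2)

    bounds = Bounds(np.array([-3.0, -3.0]), np.array([3.0, 3.0]))
    soln = optimize_minimize_mhmcmc_cluster(
        objective, bounds, T=0.5, N=4, burnin=5000, maxiter=20000,
        collect_samples=1000, rnd_seed=20210101)

    # soln.x holds up to N cluster-mean minima ranked by objective value;
    # soln.fun holds the corresponding objective values.
    return soln.x, soln.fun
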