def optimize (self, w=0.7, c1=1.7, c2=1.7, alpha=1.2, chaos_iters=500,
              max_pso_iters=10000, tol=1e-2, print_iters=False) :
    """ Optimization loop of PSO with a chaotic search around the global best """

    pbest, gbest = self._optim_init()

    # Set the chaotic generator if not previously set
    if self.cgen is None :
        self.cgen = cg.Tent((chaos_iters, self.D), mu=self.mu, gens=1)

    i = 0
    while True :
        # Velocity update equation
        r1, r2 = np.random.rand(self.Np, self.D), np.random.rand(self.Np, self.D)
        self.velocity = w*self.velocity + c1*r1*(pbest - self.particles) + c2*r2*(gbest - self.particles)

        # Perform velocity clipping before running ipcd() to minimize any violations
        self.velocity = pso.vclip(self.velocity, self.vmax)

        ######################################################################
        # Perform "Inverse Parabolic Confined Distribution" technique for
        # boundary handling. Also returns updated particle position and velocity
        ######################################################################
        self.particles, self.velocity = pso.ipcd(self.particles, self.velocity, self.llim, self.rlim, alpha)

        # Update pbest, gbest
        less = self.obj(self.particles) < self.obj(pbest)
        pbest[less] = np.copy(self.particles[less])
        gbest_ind = np.argmin(self.obj(pbest)).flatten()[0]

        # Chaotic search around the best personal best
        cp = pbest[gbest_ind] + self.rrat*(self.rlim - self.llim)*(2*self.cgen.chaosPoints(1) - 1)
        obj_cp = np.where(
            np.logical_and(self.llim.reshape(1, -1) <= cp, cp <= self.rlim.reshape(1, -1)).all(axis=1),
            self.obj(cp),
            np.inf)
        gbest_p = np.argmin(obj_cp).flatten()[0]

        # Update after chaotic search if feasible
        if obj_cp[gbest_p] != np.inf and obj_cp[gbest_p] < self.objkey(pbest[gbest_ind]) :
            new_vel = cp[gbest_p] - self.particles[gbest_ind]
            self.velocity[gbest_ind] = np.random.rand(self.D)*self.vmax*new_vel/np.linalg.norm(new_vel)
            pbest[gbest_ind] = self.particles[gbest_ind] = cp[gbest_p]

        # Copy gbest
        gbest = pbest[gbest_ind]
        self.conv_curve.append(self.objkey(gbest))

        # Shrink the chaotic search radius
        self.rrat *= self.rho

        i += 1
        if print_iters : print("\r{}".format(i), end="")

        # Stopping criteria
        if i == max_pso_iters or (np.abs(self.particles - gbest) < tol).all() :
            break

    grad = lambda x : -(c1*np.sum(r1) + c2*np.sum(r2))*(x - gbest)/(len(r1)*w)
    if print_iters : print("\n", end="")
    return self.optRet(gbest, grad, tol, i)
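# ---------------------------------------------------------------------------
# A minimal free-standing sketch of the chaotic-search step used above: sample
# a candidate from a tent-map generator inside a box of half-width
# rrat*(rlim - llim) around the incumbent, reject infeasible candidates, and
# accept only improvements while shrinking the radius by rho. The helper names
# tent_map() and chaotic_search_step() are illustrative only, and the exact map
# implemented by cg.Tent may differ.
# ---------------------------------------------------------------------------
# import numpy as np
#
# def tent_map(x, mu=0.49999) :
#     """ One step of a tent map used as a chaotic number generator """
#     return np.where(x < mu, x/mu, (1 - x)/(1 - mu))
#
# def chaotic_search_step(obj, gbest, llim, rlim, cx, rrat) :
#     """ Propose one chaotic candidate around gbest; infeasible -> np.inf """
#     cp = gbest + rrat*(rlim - llim)*(2*cx - 1)
#     feasible = np.logical_and(llim <= cp, cp <= rlim).all()
#     return cp, (float(obj(cp)) if feasible else np.inf)
#
# # Toy usage on a 2-D sphere objective
# llim, rlim = np.array([-5., -5.]), np.array([5., 5.])
# obj = lambda x : np.sum(x**2)
# gbest, rrat, rho = np.array([0.5, -0.3]), 0.2, 0.99
# cx = np.random.rand(2)                     # chaotic state
# for _ in range(100) :
#     cx = tent_map(cx)
#     cp, f_cp = chaotic_search_step(obj, gbest, llim, rlim, cx, rrat)
#     if f_cp < obj(gbest) :
#         gbest = cp                         # accept improvement
#     rrat *= rho                            # shrink search radius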
def optimize (self, c1=1, c2=1, alpha=1.2, beta=0.9, max_iters=10000, tol=1e-2, print_iters=False) :
    """
    Performs the PSO optimization loop
    Arguments are default PSO parameters
    Returns the optimum found, and a lambda function for the approximate gradient
    """

    momentum, pbest, gbest = self._optim_init()

    i = 0
    while True :
        # Using the first and second internal generators, randgen(1) and randgen(2) respectively
        r1, r2 = self.randgen(1), self.randgen(2)

        # Momentum and velocity update
        momentum = beta*momentum + (1-beta)*self.velocity
        self.velocity = momentum + c1*r1*(pbest - self.particles) + c2*r2*(gbest - self.particles)

        # Perform velocity clipping before running ipcd() to minimize any violations
        self.velocity = pso.vclip(self.velocity, self.vmax)

        ######################################################################
        # Perform "Inverse Parabolic Confined Distribution" technique for
        # boundary handling. Also returns updated particle position and velocity
        ######################################################################
        self.particles, self.velocity = pso.ipcd(self.particles, self.velocity, self.llim, self.rlim, alpha)

        # Update pbest, gbest
        less = self.obj(self.particles) < self.obj(pbest)
        pbest[less] = np.copy(self.particles[less])
        gbest = min(pbest, key=self.objkey)
        self.conv_curve.append(self.objkey(gbest))

        # Append to cache after updating particles, velocities, pbest and gbest
        self.appendCache(self.particles, self.velocity, momentum, pbest, gbest, r1, r2)

        i += 1
        if print_iters : print("\r{}".format(i), end="")

        # Stopping criteria
        if i == max_iters or (np.abs(self.particles - gbest) < tol).all() :
            break

    # Convert cache list to numpy ndarray
    self.numpifyCache()

    grad = lambda x : -(c1*np.sum(r1) + c2*np.sum(r2))*(x - gbest)/(len(r1)*(1-beta))
    if print_iters : print("\n", end="")
    return self.optRet(gbest, grad, tol, i)
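# ---------------------------------------------------------------------------
# For reference, the momentum variant above replaces the usual inertia term
# w*velocity with an exponential moving average of past velocities. The toy
# sketch below isolates just that update (toy sphere objective, no clipping or
# boundary handling); all names here are local to the sketch.
# ---------------------------------------------------------------------------
# import numpy as np
#
# Np, D = 5, 2
# beta, c1, c2 = 0.9, 1.0, 1.0
# obj = lambda x : np.sum(x**2, axis=1)
#
# x = np.random.uniform(-5, 5, (Np, D))      # particles
# v = np.zeros((Np, D))                      # velocities
# m = np.zeros((Np, D))                      # momentum (EMA of velocity)
# pbest = np.copy(x)
# gbest = pbest[np.argmin(obj(pbest))]
#
# for _ in range(100) :
#     r1, r2 = np.random.rand(Np, D), np.random.rand(Np, D)
#     m = beta*m + (1 - beta)*v                              # exponential moving average
#     v = m + c1*r1*(pbest - x) + c2*r2*(gbest - x)          # momentum replaces w*v
#     x = x + v
#     better = obj(x) < obj(pbest)
#     pbest[better] = x[better]
#     gbest = pbest[np.argmin(obj(pbest))]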
def replay (self, seed, c1=0.7, c2=0.7, alpha=1.2, beta=0.9) :
    """
    Given a pre-determined sequence of r1, r2 and a starting position,
    velocity and momentum, replays the PSO trajectory

    Typically, this is meant to be used after perturbing the starting
    position slightly
    """

    # Work on copies so the cached seed is not modified
    part, vel, mom, pb, gb, r1s, r2s = (np.copy(s) for s in seed)
    pcache, vcache, mcache, pbcache, gbcache = [part], [vel], [mom], [pb], [gb]

    for r1, r2 in zip(r1s, r2s) :
        # Momentum and velocity update
        mom = beta*mom + (1-beta)*vel
        vel = mom + c1*r1*(pb - part) + c2*r2*(gb - part)

        # Velocity clipping and IPCD boundary handling
        vel = pso.vclip(vel, self.vmax)
        part, vel = pso.ipcd(part, vel, self.llim, self.rlim, alpha)

        # Update pbest, gbest
        less = self.obj(part) < self.obj(pb)
        pb[less] = part[less]
        gb = min(pb, key=lambda x : self.obj(x.reshape(1, -1))[0])

        # Record the replayed trajectory; pb and gb are copied since pb is
        # updated in place and gb is a view into it
        pcache.append(part)
        vcache.append(vel)
        mcache.append(mom)
        pbcache.append(np.copy(pb))
        gbcache.append(np.copy(gb))

    return np.array(pcache), np.array(vcache), np.array(mcache), np.array(pbcache), np.array(gbcache)
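# ---------------------------------------------------------------------------
# Hypothetical usage of replay(). The cache attribute names used below
# (pcache, vcache, mcache, pbcache, gbcache, r1cache, r2cache) are assumptions
# about how numpifyCache() exposes the history recorded by appendCache(); only
# the structure of the seed tuple is taken from replay()'s signature.
# ---------------------------------------------------------------------------
# k = 10                                                    # iteration to branch from
# eps = 1e-3*np.random.rand(*opt.pcache[k].shape)           # small position perturbation
#
# seed = (opt.pcache[k] + eps,     # perturbed starting positions
#         opt.vcache[k],           # starting velocities
#         opt.mcache[k],           # starting momenta
#         opt.pbcache[k],          # personal bests at iteration k
#         opt.gbcache[k],          # global best at iteration k
#         opt.r1cache[k:],         # replayed r1 sequence
#         opt.r2cache[k:])         # replayed r2 sequence
#
# pc, vc, mc, pbc, gbc = opt.replay(seed)
# # Compare pc against the original opt.pcache[k:] to measure how quickly the
# # perturbed trajectory diverges from the recorded one.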
def optimize (self, w=0.7, c1=1.7, c2=1.7, alpha=1.2, max_iters=10000, tol=1e-2, print_iters=False) :
    """ Runs the PSO loop with a per-particle chaotic search on stagnation """

    fitness_q, pbest, gbest = self._optim_init()

    # Set the chaotic generator if not previously set
    if self.cgen is None :
        self.cgen = cg.Logistic((self.Gmax, self.D), gens=1)

    i = -1
    while True :
        i += 1
        if print_iters : print("\r{}".format(i), end="")

        # Stopping criteria
        if i == max_iters or (np.abs(self.particles - gbest) < tol).all() :
            break

        # Chaotic search for stagnating particles
        if i >= self.Nc :
            fits_ps = np.array(fitness_q).transpose()
            for j, fits_p in enumerate(fits_ps) :
                # Particle j has stagnated if, over the last Nc iterations, its
                # relative fitness gap to gbest stays below stag_tol
                if ((fits_p - self.obj(gbest.reshape(1, -1)))/fits_p < self.stag_tol).all() :
                    chaos_points = self.particles[j] + self.rrat*(self.rlim - self.llim)*(2*self.cgen.chaosPoints(1) - 1)
                    obj_cp = np.where(
                        np.logical_and(self.llim.reshape(1, -1) <= chaos_points, chaos_points <= self.rlim.reshape(1, -1)).all(axis=1),
                        self.obj(chaos_points),
                        np.inf)
                    gbest_p = np.argmin(obj_cp).flatten()[0]

                    # Update after chaotic search if feasible
                    if obj_cp[gbest_p] != np.inf and obj_cp[gbest_p] < self.objkey(self.particles[j]) :
                        self.velocity[j] = self.particles[j] - chaos_points[gbest_p]
                        self.particles[j] = chaos_points[gbest_p]

        # Perform velocity clipping before running ipcd() to minimize any violations
        self.velocity = pso.vclip(self.velocity, self.vmax)

        ######################################################################
        # Perform "Inverse Parabolic Confined Distribution" technique for
        # boundary handling. Also returns updated particle position and velocity
        ######################################################################
        self.particles, self.velocity = pso.ipcd(self.particles, self.velocity, self.llim, self.rlim, alpha)

        # Update pbest, gbest
        less = self.obj(self.particles) < self.obj(pbest)
        pbest[less] = np.copy(self.particles[less])
        gbest = min(pbest, key=self.objkey)
        self.conv_curve.append(self.objkey(gbest))

        # Append fitness for tracking whether to enter chaotic search
        fitness_q.append(self.obj(self.particles))

        # Velocity update
        r1, r2 = np.random.rand(self.Np, self.D), np.random.rand(self.Np, self.D)
        self.velocity = w*self.velocity + c1*r1*(pbest - self.particles) + c2*r2*(gbest - self.particles)

    grad = lambda x : -(c1*np.sum(r1) + c2*np.sum(r2))*(x - gbest)/(len(r1)*w)
    if print_iters : print("\n", end="")
    return self.optRet(gbest, grad, tol, i)
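# ---------------------------------------------------------------------------
# Toy numeric check of the stagnation test above: a particle is handed to the
# chaotic search when its relative gap to the gbest fitness has stayed below
# stag_tol for the last Nc recorded fitnesses. Values are illustrative only.
# ---------------------------------------------------------------------------
# import numpy as np
#
# stag_tol = 1e-4
# gbest_fit = 1.000
# fits_p = np.array([1.00008, 1.00005, 1.00003])     # last Nc = 3 fitnesses of one particle
#
# stagnated = ((fits_p - gbest_fit)/fits_p < stag_tol).all()
# print(stagnated)    # True -> this particle would receive a chaotic restart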
def forward (self, rad_init=None, w=0.7, c1=1.7, c2=1.7, alpha=1.2, max_iters=10000,
             local_div=0, rad_search_points=500, local_iters=500, rrat=0.5, rho=0.999,
             trap_inits=25, trap_rat=0.2, tol=1e-2, print_iters=False) :
    """ Forward PSO with hull exclusion and radial search """

    # Initialise particles and necessary states
    self.initParticles(rad_init)
    pbest = np.copy(self.particles)
    gbest = min(pbest, key=self.objkey)
    self.conv_curve = [self.objkey(gbest)]
    trap = []

    i = 0
    while True :
        # Velocity update
        r1, r2 = np.random.rand(self.Np, self.D), np.random.rand(self.Np, self.D)
        self.velocity = w*self.velocity + c1*r1*(pbest - self.particles) + c2*r2*(gbest - self.particles)

        # Perform velocity clipping before running ipcd() to minimize any violations
        self.velocity = pso.vclip(self.velocity, self.vmax)

        ######################################################################
        # Perform "Inverse Parabolic Confined Distribution" technique for
        # boundary handling. Also returns updated particle position and velocity
        ######################################################################
        self.particles, self.velocity = pso.ipcd(self.particles, self.velocity, self.llim, self.rlim, alpha)

        # Hull exclusion
        trap.append(0)
        if self.hulls != [] :
            for j, qp in enumerate(self.particles) :
                for hull in self.hulls :
                    if hull.isPointIn(qp) :
                        # Centre about which the radial search occurs within a search radius
                        rad_cent = pbest[np.argmin(np.array([
                            np.inf if k == j else self.objkey(pb)
                            for k, pb in enumerate(pbest)
                        ])).flatten()[0]]
                        rad_points = rad_cent + self.vmax*(2*np.random.rand(rad_search_points, self.D) - 1)

                        # Checking if a radial point violates the dimension limits
                        lim_rp = np.logical_or(self.llim.reshape(1, -1) > rad_points,
                                               rad_points > self.rlim.reshape(1, -1)).any(axis=1)

                        # Checking if a radial point itself is in some minima hull
                        rp_in_hull = np.array([
                            np.array([ h.isPointIn(rp) for h in self.hulls ]).any()
                            for rp in rad_points
                        ])

                        # Disallow radial points that violate the search limits or lie within some hull
                        obj_rp = np.where(np.logical_or(lim_rp, rp_in_hull), np.inf, self.obj(rad_points))

                        # Best radial point
                        gbest_rp = np.argmin(obj_rp).flatten()[0]

                        # Replace original particle with the best radial point
                        pbest[j] = self.particles[j] = rad_points[gbest_rp]
                        new_vel = self.particles[j] - rad_cent
                        self.velocity[j] = np.random.rand(self.D)*self.vmax*new_vel/np.linalg.norm(new_vel)
                        trap[-1] += 1
                        break

        # Update pbest, gbest
        less = self.obj(self.particles) < self.obj(pbest)
        pbest[less] = np.copy(self.particles[less])
        gbest_ind = np.argmin(self.obj(pbest)).flatten()[0]

        # Local search
        local_search = False
        if self.hulls == [] or (not local_div) or (i > 0 and not (i % local_div)) :
            lp = pbest[gbest_ind] + rrat*(self.rlim - self.llim)*(2*np.random.rand(rad_search_points, self.D) - 1)
            lp_in_lims = np.logical_and(self.llim.reshape(1, -1) <= lp, lp <= self.rlim.reshape(1, -1)).all(axis=1)
            lp_out_hulls = np.ones_like(lp_in_lims).astype(bool) if self.hulls == [] \
                else np.array([ not np.array([ hull.isPointIn(qp) for hull in self.hulls ]).any() for qp in lp ])
            obj_lp = np.where(np.logical_and(lp_in_lims, lp_out_hulls), self.obj(lp), np.inf)
            gbest_p = np.argmin(obj_lp).flatten()[0]
            local_search = True

        # Reset gbest if the local search found a feasible improvement
        if local_search and obj_lp[gbest_p] != np.inf and obj_lp[gbest_p] < self.objkey(pbest[gbest_ind]) :
            new_vel = lp[gbest_p] - self.particles[gbest_ind]
            pbest[gbest_ind] = self.particles[gbest_ind] = lp[gbest_p]
            self.velocity[gbest_ind] = np.random.rand(self.D)*self.vmax*new_vel/np.linalg.norm(new_vel)

        # Copy gbest
        gbest = pbest[gbest_ind]
        self.conv_curve.append(self.objkey(gbest))
        rrat *= rho

        i += 1
        if print_iters : print("\rForward = {}".format(i), end="")

        # Trapping condition
        if i >= trap_inits and sum(trap[-trap_inits:])/trap_inits >= np.ceil(trap_rat*self.Np) :
            if print_iters : print("\n", end="")
            return { 'rets' : tuple(4*[None]), 'kwrets' : {} }

        # Stopping criteria
        if i == max_iters or (np.abs(self.particles - gbest) < tol).all() :
            break

    grad = lambda x : -(c1*np.sum(r1) + c2*np.sum(r2))*(x - gbest)/(len(r1)*w)
    if print_iters : print("\n", end="")
    return self.optRet(gbest, grad, tol, i)
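# ---------------------------------------------------------------------------
# A common way to implement a Hull.isPointIn()-style membership test, as used
# by the hull-exclusion logic above, is via a Delaunay triangulation of the
# hull vertices. Whether the module's Hull class works this way is an
# assumption; the ConvexRegion class below is purely illustrative.
# ---------------------------------------------------------------------------
# import numpy as np
# from scipy.spatial import Delaunay
#
# class ConvexRegion :
#     def __init__(self, points) :
#         self.tri = Delaunay(points)
#
#     def isPointIn(self, x) :
#         # find_simplex() returns -1 for points outside the triangulation
#         return self.tri.find_simplex(x) >= 0
#
# pts = np.random.rand(20, 2)                         # vertices around one explored minimum
# region = ConvexRegion(pts)
# print(region.isPointIn(np.array([0.5, 0.5])))       # likely True (inside the cloud)
# print(region.isPointIn(np.array([5.0, 5.0])))       # False (well outside)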
def reverse (self, opt, w=0.7, c1=1.7, c2=1.7, min_iters=50, max_iters=1000, print_iters=False) :
    """ Reverse PSO loop """

    xs = opt + 1e-3*(np.random.rand(self.Np, self.D) - 0.5)
    vs = 1e-3*np.random.rand(self.Np, self.D)
    pbest = np.copy(xs)
    gbest = min(pbest, key=self.objkey)

    # Same magnitude as the forward PSO stopping criteria tolerance
    vmax = 1e-2*np.ones_like(self.llim).reshape(1, -1)

    # less_once, fs = False, None
    delta_xs = xs - opt
    nxs = delta_xs/np.linalg.norm(delta_xs, axis=1, keepdims=True)
    fs = np.array([
        get_dirmin(opt, nx, self.objkey, self.llim, self.rlim)
        for nx in nxs
    ])

    for i in range(max_iters) :
        r1s = np.random.rand(self.Np, self.D)
        r2s = np.random.rand(self.Np, self.D)
        pb_past = fs

        # Each dimension of a particle has an associated update matrix
        mats = np.array([
            [
                [1 - c1*r1s[p,d] - c2*r2s[p,d], w, c1*r1s[p,d]*pb_past[p,d] + c2*r2s[p,d]*gbest[d]],
                [   -c1*r1s[p,d] - c2*r2s[p,d], w, c1*r1s[p,d]*pb_past[p,d] + c2*r2s[p,d]*gbest[d]],
                [                            0, 0, 1]
            ]
            for p in range(self.Np)
            for d in range(self.D)
        ]).reshape(self.Np, self.D, 3, 3)

        # Invert update matrices and find the reverse position
        vecs = np.array([
            np.dot(np.linalg.inv(mats[p,d]), np.array([xs[p,d], vs[p,d], 1]))
            for p in range(self.Np)
            for d in range(self.D)
        ]).reshape(self.Np, self.D, 3)
        vs = vecs[...,1]

        # Velocity clipping applies
        vs = pso.vclip(vs, vmax)

        # Apply IPCD boundary handling, note the minus sign on 'vs'
        xs, vs = pso.ipcd(xs, -vs, self.llim, self.rlim)

        # Update pbest and gbest
        less = self.obj(xs) <= self.obj(pbest)
        xs[less] = np.copy(pbest[less])
        more = np.invert(less)
        pbest[more] = np.copy(xs[more])
        gbest = min(pbest, key=self.objkey)

        if print_iters : print("\rReverse = {}".format(i), end="")
        if less.all() : break

    self.hulls.append(Hull(pbest, opt))
    if print_iters : print("\n", end="")
    return i
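# ---------------------------------------------------------------------------
# For intuition, the 3x3 matrix built in reverse() packs one PSO step for a
# single dimension of a single particle into an affine map on (x, v, 1):
# applying it gives v' = w*v + c1*r1*(pb - x) + c2*r2*(gb - x) and x' = x + v',
# and applying its inverse walks that step backwards. A quick standalone check
# with arbitrarily chosen values:
# ---------------------------------------------------------------------------
# import numpy as np
#
# w, c1, c2 = 0.7, 1.7, 1.7
# r1, r2 = np.random.rand(), np.random.rand()
# x, v, pb, gb = 0.3, -0.1, 0.25, 0.2
#
# M = np.array([
#     [1 - c1*r1 - c2*r2, w, c1*r1*pb + c2*r2*gb],
#     [   -c1*r1 - c2*r2, w, c1*r1*pb + c2*r2*gb],
#     [                0, 0, 1]
# ])
#
# x_new, v_new, _ = M @ np.array([x, v, 1.0])
# assert np.isclose(v_new, w*v + c1*r1*(pb - x) + c2*r2*(gb - x))   # PSO velocity update
# assert np.isclose(x_new, x + v_new)                               # PSO position update
#
# x_back, v_back, _ = np.linalg.inv(M) @ np.array([x_new, v_new, 1.0])
# assert np.isclose(x_back, x) and np.isclose(v_back, v)            # reverse step recovers the state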