def main():
    """Run a plain global-best PSO for GEN generations.

    Returns
    -------
    (pop, logbook, best) : the final swarm, the per-generation statistics
        logbook, and the best particle found.
    """
    swarm = toolbox.population(n=POP_SIZE)

    # Per-generation fitness statistics.
    stats = tools.Statistics(lambda ind: ind.fitness.values)
    for label, fn in (("avg", numpy.mean), ("std", numpy.std),
                      ("min", numpy.min), ("max", numpy.max)):
        stats.register(label, fn)

    logbook = tools.Logbook()
    logbook.header = ["gen", "evals"] + stats.fields

    global_best = None
    for gen in range(GEN):
        # Evaluate every particle and refresh personal / global bests.
        for particle in swarm:
            particle.fitness.values = toolbox.evaluate(particle)
            if not particle.best or particle.best.fitness < particle.fitness:
                particle.best = creator.Particle(particle)
                particle.best.fitness.values = particle.fitness.values
            if not global_best or global_best.fitness < particle.fitness:
                global_best = creator.Particle(particle)
                global_best.fitness.values = particle.fitness.values
        # Move the whole swarm toward the personal and global bests.
        for particle in swarm:
            toolbox.update(particle, global_best)
        logbook.record(gen=gen, evals=len(swarm), **stats.compile(swarm))
    return swarm, logbook, global_best
def optimize(budget, func, dim_regs, dim_size):
    """Minimise ``func`` with PSO until the evaluation budget is exhausted.

    Parameters
    ----------
    budget : int
        Maximum number of objective evaluations, tracked by get_cnt().
    func : callable
        Objective function, called with one particle (a list of floats).
    dim_regs : sequence
        (lower, upper) bound shared by every dimension.
    dim_size : int
        Number of dimensions.

    Returns
    -------
    The best particle found; its fitness is in ``best.fitness.values``.
    """
    # Velocity is capped at one fifth of the upper bound.
    speed_lim = dim_regs[1] / 5
    creator.create("FitnessMin", base.Fitness, weights=(-1.0,))
    creator.create("Particle", list, fitness=creator.FitnessMin,
                   speed=list, smin=None, smax=None, best=None)
    toolbox = base.Toolbox()
    toolbox.register("particle", generate, size=dim_size,
                     pmin=dim_regs[0], pmax=dim_regs[1],
                     smin=-speed_lim, smax=speed_lim)
    toolbox.register("population", tools.initRepeat, list, toolbox.particle)
    toolbox.register("update", updateParticle, phi1=2.0, phi2=2.0)
    toolbox.register("evaluate", lambda x: (func(x), ))

    # Cleaned up: removed the unused locals (fmin, i) and the leftover
    # debug print of the budget.
    pop_size = 10
    pop = toolbox.population(n=pop_size)
    best = None
    while get_cnt() < budget:
        for part in pop:
            part.fitness.values = toolbox.evaluate(part)
            # Weighted comparison: with FitnessMin, "greater" fitness
            # means a smaller raw objective value.
            if not part.best or part.best.fitness < part.fitness:
                part.best = creator.Particle(part)
                part.best.fitness.values = part.fitness.values
            if not best or best.fitness < part.fitness:
                best = creator.Particle(part)
                best.fitness.values = part.fitness.values
        for part in pop:
            toolbox.update(part, best)
    clear_epoch()
    return best
def main():
    """PSO driver: 5 particles, 1000 generations, stats printed per generation.

    Returns (pop, logbook, best).
    """
    swarm = toolbox.population(n=5)

    stats = tools.Statistics(lambda ind: ind.fitness.values)
    stats.register("avg", numpy.mean)
    stats.register("std", numpy.std)
    stats.register("min", numpy.min)
    stats.register("max", numpy.max)

    logbook = tools.Logbook()
    logbook.header = ["gen", "evals"] + stats.fields

    GEN = 1000
    global_best = None
    for gen in range(GEN):
        for particle in swarm:
            particle.fitness.values = toolbox.evaluate(particle)
            # Personal best first, then the swarm-wide best.
            if particle.best is None or particle.best.fitness < particle.fitness:
                particle.best = creator.Particle(particle)
                particle.best.fitness.values = particle.fitness.values
            if global_best is None or global_best.fitness < particle.fitness:
                global_best = creator.Particle(particle)
                global_best.fitness.values = particle.fitness.values
        for particle in swarm:
            toolbox.update(particle, global_best)
        # Record and display this generation's statistics.
        logbook.record(gen=gen, evals=len(swarm), **stats.compile(swarm))
        print(logbook.stream)
    return swarm, logbook, global_best
def main(): NPOP = 100 # number of particles pop = toolbox.population(n=NPOP) # register the functions used to calculate stats stats = tools.Statistics(lambda ind: ind.fitness.values) stats.register("mean", np.nanmean) stats.register("std", np.nanstd) stats.register("min", np.nanmin) stats.register("max", np.nanmax) # initialize the logbook logbook = tools.Logbook() logbook.header = ["gen", "evals", "invalidevals"] + stats.fields GEN = 40 # number of maximum generations best = None BETA_INIT = 1.0 # initial value of contraction-expansion coefficient BETA_FIN = 0.5 # final value of contraction-expansion coefficient """TIPS: large beta -> global search, small beta -> local search beta_init: 0.8 to 1.2 beta_fin : below 0.6 beta must be below e^gamma=1.781 to guarantee convergence of the particle""" betas = np.linspace(BETA_INIT, BETA_FIN, GEN) meanbest = np.zeros(len(pop[0])) # initialize the meanbest for g, beta in zip(range(GEN), betas): meanbest[:] = np.zeros(len(pop[0])) # reinitialize the meanbest for part in pop: part.fitness.values = toolbox.evaluate(part) # reinitialize the particle if its initial fitness value is nan if part.best is None: while np.isnan(part.fitness.values): part[:] = generate(size=None, pmin=part.pmin, pmax=part.pmax) part.fitness.values = toolbox.evaluate(part) # update the particles best position if part.best is None or part.best.fitness < part.fitness: part.best = creator.Particle(part) part.best.fitness.values = part.fitness.values # update the global best position if best is None or best.fitness < part.fitness: best = creator.Particle(part) best.fitness.values = part.fitness.values meanbest += part.best meanbest /= NPOP for i, part in enumerate(pop): toolbox.update(part, best, meanbest, beta) # Gather all the fitnesses in one list and print the stats logbook.record(gen=g, evals=len(pop) * (g + 1), invalidevals=INVALID, **stats.compile(pop)) print(logbook.stream) if logbook.select("std")[-1] <= 1e-5: break return pop, logbook, best
def main_pso():
    """Distributed PSO: the swarm is evaluated in parallel via scoop's
    futures.map.  Runs 10 generations over 1000 particles and prints the
    generation number; returns nothing (the return is intentionally absent)."""
    swarm = toolbox.population(n=1000)

    stats = tools.Statistics(lambda ind: ind.fitness.values)
    stats.register("avg", numpy.mean)
    stats.register("std", numpy.std)
    stats.register("min", numpy.min)
    stats.register("max", numpy.max)

    logbook = tools.Logbook()
    logbook.header = ["gen", "evals"] + stats.fields

    GEN = 10
    best = None
    for gen in range(GEN):
        # Parallel evaluation of the whole swarm.
        fitnesses = list(futures.map(toolbox.evaluate, swarm))
        for individual, fitness in zip(swarm, fitnesses):
            individual.fitness.values = fitness
        # Refresh personal and global bests.
        for particle in swarm:
            if not particle.best or particle.best.fitness < particle.fitness:
                particle.best = creator.Particle(particle)
                particle.best.fitness.values = particle.fitness.values
            if not best or best.fitness < particle.fitness:
                best = creator.Particle(particle)
                best.fitness.values = particle.fitness.values
        # Move the swarm.
        for particle in swarm:
            toolbox.update(particle, best)
        logbook.record(gen=gen, evals=len(swarm), **stats.compile(swarm))
        print("generation: %i" % gen)
def maximize(self):
    """Maximise the registered objective with PSO over NGEN generations.

    Statistics are collected and printed only when self.printLog is set.
    Returns (best_particle, best_fitness_value, total_evaluations).
    """
    swarm = self.toolbox.population(n=MU)
    if self.printLog:
        # Stats/logbook are only needed when logging is enabled.
        stats = tools.Statistics(lambda ind: ind.fitness.values)
        stats.register("avg", np.mean)
        stats.register("std", np.std)
        stats.register("min", np.min)
        stats.register("max", np.max)
        logbook = tools.Logbook()
        logbook.header = ["gen", "evals"] + stats.fields
    best = None
    for gen in range(NGEN):
        for particle in swarm:
            particle.fitness.values = self.toolbox.evaluate(particle)
            if not particle.best or particle.best.fitness < particle.fitness:
                particle.best = creator.Particle(particle)
                particle.best.fitness.values = particle.fitness.values
            if not best or best.fitness < particle.fitness:
                best = creator.Particle(particle)
                best.fitness.values = particle.fitness.values
        for particle in swarm:
            self.toolbox.update(particle, best)
        if self.printLog:
            logbook.record(gen=gen, evals=len(swarm), **stats.compile(swarm))
            print(logbook.stream)
    return best, best.fitness.values[0], NGEN * MU
def pso_inercia():
    """PSO with a linearly decaying inertia factor (0.9 down toward 0.4).

    Runs 1000 generations over 50 particles and returns the global best.
    """
    swarm = toolbox.population(n=50)

    stats = tools.Statistics(lambda ind: ind.fitness.values)
    stats.register("avg", numpy.mean)
    stats.register("std", numpy.std)
    stats.register("min", numpy.min)
    stats.register("max", numpy.max)

    logbook = tools.Logbook()
    logbook.header = ["gen", "evals"] + stats.fields

    GEN = 1000
    best = None
    for gen in range(GEN):
        # Inertia shrinks linearly with the generation index.
        inertia = 0.9 - gen * (0.5 / GEN)
        for particle in swarm:
            particle.fitness.values = toolbox.evaluate(particle)
            if not particle.best or particle.best.fitness < particle.fitness:
                particle.best = creator.Particle(particle)
                particle.best.fitness.values = particle.fitness.values
            if not best or best.fitness < particle.fitness:
                best = creator.Particle(particle)
                best.fitness.values = particle.fitness.values
        # The update rule takes the current inertia as an extra argument.
        for particle in swarm:
            toolbox.update(particle, best, inertia)
        logbook.record(gen=gen, evals=len(swarm), **stats.compile(swarm))
    return best
def pso_algorithm():
    """Verbose PSO run (5 particles, 5 generations) that traces every
    fitness/best update to stdout; returns (best, best_fitness_value)."""
    pop = toolbox.population(n=5)
    GEN = 5
    best = None
    list_max = []
    # Seed every particle's personal best with a sentinel negative fitness
    # so the first real evaluation always improves on it.
    for part in pop:
        part.best = creator.Particle(part)
        part.best.fitness.values = -1,  # start with a negative fitness
    for g in range(GEN):
        print("-----------------------GENERATION %f -----------------------" % g)
        for part in pop:
            part.fitness.values = toolbox.evaluate(
                part)  # evaluate every particle
            # Keep a per-particle history of fitness and personal best.
            part.record.append(part.fitness.values)
            part.record_best.append(part.best.fitness.values)
            print("**** particle %d" % part.identificador)
            print("**** FITNESS OBTAINED %f" % part.fitness.values)
            if part.best.fitness < part.fitness:  # check the best particle position
                print("IMPROVED LOCAL")
                print("++> best local stored %f" % part.best.fitness.values)
                part.best = creator.Particle(part)
                part.best.fitness.values = part.fitness.values
                print("++> new best local %f" % part.best.fitness.values)
                x_l, y_l = get_cor(part.best)
                print("++> coordinates of the best local %s" % x_l)
                x_p, y_p = get_cor(part)
                print("++> coordinates of the particle %s" % x_p)
            else:
                print("dont improved")
                print("best local stored %f" % part.best.fitness.values)
                x_l, y_l = get_cor(part.best)
                print("++> coordinates of the best local %s" % x_l)
                x_p, y_p = get_cor(part)
                print("++> coodinates of the particle %s" % x_p)
            if not best or best.fitness < part.fitness:  # check the best particle
                if best:
                    print("--> the best global fitness stored %f" % best.fitness.values)
                best = creator.Particle(part)
                best.fitness.values = part.fitness.values
                print("--> the best global fitness %f" % best.fitness.values)
                list_max.append(
                    part.fitness.values)  # store the best particles so far
            # NOTE(review): the particle is moved immediately inside the
            # evaluation loop rather than in a separate pass after all
            # bests are known — confirm this is intentional.
            toolbox.update(part, best)
    # Final dump of every particle's history.
    for part in pop:
        print("Particle %d" % part.identificador)
        print("fitnes for each iteration")
        print(part.record)
        print("the best local fitness")
        print(part.record_best)
    print("?????? best fitness %f" % best.fitness.values)
    return best, best.fitness.values[0]
def main():
    """PSO-based clustering driver: 20 particles, 100 generations.

    Tracks the best particle (including the centroids that produced its
    fitness) and prints the final SSE plus the 20 cluster sizes.
    Returns (pop, best).
    """
    pop = toolbox.population(n=20)
    # NOTE(review): stats are registered but never compiled or recorded below.
    stats = tools.Statistics(lambda indi: indi.fitness.values)
    stats.register("avg", np.mean)
    stats.register("std", np.std)
    stats.register("min", np.min)
    stats.register("max", np.max)
    GEN = 100
    best = None
    for g in range(GEN):
        for part in pop:
            part.fitness.values = toolbox.evaluate(part)
            if part.best is None or part.best.fitness < part.fitness:
                part.best = creator.Particle(part)
                part.best.fitness.values = part.fitness.values
            if best is None or best.fitness < part.fitness:
                best = creator.Particle(part)
                best.fitness.values = part.fitness.values
                # Keep the centroids that produced the new best fitness.
                best.centroids = part.centroids
    # NOTE(review): the swarm-update loop is commented out, so particles
    # never move — confirm whether this is intentional.
    #for part in pop:
    print("Some of Squared Error: " + str(SSE(best)))
    # Print the size of each of the best particle's 20 clusters.
    for i in range(20):
        print(str(i) + ": " + str(len(best.clusters[str(i)])))
    return pop, best
def main():
    """PSO driver: 5 particles for 1000 generations; prints the statistics
    stream each generation and returns only the global best particle."""
    swarm = toolbox.population(n=5)

    stats = tools.Statistics(lambda ind: ind.fitness.values)
    for name, func in (("avg", numpy.mean), ("std", numpy.std),
                       ("min", numpy.min), ("max", numpy.max)):
        stats.register(name, func)

    logbook = tools.Logbook()
    logbook.header = ["gen", "evals"] + stats.fields

    GEN = 1000
    best = None
    for gen in range(GEN):
        # Evaluate every particle.
        for particle in swarm:
            particle.fitness.values = toolbox.evaluate(particle)
            # Check the particle's best position.
            if not particle.best or particle.best.fitness < particle.fitness:
                particle.best = creator.Particle(particle)
                particle.best.fitness.values = particle.fitness.values
            # Check the swarm's best particle.
            if not best or best.fitness < particle.fitness:
                best = creator.Particle(particle)
                best.fitness.values = particle.fitness.values
        # Update the velocities of the whole swarm.
        for particle in swarm:
            toolbox.update(particle, best)
        logbook.record(gen=gen, evals=len(swarm), **stats.compile(swarm))
        print(logbook.stream)
    return best
def main():
    """Run PSO for args.max_gen generations, persisting the best parameters
    after every generation (pickle + logger); returns (pop, logbook, best)."""
    log.infov('[PSO] Starting PSO algorithm')
    pop = toolbox.population(n=POP_SIZE)
    stats = tools.Statistics(lambda ind: ind.fitness.values)
    stats.register("avg", numpy.mean)
    stats.register("std", numpy.std)
    stats.register("min", numpy.min)
    stats.register("max", numpy.max)
    logbook = tools.Logbook()
    logbook.header = ["gen", "evals"] + stats.fields
    # Maximum generations
    MAX_GEN = args.max_gen
    best = None
    for g in range(MAX_GEN):
        # Evaluate the whole swarm (toolbox.map may be parallel); each
        # evaluation returns a dict with a 'fitness' entry.
        fitnesses = toolbox.map(toolbox.evaluate, pop)
        for part, fit in zip(pop, fitnesses):
            part.fitness.values = (fit['fitness'], )
            if not part.best or part.best.fitness < part.fitness:
                part.best = creator.Particle(part)
                part.best.fitness.values = part.fitness.values
            if not best or best.fitness < part.fitness:
                best = creator.Particle(part)
                best.fitness.values = part.fitness.values
        for part in pop:
            toolbox.update(part, best)
        # Gather all the fitnesses in one list and print the stats
        logbook.record(gen=g, evals=len(pop), **stats.compile(pop))
        log.info(logbook.stream)
        # Snapshot the generation's best parameters and fitness.
        result = {
            'gen{}'.format(g): {
                'best_params': [best[i] for i in range(len(best))],
                'best_fitness': best.fitness.values[0],
            }
        }
        # Accumulate snapshots across generations and persist them all.
        if g == 0:
            results = result
        else:
            results.update(result)
        results_IO.to_pickle(results)
        logger.log({
            'generation': g,
            'best_params': [best[i] for i in range(len(best))],
            'best_fitness': best.fitness.values[0],
        })
        logger.write(display=False)
    log.infov('best ={}'.format(best.fitness.values[0]))
    log.infov('best parm :{}'.format([best[i] for i in range(len(best))]))
    return pop, logbook, best
def generate(bound_l, bound_u, size=None):
    """Create a particle with uniform-random position and velocity.

    Generalised: the dimensionality, previously hard-coded to the global
    DIM, can now be passed explicitly; the default preserves the old
    behaviour for existing callers.

    Parameters
    ----------
    bound_l, bound_u : float
        Lower/upper position bound applied to every dimension.
    size : int, optional
        Number of dimensions; defaults to the module-level DIM.

    Returns
    -------
    A particle whose velocity components are drawn from
    +/-|bound_u - bound_l| and whose best_known starts at its position.
    """
    dim = DIM if size is None else size
    particle = creator.Particle(
        np.random.uniform(bound_l, bound_u) for _ in range(dim))
    span = bound_u - bound_l
    particle.velocity = np.array(
        [np.random.uniform(-abs(span), abs(span)) for _ in range(dim)])
    particle.best_known = creator.Particle(particle)
    return particle
def generate(size, bound_l, bound_u):
    """Build a particle of `size` uniform-random coordinates in
    [bound_l, bound_u], with velocity components in +/-|bound_u - bound_l|;
    its best-known position starts at the initial position."""
    span = abs(bound_u - bound_l)
    coords = [np.random.uniform(bound_l, bound_u) for _ in range(size)]
    particle = creator.Particle(coords)
    particle.velocity = [np.random.uniform(-span, span) for _ in range(size)]
    particle.best_known = creator.Particle(particle)
    return particle
def main():
    """Chaotic-map PSO: evolves two chaotic maps (Map1/Map2) alongside the
    swarm; the maps presumably drive toolbox.update via the module globals.

    Returns (pop, logbook, best).
    """
    global current_gen, Map1, Map2, current_particle, Normal_Map1, Normal_Map2
    import copy  # local import: needed only for the deep-copy fix below

    pop = toolbox.population(n=POP_SIZE)
    stats = tools.Statistics(lambda ind: ind.fitness.values)
    stats.register("avg", np.mean)
    stats.register("std", np.std)
    stats.register("min", np.min)
    stats.register("max", np.max)
    logbook = tools.Logbook()
    logbook.header = ["gen", "evals"] + stats.fields
    best = None

    # CHAOTIC MAP GENERATOR: each map starts with two random
    # POP_SIZE x DIM_SIZE matrices appended to the module-level lists.
    Map1.append([[random.uniform(0, 1.) for _ in range(DIM_SIZE)]
                 for _ in range(POP_SIZE)])
    Map1.append([[random.uniform(0, 1.) for _ in range(DIM_SIZE)]
                 for _ in range(POP_SIZE)])
    Map2.append([[random.uniform(0, 1.) for _ in range(DIM_SIZE)]
                 for _ in range(POP_SIZE)])
    Map2.append([[random.uniform(0, 1.) for _ in range(DIM_SIZE)]
                 for _ in range(POP_SIZE)])
    # BUG FIX: the original did `Normal_Map1 = Map1`, which merely aliases
    # the lists — the normalisation loop below then overwrote the chaotic
    # maps themselves before the next chaoticFunc step. Deep copies keep
    # the raw maps and their normalised versions separate.
    Normal_Map1 = copy.deepcopy(Map1)
    Normal_Map2 = copy.deepcopy(Map2)
    current_gen = 0
    for g in range(GEN):
        current_particle = 0
        for part in pop:
            part.fitness.values = toolbox.evaluate(part)
            if not part.best or part.best.fitness < part.fitness:
                part.best = creator.Particle(part)
                part.best.fitness.values = part.fitness.values
            if not best or best.fitness < part.fitness:
                best = creator.Particle(part)
                best.fitness.values = part.fitness.values
        for part in pop:
            toolbox.update(part, best)
        logbook.record(gen=g, evals=len(pop), **stats.compile(pop))
        # Advance both maps to the next chaotic level, then normalise the
        # first matrix of each into the Normal_* buffers.
        Map1 = chaoticFunc(Map1)
        Map2 = chaoticFunc(Map2)
        for i in range(POP_SIZE):
            for j in range(DIM_SIZE):
                Normal_Map1[0][i][j] = Normalizer(Map1[0][i][j])
                Normal_Map2[0][i][j] = Normalizer(Map2[0][i][j])
        current_gen += 1
    return pop, logbook, best
def main():
    """PSO over 150 particles for 200 generations; appends per-generation
    (gen, min, mean) fitness rows to 'griewank.txt'.

    BUG FIX: the original nested a second full generation loop inside the
    first (GEN * GEN iterations) and evaluated every particle up to three
    times per step (twice directly, once via toolbox.map). This version
    runs a single generation loop with one evaluation per particle.

    Returns (pop, logbook, best, stop_gen, ok_count).
    """
    pop = toolbox.population(n=150)
    stats = tools.Statistics(lambda ind: ind.fitness.values)
    logbook = tools.Logbook()
    logbook.header = ["gen", "evals"] + stats.fields
    GEN = 200
    best = None
    stop_gen = 200
    ok_count = 0
    for gen in range(GEN):
        # Evaluate each particle exactly once and refresh the bests.
        for part in pop:
            part.fitness.values = toolbox.evaluate(part)
            if not part.best or part.best.fitness < part.fitness:
                part.best = creator.Particle(part)
                part.best.fitness.values = part.fitness.values
            if not best or best.fitness < part.fitness:
                best = creator.Particle(part)
                best.fitness.values = part.fitness.values
        fits = [ind.fitness.values[0] for ind in pop]
        mean = sum(fits) / len(pop)
        print(gen, min(fits), mean)
        # Append "gen min mean" for later plotting.
        with open('griewank.txt', mode='a') as f:
            f.write(str(gen))
            f.write(" ")
            f.write(str(min(fits)))
            f.write(" ")
            f.write(str(mean))
            f.write("\n")
        for part in pop:
            toolbox.update(part, best)
    return pop, logbook, best, stop_gen, ok_count
def pso(ch, bounds, N, GEN, record=True):
    # Plug the prepared pieces into the DEAP framework.
    # Optimises tau plus the strength and range of the interaction.
    # `bounds` maps parameter name -> (lower, upper); `ch` supplies the
    # objective via ch.run. Results go to a CSV when record is True.
    pmins = np.zeros(len(bounds))
    pmaxs = np.zeros(len(bounds))
    for i, key in enumerate(bounds):
        pmins[i] = bounds[key][0]
        pmaxs[i] = bounds[key][1]
    # Speed limits: 20% of each parameter's range.
    smaxs = (pmaxs - pmins) * 0.2
    smins = -smaxs
    toolbox = base.Toolbox()
    toolbox.register("Particle", generate, size=N, pmin=pmins, pmax=pmaxs,
                     smin=smins, smax=smaxs)
    toolbox.register("population", tools.initRepeat, list, toolbox.Particle)
    toolbox.register("update", updateParticle, pmin=pmins, pmax=pmaxs,
                     phi1=2.0, phi2=2.0)
    toolbox.register("evaluate", ch.run)
    if record:
        print "xscale=%d, t_interval=%d, met=(%s,%s)" % \
            (ch.x_scale, ch.t_interval, ch.metrics[0], ch.metrics[1])
        # ex) "opt_result_t10_x10_metNAIVE-SSD_repeat5.csv
        record_file = result_dir + "opt_result_t" + str(ch.t_interval) + \
            "_x" + str(ch.x_scale) + \
            "_met" + str(ch.metrics[0]) + "-" + str(ch.metrics[1]) + \
            "_repeat" + str(ch.iteration) + ".csv"
        # generation, tau, intensity, range, normalized error
        with open(record_file, 'w')as f:
            writer = csv.writer(f, lineterminator='\n')
            writer.writerow(["generation", "tau", "interaction_to_ped",
                             "range_of_interaction", "compare_result",
                             "normalized_error"])
    pop = toolbox.population(n=N)
    best = None
    for g in range(GEN):
        for part in pop:
            part.fitness.values = toolbox.evaluate(part)
            if not part.best or part.best.fitness < part.fitness:
                part.best = creator.Particle(part)
                part.best.fitness.values = part.fitness.values
            if not best or best.fitness < part.fitness:
                best = creator.Particle(part)
                best.fitness.values = part.fitness.values
        for part in pop:
            toolbox.update(part, best)
        # Gather all the fitnesses in one list and print the stats
        # logbook.record(gen=g, evals=len(pop), **stats.compile(pop))
        # print(logbook.stream)
        print g,
        normalized_error = error(ch.correct_params, best)
        ### recording result
        if record:
            with open(record_file, 'a') as f:
                writer = csv.writer(f, lineterminator='\n')  # use '\n' as the line terminator
                writer.writerow([g,
                                 best[0], best[1], best[2],
                                 best.fitness.values[0],
                                 normalized_error])  # when best is a flat list
    print best, best.fitness.values[0]
def minimize(func_name):
    """Benchmark PSO on ``func_name`` over every dimension in the config.

    Reads bounds/dimensions from scale.ini, runs 30 independent repeats per
    dimension with a budget of 100 * dim evaluations, and saves the matrix
    of best fitness values to DEAP_exp/log/scale/<func_name>.txt.

    Fixes: the final file name used the undefined name ``obj_name``
    (NameError) — it now uses ``func_name``; the DEAP class creation is
    hoisted out of the loops so the classes are not redefined repeatedly.
    """
    config = configparser.ConfigParser()
    config_name = os.path.join(project_dir, 'objective_function/config/scale.ini')
    config.read(config_name, encoding='utf-8')
    print(config.sections())
    optimal_position_address_dir = os.path.join(project_dir,
                                                "objective_function/optimal_position")
    # NOTE: eval() on config values — acceptable only for trusted config files.
    dim_list = eval(config.get(func_name, 'dim_list'))
    dim_regs = eval(config.get(func_name, 'dim_regs'))
    repeat = 30
    values = np.zeros((repeat, len(dim_list)))
    seed = 0
    random.seed(seed)
    np.random.seed(seed)
    speed_lim = dim_regs[1] / 5
    # Hoisted: creating these once avoids DEAP re-definition warnings.
    creator.create("FitnessMin", base.Fitness, weights=(-1.0,))
    creator.create("Particle", list, fitness=creator.FitnessMin,
                   speed=list, smin=None, smax=None, best=None)
    for i in range(repeat):
        for j in range(len(dim_list)):
            dim_size = dim_list[j]
            toolbox = base.Toolbox()
            toolbox.register("particle", generate, size=dim_size,
                             pmin=dim_regs[0], pmax=dim_regs[1],
                             smin=-speed_lim, smax=speed_lim)
            toolbox.register("population", tools.initRepeat, list, toolbox.particle)
            toolbox.register("update", updateParticle, phi1=2.0, phi2=2.0)
            toolbox.register("evaluate", lambda x: (function_dict[func_name](x), ))
            optimal_position_address = os.path.join(
                optimal_position_address_dir, func_name,
                "{}_{}.txt".format(func_name, dim_size))
            budget = dim_size * 100
            set_optimal_position(optimal_position_address)
            population = 10
            pop = toolbox.population(n=population)
            best = None
            # Standard global-best PSO loop under an evaluation budget.
            while get_cnt() < budget:
                for part in pop:
                    part.fitness.values = toolbox.evaluate(part)
                    if not part.best or part.best.fitness < part.fitness:
                        part.best = creator.Particle(part)
                        part.best.fitness.values = part.fitness.values
                    if not best or best.fitness < part.fitness:
                        best = creator.Particle(part)
                        best.fitness.values = part.fitness.values
                for part in pop:
                    toolbox.update(part, best)
            values[i, j] = best.fitness.values[0]
            clear_epoch()
            print("finist repeat: {}, dim: {}, best f: {}".format(i, dim_size, best.fitness.values))
    log_address = os.path.join(project_dir, 'DEAP_exp/log/scale/')
    # BUG FIX: 'obj_name' was undefined here; the intended name is func_name.
    file_name = os.path.join(log_address, '{}.txt'.format(func_name))
    os.makedirs(log_address, exist_ok=True)
    np.savetxt(file_name, values)
def update_connected(self):
    """
    Updates the population of particles according to the connected
    population scheme (a single best shared by the whole population).

    :return:
    """
    for particle in self.population:
        # Refresh the particle's personal best first...
        if particle.best is None or particle.best.fitness < particle.fitness:
            particle.best = creator.Particle(particle)
            particle.best.fitness.values = particle.fitness.values
        # ...then the population-wide best.
        if self.best is None or self.best.fitness < particle.fitness:
            self.best = creator.Particle(particle)
            self.best.fitness.values = particle.fitness.values
def evaluate_butterflies(pop, toolbox):
    """Evaluate every particle and return the population's global best.

    BUG FIX: the original compared/updated each particle's personal best
    *before* evaluating it, so the comparison used the stale fitness from
    the previous step.  The evaluation now happens first, and both the
    personal and the global best are updated from the fresh fitness.
    """
    best = None
    for part in pop:
        part.fitness.values = toolbox.evaluate(part)
        # Personal best, using the just-computed fitness.
        if not part.best or part.best.fitness < part.fitness:
            part.best = creator.Particle(part)
            part.best.fitness.values = part.fitness.values
        # Global best.
        if not best or best.fitness < part.fitness:
            best = creator.Particle(part)
            best.fitness.values = part.fitness.values
    return best
def update_swarm(self, swarm, part):
    """ Update swarm's attractors personal best and global best.

    Raises RuntimeError if the particle's fitness has not been evaluated.

    Fixes: `is None` replaces the `== None` comparisons (identity test,
    PEP 8), and the Python-2-only `raise X, msg` form is replaced by the
    call form, which is valid in both Python 2 and 3.
    """
    if not part.fitness.valid:
        raise RuntimeError("Particles need to have a valid fitness before calling update_swarm!")
    if part.best is None or part.fitness > part.bestfit:
        part.best = creator.Particle(
            part)  # Get the position @UndefinedVariable
        part.bestfit.values = part.fitness.values  # Get the fitness
    if swarm.best is None or part.fitness > swarm.bestfit:
        swarm.best = creator.Particle(
            part)  # Get the position @UndefinedVariable
        swarm.bestfit.values = part.fitness.values  # Get the fitness
def eval_and_update(best, c_toolbox, pop):
    """Evaluate every particle, refresh personal and global bests, then
    move the whole swarm; returns the (possibly replaced) global best."""
    for particle in pop:
        particle.fitness.values = c_toolbox.evaluate(particle)
        improved_personal = (not particle.best
                             or particle.best.fitness < particle.fitness)
        if improved_personal:
            particle.best = creator.Particle(particle)
            particle.best.fitness.values = particle.fitness.values
        if not best or best.fitness < particle.fitness:
            best = creator.Particle(particle)
            best.fitness.values = particle.fitness.values
    # Second pass: update velocities/positions against the final best.
    for particle in pop:
        c_toolbox.update(particle, best)
    return best
def _generate(self):
    """Generates a randomly seeded particle.

    Notes
    -----
    Position components are drawn uniformly from [-1, 1] and speed
    components from [-max_speed, max_speed]. The particle carries the
    instance's speed limits and starts with no identity or neighbours.

    Returns
    -------
    part : particle object
        A particle used during optimisation.
    """
    dims = len(self.value_means)
    part = creator.Particle([random.uniform(-1, 1) for _ in range(dims)])
    part.speed = [random.uniform(-self.max_speed, self.max_speed)
                  for _ in range(dims)]
    part.smin = -self.max_speed
    part.smax = self.max_speed
    part.ident = None
    part.neighbours = None
    return part
def generate(self, size, pmin, pmax, smin, smax):
    """Create a particle: `size` position components uniform in
    [pmin, pmax] and speeds uniform in [smin, smax]; the speed bounds
    are stored on the particle itself."""
    positions = (random.uniform(pmin, pmax) for _ in range(size))
    part = creator.Particle(positions)
    part.speed = [random.uniform(smin, smax) for _ in range(size)]
    part.smin, part.smax = smin, smax
    return part
def generate_part(dim, pmin, pmax, smin, smax, int_idx): int_mask = [0]*dim # If int_idx is a list, int_mask will indicate which dimensions are integers try: for i in int_idx: int_mask[i] = 1 except TypeError: pass # If int_idx is None, then int_mask will be all 0 position = [random.uniform(pmin[idx], pmax[idx])if int_mask[idx]==0 else random.randint(pmin[idx], pmax[idx]) for idx in range(dim)] part = creator.Particle(position) part.int_mask = int_mask part.speed = [random.uniform(smin[idx], smax[idx]) for idx in range(dim)] part.pmin = pmin[:] part.pmax = pmax[:] try: for single_int_idx in int_idx: # To make the lb and ub have equal probability of being selected after applying round part.pmin[single_int_idx] = pmin[single_int_idx] - 0.499 part.pmax[single_int_idx] = pmax[single_int_idx] + 0.499 except TypeError: pass part.smin = smin part.smax = smax return part
def generate(size, pmin, pmax, smin, smax):
    """Particle factory: list position uniform in [pmin, pmax] and a numpy
    speed vector uniform in [smin, smax]; the speed limits are attached
    to the particle."""
    coords = [random.uniform(pmin, pmax) for _ in range(size)]
    part = creator.Particle(coords)
    speeds = [random.uniform(smin, smax) for _ in range(size)]
    part.speed = np.array(speeds)
    part.smin, part.smax = smin, smax
    return part
def generate(size, pmin, pmax, smin, smax):
    """Create a particle: only its position is drawn here (uniform in
    [pmin, pmax]); the initial speed is a plain list of scalars uniform
    in [smin, smax], and the speed limits are recorded on the particle."""
    position = [random.uniform(pmin, pmax) for _ in range(size)]
    part = creator.Particle(position)
    part.speed = [random.uniform(smin, smax) for _ in range(size)]
    part.smin = smin
    part.smax = smax
    return part
def generate(size, pmin, pmax, smin, smax, starting_point):
    """Seed a particle near `starting_point`: a uniform offset drawn from
    [pmin, pmax] is added element-wise to the given start position,
    giving some (small) variance from the initial position."""
    part = creator.Particle(random.uniform(pmin, pmax) for _ in range(size))
    part[:] = [offset + start for offset, start in zip(part, starting_point)]
    part.speed = [random.uniform(smin, smax) for _ in range(size)]
    part.smin, part.smax = smin, smax
    return part
def generate(size, pmin, pmax, smin, smax):
    """Build a particle of length `size` with uniform-random position in
    [pmin, pmax] and speed in [smin, smax]; the speed lower/upper limits
    are stored on the particle for later clamping."""
    part = creator.Particle([random.uniform(pmin, pmax) for _ in range(size)])
    part.speed = [random.uniform(smin, smax) for _ in range(size)]
    # Speed lower and upper limit.
    part.smin = smin
    part.smax = smax
    return part
def generate(pmin, pmax, vmin, vmax, dim, size, histogram):
    """Create a 2-D particle of `size` points: each point's x is uniform
    in [pmin, pmax] and its y is uniform in [pmin, max(histogram)];
    every point gets a `dim`-component velocity uniform in [vmin, vmax]."""
    ymax = numpy.amax(histogram)
    points = [[random.uniform(pmin, pmax), random.uniform(pmin, ymax)]
              for _ in range(size)]
    particule = creator.Particle(points)
    particule.velocity = [[random.uniform(vmin, vmax) for _ in range(dim)]
                          for _ in range(len(particule))]
    return particule
def generate_particle(size, val_min, val_max, s_min, s_max):
    """Create a permutation particle: a shuffled sequence of the integers
    val_min..val_max (inclusive), with `size` speed components uniform in
    [s_min, s_max] and the speed limits stored on the particle."""
    values = list(range(val_min, val_max + 1))
    random.shuffle(values)
    part = creator.Particle(values)
    part.speed = [random.uniform(s_min, s_max) for _ in range(size)]
    part.smin = s_min
    part.smax = s_max
    return part