def __init__(self, NC, ND, force):
    """Initialize an individual with an empty NC-by-ND weight matrix.

    :param NC: number of rows of the weight matrix.
    :param ND: number of columns of the weight matrix.
    :param force: value stored as-is on the instance.
    """
    self.NC, self.ND = NC, ND
    self.force = force
    # np.empty leaves the buffer uninitialized; callers must fill it.
    self.weights = np.empty((NC, ND))
    self.fitness = creator.FitnessMin()
    self.u = None
def __init__(self, NC, ND, min_values, max_values):
    """Initialize an individual with bounds and an empty NC-by-ND centroid.

    :param NC: number of rows of the centroid matrix.
    :param ND: number of columns of the centroid matrix.
    :param min_values: lower bounds, stored as-is.
    :param max_values: upper bounds, stored as-is.
    """
    self.NC, self.ND = NC, ND
    self.min_values, self.max_values = min_values, max_values
    # np.empty leaves the buffer uninitialized; callers must fill it.
    self.centroid = np.empty((NC, ND))
    self.fitness = creator.FitnessMin()
    self.u = None
def sortEpsilonNondominated(individuals, k, first_front_only=False):
    """Sort *individuals* into epsilon-nondominated Pareto fronts.

    Each individual's (3-objective) fitness is normalized by the
    per-objective maximum absolute value, then each objective is blended
    with the other two by factor ``a`` (derived from a 30-degree angle),
    widening dominance into an epsilon cone. Standard fast nondominated
    sorting is then applied to the transformed fitnesses.

    :param individuals: individuals with 3-valued ``fitness.values``.
    :param k: number of individuals to sort (``0`` returns ``[]``).
    :param first_front_only: if True, only the first front is computed.
    :returns: list of fronts, each a list of individuals.

    Fixes over the original: ``list(fits)`` was rebuilt inside the inner
    pairwise loop (an extra O(n^2) cost), and a zero objective column
    raised ZeroDivisionError.
    """
    if k == 0:
        return []
    # Blend factor for the epsilon cone: tan(2 * 30 degrees) / 2.
    angle = 30
    a = math.tan(math.radians(angle * 2)) / 2
    map_fit_ind = defaultdict(list)
    # Per-objective maximum absolute value, used to normalize objectives.
    max_fit = [0, 0, 0]
    for ind in individuals:
        for i, fit in enumerate(ind.fitness.values):
            if abs(fit) > max_fit[i]:
                max_fit[i] = abs(fit)
    # Guard: an all-zero objective column previously caused a
    # ZeroDivisionError below; dividing by 1 leaves the zeros unchanged.
    max_fit = [m if m else 1.0 for m in max_fit]
    for ind in individuals:
        f0 = ind.fitness.values[0] / max_fit[0]
        f1 = ind.fitness.values[1] / max_fit[1]
        f2 = ind.fitness.values[2] / max_fit[2]
        new_fit = creator.FitnessMin((
            f0 + f1 * a + f2 * a,
            f1 + f0 * a + f2 * a,
            f2 + f0 * a + f1 * a,
        ))
        map_fit_ind[new_fit].append(ind)
    # Materialize once; the original converted the keys view to a list
    # inside the inner loop on every outer iteration.
    fits = list(map_fit_ind)

    current_front = []
    next_front = []
    dominating_fits = defaultdict(int)
    dominated_fits = defaultdict(list)

    # Rank first Pareto front
    for i, fit_i in enumerate(fits):
        for fit_j in fits[i + 1:]:
            if fit_i.dominates(fit_j):
                dominating_fits[fit_j] += 1
                dominated_fits[fit_i].append(fit_j)
            elif fit_j.dominates(fit_i):
                dominating_fits[fit_i] += 1
                dominated_fits[fit_j].append(fit_i)
        if dominating_fits[fit_i] == 0:
            current_front.append(fit_i)

    fronts = [[]]
    for fit in current_front:
        fronts[-1].extend(map_fit_ind[fit])
    pareto_sorted = len(fronts[-1])

    # Rank the next front until all individuals are sorted or
    # the given number of individual are sorted.
    if not first_front_only:
        N = min(len(individuals), k)
        while pareto_sorted < N:
            fronts.append([])
            for fit_p in current_front:
                for fit_d in dominated_fits[fit_p]:
                    dominating_fits[fit_d] -= 1
                    if dominating_fits[fit_d] == 0:
                        next_front.append(fit_d)
                        pareto_sorted += len(map_fit_ind[fit_d])
                        fronts[-1].extend(map_fit_ind[fit_d])
            current_front = next_front
            next_front = []
    return fronts
def get(self):
    """Rebuild a population from the stored evospace sample.

    Each sample entry's ``'chromosome'`` becomes an Individual; when a
    ``'score'`` is present under ``'fitness'`` it is attached as a
    single-valued FitnessMin.

    :returns: list of Individuals.
    """
    def _to_individual(entry):
        individual = creator.Individual(entry['chromosome'])
        if 'score' in entry['fitness']:
            individual.fitness = creator.FitnessMin(
                values=(entry['fitness']['score'], ))
        return individual

    return [_to_individual(cs) for cs in self.evospace_sample['sample']]
def __init__(self, rc_vals, C, D, data, rc_bounds=None):
    """Store RC parameters and simulation data for later evaluation.

    :param dict rc_vals: RC values
    :param C: stored as-is on the instance.
    :param D: stored as-is on the instance.
    :param SimulationData data: simulation data.
    :param dict rc_bounds: Dictionary of tuples bounding each RC value.
    """
    self.rc_vals = rc_vals
    self.C, self.D = C, D
    self.data = data
    self.rc_bounds = rc_bounds
    # NOTE(review): the original comment said "Maximizing fitness" yet the
    # class used is FitnessMin; the actual direction depends on the weights
    # passed to creator.create elsewhere — confirm which is intended.
    self._fitness = creator.FitnessMin()
def main(func, dim, maxfuncevals, ftarget=None):
    """Run a (1+1)-style evolution strategy on *func*.

    :param func: objective function, evaluated through the toolbox.
    :param dim: problem dimensionality.
    :param maxfuncevals: maximum number of function evaluations.
    :param ftarget: target value; reaching a fitness better than this
        stops the search early.
    :returns: the best individual found.
    """
    toolbox = base.Toolbox()
    toolbox.register("update", update)
    toolbox.register("evaluate", func)
    toolbox.decorate("evaluate", tupleize)

    # Wrap the target value as a Fitness so it can be compared directly
    # with individual fitnesses below.
    opt = creator.FitnessMin((ftarget, ))

    # Initialization interval and step-size adaptation constants.
    lower, upper = -5, 5
    sigma = (upper - lower) / 2.0
    alpha = 2.0**(1.0 / dim)

    # Random starting point; `worst` starts as a reusable placeholder.
    best = creator.Individual(
        random.uniform(lower, upper) for _ in range(dim))
    worst = creator.Individual([0.0] * dim)
    best.fitness.values = toolbox.evaluate(best)

    # One mutation + evaluation per iteration until the budget runs out.
    for _ in range(1, maxfuncevals):
        toolbox.update(worst, best, sigma)
        worst.fitness.values = toolbox.evaluate(worst)
        if best.fitness <= worst.fitness:
            # Offspring is at least as good: grow the step and swap roles.
            sigma *= alpha
            best, worst = worst, best
        else:
            # Offspring is worse: shrink the step.
            sigma *= alpha**(-0.25)
        # ">" on a DEAP fitness means strictly better (not greater).
        if best.fitness > opt:
            return best
    return best
def calc(PFc):
    """Compute quality indicators for a candidate Pareto front.

    Relies on module-level names: `model`, `MINIMUM_PFS`, `_get_frontier`,
    `GD`, `GS` and `HyperVolume`.

    :param PFc: list of objective vectors (one per solution).
    :returns: tuple ``(model, gd, gs, pfs, hv, PFc)``; the metric slots are
        ``-1`` when fewer than MINIMUM_PFS frontier points remain.
    """
    creator.create("FitnessMin", base.Fitness, weights=[-1.0] * len(PFc[0]))
    creator.create("Individual", str, fitness=creator.FitnessMin)
    # Trick: each individual is the str() of its solution, so the attached
    # fitness alone identifies the solution.
    wrapped = list()
    for sol in PFc:
        ind = creator.Individual(str(sol))
        ind.fitness = creator.FitnessMin(sol)
        wrapped.append(ind)
    del PFc
    PFc = _get_frontier(wrapped[:])  # DEAP version
    del wrapped
    PFc_list = [i.fitness.values for i in PFc]  # PYTHON LIST version
    if len(PFc) < MINIMUM_PFS:
        return model, -1, -1, len(PFc), -1, PFc
    # GD — load the reference front PF0 for this model.
    PF0 = list()
    with open('./PF_0/' + model + '.txt', 'r') as ref:
        for line in ref:
            PF0.append([float(tok) for tok in line.strip('\n').split(' ')])
    gd = GD(PF0, PFc_list)
    # GS
    gs = GS(PF0, PFc_list)
    # PFS
    pfs = len(PFc)
    # HV against the all-ones reference point.
    rp = [1] * len(PFc[0].fitness.values)
    hv = round(HyperVolume(rp).compute(PFc_list), 4)
    return model, gd, gs, pfs, hv, PFc
def get_stats(model_name, res_file):
    """Load a result file, normalize its population, and compute stats.

    Parses the decisions/fitness sections of *res_file* (separated by
    ``~~~``), normalizes each objective by the per-model maxima derived
    from the dimacs files, and compares against the recorded optimum via
    ``stat_basing_on_pop``.

    Fixed for Python 3: the original indexed and item-assigned the results
    of ``map``/``filter`` (e.g. ``linesT[0]``, ``filter(...)[0]``), which
    are one-shot iterators in Python 3 and raise TypeError.

    :param model_name: model identifier used to locate the data files.
    :param res_file: path to the result file to analyze.
    :returns: whatever ``stat_basing_on_pop`` returns.
    """
    def get_obj_max():
        # Per-objective maxima used to normalize raw fitness vectors.
        with open(
                PROJECT_PATH + '/dimacs_data/' + model_name +
                '.dimacs.augment', 'r') as f:
            lines = f.readlines()
        # Drop comment lines (usually the first), strip newlines, split.
        rows = [l.rstrip().split(" ") for l in lines if not l.startswith('#')]
        columns = [list(c) for c in zip(*rows)]
        featureIndex = [int(v) for v in columns[0]]
        cost = [float(v) for v in columns[1]]
        # columns[2] (used_before) exists in the file but is unused here.
        defects = [int(v) for v in columns[3]]
        with open(PROJECT_PATH + '/dimacs_data/' + model_name + '.dimacs',
                  'r') as f:
            lines = f.readlines()
        indicator = next(l for l in lines if l.startswith("p cnf "))
        # int() tolerates the trailing newline, so no rstrip is needed.
        cnfNum = int(indicator.split(" ")[-1])
        objMax = [
            cnfNum,
            max(featureIndex),
            max(featureIndex),
            sum(defects),
            sum(cost)
        ]
        return objMax

    def normalize(fitness, objMax):
        # In-place: divide each objective by its per-model maximum.
        for o_i, o in enumerate(fitness):
            fitness[o_i] = o / objMax[o_i]

    with open(res_file, 'r') as f:
        lines = [l.rstrip() for l in f.readlines()]
    start = lines.index("~~~")
    decs = lines[:start]
    fits = lines[start + 1:-2]
    runtime = float(lines[-1])
    pop_fitness = [[float(v) for v in x.split(" ")] for x in fits]
    obj_max = get_obj_max()
    creator.create("FitnessMin", base.Fitness, weights=[-1.0] * 5,
                   correct=bool, conVio=list)
    creator.create("Individual", list, fitness=creator.FitnessMin,
                   fulfill=list)
    pop = list()
    for d, p in zip(decs, pop_fitness):
        ind = creator.Individual([int(ch) for ch in d])
        # First objective near zero means no constraint violations.
        correct = p[0] < 0.01
        normalize(p, obj_max)
        ind.fitness = creator.FitnessMin(p)
        ind.fitness.correct = correct
        pop.append(ind)
    # fetch the optimal_on_theory front and normalize it the same way
    with open(PROJECT_PATH + '/optimal_in_his/' + model_name + '.txt',
              'r') as f:
        lines = [l.rstrip() for l in f.readlines()]
    start = lines.index("~~~")
    fits = lines[start + 1:-2]
    optimal_in_theory = list()
    for p in [[float(v) for v in x.split(" ")] for x in fits]:
        normalize(p, obj_max)
        optimal_in_theory.append(p)
    return stat_basing_on_pop(pop, record_valid_only=True,
                              optimal_in_theory=optimal_in_theory)
def main(func, dim, maxfuncevals, ftarget=None):
    # Hybrid PSO+GA driver: runs a particle swarm for GEN generations and,
    # each generation, mirrors the swarm into a GA population that is
    # evolved and then used to recalibrate the particles.
    # NOTE(review): relies on module globals (toolbox, generate, PMIN, PMAX,
    # POP, GEN, VISUALIZE, VISUALIZE_PARAM, tupleize, createheatmap,
    # visualize_params, evolution, recalibrate_particles, visualize_pso);
    # `maxfuncevals` is accepted but unused here — confirm intended.
    toolbox.register("particle", generate, size=dim, pmin=PMIN, pmax=PMAX, smin=(PMIN - PMAX) / 10., smax=(PMAX - PMIN) / 10.)
    toolbox.register("population", tools.initRepeat, list, toolbox.particle)
    toolbox.register("evaluate", func)
    toolbox.decorate("evaluate", tupleize)
    pop = toolbox.population(n=POP)
    # Statistics/logbook are configured but recording is commented out below.
    stats = tools.Statistics(lambda ind: ind.fitness.values)
    stats.register("avg", numpy.mean)
    stats.register("std", numpy.std)
    stats.register("min", numpy.min)
    stats.register("max", numpy.max)
    logbook = tools.Logbook()
    logbook.header = ["gen", "evals"] + stats.fields
    # Target value wrapped as a Fitness object for direct comparison.
    opt = creator.FitnessMin((ftarget, ))
    best = None
    if VISUALIZE == 1:
        heatmap = createheatmap(toolbox.evaluate)
    for g in range(GEN):
        if VISUALIZE_PARAM == 1:
            visualize_params(pop, g)
        ga_pop = []
        for part in pop:
            part.fitness.values = toolbox.evaluate(part)
            # Track each particle's personal best.
            if not part.best or part.best.fitness < part.fitness:
                part.best = creator.Particle(part)
                part.best.fitness.values = part.fitness.values
            # Track the swarm's global best.
            if not best or best.fitness < part.fitness:
                best = creator.Particle(part)
                best.fitness.values = part.fitness.values
            # ">" on a DEAP fitness means better (not numerically greater);
            # stop as soon as the target is beaten.
            if best.fitness > opt:
                return best
        # Move every particle toward `best`, and mirror the swarm into a
        # GA population.
        for part in pop:
            toolbox.update(part, best)
            ga_pop.append(toolbox.individual(part))
        # Gather all the fitnesses in one list and print the stats
        #logbook.record(gen=g, evals=len(pop), **stats.compile(pop))
        #print(logbook.stream)
        ga_old = list(map(toolbox.clone, ga_pop))
        ga_pop = evolution(ga_pop)
        # print "CONVERGE?"
        # cv = all([all([(i < 0.2) for i in x]) for x in [map(operator.sub,ga_old[i], ga_pop[i]) for i in range(len(ga_pop))]])
        # print cv
        # if cv:
        # print ga_pop[0]
        # Feed the evolved GA population back into the particle swarm.
        pop = recalibrate_particles(pop, ga_pop)
        if VISUALIZE == 1:
            visualize_pso(pop, '_pf-', str(g), heatmap)
    return best
creator.create("FitnessMin", base.Fitness, weights=[-1.0] * 3)  # TODO set "4"?
creator.create("Individual", str, fitness=creator.FitnessMin)
for model in models:
    # Collect every recorded objective vector for this model across all
    # result folders.
    union_candidates = []
    for folder in folders:
        for path in fetch_all_files(folder, model):
            with open(path, 'r') as records:
                lines = [line.strip('\n') for line in records.readlines()]
            for line in lines:
                # Skip bookkeeping lines (timing / separator / generation).
                if line.startswith(('T', '~~~', 'G')):
                    continue
                values = [float(tok) for tok in line.split(' ')]
                # Individuals are the str() of the vector; the attached
                # fitness carries the actual objective values.
                ind = creator.Individual(str(values))
                ind.fitness = creator.FitnessMin(values)
                union_candidates.append(ind)
    # pdb.set_trace()
    frontier = _get_frontier(union_candidates)
    # Write the union frontier out next to this script.
    out_path = os.path.dirname(
        os.path.abspath(__file__)) + '/' + model + '.txt'
    with open(out_path, 'w') as out:
        for front in frontier:
            out.write(' '.join(map(str, front.fitness.values)))
            out.write('\n')