Example #1
def main():
    print("TUBES I IF3170 INTELIGENSI BUATAN")
    print("Masukkan nama file: ")
    namafile = input()
    states = parse(namafile)

    stop = False
    while (not stop):
        print("Tentukan Algoritma Penyelesaian:")
        print("1. Hill Climbing")
        print("2. Simulated Annealing")
        print("3. Genetic Algorithm")
        print("4. Exit")

        algo = input("Pilihan: ")

        if (algo=='1'):
            print("BOARD AWAL")
            hillClimbing(states)
            input()
        elif (algo=='2'):
            print("BOARD AWAL")
            simulated_annealing(states)
        elif (algo=='3'):
            print("BOARD AWAL")
            genetic_algorithm(states)
        elif (algo=='4'):
            print('Program selesai')
            stop = True
        else:
            print('Invalid input')
Example #2
def solve_n_queens_problem(number_of_queens,
                           population_size=10**3,
                           max_depth=200,
                           max_iterations=10**3):
    board = new_random_board(number_of_queens)
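
    # Note: this fragment assumes `apply` calls a zero-argument callable
    # (Python 2's built-in behaves this way; in Python 3 a small helper such as
    # `apply = lambda f: f()` would be needed). It also relies on
    # `from functools import reduce` and `from itertools import repeat`,
    # presumably imported at the top of the original module.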

    fitness_function = apply
    sort_population = get_sort_population(fitness_function)
    create_new_program = program_generator(max_depth, number_of_queens, board)

    def error_func(program):
        return program.collisions

    best_program = genetic_algorithm(
        sort_population(map(apply, repeat(create_new_program,
                                          population_size))),
        get_selection(fitness_function),
        get_genetic_operators(number_of_queens, create_new_program),
        sort_population,
        error_func,
        max_iterations=max_iterations,
        sample_percentage=.35,
    )

    return reduce(swap, best_program.swaps, best_program.initial_board)
Example #3
    def plan_schedule(self):
        values, sets, edges, max_cost = self.formulate_problem()
        mult = lambda x, y: x * y
        factorial = lambda n: reduce(mult, range(1, n+1))
        num_perms = factorial(len(sets)) * reduce(mult, [len(s) for s in sets.values()])
        rospy.loginfo("Solving for %s sets connected by %s edges totalling %s permutations with a max cost of %s." \
                      % (len(sets), len(edges), num_perms, max_cost))

        score, tour = genetic_algorithm(
            sets,
            cost(values, edges, max_cost),
            random_sample(sets.keys(), sets),
            mutate(sets),
            population=100,
            generations=500
        )

        self.schedule = []
        for key, method in tour:
            self.schedule.append({
                "action": method,
                "bin": self.items[key].bin,
                "item": self.items[key].name,
                "others": self.items[key].contents
            })

        print score, tour
        print self.schedule
Example #4
def big_test_ga(network_tupla,
                dim,
                n_items="nodes",
                fitness=ga.new_fitness,
                n_iterations=12,
                value_resolution=ga.value_resolution,
                min_mass=ga.min_mass,
                max_mass=ga.max_mass,
                precision=ga.precision,
                max_iterations=ga.max_iterations,
                population_size=ga.population_size,
                population_half=ga.population_half,
                elite_size=ga.elite_size,
                random_size=ga.random_size,
                mutation_rate=ga.mutation_rate,
                mutation_part=ga.mutation_part):
    scores = []
    obtained_weights = []
    for i in range(n_iterations):
        print("iteration: " + str(i) + "/" + str(n_iterations))
        result = ga.genetic_algorithm(network_tupla, dim, n_items, fitness,
                                      value_resolution, min_mass, max_mass,
                                      precision, max_iterations,
                                      population_size, population_half,
                                      elite_size, random_size, mutation_rate,
                                      mutation_part)
        score = fitness(result, network_tupla[0], network_tupla[1], dim)
        obtained_weights.append(result)
        scores.append(score)
    return obtained_weights, scores
Example #5
def genetic(inputfile, population_size, generations, mutation_prob, swap_prob,
            population_variation, greedy_injection):
    n_days, scores, libraries = scan_file(
        "input/" +
        inputfile)  # scans the input file and saves all the necessary info

    t = datetime.datetime.now()  # starts the timer

    solution = get_solution_to_optimize(inputfile, libraries, scores, n_days,
                                        greedy_injection)

    population = [solution]  # first population is greedy solution

    for i in range(population_size - 1):  # generates population
        new_solution = mutate_solution(
            solution.libraries_list, libraries,
            population_variation)  # mutates the greedy solution
        population.append(generate_solution(
            new_solution, libraries,
            scores))  # generates new solution and appends it to the population
        print("generated solution")

    print("population done")

    for i in range(generations):
        new_population = genetic_algorithm(
            population, libraries, scores, mutation_prob, swap_prob,
            population_variation)  # executes the genetic algorithm
        population = []
        for s in new_population:
            if s in population:  # if the population already contains s, mutate the best solution and append that instead
                best = sorted(
                    new_population, key=lambda x: x.score, reverse=True
                )[0]  # the best solution is the first one when the array is ordered by scores
                new_solution = mutate_solution(best.libraries_list, libraries,
                                               0.1)  # mutates the solution
                population.append(
                    generate_solution(new_solution, libraries, scores)
                )  # generates new solution and appends it to the population
            else:  # if the population does not contain s, append it
                population.append(s)

        best = sorted(
            population, key=lambda x: x.score, reverse=True
        )[0]  # the best solution is the first one when the array is ordered by scores
        mean = sum([x.score for x in population]) / len(
            population)  # calculates scores mean

        print(i, "- max:", best.score, "avg:", mean)

    elapsed_time = get_elapsed_time(t)  # calculates the elapsed time

    best = sorted(
        population, key=lambda x: x.score, reverse=True
    )[0]  # the best solution is the first one when the array is ordered by scores

    best.print_solution(elapsed_time)  # prints the solution

    write_output(inputfile, best)
Example #6
def main():

    iris = datasets.load_iris()
    normalized_iris = normalize_dataset(iris.data)
    n_features = normalized_iris.shape[1]

    fitness = lambda w: 1.0 - evaluate_new_fuzzy_system(
        w[0], w[1], w[2], w[3], normalized_iris, iris.target)

    # Test Fuzzy
    # w = [0.07, 0.34, 0.48, 0.26] # 95%
    # w = [0, 0.21664307088134033, 0.445098590128248, 0.2350617110613577] # 96.6%
    # print(1.0 - fitness(w))

    record = {'GA': [], 'PSO': []}

    for _ in tqdm(range(30)):

        # GA
        best, fbest = genetic_algorithm(fitness_func=fitness,
                                        dim=n_features,
                                        n_individuals=10,
                                        epochs=30,
                                        verbose=False)
        record['GA'].append(1.0 - fbest)

        # PSO
        initial = [0.5, 0.5, 0.5, 0.5]
        bounds = [(0, 1), (0, 1), (0, 1), (0, 1)]
        best, fbest = pso_simple.minimize(fitness,
                                          initial,
                                          bounds,
                                          num_particles=10,
                                          maxiter=30,
                                          verbose=False)
        record['PSO'].append(1.0 - fbest)

    # Statistics about the runs
    # print('GA:')
    # print(np.amax(record['GA']), np.amin(record['GA']))
    # print(np.mean(record['GA']), np.std(record['GA']))

    # print('PSO:')
    # print(np.amax(record['PSO']), np.amin(record['PSO']))
    # print(np.mean(record['PSO']), np.std(record['PSO']))

    fig, ax = plt.subplots(figsize=(5, 4))

    ax.boxplot(list(record.values()),
               vert=True,
               patch_artist=True,
               labels=list(record.keys()))

    ax.set_xlabel('Algoritmo')
    ax.set_ylabel('Acurácia')

    plt.tight_layout()
    plt.show()
Example #7
def q3(m, data, time_budget, lambda_):
    start = time.perf_counter()
    # Load the training data specified.
    training_data = load_training_data(data)

    # Perform the genetic algorithm.
    results_queue = queue.LifoQueue()
    computation_thread = Thread(
        target=lambda: genetic_algorithm(lambda_,
                                         m,
                                         2**100,
                                         training=training_data,
                                         results_queue=results_queue),
        daemon=True  # Allow exiting when the timer runs out.
    )
    computation_thread.start()

    # Wait max. of time-budget seconds for the algorithm to finish.

    time.sleep(time_budget - (time.perf_counter() - start) - 0.1)
    best = results_queue.get_nowait()
    print(best.tree)
Example #8
def main():
    companies, variance, companies_prices_2016, companies_last_day_month_index = pre_process()
    print_expected_distribuiton(variance, companies)
    opt_gene = genetic_algorithm(1000, variance)

    print_actual_distribuiton(opt_gene.gene, companies)

    banks = simulator_setup(len(companies), opt_gene.gene, 100000)
    last_day_month_index = companies_last_day_month_index[0]
    for current_day in range(indicators_interval, 248):
        for index in range(len(companies)):
            company_prices = companies_prices_2016[index]
            call = indicator(company_prices, current_day)
            if(call[0] == 'purchase'):
                banks[index].purchase(call[1], company_prices[current_day])

            elif(call[0] == 'sell'):

                banks[index].sell(call[1], company_prices[current_day])

        if(current_day in last_day_month_index):
            print(
                f'Profits from {MONTHS[last_day_month_index.index(current_day)]}')
            print_banks_month(companies, banks, [
                              company_prices[current_day] for company_prices in companies_prices_2016])
            print()

    for index in range(len(companies)):
        banks[index].sell(1, companies_prices_2016[index][-1])

    print('SELLING REMANDING SHARES....')
    print('FINAL RESULTS')
    print_banks(companies, banks)

    print_highest_profit(banks, companies)

    print_lowest_profit(banks, companies)
Example #9
    c1 = st.sidebar.slider('Self Trust Parameter', 0.1, 10.0, 2.0)
    c2 = st.sidebar.slider('Neighbor Trust Parameter', 0.1, 10.0, 2.0)

    n_individuals = st.sidebar.number_input('Set swarm size', 10, 1000, 100)
    n_max_gen = st.sidebar.number_input('Set number of generations', 10, 1000,
                                        100)

if st.sidebar.button('Run'):

    st.subheader('Optimization Results')

    if algorithm == 'Genetic Algorithm':
        optimization_result, arguments_result = genetic_algorithm.genetic_algorithm(
            interval, n_variables, bits_number, n_individuals, n_max_gen,
            selection_method_input, crossover_method_input,
            crossover_probability_input, mutation_method_input,
            mutation_probability_input, elitism_method_input,
            elitism_probability_input, func_pointer_dict[function_input],
            optimization_type_input, seed)

    elif algorithm == 'Particle Swarm':
        optimization_result, arguments_result = particle_swarm.pso(
            interval, n_variables, n_individuals, n_max_gen,
            func_pointer_dict[function_input], optimization_type_input,
            w_min_input, w_max_input, c1, c2, particle_swarm.clipper, seed)

    if optimization_type_input == 'min':
        st.markdown('The minimal value found by the algorithm was ' +
                    optimization_result + '.')
    else:
        st.markdown('The maximum value found by the algorithm was ' +
Example #10
# How often a state can be repeated.
number_of_repeated_states = 2

# How long (at least one) path from start to end should be
path_length = 4

# How many branches there should be off the path
number_of_branches = 3

random_model = generate_random_markov_model_with_path(
    names, number_of_repeated_states, path_length, number_of_branches)

data = [random_model.random_walk() for i in range(10)]

pop_size = 50
winner_size = 5
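
# The positional arguments of the call below appear to be, in order: a population
# factory, the population size, the number of generations (an assumption), the
# fitness function, and the selection, crossover and mutation operators.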

last_population, best = genetic_algorithm(
    lambda size: [
        Individual(
            generate_random_markov_model(random_model.names,
                                         number_of_repeated_states))
        for i in range(size)
    ], pop_size, 50, lambda ind: markov_model_fitness(ind.dna, data),
    lambda pop: top_selection(pop, winner_size),
    lambda pop, pop_size: slow_cross_over(pop, pop_size, mate_markov_model),
    lambda pop: [mutate_individual(i) for i in pop])

random_model.add_graph_to_plt()
best.dna.add_graph_to_plt()
plt.show()
Example #11
@author: Philippe
"""

import genetic_algorithm

subjectList = []

sample_input = [
    "Fil 40 ", "CW 10 ", "CoE 115 TJK", "CoE 115 HWX", "CWTS 2 Engg DCS",
    "Math 10 ", "Archaeo 2 "
]
length = len(sample_input)
mycsv = genetic_algorithm.load_csv()

possible_subjects = []
for input in sample_input:
    newSubject = Subject(input)
    possible_subjects.append(filter_subject(mycsv, input))
    for index, row in filter_subject(mycsv, input).iterrows():
        if (row['professor'] == "TBA" or row['professor'] == "CONCEALED"):
            row['professor'] = "TBA, TBA2"
        print(row['professor'])
        newCourse = Course(row['course number'], row['name'], row['professor'],
                           row['schedule'].split(' ')[1])
        newCourse._meetingDay = parse_schedule(row['schedule'].split(' ')[0])
        newSubject._courses.append(newCourse)
    subjectList.append(newSubject)

genetic_algorithm.genetic_algorithm()
Example #12
    # replace None positions in child with the remaining cities
    for i in range(len(parent2)):
        if parent2[i] not in child:
            for x in range(len(child)):
                if child[x] is None:
                    child[x] = parent2[i]
                    break

    return child


#~ bestfit = genetic_algorithm(individual, fitness, mutate, crossover, 120, 40, 5000, 0.5)

# 3-fold cross-validation :))
bestfit1 = genetic_algorithm(individual, fitness, mutate, crossover, 250, 100,
                             4500, 0.5)
bestfit2 = genetic_algorithm(individual, fitness, mutate, crossover, 250, 100,
                             4500, 0.5)
bestfit3 = genetic_algorithm(individual, fitness, mutate, crossover, 250, 100,
                             4500, 0.5)

avg = str((bestfit1 + bestfit2 + bestfit3) / 3.0)

print(SEED1, SEED2, bestfit1, bestfit2, bestfit3, " avg=", avg)
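# Note: the seeds are not interpolated into the file name below; something like
# f"scores_{SEED1}_{SEED2}.txt" may have been intended.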

with open("scores_SEED1_SEED2.txt", "w") as f:
    f.write(
        str(SEED1) + "," + str(SEED2) + "," + str(bestfit1) + "," +
        str(bestfit2) + "," + str(bestfit3) + "," + avg + "\n")
Example #13
    "optimize": "min",  # "min" or "max"                   
    "cross_prob": 0.87,  # Probability of performing Crossover Operation
    "mutat_prob": 0.70,  # Probability of performing Mutation Operation
    "cross_method":
    "Order 1",  # Method of Crossover: "Order 1", "n points", "Uniform", ""
    "mutation_method":
    "Inversion",  # Method of mutation:  "Swap", "Scramble", "Inversion", ""
    "selection_method":
    "roulette",  # Method of selection: "Roulette", "Rank", "Tournament"
    "generations": 300,  # Number of generations
    "pop_size": 30,  # Population Size
    "elitism": False,  # Perform elitism
    "show_iters": True  # Show iterations (generations)
}

sga = genetic_algorithm(SMP, optionals)

int_pop_bag = sga.initialize()
print("\n ==========> Bolsa Inicial:\n")
print(int_pop_bag)
eval_fit_pop = sga.eval_fit_population(int_pop_bag)
print("\n ==========> Bolsa Inicial Evaluada:\n")
fit_vals_pd = pd.DataFrame({"fit_vals": eval_fit_pop["fit_vals"]})
fit_vals_pd_str = fit_vals_pd.sort_values("fit_vals", ascending=False)
print(list(fit_vals_pd_str.index))

pickA = sga.pickOne(int_pop_bag)
pickB = sga.pickOne(int_pop_bag)
print("\n ==========> Parent 1:\n")
print(pickA)
print("\n ==========> Parent 2:\n")
Example #14
def run_train_genetic():

    opt_value_list = []  # Partial results found for each hyperparameter
    opt_time_list = []  # Partial times measured for each hyperparameter
    hiperparam_results = []  # Results for every hyperparameter
    problem_values_list = [[] for i in range(len(problems))]  # Values found for each problem
    i = 0  # Counter used only for printing the problem names
    problem_times_list = [[] for i in range(len(problems))]

    # ---- Collecting results for each hyperparameter ---- #
    for hiperparam in hiperparams:
        print("Starting tests with hiperparam", hiperparam)

        for problem in problems:
            # Runs the algorithm and stores the value and time in this hyperparameter's lists
            opt_state, opt_size, opt_value, time = genetic_algorithm(
                max_size=problem[0],
                values=problem[1],
                population_size=hiperparam[0],
                max_iter=100,
                crossover_ratio=hiperparam[1],
                mutation_ratio=hiperparam[2],
                max_time=120)
            opt_value_list.append(opt_value)
            opt_time_list.append(time)

            # Stores the metaheuristic's best value in the per-problem list (used later for normalization)
            problem_values_list[i].append(opt_value)
            problem_times_list[i].append(time)

            # Progress print to make the run easier to follow
            print('Problem', problem_name[i],
                  'finished with (opt_value, opt_size, time) equals',
                  (opt_value, opt_size, time))
            i = i + 1

        # Stores the values found for each problem in the list of all results
        hiperparam_results.append((hiperparam, opt_value_list, opt_time_list))
        opt_value_list = []
        opt_time_list = []
        i = 0
        print('\n\n')

    # ---- Computing the normalized values and the best hyperparameter ---- #
    max_time_problem = []
    max_value_problem = []  # Largest values found for each problem
    normalized_hiperparam_results = []  # Normalized hyperparameter results
    opt_hiperparam = 0  # Best hyperparameter
    opt_mean = 0  # Best mean
    total_training_time = 0

    # Finding the largest value for each problem
    for values in problem_values_list:
        max_value_problem.append(max(values))

    # Normalizing the values
    for result in hiperparam_results:
        hiperparam = result[0]  # Hyperparameter value
        list_values = result[1].copy()  # Values found by this hyperparameter for each problem
        list_times = result[2].copy()  # Times measured for this hyperparameter for each problem

        for i in range(0, len(list_values)):
            list_values[i] = list_values[i] / max_value_problem[i]

        # Keeping the hyperparameter with the highest summed normalized value
        if sum(list_values) >= opt_mean:
            opt_hiperparam = hiperparam
            opt_mean = sum(list_values)

        total_training_time += sum(list_times)

        normalized_hiperparam_results.append(
            (hiperparam, list_values, list_times))

    # ---- Boxplot of the best results ---- #
    normalized_hiperparam_results.sort(key=lambda x: sum(x[1]), reverse=True)
    sorted_hiperparam_results = normalized_hiperparam_results[:10]
    time_data = pd.DataFrame()
    data = pd.DataFrame()

    for hiperparam in sorted_hiperparam_results:
        time_data[str(hiperparam[0])] = hiperparam[2]

    for hiperparam in sorted_hiperparam_results:
        data[str(hiperparam[0])] = hiperparam[1]

    fig = plt.figure(figsize=(15, 9))
    sns.boxplot(data=data, showmeans=True)
    plt.title(
        'Boxplot dos resultados normalizados de treino para o algoritmo Genetico'
    )
    plt.savefig('train_plots/genetic-values.png')
    plt.close()

    fig = plt.figure(figsize=(15, 9))
    sns.boxplot(data=time_data, showmeans=True)
    plt.title('Boxplot dos tempos de treino para o algoritmo Genetico')
    plt.savefig('train_plots/genetic-times.png')

    print(
        "The best hiperparam (population, crossover_ratio, mutation_ratio) found was",
        opt_hiperparam)
    print("The total training time was", total_training_time)

    return opt_hiperparam
Example #15
# uWorldBank,uRiver,uEarth =201,202,203
# mynet.generatehiddennode([wWorld,wBank],[uWorldBank,uRiver,uEarth])
# for c in mynet.con.execute('select * from wordhidden'):print c
#
# for c in mynet.con.execute('select * from hiddenurl'):print c
#
# mynet.trainquery([wWorld,wBank],[uWorldBank,uRiver,uEarth],uWorldBank)
# mynet.getresult([wWorld,wBank],[uWorldBank,uRiver,uEarth])
#
# s=[1,4,3,2,7,3,6,3,2,4,5,3]
# from improve_schedulecost import improve_schedulecost
# money=improve_schedulecost(s)
# print 'totalprice='+str(money)

from annealing_algorithm import annealing_algorithm
from optimization import schedulecost
from optimization import people
from optimization import printschedule
domain = [(0, 9)] * (len(people) * 2)
# s,costf=annealing_algorithm(domain,schedulecost)
# printschedule(s)
# print costf

from genetic_algorithm import genetic_algorithm
s = genetic_algorithm(domain, schedulecost)
printschedule(s)

# from optimization import geneticoptimize
# s=geneticoptimize(domain,schedulecost)
# printschedule(s)
Example #17
def get_from_wdimacs(file_name):
    f = open(file_name)
    clauses = []
    num_vars = 0
    for l in f:
        if l[0] == 'c':
            continue
        elif l[0] == 'p':
            num_vars = int(l.split(' ')[2])
            continue
        else:
            clauses.append(parse_clause(l))
    return (clauses, num_vars)
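
# `parse_clause` is defined earlier in that project and is not shown in this
# fragment. Assuming standard WDIMACS clause lines of the form
# "<weight> <literal> ... 0", a hypothetical stand-in could look like this:
def parse_clause(line):
    tokens = [int(t) for t in line.split()]
    return tokens[1:-1]  # drop the leading weight and the terminating 0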


if args.question == 1:
    clause = parse_clause(args.clause)
    print satisfies_clause(clause, args.assignment)
    print num_of_satisfied_clauses([clause], args.assignment)
elif args.question == 2:
    (clauses, num_vars) = get_from_wdimacs(args.wdimacs)
    print num_of_satisfied_clauses(clauses, args.assignment)
elif args.question == 3:
    (clauses, num_vars) = get_from_wdimacs(args.wdimacs)
    for i in range(args.repetitions):
        (pop, gens, best) = genetic_algorithm(
            num_vars, 0.6, 2,
            20, 0, (lambda x: num_of_satisfied_clauses(clauses, x)),
            len(clauses), args.time_budget)
Example #18
def individual():
    return [randint(minimum, maximum) for x in xrange(length)]


def fitness(chromosome):
    f = 0
    for a, b in zip(obj, chromosome):
        f += abs(ord(a) - b)
    return f


def mutate(individual):
    i = randint(0, len(individual) - 1)
    individual[i] = randint(minimum, maximum)
    return individual


def crossover(male, female):
    half = randint(0, len(male) - 1)
    return male[:half] + female[half:]


def callback(population, e):
    print 'Epoch #{} Best: fitness ({}) individual ({})'.format(
        e, fitness(population[0]), ''.join([chr(e) for e in population[0]]))


# genetic_algorithm(individual, fitness, mutate, crossover, n_individuals=10, epochs=10, crossover_rate=0.6, mutation_rate=0.2, callback=None):
genetic_algorithm(individual, fitness, mutate, crossover, 1000, 50, 0.6, 0.5,
                  callback)
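
# The `genetic_algorithm` driver used above is imported from elsewhere in that
# project; only its signature is documented in the comment. A minimal sketch that
# is compatible with this example (the truncation selection and elitism-free loop
# are assumptions, not the original implementation) could look like this:
import random

def genetic_algorithm(individual, fitness, mutate, crossover, n_individuals=10,
                      epochs=10, crossover_rate=0.6, mutation_rate=0.2,
                      callback=None):
    # Initial population, kept sorted by fitness (lower is better, matching the
    # distance-style fitness defined above).
    population = sorted((individual() for _ in range(n_individuals)), key=fitness)
    for epoch in range(epochs):
        parents = population[:max(2, n_individuals // 2)]  # truncation selection (assumed)
        children = []
        while len(children) < n_individuals:
            male, female = random.sample(parents, 2)
            if random.random() < crossover_rate:
                child = crossover(male, female)
            else:
                child = list(male)
            if random.random() < mutation_rate:
                child = mutate(child)
            children.append(child)
        population = sorted(children, key=fitness)
        if callback:
            callback(population, epoch)
    return population[0]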
Example #19
        fitness_score = 0.0
        for i, codon in enumerate(genome):
            if codon:
                weight += knapsack_problem[i][0]
                fitness_score += knapsack_problem[i][1]
        if weight > max_weight:
            fitness_score = 0.0
        return fitness_score

    return knapsack_fitness


#construct a random knapsack problem
KNAPSACK_PROBLEM_SIZE = 1000
KNAPSACK_PROBLEM = make_knapsack_problem(KNAPSACK_PROBLEM_SIZE, 10.0, 100.0)
MAX_WEIGHT = 1000
MONITOR = make_knapsack_monitor("output", KNAPSACK_PROBLEM)
FITNESS = make_knapsack_fitness(KNAPSACK_PROBLEM, MAX_WEIGHT)
INITIAL_STATE = {"mutation_rate": 0.05}
#construct a random initial population
POPULATION_SIZE = 100
INITIAL_POPULATION = []
for _ in range(POPULATION_SIZE):
    current_genome = make_knapsack_genome(KNAPSACK_PROBLEM_SIZE)
    current_fitness_score = FITNESS(INITIAL_STATE, current_genome)
    individual = Individual(current_fitness_score, current_genome)
    INITIAL_POPULATION.append(individual)

genetic_algorithm(MONITOR, simple_select, FITNESS, simple_crossover,
                  simple_boolean_mutation, INITIAL_STATE, INITIAL_POPULATION)
Example #20
	return (uniform(-0.1, 0.1) * x, uniform(-0.1, 0.1) * y)

# Given two parents, returns their offspring
def crossover(male, female):
	mx, _ = male
	_, fy = female
	return (mx, fy)

# Callback function to render the animation
def callback(population, epoch):
	ax.cla()
	plt.contourf(X,Y,Z, 10)
	ax.set_title('Epoch: {}'.format(epoch))
	yaxis = 2.2
	ax.text(x_min, yaxis, 'Offspring (#, fitness)', bbox={'facecolor':'red', 'alpha':0, 'pad':100})
	yaxis -= 0.12
	i = 0
	for x, y in population:
		yaxis -= 0.12
		ax.text(x_min, yaxis, '({}, {:0.5f})\n'.format(i, fitness((x, y))), bbox={'facecolor':'red', 'alpha':0, 'pad':100})
		plt.scatter(x, y)
		i += 1
		plt.pause(0.5)
	plt.pause(1)


if __name__ == '__main__':
	best = genetic_algorithm(individual=individual, fitness=fitness, mutate=mutate, crossover=crossover, callback=callback)
	while True:
		plt.pause(0.05)
Example #21
        '-population',
        help='String containing the population represented as bitstrings of length n separated by space'
    )
    args = parser.parse_args()
    question = args.question
    start_time = time.time()

    if question == 1:
        for i in range(args.repetitions):
            print(ga.mutation_operator(args.bits_x, args.chi))
    elif question == 2:
        for i in range(args.repetitions):
            print(ga.crossover_operator(args.bits_x, args.bits_y))
    elif question == 3:
        print(ga.onemax(args.bits_x))
    elif question == 4:
        pop = np.array(args.population.split(' '))
        pop_fitness = np.array([ga.onemax(x) for x in pop])
        for i in range(args.repetitions):
            print(ga.tournament_selection(args.k, pop, pop_fitness))
    elif question == 5:
        lmbd = int(get_argument_value('-lambda', sys.argv[1:]))
        for i in range(args.repetitions):
            t, fbest, xbest = ga.genetic_algorithm(args.n, args.chi, args.k,
                                                   lmbd)
            print('{:>4}   {:>4}  {:>4}  {:>4}  {:>4}  {:>4}  {:>4}'.format(
                args.n, args.chi, args.k, lmbd, t, fbest, xbest))
    else:
        print('Incorrect question number.')
Example #22
def print_rg(i, elite):
    print(f'iter:{i+1:>7}   score:{elite[1]:>10.7}', end='\r')


# Plot to check the training result
def plot_ind(ind):
    X, T = sim_ind(ind)
    plt.plot(T, Xsim, label='simple')
    plt.plot(T, Xnom, label='nominal')
    plt.plot(T, X, label='rg')
    plt.legend()
    plt.show()


if __name__ == '__main__':
    GENE_LENGTH = 8 * int(EOS / dt)
    INDIVI_NUM = 100
    MUTATE_PROB = 0.1
    GENERATIONS = 10000

    individuals, highscore_list = genetic_algorithm(GENE_LENGTH,
                                                    INDIVI_NUM,
                                                    MUTATE_PROB,
                                                    prob_rg,
                                                    generations=GENERATIONS,
                                                    print_func=print_rg)

    elite = individuals[-1][0]

    plot_ind(elite)
Example #23
INDIVI_NUM = 1000
MUTATE_PROB = 0.1
GENERATIONS = 100000

## Simplified version (one-max problem)
#GENE_LENGTH = 64
#INDIVI_NUM  = 250
#MUTATE_PROB = 0.05
#GENERATIONS = 100000

## sqrt2
#GENE_LENGTH = 30
#INDIVI_NUM  = 200
#MUTATE_PROB = 0.05
#GENERATIONS = 10000

# one-max problem
individuals, highscore_list = genetic_algorithm(GENE_LENGTH,
                                                INDIVI_NUM,
                                                MUTATE_PROB,
                                                prob_one_max,
                                                generations=GENERATIONS,
                                                break_func=break_cond_one_max,
                                                print_func=print_one_max)

## sqrt2
#individuals, highscore_list = genetic_algorithm(GENE_LENGTH, INDIVI_NUM, MUTATE_PROB, prob_sqrt2,
#        generations=GENERATIONS, break_func=break_cond_sqrt2, print_func=print_sqrt2)

plot_highscores(highscore_list)
Example #24
def main():
    ##Variable Initialization

    N = 100
    n_children = 4

    parents_number = 2  #num of parents used in the mutation
    sel_pressure = 10  #How many individuals are used in the tournament selection
    mu_pressure = 0.4  #Probability of mutation
    G = 100  #Number of generations (stop criteria)

    results = pd.read_excel('./results/predictions/predictions_' +
                            sys.argv[1] + '_train_test.xlsx',
                            sheet_name="E0")
    C_train = results.drop('real', axis=1)
    columns = [str(col) for col in C_train.columns]
    columns.append("fitness")
    chromosomes_data = []

    M = pd.read_excel(
        './data/cost_matrices.xlsx',
        sheet_name=str(len(results['real'].unique())) + 'matriz'
    )  # An example of a cost matrix file is attached in the repository
    print(M)
    ##Train the genetic algorithm for each of the cross validation steps
    for i in range(10):
        sheet = "E" + np.str(i)
        results = pd.read_excel('./results/predictions/predictions_' +
                                sys.argv[1] + '_train_test.xlsx',
                                sheet_name=sheet)
        l_real = results['real']
        C_train = results.drop('real', axis=1)
        t = len(C_train.columns)

        parent, fitness = genetic.genetic_algorithm(C_train, l_real, M,
                                                    sel_pressure, t,
                                                    parents_number, n_children,
                                                    N, mu_pressure, G)
        print("\nFitness in iteration %d:\n%d" % (i, fitness))
        row = [elem for elem in parent]
        row.append(fitness)
        chromosomes_data.append(row)

    chromosomes = pd.DataFrame(chromosomes_data, columns=columns)
    print(chromosomes)
    chromosomes.to_csv('./results/evaluation/chromosomes_' + sys.argv[1] +
                       '.csv')

    ## Evaluation
    res = pd.DataFrame(columns=['MAE', 'MZE'])
    for i in range(10):
        sheet = "T" + np.str(i)
        C_test = pd.read_excel('./results/predictions/predictions_' +
                               sys.argv[1] + '_train_test.xlsx',
                               sheet_name=sheet)
        chromosome = chromosomes.iloc[i][:-1].tolist()
        y_true, y_pred = get_predictions(chromosome, M, C_test)

        print("\n\n")
        mae, mze = evaluate(y_true, y_pred)
        res.loc[i] = [mae, mze]

    res.to_csv('./results/evaluation/evaluation_' + sys.argv[1] + '.csv')
Example #25
        """ declares how a Square object should be printed
        """
        s = 'Square with side = ' + str(self.side) + '\n' + \
            'Area = ' + str(self.area()) + '\n' + \
            'Perimeter = ' + str(self.perimeter())
        return s


class TestContainer(unittest.TestCase):
    longMessage = True


def make_test_function(description, a, b):
    def test(self):
        sq = Square(a)
        self.assertEqual(sq.area(), b,
                         f"Area is shown {sq.area()} rather than {b}")

    return test


if __name__ == '__main__':

    #Automated Test Generation

    for i in range(3):
        side, area = genetic_algorithm(20)
        test_func = make_test_function(i + 1, side, area)
        setattr(TestContainer, "test_{}".format(i + 1), test_func)

    unittest.main()
Example #26
	child = [-1]*len(parent1)
	
	#fill with None
	for x in range(0,len(child)):
		child[x] = None

	start_pos = random.randint(0,len(parent1))
	end_pos = random.randint(0,len(parent1))

	if start_pos < end_pos:
		# start->end
		for x in range(start_pos,end_pos):
			child[x] = parent1[x]
	elif start_pos > end_pos:
		#end->start
		for i in range(end_pos,start_pos):
			child[i] = parent1[i]


	# replace None positions in child with the remaining cities
	for i in range(len(parent2)):
		if not parent2[i] in child:
			for x in range(len(child)):
				if child[x] == None:
					child[x] = parent2[i]
					break

	return child

genetic_algorithm(individual, fitness, mutate, crossover, 120, 95, 8000, 0.5)
Example #27
experiment = args.experiment

if experiment == 1:
    # Experiment 1 runtime vs mutation rate
    with open('runtime_mutation_{}_{}.csv'.format(args.start, args.end),
              'w') as out_file_1:
        print('Starting Experiment 1...')
        results = []
        chis = np.arange(args.start, args.end, 0.2)
        for chi in chis:
            print('Chi: {}'.format(chi))
            res = [str(chi)]
            for j in range(100):
                if (j + 1) % 10 == 0:
                    print('Step: {}'.format(j + 1))
                t, fbest, xbest = ga.genetic_algorithm(200, chi, 4, 100)
                res.append(str(t * 100))
            results.append(res)
            out_file_1.write(','.join(res) + '\n')
    print('Experiment 1 completed!')
elif experiment == 2:
    # Experiment 2 runtime vs problem size
    with open('runtime_problem_size.csv', 'w') as out_file_1:
        print('Starting Experiment 2...')
        results = []
        n_values = np.arange(20, 200, 10)
        for n in n_values:
            print('N: {}'.format(n))
            res = [str(n)]
            for j in range(100):
                if (j + 1) % 10 == 0:
Example #28
def main():

    x = np.linspace(0, 1, 1)
    y1 = trimf(x, [1, 4, 8])
    y2 = trimf(x, [3, 6, 8])
    y3 = trimf(x, [1, 4, 8])
    y4 = trimf(x, [3, 6, 8])
    plot(x, y1, y2, y3, y4)
    #xlabel('trimf, P = [3, 6, 8]')
    show()

    data, target = load_dataset('t.csv')  # random_example_test: with t it gives 0.61
    normalized = normalize_dataset(data)  # with random it gives 0.6, and 0.9 with the GA
    n_features = data.shape[1]

    fitness = lambda ws: 1.0 - evaluate_new_fuzzy_system(
        ws, normalized, target)

    ###################confusion_matrix
    #    print('Confusion matrix \n',  confusion_matrix(target,))
    # Test Fuzzy
    # ws = [0.07, 0.34, 0.48, 0.26,0.07, 0.34, 0.48, 0.26,0.07, 0.34, 0.48, 0.26,0.07, 0.34, 0.48, 0.26,0.07, 0.34, 0.48, 0.26,0.07, 0.34, 0.48, 0.26] # 95%
    # ws = [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24]
    ws = [
        0.993900000000000, 1, 0.772120000000000, 0.774550000000000,
        0.993940000000000, 1, 0.781820000000000, 0.793949000000000,
        0.357580000000000, 0.363640000000000, 0.260610000000000,
        0.254550000000000, 0.212120000000000, 0.248480000000000,
        0.278790000000000, 0.242420000000000, 0.125450000000000,
        0.121820000000000, 0.00606000000000000, 0.0181800000000000,
        0.133330000000000, 0.127270000000000, 0.1, 0.1
    ]  # w = [0, 0.21664307088134033, 0.445098590128248, 0.2350617110613577] # 96.6%
    # ws=[28,27.8,26.85,25.1,26.35,27.38,27.68,25.06,24.1,23,22,19,19.5,18,18.5,19,19.5,16.9,16.8,16,16.2,16.1,15.9,14]
    wsx = normalize_dataset(ws)
    print(wsx)

    Classification = 1.0 - fitness(wsx)
    print(target)
    print(Classification)

    # cm=confusion_matrix(target, Classification)
    # #   print('Confusion matrix \n',cm)
    # plt.figure(num=10)
    # cm=confusion_matrix(target, Classification)
    # print(confusion_matrix(target, Classification))
    # plt.imshow(confusion_matrix(target, Classification),
    #         cmap='Blues', interpolation='nearest')
    # plt.colorbar()
    # for (i, j), z in np.ndenumerate(cm):
    #     plt.text(j, i, z, ha='center', va='center')
    # plt.grid(False)
    # plt.ylabel('truth label')
    # plt.xlabel('Predicted label');
    # plt.savefig("matrix.pdf")

    record = {'GA': [], 'PSO': []}
    for _ in tqdm(range(10)):
        # GA
        t, fbest = genetic_algorithm(fitness_func=fitness,
                                     dim=n_features,
                                     n_individuals=10,
                                     epochs=40,
                                     verbose=False)
        record['GA'].append(1.0 - fbest)
        # PSO
        initial = [
            0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5,
            0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5
        ]
        bounds = [
            (0, 1), (0, 1), (0, 1), (0, 1), (0, 1), (0, 1), (0, 1), (0, 1),
            (0, 1), (0, 1), (0, 1), (0, 1), (0, 1), (0, 1), (0, 1), (0, 1),
            (0, 1), (0, 1), (0, 1), (0, 1), (0, 1), (0, 1), (0, 1), (0, 1)
        ]
        best, fbest = pso_simple.minimize(fitness,
                                          initial,
                                          bounds,
                                          num_particles=10,
                                          maxiter=10,
                                          verbose=False)
        record['PSO'].append(1.0 - fbest)
#     print(t)
    plt.plot(t)
    #print(fbest)
    # Statistics about the runs
    print('GA:')
    print(np.amax(record['GA']), np.amin(record['GA']))
    print(np.mean(record['GA']), np.std(record['GA']))

    #record['Classification']
    print('PSO:')
    print(np.amax(record['PSO']), np.amin(record['PSO']))
    print(np.mean(record['PSO']), np.std(record['PSO']))

    fig, ax = plt.subplots(figsize=(5, 4))
    ax.boxplot(list(record.values()),
               vert=True,
               patch_artist=True,
               labels=list(record.keys()))

    ax.set_xlabel('Comparison')
    ax.set_ylabel('Accuracy')
    plt.tight_layout()
    #plt.show()
    plt.savefig("ga.pdf")