Example no. 1
    def test_hill_climb_discrete_max():
        """Test hill_climb function for a discrete maximization problem"""

        problem = DiscreteOpt(5, OneMax(), maximize=True)
        _, _, curve = hill_climb(problem, restarts=20, curve=True)

        # mlrose-hiive fitness curves have two columns: (fitness, function evaluations)
        assert (curve.shape[1] == 2)
Example no. 2
    def test_hill_climb_continuous_min():
        """Test hill_climb function for a continuous minimization problem"""

        problem = ContinuousOpt(5, OneMax(), maximize=False)
        best_state, best_fitness, _ = hill_climb(problem, restarts=20)

        x = np.array([0, 0, 0, 0, 0])

        assert (np.array_equal(best_state, x) and best_fitness == 0)
Example no. 3
    def test_hill_climb_discrete_max():
        """Test hill_climb function for a discrete maximization problem"""

        problem = DiscreteOpt(5, OneMax(), maximize=True)
        best_state, best_fitness, _ = hill_climb(problem, restarts=20)

        x = np.array([1, 1, 1, 1, 1])

        assert (np.array_equal(best_state, x) and best_fitness == 5)
Example no. 4
    def test_hill_climb_max_iters():
        """Test hill_climb function with max_iters less than infinite"""

        problem = DiscreteOpt(5, OneMax(), maximize=True)
        x = np.array([0, 0, 0, 0, 0])

        best_state, best_fitness, _ = hill_climb(problem,
                                                 max_iters=1,
                                                 restarts=0,
                                                 init_state=x)

        # From the all-zeros start, one iteration takes a single improving step (fitness 0 -> 1)
        assert best_fitness == 1
Example no. 5
def hill_climbing(problem, starting_cell):
    """
    Hill climbing implementation to minimize TSP
    Inputs:
            problem --> optimization problem object required by mlrose
            starting_cell --> starting cell number, index starting from 1
    Outputs:
            optimized_cells --> cells to visit (zero-based indices)
    """
    # mlrose requires indices to start from zero
    # mlrose also requires init_state to be a full 1D tour, not a single cell,
    # so starting_cell is currently unused; earlier attempts kept for reference:
    # init_cell = np.array([starting_cell - 1])
    # init_cell = np.array([0, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 1])

    optimized_cells, _ = mlrose.hill_climb(problem)

    # Cell numbers start from one in our data; uncomment to convert back to 1-based:
    # optimized_cells += 1

    return optimized_cells
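
A usage sketch under assumed inputs (the coordinate list below is illustrative, not from the original code):

import numpy as np
import mlrose

# Hypothetical 5-cell tour; real coordinates would come from the caller's data
coords = [(0, 0), (3, 0), (3, 4), (0, 4), (1, 2)]
tsp = mlrose.TSPOpt(length=len(coords),
                    fitness_fn=mlrose.TravellingSales(coords=coords),
                    maximize=False)

cells = hill_climbing(tsp, starting_cell=1)
print(cells)  # zero-based visiting order; add 1 to recover 1-based cell numbers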
Example no. 6
best_state, best_fitness = mlrose.simulated_annealing(
    optimization_problem,
    schedule=mlrose.ExpDecay(),
    max_attempts=100,
    max_iters=1000,
    init_state=init_state,
    random_state=1)

print('The best state : ', best_state)
print('Fitness: ', best_fitness)
visualize_state(best_state)

# solving using hill-climb algorithm
hc_state, hc_fitness, hc_curve = mlrose.hill_climb(optimization_problem,
                                                   init_state=init_state,
                                                   curve=True,
                                                   restarts=50,
                                                   random_state=3,
                                                   max_iters=10)

print('The best state : ', hc_state)
print('Fitness Hill Climb: ', hc_fitness)
print('Curve: ', hc_curve)
visualize_state(hc_state)

# Randomized hill climbing
rhc_state, rhc_fitness, rhc_curve = mlrose.random_hill_climb(
    optimization_problem,
    init_state=init_state,
    curve=True,
    restarts=50,
    random_state=3)
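
Since curve=True was requested for both climbers, the fitness curves can be compared; a minimal plotting sketch (matplotlib is an assumption, not part of the original script):

import numpy as np
import matplotlib.pyplot as plt

hc = np.asarray(hc_curve)
rhc = np.asarray(rhc_curve)

# mlrose-hiive curves are 2-D (fitness, evaluations); older mlrose returns a 1-D fitness array
plt.plot(hc[:, 0] if hc.ndim == 2 else hc, label='hill climb')
plt.plot(rhc[:, 0] if rhc.ndim == 2 else rhc, label='random hill climb')
plt.xlabel('Iteration')
plt.ylabel('Fitness')
plt.legend()
plt.show()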
Example no. 7
        custo = 1
    return custo
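
# The body of fitness_function is cut off above. A HYPOTHETICAL sketch of a
# CustomFitness-compatible callable for this 14-item binary problem (the cost
# rule below is an assumption, not the original logic):
import numpy as np

def fitness_function_sketch(state):
    # state is a 14-element 0/1 vector; here the cost is just the number of chosen items
    custo = float(np.sum(state))
    return custo

fitness_function_sketch([0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 1, 0, 1])  # -> 6.0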


fitness_function([0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 1, 0, 1])
fitness = mlrose.CustomFitness(fitness_function)  # MLROSE CUSTOM FITNESS FUNCTION
# 14 DIFFERENT PRODUCTS; EACH ONE CAN TAKE 2 VALUES (0 AND 1)
problema = mlrose.DiscreteOpt(length=14,
                              fitness_fn=fitness,
                              maximize=True,
                              max_val=2)
"""## Hill climb"""

melhor_solucao, melhor_custo = mlrose.hill_climb(problema)
melhor_solucao, melhor_custo

imprimir_solucao(melhor_solucao)
"""## Simulated annealing"""

melhor_solucao, melhor_custo = mlrose.simulated_annealing(problema)
melhor_solucao, melhor_custo

imprimir_solucao(melhor_solucao)
"""## Algoritmo genético"""

melhor_solucao, melhor_custo = mlrose.genetic_alg(problema,
                                                  pop_size=500,
                                                  mutation_prob=0.2)
melhor_solucao, melhor_custo
Example no. 8
# Maximization
problema_maximizacao = mlrose.ContinuousOpt(length=6,
                                            fitness_fn=fitness,
                                            maximize=True,
                                            min_val=0,
                                            max_val=1)

# Minimization
problema_minimizacao = mlrose.ContinuousOpt(length=6,
                                            fitness_fn=fitness,
                                            maximize=False,
                                            min_val=0,
                                            max_val=1)

############ Hill Climb test ############
melhor_solucao, melhor_custo = mlrose.hill_climb(problem=problema_maximizacao, random_state=1)
melhor_solucao = melhor_solucao / melhor_solucao.sum()  # normalize the weights to sum to 1

############ Simulated Annealing test ############
melhor_solucao, melhor_custo = mlrose.simulated_annealing(problem=problema_maximizacao, random_state=1)
melhor_solucao = melhor_solucao / melhor_solucao.sum()

############ Genetic Algorithm test ############
problema_maximizacao_ga = mlrose.ContinuousOpt(length=6,
                                               fitness_fn=fitness,
                                               maximize=True,
                                               min_val=0.01,
                                               max_val=1)

# Use the GA-specific problem (min_val=0.01 keeps every weight strictly positive)
melhor_solucao, melhor_custo = mlrose.genetic_alg(problem=problema_maximizacao_ga, random_state=1)
melhor_solucao = melhor_solucao / melhor_solucao.sum()
Example no. 9
                              maximize=False,
                              max_val=10)
# length = size of the solution (12 flights); fitness_fn = the object created above
# maximize=True  -> maximizes the returned value (highest price)
# maximize=False -> minimizes the returned value (lowest price)
# max_val = the algorithm must generate a list of 12 positions where the maximum value is 10
# max_val is the number of flights we have
# The algorithm generates a 12-position list; each position can vary between 0 and 9 (10 numbers)

# HILL CLIMB ------------------------------------------------------------------------------------------
"""## Hill climb
subida da encosta (maximos e mínimos global e local)
"""

melhor_solucao, melhor_custo = mlrose.hill_climb(
    problema,
    random_state=1)  #random_state -> muda a semente geradora aleatória
imprimir_voos(melhor_solucao)

# SIMULATED ANNEALING ----------------------------------------------------------------------------------
"""## Simulated annealing"""

melhor_solucao, melhor_custo = mlrose.simulated_annealing(
    problema, schedule=mlrose.decay.GeomDecay(init_temp=10000), random_state=1)
imprimir_voos(melhor_solucao)

# GENETIC ALGORITHM -----------------------------------------------------------------------------------
"""## Genetic algorithm"""

melhor_solucao, melhor_custo = mlrose.genetic_alg(problema,
                                                  pop_size=500,
Example no. 10

# Initialize fitness function object using coords_list
fitness_coords = mlrose.TravellingSales(coords = coords_list)

problem_fit = mlrose.TSPOpt(length = len(coords_list), fitness_fn = fitness_coords,
                            maximize=False)

# # Solve problem using simulated annealing
# best_state, best_fitness = mlrose.simulated_annealing(problem_fit,
#                                                       random_state = 2,
#                                                       schedule=mlrose.GeomDecay(),
#                                                       max_attempts=200)

# Solve problem using hill climbing
best_state, best_fitness = mlrose.hill_climb(problem_fit,
                                             random_state=2)

# Index of the closest site, used as the starting point of the printed tour
seq = [x['distance'] for x in close_sites]
closest_hop = next((index for (index, d) in enumerate(close_sites) if d['distance'] == min(seq)), None)

# Rotate the tour so it begins at the closest site
d = deque(best_state)
d.rotate(-list(d).index(closest_hop))

print()
print('Total distance:', "{:.3f}".format(best_fitness), 'ly')

idx = 0
# Build a fixed-width row format sized to the widest column contents
strformat = '{:' + str(max(int(np.ceil(np.log10(len(close_sites)))), 2) + 1) + '}'
strformat += ' {:' + str(systemlen) + '}'
strformat += ' {:' + str(bodylen) + '}'
strformat += ' {:' + str(typelen) + '}'
Example no. 11
import six
import sys
# Compatibility shim: old mlrose imports sklearn.externals.six, which newer scikit-learn removed
sys.modules['sklearn.externals.six'] = six
import mlrose
from airportProblem import fitnessFunction, showFlights

# CustomFitness is for user-defined problems
fitness = mlrose.CustomFitness(fitnessFunction)
# Representing the problem: DiscreteOpt because we work with ints (10 flights per city); length is the solution size (12 flights)
# maximize=False because we want the lowest ticket prices; max_val is at most 10 (0 to 9)
problem = mlrose.DiscreteOpt(length=12,
                             fitness_fn=fitness,
                             maximize=False,
                             max_val=10)
# Returns the best solution and the best cost; the only required parameter is the problem representation
bestSolution, bestCost = mlrose.hill_climb(problem)

print(bestSolution, bestCost)
showFlights(bestSolution)
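
The real fitnessFunction is imported from airportProblem and not shown; a purely illustrative sketch with the call contract mlrose.CustomFitness expects (the price table below is invented for the sketch):

import numpy as np

# Hypothetical 12x10 price table: 12 flight slots, 10 candidate flights per slot
rng = np.random.default_rng(0)
prices = rng.integers(100, 500, size=(12, 10))

def fitnessFunctionSketch(state):
    # state[i] in 0..9 selects a flight for slot i; fitness is the total ticket price
    return float(sum(prices[i, int(flight)] for i, flight in enumerate(state)))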
Example no. 12
                                     max_val=len(POS_to_int))

        # Define initial state
        init_state = np.zeros(len(test_sentence.tags))

        # Solve problem using simulated annealing
        best_state, best_fitness1 = mlrose.simulated_annealing(
            problem,
            max_attempts=10,
            max_iters=1000,
            init_state=init_state,
            random_state=1)

        best_state, best_fitness2 = mlrose.hill_climb(problem,
                                                      restarts=0,
                                                      max_iters=1000,
                                                      init_state=init_state,
                                                      random_state=1)

        best_state, best_fitness3 = mlrose.random_hill_climb(
            problem,
            max_attempts=10,
            max_iters=1000,
            init_state=init_state,
            random_state=1)

        best_state, best_fitness4 = mlrose.genetic_alg(problem,
                                                       max_attempts=10,
                                                       max_iters=1000,
                                                       random_state=1)
Example no. 13
    def rate(self, co_type: str, co_value: float):
        """Calculates the preference of characteristic objects using the defined stochastic method.

        Parameters
        ----------
        co_type: str
            Type of the characteristic objects' preference values in the COMET method
        co_value: float
            Value of the characteristic objects' preferences in the COMET method

        Returns
        -------
        pos, cost_history
            The optimized characteristic-object preferences and the cost history of the run
        """

        model = Comet(self._criteria)
        model.generate_co()
        model.rate_co(co_type, co_value)

        dict_arg = {
            'model': model,
            'alternatives': self._alternatives,
            'preference': self._alternativesPreference
        }

        if self._stochasticMethod == "hill-climbing":
            problem = ContinuousOpt(model.get_co_len(),
                                    CustomFitness(self._mlrose_fitness,
                                                  **dict_arg),
                                    maximize=False,
                                    step=0.01)
            pos, _, cost_history = hill_climb(
                problem,
                max_iters=self._iterations,
                curve=True,
                init_state=model.get_co_preference())
            cost_history = np.abs(cost_history)

        elif self._stochasticMethod == "simulated-annealing":
            problem = ContinuousOpt(model.get_co_len(),
                                    CustomFitness(self._mlrose_fitness,
                                                  **dict_arg),
                                    maximize=False,
                                    step=0.01)
            pos, _, cost_history = simulated_annealing(
                problem,
                max_iters=self._iterations,
                curve=True,
                init_state=model.get_co_preference())
            cost_history = np.abs(cost_history)

        elif self._stochasticMethod == "pso":
            bound_max = np.ones(model.get_co_len())
            bound_min = np.zeros(model.get_co_len())
            bounds = (bound_min, bound_max)
            options = {'c1': 0.5, 'c2': 0.3, 'w': 0.9}

            optimizer = single.GlobalBestPSO(n_particles=20,
                                             dimensions=model.get_co_len(),
                                             options=options,
                                             bounds=bounds)
            cost, pos = optimizer.optimize(self._pso_fitness, self._iterations,
                                           **dict_arg)
            cost_history = optimizer.cost_history

        else:
            raise ValueError(
                "Wrong optimization method has been determined: %s" %
                (repr(self._stochasticMethod)))

        return pos, cost_history
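
A hedged usage sketch of rate (the instance name and argument values below are assumptions, not documented options):

import matplotlib.pyplot as plt

# `ranker` stands in for an instance of the surrounding class, already configured
# with criteria, alternatives and a stochastic method such as "hill-climbing"
pos, cost_history = ranker.rate(co_type="random", co_value=1.0)

plt.plot(cost_history)
plt.xlabel('Iteration')
plt.ylabel('Cost')
plt.show()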