Example #1
def apply(population_size: int, individual_size: int, bounds: np.ndarray,
          func: Callable[[np.ndarray],
                         float], opts: Any, callback: Callable[[Dict], Any],
          lambdas: Union[list, np.array], ng: int, c: Union[int, float],
          p: Union[int, float], max_evals: int, seed: Union[int, None],
          population: Union[np.array,
                            None], answer: Union[None, float,
                                                 int]) -> [np.ndarray, int]:
    """
    Applies the MPEDE differential evolution algorithm.
    :param population_size: Size of the population (NP-max)
    :type population_size: int
    :param ng: Number of generations after which the best strategy is updated.
    :type ng: int
    :param lambdas: Percentages of each of the 4 subpopulations.
    :type lambdas: Union[list, np.array]
    :param individual_size: Number of genes/features of an individual.
    :type individual_size: int
    :param bounds: Numpy ndarray with individual_size rows and 2 columns.
    First column represents the minimum value for the row feature.
    Second column represents the maximum value for the row feature.
    :type bounds: np.ndarray
    :param func: Evaluation function. The function used must receive one
     parameter. This parameter will be a numpy array representing an individual.
    :type func: Callable[[np.ndarray], float]
    :param opts: Optional parameters for the fitness function.
    :type opts: Any type.
    :param callback: Optional function that allows read access to the state of all variables once each generation.
    :type callback: Callable[[Dict], Any]
    :param max_evals: Number of evaluations after which the algorithm is stopped.
    :type max_evals: int
    :param seed: Random number generation seed. Fix a number to reproduce the
    same results in later experiments.
    :param p: Parameter to choose the best vectors. Must be in (0, 1].
    :type p: Union[int, float]
    :param c: Variable to control parameter adaptation. Must be in [0, 1].
    :type c: Union[int, float]
    :type seed: Union[int, None]
    :return: Yields, once per generation, the best individual found, its
     fitness, the current population and the population fitness values.
    :rtype: Iterator

    """

    # 0. Check external parameters
    if type(population_size) is not int or population_size <= 0:
        raise ValueError("population_size must be a positive integer.")

    if type(individual_size) is not int or individual_size <= 0:
        raise ValueError("individual_size must be a positive integer.")

    if type(max_evals) is not int or max_evals <= 0:
        raise ValueError("max_evals must be a positive integer.")

    if type(bounds) is not np.ndarray or bounds.shape != (individual_size, 2):
        raise ValueError("bounds must be a NumPy ndarray.\n"
                         "The array must be of individual_size length. "
                         "Each row must have 2 elements.")

    if type(seed) is not int and seed is not None:
        raise ValueError("seed must be an integer or None.")

    if type(p) not in [int, float] or not 0 < p <= 1:
        raise ValueError("p must be a real number in (0, 1].")

    if type(c) not in [int, float] or not 0 <= c <= 1:
        raise ValueError("c must be a real number in [0, 1].")

    if type(ng) is not int or ng <= 0:
        raise ValueError("ng must be a positive integer.")

    if type(lambdas) not in [list, np.ndarray] or len(lambdas) != 4 \
            or sum(lambdas) != 1:
        raise ValueError(
            "lambdas must be a list or ndarray of 4 numbers that sum to 1.")

    np.random.seed(seed)

    # 1. Initialize internal parameters
    # 1.1 Control parameters
    u_cr = np.ones(3) * 0.5
    u_f = np.ones(3) * 0.5
    f_var = np.zeros(3)
    fes = np.zeros(3)

    # 1.2 Initialize population
    pop_size = np.array(lambdas) * population_size

    if population is None:
        big_population = commons.init_population(int(sum(pop_size)),
                                                 individual_size, bounds)
    else:
        big_population = population

    pops = np.array_split(big_population, 4)

    chosen = np.random.randint(0, 3)
    newpop = np.concatenate((pops[chosen], pops[3]))
    pops[chosen] = newpop
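    # pops[0..2] hold the three strategy subpopulations; the fourth (reward)
    # subpopulation is merged into a randomly chosen strategy here and is
    # reassigned to the best-performing strategy every ng generations.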
    pop_size = list(map(len, pops))
    current_generation = 0
    num_evals = 0

    f = []
    cr = []
    fitnesses = []
    for j in range(3):
        f.append(np.empty(pop_size[j]))
        cr.append(np.empty(pop_size[j]))
        fitnesses.append(commons.apply_fitness(pops[j], func, opts))
        num_evals += len(pops[j])

    # 2. Start the algorithm
    while num_evals <= max_evals:
        current_generation += 1

        # 2.1 Generate CR and F values
        for j in range(3):
            f[j] = scipy.stats.cauchy.rvs(loc=u_f[j],
                                          scale=0.1,
                                          size=len(pops[j]))
            f[j] = np.clip(f[j], 0, 1)

            cr[j] = np.random.normal(u_cr[j], 0.1, len(pops[j]))
            cr[j] = np.clip(cr[j], 0, 1)

        # 2.2 Apply mutation to each subpopulation
        mutated1 = commons.current_to_pbest_mutation(
            pops[0], fitnesses[0], f[0].reshape(len(f[0]), 1),
            np.ones(len(pops[0])) * p, bounds)

        mutated2 = commons.current_to_rand_1_mutation(
            pops[1], fitnesses[1], f[1].copy().reshape(len(f[1]), 1) * .5 + 1,
            f[1].reshape(len(f[1]), 1), bounds)

        mutated3 = commons.binary_mutation(pops[2], f[2].reshape(len(f[2]), 1),
                                           bounds)

        # 2.3 Do the crossover and calculate new fitness
        crossed1 = commons.crossover(pops[0], mutated1,
                                     cr[0].reshape(len(cr[0]), 1))
        crossed2 = mutated2
        crossed3 = commons.crossover(pops[2], mutated3,
                                     cr[2].reshape(len(cr[2]), 1))

        c_fitness1 = commons.apply_fitness(crossed1, func, opts)
        c_fitness2 = commons.apply_fitness(crossed2, func, opts)
        c_fitness3 = commons.apply_fitness(crossed3, func, opts)

        for j in range(3):
            num_evals += len(pops[j])
            fes[j] += len(pops[j])

        # 2.4 Do the selection and update control parameters
        winners1 = c_fitness1 < fitnesses[0]
        winners2 = c_fitness2 < fitnesses[1]
        winners3 = c_fitness3 < fitnesses[2]

        pops[0] = commons.selection(pops[0], crossed1, fitnesses[0],
                                    c_fitness1)
        pops[1] = commons.selection(pops[1], crossed2, fitnesses[1],
                                    c_fitness2)
        pops[2] = commons.selection(pops[2], crossed3, fitnesses[2],
                                    c_fitness3)

        # Record each strategy's fitness improvement before overwriting the
        # old fitness values; f_var drives the strategy reassignment below.
        f_var[0] += np.sum(fitnesses[0][winners1] - c_fitness1[winners1])
        f_var[1] += np.sum(fitnesses[1][winners2] - c_fitness2[winners2])
        f_var[2] += np.sum(fitnesses[2][winners3] - c_fitness3[winners3])

        fitnesses[0][winners1] = c_fitness1[winners1]
        fitnesses[1][winners2] = c_fitness2[winners2]
        fitnesses[2][winners3] = c_fitness3[winners3]

        if sum(winners1) != 0 and np.sum(f[0][winners1]) != 0:
            u_cr[0] = (1 - c) * u_cr[0] + c * np.mean(cr[0][winners1])
            u_f[0] = (1 - c) * u_f[0] + c * (np.sum(f[0][winners1]**2) /
                                             np.sum(f[0][winners1]))
        if sum(winners2) != 0 and np.sum(f[1][winners2]) != 0:
            u_cr[1] = (1 - c) * u_cr[1] + c * np.mean(cr[1][winners2])
            u_f[1] = (1 - c) * u_f[1] + c * (np.sum(f[1][winners2]**2) /
                                             np.sum(f[1][winners2]))
        if sum(winners3) != 0 and np.sum(f[2][winners3]) != 0:
            u_cr[2] = (1 - c) * u_cr[2] + c * np.mean(cr[2][winners3])
            u_f[2] = (1 - c) * u_f[2] + c * (np.sum(f[2][winners3]**2) /
                                             np.sum(f[2][winners3]))

        population = np.concatenate((pops[0], pops[1], pops[2]))
        fitness = np.concatenate((fitnesses[0], fitnesses[1], fitnesses[2]))

        if current_generation % ng == 0:
            # Rate each strategy by fitness improvement per consumed evaluation.
            k = [f_var[i] / (len(pops[i]) * ng) for i in range(3)]
            chosen = np.argmax(k)
            f_var = np.zeros(3)
            fes = np.zeros(3)

        indexes = np.arange(0, len(population), 1, int)
        np.random.shuffle(indexes)
        indexes = np.array_split(indexes, 4)

        pops = []
        fitnesses = []
        f = []
        cr = []

        for j in range(3):

            if j == chosen:
                pops.append(
                    np.concatenate(
                        (population[indexes[j]], population[indexes[3]])))
                fitnesses.append(
                    np.concatenate((fitness[indexes[j]], fitness[indexes[3]])))
            else:
                pops.append(population[indexes[j]])
                fitnesses.append(fitness[indexes[j]])

            f.append(np.empty(len(pops[j])))
            cr.append(np.empty(len(pops[j])))

        if callback is not None:
            callback(**(locals()))

        best = np.argmin(fitness)

        yield population[best], fitness[best], population, fitness

        if fitness[best] == answer:
            break
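
A minimal usage sketch for the MPEDE generator above, assuming it is importable together with its `commons` helpers; the sphere objective and every parameter value (including the `lambdas` split) are illustrative only:

import numpy as np

def sphere(x: np.ndarray) -> float:
    # Simple convex test objective (assumed, not part of the original code).
    return float(np.sum(x ** 2))

dim = 10
bounds = np.array([[-5.0, 5.0]] * dim)
for best, best_fitness, pop, pop_fitness in apply(
        population_size=100, individual_size=dim, bounds=bounds,
        func=sphere, opts=None, callback=None,
        lambdas=[0.2, 0.2, 0.2, 0.4], ng=20, c=0.1, p=0.1,
        max_evals=10000, seed=42, population=None, answer=None):
    pass  # each iteration of the loop corresponds to one generation
print(best_fitness)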
Example #2
def apply(population_size: int, individual_size: int, bounds: np.ndarray,
          func: Callable[[np.ndarray], float], opts: Any,
          callback: Callable[[Dict], Any], max_evals: int, seed: Union[int,
                                                                       None],
          population: Union[np.array, None], answer: Union[None, float, int]):
    """
    Applies the Self-adaptive differential evolution algorithm (SaDE).
    :param population_size: Size of the population.
    :type population_size: int
    :param individual_size: Number of genes/features of an individual.
    :type individual_size: int
    :param bounds: Numpy ndarray with individual_size rows and 2 columns.
    First column represents the minimum value for the row feature.
    Second column represents the maximum value for the row feature.
    :type bounds: np.ndarray
    :param func: Evaluation function. The function used must receive one
     parameter. This parameter will be a numpy array representing an individual.
    :type func: Callable[[np.ndarray], float]
    :param opts: Optional parameters for the fitness function.
    :type opts: Any type.
    :param callback: Optional function that allows read access to the state of all variables once each generation.
    :type callback: Callable[[Dict], Any]
    :param max_evals: Number of evaluations after which the algorithm is stopped.
    :type max_evals: int
    :param seed: Random number generation seed. Fix a number to reproduce the
    same results in later experiments.
    :type seed: Union[int, None]
    :return: Yields, once per generation, the best individual found, its
     fitness and the current population.
    :rtype: Iterator
    """

    np.random.seed(seed)
    if population is None:
        population = commons.init_population(population_size, individual_size,
                                             bounds)

    fitness = commons.apply_fitness(population, func, opts)

    num_evals = 0

    pf1, pf2, pf3 = 1 / 3, 1 / 3, 1 / 3
    cr1, cr2, cr3 = 1 / 3, 1 / 3, 1 / 3
    bin_cross = 0.5
    best_mutation = 0.5

    current_generation = 1
    learning_period = 50
    update = True

    s = collections.defaultdict(lambda: np.zeros(learning_period))

    learning_k = 0
    run_mmts = True
    mmts_desired_evals = 60
    num_no_mmts = 0

    while num_evals < max_evals:

        # 1. Generate ensemble parameters
        if current_generation == 1 or current_generation > learning_period:
            f = np.random.choice([0.3, 0.5, 0.7],
                                 p=[pf1, pf2, pf3],
                                 size=population_size)
            cr = np.random.choice([0.1, 0.5, 0.9],
                                  p=[cr1, cr2, cr3],
                                  size=population_size)
            mutation_strat = np.random.choice(
                ['rand', 'best'],
                p=[1 - best_mutation, best_mutation],
                size=population_size)
            cross_method = np.random.choice(['bin', 'exp'],
                                            p=[bin_cross, 1 - bin_cross],
                                            size=population_size)

        # 2.0 Niching
        m = 5
        pop_b = population.copy().reshape(population.shape[0], 1,
                                          population.shape[1])
        distances = np.sqrt(
            np.einsum('ijk, ijk->ij', population - pop_b, population - pop_b))
        neighbors = np.argsort(distances, axis=1)
        l_best_indexes = neighbors[:, 1]
        neighbors = neighbors[l_best_indexes, 1:m]
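        # Each row of `distances` holds the Euclidean distances from one
        # individual to every other one; column 0 of the argsort is the
        # individual itself, so `l_best_indexes[i]` is i's nearest neighbour
        # and `neighbors` keeps the m - 1 indices closest to that neighbour,
        # used as the local mating pool in the mutation step below.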

        # 2.1 Mutation
        rand_mut_idx = np.where(mutation_strat == 'rand')[0]
        best_mut_idx = np.where(mutation_strat == 'best')[0]
        mutated = np.empty(population.shape)

        # 2.1.a Rand mutation
        # Generate parents indexes
        choices = np.arange(0, neighbors.shape[1])
        parents1 = np.empty(population.shape)
        parents2 = np.empty(population.shape)
        parents3 = np.empty(population.shape)

        for i in range(population_size):
            choice = np.random.choice(choices, 3, replace=False)
            parents1[i] = population[neighbors[i, choice[0]]]
            parents2[i] = population[neighbors[i, choice[1]]]
            parents3[i] = population[neighbors[i, choice[2]]]

        mutated[rand_mut_idx] = parents1[
            rand_mut_idx] + f[rand_mut_idx].reshape(len(rand_mut_idx), 1) * (
                parents2[rand_mut_idx] - parents3[rand_mut_idx])

        mutated[best_mut_idx] = population[
            l_best_indexes[best_mut_idx]] + f[best_mut_idx].reshape(
                len(best_mut_idx),
                1) * (parents1[best_mut_idx] - parents2[best_mut_idx])

        mutated = commons.keep_bounds(mutated, bounds)

        # 2.2 Crossover
        bin_cross_idx = np.where(cross_method == 'bin')[0]
        exp_cross_idx = np.where(cross_method == 'exp')[0]

        crossed = np.empty(population.shape)
        crossed[bin_cross_idx] = commons.crossover(
            population[bin_cross_idx], mutated[bin_cross_idx],
            cr[bin_cross_idx].reshape(len(cr[bin_cross_idx]), 1))
        crossed[exp_cross_idx] = commons.exponential_crossover(
            population[exp_cross_idx], mutated[exp_cross_idx],
            cr[exp_cross_idx].reshape(len(cr[exp_cross_idx]), 1))

        # 2.3 Recalculate fitness
        c_fitness = commons.apply_fitness(crossed, func, opts)
        num_evals += population_size

        # 2.4 Distance between new population and original population
        distances = np.sqrt(
            np.einsum('ijk, ijk->ij', crossed - pop_b, crossed - pop_b))
        neighbors = np.argsort(distances, axis=1)
        l_best_indexes = neighbors[:, 1]

        selection = [
            c_fitness[i] < fitness[l_best_indexes[i]]
            for i in range(len(population))
        ]
        population[l_best_indexes[selection]] = crossed[selection]
        fitness[l_best_indexes[selection]] = c_fitness[selection]

        if 1 < current_generation < learning_period:
            num_no_mmts += 60
            if num_no_mmts >= num_evals_mmts:
                run_mmts = True
                num_no_mmts = 0

        if current_generation > learning_period:
            if np.random.rand() > p_mmts:
                run_mmts = True

        if run_mmts:
            selected = clearing(0.2, 5, population, fitness.copy())
            a = mmts.mmts(population[selected], bounds, fitness[selected],
                          mmts_desired_evals, func, opts)
            population[selected] = a[0]
            improved = a[1] < fitness[selected]
            s['mmts_ok'] = np.sum(improved)
            s['mmts_fail'] = len(improved) - np.sum(improved)
            fitness[selected] = a[1]
            num_evals += a[2]

            if current_generation < learning_period:
                num_evals_mmts = a[2]

            run_mmts = False

        # 3. Update control parameters
        s['pf1_ok'][learning_k] = sum(np.logical_and(f == 0.3, selection))
        s['pf1_fail'][learning_k] = sum(f == 0.3) - s['pf1_ok'][learning_k]
        s['pf2_ok'][learning_k] = sum(np.logical_and(f == 0.5, selection))
        s['pf2_fail'][learning_k] = sum(f == 0.5) - s['pf2_ok'][learning_k]
        s['pf3_ok'][learning_k] = sum(np.logical_and(f == 0.7, selection))
        s['pf3_fail'][learning_k] = sum(f == 0.7) - s['pf3_ok'][learning_k]

        s['cr1_ok'][learning_k] = sum(np.logical_and(cr == 0.1, selection))
        s['cr1_fail'][learning_k] = sum(cr == 0.1) - s['cr1_ok'][learning_k]
        s['cr2_ok'][learning_k] = sum(np.logical_and(cr == 0.5, selection))
        s['cr2_fail'][learning_k] = sum(cr == 0.5) - s['cr2_ok'][learning_k]
        s['cr3_ok'][learning_k] = sum(np.logical_and(cr == 0.9, selection))
        s['cr3_fail'][learning_k] = sum(cr == 0.9) - s['cr3_ok'][learning_k]

        selection = np.array(selection)
        s['rand_ok'][learning_k] = sum(selection[rand_mut_idx])
        s['rand_fail'][learning_k] = len(
            rand_mut_idx) - s['rand_ok'][learning_k]
        s['best_ok'][learning_k] = sum(selection[best_mut_idx])
        s['best_fail'][learning_k] = len(
            best_mut_idx) - s['best_ok'][learning_k]

        s['bin_ok'][learning_k] = sum(selection[bin_cross_idx])
        s['bin_fail'][learning_k] = len(
            bin_cross_idx) - s['bin_ok'][learning_k]
        s['exp_ok'][learning_k] = sum(selection[exp_cross_idx])
        s['exp_fail'][learning_k] = len(
            exp_cross_idx) - s['exp_ok'][learning_k]
        s['saepsde_ok'][learning_k] = np.sum(selection)
        s['saepsde_fail'][learning_k] = population_size - np.sum(selection)

        learning_k = (learning_k + 1) % learning_period

        current_generation += 1

        if current_generation > learning_period:
            sf1 = np.sum(s['pf1_ok']) / (np.sum(s['pf1_ok']) +
                                         np.sum(s['pf1_fail'])) + 0.02
            sf2 = np.sum(s['pf2_ok']) / (np.sum(s['pf2_ok']) +
                                         np.sum(s['pf2_fail'])) + 0.02
            sf3 = np.sum(s['pf3_ok']) / (np.sum(s['pf3_ok']) +
                                         np.sum(s['pf3_fail'])) + 0.02

            pf1 = sf1 / (sf1 + sf2 + sf3)
            pf2 = sf2 / (sf1 + sf2 + sf3)
            pf3 = sf3 / (sf1 + sf2 + sf3)

            pcr1 = np.sum(s['cr1_ok']) / (np.sum(s['cr1_ok']) +
                                          np.sum(s['cr1_fail'])) + 0.02
            pcr2 = np.sum(s['cr2_ok']) / (np.sum(s['cr2_ok']) +
                                          np.sum(s['cr2_fail'])) + 0.02
            pcr3 = np.sum(s['cr3_ok']) / (np.sum(s['cr3_ok']) +
                                          np.sum(s['cr3_fail'])) + 0.02

            cr1 = pcr1 / (pcr1 + pcr2 + pcr3)
            cr2 = pcr2 / (pcr1 + pcr2 + pcr3)
            cr3 = pcr3 / (pcr1 + pcr2 + pcr3)

            pmut_best = np.sum(s['best_ok']) / (np.sum(s['best_ok']) +
                                                np.sum(s['best_fail'])) + 0.02
            pmut_rand = np.sum(s['rand_ok']) / (np.sum(s['rand_ok']) +
                                                np.sum(s['rand_fail'])) + 0.02

            best_mutation = pmut_best / (pmut_best + pmut_rand)

            pcross_bin = np.sum(s['bin_ok']) / (np.sum(s['bin_ok']) +
                                                np.sum(s['bin_fail'])) + 0.02
            pcross_exp = np.sum(s['exp_ok']) / (np.sum(s['exp_ok']) +
                                                np.sum(s['exp_fail'])) + 0.02

            bin_cross = pcross_bin / (pcross_bin + pcross_exp)

            p_saepsde = np.sum(s['saepsde_ok']) / (
                np.sum(s['saepsde_ok']) + np.sum(s['saepsde_fail'])) + 0.02
            p_mmts = np.sum(s['mmts_ok']) / (np.sum(s['mmts_ok']) +
                                             np.sum(s['mmts_fail'])) + 0.02

            p_mmts = p_mmts / (p_saepsde + p_mmts)

        if callback is not None:
            callback(**(locals()))

        best = np.argmin(fitness)

        yield population[best], fitness[best], population

        if fitness[best] == answer:
            break
Example #3
import os

import numpy as np

import commons
from cec2005real.cec2005 import Function

# `functions`, `algos` and `RUNS` are assumed to be defined earlier in the
# original script; they are not part of this excerpt.

############################################
#              Main Function               #
############################################
dims = [2, 10, 30]
for dim in dims:
    for funcNum in functions.keys():
        fbench = Function(funcNum, dim)
        info = fbench.info()
        function = fbench.get_eval_function()
        # Build the (dim, 2) bounds array once per function/dimension pair.
        bounds = np.array([(info['lower'], info['upper'])] * dim)
        startingPopulations = [
            commons.init_population(10 * dim, dim, bounds)
            for x in range(RUNS)
        ]
        for j, algo in enumerate(algos.keys()):
            for x in range(RUNS):
                params = algo.get_default_params(dim=dim)
                params['func'] = function
                params['bounds'] = bounds
                #params['max_evals'] = 10000
                params['opts'] = None
                params['answer'] = None
                params['population'] = startingPopulations[x].copy()
                result = algo.apply(**params)
                """
                print(type(result), algo)
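
The loop above relies on `functions`, `algos` and `RUNS` being defined earlier in the original script. A hypothetical sketch of what those definitions could look like (module names, function ids and the number of runs are assumptions, not taken from the source):

import importlib

RUNS = 25  # assumed number of independent runs per algorithm
# Only the keys of `functions` are used above; CEC2005 defines functions 1-25.
functions = {i: None for i in range(1, 26)}
# Each key of `algos` must be a module exposing get_default_params(dim=...)
# and apply(...); the module names below are purely illustrative.
algos = {importlib.import_module(name): name
         for name in ('de', 'jade', 'lshade')}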
Example #4
def apply(population_size: int, individual_size: int, bounds: np.ndarray,
          func: Callable[[np.ndarray], float], opts: Any, memory_size: int,
          callback: Callable[[Dict], Any], max_evals: int, seed: Union[int,
                                                                       None],
          population: Union[np.array,
                            None], answer: Union[None, float,
                                                 int]) -> [np.ndarray, int]:
    """
    Applies the L-SHADE Differential Evolution Algorithm.
    :param population_size: Size of the population.
    :type population_size: int
    :param individual_size: Number of genes/features of an individual.
    :type individual_size: int
    :param bounds: Numpy ndarray with individual_size rows and 2 columns.
    First column represents the minimum value for the row feature.
    Second column represents the maximum value for the row feature.
    :type bounds: np.ndarray
    :param func: Evaluation function. The function used must receive one
     parameter. This parameter will be a numpy array representing an individual.
    :type func: Callable[[np.ndarray], float]
    :param opts: Optional parameters for the fitness function.
    :type opts: Any type.
    :param memory_size: Size of the internal memory.
    :type memory_size: int
    :param callback: Optional function that allows read access to the state of all variables once each generation.
    :type callback: Callable[[Dict], Any]
    :param max_evals: Number of evaluations after which the algorithm is stopped.
    :type max_evals: int
    :param seed: Random number generation seed. Fix a number to reproduce the
    same results in later experiments.
    :type seed: Union[int, None]
    :return: Yields, once per generation, the best individual found, its
     fitness, the current population and the population fitness values.
    :rtype: Iterator
    """
    # 0. Check parameters are valid
    if type(population_size) is not int or population_size <= 0:
        raise ValueError("population_size must be a positive integer.")

    if type(individual_size) is not int or individual_size <= 0:
        raise ValueError("individual_size must be a positive integer.")

    if type(max_evals) is not int or max_evals <= 0:
        raise ValueError("max_iter must be a positive integer.")

    if type(bounds) is not np.ndarray or bounds.shape != (individual_size, 2):
        raise ValueError("bounds must be a NumPy ndarray.\n"
                         "The array must be of individual_size length. "
                         "Each row must have 2 elements.")

    if type(seed) is not int and seed is not None:
        raise ValueError("seed must be an integer or None.")

    np.random.seed(seed)
    random.seed(seed)

    # 1. Initialization
    if population is None:
        population = commons.init_population(population_size, individual_size,
                                             bounds)
    init_size = population_size
    m_cr = np.ones(memory_size) * 0.5
    m_f = np.ones(memory_size) * 0.5
    archive = []
    k = 0
    fitness = commons.apply_fitness(population, func, opts)

    all_indexes = list(range(memory_size))
    current_generation = 0
    num_evals = population_size
    # Calculate max_iters
    n = population_size
    i = 0
    max_iters = 0

    while i < max_evals:
        max_iters += 1
        n = round((4 - init_size) / max_evals * i + init_size)
        i += n
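    # The loop above simulates the linear population-size reduction schedule:
    # with, e.g., init_size = 100 and max_evals = 10000 the population shrinks
    # linearly from 100 individuals at i = 0 to 4 at i = max_evals, and
    # max_iters counts how many generations that schedule takes.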

    while num_evals < max_evals:
        # 2.1 Adaptation
        r = np.random.choice(all_indexes, population_size)
        cr = np.random.normal(m_cr[r], 0.1, population_size)
        cr = np.clip(cr, 0, 1)
        cr[m_cr[r] == 1] = 0
        f = scipy.stats.cauchy.rvs(loc=m_f[r], scale=0.1, size=population_size)
        f[f > 1] = 0

        while sum(f <= 0) != 0:
            r = np.random.choice(all_indexes, sum(f <= 0))
            f[f <= 0] = scipy.stats.cauchy.rvs(loc=m_f[r],
                                               scale=0.1,
                                               size=sum(f <= 0))

        p = np.ones(population_size) * .11

        # 2.2 Common steps
        mutated = commons.current_to_pbest_mutation(population, fitness,
                                                    f.reshape(len(f), 1), p,
                                                    bounds)
        crossed = commons.crossover(population, mutated, cr.reshape(len(f), 1))
        c_fitness = commons.apply_fitness(crossed, func, opts)
        num_evals += population_size
        population, indexes = commons.selection(population,
                                                crossed,
                                                fitness,
                                                c_fitness,
                                                return_indexes=True)

        # 2.3 Adapt for next generation
        archive.extend(population[indexes])

        if len(indexes) > 0:
            if len(archive) > population_size:
                archive = random.sample(archive, population_size)

            weights = np.abs(fitness[indexes] - c_fitness[indexes])
            weights /= np.sum(weights)
            m_cr[k] = np.sum(weights * cr[indexes]**2) / np.sum(
                weights * cr[indexes])
            if np.isnan(m_cr[k]):
                m_cr[k] = 1
            m_f[k] = np.sum(weights * f[indexes]**2) / np.sum(
                weights * f[indexes])
            k += 1
            if k == memory_size:
                k = 0

        fitness[indexes] = c_fitness[indexes]
        # Adapt population size
        new_population_size = round((4 - init_size) / max_evals * num_evals +
                                    init_size)
        if population_size > new_population_size:
            population_size = new_population_size
            best_indexes = np.argsort(fitness)[:population_size]
            population = population[best_indexes]
            fitness = fitness[best_indexes]
            if k == init_size:
                k = 0

        if callback is not None:
            callback(**(locals()))
        current_generation += 1

        best = np.argmin(fitness)

        yield population[best], fitness[best], population, fitness

        if fitness[best] == answer:
            break
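
Every variant in these examples invokes its callback as callback(**(locals())), so in practice the callback should accept arbitrary keyword arguments rather than a single dict; a minimal sketch (the logging behaviour is illustrative only):

import numpy as np

def log_progress(**kwargs):
    # The algorithm hands over its entire local scope as keyword arguments;
    # pick out whatever is of interest, e.g. the generation and best fitness.
    fitness = kwargs.get('fitness')
    generation = kwargs.get('current_generation')
    if fitness is not None:
        print("generation", generation, "best fitness", float(np.min(fitness)))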
Example #5
def apply(population_size: int, individual_size: int, bounds: np.ndarray,
          func: Callable[[np.ndarray], float], opts: Any, p: Union[int, float],
          c: Union[int, float], callback: Callable[[Dict], Any],
          max_evals: int, seed: Union[int, None], population: Union[np.array,
                                                                    None],
          answer: Union[None, int, float]) -> [np.ndarray, int]:
    """
    Applies the JADE Differential Evolution algorithm.
    :param population_size: Size of the population.
    :type population_size: int
    :param individual_size: Number of genes/features of an individual.
    :type individual_size: int
    :param bounds: Numpy ndarray with individual_size rows and 2 columns.
    First column represents the minimum value for the row feature.
    Second column represents the maximum value for the row feature.
    :type bounds: np.ndarray
    :param func: Evaluation function. The function used must receive one
     parameter. This parameter will be a numpy array representing an individual.
    :type func: Callable[[np.ndarray], float]
    :param opts: Optional parameters for the fitness function.
    :type opts: Any type.
    :param p: Parameter to choose the best vectors. Must be in (0, 1].
    :type p: Union[int, float]
    :param c: Variable to control parameter adaptation. Must be in [0, 1].
    :type c: Union[int, float]
    :param callback: Optional function that allows read access to the state of all variables once each generation.
    :type callback: Callable[[Dict], Any]
    :param max_evals: Number of evaluations after which the algorithm is stopped.
    :type max_evals: int
    :param seed: Random number generation seed. Fix a number to reproduce the
    same results in later experiments.
    :type seed: Union[int, None]
    :return: A list with, for each generation, the best individual found, its
     fitness, the population and the population fitness values.
    :rtype: list
    """
    # 0. Check parameters are valid
    if type(population_size) is not int or population_size <= 0:
        raise ValueError("population_size must be a positive integer.")

    if type(individual_size) is not int or individual_size <= 0:
        raise ValueError("individual_size must be a positive integer.")

    if type(max_evals) is not int or max_evals <= 0:
        raise ValueError("max_evals must be a positive integer.")

    if type(bounds) is not np.ndarray or bounds.shape != (individual_size, 2):
        raise ValueError("bounds must be a NumPy ndarray.\n"
                         "The array must be of individual_size length. "
                         "Each row must have 2 elements.")

    if type(seed) is not int and seed is not None:
        raise ValueError("seed must be an integer or None.")

    if type(p) not in [int, float] or not 0 < p <= 1:
        raise ValueError("p must be a real number in (0, 1].")
    if type(c) not in [int, float] or not 0 <= c <= 1:
        raise ValueError("c must be a real number in [0, 1].")

    np.random.seed(seed)

    # 1. Init population
    if population is None:
        population = commons.init_population(population_size, individual_size,
                                             bounds)
    u_cr = 0.5
    u_f = 0.6

    p = np.ones(population_size) * p
    fitness = commons.apply_fitness(population, func, opts)
    max_iters = max_evals // population_size
    #print("jade : ", max_iters)
    results = []
    for current_generation in range(max_iters):
        # 2.1 Generate parameter values for current generation
        cr = np.random.normal(u_cr, 0.1, population_size)
        f = np.random.rand(population_size // 3) * 1.2
        f = np.concatenate(
            (f,
             np.random.normal(u_f, 0.1,
                              population_size - (population_size // 3))))

        # 2.2 Common steps
        mutated = commons.current_to_pbest_mutation(population, fitness,
                                                    f.reshape(len(f), 1), p,
                                                    bounds)
        crossed = commons.crossover(population, mutated, cr.reshape(len(f), 1))
        c_fitness = commons.apply_fitness(crossed, func, opts)
        population, indexes = commons.selection(population,
                                                crossed,
                                                fitness,
                                                c_fitness,
                                                return_indexes=True)

        # 2.3 Adapt for next generation
        if len(indexes) != 0:
            u_cr = (1 - c) * u_cr + c * np.mean(cr[indexes])
            u_f = (1 - c) * u_f + c * (np.sum(f[indexes]**2) /
                                       np.sum(f[indexes]))
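            # u_f uses the Lehmer mean sum(F^2) / sum(F) of the successful F
            # values, which biases the estimate towards larger mutation
            # factors, while u_cr uses the arithmetic mean of successful CRs.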

        fitness[indexes] = c_fitness[indexes]
        if callback is not None:
            callback(**(locals()))

        best = np.argmin(fitness)

        results.append(
            (population[best], fitness[best], population, fitness))
        if fitness[best] == answer:
            break
    return results
Example #6
def apply(population_size: int, individual_size: int, f: Union[float, int],
          cr: Union[float,
                    int], bounds: np.ndarray, func: Callable[[np.ndarray],
                                                             float], opts: Any,
          callback: Callable[[Dict], Any], cross: str, max_evals: int,
          seed: Union[int, None], population: Union[np.ndarray, None],
          answer: Union[None, int, float]) -> [np.ndarray, int]:
    """
    Applies the standard differential evolution algorithm.
    :param population_size: Size of the population.
    :type population_size: int
    :param individual_size: Number of genes/features of an individual.
    :type individual_size: int
    :param f: Mutation parameter. Must be in [0, 2].
    :type f: Union[float, int]
    :param cr: Crossover Ratio. Must be in [0, 1].
    :type cr: Union[float, int]
    :param bounds: Numpy ndarray with individual_size rows and 2 columns.
    First column represents the minimum value for the row feature.
    Second column represents the maximum value for the row feature.
    :type bounds: np.ndarray
    :param func: Evaluation function. The function used must receive one
    parameter. This parameter will be a numpy array representing an individual.
    :type func: Callable[[np.ndarray], float]
    :param opts: Optional parameters for the fitness function.
    :type opts: Any type.
    :param callback: Optional function that allows read access to the state of all variables once each generation.
    :type callback: Callable[[Dict], Any]
    :param cross: Indicates whether to use the binary crossover('bin') or the exponential crossover('exp').
    :type cross: str
    :param max_evals: Number of evaluations after which the algorithm is stopped.
    :type max_evals: int
    :param seed: Random number generation seed. Fix a number to reproduce the
    same results in later experiments.
    :type seed: Union[int, None]
    :return: A list with, for each generation, the best individual found, its
     fitness, the population and the population fitness values.
    :rtype: list
    """

    # 0. Check parameters are valid
    if type(population_size) is not int or population_size <= 0:
        raise ValueError("population_size must be a positive integer.")

    if type(individual_size) is not int or individual_size <= 0:
        raise ValueError("individual_size must be a positive integer.")

    if (type(f) is not int and type(f) is not float) or not 0 <= f <= 2:
        raise ValueError("f (mutation parameter) must be a "
                         "real number in [0,2].")

    if (type(cr) is not int and type(cr) is not float) or not 0 <= cr <= 1:
        raise ValueError("cr (crossover ratio) must be a "
                         "real number in [0,1].")

    if type(max_evals) is not int or max_evals <= 0:
        raise ValueError("max_evals must be a positive integer.")

    if type(bounds) is not np.ndarray or bounds.shape != (individual_size, 2):
        raise ValueError("bounds must be a NumPy ndarray.\n"
                         "The array must be of individual_size length. "
                         "Each row must have 2 elements.")

    if type(cross) is not str or cross not in ['bin', 'exp']:
        raise ValueError(
            "cross must be a string equal to either 'bin' or 'exp'.")
    if type(seed) is not int and seed is not None:
        raise ValueError("seed must be an integer or None.")

    # 1. Initialization
    np.random.seed(seed)
    if population is None:
        population = commons.init_population(population_size, individual_size,
                                             bounds)

    #population = commons.init_population(population_size, individual_size, bounds)
    try:
        fitness = commons.apply_fitness(population, func, opts)
    except TypeError:
        print(func, population)
        raise

    #use self.population and self.fitness - move the loop into a step function

    max_iters = max_evals // population_size
    #print("de : ", max_iters)
    results = []
    for current_generation in range(max_iters):

        mutated = commons.binary_mutation(population, f, bounds)
        if cross == 'bin':
            crossed = commons.crossover(population, mutated, cr)
        else:
            crossed = commons.exponential_crossover(population, mutated, cr)

        c_fitness = commons.apply_fitness(crossed, func, opts)
        population, indexes = commons.selection(population,
                                                crossed,
                                                fitness,
                                                c_fitness,
                                                return_indexes=True)

        fitness[indexes] = c_fitness[indexes]

        best = np.argmin(fitness)

        if callback is not None:
            callback(**(locals()))

        results.append(
            (population[best], fitness[best], population, fitness))
        if fitness[best] == answer:
            break
    return results
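
A minimal usage sketch for the standard DE above, assuming the function and its `commons` helpers are importable; the Rastrigin objective and all parameter values are illustrative only:

import numpy as np

def rastrigin(x: np.ndarray) -> float:
    # Standard Rastrigin test objective (assumed, not part of the original code).
    return float(10 * x.size + np.sum(x ** 2 - 10 * np.cos(2 * np.pi * x)))

dim = 10
bounds = np.array([[-5.12, 5.12]] * dim)
results = apply(population_size=50, individual_size=dim, f=0.8, cr=0.9,
                bounds=bounds, func=rastrigin, opts=None, callback=None,
                cross='bin', max_evals=10000, seed=1, population=None,
                answer=None)
best, best_fitness, final_population, final_fitness = results[-1]
print(best_fitness)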
Example #7
def apply(population_size: int, individual_size: int, bounds: np.ndarray,
          func: Callable[[np.ndarray], float], opts: Any,
          callback: Callable[[Dict], Any], max_evals: int, seed: Union[int,
                                                                       None],
          answer: Union[None, int, float], population: Union[np.ndarray, None]):
    """
    Applies the Self-adaptive differential evolution algorithm (SaDE).
    :param population_size: Size of the population.
    :type population_size: int
    :param individual_size: Number of genes/features of an individual.
    :type individual_size: int
    :param bounds: Numpy ndarray with individual_size rows and 2 columns.
    First column represents the minimum value for the row feature.
    Second column represents the maximum value for the row feature.
    :type bounds: np.ndarray
    :param func: Evaluation function. The function used must receive one
     parameter. This parameter will be a numpy array representing an individual.
    :type func: Callable[[np.ndarray], float]
    :param opts: Optional parameters for the fitness function.
    :type opts: Any type.
    :param callback: Optional function that allows read access to the state of all variables once each generation.
    :type callback: Callable[[Dict], Any]
    :param max_evals: Number of evaluations after which the algorithm is stopped.
    :type max_evals: int
    :param seed: Random number generation seed. Fix a number to reproduce the
    same results in later experiments.
    :type seed: Union[int, None]
    :return: A list with, for each generation, the best individual found, its
     fitness, the population and the population fitness values.
    :rtype: list
    """

    if type(population_size) is not int or population_size <= 0:
        raise ValueError("population_size must be a positive integer.")

    if type(individual_size) is not int or individual_size <= 0:
        raise ValueError("individual_size must be a positive integer.")

    if type(bounds) is not np.ndarray or bounds.shape != (individual_size, 2):
        raise ValueError("bounds must be a NumPy ndarray.\n"
                         "The array must be of individual_size length. "
                         "Each row must have 2 elements.")

    if type(max_evals) is not int or max_evals <= 0:
        raise ValueError("max_evals must be a positive integer.")

    if type(seed) is not int and seed is not None:
        raise ValueError("seed must be an integer or None.")

    # 1. Initialization
    np.random.seed(seed)
    if population is None:
        population = commons.init_population(population_size, individual_size,
                                             bounds)

    # 2. SaDE Algorithm
    probability = 0.5
    fitness = commons.apply_fitness(population, func, opts)
    cr_m = 0.5
    f_m = 0.5

    sum_ns1 = 0
    sum_nf1 = 0
    sum_ns2 = 0
    sum_nf2 = 0
    cr_list = []

    f = np.random.normal(f_m, 0.3, population_size)
    f = np.clip(f, 0, 2)

    cr = np.random.normal(cr_m, 0.1, population_size)
    cr = np.clip(cr, 0, 1)

    max_iters = max_evals // population_size

    #print("sade : ", max_iters)
    results = []
    for current_generation in range(max_iters):
        # 2.1 Mutation
        # 2.1.1 Randomly choose which individuals do each mutation
        choice = np.random.rand(population_size)
        choice_1 = choice < probability
        choice_2 = choice >= probability

        # 2.1.2 Apply the mutations
        mutated = population.copy()
        mutated[choice_1] = commons.binary_mutation(
            population[choice_1], f[choice_1].reshape(sum(choice_1), 1),
            bounds)
        mutated[choice_2] = commons.current_to_best_2_binary_mutation(
            population[choice_2], fitness[choice_2],
            f[choice_2].reshape(sum(choice_2), 1), bounds)

        # 2.2 Crossover
        crossed = commons.crossover(population, mutated,
                                    cr.reshape(population_size, 1))
        c_fitness = commons.apply_fitness(crossed, func, opts)

        # 2.3 Selection
        population = commons.selection(population, crossed, fitness, c_fitness)
        winners = c_fitness < fitness
        fitness[winners] = c_fitness[winners]

        # 2.4 Self Adaption
        chosen_1 = np.sum(np.bitwise_and(choice_1, winners))
        chosen_2 = np.sum(np.bitwise_and(choice_2, winners))
        sum_ns1 += chosen_1
        sum_ns2 += chosen_2
        sum_nf1 += np.sum(choice_1) - chosen_1
        sum_nf2 += np.sum(choice_2) - chosen_2
        cr_list = np.concatenate((cr_list, cr[winners]))

        # 2.4.1 Adapt mutation strategy probability
        if (current_generation + 1) % 50 == 0:
            probability = (sum_ns1 * (sum_ns2 + sum_nf2) /
                           (sum_ns1 * (sum_ns2 + sum_nf2) +
                            sum_ns2 * (sum_ns1 + sum_nf1)))
            probability = np.clip(probability, 0, 1)
            if np.isnan(probability):
                probability = .99
            sum_ns1 = 0
            sum_ns2 = 0
            sum_nf1 = 0
            sum_nf2 = 0

        # 2.4.2
        if (current_generation + 1) % 25 == 0:
            if len(cr_list) != 0:
                cr_m = np.mean(cr_list)
                cr_list = []
            cr = np.random.normal(cr_m, 0.1, population_size)
            cr = np.clip(cr, 0, 1)

        if callback is not None:
            callback(**(locals()))

        best = np.argmin(fitness)

        results.append(
            (population[best], fitness[best], population, fitness))
        if fitness[best] == answer:
            break
    return results