Code example #1
import numpy as np


def CuckooSearch(fish, xd, yd, **kwargs):
    # Prey velocity defaults to the fish's focal velocity if not specified
    prey_velocity = kwargs.get('prey_velocity', fish.focal_velocity)
    n = kwargs.get('n', 50)  # Number of nests
    iterations = kwargs.get('iterations', 1000)  # Number of iterations
    # Proportion of bad solutions rejected and replaced with random new ones each
    # iteration ("alien eggs discovered")
    pa = kwargs.get('p_a', 0.25)
    dim = 12

    nests = np.random.rand(n, dim)  # Initialize nests randomly
    new_nests = np.copy(nests)  # Create new_nests initially as a copy of the originals
    fitnesses = np.full(n, np.inf)  # Initialize fitness vector with bad fitnesses

    # Calculate initial fitnesses, using the pairwise comparison function for convenience
    fmin, best_nest, nests, fitnesses = pairwise_best_nests(
        nests, new_nests, fitnesses, n, fish, prey_velocity, xd, yd)
    initial_best_fitness = max(fitnesses)

    for i in range(iterations):  # Begin main search loop
        # Generate new solutions (but keep the current best)
        new_nests = global_random_walk(
            nests, best_nest, n, i / iterations, fmin / initial_best_fitness)
        # Compare solutions pairwise vs. Levy perturbations and keep the best of each pair
        fnew, best, nests, fitnesses = pairwise_best_nests(
            nests, new_nests, fitnesses, n, fish, prey_velocity, xd, yd)
        # Create new solutions based on differences between current ones
        new_nests = local_random_walk(new_nests, pa, n, dim)
        # Evaluate those pairing-based new solutions and find the best
        fnew, best, nests, fitnesses = pairwise_best_nests(
            nests, new_nests, fitnesses, n, fish, prey_velocity, xd, yd)
        if fnew < fmin:
            fmin = fnew
            best_nest = best

    return maneuver_from_proportions(fish, prey_velocity, xd, yd, best_nest)
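The helpers pairwise_best_nests, global_random_walk, local_random_walk, and maneuver_from_proportions are not shown in these examples. Purely as an illustrative sketch of the comparison step, and assuming an objective_function with the signature of code example #3 in which lower values are better (consistent with the np.inf initialization and the fnew < fmin test above), pairwise_best_nests might look something like this:

import numpy as np


def pairwise_best_nests(nests, new_nests, fitnesses, n, fish, prey_velocity, xd, yd):
    # Hypothetical sketch only: evaluate each proposed nest and keep it if it beats the
    # current nest at the same index, then report the overall best (minimum) objective value.
    for j in range(n):
        new_fitness = objective_function(fish, prey_velocity, xd, yd, new_nests[j])
        if new_fitness < fitnesses[j]:
            fitnesses[j] = new_fitness
            nests[j] = new_nests[j]
    best_index = np.argmin(fitnesses)
    return fitnesses[best_index], np.copy(nests[best_index]), nests, fitnesses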
Code example #2
def objective_function(p):
    return maneuver.maneuver_from_proportions(fish, prey_velocity, xd, yd, p).fitness
Code example #3
def objective_function(fish, prey_velocity, xd, yd, p):
    maneuver = maneuver_from_proportions(fish, prey_velocity, xd, yd, p)
    return -maneuver.fitness
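Code examples #2 and #3 differ in how the model context is supplied (closed over from the enclosing scope vs. passed explicitly) and in the sign of the returned value. If an optimizer expects a single-argument objective like the one in code example #2, the explicit-argument version can be bound with functools.partial; the following is only a sketch, assuming fish, prey_velocity, xd, and yd are already defined by the surrounding model code:

from functools import partial

import numpy as np

# Illustrative only: fish, prey_velocity, xd, and yd come from the surrounding model code.
# Note this keeps code example #3's sign convention (the negated maneuver fitness).
bound_objective = partial(objective_function, fish, prey_velocity, xd, yd)
value = bound_objective(np.random.rand(12))  # Evaluate one random 12-parameter candidate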
Code example #4
import numpy as np


def SAMASearch(fish, xd, yd, **kwargs):
    """Implements the self-learning antelopes migration algorithm of Lin et al. 2019."""
    # Prey velocity defaults to the fish's focal velocity if not specified
    prey_velocity = kwargs.get('prey_velocity', fish.focal_velocity)

    d = 12  # Dimensions of the problem
    N = kwargs.get('N', 30)  # Number of antelopes in the herd
    M = kwargs.get('iterations', 1000)  # Number of iterations
    alpha = kwargs.get('alpha', 0.95)
    beta = kwargs.get('beta', 1.05)
    gamma = kwargs.get('gamma', 0.04)  # Paper recommends 0.04
    # Minimum number of ordinary antelopes; N minus this is the maximum number of scout antelopes
    N_ordinary_min = kwargs.get('N_ordinary_min', 10)
    # Maximum number of ordinary antelopes; N minus this is the minimum number of scout antelopes
    N_ordinary_max = kwargs.get('N_ordinary_max', 20)
    assert N_ordinary_min < N
    assert N_ordinary_max < N

    R = np.full(M, np.nan)  # Ordinary antelope grazing radius
    sigma = np.full(M, np.nan)  # Scout antelope exploring amplitude
    mu = 0  # Current running total of stagnation iterations
    N_ordinary = round((N_ordinary_min + N_ordinary_max) / 2)  # Starting number of ordinary antelopes

    X = np.full((M, d), np.nan)  # Holds the "grazing center" (best solution) at each iteration
    f = np.full(M, np.nan)  # Holds the fitness of the best solution at each iteration
    X[0] = np.random.rand(d)
    f[0] = objective_function(fish, prey_velocity, xd, yd, X[0])
    fEvals = 0  # Running total of objective function evaluations

    antelopes = np.random.rand(N, d)  # Initialize antelopes (ordinary/scout doesn't matter yet)

    for i in range(1, M):  # Begin main search loop
        R[i] = 1 if i < 2 else (R[i - 1] * alpha if f[i - 1] == f[i - 2] else R[i - 1] * beta)
        for j in range(N_ordinary):  # Ordinary antelope grazing
            antelopes[j] = X[i - 1] + np.random.uniform(-R[i], R[i], d)
        # Note: sigma[i] here is wrongly given as R[i] in the original algorithm paper
        sigma[i] = 1 if i < 2 else (sigma[i - 1] * 0.5 if f[i - 1] == f[i - 2] else 1)
        for j in range(N_ordinary, N):  # Scout antelope grazing
            antelopes[j] = X[i - 1] + np.random.normal(0, sigma[i], d)
        # Use Numpy where shenanigans to replace parameter values outside the (0,1) range
        # with random values inside the range
        bad_value_locations = np.where((antelopes < 0) | (antelopes > 1))
        antelopes[bad_value_locations] = np.random.rand(len(bad_value_locations[0]))
        # antelopes = np.clip(antelopes, 0, 1)  # alternative way of trimming overruns, probably better for my problem, but the above is what's in the original algo paper
        # Evaluate the fitness of all antelopes and update the grazing center X[i] with the best fitness f[i]
        X[i] = X[i - 1]  # Initialize grazing center with the previous one for comparison
        f[i] = f[i - 1]  # Same for its fitness
        for k, antelope in enumerate(antelopes):
            fitness = objective_function(fish, prey_velocity, xd, yd, antelope)
            if fitness > f[i]:
                X[i] = antelope
                f[i] = fitness
                antelope_type = "ordinary" if k < N_ordinary else "scout"
                print("In iteration", i, "with N_o=", N_ordinary, ", R=", R[i],
                      ", sigma=", sigma[i], antelope_type,
                      "antelope improved fitness f[i] to", f[i])  #, "and antelope\nX[i]=", X[i])
        # Conduct self-learning organization to determine number of ordinary and scout antelopes
        if f[i] == f[i - 1]:
            mu += 1
            P = 1 - np.exp(-(mu / (M * gamma))**2)
            if np.random.rand() < P and N_ordinary > N_ordinary_min:
                N_ordinary -= 1
        else:
            mu = 0
            if N_ordinary < N_ordinary_max:
                N_ordinary += 1
        # Conduct self-learning search by algorithm 5 lines 8-13
        if f[i] != f[i - 1]:
            # Self-learning search by equation (4) investigates a further change in the
            # direction of the most recent improvement
            X_S = X[i] + (X[i] - X[i - 1])
        else:
            # Self-learning search by equation (5) investigates reversing the average
            # tendency of the unproductive search step
            X_S = X[i] + (X[i] - np.mean(antelopes, axis=0))
        # Not included in the paper, but it seems necessary to either clip or randomize out-of-range values
        X_S = np.clip(X_S, 0, 1)
        f_s = objective_function(fish, prey_velocity, xd, yd, X_S)
        if f_s > f[i]:  # Update the best solution if an improvement was found in this step
            X[i] = X_S
            f[i] = f_s
            print("In iteration", i, "with N_o=", N_ordinary, "and R=", R[i],
                  "X_S learning improved fitness f[i] to", f[i])  #, "and antelope\nX[i]=", X[i])
        fEvals += len(antelopes) + 1
    print("Evaluation count was", fEvals)
    return maneuver_from_proportions(fish, prey_velocity, xd, yd, X[M - 1])
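As a usage sketch only (the fish object and the detection coordinates xd and yd come from the surrounding foraging model and are not defined in these examples), both search functions return a maneuver object whose fitness attribute can be compared directly:

# Hypothetical usage; assumes fish, xd, and yd are provided by the surrounding model.
cuckoo_maneuver = CuckooSearch(fish, xd, yd, n=50, iterations=1000)
sama_maneuver = SAMASearch(fish, xd, yd, N=30, iterations=1000)
print("Cuckoo search maneuver fitness:", cuckoo_maneuver.fitness)
print("SAMA maneuver fitness:", sama_maneuver.fitness)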