Example #1
 def __init__(self, problem, options):
     options.setdefault("optimizer_name", "MA (MA-ES)")
     MuCommaLambda.__init__(self, problem, options)
     n = self.ndim_problem
     # expectation of the chi distribution E||N(0,I)||, used for (M12) in Fig. 3
     self.expectation_chi = (n ** 0.5) * (1 - 1 / (4 * n) + 1 / (21 * (n ** 2)))
     self.alpha_cov = 2
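The value assigned to expectation_chi is the standard approximation E||N(0, I)|| ~ sqrt(n) * (1 - 1/(4n) + 1/(21n^2)) of the mean of the chi distribution. A standalone sketch (not part of the library) that checks it against a Monte-Carlo estimate:

    import numpy as np

    n = 30
    approx = (n ** 0.5) * (1 - 1 / (4 * n) + 1 / (21 * (n ** 2)))
    # empirical mean of ||N(0, I)|| over 100000 samples
    samples = np.random.default_rng(0).standard_normal((100000, n))
    empirical = np.linalg.norm(samples, axis=1).mean()
    print(approx, empirical)  # both are close to 5.43 for n = 30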
Example #2
 def __init__(self, problem, options):
     options.setdefault("optimizer_name", "ASEBO (ASEBO)")
     MuCommaLambda.__init__(self, problem, options)
     # number of samples/individuals for each iteration
     self.n_t = options.get("n_t", 100)  # num_sensings in train.py
     # minimum of samples
     self.min_n_t = options.get("min_n_t", 10)  # min in train.py
     # number of iterations of full sampling
     if options.get("iota") is None:
         raise ValueError(
             "the option 'iota' is a hyperparameter which should be set or tuned in advance."
         )
     self.iota = min(options.get("iota") - 1, self.ndim_problem)  # k in train.py
     # PCA threshold
     self.epsilon = options.get("epsilon", 0.995)  # threshold in train.py
     # smoothing parameter
     self.sigma = options.get("sigma", 0.02)
     # decay rate of covariance matrix adaptation (lambda)
     self.gamma = options.get("gamma", 0.995)  # decay in train.py
     # step-size (learning rate of Adam optimizer)
     self.eta = options.get("learning_rate", 0.02)  # learning_rate in train.py
     # probability of sampling from isotropic Gaussian distribution
     self.alpha = options.get("alpha", 1)
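The PCA threshold epsilon and min_n_t come into play in the optimize method (Example #11), where the number of samples n_t is chosen from the cumulative explained variance of past gradients. A standalone toy sketch of that selection rule (the gradient archive G here is made-up random data):

    import numpy as np
    from sklearn.decomposition import PCA

    rng = np.random.default_rng(0)
    G = rng.standard_normal((200, 20))  # stand-in for the archive of past gradients
    pca_fit = PCA().fit(G)
    epsilon, min_n_t = 0.995, 10
    # first component count whose cumulative explained variance exceeds epsilon,
    # but never fewer than min_n_t directions
    n_t = max(np.argmax(np.cumsum(pca_fit.explained_variance_ratio_) > epsilon) + 1,
              min_n_t)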
Example #3
 def __init__(self, problem, options):
     options.setdefault("optimizer_name", "SDA (SDA-ES)")
     MuCommaLambda.__init__(self, problem, options)
     # m -> number of search directions (evolution paths)
     self.n_evolution_paths = options.setdefault("n_evolution_paths", 10)
     self.c_cov = 0.4 / np.sqrt(problem["ndim_problem"])
     self.c_c = 0.25 / np.sqrt(problem["ndim_problem"])
     self.c_s = 0.3
     self.d_sigma = 1
     self.p_star = 0.05
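These rates feed the SDA-ES sampling step of the optimize method (Example #9), x = m + sigma * (sqrt(1 - c_cov) * z1 + sqrt(c_cov) * z2 @ Q), where Q holds the m search directions. A standalone sketch with toy values:

    import numpy as np

    rng = np.random.default_rng(0)
    n, n_paths, sigma = 20, 10, 0.5
    c_cov = 0.4 / np.sqrt(n)
    Q = 1e-10 * rng.standard_normal((n_paths, n))  # m search directions, near-zero init
    m = np.zeros(n)                                # distribution mean
    z1 = rng.standard_normal(n)                    # isotropic component
    z2 = rng.standard_normal(n_paths)              # subspace component
    x = m + sigma * (np.sqrt(1 - c_cov) * z1 + np.sqrt(c_cov) * np.dot(z2, Q))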
Example #4
 def __init__(self, problem, options):
     options.setdefault("optimizer_name", "RankOne (R1-ES)")
     MuCommaLambda.__init__(self, problem, options)
     # learning (changing) rate of covariance matrix
     self.c_cov = options.get("c_cov", 1 / (3 * np.sqrt(problem["ndim_problem"]) + 5))
     # learning (changing) rate of principal search direction (evolution path)
     self.c = options.get("c", 2 / (problem["ndim_problem"] + 7))
     # learning (changing) rate of cumulative rank rate
     self.c_s = options.get("c_s", 0.3)
     # target ratio for mutation strength adaptation
     self.q_star = options.get("q_star", 0.3)
     # damping factor for mutation strength adaptation
     self.d_sigma = options.get("d_sigma", 1)
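c_cov controls the rank-one sampling step of the optimize method (Example #10), x = m + sigma * (sqrt(1 - c_cov) * z + sqrt(c_cov) * r * p). A standalone sketch with toy values:

    import numpy as np

    rng = np.random.default_rng(0)
    n, sigma = 20, 0.5
    c_cov = 1 / (3 * np.sqrt(n) + 5)
    m = np.zeros(n)                    # distribution mean
    p = np.zeros(n)                    # principal search direction (evolution path)
    z, r = rng.standard_normal(n), rng.standard_normal()
    x = m + sigma * (np.sqrt(1 - c_cov) * z + np.sqrt(c_cov) * r * p)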
Example #5
 def __init__(self, problem, options):
     options.setdefault("optimizer_name", "CEM (CEM)")
     MuCommaLambda.__init__(self, problem, options)
     # fraction of the best individuals for updating mean + std of sampling distribution
     self.best_frac = options.get("best_frac", 0.05)
     # number of the best individuals for updating mean + std of sampling distribution
     # == number of parents (n_parents) in ES terminology
     self.n_best = max(1, int(self.n_individuals * self.best_frac))
     # initial std for sampling distribution
     # == global/individual step-size (step_size) in ES terminology
     self.init_std = options.get("init_std", 1.0)
     # std decayed for updating std of sampling distribution
     self.extra_std = options.get("extra_std", 1.0)
     # number of epochs taken to decay std for updating std of sampling distribution
     self.extra_decay_time = options.get("extra_decay_time", 100)
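During sampling (Example #12), extra_std is faded out over extra_decay_time epochs and folded into the sampling std. A standalone sketch with the default values above:

    import numpy as np

    n_individuals, best_frac = 100, 0.05
    n_best = max(1, int(n_individuals * best_frac))  # 5 elites
    cur_std, extra_std, extra_decay_time = 1.0, 1.0, 100
    for n_epoch in (0, 50, 100):
        extra_var_mult = max(1.0 - n_epoch / extra_decay_time, 0)
        sample_std = np.sqrt(np.square(cur_std) + np.square(extra_std) * extra_var_mult)
        print(n_epoch, sample_std)  # decays from ~1.41 to 1.0 as the extra std fades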
Example #6
    def __init__(self, problem, options):
        options.setdefault("optimizer_name", "RestartRm (R-Rm-ES)")
        MuCommaLambda.__init__(self, problem, options)
        # learning (changing) rate of covariance matrix
        self.c_cov = options.get(
            "c_cov", 1 / (3 * np.sqrt(problem["ndim_problem"]) + 5))
        # learning (changing) rate of principal search direction (evolution path)
        self.c = options.get("c", 2 / (problem["ndim_problem"] + 7))
        # learning (changing) rate of cumulative rank rate
        self.c_s = options.get("c_s", 0.3)
        # target ratio for mutation strength adaptation
        self.q_star = options.get("q_star", 0.3)
        # damping factor for mutation strength adaptation
        self.d_sigma = options.get("d_sigma", 1)
        # number of multiple evolution paths
        self.n_evolution_paths = options.get("n_evolution_paths", 2)  # m
        # generation gap for multiple evolution paths
        self.T = options.get("T", problem["ndim_problem"])

        # threshold of step-size for restart
        self.threshold_step_size = options.get("threshold_step_size", 1e-10)
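threshold_step_size drives the restart logic visible in Example #10: once the step-size collapses below the threshold, the run restarts with a doubled population and doubled damping. A standalone sketch of that schedule (concrete values hypothetical):

    n_individuals, d_sigma, threshold_step_size = 100, 1.0, 1e-10
    sigma = 5e-11  # hypothetical collapsed step-size
    if sigma <= threshold_step_size:  # restart with a doubled population
        n_individuals *= 2
        n_parents = n_individuals // 2
        d_sigma *= 2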
Example #7
    def optimize(self, fitness_function=None):
        start_optimization = time.time()

        if (fitness_function is None) and (self.fitness_function is not None):
            fitness_function = self.fitness_function

        # initialize
        parent = MuCommaLambda._get_m(self)
        start_evaluation = time.time()
        y = fitness_function(parent)
        n_evaluations, time_evaluations = 1, time.time() - start_evaluation
        best_so_far_x, best_so_far_y = np.copy(parent), np.copy(y)

        if self.save_fitness_data: fitness_data = [y]
        else: fitness_data = None
        if self.save_best_so_far_x:
            history_x = np.hstack((n_evaluations, best_so_far_x))
        else:
            history_x = None

        # initialize variables shared by all individuals
        beta = np.sqrt(1 / self.ndim_problem)  # adaptation vs precision
        beta_scal = self.beta_scal  # in the range (0, 1)
        # beta_scal: small values facilitate a precise but time-consuming adaptation

        # iterate
        termination = "max_evaluations"
        is_restart, n_restart = True, 0
        while n_evaluations < self.max_evaluations:
            if is_restart:
                if n_restart > 0:
                    parent = self.rng.uniform(self.lower_boundary,
                                              self.upper_boundary,
                                              (self.ndim_problem, ))
                    start_evaluation = time.time()
                    y = fitness_function(parent)
                    time_evaluations += (time.time() - start_evaluation)
                    n_evaluations += 1
                    if best_so_far_y > y:
                        best_so_far_x, best_so_far_y = np.copy(parent), np.copy(y)
                    if self.save_best_so_far_x and not (
                            n_evaluations % self.freq_best_so_far_x):
                        history_x = np.vstack(
                            (history_x,
                             np.hstack((n_evaluations, best_so_far_x))))
                    if self.save_fitness_data: fitness_data.append(np.copy(y))

                    self.n_individuals *= 2
                    if self.n_individuals >= 1000: self.n_individuals = 1000

                Y = np.tile(y, (self.n_individuals, ))  # fitness of population
                X = np.empty((self.n_individuals, self.ndim_problem))  # population
                delta = np.ones((self.ndim_problem, ))  # individual step-sizes
                xi = np.empty((self.n_individuals, ))  # global step-size
                Z = np.empty((self.n_individuals, self.ndim_problem))  # Gaussian noises
                is_restart = False

            # 1. creation of lambda offspring
            for k in range(self.n_individuals):
                if self.rng.uniform(0, 1, 1) > 0.5: xi[k] = 1.4
                else: xi[k] = 1 / 1.4
                Z[k, :] = self.rng.standard_normal((self.ndim_problem, ))
                X[k, :] = parent + xi[k] * delta * Z[k, :]

                start_evaluation = time.time()
                Y[k] = fitness_function(X[k, :])
                time_evaluations += time.time() - start_evaluation
                n_evaluations += 1

                if self.save_fitness_data: fitness_data.append(Y[k])

                # update best-so-far x and y
                if best_so_far_y > Y[k]:
                    best_so_far_x, best_so_far_y = np.copy(X[k, :]), np.copy(Y[k])
                if self.save_best_so_far_x and not (n_evaluations %
                                                    self.freq_best_so_far_x):
                    history_x = np.vstack(
                        (history_x, np.hstack((n_evaluations, best_so_far_x))))

                # check three termination criteria
                is_break, termination = MuCommaLambda._check_terminations(
                    self, n_evaluations,
                    time.time() - start_optimization, best_so_far_y)
                if is_break: break

            # 2. selection / adaptation
            sel = np.argmin(Y)
            parent = np.copy(X[sel, :])
            delta *= (np.power(xi[sel], beta) * np.power(
                np.exp(np.abs(Z[sel, :]) - np.sqrt(2 / np.pi)), beta_scal))

            # check termination criteria
            runtime = time.time() - start_optimization
            is_break, termination = MuCommaLambda._check_terminations(
                self, n_evaluations, runtime, best_so_far_y)
            if is_break: break
            if np.min(delta) <= self.threshold_step_size:
                is_restart, n_restart = True, n_restart + 1
            if np.max(delta) >= 3 * np.min(self.upper_boundary -
                                           self.lower_boundary):
                is_restart, n_restart = True, n_restart + 1

        fitness_data, time_compression = MuCommaLambda._save_data(
            self, history_x, fitness_data)

        results = {
            "best_so_far_x": best_so_far_x,
            "best_so_far_y": best_so_far_y,
            "n_evaluations": n_evaluations,
            "runtime": runtime,
            "fitness_data": fitness_data,
            "termination": termination,
            "time_evaluations": time_evaluations,
            "time_compression": time_compression,
            "delta": delta,
            "n_restart": n_restart
        }
        return results
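In the selection/adaptation step of the method above, the individual step-sizes delta are rescaled by the selected offspring's global factor xi[sel] and by how far its noise deviates from E|z_i| = sqrt(2/pi). A standalone sketch with toy values:

    import numpy as np

    rng = np.random.default_rng(0)
    n = 10
    beta, beta_scal = np.sqrt(1 / n), 1 / n
    delta = np.ones(n)
    xi_sel = 1.4  # the selected offspring happened to use the larger global factor
    z_sel = rng.standard_normal(n)  # its Gaussian noise
    # per-coordinate rescaling; the exponent is centered since E|z_i| = sqrt(2/pi)
    delta *= np.power(xi_sel, beta) * np.power(
        np.exp(np.abs(z_sel) - np.sqrt(2 / np.pi)), beta_scal)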
Example #8
 def __init__(self, problem, options):
     options.setdefault("optimizer_name",
                        "Ostermeier (Ostermeier's (1,lambda)-ES)")
     MuCommaLambda.__init__(self, problem, options)
     self.beta_scal = options.get("beta_scal", 1 / self.ndim_problem)
Example #9
    def optimize(self, fitness_function=None):
        start_optimization = time.time()

        if (fitness_function is None) and (self.fitness_function is not None):
            fitness_function = self.fitness_function
        
        # initialize
        m = MuCommaLambda._get_m(self)
        start_evaluation = time.time()
        y = fitness_function(m)
        n_evaluations, time_evaluations = 1, time.time() - start_evaluation
        best_so_far_x, best_so_far_y = np.copy(m), np.copy(y)

        if self.save_fitness_data: fitness_data = [y]
        else: fitness_data = None
        if self.save_best_so_far_x: history_x = np.hstack((n_evaluations, best_so_far_x))
        else: history_x = None

        # iterate
        n_evolution_paths = self.n_evolution_paths
        termination = "max_evaluations"
        x_z1, x_z2 = np.sqrt(1 - self.c_cov), np.sqrt(self.c_cov) # Line 9 of Algorithm 1
        q_1, q_2 = 1 - self.c_c, np.sqrt(self.c_c * (2 - self.c_c)) # Line 15 of Algorithm 1
        Z_1, Z_2 = 1 - self.c_s, np.sqrt(self.c_s * (2 - self.c_s)) # Line 22 of Algorithm 1
        is_restart, n_restart = True, 0
        while n_evaluations < self.max_evaluations:
            if is_restart:
                if n_restart > 0:
                    m = self.rng.uniform(self.lower_boundary,
                        self.upper_boundary, (self.ndim_problem,))
                    start_evaluation = time.time()
                    y = fitness_function(m)
                    time_evaluations += (time.time() - start_evaluation)
                    n_evaluations += 1
                    if self.save_fitness_data: fitness_data.append(np.copy(y))
                    if best_so_far_y > y: best_so_far_x, best_so_far_y = np.copy(m), np.copy(y)
                    if self.save_best_so_far_x and not(n_evaluations % self.freq_best_so_far_x):
                        history_x = np.vstack((history_x, np.hstack((n_evaluations, best_so_far_x))))
                    
                    self.n_individuals = self.n_individuals * 2
                    self.n_parents = int(np.floor(self.n_individuals / 2))
                    self.d_sigma = self.d_sigma * 2
                
                # set weights for parents
                w = np.log(np.arange(1, self.n_parents + 1))
                w = (np.log(self.n_parents + 1) - w) / (
                    self.n_parents * np.log(self.n_parents + 1) - np.sum(w))
                mu_eff = 1 / np.sum(np.power(w, 2))
                W = np.tile(w[:, np.newaxis], (1, self.ndim_problem))

                # initialize m search directions
                # note that in the original paper Q is an n*m matrix (vs. an m*n matrix here)
                Q = np.empty((n_evolution_paths, self.ndim_problem))
                for i in range(n_evolution_paths):
                    Q[i, :] = 1e-10 * self.rng.standard_normal((self.ndim_problem,))

                sigma = self.step_size
                RR = np.arange(1, self.n_individuals * 2 + 1)
                Z = 0
                U_mean = (self.n_individuals ** 2) / 2
                U_var = np.sqrt((self.n_individuals ** 2) * (2 * self.n_individuals + 1) / 12)
                d_sigma, p_star = self.d_sigma, self.p_star
                
                X = np.empty((self.n_individuals, self.ndim_problem)) # population
                Y = np.tile(y, (self.n_individuals,)) # fitness of population
                is_restart = False
            
            Y_bak = np.copy(Y)
            for i in range(self.n_individuals):
                z1 = self.rng.standard_normal((self.ndim_problem,))
                z2 = self.rng.standard_normal((n_evolution_paths,))
                X[i, :] = m + sigma * (x_z1 * z1 + x_z2 * np.dot(z2, Q))
                start_evaluation = time.time()
                y = fitness_function(X[i, :])
                time_evaluations += (time.time() - start_evaluation)
                n_evaluations += 1
                Y[i] = y

                if self.save_fitness_data: fitness_data.append(np.copy(y))

                # update best-so-far x and y
                if best_so_far_y > y: best_so_far_x, best_so_far_y = np.copy(X[i, :]), np.copy(y)
                if self.save_best_so_far_x and not(n_evaluations % self.freq_best_so_far_x):
                    history_x = np.vstack((history_x, np.hstack((n_evaluations, best_so_far_x))))
                
                # check three termination criteria
                is_break, termination = MuCommaLambda._check_terminations(
                    self, n_evaluations, time.time() - start_optimization, best_so_far_y)
                if is_break: break

            # check three termination criteria
            runtime = time.time() - start_optimization
            is_break, termination = MuCommaLambda._check_terminations(
                self, n_evaluations, runtime, best_so_far_y)
            if is_break: break
            if sigma <= self.threshold_step_size:
                is_restart, n_restart = True, n_restart + 1

            # update distribution mean
            index = np.argsort(Y)
            X, Y = X[index, :], Y[index]
            m_bak = m
            m = np.sum(X[:self.n_parents, :] * W, 0)

            # update search directions
            z = np.sqrt(mu_eff) * (m - m_bak) / sigma
            for i in range(n_evolution_paths):
                Q[i, :] = q_1 * Q[i, :] + q_2 * z
                t = (np.sum(z * Q[i, :])) / (np.sum(Q[i, :] * Q[i, :]))
                z = 1 / np.sqrt(1 + t ** 2) * (z - t * Q[i, :])
            
            # update step-size
            F = np.hstack((Y, Y_bak))
            R1 = np.sum(RR[np.argsort(F) >= self.n_individuals])
            U = R1 - self.n_individuals * (self.n_individuals + 1) / 2
            Z = Z_1 * Z + Z_2 * (U - U_mean) / U_var
            sigma = sigma * np.exp((st.norm.cdf(Z) / (1 - p_star) - 1) / d_sigma)
        
        fitness_data, time_compression = MuCommaLambda._save_data(self, history_x, fitness_data)
        
        results = {"best_so_far_x": best_so_far_x,
            "best_so_far_y": best_so_far_y,
            "n_evaluations": n_evaluations,
            "runtime": runtime,
            "fitness_data": fitness_data,
            "termination": termination,
            "time_evaluations": time_evaluations,
            "time_compression": time_compression,
            "m": m,
            "step_size": sigma,
            "n_individuals": self.n_individuals,
            "n_parents": self.n_parents,
            "d_sigma": self.d_sigma,
            "n_restart": n_restart}
        return results
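The step-size update at the end of the loop is a rank-sum rule: it compares the fitness ranks of the new generation against the previous one via a Mann-Whitney-like U statistic, accumulates the normalized statistic in Z, and maps it through the normal CDF. A standalone sketch with toy fitness values (lam stands in for self.n_individuals):

    import numpy as np
    import scipy.stats as st

    rng = np.random.default_rng(0)
    lam, sigma, Z_cum = 10, 0.5, 0.0
    c_s, d_sigma, p_star = 0.3, 1.0, 0.05
    Y, Y_bak = rng.standard_normal(lam), rng.standard_normal(lam)  # new vs old fitness
    RR = np.arange(1, 2 * lam + 1)
    F = np.hstack((Y, Y_bak))
    R1 = np.sum(RR[np.argsort(F) >= lam])  # rank sum of the previous generation
    U = R1 - lam * (lam + 1) / 2           # Mann-Whitney-like U statistic
    U_mean, U_var = (lam ** 2) / 2, np.sqrt((lam ** 2) * (2 * lam + 1) / 12)
    Z_cum = (1 - c_s) * Z_cum + np.sqrt(c_s * (2 - c_s)) * (U - U_mean) / U_var
    sigma *= np.exp((st.norm.cdf(Z_cum) / (1 - p_star) - 1) / d_sigma)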
Example #10
    def optimize(self, fitness_function=None):
        start_optimization = time.time()

        if (fitness_function is None) and (self.fitness_function is not None):
            fitness_function = self.fitness_function

        # initialize distribution mean
        m = MuCommaLambda._get_m(self)  # distribution mean
        start_evaluation = time.time()
        y = fitness_function(m)
        time_evaluations, n_evaluations = time.time() - start_evaluation, 1
        best_so_far_x, best_so_far_y = np.copy(m), np.copy(y)
        if self.save_fitness_data: fitness_data = [y]
        else: fitness_data = None
        if self.save_best_so_far_x:
            history_x = np.hstack((n_evaluations, best_so_far_x))
        else:
            history_x = None

        # iterate / evolve
        termination = "max_evaluations"
        # introduce 3 new constant symbols to simplify the code
        # constant symbols for Line 5 of Algorithm 1
        m_1, m_2 = np.sqrt(1 - self.c_cov), np.sqrt(self.c_cov)
        # constant symbol for Line 12 of Algorithm 1
        p_1 = 1 - self.c
        is_restart, n_restart = True, 0
        while n_evaluations < self.max_evaluations:
            if is_restart:
                is_restart = False
                if n_restart > 0:
                    m = self.rng.uniform(self.lower_boundary,
                                         self.upper_boundary,
                                         (self.ndim_problem, ))
                    start_evaluation = time.time()
                    y = fitness_function(m)
                    time_evaluations += (time.time() - start_evaluation)
                    n_evaluations += 1

                    if best_so_far_y > y:
                        best_so_far_x, best_so_far_y = np.copy(m), np.copy(y)
                    if self.save_fitness_data: fitness_data.append(np.copy(y))
                    if self.save_best_so_far_x and not (
                            n_evaluations % self.freq_best_so_far_x):
                        history_x = np.vstack(
                            (history_x,
                             np.hstack((n_evaluations, best_so_far_x))))

                    self.n_individuals *= 2
                    self.n_parents = int(np.floor(self.n_individuals / 2))
                    self.d_sigma *= 2

                # set weights for parents selection
                w = np.log(np.arange(1, self.n_parents + 1))
                w_1 = np.log(self.n_parents + 1)
                w = (w_1 - w) / (self.n_parents * w_1 - np.sum(w))
                # normalization factor for principal search direction adaptation
                mu_eff = 1 / np.sum(np.power(w, 2))

                sigma = self.step_size  # mutation strength (global step-size)
                p = np.zeros((self.ndim_problem, ))  # principal search direction (evolution path)
                s = 0  # cumulative rank rate
                p_2 = np.sqrt(self.c * (2 - self.c) * mu_eff)
                RR = np.arange(1, self.n_parents * 2 + 1)  # ranks for R_t, R_(t+1)

                X = np.empty((self.n_individuals, self.ndim_problem))  # population
                Y = np.tile(y, (self.n_individuals, ))  # fitness of population

            Y_bak = np.copy(Y)
            for i in range(self.n_individuals):  # one generation
                z = self.rng.standard_normal((self.ndim_problem, ))
                r = self.rng.standard_normal()
                # sample (Line 5 of Algorithm 1)
                X[i, :] = m + sigma * (m_1 * z + m_2 * r * p)
                start_evaluation = time.time()
                y = fitness_function(X[i, :])
                time_evaluations += (time.time() - start_evaluation)
                n_evaluations += 1
                Y[i] = y

                # update best-so-far x and y
                if best_so_far_y > y:
                    best_so_far_x, best_so_far_y = np.copy(X[i, :]), np.copy(y)
                if self.save_fitness_data: fitness_data.append(np.copy(y))
                if self.save_best_so_far_x and not (n_evaluations %
                                                    self.freq_best_so_far_x):
                    history_x = np.vstack(
                        (history_x, np.hstack((n_evaluations, best_so_far_x))))

                # check three termination criteria
                runtime = time.time() - start_optimization
                is_break, termination = MuCommaLambda._check_terminations(
                    self, n_evaluations, runtime, best_so_far_y)
                if is_break: break

            # check four termination criteria
            is_break, termination = MuCommaLambda._check_terminations(
                self, n_evaluations, runtime, best_so_far_y, sigma)
            if termination == "threshold_step_size (lower)":
                is_restart, n_restart = True, n_restart + 1
            elif is_break:
                break

            # update distribution mean
            index = np.argsort(Y)
            X, Y = X[index, :], Y[index]  # Line 10 of Algorithm 1
            m_bak = m
            # Line 11 of Algorithm 1
            m = np.zeros((self.ndim_problem, ))
            for j in range(self.n_parents):
                m += w[j] * X[j, :]

            # update principal search direction (Line 12 of Algorithm 1)
            p = p_1 * p + p_2 * ((m - m_bak) / sigma)

            # adapt mutation strength (rank-based success rule, RSR)
            F = np.hstack((Y_bak[:self.n_parents], Y[:self.n_parents]))
            R = np.argsort(F)
            # Line 13 of Algorithm 1
            R_t, R_t1 = RR[R < self.n_parents], RR[R >= self.n_parents]
            q = np.sum(w * (R_t - R_t1)) / self.n_parents
            s = (1 - self.c_s) * s + self.c_s * (q - self.q_star)
            sigma *= np.exp(s / self.d_sigma)

        fitness_data, time_compression = MuCommaLambda._save_data(
            self, history_x, fitness_data)

        results = {
            "best_so_far_x": best_so_far_x,
            "best_so_far_y": best_so_far_y,
            "n_evaluations": n_evaluations,
            "runtime": runtime,
            "fitness_data": fitness_data,
            "termination": termination,
            "time_evaluations": time_evaluations,
            "time_compression": time_compression,
            "m": m,
            "p": p,
            "s": s,
            "step_size": sigma,
            "n_individuals": self.n_individuals,
            "n_parents": self.n_parents,
            "d_sigma": self.d_sigma,
            "n_restart": n_restart
        }
        return results
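The rank-based success rule (RSR) at the end of the loop compares the ranks of the old and new parents in the merged fitness list; positive q means the new generation improved. A standalone sketch with toy fitness values for mu = 3 parents:

    import numpy as np

    mu, q_star, c_s = 3, 0.3, 0.3
    w = np.log(np.arange(1, mu + 1))
    w = (np.log(mu + 1) - w) / (mu * np.log(mu + 1) - np.sum(w))
    RR = np.arange(1, 2 * mu + 1)
    Y_bak = np.array([3.0, 5.0, 7.0])  # previous parents' fitness
    Y = np.array([2.0, 4.0, 6.0])      # new parents' fitness (each one better)
    F = np.hstack((Y_bak, Y))
    R = np.argsort(F)
    R_t, R_t1 = RR[R < mu], RR[R >= mu]  # ranks of old vs new parents
    q = np.sum(w * (R_t - R_t1)) / mu    # here q = 1/3 > 0
    s_prev = 0.0
    s = (1 - c_s) * s_prev + c_s * (q - q_star)  # cumulative rank rate update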
Example #11
    def optimize(self, fitness_function=None):
        start_optimization = time.time()

        if (fitness_function is None) and (self.fitness_function is not None):
            fitness_function = self.fitness_function

        # initialize distribution mean
        x = MuCommaLambda._get_m(self)  # distribution mean
        start_evaluation = time.time()
        y = fitness_function(x)
        time_evaluations, n_evaluations = time.time() - start_evaluation, 1
        best_so_far_x, best_so_far_y = np.copy(x), np.copy(y)
        if self.save_fitness_data: fitness_data = [y]
        else: fitness_data = None
        if self.save_best_so_far_x:
            history_x = np.hstack((n_evaluations, best_so_far_x))
        else:
            history_x = None

        # iterate / evolve
        n_iterations = 1  # n_iter in train.py
        n_failure_cholesky = 0  # number of failures of Cholesky decomposition
        G = []  # all gradients obtained during optimization
        m = np.zeros((self.ndim_problem, ))  # first moment of Adam optimizer
        v = np.zeros((self.ndim_problem, ))  # second moment of Adam optimizer
        while n_evaluations < self.max_evaluations:
            if n_iterations < self.iota:  # do full sampling before iota iterations
                UUT = np.zeros([self.ndim_problem, self.ndim_problem])  # covariance matrix
                n_t, alpha = self.n_t, self.alpha  # n_samples in es.py
            else:  # use PCA decomposition to obtain subspace
                pca = PCA()
                pca_fit = pca.fit(G)  # SVD
                # take top n_t directions of maximum variance to construct covariance matrix
                n_t = max(
                    np.argmax(
                        np.cumsum(pca_fit.explained_variance_ratio_) >
                        self.epsilon) + 1, self.min_n_t)
                U, U_ort = pca_fit.components_[:n_t], pca_fit.components_[n_t:]
                UUT, UUT_ort = np.matmul(U.T, U), np.matmul(U_ort.T, U_ort)
                if n_iterations == self.iota: n_t = self.n_t

            # sample from hybrid Gaussian distribution
            cov = ((alpha / self.ndim_problem) * np.eye(self.ndim_problem) +
                   ((1 - alpha) / n_t) * UUT) * self.sigma
            A = np.zeros((n_t, self.ndim_problem))  # search directions (perturbation vectors)
            try:
                search_directions = cholesky(cov,
                                             check_finite=False,
                                             overwrite_a=True)  # l in es.py
                for i in range(n_t):
                    try:
                        A[i] = search_directions.dot(
                            self.rng.standard_normal((self.ndim_problem, )))
                    except LinAlgError:
                        A[i] = self.rng.standard_normal((self.ndim_problem, ))
            except LinAlgError:
                n_failure_cholesky += 1
                for i in range(n_t):
                    A[i] = self.rng.standard_normal((self.ndim_problem, ))
            # renormalize
            A /= np.linalg.norm(A, axis=-1)[:, np.newaxis]

            # estimate gradient via antithetic sampling
            antithetic_fitness = np.zeros((n_t, 2))  # all_rollouts in es.py
            for i in range(n_t):
                up_x = x + A[i, :]
                start_evaluation = time.time()
                up_y = fitness_function(up_x)
                time_evaluations += (time.time() - start_evaluation)
                n_evaluations += 1
                # update best-so-far x and y
                if best_so_far_y > up_y:
                    best_so_far_x, best_so_far_y = np.copy(up_x), np.copy(up_y)
                if self.save_fitness_data: fitness_data.append(np.copy(up_y))
                if self.save_best_so_far_x and not (n_evaluations %
                                                    self.freq_best_so_far_x):
                    history_x = np.vstack(
                        (history_x, np.hstack((n_evaluations, best_so_far_x))))
                if n_evaluations >= self.max_evaluations: break

                down_x = x - A[i, :]
                start_evaluation = time.time()
                down_y = fitness_function(down_x)
                time_evaluations += (time.time() - start_evaluation)
                n_evaluations += 1
                # update best-so-far x and y
                if best_so_far_y > down_y:
                    best_so_far_x, best_so_far_y = np.copy(down_x), np.copy(down_y)
                if self.save_fitness_data: fitness_data.append(np.copy(down_y))
                if self.save_best_so_far_x and not (n_evaluations %
                                                    self.freq_best_so_far_x):
                    history_x = np.vstack(
                        (history_x, np.hstack((n_evaluations, best_so_far_x))))
                if n_evaluations >= self.max_evaluations: break

                antithetic_fitness[i, :] = np.array([up_y, down_y])

            if n_evaluations >= self.max_evaluations: break
            antithetic_fitness = (antithetic_fitness - np.mean(antithetic_fitness)) / (
                np.std(antithetic_fitness) + 1e-8)
            fitness_diff = antithetic_fitness[:, 0] - antithetic_fitness[:, 1]  # m in es.py
            gradient = np.zeros((self.ndim_problem, ))  # g in es.py
            for i in range(n_t):
                gradient += (A[i, :] * fitness_diff[i])
            gradient /= (2 * self.sigma)

            # adaptive exploration mechanism
            if n_iterations >= self.iota:
                alpha = np.linalg.norm(np.dot(
                    gradient, UUT_ort)) / np.linalg.norm(np.dot(gradient, UUT))

            # add current gradient to G
            if n_iterations == 1: G = np.copy(gradient)
            else: G = np.vstack([self.gamma * G, gradient])
            gradient /= (np.linalg.norm(gradient) / self.ndim_problem + 1e-8)

            # update gradient via Adam optimizer
            m = 0.9 * m + (1 - 0.9) * gradient
            mt = m / (1 - 0.9**n_iterations)
            v = 0.999 * v + (1 - 0.999) * (gradient**2)
            vt = v / (1 - 0.999**n_iterations)
            x -= (self.eta * mt / (np.sqrt(vt) + 1e-8))
            n_iterations += 1

        fitness_data, time_compression = MuCommaLambda._save_data(
            self, history_x, fitness_data)

        results = {
            "best_so_far_x": best_so_far_x,
            "best_so_far_y": best_so_far_y,
            "n_evaluations": n_evaluations,
            "runtime": time.time() - start_optimization,
            "fitness_data": fitness_data,
            "termination": "max_evaluations",
            "time_evaluations": time_evaluations,
            "time_compression": time_compression,
            "n_iterations": n_iterations,
            "x": x,
            "gradient": gradient,
            "n_t": n_t,
            "alpha": alpha,
            "n_failure_cholesky": n_failure_cholesky,
            "shape_G": G.shape
        }
        return results
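The sampling step above draws perturbations from a hybrid Gaussian: an isotropic part weighted by alpha plus a part living in the PCA subspace of past gradients. A standalone sketch with a made-up stand-in for the PCA components:

    import numpy as np
    from scipy.linalg import cholesky

    rng = np.random.default_rng(0)
    n, n_t, alpha, sigma = 20, 5, 0.5, 0.02
    U = rng.standard_normal((n_t, n))
    U /= np.linalg.norm(U, axis=1, keepdims=True)  # stand-in for top PCA components
    UUT = np.matmul(U.T, U)
    cov = ((alpha / n) * np.eye(n) + ((1 - alpha) / n_t) * UUT) * sigma
    L = cholesky(cov)  # upper-triangular factor, cov = L.T @ L
    A = rng.standard_normal((n_t, n)) @ L  # rows ~ N(0, cov)
    A /= np.linalg.norm(A, axis=-1)[:, np.newaxis]  # renormalize, as above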
Example #12
    def optimize(self, fitness_function=None):
        start_optimization = time.time()

        if (fitness_function is None) and (self.fitness_function is not None):
            fitness_function = self.fitness_function

        # initialize distribution mean
        m = MuCommaLambda._get_m(self) # distribution mean == _cur_mean
        cur_std = self.init_std # distribution std
        start_evaluation = time.time()
        y = fitness_function(m)
        time_evaluations, n_evaluations = time.time() - start_evaluation, 1
        best_so_far_x, best_so_far_y = np.copy(m), np.copy(y)
        if self.save_fitness_data: fitness_data = [y]
        else: fitness_data = None
        if self.save_best_so_far_x: history_x = np.hstack((n_evaluations, best_so_far_x))
        else: history_x = None

        # iterate / evolve
        termination = "max_evaluations"
        n_epoch = 0

        # self.ndim_problem == _n_params
        X = np.empty((self.n_individuals, self.ndim_problem)) # population
        Y = np.tile(y, (self.n_individuals,)) # fitness of population
        while n_evaluations < self.max_evaluations:
            for i in range(self.n_individuals): # one generation / epoch
                # sample
                extra_var_mult = max(1.0 - n_epoch / self.extra_decay_time, 0)
                sample_std = np.sqrt(np.square(cur_std) + np.square(self.extra_std) * extra_var_mult)
                z = self.rng.standard_normal((self.ndim_problem,))
                X[i, :] = m + sample_std * z
                start_evaluation = time.time()
                y = fitness_function(X[i, :])
                time_evaluations += (time.time() - start_evaluation)
                n_evaluations += 1
                Y[i] = y

                # update best-so-far x and y
                if best_so_far_y > y: best_so_far_x, best_so_far_y = np.copy(X[i, :]), np.copy(y)
                if self.save_fitness_data: fitness_data.append(np.copy(y))
                if self.save_best_so_far_x and not(n_evaluations % self.freq_best_so_far_x):
                    history_x = np.vstack((history_x, np.hstack((n_evaluations, best_so_far_x))))

                # check three termination criteria
                runtime = time.time() - start_optimization
                is_break, termination = MuCommaLambda._check_terminations(
                    self, n_evaluations, runtime, best_so_far_y)
                if is_break: break

            # check three termination criteria
            is_break, termination = MuCommaLambda._check_terminations(
                self, n_evaluations, runtime, best_so_far_y)
            if is_break: break

            # update distribution mean + std via Maximum Likelihood Estimation (MLE)
            index = np.argsort(Y)
            X = X[index, :]
            m = np.mean(X[:self.n_best, :], axis=0)
            cur_std = np.std(X[:self.n_best, :], axis=0)  # note: a per-coordinate vector, not a scalar
            n_epoch += 1

        fitness_data, time_compression = MuCommaLambda._save_data(self, history_x, fitness_data)

        results = {"best_so_far_x": best_so_far_x,
                   "best_so_far_y": best_so_far_y,
                   "n_evaluations": n_evaluations,
                   "runtime": runtime,
                   "fitness_data": fitness_data,
                   "termination": termination,
                   "time_evaluations": time_evaluations,
                   "time_compression": time_compression,
                   "m": m,
                   "cur_std": cur_std,
                   "n_epoch": n_epoch}
        return results
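The distribution update above is plain maximum-likelihood estimation over the n_best elites: the new mean and per-coordinate std are those of the best-ranked individuals. A standalone sketch with a toy population:

    import numpy as np

    rng = np.random.default_rng(0)
    n_best = 5
    X = rng.standard_normal((100, 5))  # toy population
    Y = rng.standard_normal(100)       # toy fitness values (minimization)
    elite = X[np.argsort(Y)[:n_best], :]
    m = np.mean(elite, axis=0)         # new distribution mean
    cur_std = np.std(elite, axis=0)    # per-coordinate std vector, as noted above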
Example #13
    def optimize(self, fitness_function=None):
        start_optimization = time.time()

        if (fitness_function is None) and (self.fitness_function is not None):
            fitness_function = self.fitness_function

        # initialize distribution mean
        # do not confuse the distribution mean (m) with the number of multiple evolution paths (m)
        m = MuCommaLambda._get_m(self)  # distribution mean
        start_evaluation = time.time()
        y = fitness_function(m)
        time_evaluations, n_evaluations = time.time() - start_evaluation, 1
        best_so_far_x, best_so_far_y = np.copy(m), np.copy(y)
        if self.save_fitness_data: fitness_data = [y]
        else: fitness_data = None
        if self.save_best_so_far_x:
            history_x = np.hstack((n_evaluations, best_so_far_x))
        else:
            history_x = None

        # set weights for parents selection
        w = np.log(np.arange(1, self.n_parents + 1))
        w_1 = np.log(self.n_parents + 1)
        w = (w_1 - w) / (self.n_parents * w_1 - np.sum(w))
        # normalization factor for search direction adaptation
        mu_eff = 1 / np.sum(np.power(w, 2))

        # iterate / evolve
        termination = "max_evaluations"
        sigma = self.step_size  # mutation strength (global step-size)
        n_evolution_paths = self.n_evolution_paths  # m
        # multiple evolution paths
        MEP = np.zeros((n_evolution_paths, self.ndim_problem))  # P
        t_hat = np.zeros((n_evolution_paths, ))  # direction set for Algorithm 2
        p = np.zeros((self.ndim_problem, ))  # principal search direction (evolution path)
        s = 0  # cumulative rank rate
        t = 0  # generation index
        # introduce 6 new constant symbols to simplify the code
        # constant symbols for Line 4 of Algorithm 3
        a, b = np.sqrt(1 - self.c_cov), np.sqrt(self.c_cov)
        a_m = a**n_evolution_paths
        # constant symbols for Line 11 of Algorithm 3
        p_1, p_2 = 1 - self.c, np.sqrt(self.c * (2 - self.c) * mu_eff)
        # constant symbols for Line 13 of Algorithm 3
        RR = np.arange(1, self.n_parents * 2 + 1)  # ranks for R_t, R_(t+1)

        X = np.empty((self.n_individuals, self.ndim_problem))  # population
        Y = np.tile(y, (self.n_individuals, ))  # fitness of population
        while n_evaluations < self.max_evaluations:
            Y_bak = np.copy(Y)
            for i in range(self.n_individuals):  # one generation
                z = self.rng.standard_normal((self.ndim_problem, ))
                # sample (Line 4 of Algorithm 3)
                sum_p = np.zeros((self.ndim_problem, ))
                for j in range(1, n_evolution_paths + 1):
                    r = self.rng.standard_normal()
                    sum_p += (a**(n_evolution_paths - j)) * r * MEP[j - 1, :]
                X[i, :] = m + sigma * (a_m * z + b * sum_p)
                start_evaluation = time.time()
                y = fitness_function(X[i, :])
                time_evaluations += (time.time() - start_evaluation)
                n_evaluations += 1
                Y[i] = y

                # update best-so-far x and y
                if best_so_far_y > y:
                    best_so_far_x, best_so_far_y = np.copy(X[i, :]), np.copy(y)
                if self.save_fitness_data: fitness_data.append(np.copy(y))
                if self.save_best_so_far_x and not (n_evaluations %
                                                    self.freq_best_so_far_x):
                    history_x = np.vstack(
                        (history_x, np.hstack((n_evaluations, best_so_far_x))))

                # check three termination criteria
                runtime = time.time() - start_optimization
                is_break, termination = MuCommaLambda._check_terminations(
                    self, n_evaluations, runtime, best_so_far_y)
                if is_break: break

            # check four termination criteria
            is_break, termination = MuCommaLambda._check_terminations(
                self, n_evaluations, runtime, best_so_far_y, sigma)
            if is_break: break

            # update distribution mean
            index = np.argsort(Y)
            X, Y = X[index, :], Y[index]  # Line 9 of Algorithm 3
            m_bak = m
            # Line 10 of Algorithm 3
            m = np.zeros((self.ndim_problem, ))
            for j in range(self.n_parents):
                m += w[j] * X[j, :]

            # update principal search direction (Line 11 of Algorithm 3)
            p = p_1 * p + p_2 * ((m - m_bak) / sigma)

            # update multiple evolution paths (Algorithm 2 or Line 12 of Algorithm 3)
            T_min = np.min(np.diff(t_hat))
            if (T_min > self.T) or (t < n_evolution_paths):
                for i in range(n_evolution_paths - 1):
                    MEP[i, :], t_hat[i] = MEP[i + 1, :], t_hat[i + 1]
            else:
                i_apostrophe = np.argmin(np.diff(t_hat))
                for i in range(i_apostrophe, n_evolution_paths - 1):
                    MEP[i, :], t_hat[i] = MEP[i + 1, :], t_hat[i + 1]
            MEP[n_evolution_paths - 1, :] = p
            t_hat[n_evolution_paths - 1] = t
            t += 1  # Line 14 of Algorithm 3

            # adapt mutation strength (rank-based success rule, RSR) -> Line 13 of Algorithm 3
            F = np.hstack((Y_bak[:self.n_parents], Y[:self.n_parents]))
            R = np.argsort(F)
            R_t, R_t1 = RR[R < self.n_parents], RR[R >= self.n_parents]
            q = np.sum(w * (R_t - R_t1)) / self.n_parents
            s = (1 - self.c_s) * s + self.c_s * (q - self.q_star)
            sigma *= np.exp(s / self.d_sigma)

        fitness_data, time_compression = MuCommaLambda._save_data(
            self, history_x, fitness_data)

        results = {
            "best_so_far_x": best_so_far_x,
            "best_so_far_y": best_so_far_y,
            "n_evaluations": n_evaluations,
            "runtime": runtime,
            "fitness_data": fitness_data,
            "termination": termination,
            "time_evaluations": time_evaluations,
            "time_compression": time_compression,
            "m": m,
            "p": p,
            "s": s,
            "step_size": sigma,
            "t_hat": t_hat
        }
        return results
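The archive of multiple evolution paths is updated by Algorithm 2: the oldest path is dropped while the stored paths are well separated in time (or during the first m generations); otherwise the path that follows the smallest generation gap is dropped. A standalone sketch with toy values:

    import numpy as np

    n, n_paths, T, t = 5, 2, 5, 10
    MEP = np.zeros((n_paths, n))
    t_hat = np.array([2.0, 9.0])  # generations at which the stored paths were updated
    p = np.ones(n)                # current principal search direction
    T_min = np.min(np.diff(t_hat))
    if (T_min > T) or (t < n_paths):  # drop the oldest path
        for i in range(n_paths - 1):
            MEP[i, :], t_hat[i] = MEP[i + 1, :], t_hat[i + 1]
    else:                             # drop the path after the smallest gap
        i0 = np.argmin(np.diff(t_hat))
        for i in range(i0, n_paths - 1):
            MEP[i, :], t_hat[i] = MEP[i + 1, :], t_hat[i + 1]
    MEP[-1, :], t_hat[-1] = p, t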
Example #14
    def optimize(self, fitness_function=None):
        start_optimization = time.time()

        if (fitness_function is None) and (self.fitness_function is not None):
            fitness_function = self.fitness_function

        # initialize distribution mean
        m = MuCommaLambda._get_m(self) # y in Fig. 3
        start_evaluation = time.time()
        y = fitness_function(m)
        n_evaluations, time_evaluations = 1, time.time() - start_evaluation
        best_so_far_x, best_so_far_y = np.copy(m), np.copy(y)
        Y = np.tile(y, (self.n_individuals,)) # fitness of population

        if self.save_fitness_data: fitness_data = [y]
        else: fitness_data = None
        if self.save_best_so_far_x: history_x = np.hstack((n_evaluations, best_so_far_x))
        else: history_x = None
        
        # set weights for parents
        w = np.log(np.arange(1, self.n_parents + 1))
        w = (np.log((self.n_individuals + 1) / 2) - w) / (
            self.n_parents * np.log((self.n_individuals + 1) / 2) - np.sum(w))
        mu_eff = 1 / np.sum(np.power(w, 2))

        # initialize transformation matrix
        s = np.zeros((self.ndim_problem,))
        M = np.diag(np.ones((self.ndim_problem,)))

        c_s = (mu_eff + 2) / (mu_eff + self.ndim_problem + 5)
        s_1, s_2 = 1 - c_s, np.sqrt(mu_eff * c_s * (2 - c_s))
        c_1 = self.alpha_cov / (np.power(self.ndim_problem + 1.3, 2) + mu_eff)
        c_w = min(1 - c_1, self.alpha_cov * (mu_eff + 1 / mu_eff - 2) / (
            np.power(self.ndim_problem + 2, 2) + self.alpha_cov * mu_eff / 2))
        d_sigma = 1 + c_s + 2 * max(0, np.sqrt((mu_eff - 1) / (self.ndim_problem + 1)) - 1)

        # iterate
        sigma = self.step_size
        step_size_data = [sigma] if self.save_step_size_data else [None]
        Z = np.empty((self.n_individuals, self.ndim_problem)) # Gaussian noise for mutation
        D = np.empty((self.n_individuals, self.ndim_problem)) # search directions
        X = np.empty((self.n_individuals, self.ndim_problem)) # population
        I = np.diag(np.ones((self.ndim_problem,)))
        while n_evaluations < self.max_evaluations:
            for i in range(self.n_individuals): # l in Fig. 3
                Z[i, :] = self.rng.standard_normal((self.ndim_problem,)) # z_l in Fig. 3
                D[i, :] = np.transpose(np.dot(M, Z[i, :][:, np.newaxis])) # d_l in Fig. 3
                X[i, :] = m + sigma * D[i, :]
                start_evaluation = time.time()
                y = fitness_function(X[i, :]) # f_l in Fig. 3
                time_evaluations += (time.time() - start_evaluation)
                n_evaluations += 1
                Y[i] = y

                if self.save_fitness_data: fitness_data.append(np.copy(y))
                
                # update best-so-far x and y
                if best_so_far_y > y: best_so_far_x, best_so_far_y = np.copy(X[i, :]), np.copy(y)
                if self.save_best_so_far_x and not(n_evaluations % self.freq_best_so_far_x):
                    history_x = np.vstack((history_x, np.hstack((n_evaluations, best_so_far_x))))

                # check three termination criteria
                is_break, termination = MuCommaLambda._check_terminations(
                    self, n_evaluations, time.time() - start_optimization, best_so_far_y)
                if is_break: break
            
            # check four termination criteria
            runtime = time.time() - start_optimization
            is_break, termination = MuCommaLambda._check_terminations(
                    self, n_evaluations, runtime, best_so_far_y)
            if is_break: break
            if sigma <= self.threshold_step_size:
                termination = "threshold_step_size (lower)"
                break
            
            index = np.argsort(Y)
            Z, D, Y = Z[index, :], D[index, :], Y[index]
            d_w = np.zeros((self.ndim_problem,)) # for (M9) in Fig. 3
            z_w = np.zeros((self.ndim_problem,)) # for (M10) in Fig. 3
            zzt_w = np.zeros((self.ndim_problem, self.ndim_problem)) # for (M11) in Fig. 3
            for j in range(self.n_parents):
                d_w += (w[j] * D[j, :])
                z_w += (w[j] * Z[j, :])
                zzt_w += (w[j] * np.dot(Z[j, :][:, np.newaxis], Z[j, :][np.newaxis, :]))
            
            # update distribution mean
            m += (sigma * d_w)

            # update transformation matrix
            s = s_1 * s + s_2 * z_w
            M1 = (c_1 / 2) * (np.dot(s[:, np.newaxis], s[np.newaxis, :]) - I)
            M2 = (c_w / 2) * (zzt_w - I)
            M = np.dot(M, I + M1 + M2)

            # update step-size
            sigma *= np.exp(c_s / d_sigma * (np.linalg.norm(s) / self.expectation_chi - 1))
            if self.save_step_size_data: step_size_data.append(sigma)
        
        fitness_data, time_compression = MuCommaLambda._save_data(self, history_x, fitness_data)
        
        results = {"best_so_far_x": best_so_far_x,
            "best_so_far_y": best_so_far_y,
            "n_evaluations": n_evaluations,
            "runtime": runtime,
            "fitness_data": fitness_data,
            "termination": termination,
            "time_evaluations": time_evaluations,
            "time_compression": time_compression,
            "m": m,
            "step_size": sigma,
            "step_size_data": step_size_data}
        return results
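The transformation-matrix update above is M <- M * (I + (c_1/2)(s s^T - I) + (c_w/2)(zz^T_w - I)), nudging M toward the rank-one information in the search path s and the rank-mu information in the weighted sum of z z^T terms. A standalone sketch with toy inputs:

    import numpy as np

    rng = np.random.default_rng(0)
    n, c_1, c_w = 5, 0.1, 0.2
    I = np.eye(n)
    M = np.eye(n)                  # transformation matrix, identity at the start
    s = rng.standard_normal(n)     # stand-in for the cumulated search path
    zzt_w = np.eye(n)              # stand-in for the weighted sum of z z^T terms
    M1 = (c_1 / 2) * (np.dot(s[:, np.newaxis], s[np.newaxis, :]) - I)
    M2 = (c_w / 2) * (zzt_w - I)
    M = np.dot(M, I + M1 + M2)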