Example #1
    def __init__(
            self,
            gen=500,
            elite=0.5,
            scale=0.3,
            variant=1,
            screen_output=False):
        """
        Constructs a Cross-Entropy Algorithm (Python)

        USAGE: algorithm.py_cross_entropy(gen = 1, elite = 0.5, scale = 0.2, variant=1, screen_output = False)

        NOTE: A multivariate normal distribution is used.
              The first sample is centered around the population champion.
              Covariance matrix and mean are evaluated using ind.best_x

        * gen: number of generations
        * elite: fraction of the population considered as elite (in (0,1])
        * scale: scaling factor for the estimated covariance matrix
        * variant: algorithmic variant to use (one of [1,2])
                 1. 'Canonical' - Covariance Matrix is evaluated as sum (x_(i+1)-mu_i)^T (x_(i+1)-mu_i)
                 2. 'Dario's' - Covariance Matrix is evaluated as sum (x_(i+1)-mu_(i+1))^T (x_(i+1)-mu_(i+1))
        * screen_output: activates screen_output (output at each generation)
        """
        try:
            import numpy as np
        except ImportError:
            raise ImportError(
                "This algorithm needs numpy to run. Is numpy installed?")

        base.__init__(self)
        self.__gen = gen
        self.__elite = elite
        self.__scale = scale
        self.__screen_output = screen_output
        self.__weights = []
        self.__variant = variant
        np.random.seed()
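
The snippet above only defines the constructor; the actual optimization is carried out by the algorithm's evolve method. Below is a minimal usage sketch, assuming the legacy PyGMO 1.x interface (algorithm, problem, population); problem.rosenbrock and the parameter values are illustrative choices, not part of the snippet.

# Minimal usage sketch (assumes PyGMO 1.x is installed; not taken from the snippet above).
from PyGMO import algorithm, problem, population

# Construct the algorithm with the parameters documented in the constructor.
algo = algorithm.py_cross_entropy(gen=100, elite=0.5, scale=0.3, variant=1)

# A standard PyGMO 1.x test problem and a random initial population.
prob = problem.rosenbrock(10)   # 10-dimensional Rosenbrock
pop = population(prob, 50)      # 50 random individuals

# evolve returns a new, improved population.
pop = algo.evolve(pop)
print(pop.champion.f)           # best fitness found
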
Example #2
    def __init__(self,
                 gen=500,
                 cc=-1,
                 cs=-1,
                 c1=-1,
                 cmu=-1,
                 sigma0=0.5,
                 ftol=1e-6,
                 xtol=1e-6,
                 memory=False,
                 screen_output=False):
        """
        Constructs a Covariance Matrix Adaptation Evolutionary Strategy (Python)

        USAGE: algorithm.py_cmaes(gen = 500, cc = -1, cs = -1, c1 = -1, cmu = -1, sigma0=0.5, ftol = 1e-6, xtol = 1e-6, memory = False, screen_output = False)

        NOTE: In our variant of the algorithm, particle memory is used to extract the elite, and reinsertion
        is made aggressively (getting rid of the worst individual). Also, the bounds of the problem
        are enforced, so as to allow the PaGMO machinery to work. Fine control over each iteration can be
        achieved by calling the algorithm with gen=1 (the algorithm state is stored, so CMA-ES continues at
        the next call without re-initializing all of its state).

        * gen: number of generations
        * cc: time constant for C cumulation (in [0,1]); if -1, an automatic value is set
        * cs: time constant for sigma cumulation (in [0,1]); if -1, an automatic value is set
        * c1: learning rate for the rank-1 update (in [0,1]); if -1, an automatic value is set
        * cmu: learning rate for the rank-mu update (in [0,1]); if -1, an automatic value is set
        * sigma0: starting step (std)
        * xtol: stopping criterion on the x tolerance
        * ftol: stopping criterion on the f tolerance
        * memory: when True the algorithm preserves memory of covariance, step and more between successive runs
        * screen_output: activates screen_output (output at each generation)
        """
        try:
            import numpy as np
        except ImportError:
            raise ImportError(
                "This algorithm needs numpy to run. Is numpy installed?")

        if (gen <= 0):
            raise ValueError("gen needs to be > 0")

        if ((cc < 0 or cc > 1) and not cc == -1):
            raise ValueError("cc needs to be in [0,1] or -1 for auto value")

        if ((cs < 0 or cs > 1) and not cs == -1):
            raise ValueError("cs needs to be in [0,1] or -1 for auto value")

        if ((c1 < 0 or c1 > 1) and not c1 == -1):
            raise ValueError("c1 needs to be in [0,1] or -1 for auto value")

        if ((cmu < 0 or cmu > 1) and not cmu == -1):
            raise ValueError("cmu needs to be in [0,1] or -1 for auto value")

        base.__init__(self)

        # Data members
        self.__cc = cc
        self.__cs = cs
        self.__c1 = c1
        self.__cmu = cmu
        self.__gen = gen
        self.__xtol = xtol
        self.__ftol = ftol
        self.__sigma0 = sigma0
        self.__memory = memory
        self.screen_output = screen_output

        # Algorithm memory
        self.__mean = 0
        self.__variation = 0
        self.__newpop = np.matrix([[1]])
        self.__B = 0
        self.__D = 0
        self.__C = 0
        self.__invsqrtC = 0
        self.__pc = 0
        self.__ps = 0
        self.__counteval = 0
        self.__eigeneval = 0

        np.random.seed()
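
The docstring above notes that fine-grained, per-iteration control is possible by constructing the algorithm with gen=1 and memory=True, so that the internal state (mean, covariance, step size) is preserved between calls to evolve. A minimal sketch of that pattern, again assuming the PyGMO 1.x interface; the stopping test is illustrative only.

# Per-iteration control sketch (assumes PyGMO 1.x; not taken from the snippet above).
from PyGMO import algorithm, problem, population

algo = algorithm.py_cmaes(gen=1, memory=True, sigma0=0.5)   # one generation per evolve() call
prob = problem.rosenbrock(10)
pop = population(prob, 40)

for i in range(200):
    pop = algo.evolve(pop)          # internal state (mean, C, sigma, ...) is preserved between calls
    if pop.champion.f[0] < 1e-6:    # illustrative stopping condition on the champion fitness
        break
print(pop.champion.f)
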