Example #1
# Imports assumed by this snippet (not shown in the excerpt); _random is taken
# here to be the standard library's random.random.
from math import exp
from random import choice, random as _random


def pairwise_metropolis_sampling(repl_i, sid_i, replicas, states, U):
    """
    Return a replica "j" to exchange with the given replica "i" based on
    the Metropolis criterion:

    P(i<->j) = min{1, exp(-du_ij)};

    du_ij = u_a(x_j) + u_b(x_i) - u_a(x_i) - u_b(x_j),

    where replicas i and j currently occupy states a and b respectively.
    Repeating this many times (on the order of N^3 to N^5 attempts, where N is
    the number of replicas) builds a Markov chain whose distribution eventually
    approaches the one obtained by directly sampling all replica/state
    permutations.
    """
    # Choose a replica other than repl_i.
    #
    nreplicas = len(replicas)
    repl_j = repl_i
    while repl_j == repl_i:
        j = choice(range(nreplicas))
        repl_j = replicas[j]
        sid_j = states[j]
    # Apply the Metropolis acceptance criterion. If the exchange is accepted,
    # return replica j; otherwise return replica i (no exchange).
    #
    du = (U[sid_i][repl_j] + U[sid_j][repl_i]
          - U[sid_i][repl_i] - U[sid_j][repl_j])
    if du > 0.:
        if _random() > exp(-du):
            return repl_i
        else:
            return repl_j
    else:
        return repl_j
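
A minimal usage sketch for the sampler above. The reduced-energy table U is assumed to be indexed as U[state][replica], and replicas/states are assumed to be parallel lists of ids; all of the numbers below are invented for illustration.

# Hypothetical 2-replica, 2-state reduced-energy table: U[state][replica].
U = [[0.0, 1.5],
     [2.0, 0.3]]
replicas = [0, 1]   # replica ids
states = [0, 1]     # states[k] is the state currently held by replicas[k]

# Propose an exchange partner for replica 0, which currently occupies state 0.
partner = pairwise_metropolis_sampling(0, 0, replicas, states, U)
print("exchange partner for replica 0:", partner)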
Example #2
    def step(self):
        """Take a step in the optimization"""
        rnd_cross = _random((self.npop, self.ndim))
        for i in xrange(self.npop):
            t0, t1, t2 = i, i, i
            while t0 == i:
                t0 = _randint(self.npop)
            while t1 == i or t1 == t0:
                t1 = _randint(self.npop)
            while t2 == i or t2 == t0 or t2 == t1:
                t2 = _randint(self.npop)

            # Mutation: donor vector = base member t0 plus a scaled difference
            # of two other members.
            v = self.population[t0, :] + self.F * (self.population[t1, :] -
                                                   self.population[t2, :])

            # Crossover: take donor components where the random draw falls
            # below C, otherwise keep the current member's components.
            crossover = rnd_cross[i] <= self.C
            u = np.where(crossover, v, self.population[i, :])

            # Force at least one donor component into the trial vector.
            ri = _randint(self.ndim)
            u[ri] = v[ri]

            # Selection: keep the trial vector if it improves the signed
            # objective (self.m flips the sign for maximization).
            ufit = self.m * self.fun(u)

            if ufit < self.fitness[i]:
                self.population[i, :] = u
                self.fitness[i] = ufit
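
The step method above leans on a few module-level names that the excerpt does not define (_random, _randint, np). A plausible set of bindings, assuming they wrap NumPy's random helpers, would be:

import numpy as np

# Assumed aliases, matching how the snippet calls them:
#   _random(shape) -> uniform samples in [0, 1) with the given shape
#   _randint(n)    -> a single integer drawn from [0, n)
_random = np.random.random
_randint = np.random.randint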
Example #4
    def __init__(self,
                 fun,
                 bounds,
                 npop,
                 F=0.5,
                 C=0.9,
                 seed=None,
                 maximize=False):

        if seed is not None:
            np.random.seed(seed)

        self.fun = fun
        self.bounds = np.asarray(bounds)
        self.npop = npop
        self.F = F
        self.C = C

        self.ndim = self.bounds.shape[0]
        # Sign factor: -1 turns the maximization problem into minimization.
        self.m = -1 if maximize else 1

        # Draw the initial population uniformly at random inside the bounds.
        bl = self.bounds[:, 0]
        bw = self.bounds[:, 1] - self.bounds[:, 0]
        self.population = bl[None, :] + _random(
            (self.npop, self.ndim)) * bw[None, :]
        self.fitness = np.empty(npop, dtype=float)
        self._minidx = None
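
As a quick check of the initialization logic above, the sketch below draws a small population inside made-up bounds and verifies that every coordinate lands between its lower and upper limit (the bounds, population size, and alias binding are assumptions, not values from the original project):

import numpy as np
from numpy.random import random as _random

bounds = np.asarray([(-5.0, 5.0), (0.0, 1.0)])   # hypothetical [ndim, 2] bounds
bl = bounds[:, 0]
bw = bounds[:, 1] - bounds[:, 0]
population = bl[None, :] + _random((4, 2)) * bw[None, :]
assert np.all((population >= bl) & (population <= bl + bw))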
Example #5
def calc_different_random_vector(vector):
    """Return a random unit vector that is not (nearly) parallel to vector."""
    random_vector = vector
    # Redraw until the cross product with the input is clearly non-zero, i.e.
    # the candidate is neither parallel nor anti-parallel to it.
    while _ln.norm(_n.cross(random_vector, vector)) < 1e-9:
        # Uniform draw in [-1, 1)^3, normalized to unit length.
        random_vector = _random(3)
        random_vector *= 2
        random_vector += _n.array([-1, -1, -1])
        random_vector /= _ln.norm(random_vector)
    return random_vector
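
The helper above relies on short aliases that the excerpt never defines. One plausible binding, plus a small usage check, assuming the aliases wrap NumPy (the alias names come from the snippet; the bindings themselves are a guess):

import numpy as _n
import numpy.linalg as _ln
from numpy.random import random as _random

v = _n.array([0.0, 0.0, 1.0])
w = calc_different_random_vector(v)
# w should be unit length and clearly non-parallel to v.
print(_ln.norm(w), _ln.norm(_n.cross(w, v)))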
Example #6
def weighted_choice(choices):
    """Return a random outcome from a sequence of (outcome, weight) pairs.

    Each outcome is drawn with probability proportional to its weight.
    """
    r = _random() * sum(w for c, w in choices)
    for c, w in choices:
        r -= w
        if r < 0:
            return c
    # Only reachable if floating-point rounding leaves a tiny remainder.
    return None
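
A quick usage sketch, assuming _random is bound to the standard library's random.random; the outcome labels and weights are invented, and the observed frequencies should roughly match the weight ratios.

from random import random as _random

choices = [("red", 1.0), ("green", 2.0), ("blue", 7.0)]
counts = {c: 0 for c, _ in choices}
for _ in range(10000):
    counts[weighted_choice(choices)] += 1
print(counts)   # expect roughly 10% / 20% / 70%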
Example #8
    def __init__(self,
                 fun,
                 bounds,
                 npop,
                 F=0.5,
                 C=0.5,
                 seed=None,
                 maximize=False):
        """ Constructor

        Parameters
        ----------
        fun: callable
        the function to be minimized

        bounds: sequence of tuples
            parameter bounds as [ndim, 2] sequence

        npop: int
            the size of the population
            5 * ndim - 10 * ndim are usual values

        F: float, optional (default=0.5)
            the difference amplification factor.
            Values of 0.5-0.8 are good in most cases.

        C: float, optional (default=0.5)
            The cross-over probability. Use 0.9 to test for fast convergence, and smaller
            values (~0.1) for a more elaborate search.

        seed: int, optional (default=None)
            Random seed, for reproductible results

        maximize: bool, optional (default=False)
            Switch setting whether to maximize or minimize the function.
            Defaults to minimization.
        """
        if seed is not None:
            np.random.seed(seed)

        self.fun = fun
        self.bounds = np.asarray(bounds)
        self.npop = npop
        self.F = F
        self.C = C

        self.ndim = self.bounds.shape[0]
        self.m = -1 if maximize else 1

        bl = self.bounds[:, 0]
        bw = self.bounds[:, 1] - self.bounds[:, 0]
        self.population = bl[None, :] + _random(
            (self.npop, self.ndim)) * bw[None, :]
        self.fitness = np.empty(npop, dtype=float)
        self._minidx = None
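
Putting the pieces together: the sketch below assumes the excerpted __init__ and step methods are collected into a single class. DiffEvol is a placeholder name (the excerpts never show the real class name), the objective and bounds are invented, and the initial fitness evaluation is an assumption about how the full class is meant to be used, since the constructor only allocates self.fitness with np.empty.

import numpy as np

# Placeholder: assume the excerpted methods live in a class named DiffEvol,
# with the module-level aliases _random/_randint bound as in the earlier sketch.

def sphere(x):
    # Toy objective with its minimum (0.0) at the origin.
    return float(np.sum(x ** 2))

de = DiffEvol(sphere, bounds=[(-5, 5), (-5, 5)], npop=20, F=0.5, C=0.9, seed=42)

# Evaluate the initial population once before stepping (assumption; the
# constructor leaves self.fitness uninitialized).
de.fitness[:] = [de.m * de.fun(p) for p in de.population]

for _ in range(200):
    de.step()

best = de.population[np.argmin(de.fitness)]
print("best point:", best, "objective:", de.fitness.min())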