Example #1
0
def generate(image, label):
    """Evolve a one-pixel perturbation for ``image`` via differential evolution.

    A population of 400 ``candidate`` objects is created, each holding a
    random pixel location in [0, 32) x [0, 32) and a random RGB perturbation.
    The population is evolved with ``DE`` (lower ``score`` is better), the
    best survivor is applied to a deep copy of the image, and both the
    modified image and the winning candidate are returned.
    """
    population = [
        candidate(int(np.random.uniform(0, 32)), int(np.random.uniform(0, 32)),
                  np.random.normal(128 / 256, 127 / 256),
                  np.random.normal(128 / 256, 127 / 256),
                  np.random.normal(128 / 256, 127 / 256)) for _ in range(400)
    ]
    n_generations = 1

    for _ in range(n_generations):
        # produce offspring, then keep whichever of parent/child scores lower
        population, offspring = DE(population)
        for idx in range(len(population)):
            if score(image, label, population[idx]) > score(
                    image, label, offspring[idx]):
                population[idx] = offspring[idx]

    # linear scan for the index with the minimal score
    best_idx = 0
    for idx in range(len(population)):
        if score(image, label, population[idx]) < score(
                image, label, population[best_idx]):
            best_idx = idx

    winner = population[best_idx]
    perturbed = copy.deepcopy(image)
    px, py = winner.get_loc()
    red, green, blue = winner.get_perturbation()
    # write the perturbation into the (batch=0, channel, x, y) slots
    perturbed[0][0][px][py] = red
    perturbed[0][1][px][py] = green
    perturbed[0][2][px][py] = blue
    return perturbed, winner
Example #2
0
class Model(object):
    """
    Differential Evolution Monte Carlo
    ...and so much more...

    Maintains per-generation lists of particles, log likelihoods, weights,
    wall-clock times, and optional posterior payloads, each exposed as a
    read-only array property below.
    """
    param_names = property(lambda self: [p.name for p in self._params],
                           doc="""
                           List of parameter names.
                           """)

    param_display_names = property(
        lambda self: [p.display_name for p in self._params],
        doc="""
                                   List of parameter display names.
                                   """)

    particles = property(
        lambda self: self.apply_param_transform(np.asarray(self._particles)),
        doc="""
                         Particles as an array.
                         """)

    log_likes = property(lambda self: np.asarray(self._log_likes),
                         doc="""
                         Log likelihoods as an array.
                         """)

    weights = property(lambda self: np.asarray(self._weights),
                       doc="""
                       Weights as an array.
                       """)

    times = property(lambda self: np.asarray(self._times),
                     doc="""
                     Times as an array.
                     """)

    posts = property(lambda self: np.asarray(self._posts),
                     doc="""
                     Posts as an array.
                     """)

    # default proposal generators for normal sampling and for burn-in
    default_prop_gen = DE(gamma_best=0.0, rand_base=False)
    default_burnin_prop_gen = DE(gamma_best=None, rand_base=True)

    def __init__(self,
                 name,
                 params,
                 like_fun,
                 like_args=None,
                 num_chains=None,
                 proposal_gen=None,
                 burnin_proposal_gen=None,
                 initial_zeros_ok=False,
                 init_multiplier=1,
                 use_priors=True,
                 verbose=False,
                 partition=None,
                 parallel=None):
        """
        DEMC

        Parameters
        ----------
        name : model name.
        params : sequence of parameter objects (must not be None); each may
            carry ``prior``, ``init_prior``, ``transform``, ``name``, and
            ``display_name`` attributes.
        like_fun : callable evaluating the log likelihood of a population;
            may return either ``log_likes`` or ``(log_likes, posts)``.
        like_args : extra positional args forwarded to ``like_fun``.
        num_chains : number of chains; defaults to min(10 * len(params), 100).
        proposal_gen / burnin_proposal_gen : proposal generators; default to
            the class-level DE instances.
        initial_zeros_ok : if False, initialization resamples until no
            chain has an inf/nan log likelihood.
        init_multiplier : oversampling factor for the initial population.
        use_priors : whether weights include log priors.
        verbose : write progress to stdout.
        partition : number of leading parameters updated per crossover;
            clamped to len(params).
        parallel : stored for callers; not used directly in this class.
        """
        # set the vars
        self._name = name
        self._params = params  # can't be None
        if num_chains is None:
            num_chains = int(np.min([len(params) * 10, 100]))
        self._num_chains = num_chains
        self._initial_zeros_ok = initial_zeros_ok
        self._init_multiplier = init_multiplier
        self._use_priors = use_priors
        self._verbose = verbose
        if partition is None:
            partition = len(self._params)
        if partition > len(self._params):
            partition = len(self._params)
        self._partition = partition
        self._parallel = parallel

        # set up proposal generator
        if proposal_gen is None:
            proposal_gen = self.default_prop_gen
        self._prop_gen = proposal_gen
        if burnin_proposal_gen is None:
            burnin_proposal_gen = self.default_burnin_prop_gen
        self._burnin_prop_gen = burnin_proposal_gen

        # set up the like function
        # process the passed in like_fun
        self._like_fun = like_fun
        self._like_args = like_args
        if self._like_args is None:
            self._like_args = ()

        # in some cases we will need to recalc the likes, but the user
        # doesn't need to know about this
        self._recalc_likes = False

        # see if we need to apply a transform to any params
        if np.any([p.transform for p in self._params]):
            self._transform_needed = True
        else:
            self._transform_needed = False

        # used for preprocessing
        self._proposal = None
        self._prop_log_likes = None
        self._prop_posts = None
        self._prev_log_likes = None
        self._prev_posts = None

        # we have not initialized
        self._initialized = False

    def _initialize(self, force=False, num_chains=None, partition=None):
        """Draw and score the initial population (idempotent unless force)."""
        if self._initialized and not force:
            # already done
            return

        if self._verbose:
            sys.stdout.write('Initializing: ')
            sys.stdout.flush()

        # time it
        stime = time.time()

        # initialize the particles and log_likes
        self._num_params = len(self._params)
        if num_chains is not None:
            self._num_chains = num_chains
        self._particles = []
        self._log_likes = []
        self._weights = []
        self._times = []
        self._posts = []

        # set the partition
        # see if we're modifying it
        if partition is not None:
            if partition > len(self._params):
                partition = len(self._params)
            self._partition = partition

        # boolean mask selecting which params the current partition updates
        # (np.bool alias was removed in NumPy 1.24; plain bool is equivalent)
        self._parts = np.array([1] * self._partition + [0] *
                               (len(self._params) - self._partition),
                               dtype=bool)

        # fill using init priors (for initialization)
        init_parts = self._num_chains * self._init_multiplier
        pop = np.hstack([
            p.init_prior.rvs(
                (init_parts, 1)) if hasattr(p.init_prior, "rvs") else np.ones(
                    (init_parts, 1)) * p.init_prior for p in self._params
        ])
        if pop.ndim < 2:
            pop = pop[:, np.newaxis]

        # get the initial log_likes
        if self._verbose:
            sys.stdout.write('%d(%d) ' % (init_parts, self._num_chains))
            sys.stdout.flush()
        log_likes, posts = self._calc_log_likes(pop)

        # make sure not zero
        if not self._initial_zeros_ok:
            # resample any chain whose like is inf/nan until enough are good
            ind = np.isinf(log_likes) | np.isnan(log_likes)
            good_ind = ~ind
            while good_ind.sum() < self._num_chains:
                if self._verbose:
                    sys.stdout.write(
                        '%d(%d) ' %
                        (ind.sum(), self._num_chains - good_ind.sum()))
                    sys.stdout.flush()
                npop = np.hstack([
                    p.init_prior.rvs(
                        (ind.sum(),
                         1)) if hasattr(p.init_prior, "rvs") else np.ones(
                             (ind.sum(), 1)) * p.init_prior
                    for p in self._params
                ])
                if npop.ndim < 2:
                    npop = npop[:, np.newaxis]

                pop[ind, :] = npop

                # calc the log likes for those new pops
                log_likes[ind], temp_posts = self._calc_log_likes(pop[ind])
                if temp_posts is not None:
                    posts[ind] = temp_posts
                ind = np.isinf(log_likes) | np.isnan(log_likes)
                good_ind = ~ind

            # save the good pop
            good_ind = ~ind
            pop = pop[good_ind]
            if pop.ndim < 2:
                pop = pop[:, np.newaxis]
            log_likes = log_likes[good_ind]
            if posts is not None:
                posts = posts[good_ind]

        # trim any oversampling back down to num_chains
        if len(pop) > self._num_chains:
            pop = pop[:self._num_chains]
            log_likes = log_likes[:self._num_chains]
            if posts is not None:
                posts = posts[:self._num_chains]

        # append the initial log_likes and particles
        self._times.append(time.time() - stime)
        self._log_likes.append(log_likes)
        if self._use_priors:
            # calc log_priors
            log_priors = self.calc_log_prior(pop)
            self._weights.append(log_likes + log_priors)
        else:
            self._weights.append(log_likes)
        self._particles.append(pop)
        if posts is not None:
            self._posts.append(posts)

        # say we've initialized
        self._initialized = True

    def apply_param_transform(self, pop):
        """Return a copy of pop with each param's transform applied (if any)."""
        if self._transform_needed:
            pop = pop.copy()
            for i, p in enumerate(self._params):
                if p.transform:
                    pop[..., i] = p.transform(pop[..., i])
        return pop

    def _calc_log_likes(self, pop):
        """Evaluate like_fun on (transformed) pop; return (log_likes, posts)."""
        # apply transformation if necessary
        pop = self.apply_param_transform(pop)

        # first get the log likelihood for the pop
        out = self._like_fun(pop, *(self._like_args))
        if isinstance(out, tuple):
            # split into likes and posts
            log_likes, posts = out
        else:
            # just likes
            log_likes = out
            posts = None

        return log_likes, posts

    def _get_part_ind(self):
        """Return the current partition mask and roll it for the next call."""
        # grab the current partition indices
        parts = self._parts.copy()

        # roll them the size of the partition
        self._parts = np.roll(self._parts, self._partition)

        # return the pre-rolled value
        return parts

    def _crossover(self, burnin=False):
        """Generate a proposal population, updating only the active partition."""
        if burnin:
            prop_gen = self._burnin_prop_gen
        else:
            prop_gen = self._prop_gen

        # always pass params, though no longer using priors
        proposal = prop_gen(self._particles[-1], self._weights[-1],
                            self._params)

        # apply the partition by copying prev values back
        parts = self._get_part_ind()
        proposal[:, ~parts] = self._particles[-1][:, ~parts]

        return proposal

    def _migrate(self):
        """Circularly swap states between a random subset of chains (MH rule)."""
        # pick which items will migrate
        # (np.random.random_integers was removed from NumPy; randint with an
        #  exclusive upper bound reproduces the inclusive [2, num_chains] draw)
        num_to_migrate = np.random.randint(2, self._num_chains + 1)
        to_migrate = random.sample(range(self._num_chains), num_to_migrate)

        # do a circle swap
        keepers = []
        for f_ind in range(len(to_migrate)):
            if f_ind == len(to_migrate) - 1:
                # loop to beg
                t_ind = 0
            else:
                # loop to next
                t_ind = f_ind + 1

            # set the from and to inds
            i = to_migrate[f_ind]
            j = to_migrate[t_ind]

            # do comparison and swap if necessary
            log_diff = np.float64(self._weights[-1][i] - self._weights[-1][j])

            # now exp so we can get the other probs
            if log_diff > 0.0:
                log_diff = 0.0
            mh_prob = np.exp(log_diff)
            if np.isnan(mh_prob):
                mh_prob = 0.0
            keep = (mh_prob - np.random.rand()) > 0.0
            if keep:
                keepers.append({
                    'ind': j,
                    'particle': self._particles[-1][i],
                    'weight': self._weights[-1][i],
                    'log_like': self._log_likes[-1][i]
                })

        for k in keepers:
            # do the swap (i.e., replace j with i)
            # replace the particle, weight, log_like
            self._particles[-1][k['ind']] = k['particle']
            self._weights[-1][k['ind']] = k['weight']
            self._log_likes[-1][k['ind']] = k['log_like']

    def _post_evolve(self, pop, kept):
        """Hook called after each generation; subclasses may override."""
        pass

    def _evolve(self, burnin=False):
        """Advance one generation: propose, score, MH-accept, and append."""
        # first generate new proposals
        # loop over groups, making new proposal pops via mutation
        # or crossover
        if self._proposal is None:
            proposal = self._crossover(burnin=burnin)
        else:
            proposal = self._proposal

        # eval the population (this is separate from the proposals so
        # that we can parallelize the entire operation)
        if self._prop_log_likes is None:
            prop_log_likes, prop_posts = self._calc_log_likes(proposal)
        else:
            prop_log_likes = self._prop_log_likes
            prop_posts = self._prop_posts

        # see if recalc prev_likes in case of HyperPrior
        if self._recalc_likes:
            if self._prev_log_likes is None:
                prev_log_likes, prev_posts = self._calc_log_likes(
                    self._particles[-1])
            else:
                prev_log_likes = self._prev_log_likes
                prev_posts = self._prev_posts
        else:
            prev_log_likes = self._log_likes[-1]

        # decide whether to keep the new proposal or not
        # keep with a MH step
        log_diff = np.float64(prop_log_likes - prev_log_likes)

        # next see if we need to include priors for each param
        if self._use_priors:
            prop_log_prior, prev_log_prior = self.calc_log_prior(
                proposal, self._particles[-1])
            weights = prop_log_likes + prop_log_prior
            log_diff += np.float64(prop_log_prior - prev_log_prior)

            prev_weights = prev_log_likes + prev_log_prior
        else:
            weights = prop_log_likes
            prev_weights = prev_log_likes

        # handle much greater than one
        log_diff[log_diff > 0.0] = 0.0
        # now exp so we can get the other probs
        mh_prob = np.exp(log_diff)
        mh_prob[np.isnan(mh_prob)] = 0.0
        keep = (mh_prob - np.random.rand(len(mh_prob))) > 0.0

        # set the not keepers from previous population
        proposal[~keep] = self._particles[-1][~keep]
        prop_log_likes[~keep] = prev_log_likes[~keep]
        weights[~keep] = prev_weights[~keep]
        if prop_posts is not None:
            prop_posts[~keep] = self._posts[-1][~keep]

        # append the new proposal
        self._particles.append(proposal)
        self._log_likes.append(prop_log_likes)
        self._weights.append(weights)
        if prop_posts is not None:
            self._posts.append(prop_posts)

        # call post_evolve hook
        self._post_evolve(proposal, keep)

        # clean up for next
        self._proposal = None
        self._prop_log_likes = None
        self._prop_posts = None
        self._prev_log_likes = None
        self._prev_posts = None

    def __call__(self, num_iter, burnin=False, migration_prob=0.0):
        """Run num_iter generations (with optional migration); return times."""
        # make sure we've initialized
        self._initialize()

        # loop over iterations
        if self._verbose:
            sys.stdout.write('Iterations (%d): ' % (num_iter))
        times = []
        for i in range(num_iter):
            if np.random.rand() < migration_prob:
                # migrate, which is deterministic and done in place
                if self._verbose:
                    sys.stdout.write('x ')
                self._migrate()
            if self._verbose:
                sys.stdout.write('%d ' % (i + 1))
                sys.stdout.flush()
            stime = time.time()
            # evolve the population to the next generation
            self._evolve(burnin=burnin)
            times.append(time.time() - stime)
        if self._verbose:
            sys.stdout.write('\n')
        self._times.extend(times)
        return times

    def calc_log_prior(self, *props):
        """Sum each param's log prior pdf over one or more populations.

        Returns one array per passed-in population, or a single array when
        only one population is given.
        """
        # set starting log_priors
        log_priors = [np.zeros(len(p)) for p in props]

        # loop over params
        for i, param in enumerate(self._params):
            if hasattr(param.prior, "pdf"):
                # it's not a fixed value
                # pick props and make sure to pass all
                # into pdf at the same time
                # to ensure using the same prior dist
                p = np.hstack(
                    [props[j][:, i][:, np.newaxis] for j in range(len(props))])
                log_pdf = np.log(param.prior.pdf(p))

                for j in range(len(props)):
                    log_priors[j] += log_pdf[:, j]

        # just pick singular column if there's only one passed in
        if len(log_priors) == 1:
            log_priors = log_priors[0]
        return log_priors
Example #3
0
from de import DE
from problem_data import ProblemData
from config import Config


def test(self):
    """Callback printing the caller's score next to the population's best."""
    line = '%6.2f %6.2f' % (self.score, self.config.population.best_score)
    print(line)


if __name__ == "__main__":

    # Configure a 5-D Rosenbrock run with a population of 100 and a
    # 10000-evaluation budget.
    cfg = Config()
    cfg.size = 100
    cfg.dimensions = 5
    cfg.set_function_evaluations_budget(10000)
    cfg.problem = ProblemData(pname='Rosenbrock',
                              n_dimensions=cfg.dimensions)

    # Run DE with the progress-printing callback attached.
    solver = DE(config=cfg)
    solver.set_before_eval_callback(test)
    solver.run()

    print(solver.population.best_score)
Example #4
0
 def __init__(self, model, **settings):
   """Wrap ``model`` with a DE engine and a mutator.

   NOTE(review): ``O``, ``default``, ``DE``, and ``Mutator`` are project
   types not visible here — behavior claims below are inferred; confirm.
   """
   # base-class initialization (O appears to be the project base class)
   O.__init__(self)
   self.model = model
   # merge keyword overrides into the default settings
   # NOTE(review): assumes default().update(...) returns the settings
   # object (unlike dict.update, which returns None) — confirm.
   self.settings = default().update(**settings)
   # DE engine configured to run for settings.k1 generations
   self.de = DE(model, gens = self.settings.k1)
   self.mutator = Mutator(model.get_tree())
Example #5
0
def run_de(graph):
    """Build a Model for ``graph``, optimize it with DE, and show tile stats."""
    de_model = Model(graph)
    optimizer = DE(de_model)
    run_stats = optimizer.run()
    run_stats.tiles()
            % (median(fbest_r)))
        if runs > 1:
            results.write('Gbest Standard Deviation: %.4f\n\n' %
                          (stdev(fbest_r)))
        results.write('Elappsed Time Average: %.4f\n' %
                      (sum(elapTime_r) / len(elapTime_r)))
        if runs > 1:
            results.write('Elappsed Time Standard Deviation: %.4f\n' %
                          (stdev(elapTime_r)))
        results.write(
            '=================================================================================================================\n'
        )


if __name__ == '__main__':
    from de import DE

    # 2-D box of [-5.12, 5.12] per dimension, minimized over 10 repeated
    # runs of 100 iterations each with a population of 20.
    max_iterations = 100
    pop_size = 20
    dim = 2
    runs = 10
    bounds = ((-5.12, 5.12), (-5.12, 5.12))

    solver = DE()
    # note: method name spelling is fixed by the external DE class
    solver.diferentialEvolution(pop_size, dim, bounds, max_iterations, runs,
                                maximize=False, operator=0)
Example #7
0
import benchmark_functions as bf
import matplotlib.pyplot as plt
from ga import GA
from de import DE

# Objective: 2-D sphere function to be minimized.
f = bf.Sphere(dim=2)

# Only 2-D objectives can be rendered as a 3-D surface with contours.
if f.dim == 2:
    bf.plot3d(function=f, show_contour=True)
    plt.show()

# Configure both optimizers with a 10000-generation cap.
de = DE(obj_function=f, F=0.8, Cr=0.3, gen_max=10000)
ga = GA(obj_function=f, k=70, pm=0.5, gen_max=10000, sigma=2)

# Search until the per-function tolerance is met (or the cap is hit).
de_solution, de_gen = de.search(show_progress=False, error_tolerance=f.epsilon)
ga_solution, ga_gen = ga.search(
    show_progress=False, error_tolerance=f.epsilon, c=0.9, msc=50)

# Report results for each algorithm.
print("============== Differential evolution ==============")
print(
    "Solution {:f} found in {:d} generations. Optimal value is {:f}. Error is: {:f}."
    .format(f.value(de_solution), de_gen, f.optimum,
            f.value(de_solution) - f.optimum))

print("============== Genetic algorithm ==============")
print(
    "Solution {:f} found in {:d} generations. Optimal value is {:f}. Error is: {:f}."
    .format(f.value(ga_solution), ga_gen, f.optimum,