# Beispiel #1
def generate(image, label):
    """Evolve a population of single-pixel perturbation candidates with DE
    and apply the best one to a deep copy of `image`.

    A lower `score` is treated as better: children replace parents when they
    score lower, and the lowest-scoring survivor is applied.

    Returns (perturbed_image, best_candidate).
    """
    # seed 400 random candidates: pixel location in [0, 32) and an RGB
    # perturbation drawn from a normal centered near mid-gray
    population = [
        candidate(int(np.random.uniform(0, 32)),
                  int(np.random.uniform(0, 32)),
                  np.random.normal(128 / 256, 127 / 256),
                  np.random.normal(128 / 256, 127 / 256),
                  np.random.normal(128 / 256, 127 / 256))
        for _ in range(400)
    ]

    generations = 1
    for _ in range(generations):
        population, offspring = DE(population)
        # a child replaces its parent whenever it scores lower (better)
        for idx in range(len(population)):
            if score(image, label, population[idx]) > score(
                    image, label, offspring[idx]):
                population[idx] = offspring[idx]

    # locate the surviving candidate with the lowest score
    best_idx = 0
    for idx in range(len(population)):
        if score(image, label, population[idx]) < score(
                image, label, population[best_idx]):
            best_idx = idx

    winner = population[best_idx]
    perturbed = copy.deepcopy(image)
    px, py = winner.get_loc()
    # write the R, G, B perturbation into the three color planes of the copy
    for channel, value in enumerate(winner.get_perturbation()):
        perturbed[0][channel][px][py] = value
    return perturbed, winner
# Beispiel #2
class Model(object):
    """
    Differential Evolution Monte Carlo
    ...and so much more...

    Maintains an ensemble of chains ("particles") that are evolved with
    DE crossover proposals (plus optional migration between chains) and
    accepted or rejected with a Metropolis-Hastings step.
    """
    param_names = property(lambda self: [p.name for p in self._params],
                           doc="List of parameter names.")

    param_display_names = property(
        lambda self: [p.display_name for p in self._params],
        doc="List of parameter display names.")

    particles = property(
        lambda self: self.apply_param_transform(np.asarray(self._particles)),
        doc="Particles as an array (with parameter transforms applied).")

    log_likes = property(lambda self: np.asarray(self._log_likes),
                         doc="Log likelihoods as an array.")

    weights = property(lambda self: np.asarray(self._weights),
                       doc="Weights as an array.")

    times = property(lambda self: np.asarray(self._times),
                     doc="Per-iteration wall-clock times as an array.")

    posts = property(lambda self: np.asarray(self._posts),
                     doc="Posts as an array.")

    # default DE proposal generators for the sampling and burnin phases
    default_prop_gen = DE(gamma_best=0.0, rand_base=False)
    default_burnin_prop_gen = DE(gamma_best=None, rand_base=True)

    def __init__(self,
                 name,
                 params,
                 like_fun,
                 like_args=None,
                 num_chains=None,
                 proposal_gen=None,
                 burnin_proposal_gen=None,
                 initial_zeros_ok=False,
                 init_multiplier=1,
                 use_priors=True,
                 verbose=False,
                 partition=None,
                 parallel=None):
        """
        DEMC: configure a Differential Evolution Monte Carlo sampler.

        Parameters
        ----------
        name : str
            Name for this model.
        params : list
            Parameter objects; must not be None. Each provides ``.name``,
            ``.prior``, ``.init_prior`` and optionally ``.transform``.
        like_fun : callable
            ``like_fun(pop, *like_args)`` returning either an array of
            log likelihoods or a ``(log_likes, posts)`` tuple.
        like_args : tuple, optional
            Extra positional arguments passed to ``like_fun``.
        num_chains : int, optional
            Number of chains; defaults to ``min(len(params) * 10, 100)``.
        proposal_gen, burnin_proposal_gen : callable, optional
            Proposal generators; default to the class-level DE instances.
        initial_zeros_ok : bool
            If False, re-draw initial particles until no log-like is
            inf/nan.
        init_multiplier : int
            Draw ``num_chains * init_multiplier`` initial particles.
        use_priors : bool
            If True, chain weights include the log prior.
        verbose : bool
            If True, write progress to stdout.
        partition : int, optional
            Number of parameters a single proposal may modify at once;
            clipped to ``len(params)``.
        parallel : optional
            Stored on the instance; not used directly in this class.
        """
        # set the vars
        self._name = name
        self._params = params  # can't be None
        if num_chains is None:
            num_chains = int(np.min([len(params) * 10, 100]))
        self._num_chains = num_chains
        self._initial_zeros_ok = initial_zeros_ok
        self._init_multiplier = init_multiplier
        self._use_priors = use_priors
        self._verbose = verbose
        if partition is None:
            partition = len(self._params)
        if partition > len(self._params):
            partition = len(self._params)
        self._partition = partition
        self._parallel = parallel

        # set up proposal generator
        if proposal_gen is None:
            proposal_gen = self.default_prop_gen
        self._prop_gen = proposal_gen
        if burnin_proposal_gen is None:
            burnin_proposal_gen = self.default_burnin_prop_gen
        self._burnin_prop_gen = burnin_proposal_gen

        # set up the like function
        # process the passed in like_fun
        self._like_fun = like_fun
        self._like_args = like_args
        if self._like_args is None:
            self._like_args = ()

        # in some cases we will need to recalc the likes, but the user
        # doesn't need to know about this
        self._recalc_likes = False

        # see if we need to apply a transform to any params
        if np.any([p.transform for p in self._params]):
            self._transform_needed = True
        else:
            self._transform_needed = False

        # used for preprocessing
        self._proposal = None
        self._prop_log_likes = None
        self._prop_posts = None
        self._prev_log_likes = None
        self._prev_posts = None

        # we have not initialized
        self._initialized = False

    def _initialize(self, force=False, num_chains=None, partition=None):
        """Draw and score the initial population from the init priors.

        Re-samples any chain whose log-like is inf/nan (unless
        ``initial_zeros_ok``), then records the initial particles,
        log-likes, weights, and (optionally) posts.
        """
        if self._initialized and not force:
            # already done
            return

        if self._verbose:
            sys.stdout.write('Initializing: ')
            sys.stdout.flush()

        # time it
        stime = time.time()

        # initialize the particles and log_likes
        self._num_params = len(self._params)
        if num_chains is not None:
            self._num_chains = num_chains
        self._particles = []
        self._log_likes = []
        self._weights = []
        self._times = []
        self._posts = []

        # set the partition
        # see if we're modifying it
        if partition is not None:
            if partition > len(self._params):
                partition = len(self._params)
            self._partition = partition

        # boolean mask selecting which params a proposal may modify;
        # np.bool was removed in NumPy >= 1.24, so use the builtin bool
        self._parts = np.array([1] * self._partition + [0] *
                               (len(self._params) - self._partition),
                               dtype=bool)

        # fill using init priors (for initialization); fixed-value
        # init_priors (no .rvs) are broadcast as constants
        init_parts = self._num_chains * self._init_multiplier
        pop = np.hstack([
            p.init_prior.rvs(
                (init_parts, 1)) if hasattr(p.init_prior, "rvs") else np.ones(
                    (init_parts, 1)) * p.init_prior for p in self._params
        ])
        if pop.ndim < 2:
            pop = pop[:, np.newaxis]

        # get the initial log_likes
        if self._verbose:
            sys.stdout.write('%d(%d) ' % (init_parts, self._num_chains))
            sys.stdout.flush()
        log_likes, posts = self._calc_log_likes(pop)

        # make sure not zero
        if not self._initial_zeros_ok:
            ind = np.isinf(log_likes) | np.isnan(log_likes)
            good_ind = ~ind
            while good_ind.sum() < self._num_chains:
                if self._verbose:
                    sys.stdout.write(
                        '%d(%d) ' %
                        (ind.sum(), self._num_chains - good_ind.sum()))
                    sys.stdout.flush()
                # redraw only the bad rows
                npop = np.hstack([
                    p.init_prior.rvs(
                        (ind.sum(),
                         1)) if hasattr(p.init_prior, "rvs") else np.ones(
                             (ind.sum(), 1)) * p.init_prior
                    for p in self._params
                ])
                if npop.ndim < 2:
                    npop = npop[:, np.newaxis]

                pop[ind, :] = npop

                # calc the log likes for those new pops
                log_likes[ind], temp_posts = self._calc_log_likes(pop[ind])
                if temp_posts is not None:
                    posts[ind] = temp_posts
                ind = np.isinf(log_likes) | np.isnan(log_likes)
                good_ind = ~ind

            # save the good pop
            good_ind = ~ind
            pop = pop[good_ind]
            if pop.ndim < 2:
                pop = pop[:, np.newaxis]
            log_likes = log_likes[good_ind]
            if posts is not None:
                posts = posts[good_ind]

        # trim any oversampling (init_multiplier > 1) down to num_chains
        if len(pop) > self._num_chains:
            pop = pop[:self._num_chains]
            log_likes = log_likes[:self._num_chains]
            if posts is not None:
                posts = posts[:self._num_chains]

        # append the initial log_likes and particles
        self._times.append(time.time() - stime)
        self._log_likes.append(log_likes)
        if self._use_priors:
            # calc log_priors
            log_priors = self.calc_log_prior(pop)
            self._weights.append(log_likes + log_priors)
        else:
            self._weights.append(log_likes)
        self._particles.append(pop)
        if posts is not None:
            self._posts.append(posts)

        # say we've initialized
        self._initialized = True

    def apply_param_transform(self, pop):
        """Return `pop` with each param's transform applied to its column
        (copies first; no-op when no param has a transform)."""
        if self._transform_needed:
            pop = pop.copy()
            for i, p in enumerate(self._params):
                if p.transform:
                    pop[..., i] = p.transform(pop[..., i])
        return pop

    def _calc_log_likes(self, pop):
        """Evaluate the like function on (transformed) `pop`.

        Returns (log_likes, posts); posts is None when the like function
        returns only log likelihoods.
        """
        # apply transformation if necessary
        pop = self.apply_param_transform(pop)

        # first get the log likelihood for the pop
        out = self._like_fun(pop, *(self._like_args))
        if isinstance(out, tuple):
            # split into likes and posts
            log_likes, posts = out
        else:
            # just likes
            log_likes = out
            posts = None

        return log_likes, posts

    def _get_part_ind(self):
        """Return the current partition mask and roll it for next time."""
        # grab the current partition indices
        parts = self._parts.copy()

        # roll them the size of the partition
        self._parts = np.roll(self._parts, self._partition)

        # return the pre-rolled value
        return parts

    def _crossover(self, burnin=False):
        """Generate a proposal population, restricted to the active
        partition (unselected params are copied from the previous
        generation)."""
        if burnin:
            prop_gen = self._burnin_prop_gen
        else:
            prop_gen = self._prop_gen

        # always pass params, though no longer using priors
        proposal = prop_gen(self._particles[-1], self._weights[-1],
                            self._params)

        # apply the partition by copying prev values back
        parts = self._get_part_ind()
        proposal[:, ~parts] = self._particles[-1][:, ~parts]

        return proposal

    def _migrate(self):
        """Circle-swap particles between randomly selected chains,
        accepting each swap with an MH probability; modifies the current
        generation in place."""
        # pick how many chains migrate: an integer in [2, num_chains]
        # inclusive. np.random.random_integers was removed from NumPy;
        # randint's upper bound is exclusive, hence the +1 to keep the
        # same inclusive range.
        num_to_migrate = np.random.randint(2, self._num_chains + 1)
        to_migrate = random.sample(range(self._num_chains), num_to_migrate)

        # do a circle swap
        keepers = []
        for f_ind in range(len(to_migrate)):
            if f_ind == len(to_migrate) - 1:
                # loop to beg
                t_ind = 0
            else:
                # loop to next
                t_ind = f_ind + 1

            # set the from and to inds
            i = to_migrate[f_ind]
            j = to_migrate[t_ind]

            # do comparison and swap if necessary
            log_diff = np.float64(self._weights[-1][i] - self._weights[-1][j])

            # now exp so we can get the other probs
            if log_diff > 0.0:
                log_diff = 0.0
            mh_prob = np.exp(log_diff)
            if np.isnan(mh_prob):
                mh_prob = 0.0
            keep = (mh_prob - np.random.rand()) > 0.0
            if keep:
                keepers.append({
                    'ind': j,
                    'particle': self._particles[-1][i],
                    'weight': self._weights[-1][i],
                    'log_like': self._log_likes[-1][i]
                })

        for k in keepers:
            # do the swap (i.e., replace j with i)
            # replace the particle, weight, log_like
            self._particles[-1][k['ind']] = k['particle']
            self._weights[-1][k['ind']] = k['weight']
            self._log_likes[-1][k['ind']] = k['log_like']

    def _post_evolve(self, pop, kept):
        """Hook called after each evolve step; subclasses may override."""
        pass

    def _evolve(self, burnin=False):
        """Advance the ensemble one generation.

        Builds (or reuses a preprocessed) proposal, scores it, accepts
        per-chain with an MH step, and appends the resulting generation.
        """
        # first generate new proposals
        # loop over groups, making new proposal pops via mutation
        # or crossover
        if self._proposal is None:
            proposal = self._crossover(burnin=burnin)
        else:
            proposal = self._proposal

        # eval the population (this is separate from the proposals so
        # that we can parallelize the entire operation)
        if self._prop_log_likes is None:
            prop_log_likes, prop_posts = self._calc_log_likes(proposal)
        else:
            prop_log_likes = self._prop_log_likes
            prop_posts = self._prop_posts

        # see if recalc prev_likes in case of HyperPrior
        if self._recalc_likes:
            if self._prev_log_likes is None:
                prev_log_likes, prev_posts = self._calc_log_likes(
                    self._particles[-1])
            else:
                prev_log_likes = self._prev_log_likes
                prev_posts = self._prev_posts
        else:
            prev_log_likes = self._log_likes[-1]

        # decide whether to keep the new proposal or not
        # keep with a MH step
        log_diff = np.float64(prop_log_likes - prev_log_likes)

        # next see if we need to include priors for each param
        if self._use_priors:
            prop_log_prior, prev_log_prior = self.calc_log_prior(
                proposal, self._particles[-1])
            weights = prop_log_likes + prop_log_prior
            log_diff += np.float64(prop_log_prior - prev_log_prior)

            prev_weights = prev_log_likes + prev_log_prior
        else:
            weights = prop_log_likes
            prev_weights = prev_log_likes

        # handle much greater than one
        log_diff[log_diff > 0.0] = 0.0
        # now exp so we can get the other probs
        mh_prob = np.exp(log_diff)
        mh_prob[np.isnan(mh_prob)] = 0.0
        keep = (mh_prob - np.random.rand(len(mh_prob))) > 0.0

        # set the not keepers from previous population
        proposal[~keep] = self._particles[-1][~keep]
        prop_log_likes[~keep] = prev_log_likes[~keep]
        weights[~keep] = prev_weights[~keep]
        if prop_posts is not None:
            prop_posts[~keep] = self._posts[-1][~keep]

        # append the new proposal
        self._particles.append(proposal)
        self._log_likes.append(prop_log_likes)
        self._weights.append(weights)
        if prop_posts is not None:
            self._posts.append(prop_posts)

        # call post_evolve hook
        self._post_evolve(proposal, keep)

        # clean up for next
        self._proposal = None
        self._prop_log_likes = None
        self._prop_posts = None
        self._prev_log_likes = None
        self._prev_posts = None

    def __call__(self, num_iter, burnin=False, migration_prob=0.0):
        """Run `num_iter` DEMC iterations.

        Each iteration may first migrate (with probability
        `migration_prob`) and then evolves the ensemble. Returns the list
        of per-iteration wall-clock times.
        """
        # make sure we've initialized
        self._initialize()

        # loop over iterations
        if self._verbose:
            sys.stdout.write('Iterations (%d): ' % (num_iter))
        times = []
        # xrange is Python 2-only; range behaves the same in a for loop
        for i in range(num_iter):
            if np.random.rand() < migration_prob:
                # migrate, which is deterministic and done in place
                if self._verbose:
                    sys.stdout.write('x ')
                self._migrate()
            if self._verbose:
                sys.stdout.write('%d ' % (i + 1))
                sys.stdout.flush()
            stime = time.time()
            # evolve the population to the next generation
            self._evolve(burnin=burnin)
            times.append(time.time() - stime)
        if self._verbose:
            sys.stdout.write('\n')
        self._times.extend(times)
        return times

    def calc_log_prior(self, *props):
        """Sum each param's log prior density over one or more proposal
        populations.

        All populations are evaluated through the same prior call so they
        share the same prior distribution. Returns one array per passed-in
        population (or a single array when only one is passed).
        """
        # set starting log_priors
        log_priors = [np.zeros(len(p)) for p in props]

        # loop over params
        for i, param in enumerate(self._params):
            if hasattr(param.prior, "pdf"):
                # it's not a fixed value
                # pick props and make sure to pass all
                # into pdf at the same time
                # to ensure using the same prior dist
                p = np.hstack(
                    [props[j][:, i][:, np.newaxis] for j in range(len(props))])
                log_pdf = np.log(param.prior.pdf(p))

                for j in range(len(props)):
                    log_priors[j] += log_pdf[:, j]

        # just pick singular column if there's only one passed in
        if len(log_priors) == 1:
            log_priors = log_priors[0]
        return log_priors
# Beispiel #3
 def __init__(self, model, **settings):
   """Bind a model and tuning settings; build the DE engine and mutator.

   NOTE(review): assumes `default().update(**settings)` returns the
   updated settings object (O-style API), unlike dict.update which
   returns None -- confirm against the settings implementation.
   """
   O.__init__(self)
   # the model under optimization
   self.model = model
   # merge caller overrides into the default settings bundle
   self.settings = default().update(**settings)
   # DE engine configured to run for settings.k1 generations
   self.de = DE(model, gens = self.settings.k1)
   # mutator draws candidate decision vectors from the model's tree
   self.mutator = Mutator(model.get_tree())
# Beispiel #4
from de import DE
from problem_data import ProblemData
from config import Config


def test(self):
    """Progress callback: print this point's score beside the population's
    best score so far, both right-aligned to width 6 with 2 decimals."""
    current = self.score
    best = self.config.population.best_score
    print('%6.2f %6.2f' % (current, best))


if __name__ == "__main__":

    # experiment setup: 5-dimensional Rosenbrock with a budget of
    # 10000 function evaluations; config.size is presumably the
    # population size -- confirm against the Config class
    config = Config()
    config.size = 100
    config.dimensions = 5
    config.set_function_evaluations_budget(10000)
    problem_data = ProblemData(pname='Rosenbrock',
                               n_dimensions=config.dimensions)
    config.problem = problem_data

    # run DE with `test` registered as the before-evaluation callback
    de = DE(config=config)
    de.set_before_eval_callback(test)
    de.run()

    # final best score found by the search
    print(de.population.best_score)
# Beispiel #5
class Star1(O):
  """DE-based decision ranking and pruning over a model tree.

  Runs differential evolution over `model`, pools the first and last DE
  generations into best/rest sets (`sample`), scores each decision by how
  strongly it separates those sets (`rank`), and evaluates populations
  generated under growing prefixes of the ranked decisions (`prune`).

  NOTE(review): several spots use Python 2-only idioms (subscripting
  zip(), csv with a "wb" file handle, true-division-sensitive ratios) --
  flagged on the individual methods below.
  """
  def __init__(self, model, **settings):
    """Bind the model, merged settings, DE engine, and mutator.

    NOTE(review): assumes default().update(**settings) returns the
    updated settings object (O-style API), unlike dict.update.
    """
    O.__init__(self)
    self.model = model
    self.settings = default().update(**settings)
    self.de = DE(model, gens = self.settings.k1)
    self.mutator = Mutator(model.get_tree())

  # def sample(self, sub_folder):
  #   stat = self.de.run()
  #   self.to_csv(stat, "csv/"+sub_folder+"/"+self.model.get_tree().name+".csv")
  #   stat.settings.gen_step = self.settings.gen_step
  #   stat.tiles()
  #   population = set()
  #   for point in stat.generations[0]:
  #     population.add(point)
  #   for point in stat.generations[-1]:
  #     population.add(point)
  #   best_size = int(len(population) * self.settings.best_percent/100)
  #   best = sel_nsga2(self.model, list(population), best_size)
  #   rest = population - set(best)
  #   return best, list(rest)

  def sample(self, sub_folder):
    """Run DE and split the pooled population into (best, rest).

    Pools the first and last DE generations, then, for each objective,
    adds the top fifth (sorted by descending objective value) of the pool
    to `best`; everything else is `rest`. Also dumps the run to CSV and
    renders tiles as side effects.
    """
    stat = self.de.run()
    self.to_csv(stat, "csv/"+sub_folder+"/"+self.model.get_tree().name+".csv")
    stat.settings.gen_step = self.settings.gen_step
    stat.tiles()
    best = set()
    population = set()
    # union of the initial and final generations
    for point in stat.generations[0]:
      population.add(point)
    for point in stat.generations[-1]:
      population.add(point)
    for obj_index in range(len(self.de.settings.better)):
      # top 1/5 (of the final generation's size) per objective
      sorted_pop = sorted(list(population), key=lambda x: x.objectives[obj_index], reverse=True)[:len(stat.generations[-1])//5]
      best.update(sorted_pop)
    rest = population - best
    return list(best), list(rest)

  def rank(self, best, rest):
    """Rank base decisions by 'support' for their +1 and -1 settings.

    For each base decision node, computes frequency-weighted likelihoods
    of positive/negative settings in `best` vs `rest` and a support score
    l_best**2 / (l_best + l_rest) for each polarity; returns one Decision
    per base node (its higher-support polarity), sorted by descending
    support.

    NOTE(review): in `best`, zero-valued decisions count as neither pos
    nor neg, but the `rest` loop's else-branch lumps zero together with
    negative -- confirm this asymmetry is intentional.
    """
    best_size = len(best)
    rest_size = len(rest)
    # NOTE(review): these ratios rely on true division; under Python 2
    # without `from __future__ import division` they truncate to int.
    p_best = best_size / (best_size + rest_size)
    p_rest = rest_size / (best_size + rest_size)
    decisions = []
    for dec_node in self.model.bases:
      f_best, pos_count, neg_count = 0, 0, 0
      for point in best:
        if point.decisions[dec_node.id] > 0:
          pos_count += 1
        elif point.decisions[dec_node.id] < 0:
          neg_count += 1
      f_pos_best = pos_count / best_size
      l_pos_best = f_pos_best * p_best
      f_neg_best = neg_count / best_size
      l_neg_best = f_neg_best * p_best
      f_pos_rest, f_neg_rest = 0, 0
      for point in rest:
        if point.decisions[dec_node.id] > 0:
          f_pos_rest += 1
        else:
          f_neg_rest += 1
      f_pos_rest /= rest_size
      f_neg_rest /= rest_size
      l_pos_rest = f_pos_rest * p_rest
      l_neg_rest = f_neg_rest * p_rest
      # guard the 0/0 case for each polarity's support
      if l_pos_best == 0 and l_pos_rest == 0:
        sup_pos = 0
      else:
        sup_pos = l_pos_best ** 2 / (l_pos_best + l_pos_rest)
      if l_neg_best == 0 and l_neg_rest == 0:
        sup_neg = 0
      else:
        sup_neg = l_neg_best ** 2 / (l_neg_best + l_neg_rest)
      decisions.append(Decision(id = dec_node.id, name = dec_node.name,
                                support=sup_pos, value = 1,
                                type = dec_node.type, container=dec_node.container,
                                cost = dec_node.base_cost, benefit = dec_node.base_benefit))
      decisions.append(Decision(id = dec_node.id, name = dec_node.name,
                                support=sup_neg, value = -1,
                                type = dec_node.type, container=dec_node.container,
                                cost = dec_node.base_cost, benefit = dec_node.base_benefit))
    decisions.sort(key=lambda x:x.support, reverse=True)
    # keep only the first (highest-support) polarity seen per decision id
    sorted_decs = []
    aux = set()
    for dec in decisions:
      if dec.id not in aux:
        sorted_decs.append(dec)
        aux.add(dec.id)
    assert len(sorted_decs) == len(self.model.bases), "Mismatch after sorting support"
    return sorted_decs

  def generate(self, presets = None, check_validity = False):
    """Build settings.k2 distinct points from the mutator, forcing each
    preset decision's value; when check_validity, keep only points whose
    tree root evaluates to 1.

    NOTE(review): the presets=None default would raise TypeError when
    iterated -- callers in this class always pass a list; confirm no
    external caller relies on the default.
    """
    population = list()
    while len(population) < self.settings.k2:
      point = Point(self.mutator.generate())
      if not point in population:
        for preset in presets:
          point.decisions[preset.id] = preset.value
        if check_validity:
          self.model.reset_nodes(point.decisions)
          self.model.eval(self.model.get_tree().root)
          if self.model.get_tree().root.value != 1:
            continue
        population.append(point)
    return population

  @staticmethod
  def objective_stats(generations):
    """Compute per-objective median/IQR per generation.

    Returns (stats, objective_map): stats is a list (one dict per
    objective) with "meds" and "iqrs" lists across generations;
    objective_map maps objective index -> per-generation value lists.
    """
    stats = []
    obj_len = len(generations[0][0].objectives)
    objective_map = {}
    for i in range(obj_len):
      objectives = []
      data_map = {}
      meds = []
      iqrs = []
      for gen, pop in enumerate(generations):
        objs = [pt.objectives[i] for pt in pop]
        objectives.append(objs)
        med, iqr = median_iqr(objs)
        meds.append(med)
        iqrs.append(iqr)
      objective_map[i] = objectives
      data_map["meds"] = meds
      data_map["iqrs"] = iqrs
      stats.append(data_map)
    return stats, objective_map

  def evaluate(self, point, decisions):
    """Lazily compute and cache a point's objectives.

    Evaluates the model tree, appends the summed cost of positive preset
    decisions as an extra objective, snapshots the tree nodes onto the
    point, and maps None objectives to 0.
    """
    model = self.model
    if not point.objectives:
      model.reset_nodes(point.decisions)
      self.model.eval(self.model.get_tree().root)
      point.objectives = self.model.get_tree().evaluate(model, point)
      point.objectives.append(sum(decision.cost for decision in decisions if decision.value > 0))
      point._nodes = [node.clone() for node in model.get_tree().nodes.values()]
      point.objectives = [0 if one is None else one for one in point.objectives]
    return point.objectives

  def prune(self, support, check_validity):
    """For each prefix of the ranked `support` decisions, generate and
    evaluate a population; returns (objective stats, generations,
    objective map)."""
    gens = []
    for i in range(len(support)):
      # first iteration presets nothing (support[:0] == [])
      decisions = support[:i]
      population = self.generate(decisions, check_validity=check_validity)
      for point in population:
        self.evaluate(point, decisions)
      gens.append(population)
    obj_stats, objective_map = self.objective_stats(gens)
    return obj_stats, gens, objective_map

  def report(self, stats, sub_folder, fig_name):
    """Render a med-spread plot of `stats` and return the image path.

    NOTE(review): the first `headers` assignment is immediately
    overwritten by the second -- the 4-entry list is dead code.
    """
    #headers = [obj.__name__.split("_")[-1] for obj in self.de.settings.obj_funcs]
    headers = ["root cost", "root benefit", "softgoals", "preset decisions cost"]
    headers = ["root cost", "root benefit", "softgoals"]
    med_spread_plot(stats, headers, fig_name="img/"+sub_folder+"/"+fig_name+".png")
    return "img/"+sub_folder+"/"+fig_name+".png"

  def to_csv(self, stats, fname):
    """Dump the final generation (sorted by benefit minus cost,
    descending) to CSV, one row per point, and plot its objectives.

    NOTE(review): opening with "wb" for csv.writer is the Python 2 idiom;
    Python 3 needs mode "w" with newline=''.
    """
    directory = fname.rsplit("/", 1)[0]
    mkdir(directory)
    last_gen = sorted(stats.generations[-1], key=lambda x:x.objectives[1]-x.objectives[0], reverse=True)
    self.plot_objectives(last_gen, directory)
    ids = self.model.get_tree().nodes.keys()
    names = [self.model.get_tree().nodes[key].name for key in ids] + ["?cost", "?benefit", "?softgoals"]
    table = [names]
    max_cost, max_benefit = -1, -1
    for point in last_gen:
      row = [point.get_nodes()[key].value for key in ids] + point.objectives
      max_cost = max(point.objectives[0], max_cost)
      max_benefit = max(point.objectives[1], max_benefit)
      table.append(row)
    with open(fname, "wb") as file_obj:
      writer = csv.writer(file_obj)
      writer.writerows(table)

  def plot_objectives(self, points, directory):
    """Plot cost and benefit (2-D and 3-D) against the number of positive
    decisions per point, mirroring the csv/ directory under img/.

    NOTE(review): subscripting zip(...) works only in Python 2, where zip
    returns a list; Python 3 would raise TypeError here.
    """
    directory = directory.replace("csv/", "img/")
    objectives = []
    for point in points:
      # number of decisions set to +1 in this point
      dec_lens = sum([1 if dec == 1 else 0 for dec in point.decisions.values()])
      obj = [dec_lens]
      for o in point.objectives[:2]:
        obj.append(o)
      objectives.append(obj)
    zipped = zip(*objectives)
    x = zipped[0]
    costs = zipped[1]
    benefits = zipped[2]
    tree_name = self.model.get_tree().name
    mkdir(directory)
    point_plot(x, {"cost":costs}, ['ro'], "%s/%s_costs.png"%(directory, tree_name))
    point_plot(x, {"benefit":benefits}, ['bx'], "%s/%s_benefits.png"%(directory, tree_name))
    point_plot_3d(x, costs, benefits, 'r', 'x', "%s/%s_3d.png"%(directory, tree_name),
                  "Number of Decisions", "Costs", "Benefits")

  def visualize(self, decisions):
    """For each ranked decision, build populations with that decision
    forced positive vs negative (earlier decisions fixed to their ranked
    values) and collect per-objective median/IQR tracks.

    Validity checking is applied only on the side matching the decision's
    ranked value. Returns a list of O records, one per decision.
    """
    tracks = []
    for i in range(len(decisions)):
      pos_decs, neg_decs = [], []
      for j in range(len(decisions)):
        if j < i:
          # decisions ranked above i keep their chosen value on both sides
          pos_decs.append(Decision(id = decisions[j].id, value = decisions[j].value,
                                   cost = decisions[j].cost, benefit = decisions[j].benefit))
          neg_decs.append(Decision(id = decisions[j].id, value = decisions[j].value,
                                   cost = decisions[j].cost, benefit = decisions[j].benefit))
        elif j == i:
          # the decision under study is forced +1 on one side, -1 on the other
          pos_decs.append(Decision(id = decisions[j].id, value = +1,
                                   cost = decisions[j].cost, benefit = decisions[j].benefit))
          neg_decs.append(Decision(id = decisions[j].id, value = -1,
                                   cost = decisions[j].cost, benefit = decisions[j].benefit))
        # else:
        #   pos_decs.append(Decision(id = decisions[j].id, value = -1 * decisions[j].value))
        #   neg_decs.append(Decision(id = decisions[j].id, value = -1 * decisions[j].value))
      if decisions[i].value == 1:
        pos_pop = self.generate(pos_decs, check_validity=True)
        neg_pop = self.generate(neg_decs, check_validity=False)
      else:
        pos_pop = self.generate(pos_decs, check_validity=False)
        neg_pop = self.generate(neg_decs, check_validity=True)
      for pos, neg in zip(pos_pop, neg_pop):
        self.evaluate(pos, pos_decs)
        self.evaluate(neg, neg_decs)
      p_meds, p_iqrs = [], []
      n_meds, n_iqrs = [], []
      for o_i in range(len(pos_pop[0].objectives)):
        p_objs = [pt.objectives[o_i] for pt in pos_pop]
        n_objs = [pt.objectives[o_i] for pt in neg_pop]
        med, iqr = median_iqr(p_objs)
        p_meds.append(med)
        p_iqrs.append(iqr)
        med, iqr = median_iqr(n_objs)
        n_meds.append(med)
        n_iqrs.append(iqr)
      tracks.append(O(id = decisions[i].id, name = decisions[i].name,
                      pos_meds = p_meds, pos_iqrs = p_iqrs,
                      neg_meds = n_meds, neg_iqrs = n_iqrs,
                      prefered_value = decisions[i].value,
                      cost = decisions[i].cost, benefit = decisions[i].benefit))
    return tracks




  @staticmethod
  def get_elbow(gens, index, obj_index=None):
    """Return the median point of generation `index` when sorted by
    objective `obj_index`.

    NOTE(review): the obj_index=None default would raise a TypeError in
    the sort key -- callers are expected to pass an int.
    """
    pop = gens[index]
    pop = sorted(pop, key=lambda x: x.objectives[obj_index])
    point = pop[len(pop)//2]
    return point
# Beispiel #6
def run_de(graph):
    """Build a Model from `graph`, run DE on it, and render tile plots."""
    engine = DE(Model(graph))
    results = engine.run()
    results.tiles()
# Beispiel #7
def run_de(graph):
    """Run differential evolution on a Model wrapping `graph` and draw
    the tiles of the resulting run statistics."""
    wrapped = Model(graph)
    de_stats = DE(wrapped).run()
    de_stats.tiles()
            % (median(fbest_r)))
        if runs > 1:
            results.write('Gbest Standard Deviation: %.4f\n\n' %
                          (stdev(fbest_r)))
        results.write('Elappsed Time Average: %.4f\n' %
                      (sum(elapTime_r) / len(elapTime_r)))
        if runs > 1:
            results.write('Elappsed Time Standard Deviation: %.4f\n' %
                          (stdev(elapTime_r)))
        results.write(
            '=================================================================================================================\n'
        )


if __name__ == '__main__':
    from de import DE

    # experiment setup: 2-D search over [-5.12, 5.12]^2, population of
    # 20, 100 iterations, repeated over 10 independent runs
    max_iterations = 100
    pop_size = 20
    dim = 2
    runs = 10
    bounds = ((-5.12, 5.12), (-5.12, 5.12))
    p = DE()
    # minimization (maximize=False); operator=0 presumably selects the
    # DE variant -- confirm against the de module. NOTE(review): the
    # method name is spelled 'diferentialEvolution' in the DE API.
    p.diferentialEvolution(pop_size,
                           dim,
                           bounds,
                           max_iterations,
                           runs,
                           maximize=False,
                           operator=0)
# Beispiel #9
import benchmark_functions as bf
import matplotlib.pyplot as plt
from ga import GA
from de import DE

# Select function for minimization
f = bf.Sphere(dim=2)
# View 3d plot of the function (only drawable when the function is 2-D)
if f.dim == 2:
    bf.plot3d(function=f, show_contour=True)
    plt.show()
# Create algorithm, Differential evolution or Genetic algorithm.
# DE: F and Cr are presumably the differential weight and crossover rate
# (standard DE naming) -- confirm against the de module.
de = DE(obj_function=f, F=0.8, Cr=0.3, gen_max=10000)
# GA: parameter semantics (k, pm, sigma) inferred from names only --
# NOTE(review): verify against the ga module.
ga = GA(obj_function=f, k=70, pm=0.5, gen_max=10000, sigma=2)
# Find solution; f.epsilon is passed as the error tolerance to both
de_solution, de_gen = de.search(show_progress=False, error_tolerance=f.epsilon)
ga_solution, ga_gen = ga.search(show_progress=False,
                                error_tolerance=f.epsilon,
                                c=0.9,
                                msc=50)
# Print results
print("============== Differential evolution ==============")
print(
    "Solution {:f} found in {:d} generations. Optimal value is {:f}. Error is: {:f}."
    .format(f.value(de_solution), de_gen, f.optimum,
            f.value(de_solution) - f.optimum))

print("============== Genetic algorithm ==============")
print(
    "Solution {:f} found in {:d} generations. Optimal value is {:f}. Error is: {:f}."
    .format(f.value(ga_solution), ga_gen, f.optimum,