Code Example #1
File: genetic_algorithm.py Project: mambocab/sbse14
    def run(self, text_report=True):
        '''run the genetic algorithm on self.model'''
        # seed the search with a random initial population of input vectors
        init_xs = tuple(self.model.random_input_vector()
                        for _ in xrange(self.spec.population_size))
        get_energy = lambda x: x.energy
        best_era = None

        report = base.StringBuilder() if text_report else base.NullObject()

        self._population = tuple(self.model.compute_model_io(xs)
                                 for xs in init_xs)

        best = min(self._population, key=get_energy)

        self._evals, lives = 0, 4

        for gen in xrange(self.spec.iterations):
            if self._evals > self.spec.iterations or lives <= 0:
                break

            prev_best_energy = best.energy

            self._population = self._breed_next_generation()

            best_in_generation = min(self._population, key=get_energy)
            best = min(best, best_in_generation, key=get_energy)

            # log the best energy so far, then mark each individual that beat
            # the previous generation's best with '+' and the rest with '.'
            report += str(best.energy)
            report += ('+' if x.energy < prev_best_energy else '.'
                       for x in self._population)
            report += '\n'

            energies = NumberLog(inits=(c.energy for c in self._population))
            try:
                # prev_energies does not exist yet in the first generation
                improved = energies.better(prev_energies)
            except NameError:
                improved = False
            prev_energies = energies  # noqa: flake8 doesn't catch use above

            if improved:
                best_era = energies
            else:
                lives -= 1

        if best_era is None:
            best_era = energies

        return SearchReport(best=best.energy,
                            best_era=best_era,
                            evaluations=self._evals,
                            searcher=self.__class__,
                            spec=self.spec,
                            report=report)
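The _breed_next_generation call above is defined elsewhere in genetic_algorithm.py and is not shown in this excerpt. The sketch below is a hypothetical version of that step, assuming tournament selection, single-point crossover, and per-gene mutation controlled by a p_mutation spec field; those choices, and the assumption that the random module is imported, are illustrative rather than the actual sbse14 implementation.

    def _breed_next_generation(self):
        # hypothetical sketch: selection, crossover and mutation details are
        # assumptions, not the real sbse14 code
        def tournament():
            # pick two individuals at random and keep the lower-energy one
            a, b = random.sample(self._population, 2)
            return min(a, b, key=lambda m: m.energy)

        children = []
        for _ in xrange(self.spec.population_size):
            mom, dad = tournament(), tournament()
            # single-point crossover of the parents' input vectors
            point = random.randint(1, len(mom.xs) - 1)
            xs = mom.xs[:point] + dad.xs[point:]
            # mutate each gene with probability p_mutation (assumed spec field)
            xs = tuple(r if random.random() < self.spec.p_mutation else x
                       for x, r in zip(xs, self.model.random_input_vector()))
            children.append(self.model.compute_model_io(xs))
        self._evals += len(children)
        return tuple(children)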
Code Example #2
File: simulated_annealer.py Project: mambocab/sbse14
 def __init__(self, *args, **kwargs):
     super(SimulatedAnnealer, self).__init__(*args, **kwargs)
     self._current = self.model.random_model_io()
     self._best = self._current  # assumes current is immutable
     self._lives = 4
     self._best_era = None
     self._current_era_energies = NumberLog(max_size=None)
Code Example #3
    def __init__(self, *args, **kwargs):
        super(ParticleSwarmOptimizer, self).__init__(*args, **kwargs)

        self._flock = tuple(self._new_particle()
                            for _ in range(self.spec.population_size))
        self._evals = len(self._flock)
        self._current_flock_energies = NumberLog(p.energy
                                                 for p in self._flock)
        self._best = min(self._flock, key=lambda x: x.energy)
        self._lives = 4
Code Example #4
File: log_test.py Project: mambocab/sbse14
class TestNumberLog(TestCase):
    def setUp(self):  # noqa
        self.max_size = 64
        self.log = NumberLog(max_size=self.max_size)
        random.seed(7)

    def test_validation(self):
        self.log += 48.8
        self.log += 14.24

        # given current implementation, should always be sorted,
        # regardless of insertion order
        assert_equal(sorted(self.log._cache), self.log._cache)
        assert not self.log._valid_statistics

        self.log._prepare_data()

        assert self.log._valid_statistics
        assert_equal(sorted(self.log._cache), self.log._cache)

    def test_invalidation(self):
        self.log += 48.8
        self.log += 14.24
        self.log._prepare_data()

        # make sure validness actually changes
        assert self.log._valid_statistics

        self.log += 56.4

        assert not self.log._valid_statistics

    def test_len_n(self):
        n = 2000
        for _ in xrange(n):
            self.log += 2
        assert_equal(self.log._n, n)
        assert_equal(len(self.log), self.max_size)

    def test_max_size(self):
        for x in xrange(2000):
            self.log += 2
        assert len(self.log._cache) == self.max_size
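Taken together, these tests pin down the NumberLog interface: values are added with +=, the _cache stays sorted and never grows past max_size, _n counts every value ever added, and _valid_statistics is cleared by insertion and restored by _prepare_data(). A minimal sketch that satisfies them is shown below; the overflow eviction policy, the default max_size, and the median-based better() comparison used by the searchers are assumptions, not the actual sbse14 implementation.

import bisect


class NumberLog(object):
    """Minimal sketch of a log of numbers consistent with the tests above."""

    def __init__(self, inits=None, max_size=64):
        self.max_size = max_size
        self._cache = []             # kept sorted on every insert
        self._n = 0                  # count of every value ever added
        self._valid_statistics = False
        for x in inits or ():
            self.add(x)

    def add(self, x):
        self._n += 1
        bisect.insort(self._cache, x)              # insert, keeping order
        if self.max_size is not None and len(self._cache) > self.max_size:
            self._cache.pop()                      # assumption: drop largest
        self._valid_statistics = False             # stats need recomputing

    def __iadd__(self, x):
        self.add(x)
        return self

    def __len__(self):
        return len(self._cache)

    def _prepare_data(self):
        # recompute any cached statistics; here we only flip the flag
        self._valid_statistics = True

    def median(self):
        if not self._valid_statistics:
            self._prepare_data()
        return self._cache[len(self._cache) // 2]

    def better(self, other):
        # assumption: a log "beats" another if its median energy is lower
        return self.median() < other.median()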
Code Example #5
class ParticleSwarmOptimizer(Searcher):
    """
    A searcher that models a "flock" of individuals roaming the search space.
    Individuals make decisions about where to go next based both on their
    own experience and on the experience of the whole group. For more
    information, see https://github.com/timm/sbse14/wiki/pso#details and
    http://en.wikipedia.org/wiki/Particle_swarm_optimization
    """

    def __init__(self, *args, **kwargs):
        super(ParticleSwarmOptimizer, self).__init__(*args, **kwargs)

        self._flock = tuple(self._new_particle()
                            for _ in range(self.spec.population_size))
        self._evals = len(self._flock)
        self._current_flock_energies = NumberLog(p.energy
                                                 for p in self._flock)
        self._best = min(self._flock, key=lambda x: x.energy)
        self._lives = 4
        self._best_flock = None

    def _new_particle(self):
        return Particle(self.model, self.spec.phi1, self.spec.phi2)

    def _update(self):
        self._prev_flock_energies = self._current_flock_energies

        for p in self._flock:
            p._update(self._best)
        self._evals += len(self._flock)
        self._current_flock_energies = NumberLog(p.energy
                                                 for p in self._flock)

        self._best = min(self._best, *self._current_flock_energies)
        if self._current_flock_energies.better(self._prev_flock_energies) or self._best_flock is None:
            self._best_flock = self._flock
        else:
            self._lives -= 1

    def run(self, text_report=False):
        for i in range(self.spec.generations):
            self._update()
            if self._lives <= 0 or self._evals >= self.spec.iterations:
                break

        best_flock_energies = NumberLog(p.energy for p in self._best_flock)
        return SearchReport(best=self._best,
                            best_era=best_flock_energies,
                            evaluations=self._evals,
                            searcher=self.__class__,
                            spec=self.spec,
                            report=None)
Code Example #6
    def run(self, text_report=False):
        for i in range(self.spec.generations):
            self._update()
            if self._lives <= 0 or self._evals >= self.spec.iterations:
                break

        best_flock_energies = NumberLog(p.energy for p in self._best_flock)
        return SearchReport(best=self._best,
                            best_era=best_flock_energies,
                            evaluations=self._evals,
                            searcher=self.__class__,
                            spec=self.spec,
                            report=None)
Code Example #7
class ParticleSwarmOptimizer(Searcher):
    """
    A searcher that models a "flock" of individuals roaming the search space.
    Individuals make decisions about where to go next based both on their
    own experience and on the experience of the whole group. For more
    information, see https://github.com/timm/sbse14/wiki/pso#details and
    http://en.wikipedia.org/wiki/Particle_swarm_optimization
    """
    def __init__(self, *args, **kwargs):
        super(ParticleSwarmOptimizer, self).__init__(*args, **kwargs)

        self._flock = tuple(self._new_particle()
                            for _ in range(self.spec.population_size))
        self._evals = len(self._flock)
        self._current_flock_energies = NumberLog(p.energy for p in self._flock)
        self._best = min(self._flock, key=lambda x: x.energy)
        self._lives = 4
        self._best_flock = None

    def _new_particle(self):
        return Particle(self.model, self.spec.phi1, self.spec.phi2)

    def _update(self):
        self._prev_flock_energies = self._current_flock_energies

        for p in self._flock:
            p._update(self._best)
        self._evals += len(self._flock)
        self._current_flock_energies = NumberLog(p.energy for p in self._flock)

        self._best = min(self._best, *self._current_flock_energies)
        if self._current_flock_energies.better(
                self._prev_flock_energies) or self._best_flock is None:
            self._best_flock = self._flock
        else:
            self._lives -= 1

    def run(self, text_report=False):
        for i in range(self.spec.generations):
            self._update()
            if self._lives <= 0 or self._evals >= self.spec.iterations:
                break

        best_flock_energies = NumberLog(p.energy for p in self._best_flock)
        return SearchReport(best=self._best,
                            best_era=best_flock_energies,
                            evaluations=self._evals,
                            searcher=self.__class__,
                            spec=self.spec,
                            report=None)
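The Particle class built by _new_particle and advanced with p._update(self._best) does not appear in these excerpts. The sketch below follows the standard particle swarm update, where phi1 weights the pull toward the particle's own best position and phi2 the pull toward the flock's best; it assumes the random module is imported, that compute_model_io yields an object with an energy, and that the argument passed to _update exposes an xs position. The real sbse14 class may differ.

class Particle(object):
    """Hypothetical sketch of a PSO particle; not the actual sbse14 class."""

    def __init__(self, model, phi1, phi2):
        self.model = model
        self.phi1, self.phi2 = phi1, phi2          # cognitive / social weights
        self.xs = list(model.random_input_vector())
        self.velocity = [0.0] * len(self.xs)
        self.energy = model.compute_model_io(tuple(self.xs)).energy
        self.best_xs, self.best_energy = list(self.xs), self.energy

    def _update(self, flock_best):
        # accelerate toward this particle's best position and the flock's
        # best position, then move and re-evaluate
        for i, x in enumerate(self.xs):
            r1, r2 = random.random(), random.random()
            self.velocity[i] += (self.phi1 * r1 * (self.best_xs[i] - x) +
                                 self.phi2 * r2 * (flock_best.xs[i] - x))
            self.xs[i] = x + self.velocity[i]
        self.energy = self.model.compute_model_io(tuple(self.xs)).energy
        if self.energy < self.best_energy:
            self.best_xs, self.best_energy = list(self.xs), self.energy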
Code Example #8
    def _update(self):
        self._prev_flock_energies = self._current_flock_energies

        for p in self._flock:
            p._update(self._best)
        self._evals += len(self._flock)
        self._current_flock_energies = NumberLog(p.energy
                                                 for p in self._flock)

        self._best = min(self._best, *self._current_flock_energies)
        if self._current_flock_energies.better(self._prev_flock_energies) or self._best_flock is None:
            self._best_flock = self._flock
        else:
            self._lives -= 1
Code Example #9
    def run(self, text_report=True):
        n_candiates = self.spec.n_candiates
        self._frontier = [
            self.model.random_model_io() for _ in xrange(n_candiates)
        ]
        self._evals, lives = 0, 4

        for _ in xrange(self.spec.generations):
            if lives <= 0:
                break
            old_frontier_energies = NumberLog(x.energy for x in self._frontier)
            self._update_frontier()
            new_frontier_energies = NumberLog(x.energy for x in self._frontier)
            if not new_frontier_energies.better(old_frontier_energies):
                lives -= 1

        return SearchReport(
            best=min(self._frontier, key=lambda x: x.energy).energy,
            best_era=NumberLog(inits=(x.energy for x in self._frontier)),
            evaluations=self._evals,
            searcher=self.__class__,
            spec=self.spec,
            report=None)
Code Example #10
    def run(self, text_report=True):
        n_candiates = self.spec.n_candiates
        self._frontier = [self.model.random_model_io()
                          for _ in xrange(n_candiates)]
        self._evals, lives = 0, 4

        for _ in xrange(self.spec.generations):
            if lives <= 0:
                break
            old_frontier_energies = NumberLog(x.energy
                                              for x in self._frontier)
            self._update_frontier()
            new_frontier_energies = NumberLog(x.energy
                                              for x in self._frontier)
            if not new_frontier_energies.better(old_frontier_energies):
                lives -= 1

        return SearchReport(
            best=min(self._frontier, key=lambda x: x.energy).energy,
            best_era=NumberLog(inits=(x.energy for x in self._frontier)),
            evaluations=self._evals,
            searcher=self.__class__,
            spec=self.spec,
            report=None)
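_update_frontier is referenced but not included in these excerpts; the surrounding loop only shows that it rewrites self._frontier and that a life is spent whenever the new frontier's energies are not better than the old ones. A hypothetical, differential-evolution-style sketch of such an update is given below; the f and cf spec fields (differential weight and crossover probability) and the use of compute_model_io are assumptions, not the actual project code.

    def _update_frontier(self):
        # hypothetical DE-style update; spec.f and spec.cf are assumed fields
        for i, old in enumerate(self._frontier):
            # pick three distinct other members of the frontier
            others = [x for j, x in enumerate(self._frontier) if j != i]
            a, b, c = random.sample(others, 3)
            # build a trial input vector by extrapolating between the picks
            xs = tuple(a_x + self.spec.f * (b_x - c_x)
                       if random.random() < self.spec.cf else old_x
                       for old_x, a_x, b_x, c_x
                       in zip(old.xs, a.xs, b.xs, c.xs))
            candidate = self.model.compute_model_io(xs)
            self._evals += 1
            # keep whichever of the old member and the trial has lower energy
            if candidate.energy < old.energy:
                self._frontier[i] = candidate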
Code Example #11
File: simulated_annealer.py Project: mambocab/sbse14
    def _end_era(self):
        self._report += ('\n', '{: .2}'.format(self._best.energy), ' ')
        if not self._best_era:
            self._best_era = self._current_era_energies

        try:
            improved = self._current_era_energies.better(
                self._prev_era_energies)
        except AttributeError:
            improved = False
        if improved:
            self._best_era = self._current_era_energies
        else:
            self._lives -= 1

        self._prev_era_energies = self._current_era_energies
        self._current_era_energies = NumberLog(max_size=None)
Code Example #12
File: maxwalksat.py Project: vivekaxl/Courses
    def _end_era(self):
        self._report += ('\n{: .2}'.format(self._best.energy), ' ')

        # _prev_era won't exist in era 0, so account for that case
        try:
            improved = self._current_era.better(self._prev_era)
        except AttributeError:
            improved = False
        self._prev_era = self._current_era

        # track best_era
        if improved or self._best_era is None:
            self._best_era = self._current_era
        else:
            self._lives -= 1

        if self._lives <= 0:
            self._terminate = True
        else:
            self._current_era = NumberLog()
Code Example #13
File: maxwalksat.py Project: vivekaxl/Courses
    def run(self, text_report=True):
        '''run MaxWalkSat on self.model'''

        # current ModelIO to evaluate and mutate
        self._current = self.model.random_model_io()
        self._best = self._current
        # initialize and update log variables to track values by era
        self._current_era = NumberLog()
        self._current_era += self._current.energy
        self._best_era = None
        # bookkeeping variables
        self._evals = 0
        self._lives = 4
        self._report = StringBuilder() if text_report else NullObject()
        self._terminate = False

        while self._evals < self.spec.iterations and not self._terminate:
            # get the generator for a random independent variable

            if self.spec.p_mutation > random.random():
                # if not searching a dimension, mutate randomly
                self._update('+')
            else:
                # if doing a local search, choose a dimension
                dimension = base.random_index(self._current.xs)
                search_iv = self.model.xs[dimension]
                # then try points all along the dimension
                lo, hi = search_iv.lo, search_iv.hi
                for j in self._local_search_xs(lo, hi, 10):
                    self._update('|', dimension=dimension, value=j)

        return SearchReport(best=self._best.energy,
                            best_era=self._best_era,
                            evaluations=self._evals,
                            searcher=self.__class__,
                            spec=self.spec,
                            report=self._report)
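_local_search_xs(lo, hi, 10) is used above but defined elsewhere; the comments only require it to yield a handful of candidate values spread along the chosen dimension. A minimal sketch, assuming evenly spaced points between the variable's bounds:

    @staticmethod
    def _local_search_xs(lo, hi, n):
        # yield n evenly spaced candidate values from lo to hi inclusive
        step = (hi - lo) / float(n - 1)
        for i in xrange(n):
            yield lo + i * step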
Code Example #14
File: simulated_annealer.py Project: mambocab/sbse14
class SimulatedAnnealer(Searcher):
    """
    A searcher that works by mostly-dumb stochastic search that starts with
    lots of random jumps, then makes fewer random jumps, simulating a cooling
    process. See http://en.wikipedia.org/wiki/Simulated_annealing and
    https://github.com/timm/sbse14/wiki/sa for more information.
    """

    def __init__(self, *args, **kwargs):
        super(SimulatedAnnealer, self).__init__(*args, **kwargs)
        self._current = self.model.random_model_io()
        self._best = self._current  # assumes current is immutable
        self._lives = 4
        self._best_era = None
        self._current_era_energies = NumberLog(max_size=None)

    def run(self, text_report=True):
        """
        Run the SimulatedAnnealer on the model specified at object
        instantiation time.
        """
        self._report = StringBuilder() if text_report else NullObject()
        evals = None

        for k in range(self.spec.iterations):
            if self._lives <= 0 and self.spec.terminate_early:
                evals = k
                break
            self._update(k / self.spec.iterations)
            if k % self.spec.era_length == 0 and k != 0:
                self._end_era()

        if evals is None:
            evals = self.spec.iterations
        return SearchReport(best=self._best.energy, evaluations=evals,
                            best_era=self._best_era, spec=self.spec,
                            searcher=self.__class__, report=self._report)

    def _mutate(self, xs):
        return tuple(xs[i] if random.random() < self.spec.p_mutation else v
                     for i, v in enumerate(self.model.random_input_vector()))

    def _get_neighbor(self, model_io):
        neighbor = None
        while neighbor is None:
            gen = self._mutate(model_io.xs)
            try:
                neighbor = self.model(tuple(gen), io=True)
            except ModelInputException:
                pass

        return neighbor

    def _end_era(self):
        self._report += ('\n', '{: .2}'.format(self._best.energy), ' ')
        if not self._best_era:
            self._best_era = self._current_era_energies

        try:
            improved = self._current_era_energies.better(
                self._prev_era_energies)
        except AttributeError:
            improved = False
        if improved:
            self._best_era = self._current_era_energies
        else:
            self._lives -= 1

        self._prev_era_energies = self._current_era_energies
        self._current_era_energies = NumberLog(max_size=None)

    def _update(self, temperature):
        """update the state of the annealer"""
        # generate new neighbor
        neighbor = self._get_neighbor(self._current)
        self._current_era_energies += neighbor.energy

        # compare neighbor and update best
        if neighbor.energy < self._best.energy:
            self._best, self._current = neighbor, neighbor
            self._report += '!'

        if neighbor.energy < self._current.energy:
            self._current = neighbor
            self._report += '+'
        else:
            # if neighbor is worse than current, we still jump there sometimes
            cnorm = self.model.normalize(self._current.energy)
            nnorm = self.model.normalize(neighbor.energy)
            # occasionally jump to neighbor, even if it's a bad idea
            if self._good_idea(cnorm, nnorm, temperature) < random.random():
                self._current = neighbor
                self._report += '?'

        self._report += '.'

    def _good_idea(self, old, new, temp):
        """
        sets the threshold we compare against when deciding whether to jump

        returns e^-((new-old)/temp)
        """
        numerator = new - old

        if not 0 <= numerator <= 1:
            numerator = old - new
        try:
            exponent = numerator / temp
        except ZeroDivisionError:
            return 0
        rv = math.exp(-exponent)
        if rv > 1:
            raise ValueError('p returning greater than one',
                             rv, old, new, temp)
        return rv * self.spec.cooling_factor
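A quick numeric check of _good_idea helps when reading _update: with normalized energies old = 0.2 and new = 0.5, the numerator is 0.3, so at temperature 0.5 the exponent is 0.3 / 0.5 = 0.6 and the method returns exp(-0.6) ≈ 0.549 scaled by cooling_factor. _update jumps to the worse neighbor only when that value falls below a uniform random draw, and when the temperature argument is near zero the ZeroDivisionError branch returns 0, so such jumps happen almost every time. The snippet below just evaluates the formula; the cooling_factor of 0.6 is an illustrative assumption.

import math

cooling_factor = 0.6             # illustrative value, not taken from any spec
old, new, temp = 0.2, 0.5, 0.5   # normalized energies, mid-run temperature
rv = math.exp(-((new - old) / temp)) * cooling_factor
print(rv)                        # ~0.33; the annealer jumps to the worse
                                 # neighbor only if a random draw exceeds this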
Code Example #15
File: log_test.py Project: samH99/LexisNexis
 def setUp(self):  # noqa
     self.max_size = 64
     self.log = NumberLog(max_size=self.max_size)
     random.seed(7)
Code Example #16
class SimulatedAnnealer(Searcher):
    """
    A searcher that works by mostly-dumb stochastic search that starts with
    lots of random jumps, then makes fewer random jumps, simulating a cooling
    process. See http://en.wikipedia.org/wiki/Simulated_annealing and
    https://github.com/timm/sbse14/wiki/sa for more information.
    """
    def __init__(self, *args, **kwargs):
        super(SimulatedAnnealer, self).__init__(*args, **kwargs)
        self._current = self.model.random_model_io()
        self._best = self._current  # assumes current is immutable
        self._lives = 4
        self._best_era = None
        self._current_era_energies = NumberLog(max_size=None)

    def run(self, text_report=True):
        """
        Run the SimulatedAnnealer on the model specified at object
        instantiation time.
        """
        self._report = StringBuilder() if text_report else NullObject()
        evals = None

        for k in range(self.spec.iterations):
            if self._lives <= 0 and self.spec.terminate_early:
                evals = k
                break
            self._update(k / self.spec.iterations)
            if k % self.spec.era_length == 0 and k != 0:
                self._end_era()

        if evals is None:
            evals = self.spec.iterations
        return SearchReport(best=self._best.energy,
                            evaluations=evals,
                            best_era=self._best_era,
                            spec=self.spec,
                            searcher=self.__class__,
                            report=self._report)

    def _mutate(self, xs):
        return tuple(xs[i] if random.random() < self.spec.p_mutation else v
                     for i, v in enumerate(self.model.random_input_vector()))

    def _get_neighbor(self, model_io):
        neighbor = None
        while neighbor is None:
            gen = self._mutate(model_io.xs)
            try:
                neighbor = self.model(tuple(gen), io=True)
            except ModelInputException:
                pass

        return neighbor

    def _end_era(self):
        self._report += ('\n', '{: .2}'.format(self._best.energy), ' ')
        if not self._best_era:
            self._best_era = self._current_era_energies

        try:
            improved = self._current_era_energies.better(
                self._prev_era_energies)
        except AttributeError:
            improved = False
        if improved:
            self._best_era = self._current_era_energies
        else:
            self._lives -= 1

        self._prev_era_energies = self._current_era_energies
        self._current_era_energies = NumberLog(max_size=None)

    def _update(self, temperature):
        """update the state of the annealer"""
        # generate new neighbor
        neighbor = self._get_neighbor(self._current)
        self._current_era_energies += neighbor.energy

        # compare neighbor and update best
        if neighbor.energy < self._best.energy:
            self._best, self._current = neighbor, neighbor
            self._report += '!'

        if neighbor.energy < self._current.energy:
            self._current = neighbor
            self._report += '+'
        else:
            # if neighbor is worse than current, we still jump there sometimes
            cnorm = self.model.normalize(self._current.energy)
            nnorm = self.model.normalize(neighbor.energy)
            # occasionally jump to neighbor, even if it's a bad idea
            if self._good_idea(cnorm, nnorm, temperature) < random.random():
                self._current = neighbor
                self._report += '?'

        self._report += '.'

    def _good_idea(self, old, new, temp):
        """
        sets the threshold we compare against when deciding whether to jump

        returns e^-((new-old)/temp)
        """
        numerator = new - old

        if not 0 <= numerator <= 1:
            numerator = old - new
        try:
            exponent = numerator / temp
        except ZeroDivisionError:
            return 0
        rv = math.exp(-exponent)
        if rv > 1:
            raise ValueError('p returning greater than one', rv, old, new,
                             temp)
        return rv * self.spec.cooling_factor