Example #1
def setUp(self):
    self.D, self.nFES, self.nGEN = 10, 10, 10
    self.t = StoppingTask(D=self.D, nFES=self.nFES, nGEN=self.nGEN, refValue=1, benchmark=MyBenchmark())
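The fragment above only shows how the task is constructed. For context, a minimal manual driver loop over such a task might look like the sketch below; it uses only methods that appear in the tests later on this page (eval, nextIter, stopCond) and substitutes the Sphere benchmark for MyBenchmark, so treat it as an illustration rather than the original test's code.

import numpy as np
from NiaPy.task import StoppingTask
from NiaPy.benchmarks import Sphere

task = StoppingTask(D=10, nFES=10, nGEN=10, benchmark=Sphere())
best_fx = float('inf')
while not task.stopCond():                         # stops once nFES or nGEN is reached
    x = np.random.uniform(task.Lower, task.Upper)  # random candidate within the task bounds
    best_fx = min(best_fx, task.eval(x))           # eval() counts one function evaluation
    task.nextIter()                                # nextIter() counts one generation
print(best_fx)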
Example #2
# encoding=utf8
# This is a temporary fix to import the module from the parent folder
# It will be removed when the package is published on PyPI
import sys

sys.path.append('../')

from NiaPy.algorithms.basic import MonarchButterflyOptimization
from NiaPy.task import StoppingTask
from NiaPy.benchmarks import Sphere

# we will run the Monarch Butterfly Optimization algorithm for 5 independent runs
for i in range(5):
    task = StoppingTask(D=10, nFES=10000, benchmark=Sphere())
    algo = MonarchButterflyOptimization(NP=20, PAR=5.0 / 12.0, PER=1.2)
    best = algo.run(task)
    print('%s -> %s' % (best[0], best[1]))
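run() returns the best solution together with its fitness value, which is why best[0] and best[1] are printed above. The same pair can be unpacked directly, as in the brief sketch below; note that a StoppingTask is exhausted after a single run, so a fresh task is built for it.

task2 = StoppingTask(D=10, nFES=10000, benchmark=Sphere())
best_x, best_fx = MonarchButterflyOptimization(NP=20, PAR=5.0 / 12.0, PER=1.2).run(task2)
print('%s -> %s' % (best_x, best_fx))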
Example #3
def setUp(self):
    self.D = 6
    self.Lower, self.Upper = [2, 1, 1], [10, 10, 2]
    self.task = StoppingTask(Lower=self.Lower, Upper=self.Upper, D=self.D)
Example #4
class StoppingTaskTestCase(TestCase):
    r"""Test case for testing `Task`, `StoppingTask` and `CountingTask` classes.

    Date:
            April 2019

    Author:
            Klemen Berkovič

    See Also:
            * :class:`NiaPy.util.Task`
            * :class:`NiaPy.util.CountingTask`
            * :class:`NiaPy.util.StoppingTask`
    """

    def setUp(self):
        self.D, self.nFES, self.nGEN = 10, 10, 10
        self.t = StoppingTask(D=self.D, nFES=self.nFES, nGEN=self.nGEN, refValue=1, benchmark=MyBenchmark())

    def test_isFeasible_fine(self):
        x = full(self.D, 10)
        self.assertTrue(self.t.isFeasible(x))
        x = full(self.D, -10)
        self.assertTrue(self.t.isFeasible(x))
        x = rnd.uniform(-10, 10, self.D)
        self.assertTrue(self.t.isFeasible(x))
        x = full(self.D, -20)
        self.assertFalse(self.t.isFeasible(x))
        x = full(self.D, 20)
        self.assertFalse(self.t.isFeasible(x))

    def test_nextIter_fine(self):
        for i in range(self.nGEN):
            self.assertFalse(self.t.stopCond())
            self.t.nextIter()
        self.assertTrue(self.t.stopCond())

    def test_stopCondI(self):
        for i in range(self.nGEN):
            self.assertFalse(self.t.stopCondI(), msg='Error at %s iteration!!!' % (i))
        self.assertTrue(self.t.stopCondI())

    def test_eval_fine(self):
        x = full(self.D, 1.0)
        for i in range(self.nFES):
            self.assertAlmostEqual(self.t.eval(x), self.D, msg='Error at %s iteration!!!' % (i))
        self.assertTrue(self.t.stopCond())

    def test_eval_over_nFES_fine(self):
        x = full(self.D, 1.0)
        for i in range(self.nFES):
            self.t.eval(x)
        self.assertEqual(inf, self.t.eval(x))
        self.assertTrue(self.t.stopCond())

    def test_eval_over_nGEN_fine(self):
        x = full(self.D, 1.0)
        for i in range(self.nGEN):
            self.t.nextIter()
        self.assertEqual(inf, self.t.eval(x))
        self.assertTrue(self.t.stopCond())

    def test_nFES_count_fine(self):
        x = full(self.D, 1.0)
        for i in range(self.nFES):
            self.t.eval(x)
            self.assertEqual(self.t.Evals, i + 1, 'Error at %s. evaluation' % (i + 1))

    def test_nGEN_count_fine(self):
        x = full(self.D, 1.0)
        for i in range(self.nGEN):
            self.t.nextIter()
            self.assertEqual(self.t.Iters, i + 1, 'Error at %s. iteration' % (i + 1))

    def test_stopCond_evals_fine(self):
        x = full(self.D, 1.0)
        for i in range(self.nFES - 1):
            self.t.eval(x)
            self.assertFalse(self.t.stopCond())
        self.t.eval(x)
        self.assertTrue(self.t.stopCond())

    def test_stopCond_iters_fine(self):
        x = full(self.D, 1.0)
        for i in range(self.nGEN - 1):
            self.t.nextIter()
            self.assertFalse(self.t.stopCond())
        self.t.nextIter()
        self.assertTrue(self.t.stopCond())

    def test_stopCond_refValue_fine(self):
        x = full(self.D, 1.0)
        for i in range(self.nGEN - 5):
            self.assertFalse(self.t.stopCond())
            self.assertEqual(self.D, self.t.eval(x))
            self.t.nextIter()
        x = full(self.D, 0.0)
        self.assertEqual(0, self.t.eval(x))
        self.assertTrue(self.t.stopCond())
        self.assertEqual(self.nGEN - 5, self.t.Iters)
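This test case is an excerpt from a larger test module, so the names it relies on (TestCase, full, inf, rnd, MyBenchmark) are not imported here. A plausible header for running it standalone would be the sketch below; it assumes MyBenchmark is a Sphere-like benchmark on [-10, 10], which is what the assertions above imply, and is not the original test file's code.

from unittest import TestCase
from numpy import full, inf, random as rnd
from NiaPy.task import StoppingTask
from NiaPy.benchmarks import Benchmark

class MyBenchmark(Benchmark):
    # Hypothetical stand-in: a Sphere-like function on [-10, 10], so that
    # eval(full(D, 1.0)) == D and the feasibility checks above hold.
    def __init__(self):
        Benchmark.__init__(self, -10, 10)

    def function(self):
        return lambda D, x: sum(xi ** 2 for xi in x)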
Example #5
# encoding=utf8
# This is a temporary fix to import the module from the parent folder
# It will be removed when the package is published on PyPI
import sys
sys.path.append('../')
# End of fix

from NiaPy.algorithms.basic import BeesAlgorithm
from NiaPy.task import StoppingTask
from NiaPy.benchmarks import Sphere

# we will run the Bees Algorithm for 5 independent runs
for i in range(5):
    task = StoppingTask(D=20, nGEN=2, benchmark=Sphere())
    algo = BeesAlgorithm(NP=50, m=20, e=10, nep=20, nsp=15, ngh=7)
    best = algo.run(task)
    print('%s -> %s' % (best[0], best[1]))
Example #6
class StoppingTaskBaseTestCase(TestCase):
    r"""Test case for testing `Task`, `StoppingTask` and `CountingTask` classes.

    Date:
            April 2019

    Author:
            Klemen Berkovič

    See Also:
            * :class:`NiaPy.util.Task`
            * :class:`NiaPy.util.CountingTask`
            * :class:`NiaPy.util.StoppingTask`
    """

    def setUp(self):
        self.D = 6
        self.Lower, self.Upper = [2, 1, 1], [10, 10, 2]
        self.task = StoppingTask(Lower=self.Lower, Upper=self.Upper, D=self.D)

    def test_dim_ok(self):
        self.assertEqual(self.D, self.task.D)
        self.assertEqual(self.D, self.task.dim())

    def test_lower(self):
        self.assertTrue(array_equal(fullArray(self.Lower, self.D), self.task.Lower))
        self.assertTrue(array_equal(fullArray(self.Lower, self.D), self.task.bcLower()))

    def test_upper(self):
        self.assertTrue(array_equal(fullArray(self.Upper, self.D), self.task.Upper))
        self.assertTrue(array_equal(fullArray(self.Upper, self.D), self.task.bcUpper()))

    def test_range(self):
        self.assertTrue(array_equal(fullArray(self.Upper, self.D) - fullArray(self.Lower, self.D), self.task.bRange))
        self.assertTrue(array_equal(fullArray(self.Upper, self.D) - fullArray(self.Lower, self.D), self.task.bcRange()))

    def test_ngens(self):
        self.assertEqual(inf, self.task.nGEN)

    def test_nfess(self):
        self.assertEqual(inf, self.task.nFES)

    def test_stop_cond(self):
        self.assertFalse(self.task.stopCond())

    def test_stop_condi(self):
        self.assertFalse(self.task.stopCondI())

    def test_eval(self):
        self.assertRaises(AttributeError, lambda: self.task.eval([]))

    def test_evals(self):
        self.assertEqual(0, self.task.evals())

    def test_iters(self):
        self.assertEqual(0, self.task.iters())

    def test_next_iter(self):
        self.assertEqual(None, self.task.nextIter())

    def test_is_feasible(self):
        self.assertFalse(self.task.isFeasible(fullArray([1, 2, 3], self.D)))
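The bound tests above rely on NiaPy.util.fullArray to expand a short bound list to the full dimension D. The idea can be sketched with plain numpy as below; this is an illustrative helper written under the assumption that the given values are tiled until D entries are filled, not NiaPy's implementation.

import numpy as np

def full_array_sketch(values, D):
    # Repeat the given bound values until the array has D entries.
    values = np.atleast_1d(values).astype(float)
    return np.tile(values, int(np.ceil(D / len(values))))[:D]

print(full_array_sketch([2, 1, 1], 6))  # [2. 1. 1. 2. 1. 1.]
print(full_array_sketch(10, 4))         # [10. 10. 10. 10.]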
Example #7
    def _run(X, y, train_index, val_index, random_seed, optimizer, evaluator,
             benchmark, optimizer_settings, nGENs, continue_opt, cut_type,
             cutting_perc, j):
        opt_settings = es.get_args(optimizer)
        opt_settings.update(optimizer_settings)
        X1 = X
        cuted = []  # Which genes (features) are cut at each step
        fitnesses = []  # Fitness values after every cutting step
        xb, fxb, benchm = None, -1, None
        pop, fpop = None, None
        for nGENp in nGENs:  # One optimization interval per cutting step
            benchm = benchmark(X=X1,
                               y=y,
                               train_indices=train_index,
                               valid_indices=val_index,
                               random_seed=random_seed,
                               evaluator=evaluator)
            task = StoppingTask(D=X1.shape[1],
                                nGEN=nGENp,
                                optType=OptimizationType.MINIMIZATION,
                                benchmark=benchm)

            evo = optimizer(seed=random_seed, **opt_settings)

            # Start a new optimization, continuing from pop and its fitness values.
            pop, fpop, xb, fxb = DynFeatureSelection.runTask(
                evo, task, starting_pop=pop, starting_fpop=fpop)

            if not isinstance(xb, np.ndarray):
                xb = xb.x
            xb = np.copy(xb)

            # Cut the genotype. Four different strategies:
            if cut_type == 'diff':
                # The best and the worst solutions each vote on which features are most
                # common. The difference between the votes determines which features are
                # cut: those more common in the worst solutions and least common in the
                # best ones.
                idx = DynFeatureSelection.cut_n_vote_diff(pop,
                                                          fpop,
                                                          cutting_perc,
                                                          benchm,
                                                          n=25)
            elif cut_type == 'vote_all':
                # Every solution votes on which features are most common. Similar to
                # best_vote_worst, but every solution votes.
                idx = DynFeatureSelection.cut_all_vote_for_worst(
                    pop, fpop, cutting_perc, benchm)
            elif cut_type == 'best_vote_worst':
                # The best solutions vote on which features are most common. The least common features are cut.
                idx = DynFeatureSelection.cut_n_vote(pop,
                                                     fpop,
                                                     cutting_perc,
                                                     benchm,
                                                     n=50)
            elif cut_type == 'worst_vote_best':
                # The worst solutions vote on which features are most common. The most common features are cut.
                idx = DynFeatureSelection.cut_n_vote(pop,
                                                     fpop,
                                                     cutting_perc,
                                                     benchm,
                                                     n=-50)
            cuted.append(idx)  # Log which genes are cut
            fitnesses.append(fxb)  # Log the fitness value after this cutting step

            X1 = np.delete(X1, idx, axis=1)  # Delete the cut columns (features) from the dataset

            # If we want the optimization to continue on the genes that were not cut
            if continue_opt:
                if isinstance(pop[0], np.ndarray):  # If the population is stored as an ndarray
                    pop = np.delete(pop, idx, 1)
                else:  # If the population is stored as Individual objects
                    for ind in pop:
                        ind.x = np.delete(ind.x, idx)
            else:  # Otherwise reset the solutions after cutting (random reinitialization)
                pop = None
                fpop = None

        # Transform the best solution into its phenotype (selected-feature mask)
        xb = benchmark.to_phenotype(xb, benchm.split)

        # Map the solution back to the original feature space: re-insert the cut features as not selected
        for i in range(len(cuted) - 2, -1, -1):
            cut = np.sort(cuted[i])
            for c in cut:
                xb = np.insert(xb, c, False)

        return xb, fxb
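The voting strategies above are easiest to see on a tiny example. The sketch below illustrates the 'diff' idea with plain numpy; it is a simplified reading of the comments (the 0.5 inclusion threshold and n=3 are assumptions), not the DynFeatureSelection implementation.

import numpy as np

def cut_vote_diff_sketch(pop, fpop, cutting_perc, n=3, threshold=0.5):
    # Sort solutions by fitness (ascending, i.e. minimization).
    order = np.argsort(fpop)
    best, worst = pop[order[:n]], pop[order[-n:]]
    # Count how often each feature is selected among the best and the worst solutions.
    best_votes = (best > threshold).sum(axis=0)
    worst_votes = (worst > threshold).sum(axis=0)
    diff = worst_votes - best_votes  # high value: common in the worst, rare in the best
    n_cut = int(cutting_perc * pop.shape[1])
    return np.argsort(diff)[-n_cut:]  # indices of the features to cut

rng = np.random.default_rng(0)
pop, fpop = rng.random((10, 8)), rng.random(10)  # 10 solutions, 8 features
print(cut_vote_diff_sketch(pop, fpop, cutting_perc=0.25))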
Example #8
# This is a temporary fix to import the module from the parent folder
# It will be removed when the package is published on PyPI
import sys
sys.path.append('../')
# End of fix

from NiaPy.algorithms.basic import GreyWolfOptimizer
from NiaPy.task import StoppingTask, OptimizationType
from NiaPy.benchmarks import Pinter

# initialize the Pinter benchmark with custom bounds
pinterCustom = Pinter(-5, 5)

# we will run 10 repetitions of the Grey Wolf Optimizer against the Pinter benchmark function
for i in range(10):
    # first parameter is the dimension of the problem
    # second parameter is the maximum number of generations (iterations)
    # third parameter is the optimization type
    # fourth parameter is the benchmark function
    task = StoppingTask(D=20,
                        nGEN=100,
                        optType=OptimizationType.MINIMIZATION,
                        benchmark=pinterCustom)

    # the NP parameter is the population size
    algo = GreyWolfOptimizer(NP=20)

    # running the algorithm returns the best solution found and its fitness value
    best = algo.run(task)

    # print the best solution and its fitness
    print(best)
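Since OptimizationType is imported above, the same task can also be configured for maximization. The brief sketch below only illustrates the alternative value of the optType parameter; the Pinter function itself is normally minimized.

task_max = StoppingTask(D=20,
                        nGEN=100,
                        optType=OptimizationType.MAXIMIZATION,
                        benchmark=pinterCustom)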
Example #9
# This is a temporary fix to import the module from the parent folder
# It will be removed when the package is published on PyPI
import sys
sys.path.append('../')
# End of fix

from NiaPy.algorithms.basic import GreyWolfOptimizer
from NiaPy.task import StoppingTask

# we will run 10 repetitions of the Grey Wolf Optimizer against the Pinter benchmark function
for i in range(10):
    task = StoppingTask(D=10, nFES=1000, benchmark='pinter')
    algorithm = GreyWolfOptimizer(NP=20)
    best = algorithm.run(task)
    print(best[-1])
Example #10
# encoding=utf8
# This is a temporary fix to import the module from the parent folder
# It will be removed when the package is published on PyPI
import sys
sys.path.append('../')
# End of fix

from NiaPy.task import StoppingTask
from NiaPy.benchmarks import Sphere
from NiaPy.algorithms.other import RandomSearch

for i in range(1):
    task = StoppingTask(D=5, nGEN=5000, benchmark=Sphere())
    algo = RandomSearch()
    best = algo.run(task=task)
    print(best)
Example #11
# encoding=utf8
# This is a temporary fix to import the module from the parent folder
# It will be removed when the package is published on PyPI
import sys

sys.path.append('../')
# End of fix

from NiaPy.task import StoppingTask
from NiaPy.benchmarks import Sphere
from NiaPy.algorithms.basic import CatSwarmOptimization

task = StoppingTask(D=10, nFES=1000, logger=True, benchmark=Sphere())
algo = CatSwarmOptimization()
best = algo.run(task=task)
print('%s -> %s' % (best[0], best[1]))
# plot a convergence graph
task.plot()