def test_multiple_outputs_error_rate_ts():
    """Fitness under 'ER' must equal the error rate on the training mask.

    Fix: ``np.bool`` was deprecated in NumPy 1.20 and removed in 1.24; the
    builtin ``bool`` is the documented replacement for ``astype``.
    """
    from EvoDAG import EvoDAG
    from EvoDAG.node import Add, Min, Max
    y = cl.copy()
    gp = EvoDAG(generations=np.inf, tournament_size=2,
                function_set=[Add, Min, Max],
                early_stopping_rounds=100, time_limit=0.9,
                multiple_outputs=True, fitness_function='ER',
                seed=0, popsize=100)
    gp.X = X[:-1]
    gp.nclasses(y[:-1])
    gp.y = y[:-1]
    gp.create_population()
    a = gp.random_offspring()
    hys = SparseArray.argmax(a.hy)
    hy = np.array(hys.full_array())
    # print(((hys - gp._y_klass).sign().fabs() * gp._mask_ts).sum())
    # builtin bool replaces the removed np.bool alias
    mask = np.array(gp._mask_ts.full_array()).astype(bool)
    # print((y[:-1][mask] != hy[mask]).mean())
    print(-a.fitness, (y[:-1][mask] != hy[mask]).mean())
    assert_almost_equals(-a.fitness, (y[:-1][mask] != hy[mask]).mean())
def test_g_precision():
    """'g_precision' fitness must be prod(per-class precision) - 1."""
    from EvoDAG import EvoDAG
    gp = EvoDAG(generations=np.inf, tournament_size=2,
                early_stopping_rounds=200, time_limit=0.9,
                fitness_function='g_precision', multiple_outputs=True,
                seed=0, popsize=1000)
    gp.y = cl.copy()
    gp.X = X
    gp.create_population()
    off = gp.population.bsf
    # training-split labels and predictions
    ts_index = np.array(gp._mask_ts.index)
    y_ts = np.array(gp._y_klass.full_array())[ts_index]
    hy_ts = np.array(SparseArray.argmax(off.hy).full_array())[ts_index]
    nclasses = gp._bagging_fitness.nclasses
    prec = np.array([(y_ts[hy_ts == k] == k).mean() for k in range(nclasses)])
    expected = np.prod(prec) - 1
    assert gp._fitness_function == 'g_precision'
    gp._bagging_fitness.set_fitness(off)
    assert_almost_equals(expected, off.fitness)
    # validation split: entries where the training mask is zero
    vs = np.array(gp._mask_ts.full_array()) == 0
    y_vs = np.array(gp._y_klass.full_array())[vs]
    hy_vs = np.array(SparseArray.argmax(off.hy).full_array())[vs]
    prec_vs = np.array([(y_vs[hy_vs == k] == k).mean()
                        for k in range(nclasses)])
    expected_vs = np.prod(prec_vs) - 1
    if np.isfinite(expected_vs) and np.isfinite(off.fitness_vs):
        assert_almost_equals(expected_vs, off.fitness_vs)
def test_add_repeated_args():
    """An offspring must not use more variables than there are inputs."""
    from EvoDAG import EvoDAG
    from EvoDAG.node import Add, Min, Max
    target = cl.copy()
    for func in [Add, Min, Max]:
        func.nargs = 10
        gp = EvoDAG(generations=np.inf, tournament_size=2,
                    early_stopping_rounds=100, time_limit=0.9,
                    classifier=False, all_inputs=True, function_set=[func],
                    pr_variable=1, seed=0, popsize=10000)
        gp.X = X
        gp.y = target
        gp.create_population()
        print(gp.population.population)
        node = gp.random_offspring()
        print(node, node._variable, X.shape)
        assert len(node._variable) <= X.shape[1]
        # restore the class attribute so other tests see the default
        func.nargs = 2
def test_classification_mo2():
    """fit() must accept a list of SparseArray outputs in MO mode."""
    from EvoDAG import EvoDAG
    labels = cl.copy()
    params = dict(generations=np.inf, tournament_size=2,
                  early_stopping_rounds=10, time_limit=0.9,
                  multiple_outputs=True, all_inputs=True,
                  remove_raw_inputs=False, seed=0, popsize=10000)
    gp = EvoDAG(**params)
    gp.X = X
    gp.nclasses(labels)
    mo = gp._bagging_fitness.transform_to_mo(labels)
    outputs = [SparseArray.fromlist(col) for col in mo.T]
    gp = EvoDAG(**params).fit(X, outputs)
    m = gp.model()
    print([(x, x._variable, x.height) for x in m._hist])
    assert len(m.decision_function(gp.X)) == 3
def test_random_generations():
    """random_generations must be honored by every population class."""
    from EvoDAG import EvoDAG
    from EvoDAG.population import SteadyState

    class P(SteadyState):
        def random_selection(self, negative=False):
            raise RuntimeError('!')

    target = cl.copy()
    target[target != 1] = -1
    for pop_cls in ['SteadyState', 'Generational', P]:
        gp = EvoDAG(population_class=pop_cls, all_inputs=True,
                    random_generations=1, early_stopping_rounds=1, popsize=2)
        gp.X = X
        gp.y = target
        gp.create_population()
        print(gp.population._random_generations)
        assert gp.population._random_generations == 1
        if pop_cls != P:
            for _ in range(3):
                gp.replace(gp.random_offspring())
            assert gp.population.generation == 2
        else:
            # P raises once the random generations are exhausted
            try:
                gp.replace(gp.random_offspring())
                assert False
            except RuntimeError:
                pass
def test_a_precision():
    """'a_precision' fitness is mean per-class precision minus one."""
    from EvoDAG.cython_utils import Score
    from EvoDAG import EvoDAG
    gp = EvoDAG(generations=np.inf, tournament_size=2,
                early_stopping_rounds=100, time_limit=0.9,
                multiple_outputs=True, seed=0, popsize=500)
    gp.y = cl.copy()
    gp.X = X
    gp.create_population()
    off = gp.population.bsf
    # training split: labels and predictions at the mask indices
    ts = np.array(gp._mask_ts.index)
    y_ts = np.array(gp._y_klass.full_array())[ts]
    hy_ts = np.array(SparseArray.argmax(off.hy).full_array())[ts]
    nclasses = gp._bagging_fitness.nclasses
    prec = np.array([(y_ts[hy_ts == k] == k).mean() for k in range(nclasses)])
    score = Score(nclasses)
    mf1, mf1_v = score.a_precision(gp._y_klass, SparseArray.argmax(off.hy),
                                   gp._mask_ts.index)
    assert_almost_equals(np.mean(prec), mf1)
    gp._fitness_function = 'a_precision'
    gp._bagging_fitness.set_fitness(off)
    assert_almost_equals(mf1 - 1, off.fitness)
    # validation split: entries where the training mask is zero
    vs = np.array(gp._mask_ts.full_array()) == 0
    y_vs = np.array(gp._y_klass.full_array())[vs]
    hy_vs = np.array(SparseArray.argmax(off.hy).full_array())[vs]
    prec_vs = np.array([(y_vs[hy_vs == k] == k).mean()
                        for k in range(nclasses)])
    assert_almost_equals(np.mean(prec_vs) - 1, off.fitness_vs)
def test_multiple_variables():
    """A MultipleVariables input must solve the masked least-squares system.

    Fix: ``np.bool`` was deprecated in NumPy 1.20 and removed in 1.24; the
    builtin ``bool`` is the documented replacement for the ``dtype`` arg.
    """
    import numpy as np
    from EvoDAG.population import Inputs
    from EvoDAG.cython_utils import SelectNumbers
    from EvoDAG import EvoDAG
    from SparseArray import SparseArray
    y = cl.copy()
    gp = EvoDAG(classifier=True, multiple_outputs=True, popsize=5,
                share_inputs=True)
    gp.X = X
    # zero out the last input so the solver must cope with a constant column
    gp.X[-1]._eval_tr = SparseArray.fromlist(
        [0 for x in range(gp.X[-1].hy.size())])
    gp.nclasses(y)
    gp.y = y
    inputs = Inputs(gp, SelectNumbers([x for x in range(len(gp.X))]))
    inputs._func = [inputs._func[-1]]
    inputs._nfunc = 1
    v = inputs.input()
    assert v is not None
    # builtin bool replaces the removed np.bool alias
    mask = np.array(gp._mask[0].full_array(), dtype=bool)
    D = np.array([x.hy.full_array() for x in gp.X]).T
    b = np.array(gp._ytr[0].full_array())
    coef = np.linalg.lstsq(D[mask], b[mask])[0]
    # the node's weights must match the closed-form solution
    for a, b in zip(coef, v.weight[0]):
        assert_almost_equals(a, b)
def test_random_generations_regression():
    """classifier=False variant of test_random_generations.

    Fix: this function originally reused the name ``test_random_generations``;
    since the later module-level ``def`` replaces the earlier one, the
    classifier variant was never collected by pytest. Renamed so both run.
    """
    from EvoDAG import EvoDAG
    from EvoDAG.population import SteadyState

    class P(SteadyState):
        def random_selection(self, negative=False):
            raise RuntimeError('!')

    y = cl.copy()
    y[y != 1] = -1
    for pop in ['SteadyState', 'Generational', P]:
        gp = EvoDAG(population_class=pop, classifier=False, all_inputs=True,
                    random_generations=1, early_stopping_rounds=1, popsize=2)
        gp.X = X
        gp.y = y
        gp.create_population()
        print(gp.population._random_generations)
        assert gp.population._random_generations == 1
        if pop == P:
            # P raises once the random generations are exhausted
            try:
                ind = gp.random_offspring()
                gp.replace(ind)
                assert False
            except RuntimeError:
                pass
        else:
            for i in range(3):
                gp.replace(gp.random_offspring())
            assert gp.population.generation == 2
def test_RSE_avg_zero():
    """Regression fitness must cope with targets zeroed on the masked region."""
    from EvoDAG.bagging_fitness import BaggingFitness
    from EvoDAG.node import Centroid
    from EvoDAG import EvoDAG
    Centroid.nargs = 0

    class B(BaggingFitness):
        def __init__(self, **kw):
            super(B, self).__init__(**kw)
            self._base._times = 0

        def set_regression_mask(self, v):
            # first call hides two points, subsequent calls hide three
            base = self._base
            mask = np.ones(v.size())
            if base._times == 0:
                mask[10:12] = 0
            else:
                mask[10:13] = 0
            base._mask = SparseArray.fromlist(mask)
            base._times += 1

    xs = np.linspace(-1, 1, 100)
    target = 4.3 * xs**2 + 3.2 * xs - 3.2
    target[10:12] = 0
    gp = EvoDAG(classifier=False, popsize=10, generations=2)
    gp._bagging_fitness = B(base=gp)
    gp.X = [SparseArray.fromlist(xs)]
    gp.y = target
    print(gp._times)
    assert gp._times == 2
    gp.create_population()
    while not gp.stopping_criteria():
        gp.replace(gp.random_offspring())
    # restore the class attribute for the rest of the suite
    Centroid.nargs = 2
def test_inputs_func_argument_regression():
    """A failing input function must propagate out of create_population."""
    from EvoDAG import EvoDAG

    class Error:
        nargs = 2
        min_nargs = 2
        classification = True
        regression = True

        def __init__(self, *args, **kwargs):
            raise RuntimeError('aqui')

    labels = cl.copy()
    labels[labels == 0] = -1
    labels[labels > -1] = 1
    gp = EvoDAG(classifier=False, multiple_outputs=False, pr_variable=0,
                input_functions=[Error], popsize=5, share_inputs=True)
    gp.X = X
    gp.nclasses(labels)
    gp.y = labels
    raised = False
    try:
        gp.create_population()
    except RuntimeError:
        raised = True
    assert raised
def test_generational_generation_regression():
    """Regression variant: Generational swap-in after popsize replacements.

    Fix: this function originally reused the name
    ``test_generational_generation``; a later definition with the same name
    replaced it at module level, so it was never collected by pytest.
    Renamed so both variants run.
    """
    from EvoDAG.population import Generational
    from EvoDAG import EvoDAG
    function_set = [x for x in EvoDAG()._function_set if x.regression]
    gp = EvoDAG(population_class='Generational', classifier=False,
                function_set=function_set, popsize=10)
    gp.X = X
    y = cl.copy()
    y[y != 1] = -1
    gp.y = y
    gp.create_population()
    assert isinstance(gp.population, Generational)
    p = []
    for i in range(gp.popsize - 1):
        a = gp.random_offspring()
        p.append(a)
        gp.replace(a)
    # the inner buffer fills until popsize offspring have been produced
    assert len(gp.population._inner) == (gp.popsize - 1)
    a = gp.random_offspring()
    p.append(a)
    gp.replace(a)
    # the buffer is flushed into the population on the popsize-th replace
    assert len(gp.population._inner) == 0
    for a, b in zip(gp.population.population, p):
        assert a == b
def test_all_init_popsize():
    """init_popsize is len(X) with all_inputs and popsize otherwise."""
    from EvoDAG import EvoDAG
    labels = cl.copy()
    labels[labels != 1] = -1
    gp = EvoDAG(population_class='Generational', all_inputs=True,
                early_stopping_rounds=1, popsize=2)
    gp.X = X
    gp.y = labels
    gp.create_population()
    assert gp.init_popsize == len(gp.X)
    gp = EvoDAG(population_class='Generational',
                early_stopping_rounds=1, popsize=2)
    gp.X = X
    gp.y = labels
    gp.create_population()
    assert gp.init_popsize == gp.popsize
def test_macro_F1():
    """'macro-F1': fitness must equal mean per-class F1 minus one on the
    training mask, and fitness_vs the same quantity on the validation split.
    """
    from EvoDAG.cython_utils import Score
    from EvoDAG import EvoDAG
    y = cl.copy()
    gp = EvoDAG(generations=np.inf, tournament_size=2,
                early_stopping_rounds=100, time_limit=0.9,
                multiple_outputs=True, seed=2, popsize=1000)
    gp.y = y
    gp.X = X
    gp.create_population()
    off = gp.random_offspring()
    hy = SparseArray.argmax(off.hy)
    # restrict gold labels and predictions to the training-mask indices
    index = np.array(gp._mask_ts.index)
    y = np.array(gp._y_klass.full_array())[index]
    hy = np.array(hy.full_array())[index]
    nclasses = gp._bagging_fitness.nclasses
    precision = np.array([(y[hy == k] == k).mean() for k in range(nclasses)])
    recall = np.array([(hy[y == k] == k).mean() for k in range(nclasses)])
    print(precision, recall)
    f1 = Score(nclasses)
    mf1, mf1_v = f1.a_F1(gp._y_klass, SparseArray.argmax(off.hy),
                         gp._mask_ts.index)
    # Score must reproduce per-class precision/recall; skip NaN classes
    for x, y in zip(precision, f1.precision):
        if not np.isfinite(x):
            continue
        assert_almost_equals(x, y)
    for x, y in zip(recall, f1.recall):
        if not np.isfinite(x):
            continue
        assert_almost_equals(x, y)
    # macro-F1 = mean of per-class F1, with NaN entries forced to zero
    _ = (2 * precision * recall) / (precision + recall)
    m = ~np.isfinite(_)
    _[m] = 0
    assert_almost_equals(np.mean(_), mf1)
    print(f1.precision, f1.recall, mf1, mf1_v)
    gp._fitness_function = 'macro-F1'
    gp._bagging_fitness.set_fitness(off)
    assert_almost_equals(off.fitness, mf1 - 1)
    assert_almost_equals(off.fitness_vs, mf1_v - 1)
    # repeat the computation on the validation split (training mask == 0)
    index = np.array(gp._mask_ts.full_array()) == 0
    y = np.array(gp._y_klass.full_array())[index]
    hy = SparseArray.argmax(off.hy)
    hy = np.array(hy.full_array())[index]
    precision = np.array([(y[hy == k] == k).mean() for k in range(nclasses)])
    recall = np.array([(hy[y == k] == k).mean() for k in range(nclasses)])
    _ = (2 * precision * recall) / (precision + recall)
    m = ~np.isfinite(_)
    _[m] = 0
    assert_almost_equals(np.mean(_) - 1, off.fitness_vs)
def test_create_population2():
    """With pr_variable=1 every node past the inputs is a Function."""
    from EvoDAG import EvoDAG
    from EvoDAG.node import Function
    gp = EvoDAG(generations=1, classifier=False, pr_variable=1, popsize=10)
    gp.X = X
    labels = cl.copy()
    zero = labels == 0
    labels[zero] = 1
    labels[~zero] = -1
    gp.y = labels
    gp.create_population()
    for node in gp.population.population[4:]:
        assert isinstance(node, Function)
def test_min_class():
    """min_class must pick the least-frequent label (class 2 here)."""
    from EvoDAG import EvoDAG
    labels = cl.copy()
    gp = EvoDAG(generations=np.inf, tournament_size=2,
                early_stopping_rounds=100, time_limit=0.9,
                multiple_outputs=True, seed=0, popsize=100)
    gp.y = labels[:-1]
    gp.X = X[:-1]
    assert gp._bagging_fitness.min_class == 2
def test_SteadyState_generation():
    """SteadyState advances one generation per popsize replacements."""
    from EvoDAG import EvoDAG
    labels = cl.copy()
    labels[labels != 1] = -1
    gp = EvoDAG(population_class='SteadyState', all_inputs=True,
                early_stopping_rounds=1, popsize=2)
    gp.X = X
    gp.y = labels
    gp.create_population()
    for _ in range(3):
        gp.replace(gp.random_offspring())
    assert gp.population.generation == 2
def test_create_population_cl():
    """The initial classifier population must contain at least one Function
    that is not a NaiveBayes / NaiveBayesMN input node."""
    from EvoDAG import EvoDAG
    from EvoDAG.node import Function, Variable, NaiveBayes, NaiveBayesMN
    gp = EvoDAG(generations=1, popsize=50, multiple_outputs=True)
    gp.X = X
    gp.nclasses(cl)
    gp.y = cl.copy()
    gp.create_population()
    seen_plain_function = False
    for node in gp.population.population:
        assert isinstance(node, (Function, Variable))
        if isinstance(node, Function) and \
           not isinstance(node, (NaiveBayes, NaiveBayesMN)):
            seen_plain_function = True
    assert seen_plain_function
def test_all_init_popsize_regression():
    """classifier=False variant of test_all_init_popsize.

    Fix: this function originally reused the name ``test_all_init_popsize``;
    since the later module-level ``def`` replaces the earlier one, only one
    of the two variants was collected by pytest. Renamed so both run.
    """
    from EvoDAG import EvoDAG
    y = cl.copy()
    y[y != 1] = -1
    gp = EvoDAG(population_class='Generational', all_inputs=True,
                classifier=False, early_stopping_rounds=1,
                pr_variable=1, popsize=2)
    gp.X = X
    gp.y = y
    gp.create_population()
    # all_inputs seeds one node per input column
    assert gp.init_popsize == len(gp.X)
    gp = EvoDAG(population_class='Generational', classifier=False,
                early_stopping_rounds=1, popsize=2)
    gp.X = X
    gp.y = y
    gp.create_population()
    # without all_inputs the initial population is the configured popsize
    assert gp.init_popsize == gp.popsize
def test_multiple_outputs():
    """In multiple-outputs mode gp.y holds one entry per class."""
    from EvoDAG import EvoDAG
    labels = cl.copy()
    gp = EvoDAG(generations=np.inf, tournament_size=2,
                early_stopping_rounds=100, time_limit=0.9,
                multiple_outputs=True, seed=0, popsize=10000)
    gp.X = X
    gp.nclasses(labels)
    gp.y = labels
    gp.create_population()
    assert len(gp.y) == 3
def test_SteadyState_generation_regression():
    """classifier=False variant of test_SteadyState_generation.

    Fix: this function originally reused the name
    ``test_SteadyState_generation``; the later module-level ``def`` replaced
    the earlier one, so only one variant ran under pytest. Renamed.
    """
    from EvoDAG import EvoDAG
    y = cl.copy()
    y[y != 1] = -1
    gp = EvoDAG(population_class='SteadyState', all_inputs=True,
                classifier=False, early_stopping_rounds=1, popsize=2)
    gp.X = X
    gp.y = y
    gp.create_population()
    for i in range(3):
        gp.replace(gp.random_offspring())
    assert gp.population.generation == 2
def test_mask():
    """The validation mask is the exact complement of the training mask."""
    from EvoDAG import EvoDAG
    gp = EvoDAG(generations=np.inf, tournament_size=2,
                early_stopping_rounds=100, time_limit=0.9,
                multiple_outputs=True, seed=0, popsize=100)
    gp.y = cl.copy()
    gp.X = X
    gp.create_population()
    not_training = np.array(gp._mask_ts.full_array()) == 0
    in_validation = np.array(gp._mask_vs.full_array()) == 1
    assert np.all(not_training == in_validation)
def test_all_inputs2():
    """With all_inputs the initial population has one node per input and
    grows to popsize after replacements."""
    from EvoDAG import EvoDAG
    labels = cl.copy()
    labels[labels != 1] = -1
    gp = EvoDAG(population_class='Generational', all_inputs=True, popsize=3)
    gp.X = X
    gp.y = labels
    gp.create_population()
    print(len(gp.population.population), len(gp.X))
    assert len(gp.population.population) == len(gp.X)
    for _ in range(gp.popsize):
        gp.replace(gp.random_offspring())
    assert len(gp.population.population) == gp.popsize
def test_random_offspring():
    """random_offspring yields a finite-fitness node from the function set."""
    from EvoDAG import EvoDAG
    from EvoDAG.node import Add, Sin
    gp = EvoDAG(generations=1, function_set=[Add, Sin], multiple_outputs=True,
                seed=1, tournament_size=2, popsize=10)
    gp.X = X
    gp.nclasses(cl)
    gp.y = cl.copy()
    gp.create_population()
    child = gp.random_offspring()
    assert isinstance(child, (Add, Sin))
    assert np.isfinite(child.fitness)
def test_all_inputs_regression():
    """classifier=False variant of test_all_inputs.

    Fix: this function originally reused the name ``test_all_inputs``; a
    later definition with the same name replaced it at module level, so it
    was never collected by pytest. Renamed so both variants run.
    """
    from EvoDAG import EvoDAG
    y = cl.copy()
    y[y != 1] = -1
    for pc in ['Generational', 'SteadyState']:
        gp = EvoDAG(population_class=pc, all_inputs=True, classifier=False,
                    popsize=10)
        gp.X = X
        gp.y = y
        gp.create_population()
        # all_inputs seeds fewer nodes than popsize here
        assert len(gp.population.population) < 10
        for i in range(gp.population.popsize, gp.population._popsize):
            a = gp.random_offspring()
            gp.replace(a)
        assert len(gp.population.population) == 10
def test_inputs_func_argument():
    """input_functions: exceptions propagate, names resolve, unknown names
    raise AttributeError."""
    from EvoDAG import EvoDAG

    class Error:
        nargs = 2
        min_nargs = 2
        classification = True
        regression = True

        def __init__(self, *args, **kwargs):
            raise RuntimeError('aqui')

    labels = cl.copy()
    gp = EvoDAG(classifier=True, multiple_outputs=True, pr_variable=0,
                input_functions=[Error], popsize=5, share_inputs=True)
    gp.X = X
    gp.nclasses(labels)
    gp.y = labels
    raised = False
    try:
        gp.create_population()
    except RuntimeError:
        raised = True
    assert raised
    # valid names given as strings must be resolved and fit normally
    gp = EvoDAG(classifier=True, multiple_outputs=True, pr_variable=0,
                input_functions=['NaiveBayes', 'NaiveBayesMN',
                                 'MultipleVariables'],
                popsize=5, share_inputs=True).fit(X, labels)
    assert gp
    # an unknown function name raises AttributeError
    try:
        EvoDAG(classifier=True, multiple_outputs=True, pr_variable=0,
               input_functions=['NaiveBayesXX', 'NaiveBayesMN',
                                'MultipleVariables'],
               popsize=5, share_inputs=True).fit(X, labels)
    except AttributeError:
        pass
def test_multiple_outputs_mask():
    """With tr_fraction=0.8 the validation mask keeps 27 samples."""
    from EvoDAG import EvoDAG
    from EvoDAG.node import Add, Min, Max
    labels = cl.copy()
    gp = EvoDAG(generations=np.inf, tournament_size=2,
                function_set=[Add, Min, Max], early_stopping_rounds=100,
                time_limit=0.9, tr_fraction=0.8, multiple_outputs=True,
                seed=0, popsize=100)
    gp.X = X[:-1]
    gp.nclasses(labels[:-1])
    gp.y = labels[:-1]
    assert gp._mask_vs.sum() == 27
def test_all_inputs():
    """all_inputs seeds fewer than popsize nodes, then fills to popsize."""
    from EvoDAG import EvoDAG
    labels = cl.copy()
    labels[labels != 1] = -1
    for pop_cls in ['Generational', 'SteadyState']:
        gp = EvoDAG(population_class=pop_cls, all_inputs=True, popsize=10)
        gp.X = X
        gp.y = labels
        gp.create_population()
        assert len(gp.population.population) < 10
        for _ in range(gp.population.popsize, gp.population._popsize):
            gp.replace(gp.random_offspring())
        assert len(gp.population.population) == 10
def test_classification_mo():
    """Assigning a list of outputs must produce a per-output mask list."""
    from EvoDAG import EvoDAG
    labels = cl.copy()
    gp = EvoDAG(generations=np.inf, tournament_size=2,
                early_stopping_rounds=10, time_limit=0.9,
                multiple_outputs=True, all_inputs=True, seed=0, popsize=10000)
    gp.X = X
    gp.nclasses(labels)
    mo = gp._bagging_fitness.transform_to_mo(labels)
    gp.y = [SparseArray.fromlist(col) for col in mo.T]
    assert isinstance(gp._mask, list)
    gp.create_population()
def test_all_inputs3():
    """SteadyState with all_inputs starts at popsize and stays there."""
    from EvoDAG import EvoDAG
    labels = cl.copy()
    labels[labels != 1] = -1
    gp = EvoDAG(population_class='SteadyState', all_inputs=True,
                classifier=False, pr_variable=1, popsize=3)
    gp.X = X
    gp.y = labels
    gp.create_population()
    print(len(gp.population.population), len(gp.X))
    assert len(gp.population.population) == gp.population.popsize
    for _ in range(gp.popsize):
        gp.replace(gp.random_offspring())
    assert len(gp.population.population) == gp.popsize
def test_height():
    """Initial nodes have height 0; an offspring over them has height 1."""
    from EvoDAG.node import Mul, NaiveBayesMN, NaiveBayes
    from EvoDAG import EvoDAG
    gp = EvoDAG(generations=1, seed=1, multiple_outputs=True,
                tournament_size=2, popsize=5)
    gp.X = X
    gp.nclasses(cl)
    gp.y = cl.copy()
    gp.create_population()
    print(NaiveBayes.nargs, NaiveBayesMN.nargs)
    print([(node, node.height) for node in gp.population.population])
    assert np.all([node.height == 0 for node in gp.population.population])
    offspring = gp._random_offspring(Mul, [3, 4])
    assert offspring.height == 1
def test_F1():
    """'F1' fitness must track the F1 score of the minority class only."""
    from EvoDAG.cython_utils import Score
    from EvoDAG import EvoDAG
    y = cl.copy()
    gp = EvoDAG(generations=np.inf, tournament_size=2,
                early_stopping_rounds=100, time_limit=0.9,
                multiple_outputs=True, seed=0, popsize=500)
    gp.y = y
    gp.X = X
    gp.create_population()
    off = gp.random_offspring()
    hy = SparseArray.argmax(off.hy)
    # restrict gold labels and predictions to the training-mask indices
    index = np.array(gp._mask_ts.index)
    y = np.array(gp._y_klass.full_array())[index]
    hy = np.array(hy.full_array())[index]
    nclasses = gp._bagging_fitness.nclasses
    precision = np.array([(y[hy == k] == k).mean() for k in range(nclasses)])
    recall = np.array([(hy[y == k] == k).mean() for k in range(nclasses)])
    f1 = Score(nclasses)
    # min_class must be a valid class index
    assert gp._bagging_fitness.min_class >= 0 and gp._bagging_fitness.min_class < gp._bagging_fitness.nclasses
    mf1, mf1_v = f1.F1(gp._bagging_fitness.min_class, gp._y_klass,
                       SparseArray.argmax(off.hy), gp._mask_ts.index)
    # per-class F1 with NaN entries (empty classes) forced to zero
    _ = (2 * precision * recall) / (precision + recall)
    m = ~np.isfinite(_)
    _[m] = 0
    assert_almost_equals(_[gp._bagging_fitness.min_class], mf1)
    gp._fitness_function = 'F1'
    gp._bagging_fitness.set_fitness(off)
    assert_almost_equals(mf1 - 1, off.fitness)
    # repeat the computation on the validation split (training mask == 0)
    index = np.array(gp._mask_ts.full_array()) == 0
    y = np.array(gp._y_klass.full_array())[index]
    hy = SparseArray.argmax(off.hy)
    hy = np.array(hy.full_array())[index]
    precision = np.array([(y[hy == k] == k).mean() for k in range(nclasses)])
    recall = np.array([(hy[y == k] == k).mean() for k in range(nclasses)])
    _ = (2 * precision * recall) / (precision + recall)
    m = ~np.isfinite(_)
    _[m] = 0
    assert_almost_equals(_[gp._bagging_fitness.min_class] - 1, off.fitness_vs)
def test_g_F1():
    """'g_F1' fitness must be the product of per-class F1 scores minus one."""
    from EvoDAG import EvoDAG
    y = cl.copy()
    gp = EvoDAG(generations=np.inf, tournament_size=2,
                early_stopping_rounds=100, time_limit=0.9,
                fitness_function='g_F1', multiple_outputs=True,
                seed=0, popsize=500)
    gp.y = y
    gp.X = X
    gp.create_population()
    off = gp.random_offspring()
    hy = SparseArray.argmax(off.hy)
    # restrict gold labels and predictions to the training-mask indices
    index = np.array(gp._mask_ts.index)
    y = np.array(gp._y_klass.full_array())[index]
    hy = np.array(hy.full_array())[index]
    nclasses = gp._bagging_fitness.nclasses
    recall = np.array([(hy[y == k] == k).mean() for k in range(nclasses)])
    precision = np.array([(y[hy == k] == k).mean() for k in range(nclasses)])
    # per-class F1 with NaN entries (empty classes) forced to zero
    _ = (2 * precision * recall) / (precision + recall)
    m = ~np.isfinite(_)
    _[m] = 0
    score = np.prod(_) - 1
    assert gp._fitness_function == 'g_F1'
    gp._bagging_fitness.set_fitness(off)
    print(score, _)
    assert_almost_equals(score, off.fitness)
    # repeat the computation on the validation split (training mask == 0)
    index = np.array(gp._mask_ts.full_array()) == 0
    y = np.array(gp._y_klass.full_array())[index]
    hy = SparseArray.argmax(off.hy)
    hy = np.array(hy.full_array())[index]
    recall = np.array([(hy[y == k] == k).mean() for k in range(nclasses)])
    precision = np.array([(y[hy == k] == k).mean() for k in range(nclasses)])
    _ = (2 * precision * recall) / (precision + recall)
    m = ~np.isfinite(_)
    _[m] = 0
    score = np.prod(_) - 1
    print(score, _)
    assert_almost_equals(score, off.fitness_vs)
def test_inputs():
    """Every registered input function must build a valid input node."""
    from EvoDAG.population import Inputs
    from EvoDAG.cython_utils import SelectNumbers
    from EvoDAG import EvoDAG
    labels = cl.copy()
    gp = EvoDAG(classifier=True, multiple_outputs=True, popsize=5,
                share_inputs=True)
    gp.X = X
    gp.nclasses(labels)
    gp.y = labels
    inputs = Inputs(gp, SelectNumbers(list(range(len(gp.X)))))
    registered = inputs._func
    for func in registered:
        # force Inputs to use exactly one function and build an input
        inputs._func = [func]
        inputs._nfunc = 1
        assert inputs.input() is not None
        inputs = Inputs(gp, SelectNumbers(list(range(len(gp.X)))))
def test_clean_regression():
    """classifier=False variant of test_clean.

    Fix: this function originally reused the name ``test_clean``; a later
    definition with the same name replaced it at module level, so it was
    never collected by pytest. Renamed so both variants run.
    """
    from EvoDAG import EvoDAG
    y = cl.copy()
    y[y != 1] = -1
    for pc in ['Generational', 'SteadyState']:
        gp = EvoDAG(population_class=pc, classifier=False, popsize=5)
        gp.X = X
        gp.y = y
        gp.create_population()
        for i in range(10):
            v = gp.random_offspring()
            gp.replace(v)
        pop = gp.population.population
        esi = gp.population.estopping
        # only current population members and the early-stopping node
        # are expected to keep their hy arrays after cleanup
        for i in gp.population._hist:
            print(i == esi, i in pop, i, '-' * 10, i.fitness)
            if i == esi:
                assert i.hy is not None
            elif i in pop:
                assert i.hy is not None
        assert gp.population.estopping.hy is not None
def test_all_variables_inputs():
    """all_variables must cycle once through every registered input function."""
    from EvoDAG.population import Inputs
    from EvoDAG.cython_utils import SelectNumbers
    from EvoDAG import EvoDAG
    labels = cl.copy()
    gp = EvoDAG(classifier=True, multiple_outputs=True,
                use_all_vars_input_functions=True, popsize=5,
                share_inputs=True)
    gp.X = X
    gp.nclasses(labels)
    gp.y = labels
    inputs = Inputs(gp, SelectNumbers(list(range(len(gp.X)))))
    registered = inputs._func
    print(registered, gp.nvar)
    for func in registered:
        node = inputs.all_variables()
        assert node is not None
        assert isinstance(node, func)
    assert inputs._all_variables_index == len(registered)
def test_clean():
    """Only population members and the early-stopping node keep their hy."""
    from EvoDAG import EvoDAG
    labels = cl.copy()
    labels[labels != 1] = -1
    for pop_cls in ['Generational', 'SteadyState']:
        gp = EvoDAG(population_class=pop_cls, popsize=5)
        gp.X = X
        gp.y = labels
        gp.create_population()
        for _ in range(10):
            gp.replace(gp.random_offspring())
        pop = gp.population.population
        best = gp.population.estopping
        for node in gp.population._hist:
            print(node == best, node in pop, node, '-' * 10, node.fitness)
            if node == best or node in pop:
                assert node.hy is not None
        assert gp.population.estopping.hy is not None
def test_generational_generation():
    """Generational buffers offspring and swaps them in after popsize."""
    from EvoDAG.population import Generational
    from EvoDAG import EvoDAG
    gp = EvoDAG(population_class='Generational', popsize=10)
    gp.X = X
    labels = cl.copy()
    labels[labels != 1] = -1
    gp.y = labels
    gp.create_population()
    assert isinstance(gp.population, Generational)
    offspring = []
    for _ in range(gp.popsize - 1):
        child = gp.random_offspring()
        offspring.append(child)
        gp.replace(child)
    # the inner buffer fills until popsize offspring exist
    assert len(gp.population._inner) == (gp.popsize - 1)
    child = gp.random_offspring()
    offspring.append(child)
    gp.replace(child)
    # the buffer is flushed into the population on the popsize-th replace
    assert len(gp.population._inner) == 0
    for current, expected in zip(gp.population.population, offspring):
        assert current == expected