Example No. 1
def get_po_toolbox(predictors, response):
    creator.create("FitnessAge", base.Fitness, weights=(WEIGHT_FITNESS, WEIGHT_AGE_DENSITY))
    creator.create("Individual", SemanticPrimitiveTree, fitness=creator.FitnessAge, age=int)

    toolbox = base.Toolbox()
    pset = symbreg.get_numpy_pset(len(predictors[0]))
    toolbox.register("expr", gp.genHalfAndHalf, pset=pset, min_=MIN_DEPTH_INIT, max_=MAX_DEPTH_INIT)
    toolbox.register("individual", tools.initIterate, creator.Individual, toolbox.expr)
    toolbox.register("population", initialization.syntactically_distinct, individual=toolbox.individual, retries=100)
    toolbox.register("select", tools.selRandom)

    # Error evaluation, with per-expression results cached in an LRU cache
    expression_dict = cachetools.LRUCache(maxsize=10000)
    toolbox.register("error_func", ERROR_FUNCTION, response=response)
    toolbox.register("evaluate_error", semantics.calc_eval_semantics, context=pset.context, predictors=predictors,
                     eval_semantics=toolbox.error_func, expression_dict=expression_dict)

    toolbox.register("koza_node_selector", operators.internally_biased_node_selector, bias=INTERNAL_NODE_SELECTION_BIAS)
    toolbox.register("mate", operators.one_point_xover_biased, node_selector=toolbox.koza_node_selector)
    toolbox.decorate("mate", gp.staticLimit(key=operator.attrgetter("height"), max_value=MAX_HEIGHT))
    toolbox.decorate("mate", gp.staticLimit(key=len, max_value=MAX_SIZE))

    mstats = reports.configure_inf_protected_stats()
    multi_archive = get_archive(response)

    pop = toolbox.population(n=POP_SIZE)
    toolbox.register("run", afpo.pareto_optimization, population=pop, toolbox=toolbox, xover_prob=XOVER_PROB,
                     mut_prob=MUT_PROB, ngen=NGEN, tournament_size=TOURN_SIZE, num_randoms=1, stats=mstats,
                     archive=multi_archive, calc_pareto_front=False)

    toolbox.register("save", reports.save_log_to_csv)
    toolbox.decorate("save", reports.save_archive(multi_archive))
    return toolbox
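
A minimal driver sketch for the toolbox built above. The data arrays are synthetic placeholders, and what toolbox.run() returns depends on afpo.pareto_optimization, so the result variable is illustrative only.

import numpy as np

# Hypothetical data: 200 rows, 4 predictor columns.
predictors = np.random.rand(200, 4)
response = np.random.rand(200)

toolbox = get_po_toolbox(predictors, response)
result = toolbox.run()  # executes afpo.pareto_optimization with the arguments bound above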
Example No. 2
def get_generic_toolbox(predictors, response, test_predictors, test_response, register_mut_func, mut_pb, xover_pb,
                        node_selector, size_limit=MAX_SIZE):
    creator.create("FitnessMin", base.Fitness, weights=(-1.0,))
    creator.create("Individual", gp.PrimitiveTree, fitness=creator.FitnessMin)

    toolbox = base.Toolbox()
    pset = symbreg.get_numpy_pset(len(predictors[0]))
    toolbox.register("expr", gp.genHalfAndHalf, pset=pset, min_=MIN_DEPTH_INIT, max_=MAX_DEPTH_INIT)
    toolbox.register("individual", tools.initIterate, creator.Individual, toolbox.expr)
    toolbox.register("population", initialization.syntactically_distinct, individual=toolbox.individual, retries=100)
    toolbox.register("select", tools.selTournament, tournsize=TOURNAMENT_SIZE)

    expression_dict = cachetools.LRUCache(maxsize=10000)
    toolbox.register("semantic_calculator", semantics.calculate_semantics, context=pset.context, predictors=predictors,
                     expression_dict=expression_dict)

    toolbox.register("error_func", ERROR_FUNCTION, response=response)
    toolbox.register("evaluate_error", semantics.calc_eval_semantics, context=pset.context, predictors=predictors,
                     eval_semantics=toolbox.error_func, expression_dict=expression_dict)
    toolbox.register("assign_fitness", ea_simple_semantics.assign_raw_fitness)

    toolbox.register("mate", operators.one_point_xover_biased, node_selector=node_selector)
    toolbox.decorate("mate", operators.static_limit(key=operator.attrgetter("height"), max_value=MAX_HEIGHT))
    toolbox.decorate("mate", operators.static_limit(key=len, max_value=size_limit))

    # Mutation: register_mut_func must register a "mutate" operator on the toolbox
    # (a hypothetical example follows this function); it is then height- and size-limited below.
    lib = library.PopulationLibrary()
    register_mut_func(toolbox, lib, pset, response, node_selector)
    toolbox.decorate("mutate", operators.static_limit(key=operator.attrgetter("height"), max_value=MAX_HEIGHT))
    toolbox.decorate("mutate", operators.static_limit(key=len, max_value=size_limit))

    mstats = reports.configure_inf_protected_stats()
    multi_archive = get_archive(pset, test_predictors, test_response)
    multi_archive.archives.append(lib)

    pop = toolbox.population(n=POP_SIZE)
    toolbox.register("run", ea_simple_semantics.ea_simple, population=pop, toolbox=toolbox, cxpb=xover_pb,
                     mutpb=mut_pb, ngen=NGEN, elite_size=0, stats=mstats, verbose=True, archive=multi_archive)

    toolbox.register("save", reports.save_log_to_csv)
    toolbox.decorate("save", reports.save_archive(multi_archive))

    return toolbox
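
get_generic_toolbox defers mutation to its caller: register_mut_func receives the toolbox, the PopulationLibrary, the pset, the response, and the node selector, and must register a "mutate" operator, which is then decorated with the height and size limits. A hypothetical callback using the plain DEAP subtree mutation seen in Examples No. 7 and No. 8 (the original project presumably registers a library- or semantics-aware mutation instead):

def register_subtree_mutation(toolbox, lib, pset, response, node_selector):
    # Illustrative only; ignores the library and node selector.
    toolbox.register("expr_mutation", gp.genFull, min_=0, max_=2)
    toolbox.register("mutate", gp.mutUniform, expr=toolbox.expr_mutation, pset=pset)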
Example No. 3
def get_toolbox(predictors, response, pset, test_predictors=None, test_response=None):
    creator.create("ErrorAgeSizeComplexity", base.Fitness, weights=(-1.0, -1.0, -1.0, -1.0))
    creator.create("Individual", gp.PrimitiveTree, fitness=creator.ErrorAgeSizeComplexity, age=int)
    toolbox = base.Toolbox()
    toolbox.register("expr", sp.generate_parametrized_expression,
                     partial(gp.genFull, pset=pset, min_=MIN_DEPTH_INIT, max_=MAX_DEPTH_INIT), predictors)
    toolbox.register("individual", tools.initIterate, creator.Individual, toolbox.expr)
    toolbox.register("population", tools.initRepeat, list, toolbox.individual)
    toolbox.register("compile", gp.compile, pset=pset)
    toolbox.register("select", tools.selRandom)

    # Crossover
    toolbox.register("koza_node_selector", operators.internally_biased_node_selector, bias=INTERNAL_NODE_SELECTION_BIAS)
    toolbox.register("mate", operators.one_point_xover_biased, node_selector=toolbox.koza_node_selector)
    toolbox.decorate("mate", operators.static_limit(key=operator.attrgetter("height"), max_value=MAX_HEIGHT))
    toolbox.decorate("mate", operators.static_limit(key=len, max_value=MAX_SIZE))

    # Mutation
    toolbox.register("grow", sp.generate_parametrized_expression,
                     partial(gp.genGrow, pset=pset, min_=MIN_GEN_GROW, max_=MAX_GEN_GROW), predictors)
    toolbox.register("mutate", operators.mutation_biased, expr=toolbox.grow, node_selector=toolbox.koza_node_selector)
    toolbox.decorate("mutate", operators.static_limit(key=operator.attrgetter("height"), max_value=MAX_HEIGHT))
    toolbox.decorate("mutate", operators.static_limit(key=len, max_value=MAX_SIZE))

    # Evaluation: error computed on a random data subset managed by the subset selection archive
    expression_dict = cachetools.LRUCache(maxsize=1000)
    subset_selection_archive = subset_selection.RandomSubsetSelectionArchive(frequency=SUBSET_CHANGE_FREQUENCY,
                                                                             predictors=predictors, response=response,
                                                                             subset_size=SUBSET_SIZE,
                                                                             expression_dict=expression_dict)
    toolbox.register("error_func", partial(ERROR_FUNCTION, response))
    toolbox.register("evaluate_error", sp.simple_parametrized_evaluate, context=pset.context, predictors=predictors,
                     error_function=toolbox.error_func, expression_dict=expression_dict)
    toolbox.register("assign_fitness", afpo.assign_age_fitness_size_complexity)

    multi_archive = utils.get_archive()
    multi_archive.archives.append(subset_selection_archive)
    mstats = reports.configure_inf_protected_stats()

    pop = toolbox.population(n=POP_SIZE)
    toolbox.register("run", afpo.pareto_optimization, population=pop, toolbox=toolbox, xover_prob=XOVER_PROB,
                     mut_prob=MUT_PROB, ngen=NGEN, tournament_size=TOURNAMENT_SIZE, num_randoms=1, stats=mstats,
                     archive=multi_archive, calc_pareto_front=False, verbose=False, reevaluate_population=True)

    toolbox.register("save", reports.save_log_to_csv)
    toolbox.decorate("save", reports.save_archive(multi_archive))
    return toolbox
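
Unlike the other examples, this get_toolbox takes the primitive set as an argument. A hypothetical wiring, reusing predictors/response arrays like those in the first sketch and assuming the symbreg helper used in the surrounding examples is appropriate here (the parametrized-expression generator may in fact require a differently constructed pset):

pset = symbreg.get_numpy_pset(len(predictors[0]))
toolbox = get_toolbox(predictors, response, pset)
result = toolbox.run()  # executes afpo.pareto_optimization with the arguments bound above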
Example No. 4
def get_gp_lgx_toolbox(predictors, response):
    creator.create("FitnessMax", base.Fitness, weights=(WEIGHT_FITNESS,))
    creator.create("Individual", SemanticPrimitiveTree, fitness=creator.FitnessMax, age=int)

    toolbox = base.Toolbox()
    pset = symbreg.get_numpy_pset(len(predictors[0]))
    toolbox.register("expr", gp.genHalfAndHalf, pset=pset, min_=MIN_DEPTH_INIT, max_=MAX_DEPTH_INIT)
    toolbox.register("individual", tools.initIterate, creator.Individual, toolbox.expr)
    toolbox.register("population", initialization.syntactically_distinct, individual=toolbox.individual, retries=100)
    toolbox.register("select", tools.selTournament, tournsize=TOURN_SIZE)

    # Semantic library queried by the locally geometric crossover (LGX) below
    lib = library.SemanticLibrary()
    lib.generate_trees(pset, LIBRARY_DEPTH, predictors)
    toolbox.register("lib_selector", lib.get_closest, distance_measure=distances.cumulative_absolute_difference,
                     k=LIBRARY_SEARCH_NEIGHBORS, check_constant=True)
    toolbox.register("mate", locally_geometric.homologous_lgx, library_selector=toolbox.lib_selector,
                     internal_bias=INTERNAL_NODE_SELECTION_BIAS, max_height=MAX_HEIGHT,
                     distance_measure=distances.cumulative_absolute_difference, distance_threshold=0.0)
    toolbox.decorate("mate", gp.staticLimit(key=operator.attrgetter("height"), max_value=MAX_HEIGHT))
    toolbox.decorate("mate", gp.staticLimit(key=len, max_value=MAX_SIZE))

    expression_dict = cachetools.LRUCache(maxsize=10000)

    toolbox.register("error_func", ERROR_FUNCTION, response=response)
    toolbox.register("evaluate_error", semantics.calc_eval_semantics, context=pset.context, predictors=predictors,
                     eval_semantics=toolbox.error_func, expression_dict=expression_dict)
    toolbox.register("assign_fitness", afpo.assign_pure_fitness)

    mstats = reports.configure_inf_protected_stats()
    multi_archive = get_archive(response)

    pop = toolbox.population(n=POP_SIZE)
    toolbox.register("run", ea_simple_semantics.ea_simple, population=pop, toolbox=toolbox, cxpb=XOVER_PROB,
                     mutpb=MUT_PROB, ngen=NGEN, elite_size=0, stats=mstats, verbose=False, archive=multi_archive)

    toolbox.register("save", reports.save_log_to_csv)
    toolbox.decorate("save", reports.save_archive(multi_archive))
    return toolbox
Example No. 5
def get_gp_toolbox(predictors, response):
    creator.create("FitnessMax", base.Fitness, weights=(WEIGHT_FITNESS, ))
    creator.create("Individual",
                   SemanticPrimitiveTree,
                   fitness=creator.FitnessMax,
                   age=int)

    toolbox = base.Toolbox()
    pset = symbreg.get_numpy_pset(len(predictors[0]))
    toolbox.register("expr",
                     gp.genHalfAndHalf,
                     pset=pset,
                     min_=MIN_DEPTH_INIT,
                     max_=MAX_DEPTH_INIT)
    toolbox.register("individual", tools.initIterate, creator.Individual,
                     toolbox.expr)
    toolbox.register("population",
                     initialization.syntactically_distinct,
                     individual=toolbox.individual,
                     retries=100)
    toolbox.register("select", tools.selTournament, tournsize=TOURN_SIZE)

    expression_dict = cachetools.LRUCache(maxsize=10000)
    toolbox.register("error_func", ERROR_FUNCTION, response=response)
    toolbox.register("evaluate_error",
                     semantics.calc_eval_semantics,
                     context=pset.context,
                     predictors=predictors,
                     eval_semantics=toolbox.error_func,
                     expression_dict=expression_dict)
    toolbox.register("assign_fitness", afpo.assign_pure_fitness)

    toolbox.register("koza_node_selector",
                     operators.internally_biased_node_selector,
                     bias=INTERNAL_NODE_SELECTION_BIAS)
    toolbox.register("mate",
                     operators.one_point_xover_biased,
                     node_selector=toolbox.koza_node_selector)
    toolbox.decorate(
        "mate",
        gp.staticLimit(key=operator.attrgetter("height"),
                       max_value=MAX_HEIGHT))
    toolbox.decorate("mate", gp.staticLimit(key=len, max_value=MAX_SIZE))

    mstats = reports.configure_inf_protected_stats()
    multi_archive = get_archive(response)

    pop = toolbox.population(n=POP_SIZE)
    toolbox.register("run",
                     ea_simple_semantics.ea_simple,
                     population=pop,
                     toolbox=toolbox,
                     cxpb=XOVER_PROB,
                     mutpb=MUT_PROB,
                     ngen=NGEN,
                     elite_size=0,
                     stats=mstats,
                     verbose=False,
                     archive=multi_archive)

    toolbox.register("save", reports.save_log_to_csv)
    toolbox.decorate("save", reports.save_archive(multi_archive))
    return toolbox
Example No. 6
def get_novelty_po_lgx_toolbox(predictors, response):
    creator.create("FitnessNovelty",
                   base.Fitness,
                   weights=(WEIGHT_FITNESS, WEIGHT_NOVELTY))
    creator.create("Individual",
                   SemanticPrimitiveTree,
                   fitness=creator.FitnessNovelty,
                   age=int)

    toolbox = base.Toolbox()
    pset = symbreg.get_numpy_pset(len(predictors[0]))
    toolbox.register("expr",
                     gp.genHalfAndHalf,
                     pset=pset,
                     min_=MIN_DEPTH_INIT,
                     max_=MAX_DEPTH_INIT)
    toolbox.register("individual", tools.initIterate, creator.Individual,
                     toolbox.expr)
    toolbox.register("population",
                     initialization.syntactically_distinct,
                     individual=toolbox.individual,
                     retries=100)
    toolbox.register("select", tools.selRandom)

    lib = library.SemanticLibrary()
    lib.generate_trees(pset, LIBRARY_DEPTH, predictors)
    toolbox.register("lib_selector",
                     lib.get_closest,
                     distance_measure=distances.cumulative_absolute_difference,
                     k=LIBRARY_SEARCH_NEIGHBORS,
                     check_constant=True)
    toolbox.register("mate",
                     locally_geometric.homologous_lgx,
                     library_selector=toolbox.lib_selector,
                     internal_bias=INTERNAL_NODE_SELECTION_BIAS,
                     max_height=MAX_HEIGHT,
                     distance_measure=distances.cumulative_absolute_difference,
                     distance_threshold=0.0)
    toolbox.decorate(
        "mate",
        gp.staticLimit(key=operator.attrgetter("height"),
                       max_value=MAX_HEIGHT))
    toolbox.decorate("mate", gp.staticLimit(key=len, max_value=MAX_SIZE))

    expression_dict = cachetools.LRUCache(maxsize=10000)
    toolbox.register("error_func", ERROR_FUNCTION, response=response)
    toolbox.register("evaluate_error",
                     semantics.calc_eval_semantics,
                     context=pset.context,
                     predictors=predictors,
                     eval_semantics=toolbox.error_func,
                     expression_dict=expression_dict)

    mstats = reports.configure_inf_protected_stats()
    multi_archive = get_archive(response)

    pop = toolbox.population(n=POP_SIZE)
    toolbox.register("run",
                     afpo.pareto_optimization,
                     population=pop,
                     toolbox=toolbox,
                     xover_prob=XOVER_PROB,
                     mut_prob=MUT_PROB,
                     ngen=NGEN,
                     tournament_size=TOURN_SIZE,
                     num_randoms=1,
                     stats=mstats,
                     archive=multi_archive,
                     calc_pareto_front=False)

    toolbox.register("save", reports.save_log_to_csv)
    toolbox.decorate("save", reports.save_archive(multi_archive))
    return toolbox
Example No. 7
def get_toolbox_base(predictors, response, toolbox, param_mut_prob):
    metadata_dict = dict()
    latitude_longitude = np.load('../data/SweData/metadata/latlon.npy')
    elevation = np.load('../data/SweData/metadata/elevation.npy')
    aspect = np.load('../data/SweData/metadata/aspect.npy')
    metadata_dict["LatLon"] = latitude_longitude
    metadata_dict["Elevation"] = np.repeat(elevation, 3)
    metadata_dict["Aspect"] = np.repeat(aspect, 3)
    metadata_dict["Response"] = response
    predictors_dict = [None, None, None]
    predictors_indices = np.arange(predictors.shape[1])
    predictors_dict[0] = predictors[:, predictors_indices % 3 == 0]
    predictors_dict[1] = predictors[:, predictors_indices % 3 == 1]
    predictors_dict[2] = predictors[:, predictors_indices % 3 == 2]
    metadata_dict["Predictors"] = predictors_dict

    toolbox.register("population", tools.initRepeat, list, toolbox.individual)
    toolbox.register("select", tools.selRandom)

    # Crossover
    toolbox.register("mate", gp.cxOnePoint)
    toolbox.decorate(
        "mate", gp.staticLimit(key=operator.attrgetter("height"),
                               max_value=17))
    toolbox.decorate("mate", gp.staticLimit(key=len, max_value=300))

    # Mutation
    toolbox.register("expr_mutation", gp.genFull, min_=0, max_=2)
    toolbox.register("subtree_mutate",
                     gp.mutUniform,
                     expr=toolbox.expr_mutation,
                     pset=toolbox.pset)
    toolbox.decorate(
        "subtree_mutate",
        gp.staticLimit(key=operator.attrgetter("height"), max_value=17))
    toolbox.decorate("subtree_mutate", gp.staticLimit(key=len, max_value=300))

    toolbox.register("parameter_mutation",
                     mutation.one_point_parameter_mutation,
                     toolbox=toolbox,
                     metadata=metadata_dict,
                     two_point_scale=0.005,
                     radius_scale=0.25,
                     iterations=20)
    toolbox.register(
        "mutate",
        mutation.multi_mutation,
        mutations=[toolbox.subtree_mutate, toolbox.parameter_mutation],
        probs=[0.05, param_mut_prob])

    # Fast evaluation configuration
    numpy_response = np.array(response)
    numpy_predictors = np.array(predictors)
    expression_dict = cachetools.LRUCache(maxsize=2000)
    toolbox.register("error_func",
                     fast_evaluate.anti_correlation,
                     response=numpy_response)
    toolbox.register("evaluate_error",
                     fast_numpy_evaluate_metadata,
                     context=toolbox.pset.context,
                     predictors=numpy_predictors,
                     metadata=metadata_dict,
                     error_function=toolbox.error_func,
                     expression_dict=expression_dict,
                     arg_prefix="ARG")
    toolbox.register("evaluate",
                     afpo.evaluate_age_fitness_size,
                     error_func=toolbox.evaluate_error)

    random_data_points = np.random.choice(len(predictors), 1000, replace=False)
    subset_predictors = numpy_predictors[random_data_points, :]
    toolbox.register("calc_semantics",
                     calculate_semantics,
                     context=toolbox.pset.context,
                     predictors=subset_predictors,
                     metadata=metadata_dict)
    toolbox.register("simplify_front",
                     simplify.simplify_all,
                     toolbox=toolbox,
                     size_threshold=0,
                     semantics_threshold=10e-5,
                     precompute_semantics=True)

    pop = toolbox.population(n=1000)
    mstats = reports.configure_inf_protected_stats()
    pareto_archive = archive.ParetoFrontSavingArchive(
        frequency=1,
        criteria_chooser=archive.pick_fitness_size_from_fitness_age_size,
        simplifier=toolbox.simplify_front)

    toolbox.register("run",
                     afpo.afpo,
                     population=pop,
                     toolbox=toolbox,
                     xover_prob=0.75,
                     mut_prob=0.20,
                     ngen=1000,
                     tournament_size=2,
                     num_randoms=1,
                     stats=mstats,
                     mut_archive=None,
                     hall_of_fame=pareto_archive)

    toolbox.register("save", reports.save_log_to_csv)
    toolbox.decorate("save", reports.save_archive(pareto_archive))

    return toolbox
Example No. 8
def get_toolbox(predictors, response):
    creator.create("ErrorAgeSize", base.Fitness, weights=(-1.0, -1.0, -1.0))
    creator.create("Individual",
                   gp.PrimitiveTree,
                   fitness=creator.ErrorAgeSize)

    toolbox = base.Toolbox()
    pset = symbreg.get_numpy_polynomial_explog_trig_pset(len(predictors[0]))
    pset.addEphemeralConstant("gaussian", lambda: random.gauss(0.0, 1.0))

    toolbox.register("expr", gp.genHalfAndHalf, pset=pset, min_=1, max_=6)
    toolbox.register("individual", tools.initIterate, creator.Individual,
                     toolbox.expr)
    toolbox.register("population", tools.initRepeat, list, toolbox.individual)
    toolbox.register("select", tools.selRandom)

    # Crossover
    toolbox.register("mate", gp.cxOnePoint)
    toolbox.decorate(
        "mate", gp.staticLimit(key=operator.attrgetter("height"),
                               max_value=17))
    toolbox.decorate("mate", gp.staticLimit(key=len, max_value=300))

    # Mutation
    toolbox.register("expr_mutation", gp.genFull, min_=0, max_=2)
    toolbox.register("mutate",
                     gp.mutUniform,
                     expr=toolbox.expr_mutation,
                     pset=pset)
    toolbox.decorate(
        "mutate",
        gp.staticLimit(key=operator.attrgetter("height"), max_value=17))
    toolbox.decorate("mutate", gp.staticLimit(key=len, max_value=300))

    # Fast evaluation configuration
    numpy_response = numpy.array(response)
    numpy_predictors = numpy.array(predictors)
    expression_dict = cachetools.LRUCache(maxsize=2000)

    # toolbox.register("error_func", fast_evaluate.mean_absolute_percentage_error, response=numpy_response)
    toolbox.register("error_func",
                     fast_evaluate.anti_correlation,
                     response=numpy_response)
    toolbox.register("evaluate_error",
                     fast_evaluate.fast_numpy_evaluate,
                     context=pset.context,
                     predictors=numpy_predictors,
                     error_function=toolbox.error_func,
                     expression_dict=expression_dict)
    toolbox.register("evaluate",
                     afpo.evaluate_age_fitness_size,
                     error_func=toolbox.evaluate_error)

    random_data_points = numpy.random.choice(len(predictors),
                                             1000,
                                             replace=False)
    subset_predictors = numpy_predictors[random_data_points, :]

    toolbox.register("calc_semantics",
                     semantics.calculate_semantics,
                     context=pset.context,
                     predictors=subset_predictors)
    toolbox.register("simplify_front",
                     simplify.simplify_all,
                     toolbox=toolbox,
                     size_threshold=0,
                     semantics_threshold=10e-5,
                     precompute_semantics=True)

    pop = toolbox.population(n=1000)
    mstats = reports.configure_inf_protected_stats()
    pareto_archive = archive.ParetoFrontSavingArchive(
        frequency=1,
        criteria_chooser=archive.pick_fitness_size_from_fitness_age_size,
        simplifier=toolbox.simplify_front)

    toolbox.register("run",
                     afpo.afpo,
                     population=pop,
                     toolbox=toolbox,
                     xover_prob=0.75,
                     mut_prob=0.01,
                     ngen=1000,
                     tournament_size=2,
                     num_randoms=1,
                     stats=mstats,
                     hall_of_fame=pareto_archive)

    toolbox.register("save", reports.save_log_to_csv)
    toolbox.decorate("save", reports.save_archive(pareto_archive))

    return toolbox