Example #1
    def get_learners(self, key, bounds, dview):
        SystemInfo._validate_key(key)
        self._Ebounds = bounds

        def decomp(f):
            # adaptive does not work with complex matrices
            def wrapper(E):
                return SystemInfo.decomplexify(f(E))

            return wrapper

        funcs = {
            'L': decomp(self.refl_L),
            'N': decomp(self.smat_N),
            'R': decomp(self.refl_R)
        }
        hashed_funcs = {k: quickle.quickle(v, dview) for k, v in funcs.items()}
        return [
            adaptive.Learner1D(hashed_funcs[k], bounds=bounds) for k in key
        ]
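A minimal driving sketch for the returned learners; `sysinfo` and `dview` are hypothetical stand-ins for objects from the surrounding codebase, while `BalancingLearner` and `runner.simple` are standard adaptive API:

import adaptive

# Combine the per-key learners and run them together to a shared loss goal.
learners = sysinfo.get_learners(key='LNR', bounds=(0.0, 1.0), dview=dview)  # hypothetical inputs
balancer = adaptive.BalancingLearner(learners)
adaptive.runner.simple(balancer, goal=lambda l: l.loss() < 0.05)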
Example #2
    def setup(self):
        self.learner = adaptive.Learner1D(f_1d, bounds=(-1, 1))
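`f_1d` is not defined in this snippet; a minimal sketch of a stand-in function and how such a setup might be exercised (the function shape is an assumption):

import adaptive

def f_1d(x):
    # Assumed stand-in for the benchmark's test function.
    return x + 0.1 * x**2

learner = adaptive.Learner1D(f_1d, bounds=(-1, 1))
adaptive.runner.simple(learner, goal=lambda l: l.npoints >= 100)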
Example #3
import json

import adaptive
from skopt import gp_minimize
from skopt.utils import dump

from openmc_model import objective

# Optimisation for 1D EXAMPLE

# Uses adaptive sampling methods from task 8 to obtain starting points for the optimiser
learner = adaptive.Learner1D(objective, bounds=(0, 100))
runner = adaptive.Runner(learner, ntasks=1, goal=lambda l: l.npoints > 7)
runner.ioloop.run_until_complete(runner.task)


# Gaussian-process-based optimisation that returns a SciPy optimisation object
res = gp_minimize(objective,          # the function to minimize
                  [(0., 100.)],       # the bounds on each dimension of x
                  n_calls=30,         # the number of evaluations of f
                  n_random_starts=0,  # the number of random initialization points
                  verbose=True,
                  x0=[[i] for i in list(learner.data.keys())], # initial data from the adaptive sampling method
                  y0=list(learner.data.values()) # initial data from the adaptive sampling method
                  )

# Saves the optimisation simulation results to a file
dump(res, 'saved_optimisation_1d.dat')
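The saved file can later be restored with skopt's matching loader; a short sketch using the filename from above:

from skopt.utils import load

# Reload the result object written by dump() and inspect the optimum.
res = load('saved_optimisation_1d.dat')
print(res.x, res.fun)  # best input found and its objective value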
Example #4
import adaptive
import numpy as np


def adaptive_scan(x_init,
                  fun=None,
                  fun_grad=None,
                  grad_lookup=None,
                  options={}):
    """
    One dimensional scan of the function values around the initial point, using
    adaptive sampling

    Parameters
    ----------
    x_init : float
        Initial point
    fun : callable
        Goal function
    fun_grad : callable
        Function that computes the gradient of the goal function
    grad_lookup : callable
        Lookup a previously computed gradient
    options : dict
        Options include

        accuracy_goal: float
            Targeted accuracy for the sampling algorithm
        probe_list : list
            Points to definitely include in the sampling
        init_point : boolean
            Include the initial point in the sampling
    """
    if "accuracy_goal" in options:
        accuracy_goal = options["accuracy_goal"]
    else:
        accuracy_goal = 0.5
    print("accuracy_goal: " + str(accuracy_goal))

    probe_list = []
    if "probe_list" in options:
        for x in options["probe_list"]:
            probe_list.append(eval(x))

    if "init_point" in options:
        init_point = bool(options.pop("init_point"))
        if init_point:
            probe_list.append(x_init)

    # TODO make adaptive scan be able to do multidimensional scan
    bounds = options["bounds"][0]
    bound_min = bounds[0]
    bound_max = bounds[1]
    if probe_list:  # min()/max() on an empty list would raise ValueError
        bound_min = min(bound_min, min(probe_list))
        bound_max = max(bound_max, max(probe_list))
    print(" ")
    print("bound_min: " + str((bound_min) / (2e9 * np.pi)))
    print("bound_max: " + str((bound_max) / (2e9 * np.pi)))
    print(" ")

    def fun1d(x):
        return fun([x])

    learner = adaptive.Learner1D(fun1d, bounds=(bound_min, bound_max))

    for x in probe_list:
        print("from probe_list: " + str(x))
        tmp = learner.function(x)
        print("done\n")
        learner.tell(x, tmp)

    adaptive.runner.simple(
        learner, goal=lambda learner_: learner_.loss() < accuracy_goal)
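A hedged usage sketch; `my_goal` is a hypothetical objective taking a parameter list, and the option keys mirror those read above:

import numpy as np

def my_goal(params):
    # Hypothetical goal function: quadratic bowl in angular-frequency units.
    (x,) = params
    return (x / (2e9 * np.pi) - 3.0) ** 2

adaptive_scan(
    x_init=3.2 * 2e9 * np.pi,
    fun=my_goal,
    options={
        "accuracy_goal": 0.1,
        "bounds": [(2.8 * 2e9 * np.pi, 3.6 * 2e9 * np.pi)],
        "init_point": True,
    },
)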
Example #5
import random
import adaptive
import holoviews
import bokeh
import ipywidgets
import adaptive.notebook_integration
adaptive.notebook_extension()

offset = random.uniform(-0.5, 0.5)

def f(x, offset=offset, wait=True):
    from time import sleep
    from random import random

    a = 0.01
    if wait:
        sleep(random() / 10)
    return x + a**2 / (a**2 + (x - offset)**2)

learner = adaptive.Learner1D(f, bounds=(-1, 1))

runner = adaptive.Runner(learner, goal=lambda l: l.loss() < 0.01)

runner.live_info()

runner.live_plot(update_interval=0.1)
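Outside a notebook the live widgets are unavailable; a blocking variant of the same run (standard adaptive API, sketched with a fresh learner):

# Script-friendly equivalent: block until the loss goal is met, then inspect the data.
learner2 = adaptive.Learner1D(f, bounds=(-1, 1))
adaptive.runner.simple(learner2, goal=lambda l: l.loss() < 0.01)
print(len(learner2.data), "points sampled")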