Example #1
    def setup_tuner(self):
        self.tunecfg = self.experiment["tuner"]
        self.parameters = list(self.tunecfg["parameters"].keys())
        self.dimensions = self.parse_dimensions(self.tunecfg["parameters"])
        self.space = normalize_dimensions(self.dimensions)
        self.priors = self.parse_priors(self.tunecfg["priors"])

        self.kernel = ConstantKernel(
            constant_value=self.tunecfg.get("variance_value", 0.1**2),
            constant_value_bounds=tuple(
                self.tunecfg.get("variance_bounds", (0.01**2, 0.5**2))),
        ) * Matern(
            length_scale=self.tunecfg.get("length_scale_value", 0.3),
            length_scale_bounds=tuple(
                self.tunecfg.get("length_scale_bounds", (0.2, 0.8))),
            nu=2.5,
        )
        self.opt = Optimizer(
            dimensions=self.dimensions,
            n_points=self.tunecfg.get("n_points", 1000),
            n_initial_points=self.tunecfg.get("n_initial_points",
                                              5 * len(self.dimensions)),
            gp_kernel=self.kernel,
            gp_kwargs=dict(normalize_y=True),
            gp_priors=self.priors,
            acq_func=self.tunecfg.get("acq_func", "ts"),
            acq_func_kwargs=self.tunecfg.get(
                "acq_func_kwargs",
                None),  # TODO: Check if this works for all parameters
            random_state=self.rng.randint(0,
                                          np.iinfo(np.int32).max),
        )
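For context, below is a minimal sketch of the "tuner" section of the experiment configuration that this setup_tuner reads. The keys mirror the code above; the parameter names, ranges, and prior strings are purely illustrative, and their string format is assumed to follow parse_dimensions/parse_priors from Example #6.

# Hypothetical experiment["tuner"] contents (stored as JSON in the experiment file):
tuner_config = {
    "parameters": {
        "CPuct": "Real(low=0.5, high=5.0)",        # parsed into skopt dimensions
        "FpuValue": "Real(low=-1.0, high=1.0)",
    },
    "priors": [
        "halfnorm(scale=2.0)",           # signal variance prior
        "invgamma(a=8.9, scale=1.7)",    # lengthscale prior for CPuct
        "invgamma(a=8.9, scale=1.7)",    # lengthscale prior for FpuValue
        "halfnorm(scale=2.0)",           # noise prior
    ],
    # Optional keys shown with the defaults used in the code above:
    "variance_value": 0.01,              # 0.1 ** 2
    "variance_bounds": [0.0001, 0.25],   # (0.01 ** 2, 0.5 ** 2)
    "length_scale_value": 0.3,
    "length_scale_bounds": [0.2, 0.8],
    "n_points": 1000,
    "n_initial_points": 10,              # default is 5 * number of dimensions
    "acq_func": "ts",
}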
Example #2
def setup_backend(self, params, n_initial_points=None, **options):
    """Special method to initialize the backend from params."""
    self.params = params
    if n_initial_points is None:
        n_initial_points = guess_n_initial_points(params)
    self.optimizer = Optimizer(create_dimensions(params),
                               n_initial_points=n_initial_points,
                               **options)
Example #3
def test_update_model():
    opt = Optimizer(
        dimensions=[(0.0, 1.0)],
        n_points=10,
        random_state=0,
    )
    points = [[0.0], [1.0], [0.5]]
    scores = [-1.0, 1.0, 0.0]
    variances = [0.3, 0.2, 0.4]
    for p, s, v in zip(points, scores, variances):
        update_model(
            optimizer=opt,
            point=p,
            score=s,
            variance=v,
        )
    assert len(opt.Xi) == 3
    assert np.allclose(opt.Xi, points)
    assert np.allclose(opt.yi, scores)
    assert np.allclose(opt.noisei, variances)
Example #4
def initialize_optimizer(
    X: Sequence[list],
    y: Sequence[float],
    noise: Sequence[float],
    parameter_ranges: Sequence[Union[Sequence, Dimension]],
    noise_scaling_coefficient: float = 1.0,
    random_seed: int = 0,
    warp_inputs: bool = True,
    normalize_y: bool = True,
    #kernel_lengthscale_prior_lower_bound: float = 0.1,
    #kernel_lengthscale_prior_upper_bound: float = 0.5,
    #kernel_lengthscale_prior_lower_steepness: float = 2.0,
    #kernel_lengthscale_prior_upper_steepness: float = 1.0,
    n_points: int = 500,
    n_initial_points: int = 16,
    acq_function: str = "mes",
    acq_function_samples: int = 1,
    acq_function_lcb_alpha: float = 1.96,
    resume: bool = True,
    fast_resume: bool = True,
    model_path: Optional[str] = None,
    gp_initial_burnin: int = 100,
    gp_initial_samples: int = 300,
    gp_priors: Optional[List[Callable[[float], float]]] = None,
) -> Optimizer:
    """Create an Optimizer object and if needed resume and/or reinitialize.

    Parameters
    ----------
    X : Sequence of lists
        Contains n_points many lists, each representing one configuration.
    y : Sequence of floats
        Contains n_points many scores, one for each configuration.
    noise : Sequence of floats
        Contains n_points many variances, one for each score.
    parameter_ranges : Sequence of Dimension objects or tuples
        Parameter range specifications as expected by scikit-optimize.
    noise_scaling_coefficient : float, default=1.0
        Factor by which the score variances are multiplied before they are passed to
        the optimizer as the noise vector.
    random_seed : int, default=0
        Random seed for the optimizer.
    warp_inputs : bool, default=True
        If True, the optimizer will internally warp the input space for a better model
        fit. Can negatively impact running time and required burnin samples.
    normalize_y : bool, default=True
        If True, the optimizer will normalize the target values before fitting the
        Gaussian process.
    n_points : int, default=500
        Number of points to evaluate the acquisition function on.
    n_initial_points : int, default=16
        Number of points to pick quasi-randomly to initialize the model, before
        using the acquisition function.
    acq_function : str, default="mes"
        Acquisition function to use.
    acq_function_samples : int, default=1
        Number of hyperposterior samples to average the acquisition function over.
    acq_function_lcb_alpha : float, default=1.96
        Alpha value used by the "lcb" acquisition function. An infinite value is
        converted to the string "inf", as expected by Bayes-skopt.
    resume : bool, default=True
        If True, resume optimization from existing data. If False, start with a
        completely fresh optimizer.
    fast_resume : bool, default=True
        If True, restore the optimizer from disk, avoiding costly reinitialization.
        If False, reinitialize the optimizer from the existing data.
    model_path : str or None, default=None
        Path to the file containing the existing optimizer to be used for fast resume
        functionality.
    gp_initial_burnin : int, default=100
        Number of burnin samples to use for reinitialization.
    gp_initial_samples : int, default=300
        Number of samples to use for reinitialization.
    gp_priors : list of callables, default=None
        List of priors to be used for the kernel hyperparameters. Specified in the
        following order:
        - signal magnitude prior
        - lengthscale prior (x number of parameters)
        - noise magnitude prior

    Returns
    -------
    bask.Optimizer
        Optimizer object to be used in the main tuning loop.
    """
    logger = logging.getLogger(LOGGER)
    # Create random generator:
    random_state = setup_random_state(random_seed)
    #space = normalize_dimensions(parameter_ranges)

    gp_kwargs = dict(
        normalize_y=normalize_y,
        warp_inputs=warp_inputs,
    )
    if acq_function == "rand":
        current_acq_func = random.choice(["mes", "pvrs", "ei", "lcb", "ts"])
    else:
        current_acq_func = acq_function

    if acq_function_lcb_alpha == float("inf"):
        acq_function_lcb_alpha = str(
            acq_function_lcb_alpha
        )  # Bayes-skopt expects alpha as the string "inf" in case of an infinite alpha.
    acq_func_kwargs = dict(
        alpha=acq_function_lcb_alpha,
        n_thompson=500,
    )

    #roundflat = make_roundflat(
                #kernel_lengthscale_prior_lower_bound,
                #kernel_lengthscale_prior_upper_bound,
                #kernel_lengthscale_prior_lower_steepness,
                #kernel_lengthscale_prior_upper_steepness,
            #)
    #priors = [
        # Prior distribution for the signal variance:
        #lambda x: halfnorm(scale=2.).logpdf(np.sqrt(np.exp(x))) + x / 2.0 - np.log(2.0),
        # Prior distribution for the length scales:
        #*[lambda x: roundflat(np.exp(x)) + x for _ in range(space.n_dims)],
        # Prior distribution for the noise:
        #lambda x: halfnorm(scale=2.).logpdf(np.sqrt(np.exp(x))) + x / 2.0 - np.log(2.0)
        #]

    opt = Optimizer(
        dimensions=parameter_ranges,
        n_points=n_points,
        n_initial_points=n_initial_points,
        # gp_kernel=kernel,  # TODO: Let user pass in different kernels
        gp_kwargs=gp_kwargs,
        #gp_priors=priors,
        gp_priors=gp_priors,
        acq_func=current_acq_func,
        acq_func_kwargs=acq_func_kwargs,
        random_state=random_state,
    )

    if not resume:
        return opt

    reinitialize = True
    if model_path is not None and fast_resume:
        path = pathlib.Path(model_path)
        if path.exists():
            with open(model_path, mode="rb") as model_file:
                old_opt = dill.load(model_file)
                logger.info(f"Resuming from existing optimizer in {model_path}.")
            if opt.space == old_opt.space:
                old_opt.acq_func = opt.acq_func
                old_opt.acq_func_kwargs = opt.acq_func_kwargs
                old_opt.n_points = opt.n_points
                opt = old_opt
                reinitialize = False
            else:
                logger.info(
                    "Parameter ranges have been changed and the "
                    "existing optimizer instance is no longer "
                    "valid. Reinitializing now."
                )
            if gp_priors is not None:
                opt.gp_priors = gp_priors

    if reinitialize and len(X) > 0:
        logger.info(
            f"Importing {len(X)} existing datapoints. " f"This could take a while..."
        )
        if acq_function == "rand":
            logger.debug(f"Current random acquisition function: {current_acq_func}")
        opt.tell(
            X,
            y,
            #noise_vector=noise,
            noise_vector=[i * noise_scaling_coefficient for i in noise],
            gp_burnin=gp_initial_burnin,
            gp_samples=gp_initial_samples,
            n_samples=acq_function_samples,
            progress=True,
        )
        logger.info("Importing finished.")
    #root_logger.debug(f"noise_vector: {[i*noise_scaling_coefficient for i in noise]}")
    logger.debug(f"GP kernel_: {opt.gp.kernel_}")
    #logger.debug(f"GP priors: {opt.gp_priors}")
    #logger.debug(f"GP X_train_: {opt.gp.X_train_}")
    #logger.debug(f"GP alpha: {opt.gp.alpha}")
    #logger.debug(f"GP alpha_: {opt.gp.alpha_}")
    #logger.debug(f"GP y_train_: {opt.gp.y_train_}")
    #logger.debug(f"GP y_train_std_: {opt.gp.y_train_std_}")
    #logger.debug(f"GP y_train_mean_: {opt.gp.y_train_mean_}")

    #if warp_inputs and hasattr(opt.gp, "warp_alphas_"):
        #warp_params = dict(
            #zip(
                #parameter_ranges.keys(),
                #zip(
                    #np.around(np.exp(opt.gp.warp_alphas_), 3),
                    #np.around(np.exp(opt.gp.warp_betas_), 3),
                #),
            #)
        #)
        #logger.debug(
            #f"Input warping was applied using the following parameters for "
            #f"the beta distributions:\n"
            #f"{warp_params}"
        #)

    return opt
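A minimal usage sketch for initialize_optimizer; all data values, parameter ranges, and keyword choices below are hypothetical and only illustrate the expected shapes.

# Two previously evaluated configurations with their (made-up) Elo scores and variances:
X = [[0.8, 10], [1.5, 25]]
y = [0.05, -0.02]
noise = [0.3, 0.25]
opt = initialize_optimizer(
    X=X,
    y=y,
    noise=noise,
    parameter_ranges=[(0.0, 2.0), (1, 32)],
    random_seed=42,
    n_initial_points=5,
    acq_function="mes",
    resume=True,
    model_path=None,  # no saved optimizer on disk, so the data above is re-imported
)
next_point = opt.ask()  # configuration to evaluate next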
Example #5
def update_model(
    optimizer: Optimizer,
    point: list,
    score: float,
    variance: float,
    noise_scaling_coefficient: float = 1.0,
    acq_function_samples: int = 1,
    acq_function_lcb_alpha: float = 1.96,
    gp_burnin: int = 5,
    gp_samples: int = 300,
    gp_initial_burnin: int = 100,
    gp_initial_samples: int = 300,
) -> None:
    """Update the optimizer model with the newest data.

    Parameters
    ----------
    optimizer : bask.Optimizer
        Optimizer object which is to be updated.
    point : list
        Latest configuration which was tested.
    score : float
        Elo score the configuration achieved.
    variance : float
        Variance of the Elo score of the configuration.
    noise_scaling_coefficient : float, default=1.0
        Factor by which the variance is multiplied before it is passed to the
        optimizer as the noise value.
    acq_function_samples : int, default=1
        Number of hyperposterior samples to average the acquisition function over.
    gp_burnin : int, default=5
        Number of burnin iterations to use before keeping samples for the model.
    gp_samples : int, default=300
        Number of samples to collect for the model.
    gp_initial_burnin : int, default=100
        Number of burnin iterations to use for the first initial model fit.
    gp_initial_samples : int, default=300
        Number of samples to collect for the first initial model fit.
    """
    logger = logging.getLogger(LOGGER)
    while True:
        try:
            now = datetime.now()
            # We fetch kwargs manually here to avoid collisions:
            n_samples = acq_function_samples
            if optimizer.gp.chain_ is None:
                gp_burnin = gp_initial_burnin
                gp_samples = gp_initial_samples
            optimizer.tell(
                x=point,
                y=score,
                #noise_vector=variance,
                noise_vector=noise_scaling_coefficient * variance,
                n_samples=n_samples,
                gp_samples=gp_samples,
                gp_burnin=gp_burnin,
            )
            later = datetime.now()
            difference = (later - now).total_seconds()
            logger.info(f"GP sampling finished ({difference}s)")
            #logger.debug(f"noise_vector: {[i*noise_scaling_coefficient for i in noise]}")
            logger.debug(f"GP kernel_: {optimizer.gp.kernel_}")
            #logger.debug(f"GP priors: {opt.gp_priors}")
            #logger.debug(f"GP X_train_: {opt.gp.X_train_}")
            #logger.debug(f"GP alpha: {opt.gp.alpha}")
            #logger.debug(f"GP alpha_: {opt.gp.alpha_}")
            #logger.debug(f"GP y_train_: {opt.gp.y_train_}")
            #logger.debug(f"GP y_train_std_: {opt.gp.y_train_std_}")
            #logger.debug(f"GP y_train_mean_: {opt.gp.y_train_mean_}")
        except ValueError:
            logger.warning(
                "Error encountered during fitting. Trying to sample chain a bit. "
                "If this problem persists, restart the tuner to reinitialize."
            )
            optimizer.gp.sample(n_burnin=11, priors=optimizer.gp_priors)
        else:
            break
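A usage sketch for update_model, continuing from an optimizer such as the one returned by initialize_optimizer above; the score and variance values are illustrative.

# Report one new (hypothetical) result and refit the model:
update_model(
    optimizer=opt,
    point=next_point,
    score=0.03,     # Elo score achieved by the tested configuration
    variance=0.28,  # variance of that score
    acq_function_samples=1,
    gp_burnin=5,
    gp_samples=300,
)
next_point = opt.ask()  # ask for the next configuration after the update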
Example #6
class TuningServer(object):
    def __init__(self, experiment_path, dbconfig_path, **kwargs):
        self.logger = logging.getLogger("TuningServer")
        self.experiment_path = experiment_path
        if os.path.isfile(dbconfig_path):
            with open(dbconfig_path, "r") as config_file:
                config = config_file.read().replace("\n", "")
                self.logger.debug(f"Reading DB config:\n{config}")
                self.connect_params = json.loads(config)
        else:
            raise ValueError("No dbconfig file found at provided path")

        self.engine = create_sqlalchemy_engine(self.connect_params)
        Base.metadata.create_all(self.engine)
        sm = sessionmaker(bind=self.engine)
        self.sessionmaker = get_session_maker(sm)

        if os.path.isfile(experiment_path):
            with open(experiment_path, "r+") as experiment_file:
                exp = experiment_file.read().replace("\n", "")
                self.logger.debug(f"Reading experiment config:\n{exp}")
                self.experiment = json.loads(exp)
                self.logger.debug(f"self.experiment = \n{self.experiment}")
        else:
            raise ValueError("No experiment config file found at provided path")
        self.time_controls = [
            TimeControl.from_strings(*x) for x in self.experiment["time_controls"]
        ]
        self.rng = np.random.RandomState(self.experiment.get("random_seed", 123))
        self.setup_tuner()

        try:
            os.makedirs("experiments")
        except FileExistsError:
            pass
        # TODO: in principle after deleting all jobs from the database,
        #       this could be problematic:
        self.pos = None
        self.chain = None
        if "tune_id" in self.experiment:
            self.resume_tuning()

    def write_experiment_file(self):
        with open(self.experiment_path, "w") as experiment_file:
            experiment_file.write(json.dumps(self.experiment, indent=2))

    def save_state(self):
        path = os.path.join(
            "experiments", f"data_tuneid_{self.experiment['tune_id']}.npz"
        )
        np.savez_compressed(
            path, np.array(self.opt.gp.pos_), np.array(self.opt.gp.chain_)
        )
        with open("model.pkl", mode="wb") as file:
            dill.dump(self.opt, file)

    def resume_tuning(self):
        path = os.path.join(
            "experiments", f"data_tuneid_{self.experiment['tune_id']}.npz"
        )
        if os.path.exists(path):
            data = np.load(path)
            self.opt.gp.pos_ = data["arr_0"]
            self.opt.gp.chain_ = data["arr_1"]

    def parse_dimensions(self, param_dict):
        def make_numeric(s):
            try:
                return int(s)
            except ValueError:
                try:
                    return float(s)
                except ValueError:
                    return s

        dimensions = []
        for s in param_dict.values():
            prior_str = re.findall(r"(\w+)\(", s)[0]
            prior_param_strings = re.findall(r"\((.*?)\)", s)[0].split(",")
            keys = [x.split("=")[0].strip() for x in prior_param_strings]
            vals = [make_numeric(x.split("=")[1].strip()) for x in prior_param_strings]
            dim = getattr(skspace, prior_str)(**dict(zip(keys, vals)))
            dimensions.append(dim)
        return dimensions
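    # Illustrative (hypothetical) input for parse_dimensions: each value names a
    # skopt space class and its keyword arguments as a string, e.g.
    #   {"CPuct": "Real(low=0.5, high=5.0)", "Threads": "Integer(low=1, high=8)"}
    # would be parsed into [skspace.Real(low=0.5, high=5.0),
    # skspace.Integer(low=1, high=8)].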

    def parse_priors(self, priors):
        if isinstance(priors, str):
            try:
                result = joblib.load(priors)
            except IOError:
                self.logger.error(
                    f"Priors could not be loaded from path {priors}. Terminating..."
                )
                sys.exit(1)
        else:
            result = []
            for i, p in enumerate(priors):
                prior_str = re.findall(r"(\w+)\(", p)[0]
                prior_param_strings = re.findall(r"\((.*?)\)", p)[0].split(",")
                keys = [x.split("=")[0].strip() for x in prior_param_strings]
                vals = [float(x.split("=")[1].strip()) for x in prior_param_strings]
                if prior_str == "roundflat":
                    prior = (
                        lambda x, keys=keys, vals=vals: roundflat(
                            np.exp(x), **dict(zip(keys, vals))
                        )
                        + x
                    )
                else:
                    dist = getattr(scipy.stats, prior_str)(**dict(zip(keys, vals)))
                    if i == 0 or i == len(priors) - 1:
                        # The signal variance and the signal noise are in positive,
                        # sqrt domain
                        prior = (
                            lambda x, dist=dist: dist.logpdf(np.sqrt(np.exp(x)))
                            + x / 2.0
                            - np.log(2.0)
                        )
                    else:
                        # The lengthscale(s) are in positive domain
                        prior = (
                            lambda x, dist=dist: dist.logpdf(np.exp(x)) + x
                        )  # noqa: E731
                result.append(prior)
        return result

    def setup_tuner(self):
        self.tunecfg = self.experiment["tuner"]
        self.parameters = list(self.tunecfg["parameters"].keys())
        self.dimensions = self.parse_dimensions(self.tunecfg["parameters"])
        self.space = normalize_dimensions(self.dimensions)
        self.priors = self.parse_priors(self.tunecfg["priors"])

        self.kernel = ConstantKernel(
            constant_value=self.tunecfg.get("variance_value", 0.1 ** 2),
            constant_value_bounds=tuple(
                self.tunecfg.get("variance_bounds", (0.01 ** 2, 0.5 ** 2))
            ),
        ) * Matern(
            length_scale=self.tunecfg.get("length_scale_value", 0.3),
            length_scale_bounds=tuple(
                self.tunecfg.get("length_scale_bounds", (0.2, 0.8))
            ),
            nu=2.5,
        )
        self.opt = Optimizer(
            dimensions=self.dimensions,
            n_points=self.tunecfg.get("n_points", 1000),
            n_initial_points=self.tunecfg.get(
                "n_initial_points", 5 * len(self.dimensions)
            ),
            gp_kernel=self.kernel,
            gp_kwargs=dict(
                normalize_y=True, warp_inputs=self.tunecfg.get("warp_inputs", True)
            ),
            gp_priors=self.priors,
            acq_func=self.tunecfg.get("acq_func", "ts"),
            acq_func_kwargs=self.tunecfg.get(
                "acq_func_kwargs", None
            ),  # TODO: Check if this works for all parameters
            random_state=self.rng.randint(0, np.iinfo(np.int32).max),
        )

    def query_data(self, session, include_active=False):
        tune_id = self.experiment["tune_id"]
        # First check if samplesize was reached:
        sample_sizes = np.array(
            session.query(
                SqlResult.ww_count
                + SqlResult.wd_count
                + SqlResult.wl_count
                + SqlResult.dd_count
                + SqlResult.dl_count
                + SqlResult.ll_count
            )
            .join(SqlJob)
            .filter(SqlJob.active, SqlJob.tune_id == tune_id)
            .all()
        ).squeeze()
        samplesize_reached = False
        if np.all(sample_sizes >= self.experiment.get("minimum_samplesize", 16)):
            samplesize_reached = True

        q = session.query(SqlJob).filter(SqlJob.tune_id == tune_id).order_by(SqlJob.id)
        if not include_active:
            q = q.filter(SqlJob.active == False)  # noqa
        jobs = q.all()
        query = (
            session.query(SqlUCIParam.job_id, SqlUCIParam.key, SqlUCIParam.value)
            .join(SqlJob)
            .filter(SqlJob.tune_id == tune_id)
        )
        df = pd.read_sql(query.statement, query.session.bind)
        df["value"] = df["value"].astype(float)
        self.logger.debug(f"Data frame: {df.head()}")
        X = (
            df.pivot(index="job_id", columns="key")
            .sort_index()
            .droplevel(0, axis=1)[self.parameters]
            .values
        )
        y = {tc: [] for tc in self.time_controls}
        variances = {tc: [] for tc in self.time_controls}
        for job in jobs:
            for result in job.results:
                tc = result.time_control.to_tuple()
                if tc not in self.time_controls:
                    continue
                counts = np.array(
                    [
                        result.ww_count,
                        result.wd_count,
                        result.wl_count + result.dd_count,
                        result.dl_count,
                        result.ll_count,
                    ]
                )
                score, variance = counts_to_penta(
                    counts=counts, random_state=0, n_dirichlet_samples=100000
                )
                y[tc].append(score)
                variances[tc].append(variance)
        return (
            X,
            np.array(list(y.values())).mean(axis=0),
            np.array(list(variances.values())).mean(axis=0),
            samplesize_reached,
        )

    @staticmethod
    def change_engine_config(engine_config, params):
        init_strings = InitStrings(
            engine_config[0]["initStrings"]
        )  # TODO: allow tuning of different index
        for k, v in params.items():
            init_strings[k] = v

    def insert_jobs(self, session, new_x):
        # First set all active jobs to inactive:
        session.query(SqlJob).filter(
            SqlJob.active == True,  # noqa: E712
            SqlJob.tune_id == self.experiment["tune_id"],  # noqa: E712
        ).update(
            {"active": False}
        )  # noqa

        # Insert new job:
        job_dict = {
            "engine": self.experiment["engine"],
            "cutechess": self.experiment["cutechess"],
        }
        job_json = json.dumps(job_dict)
        job = SqlJob(
            minimum_version=self.experiment.get("minimum_version", 1),
            maximum_version=self.experiment.get("maximum_version", None),
            minimum_samplesize=self.experiment.get("minimum_samplesize", 16),
            config=job_json,
            engine1_exe=self.experiment.get("engine1_exe", "lc0"),
            engine1_nps=self.experiment["engine1_nps"],
            engine2_exe=self.experiment.get("engine2_exe", "sf"),
            engine2_nps=self.experiment["engine2_nps"],
            tune_id=self.experiment["tune_id"],
        )
        session.add(job)
        for tc in self.time_controls:
            sql_tc = (
                session.query(SqlTimeControl)
                .filter(
                    SqlTimeControl.engine1_time == tc.engine1_time,
                    SqlTimeControl.engine1_increment == tc.engine1_increment,
                    SqlTimeControl.engine2_time == tc.engine2_time,
                    SqlTimeControl.engine2_increment == tc.engine2_increment,
                )
                .one_or_none()
            )
            if sql_tc is None:
                sql_tc = SqlTimeControl(
                    engine1_time=tc.engine1_time,
                    engine1_increment=tc.engine1_increment,
                    engine2_time=tc.engine2_time,
                    engine2_increment=tc.engine2_increment,
                )
                session.add(sql_tc)

            result = SqlResult(job=job, time_control=sql_tc)
            session.add(result)
        for k, v in zip(self.parameters, new_x):
            param = SqlUCIParam(key=k, value=str(v), job=job)
            session.add(param)

    def run(self):
        # 0. Before we run the main loop, do we need to initialize or resume?
        #    * Resume from files (in experiment folder)
        #    * Create tune entry in db if it does not exist yet

        if "tune_id" not in self.experiment:
            with self.sessionmaker() as session:
                tune = SqlTune(
                    weight=self.experiment.get("weight", 1.0),
                    description=self.experiment.get("description", None),
                )
                session.add(tune)
                session.flush()
                self.experiment["tune_id"] = tune.id
                self.write_experiment_file()
                new_x = self.opt.ask()
                # Alter engine json using Initstrings
                params = dict(zip(self.parameters, new_x))
                self.change_engine_config(self.experiment["engine"], params)
                self.insert_jobs(session, new_x)
                self.logger.info("New jobs committed to database.")
        while True:
            self.logger.debug("Begin querying for new data...")
            # Check if minimum sample size and minimum wait time are reached, then query
            # data and update model:
            with self.sessionmaker() as session:
                X, y, variances, samplesize_reached = self.query_data(
                    session, include_active=True
                )
                self.logger.debug(
                    f"Queried the database for data and got (last 5):\n"
                    f"{X[-5:]}\n{y[-5:]}"
                )
                if len(X) == 0:
                    self.logger.info("There are no datapoints yet, start first job")
                    new_x = self.opt.ask()
                    # Alter engine json using Initstrings
                    params = dict(zip(self.parameters, new_x))
                    self.change_engine_config(self.experiment["engine"], params)
                    self.insert_jobs(session, new_x)
                    self.logger.info("New jobs committed to database.")
                    samplesize_reached = False

            if not samplesize_reached:
                sleep_seconds = self.experiment.get("sleep_time", 60)
                self.logger.debug(
                    f"Required sample size not yet reached. Sleeping {sleep_seconds}"
                    f"seconds."
                )
                sleep(sleep_seconds)
                continue

            # Tell optimizer about the new results:
            now = datetime.now()
            self.opt.tell(
                X.tolist(),
                y.tolist(),
                noise_vector=variances.tolist(),
                fit=True,
                replace=True,
                n_samples=self.tunecfg["n_samples"],
                gp_samples=self.tunecfg["gp_samples"],
                gp_burnin=self.tunecfg["gp_burnin"],
                progress=False,
            )
            later = datetime.now()
            difference = (later - now).total_seconds()
            self.logger.info(
                f"Calculating GP posterior and acquisition function finished in "
                f"{difference}s"
            )
            self.logger.info(f"Current GP kernel:\n{self.opt.gp.kernel_}")
            if self.opt.gp.chain_ is not None:
                self.logger.debug("Saving position and chain")
                self.save_state()

            # Ask optimizer for new configuration and insert jobs:
            new_x = self.opt.ask()
            # Alter engine json using Initstrings
            params = dict(zip(self.parameters, new_x))
            self.change_engine_config(self.experiment["engine"], params)
            with self.sessionmaker() as session:
                self.insert_jobs(session, new_x)
            self.logger.info("New jobs committed to database.")
            sleep(self.experiment.get("sleep_time", 60))

            if self.opt.gp.chain_ is not None:
                result_object = create_result(
                    Xi=X.tolist(),
                    yi=y.tolist(),
                    space=self.opt.space,
                    models=[self.opt.gp],
                )
                try:
                    opt_x, opt_y = expected_ucb(result_object)
                    self.logger.info(
                        f"Current optimum: "
                        f"{dict(zip(self.parameters, np.around(opt_x,4)))}"
                    )
                except ValueError:
                    self.logger.info(
                        "Current optimum: None (optimizer errored out :( )"
                    )

    def deactivate(self):
        raise NotImplementedError

    def reactivate(self):
        raise NotImplementedError
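
A minimal sketch of how the server might be started; the config file paths are hypothetical:

# Launch the tuning server with (hypothetical) experiment and DB config files:
if __name__ == "__main__":
    server = TuningServer(
        experiment_path="experiment.json",
        dbconfig_path="dbconfig.json",
    )
    server.run()  # polls the database, updates the model and submits new jobs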