Code Example #1
def atomic_write_in_dir(path, **kwargs):
    """Creates an atomic writer using a temporary file in the same directory
     as the destination file.
  """
    monkeypatch_os_link()
    writer = AtomicWriter(path, **kwargs)
    return writer._open(_get_fileobject_func(writer, os.path.dirname(path)))
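For orientation, a minimal usage sketch (the destination path is illustrative): in the atomicwrites implementation these helpers build on, the object returned by _open() is a context manager, so the temporary file replaces the destination only when the block exits cleanly.

# Hypothetical usage; the destination path is illustrative.
with atomic_write_in_dir("/tmp/demo/out.txt", overwrite=True) as f:
    f.write("payload")  # on a clean exit the temp file atomically replaces out.txt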
Code Example #2
File: model.py Project: AvniNargwani/todoman
    def save(self, list_=None):
        list_ = list_ or self.list
        path = os.path.join(list_.path, self.filename)
        assert path.startswith(list_.path)
        sequence = self.todo.get('SEQUENCE', 0)
        self.todo['SEQUENCE'] = sequence + 1
        if os.path.exists(path):
            # Update an existing entry:
            with open(path, 'rb') as f:
                cal = icalendar.Calendar.from_ical(f.read())
                for index, component in enumerate(cal.subcomponents):
                    if component.get('uid', None) == self.uid:
                        cal.subcomponents[index] = self.todo

            with AtomicWriter(path, overwrite=True).open() as f:
                f.write(cal.to_ical().decode("UTF-8"))
        else:
            # Save a new entry:
            c = icalendar.Calendar()
            c.add('prodid', 'io.barrera.todoman')
            c.add('version', '2.0')
            c.add_component(self.todo)

            with AtomicWriter(path).open() as f:
                f.write(c.to_ical().decode("UTF-8"))
Code Example #3
File: file_helpers.py Project: 1Thamer/openpilot0.6
def atomic_write_on_fs_tmp(path, **kwargs):
  """Creates an atomic writer using a temporary file in a temporary directory
     on the same filesystem as path.
  """
  # TODO(mgraczyk): This use of AtomicWriter relies on implementation details to set the temp
  #                 directory.
  writer = AtomicWriter(path, **kwargs)
  return writer._open(_get_fileobject_func(writer, get_tmpdir_on_same_filesystem(path)))
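A hedged call-site sketch (the destination path is illustrative). The temp directory must share the destination's filesystem because os.rename() cannot cross filesystems (it raises EXDEV), so the final atomic rename would otherwise fail.

# Hypothetical call; the destination path is illustrative.
with atomic_write_on_fs_tmp("/data/params/d/config", overwrite=True) as f:
    f.write("value")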
Code Example #4
def write_utf8_file_atomic(
    filename: str,
    utf8_data: str,
    private: bool = False,
) -> None:
    """Write a file and rename it into place using atomicwrites.

    Writes all or nothing.

    This function uses fsync under the hood. It should
    only be used to write mission-critical files, as
    fsync can block for a few seconds or longer if the
    disk is busy.

    Calling this function frequently will significantly
    degrade performance.
    """
    try:
        with AtomicWriter(filename, overwrite=True).open() as fdesc:
            if not private:
                os.fchmod(fdesc.fileno(), 0o644)
            fdesc.write(utf8_data)
    except OSError as error:
        _LOGGER.exception("Saving file failed: %s", filename)
        raise WriteError(error) from error
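A hedged call-site sketch; the path and payload are illustrative, and WriteError plus _LOGGER are assumed to be defined in the surrounding module:

# Illustrative only: persist serialized state, keeping the restrictive mode of
# the temp file (typically 0o600 from tempfile) instead of chmodding to 0o644.
write_utf8_file_atomic("/config/.storage/secrets", '{"token": "..."}', private=True)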
Code Example #5
    def _write_new(self, path):
        vtodo = self.serialize()

        c = icalendar.Calendar()
        c.add_component(vtodo)

        with AtomicWriter(path).open() as f:
            c.add('prodid', 'io.barrera.todoman')
            c.add('version', '2.0')
            f.write(c.to_ical().decode("UTF-8"))

        return vtodo
Code Example #6
    def _write_existing(self, path):
        original = self._read(path)
        vtodo = self.serialize(original)

        with open(path, 'rb') as f:
            cal = icalendar.Calendar.from_ical(f.read())
            for index, component in enumerate(cal.subcomponents):
                if component.get('uid', None) == self.todo.uid:
                    cal.subcomponents[index] = vtodo

        with AtomicWriter(path, overwrite=True).open() as f:
            f.write(cal.to_ical().decode("UTF-8"))
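Examples #5 and #6 are the create and update halves of the monolithic save() shown in examples #2 and #8; a hypothetical dispatcher (not taken verbatim from the project) would mirror the os.path.exists() branch used there:

    # Hypothetical glue method; mirrors the existence check in examples #2 and #8.
    def write(self, path):
        if os.path.exists(path):
            self._write_existing(path)
        else:
            self._write_new(path)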
Code Example #7
File: utils.py Project: thomasaarholt/adaptive
def save(fname, data, compress=True):
    fname = os.path.expanduser(fname)
    dirname = os.path.dirname(fname)
    if dirname:
        os.makedirs(dirname, exist_ok=True)

    blob = pickle.dumps(data, protocol=pickle.HIGHEST_PROTOCOL)
    if compress:
        blob = gzip.compress(blob)

    with AtomicWriter(fname, "wb", overwrite=True).open() as f:
        f.write(blob)
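A plausible inverse for round-tripping, sketched under the assumption that the file holds an (optionally gzip-compressed) pickle exactly as written above; this is not copied from the project:

# Hypothetical loader matching save(); gzip.open can read data written via
# gzip.compress, and pickle.load undoes pickle.dumps.
def load(fname, compress=True):
    fname = os.path.expanduser(fname)
    opener = gzip.open if compress else open
    with opener(fname, "rb") as f:
        return pickle.load(f)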
Code Example #8
    def save(self, todo):
        path = os.path.join(self.path, todo.filename)

        if os.path.exists(path):
            # Update an existing entry:
            with open(path, 'rb') as f:
                cal = icalendar.Calendar.from_ical(f.read())
                for index, component in enumerate(cal.subcomponents):
                    if component.get('uid', None) == todo.uid:
                        cal.subcomponents[index] = todo.todo

            with AtomicWriter(path, overwrite=True).open() as f:
                f.write(cal.to_ical().decode("UTF-8"))
        else:
            # Save a new entry:
            c = icalendar.Calendar()
            c.add('prodid', 'io.barrera.todoman')
            c.add('version', '2.0')
            c.add_component(todo.todo)

            with AtomicWriter(path).open() as f:
                f.write(c.to_ical().decode("UTF-8"))
Code Example #9
File: cli.py Project: gekkehenker/chess-tuning-tools
def local(  # noqa: C901
    tuning_config,
    acq_function="mes",
    acq_function_samples=1,
    confidence=0.9,
    data_path=None,
    gp_burnin=5,
    gp_samples=300,
    gp_initial_burnin=100,
    gp_initial_samples=300,
    logfile="log.txt",
    n_initial_points=30,
    n_points=500,
    plot_every=5,
    plot_path="plots",
    random_seed=0,
    result_every=5,
    resume=True,
    verbose=False,
):
    """Run a local tune.

    Parameters defined in the `tuning_config` file always take precedence.
    """
    json_dict = json.load(tuning_config)
    settings, commands, fixed_params, param_ranges = load_tuning_config(json_dict)
    log_level = logging.DEBUG if verbose else logging.INFO
    log_format = logging.Formatter("%(asctime)s %(levelname)-8s %(message)s")
    root_logger = logging.getLogger()
    root_logger.setLevel(log_level)
    file_logger = logging.FileHandler(settings.get("logfile", logfile))
    file_logger.setFormatter(log_format)
    root_logger.addHandler(file_logger)
    console_logger = logging.StreamHandler(sys.stdout)
    console_logger.setFormatter(log_format)
    root_logger.addHandler(console_logger)
    logging.debug(f"Got the following tuning settings:\n{json_dict}")

    # 1. Create seed sequence
    ss = np.random.SeedSequence(settings.get("random_seed", random_seed))
    # 2. Create kernel
    # 3. Create optimizer
    random_state = np.random.RandomState(np.random.MT19937(ss.spawn(1)[0]))
    opt = Optimizer(
        dimensions=list(param_ranges.values()),
        n_points=settings.get("n_points", n_points),
        n_initial_points=settings.get("n_initial_points", n_initial_points),
        # gp_kernel=kernel,  # TODO: Let user pass in different kernels
        gp_kwargs=dict(normalize_y=True),
        # gp_priors=priors,  # TODO: Let user pass in priors
        acq_func=settings.get("acq_function", acq_function),
        acq_func_kwargs=dict(alpha="inf", n_thompson=20),
        random_state=random_state,
    )
    X = []
    y = []
    noise = []
    iteration = 0

    # 3.1 Resume from existing data:
    if data_path is None:
        data_path = "data.npz"
    if resume:
        path = pathlib.Path(data_path)
        if path.exists():
            with np.load(path) as importa:
                X = importa["arr_0"].tolist()
                y = importa["arr_1"].tolist()
                noise = importa["arr_2"].tolist()
            if len(X[0]) != opt.space.n_dims:
                logging.error(
                    "The number of parameters does not match the number of "
                    "dimensions. Rename the existing data file or ensure that the "
                    "parameter ranges are correct."
                )
                sys.exit(1)
            reduction_needed, X_reduced, y_reduced, noise_reduced = reduce_ranges(
                X, y, noise, opt.space
            )
            if reduction_needed:
                backup_path = path.parent / (
                    path.stem + f"_backup_{int(time.time())}" + path.suffix
                )
                logging.warning(
                    f"The parameter ranges are smaller than the existing data. "
                    f"Some points will have to be discarded. "
                    f"The original {len(X)} data points will be saved to "
                    f"{backup_path}"
                )
                np.savez_compressed(
                    backup_path, np.array(X), np.array(y), np.array(noise)
                )
                X = X_reduced
                y = y_reduced
                noise = noise_reduced

            iteration = len(X)
            logging.info(
                f"Importing {iteration} existing datapoints. This could take a while..."
            )
            opt.tell(
                X,
                y,
                noise_vector=noise,
                gp_burnin=settings.get("gp_initial_burnin", gp_initial_burnin),
                gp_samples=settings.get("gp_initial_samples", gp_initial_samples),
                n_samples=settings.get("n_samples", 1),
                progress=True,
            )
            logging.info("Importing finished.")

    # 4. Main optimization loop:
    while True:
        logging.info("Starting iteration {}".format(iteration))
        result_every_n = settings.get("result_every", result_every)
        if (
            result_every_n > 0
            and iteration % result_every_n == 0
            and opt.gp.chain_ is not None
        ):
            result_object = create_result(Xi=X, yi=y, space=opt.space, models=[opt.gp])
            try:
                best_point, best_value = expected_ucb(result_object, alpha=0.0)
                best_point_dict = dict(zip(param_ranges.keys(), best_point))
                logging.info(f"Current optimum:\n{best_point_dict}")
                logging.info(f"Estimated value: {best_value}")
                confidence_val = settings.get("confidence", confidence)
                confidence_out = confidence_intervals(
                    optimizer=opt,
                    param_names=list(param_ranges.keys()),
                    hdi_prob=confidence_val,
                    opt_samples=1000,
                    multimodal=False,
                )
                logging.info(
                    f"{confidence_val*100}% confidence intervals:\n{confidence_out}"
                )
            except ValueError:
                logging.info(
                    "Computing current optimum was not successful. "
                    "This can happen in rare cases and running the "
                    "tuner again usually works."
                )
        plot_every_n = settings.get("plot_every", plot_every)
        if (
            plot_every_n > 0
            and iteration % plot_every_n == 0
            and opt.gp.chain_ is not None
        ):
            logging.getLogger("matplotlib.font_manager").disabled = True
            if opt.space.n_dims == 1:
                logging.warning(
                    "Plotting for only 1 parameter is not supported yet."
                )
            else:
                logging.debug("Starting to compute the next plot.")
                result_object = create_result(
                    Xi=X, yi=y, space=opt.space, models=[opt.gp]
                )
                plt.style.use("dark_background")
                fig, ax = plt.subplots(
                    nrows=opt.space.n_dims,
                    ncols=opt.space.n_dims,
                    figsize=(3 * opt.space.n_dims, 3 * opt.space.n_dims),
                )
                fig.patch.set_facecolor("#36393f")
                for i in range(opt.space.n_dims):
                    for j in range(opt.space.n_dims):
                        ax[i, j].set_facecolor("#36393f")
                timestr = time.strftime("%Y%m%d-%H%M%S")
                plot_objective(
                    result_object, dimensions=list(param_ranges.keys()), fig=fig, ax=ax
                )
                plotpath = pathlib.Path(settings.get("plot_path", plot_path))
                plotpath.mkdir(parents=True, exist_ok=True)
                full_plotpath = plotpath / f"{timestr}-{iteration}.png"
                plt.savefig(
                    full_plotpath,
                    pad_inches=0.1,
                    dpi=300,
                    bbox_inches="tight",
                    facecolor="#36393f",
                )
                logging.info(f"Saving a plot to {full_plotpath}.")
                plt.close(fig)
        point = opt.ask()
        point_dict = dict(zip(param_ranges.keys(), point))
        logging.info("Testing {}".format(point_dict))

        engine_json = prepare_engines_json(commands=commands, fixed_params=fixed_params)
        logging.debug(f"engines.json is prepared:\n{engine_json}")
        write_engines_json(engine_json, point_dict)
        logging.info("Start experiment")
        now = datetime.now()
        out_exp, out_exp_err = run_match(**settings)
        later = datetime.now()
        difference = (later - now).total_seconds()
        logging.info(f"Experiment finished ({difference}s elapsed).")
        logging.debug(f"Raw result:\n{out_exp}\n{out_exp_err}")

        score, error = parse_experiment_result(out_exp, **settings)
        logging.info("Got score: {} +- {}".format(score, error))
        logging.info("Updating model")
        while True:
            try:
                now = datetime.now()
                # We fetch kwargs manually here to avoid collisions:
                n_samples = settings.get("acq_function_samples", acq_function_samples)
                gp_burnin = settings.get("gp_burnin", gp_burnin)
                gp_samples = settings.get("gp_samples", gp_samples)
                if opt.gp.chain_ is None:
                    gp_burnin = settings.get("gp_initial_burnin", gp_initial_burnin)
                    gp_samples = settings.get("gp_initial_samples", gp_initial_samples)
                opt.tell(
                    point,
                    score,
                    n_samples=n_samples,
                    gp_samples=gp_samples,
                    gp_burnin=gp_burnin,
                )
                later = datetime.now()
                difference = (later - now).total_seconds()
                logging.info(f"GP sampling finished ({difference}s)")
                logging.debug(f"GP kernel: {opt.gp.kernel_}")
            except ValueError:
                logging.warning(
                    "Error encountered during fitting. Trying to sample chain a bit. "
                    "If this problem persists, restart the tuner to reinitialize."
                )
                opt.gp.sample(n_burnin=5, priors=opt.gp_priors)
            else:
                break
        X.append(point)
        y.append(score)
        noise.append(error)
        iteration = len(X)

        with AtomicWriter(data_path, mode="wb", overwrite=True).open() as f:
            np.savez_compressed(f, np.array(X), np.array(y), np.array(noise))
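The atomic npz checkpoint written here is the same file the resume block near the top of the function reads back; a minimal round-trip sketch (array order as saved above):

# Reload sketch matching the save order above: X, y, noise.
with np.load("data.npz") as data:
    X = data["arr_0"].tolist()
    y = data["arr_1"].tolist()
    noise = data["arr_2"].tolist()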
Code Example #10
async def websocket_create(hass, connection, msg):
    action = msg["action"]
    ext = msg["ext"]
    if ext not in ["yaml", "py", "json", "conf", "js", "txt", "log", "css", "all"]:
        ext = "yaml"

    def extok(e):
        if len(e) < 2:
            return False
        return ext == 'all' or e.endswith("." + ext)

    def rec(p, q):
        r = [
            f for f in os.listdir(p) if os.path.isfile(os.path.join(p, f)) and
            extok(f)
        ]
        for j in r:
            p = j if q == '' else os.path.join(q, j)
            listyaml.append(p)

    def drec(r, s):
        for d in os.listdir(r):
            v = os.path.join(r, d)
            if os.path.isdir(v):
                p = d if s == '' else os.path.join(s, d)
                if p.count(os.sep) < msg["depth"] and (ext == 'all' or p != 'custom_components'):
                    rec(v, p)
                    drec(v, p)

    yamlname = msg["file"].replace("../", "/").strip('/')

    if not extok(msg["file"]):
        yamlname = "temptest."+ext
        
    fullpath = hass.config.path(yamlname)
    if action == 'load':
        _LOGGER.info('Loading ' + fullpath)
        content = ''
        res = 'Loaded'
        try:
            with open(fullpath, encoding="utf-8") as fdesc:
                content = fdesc.read()
        except Exception:
            res = 'Reading Failed'
            _LOGGER.exception("Reading failed: %s", fullpath)
        finally:
            connection.send_result(
                msg["id"],
                {'msg': res + ': ' + fullpath, 'file': yamlname, 'data': content, 'ext': ext}
            )

    elif action == 'save':
        _LOGGER.info('Saving ' + fullpath)
        content = msg["data"]
        res = "Saved"
        try:
            dirnm = os.path.dirname(fullpath)
            if not os.path.isdir(dirnm):
                os.makedirs(dirnm, exist_ok=True)
            try:
                # Record the existing mode so it survives the atomic replace:
                mode = os.stat(fullpath).st_mode
            except OSError:
                mode = 0o666
            with AtomicWriter(fullpath, overwrite=True).open() as fdesc:
                fdesc.write(content)
            # AtomicWriter renames a temp file into place, creating a new inode,
            # so reapply the original mode afterwards:
            with open(fullpath, 'a') as fdesc:
                try:
                    os.fchmod(fdesc.fileno(), mode)
                except OSError:
                    pass
        except Exception:
            res = "Saving Failed"
            _LOGGER.exception(res + ": %s", fullpath)
        finally:
            connection.send_result(
                msg["id"],
                {'msg': res + ': ' + fullpath}
            )

    elif action == 'list':
        dirnm = os.path.dirname(hass.config.path(yamlname))
        listyaml = []
        rec(dirnm, '')
        if msg["depth"] > 0:
            drec(dirnm, '')
        if len(listyaml) < 1:
            listyaml = ['list_error.' + ext]
        connection.send_result(
            msg["id"],
            {'msg': str(len(listyaml)) + ' File(s)', 'file': listyaml, 'ext': ext}
        )
Code Example #11
File: cli.py Project: Claes1981/chess-tuning-tools
def local(  # noqa: C901
    tuning_config,
    acq_function="mes",
    acq_function_samples=1,
    acq_function_lcb_alpha=1.96,
    confidence=0.9,
    data_path=None,
    gp_burnin=5,
    gp_samples=300,
    gp_initial_burnin=100,
    gp_initial_samples=300,
    gp_signal_prior_scale=4.0,
    gp_noise_prior_scale=0.0006,
    gp_lengthscale_prior_lb=0.1,
    gp_lengthscale_prior_ub=0.5,
    normalize_y=True,
    noise_scaling_coefficient=1,
    logfile="log.txt",
    n_initial_points=16,
    n_points=500,
    plot_every=1,
    plot_path="plots",
    plot_on_resume=False,
    random_seed=0,
    result_every=1,
    resume=True,
    fast_resume=True,
    model_path="model.pkl",
    point=None,
    reset=False,
    verbose=0,
    warp_inputs=True,
    rounds=10,
):
    """Run a local tune.

    Parameters defined in the `tuning_config` file always take precedence.
    """
    json_dict = json.load(tuning_config)
    settings, commands, fixed_params, param_ranges = load_tuning_config(json_dict)
    root_logger = setup_logger(
        verbose=verbose, logfile=settings.get("logfile", logfile)
    )
    root_logger.debug(f"Got the following tuning settings:\n{json_dict}")
    root_logger.debug(
        f"Acquisition function: {acq_function}, "
        f"Acquisition function samples: {acq_function_samples}, "
        f"Acquisition function lcb alpha: {acq_function_lcb_alpha}, "
        f"GP burnin: {gp_burnin}, GP samples: {gp_samples}, "
        f"GP initial burnin: {gp_initial_burnin}, "
        f"GP initial samples: {gp_initial_samples}, "
        f"GP signal prior scale: {gp_signal_prior_scale}, "
        f"GP noise prior scale: {gp_noise_prior_scale}, "
        f"GP lengthscale prior lower bound: {gp_lengthscale_prior_lb}, "
        f"GP lengthscale prior upper bound: {gp_lengthscale_prior_ub}, "
        f"Warp inputs: {warp_inputs}, Normalize y: {normalize_y}, "
        f"Noise scaling coefficient: {noise_scaling_coefficient}, "
        f"Initial points: {n_initial_points}, Next points: {n_points}, "
        f"Random seed: {random_seed}"
    )
    root_logger.debug(
        f"Chess Tuning Tools version: "
        f"{importlib.metadata.version('chess-tuning-tools')}, "
        f"Bayes-skopt version: {importlib.metadata.version('bask')}, "
        f"Scikit-optimize version: {importlib.metadata.version('scikit-optimize')}, "
        f"Scikit-learn version: {importlib.metadata.version('scikit-learn')}, "
        f"SciPy version: {importlib.metadata.version('scipy')}"
    )

    # Initialize/import data structures:
    if data_path is None:
        data_path = "data.npz"
    intermediate_data_path = data_path.replace(".", "_intermediate.", 1)
    try:
        X, y, noise, iteration, round, counts_array, point = initialize_data(
            parameter_ranges=list(param_ranges.values()),
            resume=resume,
            data_path=data_path,
            intermediate_data_path=intermediate_data_path,
        )
    except ValueError:
        root_logger.error(
            "The number of parameters does not match the number of "
            "dimensions. Rename the existing data file or ensure that the "
            "parameter ranges are correct."
        )
        sys.exit(1)

    # Initialize Optimizer object and if applicable, resume from existing
    # data/optimizer:
    gp_priors = create_priors(
        n_parameters=len(param_ranges),
        signal_scale=settings.get("gp_signal_prior_scale", gp_signal_prior_scale),
        lengthscale_lower_bound=settings.get(
            "gp_lengthscale_prior_lb", gp_lengthscale_prior_lb
        ),
        lengthscale_upper_bound=settings.get(
            "gp_lengthscale_prior_ub", gp_lengthscale_prior_ub
        ),
        noise_scale=settings.get("gp_noise_prior_scale", gp_noise_prior_scale),
    )
    opt = initialize_optimizer(
        X=X,
        y=y,
        noise=noise,
        parameter_ranges=list(param_ranges.values()),
        noise_scaling_coefficient=noise_scaling_coefficient,
        random_seed=settings.get("random_seed", random_seed),
        warp_inputs=settings.get("warp_inputs", warp_inputs),
        normalize_y=settings.get("normalize_y", normalize_y),
        n_points=settings.get("n_points", n_points),
        n_initial_points=settings.get("n_initial_points", n_initial_points),
        acq_function=settings.get("acq_function", acq_function),
        acq_function_samples=settings.get("acq_function_samples", acq_function_samples),
        acq_function_lcb_alpha=settings.get(
            "acq_function_lcb_alpha", acq_function_lcb_alpha
        ),
        resume=resume,
        fast_resume=fast_resume,
        model_path=model_path,
        gp_initial_burnin=settings.get("gp_initial_burnin", gp_initial_burnin),
        gp_initial_samples=settings.get("gp_initial_samples", gp_initial_samples),
        gp_priors=gp_priors,
    )

    is_first_iteration_after_program_start = True
    # Main optimization loop:
    while True:
        if round == 0:
            root_logger.info("Starting iteration {}".format(iteration))
        else:
            root_logger.info("Resuming iteration {}".format(iteration))

        # If a model has been fit, print/plot results so far:
        if len(y) > 0 and opt.gp.chain_ is not None:
            result_object = create_result(Xi=X, yi=y, space=opt.space, models=[opt.gp])
            result_every_n = settings.get("result_every", result_every)
            if result_every_n > 0 and iteration % result_every_n == 0:
                print_results(
                    optimizer=opt,
                    result_object=result_object,
                    parameter_names=list(param_ranges.keys()),
                    confidence=settings.get("confidence", confidence),
                )
            plot_every_n = settings.get("plot_every", plot_every)
            if (
                plot_every_n > 0
                and iteration % plot_every_n == 0
                and (not is_first_iteration_after_program_start or plot_on_resume)
            ):
                plot_results(
                    optimizer=opt,
                    result_object=result_object,
                    plot_path=settings.get("plot_path", plot_path),
                    parameter_names=list(param_ranges.keys()),
                )

        if point is None:
            round = 0  # If previous tested point is not present, start over iteration.
            counts_array = np.array([0, 0, 0, 0, 0])
        if round == 0:
            point = opt.ask()  # Ask optimizer for next point.
        point_dict = dict(zip(param_ranges.keys(), point))
        root_logger.info("Testing {}".format(point_dict))
        if len(y) > 0 and opt.gp.chain_ is not None:
            testing_current_value = opt.gp.predict(opt.space.transform([point]))
            with opt.gp.noise_set_to_zero():
                _, testing_current_std = opt.gp.predict(
                    opt.space.transform([point]), return_std=True
                )
            root_logger.debug(
                f"Predicted Elo: {np.around(-testing_current_value[0] * 100, 4)} +- "
                f"{np.around(testing_current_std * 100, 4).item()}"
            )
            confidence_mult = erfinv(confidence) * np.sqrt(2)
            lower_bound = np.around(
                -testing_current_value * 100
                - confidence_mult * testing_current_std * 100,
                4,
            ).item()
            upper_bound = np.around(
                -testing_current_value * 100
                + confidence_mult * testing_current_std * 100,
                4,
            ).item()
            root_logger.debug(
                f"{confidence * 100}% confidence interval of the Elo value: "
                f"({lower_bound}, {upper_bound})"
            )
        if round == 0:
            root_logger.info("Start experiment")
        else:
            root_logger.info("Continue experiment")

        # Run experiment:
        now = datetime.now()
        #settings["debug_mode"] = settings.get(
            #"debug_mode", False if verbose <= 1 else True
        #)

        while round < settings.get("rounds", rounds):
            round += 1

            if round > 1:
                root_logger.debug(
                    f"WW, WD, WL/DD, LD, LL experiment counts: {counts_array}"
                )
                score, error_variance = counts_to_penta(counts=counts_array)
                root_logger.info(
                    "Experiment Elo so far: {} +- {}".format(
                        -score * 100, np.sqrt(error_variance) * 100
                    )
                )

            root_logger.debug(f"Round: {round}")
            settings, commands, fixed_params, param_ranges = load_tuning_config(
                json_dict
            )

            # Prepare engines.json file for cutechess-cli:
            engine_json = prepare_engines_json(
                commands=commands, fixed_params=fixed_params
            )
            root_logger.debug(f"engines.json is prepared:\n{engine_json}")
            write_engines_json(engine_json, point_dict)
            out_exp = []
            out_all = []
            for output_line in run_match(
                **settings, tuning_config_name=tuning_config.name
            ):
                line = output_line.rstrip()
                is_debug = is_debug_log(line)
                if is_debug and verbose > 2:
                    root_logger.debug(line)
                if not is_debug:
                    out_exp.append(line)
                out_all.append(line)
            check_log_for_errors(cutechess_output=out_all)
            out_exp = "\n".join(out_exp)
            (
                match_score,
                match_error_variance,
                match_counts_array,
            ) = parse_experiment_result(out_exp, **settings)

            counts_array += match_counts_array
            with AtomicWriter(
                intermediate_data_path, mode="wb", overwrite=True
            ).open() as f:
                np.savez_compressed(f, np.array(round), counts_array, point)

        later = datetime.now()
        difference = (later - now).total_seconds()
        root_logger.info(f"Experiment finished ({difference}s elapsed).")

        # Parse cutechess-cli output and report results (Elo and standard deviation):
        root_logger.debug(f"WW, WD, WL/DD, LD, LL experiment counts: {counts_array}")
        score, error_variance = counts_to_penta(counts=counts_array)
        root_logger.info(
            "Got Elo: {} +- {}".format(-score * 100, np.sqrt(error_variance) * 100)
        )
        X.append(point)
        y.append(score)
        noise.append(error_variance)

        # Update data structures and persist to disk:
        with AtomicWriter(data_path, mode="wb", overwrite=True).open() as f:
            np.savez_compressed(f, np.array(X), np.array(y), np.array(noise))
        with AtomicWriter(model_path, mode="wb", overwrite=True).open() as f:
            dill.dump(opt, f)
        round = 0
        counts_array = np.array([0, 0, 0, 0, 0])
        with AtomicWriter(
            intermediate_data_path, mode="wb", overwrite=True
        ).open() as f:
            np.savez_compressed(f, np.array(round), counts_array, point)

        # Update model with the new data:
        if reset:
            root_logger.info("Deleting the model and generating a new one.")
            # Reset optimizer.
            del opt
            if acq_function == "rand":
                current_acq_func = random.choice(["mes", "pvrs", "ei", "lcb", "ts"])
                root_logger.debug(
                    f"Current random acquisition function: {current_acq_func}"
                )
            else:
                current_acq_func = acq_function
            opt = initialize_optimizer(
                X=X,
                y=y,
                noise=noise,
                parameter_ranges=list(param_ranges.values()),
                noise_scaling_coefficient=noise_scaling_coefficient,
                random_seed=settings.get("random_seed", random_seed),
                warp_inputs=settings.get("warp_inputs", warp_inputs),
                normalize_y=settings.get("normalize_y", normalize_y),
                n_points=settings.get("n_points", n_points),
                n_initial_points=settings.get("n_initial_points", n_initial_points),
                acq_function=current_acq_func,
                acq_function_samples=settings.get(
                    "acq_function_samples", acq_function_samples
                ),
                acq_function_lcb_alpha=settings.get(
                    "acq_function_lcb_alpha", acq_function_lcb_alpha
                ),
                resume=True,
                fast_resume=False,
                model_path=None,
                gp_initial_burnin=settings.get("gp_burnin", gp_burnin),
                gp_initial_samples=settings.get("gp_samples", gp_samples),
            )
        else:
            root_logger.info("Updating model.")
            if acq_function == "rand":
                opt.acq_func = ACQUISITION_FUNC[
                    random.choice(["mes", "pvrs", "ei", "lcb", "ts"])
                ]
                root_logger.debug(
                    f"Current random acquisition function: {opt.acq_func}"
                )
            update_model(
                optimizer=opt,
                point=point,
                score=score,
                variance=error_variance,
                noise_scaling_coefficient=noise_scaling_coefficient,
                acq_function_samples=settings.get(
                    "acq_function_samples", acq_function_samples
                ),
                acq_function_lcb_alpha=settings.get(
                    "acq_function_lcb_alpha", acq_function_lcb_alpha
                ),
                gp_burnin=settings.get("gp_burnin", gp_burnin),
                gp_samples=settings.get("gp_samples", gp_samples),
                gp_initial_burnin=settings.get("gp_initial_burnin", gp_initial_burnin),
                gp_initial_samples=settings.get(
                    "gp_initial_samples", gp_initial_samples
                ),
            )

        iteration = len(X)
        is_first_iteration_after_program_start = False

        with AtomicWriter(model_path, mode="wb", overwrite=True).open() as f:
            dill.dump(opt, f)
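The optimizer itself is persisted with dill after every model update; a hedged sketch of the reload half (the actual resume path goes through initialize_optimizer with fast_resume, which is not shown here):

# Illustrative reload of the dill-persisted optimizer; mirrors the dump above.
import dill

with open("model.pkl", "rb") as f:
    opt = dill.load(f)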