Example #1
def extract_between_times(
    t_start: AcceptableTimeStamp,
    t_end: AcceptableTimeStamp,
    keys: Sequence[str] = None,
    names: Dict[str, str] = None
) -> tfs.TfsDataFrame:
    """
    Extracts data for keys between ``t_start`` and ``t_end`` from ``Timber``.

    Args:
        t_start (AcceptableTimeStamp): starting time as CERNDatetime or timestamp.
        t_end (AcceptableTimeStamp): end time as CERNDatetime or timestamp.
        keys (Sequence[str]): the variable names to extract data for.
        names (Dict[str, str]): dict mapping keys to column names.

    Returns:
        Extracted data in a ``TfsDataFrame``.
    """
    with suppress(TypeError):
        t_start: CERNDatetime = CERNDatetime.from_timestamp(t_start)

    with suppress(TypeError):
        t_end: CERNDatetime = CERNDatetime.from_timestamp(t_end)

    db = pytimber.LoggingDB(source="nxcals")
    if keys is None:
        keys = get_tune_and_coupling_variables(db)

    # Attempt getting data from NXCALS, which can sometimes need a few retries (yay NXCALS)
    # If Java gives a feign.RetryableException, retry up to MAX_RETRIES times.
    for tries in range(MAX_RETRIES + 1):
        try:
            # We use timestamps to avoid any confusion with local time
            extract_dict = db.get(keys, t_start.timestamp(), t_end.timestamp())
        except java.lang.IllegalStateException as e:
            raise IOError("Could not get data from Timber, user probably has no access to NXCALS") from e
        except JException as e:  # Might be a case for retries
            if "RetryableException" in str(e) and (tries + 1) < MAX_RETRIES:
                LOG.warning(f"Could not get data from Timber! Trial no {tries + 1} / {MAX_RETRIES}")
                continue  # will go to the next iteratoin of the loop, so retry
            raise IOError("Could not get data from timber!") from e
        else:
            break

    out_df = tfs.TfsDataFrame()
    for key in keys:
        if extract_dict[key][1][0].size > 1:
            raise NotImplementedError("Multidimensional variables are not implemented yet")

        data = np.asarray(extract_dict[key]).transpose()
        column = key if names is None else names.get(key, key)
        key_df = tfs.TfsDataFrame(data, columns=[TIME_COL, column]).set_index(TIME_COL)
        out_df = out_df.merge(key_df, how="outer", left_index=True, right_index=True)

    out_df.index = [CERNDatetime.from_timestamp(i) for i in out_df.index]
    out_df.headers[START_TIME] = t_start.cern_utc_string()
    out_df.headers[END_TIME] = t_end.cern_utc_string()
    return out_df
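
# Usage sketch (added, not part of the source): a hedged example of calling the
# function above. The Timber variable name and the timestamps are hypothetical;
# access to NXCALS is assumed.
tune_df = extract_between_times(
    t_start=1577836800.0,  # plain epoch timestamps are accepted ...
    t_end=1577840400.0,    # ... and converted to CERNDatetime above
    keys=["LHC.BQBBQ.CONTINUOUS.B1:TUNE_H"],            # hypothetical key
    names={"LHC.BQBBQ.CONTINUOUS.B1:TUNE_H": "TUNEX"},  # rename the column
)
print(tune_df.headers, tune_df.head())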
Example #2
File: filters.py Project: lmalina/omc3
def _get_filtered_generic(col: str, meas: pd.DataFrame, model: pd.DataFrame, opt: DotDict) -> tfs.TfsDataFrame:
    common_bpms = meas.index.intersection(model.index)
    meas = meas.loc[common_bpms, :]
    new = tfs.TfsDataFrame(index=common_bpms)
    new[VALUE] = meas.loc[:, col].to_numpy()
    new[ERROR] = meas.loc[:, f"{ERR}{col}"].to_numpy()
    new[WEIGHT] = (
        _get_errorbased_weights(col, opt.weights[col], meas.loc[:, f"{ERR}{DELTA}{col}"])
        if opt.use_errorbars
        else opt.weights[col]
    )
    # filter cuts
    error_filter = meas.loc[:, f"{ERR}{DELTA}{col}"].to_numpy() < opt.errorcut[col]
    model_filter = np.abs(meas.loc[:, f"{DELTA}{col}"].to_numpy()) < opt.modelcut[col]
    # if opt.automatic_model_cut:  # TODO automated model cut
    #     model_filter = _get_smallest_data_mask(np.abs(meas.loc[:, f"{DELTA}{col}"].to_numpy()), portion=0.95)
    if f"{PHASE}" in col:
        new[NAME2] = meas.loc[:, NAME2].to_numpy()
        second_bpm_in = np.in1d(new.loc[:, NAME2].to_numpy(), new.index.to_numpy())
        good_bpms = error_filter & model_filter & second_bpm_in
        good_bpms[-1] = False
    else:
        good_bpms = error_filter & model_filter
    LOG.debug(f"Number of BPMs with {col}: {np.sum(good_bpms)}")
    return new.loc[good_bpms, :]
Example #3
def merge_tfs(directories: List[pathlib.Path],
              filename: str) -> tfs.TfsDataFrame:
    """
    Merge different kmod analysis results from a list of directories into a single `TfsDataFrame`.

    Args:
        directories (List[pathlib.Path]): list of PosixPath objects to directories holding TFS
            files with the results of kmod analysis.
        filename (str): name of the TFS files to look for in the provided directories

    Returns:
        A `TfsDataFrame` combining all the loaded files from the provided directories.
    """
    # Combine the data into one tfs
    new_tfs = tfs.TfsDataFrame()
    headers = {}
    for d in sorted(directories):
        loaded_tfs = tfs.read_tfs(d / filename)
        headers.update(loaded_tfs.headers)  # old headers are lost in `append`
        new_tfs = new_tfs.append(loaded_tfs, ignore_index=True)
    new_tfs.headers = headers
    new_tfs = new_tfs.set_index(NAME)

    # drop BPMWK and check tfs
    new_tfs = new_tfs.loc[~new_tfs.index.str.startswith(BPMWK), :]
    if not new_tfs.index.is_unique:
        raise KeyError(
            "Found duplicated entries: "
            f"{', '.join(set(new_tfs.index[new_tfs.index.duplicated()]))}.")
    return new_tfs
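
# Usage sketch (added, not part of the source): merging the kmod result files
# found in several analysis directories. The directory layout and file name
# below are assumptions for illustration only.
import pathlib

result_dirs = sorted(pathlib.Path("kmod_output").glob("ip*"))  # hypothetical layout
merged = merge_tfs(list(result_dirs), filename="results.tfs")  # hypothetical name
print(merged.headers)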
Example #4
File: analysis.py Project: pylhc/omc3
def analyse(magnet1_df, magnet2_df, opt, betastar_required):

    processed = []
    for magnet_df in (magnet1_df, magnet2_df):
        LOG.info(f'Analysing magnet {magnet_df.headers["QUADRUPOLE"]}')
        magnet_df = helper.add_tune_uncertainty(magnet_df, opt.tune_uncertainty)
        magnet_df = helper.clean_data(magnet_df, opt.no_autoclean)
        magnet_df = calc_tune(magnet_df)
        magnet_df = calc_k(magnet_df)
        magnet_df = get_av_beta(magnet_df)
        processed.append(magnet_df)
    # rebinding the loop variable alone would not propagate to the outer names
    magnet1_df, magnet2_df = processed

    LOG.info('Simplex to determine beta waist')
    results = {plane: get_beta_waist(magnet1_df, magnet2_df, opt, plane) for plane in PLANES}

    results_df = tfs.TfsDataFrame(
        columns=["LABEL", "TIME"],
        data=[[opt.label, datetime.datetime.now().strftime(formats.TIME)]])

    for plane in PLANES:
        results_df[f"{BETA}{WAIST}{plane}"] = results[plane][0]
        results_df[f"{ERR}{BETA}{WAIST}{plane}"] = results[plane][1]
        results_df[f"{WAIST}{plane}"] = results[plane][2]
        results_df[f"{ERR}{WAIST}{plane}"] = results[plane][3]

    if betastar_required:
        LOG.info('Calculate betastar')
        results_df = calc_betastar(opt, results_df, magnet1_df.headers['LSTAR'])

    instrument_beta_df = None  # avoid a NameError below when no instruments were found
    if opt.instruments_found:
        LOG.info('Calculate beta at instruments')
        instrument_beta_df = calc_beta_at_instruments(opt, results_df, magnet1_df, magnet2_df)

    return magnet1_df, magnet2_df, results_df, instrument_beta_df
Example #5
def create_normalized_dispersion(df_disp: pd.DataFrame, df_beta: pd.DataFrame,
                                 df_model: pd.DataFrame, parameter: str,
                                 headers: Dict):
    """ Creates normalized dispersion from pre-created dispersion and beta dataframes. """
    LOG.info(f"Creating fake normalized dispersion for {parameter}.")
    plane = parameter[-1]  # 'X'

    # Measurement
    df = tfs.TfsDataFrame(index=df_disp.index)
    disp = df_disp.loc[:, f"{DISP}{plane}"]
    disp_err = df_disp.loc[:, f"{ERR}{DISP}{plane}"]
    beta = df_beta.loc[:, f"{BETA}{plane}"]
    beta_err = df_beta.loc[:, f"{ERR}{BETA}{plane}"]

    inv_beta = 1 / beta
    df[parameter] = disp / np.sqrt(beta)
    df[f"{ERR}{parameter}"] = np.sqrt(0.25 * disp**2 * inv_beta**3 *
                                      beta_err**2 + inv_beta * disp_err**2)

    # Model
    df_model[f'{parameter}'] = df_disp[f"{DISP}{plane}{MDL}"] / np.sqrt(
        df_beta[f"{BETA}{plane}{MDL}"])
    df = append_model(df, df_model, parameter, plane)

    df.headers = headers.copy()
    output_name = f'{NORM_DISP_NAME}{plane.lower()}'
    return {output_name: df}
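
# Side note (added): the ERR column above follows from first-order error
# propagation of f = D / sqrt(beta):
#   df/dD    = 1 / sqrt(beta)
#   df/dbeta = -D / (2 * beta**(3/2))
# so sigma_f**2 = sigma_D**2 / beta + 0.25 * D**2 * sigma_beta**2 / beta**3,
# which is exactly the expression assigned to df[f"{ERR}{parameter}"].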
Example #6
File: model_diff.py Project: pylhc/omc3
def diff_twiss_parameters(
        model_a: tfs.TfsDataFrame,
        model_b: tfs.TfsDataFrame,
        parameters: Sequence[str] = None) -> tfs.TfsDataFrame:
    """Create a TfsDataFrame containing of the given parameters between
    model_a and model_b."""
    # preparation ---
    if parameters is None:
        parameters = model_a.columns.intersection(model_b.columns)

    index = model_a.index.intersection(model_b.index)
    model_a, model_b = model_a.loc[index, :], model_b.loc[index, :]

    # get diff dataframe ---
    with_tune = TUNE in parameters
    if with_tune:
        parameters = [p for p in parameters if p != TUNE]

    diff_df = tfs.TfsDataFrame(index=index,
                               columns=[f"{DELTA}{p}" for p in parameters])
    fun_map = _get_mapping()
    for parameter in parameters:
        diff_function = fun_map[parameter[:-1]]
        diff_df.loc[:, f"{DELTA}{parameter}"] = diff_function(
            model_a, model_b, parameter)

    if with_tune:
        for tune in (f"{TUNE}1", f"{TUNE}2"):
            diff_df.headers[f"{DELTA}{tune}"] = _tune_diff(
                model_a, model_b, tune)

    return diff_df
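
# Usage sketch (added, not part of the source): comparing two twiss files with
# the function above. The file names are hypothetical; indexing by NAME makes
# the element-wise intersection meaningful.
model_a = tfs.read("twiss_nominal.tfs", index="NAME")  # hypothetical file
model_b = tfs.read("twiss_errors.tfs", index="NAME")   # hypothetical file
diff_df = diff_twiss_parameters(model_a, model_b, parameters=["BETX", "BETY"])
# diff_df holds DELTABETX/DELTABETY columns; the tune deltas would land in
# the headers if TUNE columns were requested as well.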
Example #7
def generate_lin_files(model, tune, nattune, motion='_d', dpp=0.0, beam_direction=1):
    nbpms = len(model.index.to_numpy())
    lins = {}
    for plane in PLANES:
        lin = model.loc[:, ['NAME', 'S']]
        noise_freq_domain = NOISE / np.sqrt(NTURNS) / MAGIC_NUMBER
        lin['NOISE'] = noise_freq_domain
        lin['CO'] = dpp * model.loc[:, f"D{plane}{motion}"] + np.random.randn(nbpms) * (NOISE / np.sqrt(NTURNS))
        lin['CORMS'] = np.abs(np.random.randn(nbpms) * 3e-6 + 3e-6)  # TODO
        lin['PK2PK'] = 2 * (np.sqrt(model.loc[:, f"BET{plane}{motion}"] * ACTION) + 3 * NOISE)
        lin[f"TUNE{plane}"] = tune[plane] + ERRTUNE * np.random.randn(nbpms)
        lin[f"NATTUNE{plane}"] = nattune[plane] + (ERRTUNE / np.sqrt(NAT_OVER_DRV)) * np.random.randn(nbpms)
        lin[f"MU{plane}"] = np.remainder(model.loc[:, f"MU{plane}{motion}"]
                                         + dpp * model.loc[:, f"DMU{plane}{motion}"]
                                         + (noise_freq_domain / (2 * np.pi)) * np.random.randn(nbpms) + np.random.rand(), 1) * beam_direction
        lin[f"ERRMU{plane}"] = noise_freq_domain / (2 * np.pi)
        lin[f"AMP{plane}"] = np.sqrt(model.loc[:, f"BET{plane}{motion}"] * ACTION *
                                     (1 + dpp * np.sin(2 * np.pi * model.loc[:, f"PHI{plane}{motion}"])
                                      * model.loc[:, f"W{plane}{motion}"])) + noise_freq_domain * np.random.randn(nbpms)

        lin[f"NATMU{plane}"] = np.remainder(model.loc[:, f"MU{plane}{MOTION['free']}"]
                                            + (NAT_OVER_DRV * noise_freq_domain / (2 * np.pi)) * np.random.randn(nbpms) + np.random.rand(), 1) * beam_direction
        lin[f"NATAMP{plane}"] = NAT_OVER_DRV * np.sqrt(ACTION * model.loc[:, f"BET{plane}{MOTION['free']}"]) + noise_freq_domain * np.random.randn(nbpms)

        lin[f"PHASE{COUP[plane]}"] = np.remainder(model.loc[:, f"MU{OTHER[plane]}{motion}"] + dpp * model.loc[:, f"DMU{OTHER[plane]}{motion}"]
                                                  + (COUPLING * noise_freq_domain / (2 * np.pi)) * np.random.randn(nbpms) + np.random.rand(), 1) * beam_direction
        lin[f"AMP{COUP[plane]}"] = COUPLING * np.sqrt(ACTION *model.loc[:, f"BET{OTHER[plane]}{motion}"]
                                                  * (1 + dpp * np.sin(model.loc[:, f"PHI{OTHER[plane]}{motion}"]) * model.loc[:, f"W{OTHER[plane]}{motion}"])) + COUPLING * noise_freq_domain * np.random.randn(nbpms)

        # backwards compatibility with drive  TODO remove
        lin[f"AMP{plane}"] = lin.loc[:, f"AMP{plane}"].to_numpy() / 2
        lin[f"NATAMP{plane}"] = lin.loc[:, f"NATAMP{plane}"].to_numpy() / 2

        lins[plane] = tfs.TfsDataFrame(lin, headers=_get_header(tune, nattune, plane)).set_index("NAME")
    return lins
Example #8
def _joined_frames(input_files: dict) -> tfs.TfsDataFrame:
    """
    Merges spectrum data from the two planes from all the input files.

    Args:
        input_files (dict): `InputFiles` (dict) object containing frequency spectra files (linx/y) for each
            transverse plane (as keys).
    """
    joined_dfs = []
    assert len(input_files["X"]) == len(input_files["Y"])

    for i, (linx, liny) in enumerate(zip(input_files["X"], input_files["Y"])):
        linx = linx[COLS_TO_KEEP_X].copy()
        liny = liny[COLS_TO_KEEP_Y].copy()

        linx = linx.rename(columns=rename_col("X", i))
        liny = liny.rename(columns=rename_col("Y", i))

        merged_transverse_lins_df = pd.merge(
            left=linx,
            right=liny,
            on=[NAME],
            how="inner",
            sort=False,
        )
        joined_dfs.append(merged_transverse_lins_df)

    partial_merge = partial(pd.merge, how="inner", on=[NAME], sort=False)
    reduced = reduce(partial_merge, joined_dfs).set_index(NAME)
    reduced = reduced.rename(columns={
        f"{PHASE_ADV}X_X_0": f"{PHASE_ADV}X",
        f"{PHASE_ADV}Y_Y_0": f"{PHASE_ADV}Y"
    })
    return tfs.TfsDataFrame(reduced)
Example #9
def get_twiss_and_error_df(n_index, n_kmax, n_valmax):
    twiss_cols, err_cols = get_twiss_and_error_columns(n_kmax)
    data = np.random.rand(n_index, len(twiss_cols)) * n_valmax
    data[n_index // 2:, :] = -data[n_index // 2:, :]

    df_twiss = tfs.TfsDataFrame(data[:, :len(twiss_cols)],
                                index=list(string.ascii_uppercase[:n_index]),
                                columns=twiss_cols,
                                headers={
                                    "Just": "Some",
                                    "really": "nice",
                                    "header": 1111
                                })
    df_errors = tfs.TfsDataFrame(data[:, :len(err_cols)],
                                 index=list(string.ascii_uppercase[:n_index]),
                                 columns=err_cols)
    df_twiss[S] = np.linspace(0, n_valmax, n_index)
    df_errors[S] = df_twiss[S].copy()
    return df_twiss, df_errors
Example #10
def extract_between_times(t_start, t_end, keys=None, names=None):
    """
    Extracts data for keys between t_start and t_end from timber.

    Args:
        t_start: starting time in local time or timestamp.
        t_end: end time in local time or timestamp.
        keys: list of data to extract.
        names: dict to map keys to column names.

    Returns: tfs pandas dataframe.
    """
    db = pytimber.LoggingDB()
    if keys is None:
        keys = get_tune_and_coupling_variables(db)

    extract_dict = db.get(keys, t_start, t_end)

    out_df = tfs.TfsDataFrame()
    for key in keys:
        if extract_dict[key.upper()][1][0].size > 1:
            raise NotImplementedError(
                "Multidimensional variables are not implemented yet.")

        data = np.asarray(extract_dict[key.upper()]).transpose()
        col = key if names is None else names.get(key, key)  # guard against names=None

        key_df = tfs.TfsDataFrame(data, columns=[TIME_COL,
                                                 col]).set_index(TIME_COL)

        out_df = out_df.merge(key_df,
                              how="outer",
                              left_index=True,
                              right_index=True)

    out_df.index = [CERNDatetime.from_timestamp(i) for i in out_df.index]
    out_df.headers[START_TIME] = CERNDatetime.from_timestamp(
        t_start).cern_utc_string()
    out_df.headers[END_TIME] = CERNDatetime.from_timestamp(
        t_end).cern_utc_string()
    return out_df
Example #11
def _generate_jobs(basedir, jobid_mask, **kwargs) -> tfs.TfsDataFrame:
    """ Generates product matrix for job-values and stores it as TfsDataFrame. """
    LOG.debug("Creating Jobs")
    job_names, values_grid = get_jobs_and_values(jobid_mask, **kwargs)
    job_df = tfs.TfsDataFrame(
        headers={HEADER_BASEDIR: basedir},
        index=job_names,
        columns=list(kwargs.keys()),
        data=values_grid,
    )
    tfs.write(basedir / JOBSUMMARY_FILE, job_df, save_index=COLUMN_JOBID)
    return job_df
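
# Usage sketch (added, not part of the source): the keyword arguments become
# the columns of the job matrix and their value lists are expanded to a full
# product grid. All names below are hypothetical, and the base directory must
# exist since a job summary file is written into it.
import pathlib

job_df = _generate_jobs(
    pathlib.Path("study_dir"),      # hypothetical base directory
    jobid_mask="job.seed%(SEED)d",  # hypothetical job-id mask
    SEED=[1, 2, 3],
    TUNE=[62.31, 62.32],
)  # -> six jobs, one row per (SEED, TUNE) combination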
Example #12
def run_per_bunch(tbt_data, harpy_input):
    """
    Cleans data, analyses frequencies and searches for resonances.

    Args:
        tbt_data: single bunch TbtData
        harpy_input: Analysis settings

    Returns:
        Dictionary of TfsDataFrames per plane
    """
    model = None if harpy_input.model is None else tfs.read(harpy_input.model, index="NAME").loc[:, 'S']
    bpm_datas, usvs, lins, bad_bpms = {}, {}, {}, {}
    output_file_path = _get_output_path_without_suffix(harpy_input.outputdir, harpy_input.files)
    for plane in PLANES:
        bpm_data = _get_cut_tbt_matrix(tbt_data, harpy_input.turns, plane)
        bpm_data = _scale_to_meters(bpm_data, harpy_input.unit)
        bpm_data, usvs[plane], bad_bpms[plane], bpm_res = clean.clean(harpy_input, bpm_data, model)
        lins[plane], bpm_datas[plane] = _closed_orbit_analysis(bpm_data, model, bpm_res)

    tune_estimates = harpy_input.tunes if harpy_input.autotunes is None else frequency.estimate_tunes(
        harpy_input, usvs if harpy_input.clean else
        dict(X=clean.svd_decomposition(bpm_datas["X"], harpy_input.sing_val),
             Y=clean.svd_decomposition(bpm_datas["Y"], harpy_input.sing_val)))

    spectra = {}
    for plane in PLANES:
        with timeit(lambda spanned: LOGGER.debug(f"Time for harmonic_analysis: {spanned}")):
            harpy_results, spectra[plane], bad_bpms_summaries = frequency.harpy_per_plane(
                harpy_input, bpm_datas[plane], usvs[plane], tune_estimates, plane)
        if "bpm_summary" in harpy_input.to_write:
            bad_bpms[plane].extend(bad_bpms_summaries)
            _write_bad_bpms(output_file_path, plane, bad_bpms[plane])
        if "spectra" in harpy_input.to_write or "full_spectra" in harpy_input.to_write:
            _write_spectrum(output_file_path, plane, spectra[plane])
        lins[plane] = lins[plane].loc[harpy_results.index].join(harpy_results)
        if harpy_input.is_free_kick:
            lins[plane] = kicker.phase_correction(bpm_datas[plane], lins[plane], plane)

    measured_tunes = [lins["X"]["TUNEX"].mean(), lins["Y"]["TUNEY"].mean(),
                      lins["X"]["TUNEZ"].mean() if tune_estimates[2] > 0 else 0]

    for plane in PLANES:
        lins[plane] = lins[plane].join(frequency.find_resonances(
            measured_tunes, bpm_datas[plane].shape[1], plane, spectra[plane]))
        lins[plane] = _add_calculated_phase_errors(lins[plane])
        lins[plane] = _sync_phase(lins[plane], plane)
        lins[plane] = _rescale_amps_to_main_line_and_compute_noise(lins[plane], plane)
        lins[plane] = lins[plane].sort_values('S', axis=0, ascending=True)
        lins[plane] = tfs.TfsDataFrame(lins[plane], headers=_compute_headers(lins[plane], tbt_data.date))
        if "lin" in harpy_input.to_write:
            _write_lin_tfs(output_file_path, plane, lins[plane])
    return lins
Example #13
File: helper.py Project: tpersson/omc3
def bin_tunes_and_k(tune_dfs, k_df, magnet):
    # create bins centered around each time step in k, with width equal to half the distance to the next time step
    bins = np.append(
        (k_df['TIME'] - k_df.diff()['TIME'] / 2.).fillna(value=0).values,
        k_df['TIME'].iloc[-1])
    magnet_df = k_df.loc[:, ['K']]
    magnet_df['K'] = np.abs(magnet_df['K'].values)
    for plane in PLANES:
        magnet_df[f"{TUNE}{plane}"], magnet_df[
            f"{ERR}{TUNE}{plane}"] = return_mean_of_binned_data(
                bins, tune_dfs[plane])
    return tfs.TfsDataFrame(magnet_df, headers=headers_for_df(magnet, k_df))
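
# Side note (added): a minimal sketch of the binning above, assuming
# `return_mean_of_binned_data` averages the tune values that fall into each
# time bin. The bin edges sit halfway between consecutive K time stamps
# (with 0 as the first edge and the last time stamp as the last edge), so
# every K step collects the tune measurements closest to it in time.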
Example #14
def merge_two_plane_kick_dfs(df_x, df_y):
    df_xy = tfs.TfsDataFrame(
        pd.merge(df_x,
                 df_y,
                 how='inner',
                 left_index=True,
                 right_index=True,
                 suffixes=PLANES))
    if len(df_xy.index) != len(df_x.index) or len(df_xy.index) != len(
            df_y.index):
        raise IndexError(
            "Can't merge the two planes' kick-files as their indices seem to differ!"
        )
    df_xy.headers = df_x.headers
    df_xy.headers.update(df_y.headers)
    return df_xy
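
# Usage sketch (added, not part of the source): the file names are
# hypothetical, and PLANES is assumed to be ("X", "Y"), so overlapping
# columns get X/Y suffixes in the merge.
kick_x = tfs.read("kick_x.tfs", index="TIME")  # hypothetical kick file
kick_y = tfs.read("kick_y.tfs", index="TIME")  # hypothetical kick file
kick_xy = merge_two_plane_kick_dfs(kick_x, kick_y)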
Example #15
File: utils.py Project: tpersson/omc3
def _get_sussix_data(file_path, bpms, planes):
    bpm_dir = file_path.parent / 'BPM'
    files = {LIN: {}, AMPS: {}, FREQS: {}}
    for plane in planes:
        files[LIN][plane] = tfs.read(str(
            file_path.with_name(f'{file_path.name}_lin{plane.lower()}')),
                                     index=COL_NAME)
        for id_ in (FREQS, AMPS):
            files[id_][plane] = tfs.TfsDataFrame(columns=bpms)
        for bpm in bpms:
            with suppress(FileNotFoundError):
                df = tfs.read(str(bpm_dir / f'{bpm}.{plane.lower()}'))
                files[FREQS][plane][bpm] = df["FREQ"]
                files[AMPS][plane][bpm] = df["AMP"]
        for id_ in (FREQS, AMPS):
            files[id_][plane] = files[id_][plane].fillna(0)
    return files
Example #16
def get_pattern_twiss(
    madx: Madx, patterns: Sequence[str] = [""], columns: Sequence[str] = None, **kwargs,
) -> tfs.TfsDataFrame:
    """
    Extract the `TWISS` table for desired variables, and for certain elements matching a pattern.
    Additionally, the `SUMM` table is also returned in the form of the TfsDataFrame's headers dictionary.

    Warning:
        Although the `pattern` parameter should accept a regex, MAD-X does not implement actual regexes.
        Please refer to the MAD-X manual, section `Regular Expressions` for details on what is implemented
        in MAD-X itself.

    Args:
        madx (cpymad.madx.Madx): an instantiated cpymad Madx object.
        patterns (Sequence[str]): the different element patterns (such as `MQX` or `BPM`) to be applied to
            the command, which will determine the rows in the returned DataFrame. Defaults to [""] which
            will select all elements.
        columns (Sequence[str]): the variables to be returned, as columns in the DataFrame. Defaults to
            None, which will return all available columns.

    Keyword Args:
        Any keyword argument that can be given to the MAD-X TWISS command, such as `chrom`, `ripken`,
        `centre`, or starting coordinates with `betax`, `betay` etc.

    Returns:
        A TfsDataFrame with the selected columns for all elements matching the provided patterns,
        and the internal `summ` table as header dict.
    """
    logger.trace("Clearing 'TWISS' flag")
    madx.select(flag="twiss", clear=True)
    for pattern in patterns:
        logger.trace(f"Adding pattern {pattern} to 'TWISS' flag")
        madx.select(flag="twiss", pattern=pattern, column=columns)
    madx.twiss(**kwargs)

    logger.trace("Extracting relevant parts of the TWISS table")
    twiss_df = tfs.TfsDataFrame(madx.table.twiss.dframe().copy())
    twiss_df.headers = {var.upper(): madx.table.summ[var][0] for var in madx.table.summ}
    twiss_df = twiss_df[madx.table.twiss.selected_columns()].iloc[
        np.array(madx.table.twiss.selected_rows()).astype(bool)
    ]

    logger.trace("Clearing 'TWISS' flag")
    madx.select(flag="twiss", clear=True)
    return twiss_df
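
# Usage sketch (added, not part of the source): assumes a sequence and beam
# have already been set up in the cpymad instance; the pattern and columns
# below are illustrative only.
from cpymad.madx import Madx

madx = Madx()
# ... call madx.input(...) / madx.use(...) to set up machine and beam ...
bpms_df = get_pattern_twiss(madx, patterns=["BPM"],
                            columns=["NAME", "S", "BETX", "BETY"])
print(bpms_df.headers["Q1"], bpms_df.headers["Q2"])  # from the SUMM table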
Example #17
def _get_sussix_data(file_path, bpms):
    directory, filename = _get_dir_and_name(file_path)
    bpm_dir = os.path.join(directory, 'BPM')
    files = {LIN: {}, AMPS: {}, FREQS: {}}
    for plane in PLANES:
        files[LIN][plane] = tfs.read(os.path.join(
            directory, f'{filename}_lin{plane.lower()}'),
                                     index=COL_NAME)
        for id_ in (FREQS, AMPS):
            files[id_][plane] = tfs.TfsDataFrame(columns=bpms)
        for bpm in bpms:
            with suppress(FileNotFoundError):
                df = tfs.read(os.path.join(bpm_dir, f'{bpm}.{plane.lower()}'))
                files[FREQS][plane][bpm] = df["FREQ"]
                files[AMPS][plane][bpm] = df["AMP"]
        for id_ in (FREQS, AMPS):
            files[id_][plane] = files[id_][plane].fillna(0)
    return files
Example #18
def get_df(n):
    """ Fake DF with nonsense values. """
    qx, qy = 1.31, 1.32
    phx, phy = f"{PHASE_ADV}{X}", f"{PHASE_ADV}{Y}"
    betax, betay = f"{BETA}{X}", f"{BETA}{Y}"
    df = tfs.TfsDataFrame(index=[str(i) for i in range(n)],
                          columns=[S, X, Y, betax, betay, phx, phy],
                          headers={
                              f"{TUNE}1": qx,
                              f"{TUNE}2": qy
                          })
    df[S] = np.linspace(0, n, n)
    df[phx] = np.linspace(0, qx, n + 1)[:n]
    df[phy] = np.linspace(0, qy, n + 1)[:n]
    df[betax] = 1
    df[betay] = 1
    df[[X, Y]] = 0
    return df
Example #19
def get_twiss_tfs(madx: Madx) -> tfs.TfsDataFrame:
    """
    Returns a tfs.TfsDataFrame from the Madx instance's twiss dframe, formatted the way one is used to
    getting it from MAD-X's TWISS output (uppercase names and column names, SUMM table in the headers).

    Args:
        madx (cpymad.madx.Madx): an instantiated cpymad Madx object.

    Returns:
        A tfs.TfsDataFrame.
    """
    logger.info("Exporting internal TWISS and SUMM tables to TfsDataFrame")
    twiss_tfs = tfs.TfsDataFrame(madx.table.twiss.dframe())
    twiss_tfs.name = [element[:-2] for element in twiss_tfs.name]
    twiss_tfs.columns = twiss_tfs.columns.str.upper()
    twiss_tfs = twiss_tfs.set_index("NAME")
    twiss_tfs.index = twiss_tfs.index.str.upper()
    twiss_tfs.headers = {var.upper(): madx.table.summ[var][0] for var in madx.table.summ}
    return twiss_tfs
Example #20
File: handler.py Project: pylhc/omc3
def _calculate_delta(resp_matrix: pd.DataFrame, meas_dict: dict,
                     keys: Sequence[str], vars_list: Sequence[str],
                     method: str, meth_opt):
    """Get the deltas for the variables.

    Output is Dataframe with one column 'DELTA' and vars_list index."""
    weight_vector = _join_columns(f"{WEIGHT}", meas_dict, keys)
    diff_vector = _join_columns(f"{DIFF}", meas_dict, keys)

    resp_weighted = resp_matrix.mul(weight_vector, axis="index")
    diff_weighted = diff_vector * weight_vector

    delta = _get_method_fun(method)(resp_weighted, diff_weighted, meth_opt)
    delta = tfs.TfsDataFrame(delta, index=vars_list, columns=[DELTA])

    # check calculations
    update = np.dot(resp_weighted, delta[DELTA])
    _print_rms(meas_dict, diff_weighted, update)

    return delta
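
# Side note (added): with W = diag(weight_vector), R = resp_matrix and
# d = diff_vector, the function above solves the weighted linear problem
#     minimize || W R x - W d ||
# with the solver selected by `method`, and `update = (W R) @ delta` is the
# predicted (weighted) change used for the RMS sanity check in _print_rms.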
Example #21
File: filters.py Project: pylhc/omc3
def _get_filtered_generic(col: str, meas: pd.DataFrame, model: pd.DataFrame, opt: DotDict) -> tfs.TfsDataFrame:
    """
    Filters the provided column *col* of the measurement dataframe *meas*, based on the model values
    (from the *model* dataframe) and the filtering options given at the command line (for instance,
    the ``errorcut`` and ``modelcut`` values).

    Args:
        col (str): The column name to filter.
        meas (pd.DataFrame): The measurement dataframe.
        model (pd.DataFrame): The model dataframe, which we get from the ``model_creator``.
        opt (DotDict): The command line options dictionary.

    Returns:
        The filtered dataframe as a `~tfs.TfsDataFrame`.
    """
    common_bpms = meas.index.intersection(model.index)
    meas = meas.loc[common_bpms, :]

    new = tfs.TfsDataFrame(index=common_bpms)
    new[VALUE] = meas.loc[:, col].to_numpy()
    new[ERROR] = meas.loc[:, f"{ERR}{col}"].to_numpy()
    new[WEIGHT] = (
        _get_errorbased_weights(col, opt.weights[col], meas.loc[:, f"{ERR}{DELTA}{col}"])
        if opt.use_errorbars
        else opt.weights[col]
    )

    # Applying filtering cuts
    error_filter = meas.loc[:, f"{ERR}{DELTA}{col}"].to_numpy() < opt.errorcut[col]
    model_filter = np.abs(meas.loc[:, f"{DELTA}{col}"].to_numpy()) < opt.modelcut[col]
    # if opt.automatic_model_cut:  # TODO automated model cut
    #     model_filter = _get_smallest_data_mask(np.abs(meas.loc[:, f"{DELTA}{col}"].to_numpy()), portion=0.95)
    if f"{PHASE}" in col:
        new[NAME2] = meas.loc[:, NAME2].to_numpy()
        second_bpm_in = np.in1d(new.loc[:, NAME2].to_numpy(), new.index.to_numpy())
        good_bpms = error_filter & model_filter & second_bpm_in
        good_bpms[-1] = False
    else:
        good_bpms = error_filter & model_filter
    LOG.debug(f"Number of BPMs kept for column '{col}' after filtering: {np.sum(good_bpms)}")
    return new.loc[good_bpms, :]
Example #22
File: analysis.py Project: pylhc/omc3
def calc_beta_at_instruments(kmod_input_params, results_df, magnet1_df, magnet2_df):

    beta_instr = []

    for instrument in kmod_input_params.instruments_found:
        positions = getattr(kmod_input_params, instrument)

        for name, position in positions.items():
            beta_instr.append(calc_beta_inst(
                name, position, results_df, magnet1_df, magnet2_df, kmod_input_params))

    instrument_beta_df = tfs.TfsDataFrame(
        columns=['NAME',
                 f"{BETA}{'X'}",
                 f"{ERR}{BETA}{'X'}",
                 f"{BETA}{'Y'}",
                 f"{ERR}{BETA}{'Y'}",
                 ],
        data=beta_instr)

    return instrument_beta_df
Example #23
def get_rdts(madx: Madx, order: int = 4, file: Union[Path, str] = None) -> tfs.TfsDataFrame:
    """
    INITIAL IMPLEMENTATION CREDITS GO TO JOSCHUA DILLY (@JoschD).
    Calculate the RDTs via PTC_TWISS.

    Args:
        madx (cpymad.madx.Madx): an instantiated cpymad Madx object.
        order (int): map order for the PTC calculation, passed as ``no``. Defaults to `4`.
        file (Union[Path, str]): path to output file. Defaults to `None`.

    Returns:
        A TfsDataframe with results.
    """
    logger.info(f"Entering PTC to calculate RDTs up to order {order}")
    madx.ptc_create_universe()

    logger.trace("Creating PTC layout")
    madx.ptc_create_layout(model=3, method=4, nst=3, exact=True)
    # madx.ptc_create_layout(model=3, method=6, nst=1)  # from Michi

    logger.trace("Incorporating MAD-X alignment errors")
    madx.ptc_align()  # use madx alignment errors
    # madx.ptc_setswitch(fringe=True)  # include fringe effects

    logger.debug("Executing PTC Twiss")
    madx.ptc_twiss(icase=6, no=order, normal=True, trackrdts=True)
    madx.ptc_end()

    logger.debug("Extracting results to TfsDataFrame")
    dframe = tfs.TfsDataFrame(madx.table.twissrdt.dframe())
    dframe.columns = dframe.columns.str.upper()
    dframe.NAME = dframe.NAME.str.upper()

    if file:
        logger.debug(f"Exporting results to disk at '{Path(file).absolute()}'")
        tfs.write(file, dframe)

    return dframe
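
# Usage sketch (added, not part of the source): assumes a machine and beam are
# already set up in the cpymad instance. The output file name is hypothetical.
from cpymad.madx import Madx

madx = Madx()
# ... set up sequence and beam first ...
rdts_df = get_rdts(madx, order=4, file="rdts.tfs")
print(rdts_df.columns)  # columns come from the PTC `twissrdt` table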
Example #24
def create_total_phase(df_twiss: pd.DataFrame, df_model: pd.DataFrame,
                       parameter: str, relative_error: float,
                       randomize: Sequence[str], headers: Dict):
    """ Creates total phase measurements. """
    LOG.debug(f"Creating fake total phase for {parameter}.")
    plane = parameter[-1]
    df_tot = tfs.TfsDataFrame(index=df_twiss.index)
    element0 = df_twiss.index[0]
    df_tot[NAME2] = element0

    values = df_twiss[f"{PHASE_ADV}{plane}"] - df_twiss.loc[
        element0, f"{PHASE_ADV}{plane}"]
    errors = relative_error * np.ones_like(values)
    if ERRORS in randomize:
        errors = _get_random_errors(errors, np.ones_like(values)) % 0.5
    errors[0] = 0.

    if VALUES in randomize:
        rand_val = np.random.normal(values, errors) % 1
        values += ang_diff(rand_val, values)

    df_tot[parameter] = values % 1
    df_tot[f'{ERR}{parameter}'] = errors

    # tot model
    df_tot[S] = df_model[S]
    df_tot[f'{S}2'] = df_model.loc[df_tot.index[0], S]
    df_tot[f'{parameter}{MDL}'] = (
        df_model[f"{PHASE_ADV}{plane}"] -
        df_model.loc[element0, f"{PHASE_ADV}{plane}"]) % 1
    df_tot[f'{PHASE_ADV}{plane}{MDL}'] = df_model[f'{PHASE_ADV}{plane}']

    df_tot[f"{ERR}{DELTA}{parameter}"] = df_tot[f'{ERR}{parameter}']
    df_tot[f"{DELTA}{parameter}"] = df_ang_diff(df_tot, parameter,
                                                f'{parameter}{MDL}')
    df_tot = df_tot.fillna(0)
    df_tot.headers = headers.copy()
    return {f'{TOTAL_PHASE_NAME}{plane.lower()}': df_tot}
Example #25
def create_phase_advance(df_twiss: pd.DataFrame, df_model: pd.DataFrame,
                         parameter: str, relative_error: float,
                         randomize: Sequence[str], headers: Dict):
    """ Creates phase advance measurements. """
    LOG.debug(f"Creating fake phase advance for {parameter}.")
    plane = parameter[-1]
    df_adv = tfs.TfsDataFrame(index=df_twiss.index[:-1])
    df_adv[NAME2] = df_twiss.index[1:].to_numpy()

    def get_phase_advances(df_source):
        return ang_diff(
            df_source.loc[df_adv[NAME2], f"{PHASE_ADV}{plane}"].to_numpy(),
            df_source.loc[df_adv.index, f"{PHASE_ADV}{plane}"].to_numpy())

    values = get_phase_advances(df_twiss)
    errors = relative_error * np.ones_like(values)
    if ERRORS in randomize:
        errors = _get_random_errors(errors, np.ones_like(values)) % 0.5

    if VALUES in randomize:
        values = np.random.normal(values, errors)
        values = ang_interval_check(values)

    df_adv[parameter] = values
    df_adv[f'{ERR}{parameter}'] = errors

    # adv model
    df_adv[S] = df_model.loc[df_adv.index, S]
    df_adv[f'{S}2'] = df_model.loc[df_adv[NAME2], S].to_numpy()
    df_adv[f'{parameter}{MDL}'] = get_phase_advances(df_model)
    df_adv[f'{PHASE_ADV}{plane}{MDL}'] = df_model.loc[df_adv.index,
                                                      f'{PHASE_ADV}{plane}']

    df_adv[f"{ERR}{DELTA}{parameter}"] = df_adv[f'{ERR}{parameter}']
    df_adv[f"{DELTA}{parameter}"] = df_ang_diff(df_adv, parameter,
                                                f'{parameter}{MDL}')
    df_adv.headers = headers.copy()
    return {f'{PHASE_NAME}{plane.lower()}': df_adv}
Example #26
def create_measurement(df_twiss: pd.DataFrame, parameter: str,
                       relative_error: float,
                       randomize: Sequence[str]) -> tfs.TfsDataFrame:
    """ Create a new measurement Dataframe from df_twiss from parameter. """
    LOG.debug(f"Creating fake measurement for {parameter}.")
    values = df_twiss.loc[:, parameter]
    errors = relative_error * values.abs()
    if all(values == 0):
        LOG.warning(f"All value for {parameter} are zero. "
                    f"Fake measurement will be zero as well.")
    else:
        if ERRORS in randomize:
            errors = _get_random_errors(errors, values)

        if VALUES in randomize:
            values = np.random.normal(values, errors)

    df = tfs.TfsDataFrame(
        {parameter: values, f"{ERR}{parameter}": errors},
        index=df_twiss.index)
    return df
Example #27
def generate_fake_data(n) -> tfs.TfsDataFrame:
    qx, qy = 1.31, 1.32
    df = tfs.TfsDataFrame(0,
                          index=[str(i) for i in range(n)],
                          columns=[
                              S, f"{ALPHA}{X}", f"{ALPHA}{Y}", f"{BETA}{X}",
                              f"{BETA}{Y}", f"{PHASE_ADV}{X}",
                              f"{PHASE_ADV}{Y}", "R11", "R12", "R21", "R22"
                          ],
                          headers={
                              f"{TUNE}1": qx,
                              f"{TUNE}2": qy
                          })

    r = np.random.rand(n)
    df[S] = np.linspace(0, n, n)
    df.loc[:, "R11"] = np.sin(r)
    df.loc[:, "R22"] = r
    df.loc[:, "R21"] = np.cos(r)
    df.loc[:, "R12"] = -r
    df[f"{PHASE_ADV}{X}"] = np.linspace(0, qx, n + 1)[:n]
    df[f"{PHASE_ADV}{Y}"] = np.linspace(0, qy, n + 1)[:n]
    df.loc[:, [f"{BETA}{X}", f"{BETA}{Y}"]] = 1
    return df
Example #28
def get_amplitude_detuning(madx: Madx, order: int = 2, file: Union[Path, str] = None) -> tfs.TfsDataFrame:
    """
    INITIAL IMPLEMENTATION CREDITS GO TO JOSCHUA DILLY (@JoschD).
    Calculate amplitude detuning via PTC_NORMAL.

    Args:
        madx (cpymad.madx.Madx): an instantiated cpymad Madx object.
        order (int): maximum derivative order (only 0, 1 or 2 implemented in PTC). Defaults to `2`.
        file (Union[Path, str]): path to output file. Defaults to `None`.

    Returns:
        A TfsDataframe with results.
    """
    if order >= 3:
        logger.error(f"Maximum amplitude detuning order in PTC is 2, but {order:d} was requested")
        raise NotImplementedError("PTC amplitude detuning is not implemented for order > 2")

    logger.info("Entering PTC to calculate amplitude detuning")
    madx.ptc_create_universe()

    # layout I got with mask (jdilly)
    # model=3 (Sixtrack code model: Delta-Matrix-Kick-Matrix)
    # method=4 (integration order), nst=3 (integration steps), exact=True (exact Hamiltonian)
    logger.trace("Creating PTC layout")
    madx.ptc_create_layout(model=3, method=4, nst=3, exact=True)

    # alternative layout: model=3, method=6, nst=3
    # resplit=True (adaptive splitting of magnets), thin=0.0005 (splitting of quads),
    # xbend=0.0005 (splitting of dipoles)
    # madx.ptc_create_layout(model=3, method=6, nst=3, resplit=True, thin=0.0005, xbend=0.0005)

    logger.trace("Incorporating MAD-X alignment errors")
    madx.ptc_align()  # use madx alignment errors
    # madx.ptc_setswitch(fringe=True)  # include fringe effects

    logger.trace("Selecting tune orders")
    madx.select_ptc_normal(q1="0", q2="0")
    for ii in range(1, order + 1):  # These are d^iQ/ddp^i
        madx.select_ptc_normal(dq1=f"{ii:d}", dq2=f"{ii:d}")

    # ANH = anharmonicities (ex, ey, deltap), works only with parameters as full strings
    # could be done nicer with permutations ...
    logger.trace("Selecting anharmonicities")
    if order >= 1:
        # madx.select_ptc_normal('anhx=0, 0, 1')  # dQx/ddp
        # madx.select_ptc_normal('anhy=0, 0, 1')  # dQy/ddp
        madx.select_ptc_normal("anhx=1, 0, 0")  # dQx/dex
        madx.select_ptc_normal("anhx=0, 1, 0")  # dQx/dey
        madx.select_ptc_normal("anhy=1, 0, 0")  # dQy/dex
        madx.select_ptc_normal("anhy=0, 1, 0")  # dQy/dey

    if order >= 2:
        # madx.select_ptc_normal('anhx=0, 0, 2')  # d^2Qx/ddp^2
        # madx.select_ptc_normal('anhy=0, 0, 2')  # d^2Qy/ddp^2
        madx.select_ptc_normal("anhx=2, 0, 0")  # d^2Qx/dex^2
        madx.select_ptc_normal("anhx=1, 1, 0")  # d^2Qx/dexdey
        madx.select_ptc_normal("anhx=0, 2, 0")  # d^2Qx/dey^2
        madx.select_ptc_normal("anhy=2, 0, 0")  # d^2Qy/dex^2
        madx.select_ptc_normal("anhy=1, 1, 0")  # d^2Qy/dexdey
        madx.select_ptc_normal("anhy=0, 2, 0")  # d^2Qy/dey^2

    # icase = phase-space dimensionality, no = order of map
    logger.debug("Executing PTC Normal")
    madx.ptc_normal(closed_orbit=True, normal=True, icase=5, no=5)
    madx.ptc_end()

    logger.debug("Extracting results to TfsDataFrame")
    dframe = tfs.TfsDataFrame(madx.table.normal_results.dframe())
    dframe.columns = dframe.columns.str.upper()
    dframe.NAME = dframe.NAME.str.upper()
    dframe.index = range(len(dframe.NAME))  # table has a weird index

    if file:
        logger.debug(f"Exporting results to disk at '{Path(file).absolute()}'")
        tfs.write(file, dframe)

    return dframe
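
# Usage sketch (added, not part of the source): same caveats as for get_rdts,
# the cpymad instance needs a sequence and beam set up beforehand; the output
# file name is hypothetical.
from cpymad.madx import Madx

madx = Madx()
# ... set up sequence and beam first ...
ampdet_df = get_amplitude_detuning(madx, order=2, file="ampdet.tfs")
# rows hold the selected ANH terms (dQx/dex, dQx/dey, ...) up to `order`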
Example #29
def _create_jobs(
    cwd,
    mask_path_or_string,
    jobid_mask,
    replace_dict,
    output_dir,
    append_jobs,
    executable,
    script_args,
    script_extension,
) -> tfs.TfsDataFrame:
    LOG.debug("Creating Jobs.")
    values_grid = np.array(list(itertools.product(*replace_dict.values())),
                           dtype=object)

    if append_jobs:
        jobfile_path = cwd / JOBSUMMARY_FILE
        try:
            job_df = tfs.read(str(jobfile_path.absolute()), index=COLUMN_JOBID)
        except FileNotFoundError as filerror:
            raise FileNotFoundError(
                "Cannot append jobs, as no previous jobfile was found at "
                f"'{jobfile_path}'") from filerror
        mask = [
            elem not in job_df[replace_dict.keys()].values
            for elem in values_grid
        ]
        njobs = mask.count(True)
        values_grid = values_grid[mask]
    else:
        njobs = len(values_grid)
        job_df = tfs.TfsDataFrame()

    if njobs == 0:
        raise ValueError(f"No (new) jobs found!")
    if njobs > HTCONDOR_JOBLIMIT:
        LOG.warning(
            f"You are attempting to submit a large number of jobs ({njobs}). "
            "This can put a high stress on your system, make sure you know what you are doing."
        )

    LOG.debug(f"Initial number of jobs: {njobs:d}")
    data_df = tfs.TfsDataFrame(
        index=generate_jobdf_index(job_df, jobid_mask, replace_dict.keys(),
                                   values_grid),
        columns=list(replace_dict.keys()),
        data=values_grid,
    )
    job_df = job_df.append(data_df, sort=False, how_headers='left')
    job_df = _setup_folders(job_df, cwd)

    if htcutils.is_mask_file(mask_path_or_string):
        LOG.debug("Creating all jobs from mask.")
        script_extension = _get_script_extension(script_extension, executable,
                                                 mask_path_or_string)
        job_df = create_jobs_from_mask(job_df, mask_path_or_string,
                                       replace_dict.keys(), script_extension)

    LOG.debug("Creating shell scripts for submission.")
    job_df = htcutils.write_bash(
        job_df,
        output_dir,
        executable=executable,
        cmdline_arguments=script_args,
        mask=mask_path_or_string,
    )

    job_df[COLUMN_JOB_DIRECTORY] = job_df[COLUMN_JOB_DIRECTORY].apply(str)
    tfs.write(str(cwd / JOBSUMMARY_FILE), job_df, save_index=COLUMN_JOBID)
    return job_df
Example #30
File: handler.py Project: pylhc/omc3
def correct(accel_inst: Accelerator, opt: DotDict) -> None:
    """ Perform global correction as described in :mod:`omc3.global_correction`.

    Args:
        accel_inst (Accelerator): Accelerator Instance
        opt (DotDict): Correction options,
                       see :mod:`omc3.global_correction` for details.

    """
    method_options = opt.get_subdict(["svd_cut", "n_correctors"])
    # read data from files
    vars_list = _get_varlist(accel_inst, opt.variable_categories)
    optics_params, meas_dict = get_measurement_data(
        opt.optics_params,
        opt.meas_dir,
        opt.beta_file_name,
        opt.weights,
    )

    if opt.fullresponse_path is not None:
        resp_dict = _load_fullresponse(opt.fullresponse_path, vars_list)
    else:
        resp_dict = response_twiss.create_response(accel_inst,
                                                   opt.variable_categories,
                                                   optics_params)

    # the model in accel_inst is modified later, so save nominal model here to variables
    nominal_model = _maybe_add_coupling_to_model(accel_inst.model,
                                                 optics_params)
    # apply filters to data
    meas_dict = filters.filter_measurement(optics_params, meas_dict,
                                           nominal_model, opt)
    meas_dict = model_appenders.add_differences_to_model_to_measurements(
        nominal_model, meas_dict)

    resp_dict = filters.filter_response_index(resp_dict, meas_dict,
                                              optics_params)
    resp_matrix = _join_responses(resp_dict, optics_params, vars_list)
    delta = tfs.TfsDataFrame(0, index=vars_list, columns=[DELTA])

    # ######### Iteration Phase ######### #
    for iteration in range(opt.max_iter + 1):
        LOG.info(f"Correction Iteration {iteration} of {opt.max_iter}.")

        # ######### Update Model and Response ######### #
        if iteration > 0:
            LOG.debug("Updating model via MADX.")
            corr_model_path = opt.output_dir / f"twiss_{iteration}{EXT}"

            corr_model_elements = _create_corrected_model(
                corr_model_path, opt.change_params_path, accel_inst)
            corr_model_elements = _maybe_add_coupling_to_model(
                corr_model_elements, optics_params)

            bpms_index_mask = accel_inst.get_element_types_mask(
                corr_model_elements.index, types=["bpm"])
            corr_model = corr_model_elements.loc[bpms_index_mask, :]

            meas_dict = model_appenders.add_differences_to_model_to_measurements(
                corr_model, meas_dict)

            if opt.update_response:
                LOG.debug("Updating response.")
                # please look away for the next two lines.
                accel_inst._model = corr_model
                accel_inst._elements = corr_model_elements
                resp_dict = response_twiss.create_response(
                    accel_inst, opt.variable_categories, optics_params)
                resp_dict = filters.filter_response_index(
                    resp_dict, meas_dict, optics_params)
                resp_matrix = _join_responses(resp_dict, optics_params,
                                              vars_list)

        # ######### Actual optimization ######### #
        delta += _calculate_delta(resp_matrix, meas_dict, optics_params,
                                  vars_list, opt.method, method_options)

        # remove unused correctors from vars_list
        delta, resp_matrix, vars_list = _filter_by_strength(
            delta, resp_matrix, opt.min_corrector_strength)

        writeparams(opt.change_params_path, delta)
        writeparams(opt.change_params_correct_path, -delta)
        LOG.debug(
            f"Cumulative delta: {np.sum(np.abs(delta.loc[:, DELTA].to_numpy())):.5e}"
        )
    write_knob(opt.knob_path, delta)
    LOG.info("Finished Iterative Global Correction.")