Example no. 1
def test_two_important_number(some_numbers):
    errors = np.ones(10)
    errors[2:] = np.inf
    assert stats.circular_mean(
        some_numbers, errors=errors) == stats.circular_mean(some_numbers[:2])
    assert stats.circular_error(some_numbers, errors=errors, t_value_corr=False) ==\
           stats.circular_error(some_numbers[:2], errors=np.ones(2), t_value_corr=False)
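Both assertions rely on `np.inf` errors giving the corresponding points zero weight, so they drop out of the weighted circular statistics. A minimal sketch of that idea, assuming inverse-variance weights (this `weighted_circular_mean` is a hypothetical stand-in, not the `stats` implementation under test):

import numpy as np

def weighted_circular_mean(values, errors=None, period=2 * np.pi):
    # Inverse-variance weights: an infinite error yields a weight of exactly zero,
    # so the corresponding value does not contribute to the mean.
    values = np.asarray(values, dtype=float)
    weights = np.ones_like(values) if errors is None else 1.0 / np.asarray(errors, dtype=float) ** 2
    phasors = np.exp(2j * np.pi * values / period)
    return np.angle(np.sum(weights * phasors)) * period / (2 * np.pi)

print(weighted_circular_mean([10., 20., 500.], errors=[1., 1., np.inf], period=360.))  # ~15.0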
Example no. 2
def _get_signed_dispersion(input_files, df_orbit, scaled_amps, mask):
    same_interval_phase = np.angle(np.exp(PI2I * df_orbit.loc[:, input_files.get_columns(df_orbit, 'MUZ')].values)) / (2 * np.pi)
    phase_wrt_arcs = same_interval_phase - stats.circular_mean(same_interval_phase[mask, :], period=1, axis=0)
    phase_wrt_arcs = np.abs(np.where(np.abs(phase_wrt_arcs) > 0.5, phase_wrt_arcs - np.sign(phase_wrt_arcs), phase_wrt_arcs))
    if len(input_files.get_columns(df_orbit, 'AMPZ')) > 1:
        # resolving the sign of dispersion
        dispersions = scaled_amps * np.sign(0.25 - np.abs(stats.circular_mean(phase_wrt_arcs, period=1, axis=1)))[:, None]
        # final calculation
        return np.mean(dispersions, axis=1), np.std(dispersions, axis=1) * stats.t_value_correction(
            dispersions.shape[1])
    return scaled_amps * np.sign(0.25 - np.abs(phase_wrt_arcs)), 0.0
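The second line of `_get_signed_dispersion` folds phase differences, expressed in turns, into the interval [-0.5, 0.5] before taking the absolute value. A standalone demo of that wrapping step (the values here are made up for illustration):

import numpy as np

# Phase differences in turns: anything beyond half a turn is shifted back by
# subtracting its sign, exactly as in the np.where expression above.
x = np.array([0.75, -0.6, 0.3])
print(np.where(np.abs(x) > 0.5, x - np.sign(x), x))  # [-0.25  0.4   0.3 ]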
Example no. 3
def test_nanhandling():
    vector = np.array([355., 0., 5., np.nan])
    assert stats.circular_nanmean(vector) == stats.circular_mean(vector[:-1])
    assert stats.weighted_nanmean(vector) == stats.weighted_mean(vector[:-1])
    assert stats.weighted_nanrms(vector) == stats.weighted_rms(vector[:-1])
    vector = np.array([[355., 0., 5., 0.], [355., 0., 5., 0.],
                       [355., 0., 5., np.nan]])
    assert np.all(
        stats.circular_nanerror(vector, axis=1) == stats.circular_error(
            vector[:, :-1], axis=1))
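The one-dimensional assertions above pin down the contract of the nan-aware variants: they should match the plain statistics computed on the finite entries only. A tiny sketch of that equivalence (the final comment states the expectation rather than re-implementing `stats`):

import numpy as np

vector = np.array([355., 0., 5., np.nan])
finite = vector[~np.isnan(vector)]  # -> array([355., 0., 5.])
# e.g. stats.circular_nanmean(vector) is expected to equal stats.circular_mean(finite)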
Example no. 4
def _process_rdt(meas_input, input_files, phase_data, invariants, plane, rdt):
    df = pd.DataFrame(phase_data)
    second_bpms = df.loc[:, "NAME2"].to_numpy()
    df["S2"] = df.loc[second_bpms, "S"].to_numpy()
    df["COUNT"] = len(input_files.dpp_frames(plane, 0))
    line = _determine_line(rdt, plane)
    phase_sign, suffix = get_line_sign_and_suffix(line, input_files, plane)
    comp_coeffs1 = to_complex(
        input_files.joined_frame(plane, [f"AMP{suffix}"],
                                 dpp_value=0).loc[df.index, :].to_numpy(),
        phase_sign *
        input_files.joined_frame(plane, [f"PHASE{suffix}"],
                                 dpp_value=0).loc[df.index, :].to_numpy())
    # Multiples of tunes need to be added to the phase at the second BPM if it lies in the second turn
    phase2 = phase_sign * input_files.joined_frame(
        plane, [f"PHASE{suffix}"], dpp_value=0).loc[second_bpms, :].to_numpy()
    comp_coeffs2 = to_complex(
        input_files.joined_frame(plane, [f"AMP{suffix}"],
                                 dpp_value=0).loc[second_bpms, :].to_numpy(),
        _add_tunes_if_in_second_turn(df, input_files, line, phase2))
    # Get amplitude and phase of the line from linx/liny file
    line_amp, line_phase, line_amp_e, line_phase_e = complex_secondary_lines(  # TODO use the errors
        df.loc[:, "MEAS"].to_numpy()[:, np.newaxis] * meas_input.accelerator.beam_direction,
        df.loc[:, "ERRMEAS"].to_numpy()[:, np.newaxis],
        comp_coeffs1, comp_coeffs2)
    rdt_phases_per_file = _calculate_rdt_phases_from_line_phases(
        df, input_files, line, line_phase)
    rdt_angles = stats.circular_mean(rdt_phases_per_file, period=1, axis=1) % 1
    df[f"PHASE"] = rdt_angles
    df[f"{ERR}PHASE"] = stats.circular_error(rdt_phases_per_file,
                                             period=1,
                                             axis=1)
    df[AMPLITUDE], df[f"{ERR}{AMPLITUDE}"] = _fit_rdt_amplitudes(
        invariants, line_amp, plane, rdt)
    df[f"REAL"] = np.cos(
        2 * np.pi * rdt_angles) * df.loc[:, AMPLITUDE].to_numpy()
    df[f"IMAG"] = np.sin(
        2 * np.pi * rdt_angles) * df.loc[:, AMPLITUDE].to_numpy()
    # in old files there were "EAMP" and "PHASE_STD"
    return df.loc[:, [
        "S", "COUNT", AMPLITUDE, f"{ERR}{AMPLITUDE}", "PHASE", f"{ERR}PHASE",
        "REAL", "IMAG"
    ]]
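`_process_rdt` stores the RDT as an amplitude plus a phase in turns, and the REAL/IMAG columns are recovered with cos/sin of 2*pi times that phase. The sketch below illustrates the same convention with a hypothetical `to_complex_like` helper (the actual `to_complex` used above is not shown in this listing):

import numpy as np

def to_complex_like(amplitudes, phases_in_turns):
    # Phases are in units of 2*pi (turns), matching the REAL/IMAG columns above.
    return np.asarray(amplitudes) * np.exp(2j * np.pi * np.asarray(phases_in_turns))

z = to_complex_like([2.0], [0.25])  # quarter turn
print(z.real, z.imag)  # ~[0.]  [2.]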
Example no. 5
def calculate_coupling(
    meas_input: dict,
    input_files: dict,
    phase_dict: Dict[str, Tuple[Dict[str, tfs.TfsDataFrame],
                                Sequence[tfs.TfsDataFrame]]],
    tune_dict: Dict[str, float],
    header_dict: OrderedDict,
) -> None:
    """
    Calculates the coupling RDTs f1001 and f1010, as well as the closest tune approach Cminus (|C-|).
    This represents the "2 BPM method" in https://cds.cern.ch/record/1264111/files/CERN-BE-Note-2010-016.pdf
    (a more up-to-date reference will come in the near future).

    Two formulae are used to calculate the Cminus, taken from the following reference:
    https://cds.cern.ch/record/2135848/files/PhysRevSTAB.17.051004.pdf
    The first one (Eq. (1)) is an approximation using only the amplitudes of the RDTs, while the second
    one (Eq. (2) in the same paper) is more exact but also needs the phase of the RDT.

    The results are written down in the optics_measurements outputs as **f1001.tfs** and **f1010.tfs** files.

    Args:
        meas_input (dict): `OpticsInput` object containing analysis settings from the command-line.
        input_files (dict): `InputFiles` (dict) object containing frequency spectra files (linx/y) for
            each transverse plane (as keys).
        phase_dict (Dict[str, Tuple[Dict[str, tfs.TfsDataFrame], Sequence[tfs.TfsDataFrame]]]): dictionary containing
            the measured phase advances, with an entry for each transverse plane. In said entry is a
            dictionary with the measured phase advances for 'free' and 'uncompensated' cases, as well as
            the location of the output ``TfsDataFrames`` for the phases.
        tune_dict (Dict[str, float]): `TuneDict` object containing measured tunes. There is an entry
            calculated for the 'Q', 'QF', 'QM', 'QFM' and 'ac2bpm' modes, each value being a float.
        header_dict (OrderedDict): header dictionary of common items for coupling output files,
            which will be attached as the header to the **f1001.tfs** and **f1010.tfs** files.
    """
    LOGGER.info("Calculating coupling")
    bd = meas_input.accelerator.beam_direction
    compensation = "uncompensated" if meas_input.compensation == "model" else "free"

    # We need vertical and horizontal spectra, so we first have to intersect all inputs with the X and Y
    # phase outputs. Furthermore, the output has to be rearranged in the order of the model (important
    # for e.g. LHC beam 2), so we also need to intersect the *model* index with all of the above. Since
    # the first index of the .intersection chain dictates the order, we have to start with the model index.
    LOGGER.debug("Intersecting measurements, starting with model")
    joined: tfs.TfsDataFrame = _joined_frames(input_files)  # merge transverse input frames
    joined_index: pd.Index = (meas_input.accelerator.model.index.intersection(
        joined.index).intersection(
            phase_dict["X"][compensation]["MEAS"].index).intersection(
                phase_dict["Y"][compensation]["MEAS"].index))
    joined = joined.loc[joined_index].copy()

    phases_x: tfs.TfsDataFrame = phase_dict["X"][compensation]["MEAS"].loc[
        joined_index].copy()
    phases_y: tfs.TfsDataFrame = phase_dict["Y"][compensation]["MEAS"].loc[
        joined_index].copy()

    LOGGER.debug("Averaging (arithmetic mean) amplitude columns")
    for col in [SECONDARY_AMPLITUDE_X, SECONDARY_AMPLITUDE_Y]:
        arithmetically_averaged_columns = [c for c in joined.columns if c.startswith(col)]
        joined[col] = stats.weighted_mean(joined[arithmetically_averaged_columns], axis=1)

    LOGGER.debug("Averaging (circular mean) frequency columns"
                 )  # make sure to use period=1 here
    for col in [SECONDARY_FREQUENCY_X, SECONDARY_FREQUENCY_Y]:
        circularly_averaged_columns = [x for x in joined.columns if x.startswith(col)]
        joined[col] = bd * stats.circular_mean(joined[circularly_averaged_columns], axis=1, period=1)

    LOGGER.debug("Finding BPM pairs for momentum reconstruction")
    bpm_pairs_x, deltas_x = _find_pair(phases_x, meas_input.coupling_pairing)
    bpm_pairs_y, deltas_y = _find_pair(phases_y, meas_input.coupling_pairing)

    LOGGER.debug("Computing complex lines from spectra")
    A01: np.ndarray = 0.5 * _get_complex_line(
        joined[SECONDARY_AMPLITUDE_X] *
        exp(joined[SECONDARY_FREQUENCY_X] * PI2I), deltas_x, bpm_pairs_x)
    B10: np.ndarray = 0.5 * _get_complex_line(
        joined[SECONDARY_AMPLITUDE_Y] *
        exp(joined[SECONDARY_FREQUENCY_Y] * PI2I), deltas_y, bpm_pairs_y)
    A0_1: np.ndarray = 0.5 * _get_complex_line(
        joined[SECONDARY_AMPLITUDE_X] *
        exp(-joined[SECONDARY_FREQUENCY_X] * PI2I), deltas_x, bpm_pairs_x)
    B_10: np.ndarray = 0.5 * _get_complex_line(
        joined[SECONDARY_AMPLITUDE_Y] *
        exp(-joined[SECONDARY_FREQUENCY_Y] * PI2I), deltas_y, bpm_pairs_y)

    q1001_from_A = -np.angle(A01) + (bd * joined[f"{COL_MU}Y"].to_numpy() -
                                     0.25) * PI2
    q1001_from_B = np.angle(B10) - (bd * joined[f"{COL_MU}X"].to_numpy() -
                                    0.25) * PI2
    eq_1001 = exp(1.0j * q1001_from_A) + exp(1.0j * q1001_from_B)

    q1010_from_A = -np.angle(A0_1) - (bd * joined[f"{COL_MU}Y"].to_numpy() -
                                      0.25) * PI2
    q1010_from_B = -np.angle(B_10) - (bd * joined[f"{COL_MU}X"].to_numpy() -
                                      0.25) * PI2
    eq_1010 = exp(1.0j * q1010_from_A) + exp(1.0j * q1010_from_B)

    LOGGER.debug("Computing average of coupling RDTs")
    f1001: np.ndarray = -0.5 * sqrt(np.abs(A01 * B10)) * eq_1001 / abs(eq_1001)
    f1010: np.ndarray = 0.5 * sqrt(np.abs(
        A0_1 * B_10)) * eq_1010 / abs(eq_1010)

    LOGGER.debug("Getting tune separation from measurements")
    tune_separation = np.abs(tune_dict["X"]["QFM"] % 1.0 -
                             tune_dict["Y"]["QFM"] % 1.0)

    LOGGER.debug("Calculating approximated Cminus")
    C_approx = 4.0 * tune_separation * np.mean(np.abs(f1001))
    header_dict["Cminus_approx"] = C_approx
    LOGGER.info(
        f"|C-| (approx) = {C_approx:.5f}, tune_sep = {tune_separation:.3f}, from Eq.1 in PRSTAB 17,051004"
    )

    LOGGER.debug("Calculating exact Cminus")
    C_exact = np.abs(
        4.0 * tune_separation *
        np.mean(f1001 * exp(1.0j *
                            (joined[f"{COL_MU}X"] - joined[f"{COL_MU}Y"]))))
    header_dict["Cminus_exact"] = C_exact
    LOGGER.info(
        f"|C-| (exact)  = {C_exact:.5f}, from Eq.2 w/o i*s*Delta/R in PRSTAB 17,051004"
    )

    if meas_input.compensation == "model":
        LOGGER.debug("Compensating coupling RDT values by model")
        f1001, f1010 = compensate_rdts_by_model(f1001, f1010, tune_dict)

    LOGGER.debug("Adding model values and deltas")
    model_coupling = coupling_via_cmatrix(
        meas_input.accelerator.model).loc[joined_index]

    f1001_df = _rdt_to_output_df(f1001, model_coupling[F1001],
                                 meas_input.accelerator.model, joined_index)
    f1010_df = _rdt_to_output_df(f1010, model_coupling[F1010],
                                 meas_input.accelerator.model, joined_index)

    tfs.write(
        Path(meas_input.outputdir) / f"{F1001.lower()}{EXT}", f1001_df,
        header_dict)
    tfs.write(
        Path(meas_input.outputdir) / f"{F1010.lower()}{EXT}", f1010_df,
        header_dict)
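The docstring above describes two Cminus estimates from PRSTAB 17, 051004: Eq. (1) is an amplitude-only approximation, while Eq. (2) (used here without the i*s*Delta/R term) also involves the RDT phase and the phase advances. A minimal sketch of the two expressions as computed in the function, with `f1001`, `mu_x` and `mu_y` taken as per-BPM arrays in the same conventions as above and `tune_separation` the fractional tune split:

import numpy as np

def cminus_approx(f1001, tune_separation):
    # Eq. (1): uses only the RDT amplitudes.
    return 4.0 * tune_separation * np.mean(np.abs(f1001))

def cminus_exact(f1001, mu_x, mu_y, tune_separation):
    # Eq. (2) without the i*s*Delta/R term: also uses the RDT phase,
    # weighted by the phase-advance difference (same convention as above).
    return np.abs(4.0 * tune_separation * np.mean(f1001 * np.exp(1.0j * (mu_x - mu_y))))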
Example no. 6
def _calculate_with_compensation(meas_input,
                                 input_files,
                                 tunes,
                                 plane,
                                 model_df,
                                 compensation='none',
                                 no_errors=False):
    """
    Calculates phase advances.

    Args:
        meas_input: the input object including settings and the accelerator class.
        input_files: includes measurement tfs.
        tunes: `TuneDict` object containing measured and model tunes and the ac2bpm object.
        plane: marking the horizontal or vertical plane, **X** or **Y**.
        no_errors: if ``True``, measured errors shall not be propagated (only their spread).

    Returns:
        A `dictionary` of `TfsDataFrames` indexed (BPMi x BPMj) yielding phase advance `phi_ij`.

         - "MEAS": measured phase advances,
         - "ERRMEAS": errors of measured phase advances,
         - "MODEL": model phase advances.

        +------++--------+--------+--------+--------+
        |      ||  BPM1  |  BPM2  |  BPM3  |  BPM4  |
        +======++========+========+========+========+
        | BPM1 ||   0    | phi_12 | phi_13 | phi_14 |
        +------++--------+--------+--------+--------+
        | BPM2 || phi_21 |    0   | phi_23 | phi_24 |
        +------++--------+--------+--------+--------+
        | BPM3 || phi_31 | phi_32 |   0    | phi_34 |
        +------++--------+--------+--------+--------+
        | BPM4 || phi_41 | phi_42 | phi_43 |    0   |
        +------++--------+--------+--------+--------+

        The phase advance between BPM_i and BPM_j can be obtained via:
        phase_advances["MEAS"].loc[BPMi, BPMj]

        Also returns a list of output data frames (for the output files).
    """
    LOGGER.info("Calculating phase advances")
    LOGGER.info(f"Measured tune in plane {plane} = {tunes[plane]['Q']}")

    df = model_df.loc[:, ["S", f"MU{plane}"]]
    how = 'outer' if meas_input.union else 'inner'
    dpp_value = meas_input.dpp if "dpp" in meas_input.keys() else 0
    df = pd.merge(df,
                  input_files.joined_frame(plane,
                                           [f"MU{plane}", f"{ERR}MU{plane}"],
                                           dpp_value=dpp_value,
                                           how=how),
                  how='inner',
                  left_index=True,
                  right_index=True)
    df[input_files.get_columns(df, f"MU{plane}")] = (
        input_files.get_data(df, f"MU{plane}") * meas_input.accelerator.beam_direction)
    phases_mdl = df.loc[:, f"MU{plane}"].to_numpy()
    phase_advances = {
        "MODEL": _get_square_data_frame(
            (phases_mdl[np.newaxis, :] - phases_mdl[:, np.newaxis]) % 1.0, df.index)
    }
    if compensation == "model":
        df = _compensate_by_model(input_files, meas_input, df, plane)
    phases_meas = input_files.get_data(df, f"MU{plane}")
    if meas_input.compensation == "equation":
        phases_meas = _compensate_by_equation(phases_meas, plane, tunes)

    phases_errors = input_files.get_data(df, f"{ERR}MU{plane}")
    if phases_meas.ndim < 2:
        phase_advances["MEAS"] = _get_square_data_frame(
            (phases_meas[np.newaxis, :] - phases_meas[:, np.newaxis]) % 1.0,
            df.index)
        phase_advances["ERRMEAS"] = _get_square_data_frame(
            np.zeros((len(phases_meas), len(phases_meas))), df.index)
        return phase_advances
    if meas_input.union:
        mask = np.isnan(phases_meas)
        phases_meas[mask], phases_errors[mask] = 0.0, np.inf
        if no_errors:
            phases_errors[~mask] = 1e-10
    elif no_errors:
        phases_errors = None
    phases_3d = phases_meas[np.newaxis, :, :] - phases_meas[:, np.newaxis, :]
    if phases_errors is not None:
        errors_3d = phases_errors[np.newaxis, :, :] + phases_errors[:, np.newaxis, :]
    else:
        errors_3d = None
    phase_advances["MEAS"] = _get_square_data_frame(
        stats.circular_mean(phases_3d, period=1, errors=errors_3d, axis=2) %
        1.0, df.index)
    phase_advances["ERRMEAS"] = _get_square_data_frame(
        stats.circular_error(phases_3d, period=1, errors=errors_3d, axis=2),
        df.index)
    return phase_advances, [
        _create_output_df(phase_advances, df, plane),
        _create_output_df(phase_advances, df, plane, tot=True)
    ]
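The MEAS and MODEL matrices above are built by broadcasting: subtracting a row vector of phases from the same phases as a column vector yields phi_ij = mu_j - mu_i for every BPM pair, taken modulo one turn, exactly as in the table in the docstring. A standalone demo with made-up phases:

import numpy as np

mu = np.array([0.0, 0.25, 0.75])  # example phases in turns
print((mu[np.newaxis, :] - mu[:, np.newaxis]) % 1.0)
# [[0.   0.25 0.75]
#  [0.75 0.   0.5 ]
#  [0.25 0.5  0.  ]]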
Example no. 7
def test_circular_zeros(zeros):
    assert stats.circular_mean(zeros) == 0
    assert stats.circular_error(zeros) == 0
Example no. 8
def test_mean_fall_back_to_linear():
    vector = np.arange(10)
    assert stats.circular_mean(vector, period=100000) ==\
           pytest.approx(np.mean(vector))
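This test works because, for a period much larger than the spread of the data, the circular mean degenerates into the arithmetic mean. A quick demo, assuming the standard definition of a circular mean with period P (not necessarily the exact implementation under test):

import numpy as np

# mean = P / (2*pi) * angle( sum( exp(2*pi*1j * x / P) ) )
# For P much larger than the spread of x, the exponential is nearly linear in x,
# so the angle of the sum reduces to the arithmetic mean.
x = np.arange(10)
period = 100000
circular = period / (2 * np.pi) * np.angle(np.sum(np.exp(2j * np.pi * x / period)))
print(circular, np.mean(x))  # both ~4.5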
Example no. 9
def test_one_important_number(some_numbers):
    errors = np.ones(10)
    errors[1:] = np.inf
    assert stats.circular_mean(some_numbers, errors=errors) == some_numbers[0]
    assert stats.circular_error(some_numbers, errors=errors, t_value_corr=False) ==\
           stats.circular_error(some_numbers[0], errors=1., t_value_corr=False)
Example no. 10
def test_opposite_values_cancel_out():
    vector = np.array([355., 0., 5.])
    assert stats.circular_mean(vector, period=360.) == pytest.approx(0.)
Example no. 11
def test_mean_ones_errors_is_like_no_errors(some_numbers):
    assert stats.circular_mean(some_numbers,
                               errors=np.ones(len(some_numbers))) ==\
           stats.circular_mean(some_numbers)
Example no. 12
def test_circular_empties():
    empty = np.array([])
    with pytest.warns(RuntimeWarning):
        stats.circular_mean(empty)
    with pytest.warns(RuntimeWarning):
        stats.circular_error(empty)
Example no. 13
def test_circular_nans(a_nan):
    assert np.isnan(stats.circular_mean(a_nan))