Ejemplo n.º 1
0
def test_tfs_read_write_read_pathlib_input(_tfs_file_pathlib: pathlib.Path,
                                           _test_file: str):
    """Round-trip a TFS file given as a pathlib.Path: read, write, re-read."""
    loaded = read_tfs(_tfs_file_pathlib)
    write_tfs(_test_file, loaded)
    reloaded = read_tfs(_test_file)
    assert_frame_equal(loaded, reloaded)
    assert_dict_equal(loaded.headers, reloaded.headers, compare_keys=True)
Ejemplo n.º 2
0
def test_tfs_read_write_read(_tfs_file, _test_file):
    """Read a TFS file, write it back out and check the data survives the trip."""
    before = read_tfs(_tfs_file)
    write_tfs(_test_file, before)
    after = read_tfs(_test_file)
    assert before.headers == after.headers
    assert all(before.columns == after.columns)
    for col in before:
        assert all(before.loc[:, col] == after.loc[:, col])
Ejemplo n.º 3
0
 def test_tfs_read_write_read_pathlib_input(self,
                                            _tfs_file_pathlib: pathlib.Path,
                                            tmp_path):
     """Round-trip through a temp file with pathlib.Path inputs."""
     loaded = read_tfs(_tfs_file_pathlib)
     out_file = tmp_path / "test_file.tfs"
     write_tfs(out_file, loaded)
     reloaded = read_tfs(out_file)
     assert_frame_equal(loaded, reloaded)
     assert_dict_equal(loaded.headers, reloaded.headers, compare_keys=True)
Ejemplo n.º 4
0
def merge_tfs(directories: List[pathlib.Path],
              filename: str) -> tfs.TfsDataFrame:
    """
    Merge different kmod analysis results from a list of directories into a single `TfsDataFrame`.

    Args:
        directories (List[pathlib.Path]): list of PosixPath objects to directories holding TFS
            files with the results of kmod analysis.
        filename (str): name of the TFS files to look for in the provided directories

    Returns:
        A `TfsDataFrame` combining all the loaded files from the provided directories.

    Raises:
        KeyError: if the merged frame contains duplicated index entries.
    """
    import pandas as pd  # local import: only needed for the concatenation below

    # Collect all frames first and concatenate once: `DataFrame.append` was
    # removed in pandas 2.0, and repeated appends were quadratic anyway.
    headers = {}
    frames = []
    for directory in sorted(directories):
        loaded_tfs = tfs.read_tfs(directory / filename)
        headers.update(loaded_tfs.headers)  # headers are lost in concatenation
        frames.append(loaded_tfs)
    new_tfs = tfs.TfsDataFrame(pd.concat(frames, ignore_index=True),
                               headers=headers)
    new_tfs = new_tfs.set_index(NAME)

    # drop BPMWK and check tfs
    new_tfs = new_tfs.loc[~new_tfs.index.str.startswith(BPMWK), :]
    if not new_tfs.index.is_unique:
        # fixed: original message had an unbalanced trailing quote
        raise KeyError(
            "Found duplicated entries "
            f"{', '.join(set(new_tfs.index[new_tfs.index.duplicated()]))}.")
    return new_tfs
Ejemplo n.º 5
0
    def test_absent_attributes_and_keys(self, _tfs_file_str: str):
        """Missing names raise AttributeError (attribute) and KeyError (item)."""
        frame = read_tfs(_tfs_file_str, index="NAME")
        with pytest.raises(AttributeError):
            _ = frame.Not_HERE

        with pytest.raises(KeyError):
            _ = frame["Not_HERE"]
Ejemplo n.º 6
0
 def test_warn_unphysical_values(self, caplog):
     """Reading a file with NaNs logs a warning about non-physical values."""
     nan_file = pathlib.Path(__file__).parent / "inputs" / "has_nans.tfs"
     _ = read_tfs(nan_file, index="NAME")
     for record in caplog.records:
         assert record.levelname == "WARNING"
     assert "contains non-physical values at Index:" in caplog.text
Ejemplo n.º 7
0
 def test_write_read_spaces_in_strings(self, tmp_path):
     """String values containing spaces survive a write/read round-trip."""
     frame = TfsDataFrame(data=["This is", "a test", "with spaces"],
                          columns=["A"])
     out_file = tmp_path / "test.tfs"
     write_tfs(out_file, frame)
     reloaded = read_tfs(out_file)
     assert_frame_equal(frame, reloaded)
Ejemplo n.º 8
0
def test_merge_kmod_results_commandline(tmp_path):
    """Run the merge via the command-line entrypoint and compare to the reference."""
    kmod_dirs = [str(INPUT_DIR / "kmod_ip1"), str(INPUT_DIR / "kmod_ip5")]

    with cli_args("--kmod_dirs", *kmod_dirs, "--outputdir", str(tmp_path)):
        merge_kmod_results.merge_kmod_results()

    out_name = f"{merge_kmod_results.LSA_RESULTS}{merge_kmod_results.EXT}"
    merged = tfs.read_tfs(tmp_path / out_name,
                          index=merge_kmod_results.NAME)
    reference = tfs.read_tfs(INPUT_DIR / "lsa_results_merged.tfs",
                             index=merge_kmod_results.NAME)

    assert_frame_equal(merged, reference)
    assert_dict_equal(merged.headers,
                      reference.headers,
                      compare_keys=True)
Ejemplo n.º 9
0
 def test_tfs_read_str_input(self, _tfs_file_str: str):
     """Reading from a plain string path yields a populated frame with a str index."""
     frame = read_tfs(_tfs_file_str, index="NAME")
     assert len(frame.headers) > 0
     assert len(frame.columns) > 0
     assert len(frame.index) > 0
     assert len(str(frame)) > 0
     assert isinstance(frame.index[0], str)
Ejemplo n.º 10
0
def test_tfs_write_read(_dataframe: TfsDataFrame, _test_file: str):
    """Write a TfsDataFrame to disk and verify the re-read copy matches."""
    write_tfs(_test_file, _dataframe)
    assert pathlib.Path(_test_file).is_file()

    reloaded = read_tfs(_test_file)
    # float precision can be an issue, hence check_exact=False
    assert_frame_equal(_dataframe, reloaded, check_exact=False)
    assert_dict_equal(_dataframe.headers, reloaded.headers, compare_keys=True)
Ejemplo n.º 11
0
def test_tfs_write_read_autoindex(_dataframe: TfsDataFrame, _test_file: str):
    """Writing with save_index keeps the index and does not mutate the frame."""
    indexed = _dataframe.set_index("a")
    reference = _dataframe.set_index("a")
    write_tfs(_test_file, indexed, save_index=True)
    assert_frame_equal(indexed, reference)

    reloaded = read_tfs(_test_file)
    assert_index_equal(indexed.index, reloaded.index, check_exact=False)
    assert_dict_equal(_dataframe.headers, reloaded.headers, compare_keys=True)
Ejemplo n.º 12
0
def test_tfs_write_read_autoindex(_dataframe, _test_file):
    """Writing with save_index should preserve the index through a round-trip."""
    indexed = _dataframe.set_index("a")
    reference = _dataframe.set_index("a")
    write_tfs(_test_file, indexed, save_index=True)
    compare_dataframes(indexed, reference)  # writing should not change things

    reloaded = read_tfs(_test_file)
    assert reloaded.index.name == indexed.index.name
    assert all((reloaded.index.values - indexed.index.values) <= 1E-12)
Ejemplo n.º 13
0
 def test_write_int_float_columns(self, tmp_path):
     """This test is here because of numeric conversion bug
     upon writing back in v2.0.1"""
     frame = TfsDataFrame(data=[[1, 1.0], [2, 2.0], [3, 3.0]],
                          columns=["Int", "Float"])
     out_file = tmp_path / "test.tfs"
     write_tfs(out_file, frame)
     reloaded = read_tfs(out_file)
     assert_frame_equal(frame, reloaded)
Ejemplo n.º 14
0
def get_ip_positions(path):
    """ Returns a dict of IP positions from tfs-file of path.

    Args:
        path (str): Path to the tfs-file containing IP-positions
    """
    twiss = tfs.read_tfs(path).set_index('NAME')
    names = [f"IP{i:d}" for i in range(1, 9)]
    positions = twiss.loc[names, 'S'].values
    return {name: pos for name, pos in zip(names, positions)}
Ejemplo n.º 15
0
def test_merge_kmod_results(tmp_path):
    """Merge two kmod result directories and compare against the stored reference."""
    kmod_dirs = [INPUT_DIR / "kmod_ip1", INPUT_DIR / "kmod_ip5"]

    returned = merge_kmod_results.merge_kmod_results(kmod_dirs=kmod_dirs,
                                                     outputdir=tmp_path)
    out_name = f"{merge_kmod_results.LSA_RESULTS}{merge_kmod_results.EXT}"
    written = tfs.read_tfs(tmp_path / out_name,
                           index=merge_kmod_results.NAME)
    reference = tfs.read_tfs(INPUT_DIR / "lsa_results_merged.tfs",
                             index=merge_kmod_results.NAME)

    assert_frame_equal(written, reference)
    assert_dict_equal(written.headers,
                      reference.headers,
                      compare_keys=True)
    assert_frame_equal(returned, reference, check_exact=False)
    assert_dict_equal(returned.headers,
                      reference.headers,
                      compare_keys=True)
Ejemplo n.º 16
0
 def test_write_int_float_str_columns(self, tmp_path):
     """This test is more of an extension of the test below
     (this dataframe was not affected by the bug)"""
     frame = TfsDataFrame(
         data=[[1, 1.0, "one"], [2, 2.0, "two"], [3, 3.0, "three"]],
         columns=["Int", "Float", "String"],
     )
     out_file = tmp_path / "test.tfs"
     write_tfs(out_file, frame)
     reloaded = read_tfs(out_file)
     assert_frame_equal(frame, reloaded)
Ejemplo n.º 17
0
def test_tfs_write_read_pandasdf(_pddataframe: DataFrame, _test_file: str):
    """A plain pandas DataFrame can be written and read back as a TfsDataFrame."""
    write_tfs(_test_file, _pddataframe)
    assert pathlib.Path(_test_file).is_file()

    reloaded = read_tfs(_test_file)
    # check_exact=False: float precision can be an issue
    # check_frame_type=False: read df is TfsDF
    assert_frame_equal(_pddataframe, reloaded, check_exact=False,
                       check_frame_type=False)
Ejemplo n.º 18
0
    def test_tfs_write_read_autoindex(self, _tfs_dataframe, tmp_path):
        """Round-trip with save_index=True keeps the index intact."""
        indexed = _tfs_dataframe.set_index("a")
        reference = _tfs_dataframe.set_index("a")
        out_file = tmp_path / "test.tfs"
        write_tfs(out_file, indexed, save_index=True)
        assert_frame_equal(indexed, reference)

        reloaded = read_tfs(out_file)
        assert_index_equal(indexed.index, reloaded.index, check_exact=False)
        assert_dict_equal(_tfs_dataframe.headers,
                          reloaded.headers,
                          compare_keys=True)
Ejemplo n.º 19
0
    def test_tfs_write_read_pandasdf(self, _pd_dataframe, tmp_path):
        """A plain pandas DataFrame survives write/read, coming back as TfsDF."""
        out_file = tmp_path / "test.tfs"
        write_tfs(out_file, _pd_dataframe)
        assert out_file.is_file()

        reloaded = read_tfs(out_file)
        # check_exact=False: float precision can be an issue
        # check_frame_type=False: read df is TfsDF
        assert_frame_equal(_pd_dataframe, reloaded, check_exact=False,
                           check_frame_type=False)
Ejemplo n.º 20
0
    def test_tfs_write_read(self, _tfs_dataframe, tmp_path):
        """Data and headers survive a write/read round-trip through a temp file."""
        out_file = tmp_path / "test.tfs"
        write_tfs(out_file, _tfs_dataframe)
        assert out_file.is_file()

        reloaded = read_tfs(out_file)
        # float precision can be an issue, hence check_exact=False
        assert_frame_equal(_tfs_dataframe, reloaded, check_exact=False)
        assert_dict_equal(_tfs_dataframe.headers,
                          reloaded.headers,
                          compare_keys=True)
Ejemplo n.º 21
0
    def test_tfs_write_read_no_headers(self,
                                       _dataframe_empty_headers: TfsDataFrame,
                                       tmp_path):
        """A frame with empty headers round-trips cleanly."""
        out_file = tmp_path / "test.tfs"
        write_tfs(out_file, _dataframe_empty_headers)
        assert out_file.is_file()

        reloaded = read_tfs(out_file)
        # check_exact=False because of float precision
        assert_frame_equal(_dataframe_empty_headers, reloaded,
                           check_exact=False)
        assert_dict_equal(_dataframe_empty_headers.headers,
                          reloaded.headers,
                          compare_keys=True)
Ejemplo n.º 22
0
def test_tfs_read_str_input(_tfs_file_str: str):
    """Reading via a str path yields a populated frame; missing names raise."""
    frame = read_tfs(_tfs_file_str, index="NAME")
    assert len(frame.headers) > 0
    assert len(frame.columns) > 0
    assert len(frame.index) > 0
    assert len(str(frame)) > 0
    assert isinstance(frame.index[0], str)

    with pytest.raises(AttributeError):
        _ = frame.Not_HERE

    with pytest.raises(KeyError):
        _ = frame["Not_HERE"]
Ejemplo n.º 23
0
    def test_tfs_read_pathlib_input(self, _tfs_file_pathlib: pathlib.Path):
        """Reading via pathlib.Path yields a populated frame; missing names raise."""
        frame = read_tfs(_tfs_file_pathlib, index="NAME")
        assert len(frame.headers) > 0
        assert len(frame.columns) > 0
        assert len(frame.index) > 0
        assert len(str(frame)) > 0
        assert isinstance(frame.index[0], str)

        with pytest.raises(AttributeError):
            _ = frame.Not_HERE

        with pytest.raises(KeyError):
            _ = frame["Not_HERE"]
Ejemplo n.º 24
0
def clean_columns(files: Sequence[Union[Path, str]],
                  columns: Sequence[str],
                  limit: float = 0.0,
                  backup: bool = True):
    """ Clean the columns in the given files."""
    for entry in files:
        path = Path(entry)
        LOG.info(f"Cleaning {path.name}.")
        df = tfs.read_tfs(path, index=COL_NAME)
        for col in columns:
            df = _filter_by_column(df, col, limit)

        # keep a copy of the original file before overwriting it
        if backup:
            _backup_file(path)

        tfs.write_tfs(path, df, save_index=COL_NAME)
Ejemplo n.º 25
0
def test_tfs_write_empty_index_dataframe(_test_file: str):
    """A frame with zero rows (empty index) can be written and read back."""
    frame = TfsDataFrame(
        index=[],
        columns=["a", "b", "c"],
        data=numpy.random.rand(0, 3),
        headers={"Title": "Tfs Title", "Value": 3.3663},
    )

    write_tfs(_test_file, frame)
    assert pathlib.Path(_test_file).is_file()

    reloaded = read_tfs(_test_file)
    assert_frame_equal(frame, reloaded)
    assert_dict_equal(frame.headers, reloaded.headers, compare_keys=True)
Ejemplo n.º 26
0
def load_tfs_table(filename):
    """ Takes a .tfs file and returns the Twiss and SUMM tables.

    Args:
        filename(str): Path to .tfs file from a Twiss call.
    Returns:
        A Twiss and SUMM table for the given machine and beam.
    """
    # read_tfs returns a special DataFrame carrying the SUMM table as its
    # `headers` attribute; coerce both into a plain DataFrame and dict.
    tfs_frame = tfs.read_tfs(filename, index='NAME')
    summ_table = dict(tfs_frame.headers)
    twiss_table = pd.DataFrame(tfs_frame)
    return twiss_table, summ_table
Ejemplo n.º 27
0
    def test_tfs_write_empty_index_dataframe(self, tmp_path):
        """A frame with zero rows (empty index) round-trips through a temp file."""
        frame = TfsDataFrame(
            index=[],
            columns=["a", "b", "c"],
            data=numpy.random.rand(0, 3),
            headers={"Title": "Tfs Title", "Value": 3.3663},
        )

        out_file = tmp_path / "test.tfs"
        write_tfs(out_file, frame)
        assert out_file.is_file()

        reloaded = read_tfs(out_file)
        assert_frame_equal(frame, reloaded)
        assert_dict_equal(frame.headers, reloaded.headers, compare_keys=True)
Ejemplo n.º 28
0
def load_tfs():
    """ Plots a tfs-file.

    Opens a file dialog, loads the chosen TFS file(s) and lets the user pick
    columns to plot. Returns the figure, or None if no files were chosen.

    TODO:
        * check all tfs files for common columns -> make user choose which column
        * changemarkers tickbox
    """
    LOG.debug("Load Tfs clicked.")
    paths = QFileDialog().getOpenFileNames(None, 'Load file(s)', '')[0]
    fig = None
    if paths:
        LOG.info("Files chosen: {:s}".format(", ".join(paths)))
        if len(paths) > 1:
            # load all files and check for common columns
            df_list, common_cols = _get_all_tfs_and_common_columns(paths)
            column_selector = ColumnSelectorDialog(common_cols,
                                                   single_line=True)
        elif len(paths) == 1:
            # load only one tfs
            LOG.debug("Loading only one file")
            try:
                df = tfs.read_tfs(paths[0])
            except TfsFormatError:
                LOG.error("File '{}' is not of TFS format!".format(paths[0]))
            else:
                column_selector = ColumnSelectorDialog(df.columns.tolist())
                selected = column_selector.get_selected_columns()
                if selected:
                    # Fix: the original called `NotImplemented(...)` — the
                    # NotImplemented singleton is not callable, so that line
                    # crashed with an unrelated TypeError. Raise the intended
                    # placeholder exception instead.
                    raise NotImplementedError("Plotting not yet implemented")
                    # fig = plot_tfs.plot_single_file(
                    #     files=paths,
                    #     x_cols=[s["x"] for s in selected],
                    #     y_cols=[s["y"] for s in selected],
                    #     e_cols=[s["e"] for s in selected],
                    #     labels=[s["l"] for s in selected],
                    #     no_show=True,
                    # )
        return fig
    LOG.debug("No files chosen.")
    return None
Ejemplo n.º 29
0
def _get_all_tfs_and_common_columns(paths):
    """Load every TFS file in `paths`; return (frames, columns common to all)."""
    frames = [tfs.read_tfs(p) for p in paths]
    common = frames[0].columns
    for frame in frames[1:]:
        common = common.intersection(frame.columns)
    return frames, common
Ejemplo n.º 30
0
 def test_fail_read_no_colnames(self, _no_colnames_tfs_path, caplog):
     """Reading a file without column names raises TfsFormatError mentioning them."""
     with pytest.raises(TfsFormatError) as e:
         _ = read_tfs(_no_colnames_tfs_path)
     # Check the exception message itself: `str(e)` on the ExceptionInfo is
     # deprecated in pytest and yields file/line info, not the message text.
     assert "column names" in str(e.value)