Example #1
    def test_warning_on_non_unique_index(self, tmp_path, caplog):
        df = TfsDataFrame(index=["A", "B", "A"])
        write_tfs(tmp_path / "temporary.tfs", df)

        for record in caplog.records:
            assert record.levelname == "WARNING"
        assert "Non-unique indices found" in caplog.text
Example #2
    def test_write_read_spaces_in_strings(self, tmp_path):
        df = TfsDataFrame(data=["This is", "a test", "with spaces"],
                          columns=["A"])
        write_location = tmp_path / "test.tfs"
        write_tfs(write_location, df)
        new = read_tfs(write_location)
        assert_frame_equal(df, new)
Example #3
def test_tfs_read_write_read_pathlib_input(_tfs_file_pathlib: pathlib.Path,
                                           _test_file: str):
    original = read_tfs(_tfs_file_pathlib)
    write_tfs(_test_file, original)
    new = read_tfs(_test_file)
    assert_frame_equal(original, new)
    assert_dict_equal(original.headers, new.headers, compare_keys=True)
Example #4
def test_tfs_write_read(_dataframe: TfsDataFrame, _test_file: str):
    write_tfs(_test_file, _dataframe)
    assert pathlib.Path(_test_file).is_file()

    new = read_tfs(_test_file)
    assert_frame_equal(_dataframe, new,
                       check_exact=False)  # float precision can be an issue
    assert_dict_equal(_dataframe.headers, new.headers, compare_keys=True)
Example #5
    def test_raising_on_non_unique_both(self, caplog):
        df = TfsDataFrame(index=["A", "B", "A"], columns=["A", "B", "A"])
        with pytest.raises(TfsFormatError):
            write_tfs("", df, non_unique_behavior="raise")

        for record in caplog.records:
            assert record.levelname == "WARNING"
        assert "Non-unique indices found" in caplog.text  # first checked and raised
Example #6
def test_tfs_read_write_read(_tfs_file, _test_file):
    original = read_tfs(_tfs_file)
    write_tfs(_test_file, original)
    new = read_tfs(_test_file)
    assert original.headers == new.headers
    assert all(original.columns == new.columns)
    for column in original:
        assert all(original.loc[:, column] == new.loc[:, column])
Example #7
    def test_write_int_float_columns(self, tmp_path):
        """This test is here because of a numeric conversion bug
        upon writing back in v2.0.1."""
        df = TfsDataFrame(data=[[1, 1.0], [2, 2.0], [3, 3.0]],
                          columns=["Int", "Float"])
        write_location = tmp_path / "test.tfs"
        write_tfs(write_location, df)
        new = read_tfs(write_location)
        assert_frame_equal(df, new)
Example #8
def test_tfs_write_read_autoindex(_dataframe: TfsDataFrame, _test_file: str):
    df = _dataframe.set_index("a")
    df1 = _dataframe.set_index("a")
    write_tfs(_test_file, df, save_index=True)
    assert_frame_equal(df, df1)

    df_read = read_tfs(_test_file)
    assert_index_equal(df.index, df_read.index, check_exact=False)
    assert_dict_equal(_dataframe.headers, df_read.headers, compare_keys=True)
Example #9
    def test_fail_on_spaces_headers(self, caplog):
        caplog.set_level(logging.DEBUG)
        df = TfsDataFrame(headers={"allowed": 1, "not allowed": 2})
        with pytest.raises(TfsFormatError):
            write_tfs("", df)

        for record in caplog.records:
            assert record.levelname == "DEBUG"
        assert "Space(s) found in TFS header names" in caplog.text
Example #10
    def test_fail_on_wrong_column_type(self, caplog):
        caplog.set_level(logging.DEBUG)
        df = TfsDataFrame(columns=range(5))
        with pytest.raises(TfsFormatError):
            write_tfs("", df)

        for record in caplog.records:
            assert record.levelname == "DEBUG"
        assert "not of string-type" in caplog.text
Example #11
    def test_fail_on_spaces_columns(self, caplog):
        caplog.set_level(logging.DEBUG)
        df = TfsDataFrame(columns=["allowed", "not allowed"])
        with pytest.raises(TfsFormatError):
            write_tfs("", df)

        for record in caplog.records:
            assert record.levelname == "DEBUG"
        assert "Space(s) found in TFS columns" in caplog.text
Example #12
File: test_reader.py Project: pylhc/tfs
    def test_tfs_read_write_read_pathlib_input(self,
                                               _tfs_file_pathlib: pathlib.Path,
                                               tmp_path):
        original = read_tfs(_tfs_file_pathlib)
        write_location = tmp_path / "test_file.tfs"
        write_tfs(write_location, original)
        new = read_tfs(write_location)
        assert_frame_equal(original, new)
        assert_dict_equal(original.headers, new.headers, compare_keys=True)
Example #13
def test_tfs_write_read_autoindex(_dataframe, _test_file):
    df = _dataframe.set_index("a")
    df1 = _dataframe.set_index("a")
    write_tfs(_test_file, df, save_index=True)
    compare_dataframes(df, df1)  # writing should not change things

    df_read = read_tfs(_test_file)
    assert df_read.index.name == df.index.name
    assert all(abs(df_read.index.values - df.index.values) <= 1E-12)
Example #14
    def test_write_int_float_str_columns(self, tmp_path):
        """This test is more of an extension of the test below
        (this dataframe was not affected by the bug)"""
        df = TfsDataFrame(
            data=[[1, 1.0, "one"], [2, 2.0, "two"], [3, 3.0, "three"]],
            columns=["Int", "Float", "String"],
        )
        write_location = tmp_path / "test.tfs"
        write_tfs(write_location, df)
        new = read_tfs(write_location)
        assert_frame_equal(df, new)
Example #15
def test_tfs_write_read_pandasdf(_pddataframe: DataFrame, _test_file: str):
    write_tfs(_test_file, _pddataframe)
    assert pathlib.Path(_test_file).is_file()

    new = read_tfs(_test_file)
    assert_frame_equal(
        _pddataframe,
        new,
        check_exact=False,  # float precision can be an issue
        check_frame_type=False,  # read df is TfsDF
    )
Example #16
    def test_list_column_dataframe_fails_writes(
            self, _list_column_in_dataframe: TfsDataFrame, tmp_path):
        list_col_tfs = _list_column_in_dataframe
        with pytest.raises(
                ValueError
        ):  # truth value of nested data can't be assessed in _validate
            write_tfs("", list_col_tfs)

        del list_col_tfs["d"]  # should work without the column of lists
        write_location = tmp_path / "test.tfs"
        write_tfs(write_location, list_col_tfs)
        assert write_location.is_file()
Example #17
    def test_tfs_write_read_autoindex(self, _tfs_dataframe, tmp_path):
        df = _tfs_dataframe.set_index("a")
        df1 = _tfs_dataframe.set_index("a")
        write_location = tmp_path / "test.tfs"
        write_tfs(write_location, df, save_index=True)
        assert_frame_equal(df, df1)

        df_read = read_tfs(write_location)
        assert_index_equal(df.index, df_read.index, check_exact=False)
        assert_dict_equal(_tfs_dataframe.headers,
                          df_read.headers,
                          compare_keys=True)
Example #18
    def test_tfs_write_read_pandasdf(self, _pd_dataframe, tmp_path):
        write_location = tmp_path / "test.tfs"
        write_tfs(write_location, _pd_dataframe)
        assert write_location.is_file()

        new = read_tfs(write_location)
        assert_frame_equal(
            _pd_dataframe,
            new,
            check_exact=False,  # float precision can be an issue
            check_frame_type=False,  # read df is TfsDF
        )
Example #19
    def test_tfs_write_read(self, _tfs_dataframe, tmp_path):
        write_location = tmp_path / "test.tfs"
        write_tfs(write_location, _tfs_dataframe)
        assert write_location.is_file()

        new = read_tfs(write_location)
        assert_frame_equal(
            _tfs_dataframe, new,
            check_exact=False)  # float precision can be an issue
        assert_dict_equal(_tfs_dataframe.headers,
                          new.headers,
                          compare_keys=True)
Example #20
    def test_dict_column_dataframe_fails_writes(
            self, _dict_column_in_dataframe: TfsDataFrame, tmp_path):
        dict_col_tfs = _dict_column_in_dataframe
        with pytest.raises(
                TypeError
        ):  # tries to format dict.__dict__, can't get a % formatter
            write_tfs("", dict_col_tfs)

        del dict_col_tfs["d"]  # should work without the column of dicts
        write_location = tmp_path / "test.tfs"
        write_tfs(write_location, dict_col_tfs)
        assert write_location.is_file()
Example #21
    def test_tfs_write_read_no_headers(self,
                                       _dataframe_empty_headers: TfsDataFrame,
                                       tmp_path):
        write_location = tmp_path / "test.tfs"
        write_tfs(write_location, _dataframe_empty_headers)
        assert write_location.is_file()

        new = read_tfs(write_location)
        assert_frame_equal(_dataframe_empty_headers, new,
                           check_exact=False)  # float precision
        assert_dict_equal(_dataframe_empty_headers.headers,
                          new.headers,
                          compare_keys=True)
Example #22
def clean_columns(files: Sequence[Union[Path, str]],
                  columns: Sequence[str],
                  limit: float = 0.0,
                  backup: bool = True):
    """ Clean the columns in the given files."""
    for file in files:
        file = Path(file)
        LOG.info(f"Cleaning {file.name}.")
        df = tfs.read_tfs(file, index=COL_NAME)
        for column in columns:
            df = _filter_by_column(df, column, limit)

        if backup:
            _backup_file(file)

        tfs.write_tfs(file, df, save_index=COL_NAME)
Example #23
def _copy_and_modify_linfiles(out_path: Path,
                              columns=None,
                              index=None,
                              by=0.0):
    paths = {}
    for plane in PLANES:
        lin_file_src = _get_inputs_linfile(plane)
        lin_file_dst = out_path / lin_file_src.name
        paths[plane] = lin_file_dst
        if index is not None and columns is not None:
            plane_columns = [f"{col}{plane}" for col in columns]
            df = tfs.read(lin_file_src)
            df.loc[index, plane_columns] = df.loc[index, plane_columns] + by
            tfs.write_tfs(lin_file_dst, df)
        else:
            shutil.copy(lin_file_src, lin_file_dst)
    return paths
Example #24
def test_tfs_write_empty_index_dataframe(_test_file: str):
    df = TfsDataFrame(
        index=[],
        columns=["a", "b", "c"],
        data=numpy.random.rand(0, 3),
        headers={
            "Title": "Tfs Title",
            "Value": 3.3663
        },
    )

    write_tfs(_test_file, df)
    assert pathlib.Path(_test_file).is_file()

    new = read_tfs(_test_file)
    assert_frame_equal(df, new)
    assert_dict_equal(df.headers, new.headers, compare_keys=True)
Example #25
    def test_tfs_write_empty_index_dataframe(self, tmp_path):
        df = TfsDataFrame(
            index=[],
            columns=["a", "b", "c"],
            data=numpy.random.rand(0, 3),
            headers={
                "Title": "Tfs Title",
                "Value": 3.3663
            },
        )

        write_location = tmp_path / "test.tfs"
        write_tfs(write_location, df)
        assert write_location.is_file()

        new = read_tfs(write_location)
        assert_frame_equal(df, new)
        assert_dict_equal(df.headers, new.headers, compare_keys=True)
Example #26
    def test_madx_reads_written_tfsdataframes(self, _bigger_tfs_dataframe,
                                              tmp_path):
        dframe = _bigger_tfs_dataframe
        dframe.headers["TYPE"] = "TWISS"  # MAD-X complains about TFS files without a "TYPE" header
        write_location = tmp_path / "test.tfs"
        write_tfs(write_location, dframe)  # this will write an eol at eof

        # The written TFS file should be accepted by MAD-X
        with Madx() as madx:
            madx.command.readtable(file=str(write_location),
                                   table="test_table")
            assert madx.table.test_table is not None  # check table has loaded

            # Check validity of the loaded table, here we use pandas.Series and assert_series_equal instead
            # of numpy.array_equal to allow for (very) small relative numerical differences on loading
            for column in dframe.columns:
                assert column in madx.table.test_table
                assert_series_equal(
                    pandas.Series(madx.table.test_table[column]),
                    dframe[column],
                    check_names=False,
                )
Example #27
def create_da_tfs(
        jobname: str,
        basedir: Path) -> Tuple[TfsDataFrame, TfsDataFrame, TfsDataFrame]:
    """ Extracts data from db into dataframes, and writes and returns them."""
    LOG.info("Gathering DA data into tfs-files.")
    df_da = extract_da_data_from_db(jobname, basedir)
    df_angle = _create_stats_df(df_da, ANGLE)
    df_seed = _create_stats_df(df_da, SEED, global_index=0)

    write_tfs(get_tfs_da_path(jobname, basedir), df_da)
    write_tfs(get_tfs_da_angle_stats_path(jobname, basedir),
              df_angle,
              save_index=ANGLE)
    write_tfs(get_tfs_da_seed_stats_path(jobname, basedir),
              df_seed,
              save_index=SEED)
    return df_da, df_angle, df_seed
Example #28
def test_fail_on_spaces_headers():
    df = TfsDataFrame(headers={"allowed": 1, "not allowed": 2})
    with pytest.raises(TfsFormatError):
        write_tfs('', df)
Example #29
def test_write_read_spaces_in_strings(_test_file: str):
    df = TfsDataFrame(data=["This is", "a test", "with spaces"], columns=["A"])
    write_tfs(_test_file, df)
    new = read_tfs(_test_file)
    assert_frame_equal(df, new)
Example #30
    def test_messed_up_dataframe_fails_writes(
            self, _messed_up_dataframe: TfsDataFrame):
        messed_tfs = _messed_up_dataframe
        with pytest.raises(ValueError):
            write_tfs("", messed_tfs)