Example #1
def test_write():
    nd = scp.read_omnic('irdata/nh4y-activation.spg')

    # the API write method needs an NDDataset instance as the first argument
    with pytest.raises(TypeError):
        scp.write()

    # the simplest way to save a dataset is to use the write function with a filename as argument
    filename = nd.write('essai.scp')
    assert filename == cwd / 'essai.scp'

    nd2 = NDDataset.load(filename)
    testing.assert_dataset_equal(nd2, nd)
    filename.unlink()

    # if the filename is omitted, a dialog is opened to select a name (and a protocol)
    filename = nd.write()
    assert filename is not None
    assert filename.stem == nd.name
    assert filename.suffix == '.scp'
    filename.unlink()

    # # a write protocol can be specified
    # filename = nd.write(protocole='json')
    # assert filename is not None
    # assert filename.stem == nd.name
    # assert filename.suffix == '.json'
    # filename.unlink()

    irdatadir = pathclean(prefs.datadir) / "irdata"
    for f in ['essai.scp', 'nh4y-activation.scp']:
        if (irdatadir / f).is_file():
            (irdatadir / f).unlink()
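A minimal sketch of the write/load round trip exercised above, using only calls that appear in this example (scp.read_omnic, NDDataset.write, NDDataset.load); it assumes the standard irdata example files are available.

import spectrochempy as scp

nd = scp.read_omnic("irdata/nh4y-activation.spg")
path = nd.write("essai.scp")      # returns the full path of the written file
nd2 = scp.NDDataset.load(path)    # load it back from that path (no dialog)
# nd2 should compare equal to nd (cf. testing.assert_dataset_equal above)
path.unlink()                     # clean up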
Example #2
def test_nddataset_slicing_by_index(ref_ds, ds1):
    da = ds1
    ref = ref_ds
    # case where the index is an integer: the selection is by index starting
    # at zero
    assert da.shape == ref.shape
    coords = da.coordset
    plane0 = da[0]
    # should return a dataset with the z dimension reduced to size 1
    assert type(plane0) == type(da)
    assert plane0.ndim == 3
    assert plane0.shape == (1, 100, 3)
    assert plane0.size == 300
    assert plane0.dims == ['z', 'y', 'x']
    assert_dataset_equal(plane0.z, da.z[0])
    da1 = plane0.squeeze()
    assert da1.shape == (100, 3)
    assert da1.dims == ['y', 'x']
    plane1 = da[:, 0]
    assert type(plane1) == type(da)
    assert plane1.ndim == 3
    assert plane1.shape == (10, 1, 3)
    assert plane1.size == 30
    assert plane1.dims == ['z', 'y', 'x']
    da2 = plane1.squeeze()
    assert da2.dims == ['z', 'x']
    assert_dataset_almost_equal(da2.z, coords[-1], decimal=2)  # remember: coordinates are ordered by name!
    assert_dataset_almost_equal(da2.x, coords[0])
    # another selection
    row0 = plane0[:, 0]
    assert type(row0) == type(da)
    assert row0.shape == (1, 1, 3)
    # and again selection
    element = row0[..., 0]
    assert type(element) == type(da)
    assert element.dims == ['z', 'y', 'x']
    # squeeze
    row1 = row0.squeeze()
    assert row1.mask == scp.NOMASK
    row1[0] = scp.MASKED
    assert row1.dims == ['x']
    assert row1.shape == (3,)
    assert row1.mask.shape == (3,)
    element = row1[..., 0]
    assert element.x == coords[0][0]
    # now slicing in multiple directions
    da[1:4, 10:40:2, :2]
    # now again slicing in multiple directions (full selection)
    da[:, :, -2]
    # now again slicing in multiple directions (ellipsis)
    da[..., -1]
    da[-1, ...]
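A condensed sketch of the indexing behaviour checked above: with a SpectroChemPy NDDataset, integer indexing keeps the sliced dimension (with size 1) and squeeze removes it. The random 3D data below is only illustrative.

import numpy as np
import spectrochempy as scp

nd = scp.NDDataset(np.random.rand(10, 100, 3))

plane = nd[0]                # integer index keeps the first dimension, now of size 1
assert plane.shape == (1, 100, 3)

flat = plane.squeeze()       # drop the size-1 dimension
assert flat.shape == (100, 3)

sub = nd[1:4, 10:40:2, :2]   # multidimensional slicing
last = nd[..., -1]           # ellipsis works as in numpy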
Example #3
def test_issue_227():
    # IR spectrum; we want to make a baseline correction on the absorbance vs. time axis:
    ir = scp.read("irdata/nh4y-activation.spg")

    # baseline correction along x
    blc = scp.BaselineCorrection(ir)
    s1 = blc([5999.0, 3500.0], [1800.0, 1500.0],
             method="multivariate",
             interpolation="pchip")

    # baseline correction of the transposed data along x (now on axis 0) -> should produce
    # the same results as the baseline correction along axis -1 previously
    blc = scp.BaselineCorrection(ir.T)
    s2 = blc(
        [5999.0, 3500.0],
        [1800.0, 1500.0],
        dim="x",
        method="multivariate",
        interpolation="pchip",
    )

    # compare
    assert_dataset_equal(s1, s2.T)

    ir.y = ir.y - ir[0].y
    irs = ir[:, 2000.0:2020.0]
    blc = scp.BaselineCorrection(irs)
    blc.compute(*[[0.0, 2.0e3], [3.0e4, 3.3e4]],
                dim="y",
                interpolation="polynomial",
                order=1,
                method="sequential")
    blc.corrected.plot()

    # MS profiles; we want to make a baseline correction on the ion current vs. time axis:
    ms = scp.read("msdata/ion_currents.asc", timestamp=False)
    blc = scp.BaselineCorrection(ms[10.0:20.0, :])
    blc.compute(*[[10.0, 11.0], [19.0, 20.0]],
                dim="y",
                interpolation="polynomial",
                order=1,
                method="sequential")
    blc.corrected.T.plot()

    show()
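The same BaselineCorrection call pattern in isolation, as a minimal sketch; the ranges and the method/interpolation keywords are exactly those used in the test above.

import spectrochempy as scp

ir = scp.read("irdata/nh4y-activation.spg")

blc = scp.BaselineCorrection(ir)
corrected = blc([5999.0, 3500.0], [1800.0, 1500.0],
                method="multivariate",
                interpolation="pchip")
corrected.plot()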
Example #4
def test_ndio_generic(NMR_dataset_1D):
    nmr = NMR_dataset_1D
    assert nmr.directory == nmrdatadir

    # save with the default filename or open a dialog if it doesn't exist
    # ------------------------------------------------------------------------
    # save with the default name (equivalent to save_as in this case)
    # as this file (NMR_1D.scp) doesn't yet exist, a confirmation dialog is opened
    f = nmr.save()
    assert nmr.filename == f.name
    assert nmr.directory == nmrdatadir

    # load back this file: the full path f is given, so no dialog is opened
    nd = NDDataset.load(f)
    assert_dataset_equal(nd, nmr)

    # as it has already been saved, we should not get dialogs
    f = nd.save()
    assert nd.filename == "NMR_1D.scp"
    # return

    # now it opens a dialog and the name can be changed
    f1 = nmr.save_as()
    assert nmr.filename == f1.name

    # remove these files
    f.unlink()
    f1.unlink()

    # save in a specified directory
    nmr.save_as(irdatadir / "essai")  # save essai.scp
    assert nmr.directory == irdatadir
    assert nmr.filename == "essai.scp"
    (irdatadir / nmr.filename).unlink()

    # save in the current directory
    f = nmr.save_as(cwd / "essai")

    # try to load without extension specification (will first assume it is scp)
    dl = NDDataset.load("essai")
    # assert dl.directory == cwd
    assert_array_equal(dl.data, nmr.data)
    f.unlink()
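A minimal sketch of the save_as/load pattern above, writing next to the current working directory; any dataset can stand in for the NMR fixture, so the IR example file is used here.

from pathlib import Path
import spectrochempy as scp

nd = scp.read_omnic("irdata/nh4y-activation.spg")
f = nd.save_as(Path.cwd() / "essai")   # saves essai.scp and returns its full path
nd2 = scp.NDDataset.load(f)            # reload from the full path (no dialog)
# nd2 should compare equal to nd (cf. assert_dataset_equal above)
f.unlink()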
Example #5
def test_issue417():
    X = scp.read_omnic("irdata/nh4y-activation.spg")
    x = X - X[-1]

    print("--subtract with ds from read_omnic")
    print(f"mean x: {np.mean(x.data)}")
    print(f"var x: {np.var(x.data)}")
    print("")

    f = X.write("X.scp")
    X_r = scp.read("X.scp")
    f.unlink()

    assert_array_equal(X.data, X_r.data)
    assert_dataset_equal(X, X_r)
    assert_equal_units(X.units, X_r.units)
    assert_dataset_equal(X[-1], X_r[-1])

    x_r = X_r - X_r[-1]

    print("--subtract after write/read_scp")
    print(f"mean x_r: {np.mean(x_r.data)}")
    print(f"var x_r: {np.var(x_r.data)}")
    print("")

    x_r2 = X_r - X_r[-1].data
    print("--subtract with data field")
    print(f"mean x_r2: {np.mean(x_r2.data)}")
    print(f"var x_r2: {np.var(x_r2.data)}")

    assert_array_equal(x.data, x_r2.data)
    assert_array_equal(x.data, x_r.data)
    assert_dataset_equal(x, x_r)
Example #6
def test_write():
    nd = scp.read_omnic("irdata/nh4y-activation.spg")

    # the API write method needs an NDDataset instance as the first argument
    with pytest.raises(TypeError):
        scp.write()

    # the simplest way to save a dataset is to use the write function with a filename as argument
    if (cwd / "essai.scp").exists():
        (cwd / "essai.scp").unlink()

    filename = nd.write("essai.scp")  # should not open a DIALOG
    assert filename == cwd / "essai.scp"
    assert filename.exists()

    # try to write it again
    filename = nd.write("essai.scp")  # should open a DIALOG to confirm

    nd2 = NDDataset.load(filename)
    testing.assert_dataset_equal(nd2, nd)
    filename.unlink()

    # if the filename is omitted, a dialog is opened to select a name (and a protocol)
    filename = nd.write()
    assert filename is not None
    assert filename.stem == nd.name
    assert filename.suffix == ".scp"
    filename.unlink()

    # # a write protocol can be specified
    # filename = nd.write(protocole='json')
    # assert filename is not None
    # assert filename.stem == nd.name
    # assert filename.suffix == '.json'
    # filename.unlink()

    irdatadir = pathclean(prefs.datadir) / "irdata"
    for f in ["essai.scp", "nh4y-activation.scp"]:
        if (irdatadir / f).is_file():
            (irdatadir / f).unlink()
Example #7
def test_compare(IR_dataset_1D, simple_project):
    # dataset comparison

    nd1 = IR_dataset_1D.copy()
    nd2 = nd1.copy()

    testing.assert_dataset_equal(nd1, nd2)

    nd3 = nd1.copy()
    nd3.title = 'ddd'

    with testing.raises(AssertionError):
        testing.assert_dataset_equal(nd1, nd3)

    nd4 = nd1.copy()
    nd4.data += 0.001

    with testing.raises(AssertionError):
        testing.assert_dataset_equal(nd1, nd4)

    testing.assert_dataset_almost_equal(nd1, nd4, decimal=3)

    with testing.raises(AssertionError):
        testing.assert_dataset_almost_equal(nd1, nd4, decimal=4)

    # project comparison

    proj1 = simple_project.copy()
    proj1.name = 'PROJ1'
    proj2 = proj1.copy()
    proj2.name = 'PROJ2'

    testing.assert_project_equal(proj1, proj2)

    proj3 = proj2.copy()
    proj3.add_script(Script(content='print()', name='just_a_try'))

    with testing.raises(AssertionError):
        testing.assert_project_equal(proj1, proj3)
Example #8
def test_compare_dataset(IR_dataset_1D):
    # dataset comparison

    nd1 = IR_dataset_1D.copy()
    nd2 = nd1.copy()

    testing.assert_dataset_equal(nd1, nd2)

    nd3 = nd1.copy()
    nd3.title = "ddd"

    with testing.raises(AssertionError):
        testing.assert_dataset_equal(nd1, nd3)

    nd4 = nd1.copy()
    nd4.data += 0.001

    with testing.raises(AssertionError):
        testing.assert_dataset_equal(nd1, nd4)

    testing.assert_dataset_almost_equal(nd1, nd4, decimal=3)

    with testing.raises(AssertionError):
        testing.assert_dataset_almost_equal(nd1, nd4, decimal=4)
Example #9
def test_read_omnic():

    # Class method that would open a dialog (but for the test the filename is preset)
    nd1 = NDDataset.read_omnic(IRDATA / "nh4y-activation.spg")
    assert str(nd1) == "NDDataset: [float64] a.u. (shape: (y:55, x:5549))"

    # API method
    nd2 = scp.read_omnic(IRDATA / "nh4y-activation.spg")
    assert nd1 == nd2

    # It is also possible to use more specific reader functions such as
    # `read_spg`, `read_spa` or `read_srs` - they are aliases of the read_omnic function.
    l2 = scp.read_spg("wodger.spg", "irdata/nh4y-activation.spg")
    assert len(l2) == 2

    # test read_omnic with byte spg content
    filename_wodger = "wodger.spg"
    with open(DATADIR / filename_wodger, "rb") as fil:
        content = fil.read()
    nd1 = scp.read_omnic(filename_wodger)
    nd2 = scp.read_omnic({filename_wodger: content})
    assert nd1 == nd2

    # Test bytes contents for spa files
    filename = IRDATA / "subdir" / "7_CZ0-100_Pd_101.SPA"
    nds = scp.read_spa(filename)
    with open(IRDATA / "subdir" / filename, "rb") as fil:
        content = fil.read()
    nd = NDDataset.read_spa({filename: content})
    assert_dataset_equal(nd, nds)

    nd = scp.read_spa(IRDATA / "subdir" / "20-50" / "7_CZ0-100_Pd_21.SPA")
    assert str(nd) == "NDDataset: [float64] a.u. (shape: (y:1, x:5549))"

    nd2 = scp.read_spg(
        IRDATA / "subdir" / "20-50" / "7_CZ0-100_Pd_21.SPA"
    )  # wrong protocol but acceptable
    assert nd2 == nd

    # test import sample IFG
    nd = scp.read_spa(IRDATA / "carroucell_samp" / "2-BaSO4_0.SPA", return_ifg="sample")
    assert str(nd) == "NDDataset: [float64] V (shape: (y:1, x:16384))"

    # test import background IFG
    nd = scp.read_spa(
        IRDATA / "carroucell_samp" / "2-BaSO4_0.SPA", return_ifg="background"
    )
    assert str(nd) == "NDDataset: [float64] V (shape: (y:1, x:16384))"

    # import IFG from a file without IFG
    a = scp.read_spa(
        IRDATA / "subdir" / "20-50" / "7_CZ0-100_Pd_21.SPA", return_ifg="sample"
    )
    assert a is None

    # rapid_scan series
    a = scp.read_srs("irdata/omnic_series/rapid_scan.srs")
    assert str(a) == "NDDataset: [float64] V (shape: (y:643, x:4160))"

    # rapid_scan series, import background
    a = scp.read_srs("irdata/omnic_series/rapid_scan.srs", return_bg=True)
    assert str(a) == "NDDataset: [float64] V (shape: (y:1, x:4160))"

    # non-rapid scan series
    a = scp.read_srs("irdata/omnic_series/GC_Demo.srs")
    assert a is None  # not rapid scan mode
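A minimal sketch of the bytes-content form of the readers used above: a {filename: content} dictionary can be passed instead of a path. It assumes wodger.spg is reachable from the current working directory (the test resolves it through DATADIR).

import spectrochempy as scp

# read directly from a path
nd_from_path = scp.read_omnic("wodger.spg")

# read the same data from an in-memory bytes content, keyed by the original filename
with open("wodger.spg", "rb") as fil:
    content = fil.read()
nd_from_bytes = scp.read_omnic({"wodger.spg": content})

# both forms give the same dataset (see the equality assertion in the test above)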
Example #10
def test_ndmath_and_api_methods(IR_dataset_1D, IR_dataset_2D):

    # CREATION _LIKE METHODS
    # ----------------------

    # from a list
    x = [1, 2, 3]

    # _like as an API method
    ds = NDDataset(x).full_like(2.5, title="empty")
    ds = scp.full_like(x, 2)
    assert np.all(ds.data == np.full((3, ), 2))
    assert ds.implements("NDDataset")

    # _like as a classmethod
    ds = NDDataset.full_like(x, 2)
    assert np.all(ds.data == np.full((3, ), 2))
    assert ds.implements("NDDataset")

    # _like as an instance method
    ds = NDDataset(x).full_like(2)
    assert np.all(ds.data == np.full((3, ), 2))
    assert ds.implements("NDDataset")

    # _like as an instance method
    ds = NDDataset(x).empty_like(title="empty")
    assert ds.implements("NDDataset")
    assert ds.title == "empty"

    # from an array
    x = np.array([1, 2, 3])

    ds = NDDataset(x).full_like(2)
    assert np.all(ds.data == np.full((3, ), 2))
    assert ds.implements("NDDataset")

    # from a NDArray subclass with units
    x = NDDataset([1, 2, 3], units="km")
    ds = scp.full_like(x, 2)
    assert np.all(ds.data == np.full((3, ), 2))
    assert ds.implements("NDDataset")
    assert ds.units == ur.km

    ds1 = scp.full_like(ds, np.nan, dtype=np.double, units="m")
    assert ds1.units == Unit("m")

    # change of units is forced
    ds2 = scp.full_like(ds, 2, dtype=np.double, units="s")
    assert ds2.units == ur.s

    # other like creation functions
    nd = scp.empty_like(ds, dtype=np.double, units="m")
    assert str(nd) == "NDDataset: [float64] m (size: 3)"
    assert nd.dtype == np.dtype(np.double)

    nd = scp.zeros_like(ds, dtype=np.double, units="m")
    assert str(nd) == "NDDataset: [float64] m (size: 3)"
    assert np.all(nd.data == np.zeros((3, )))

    nd = scp.ones_like(ds, dtype=np.double, units="m")
    assert str(nd) == "NDDataset: [float64] m (size: 3)"
    assert np.all(nd.data == np.ones((3, )))

    # FULL
    # ----

    ds = NDDataset.full((6, ), 0.1)
    assert ds.size == 6
    assert str(ds) == "NDDataset: [float64] unitless (size: 6)"

    # ZEROS
    # -----

    ds = NDDataset.zeros((6, ), units="km")
    assert ds.size == 6
    assert str(ds) == "NDDataset: [float64] km (size: 6)"

    # ONES
    # ----

    ds = NDDataset.ones((6, ))
    ds = scp.full((6, ), 0.1)
    assert ds.size == 6
    assert str(ds) == "NDDataset: [float64] unitless (size: 6)"

    ds = NDDataset.ones((6, ), units="absorbance", dtype="complex128")
    assert ds.size == 3
    assert str(ds) == "NDDataset: [complex128] a.u. (size: 3)"
    assert ds[0].data == 1.0 + 1.0j

    # LINSPACE
    # --------

    c2 = Coord.linspace(1, 20, 200, units="m", name="mycoord")
    assert c2.name == "mycoord"
    assert c2.size == 200
    assert c2[-1].data == 20
    assert c2[0].values == Quantity(1, "m")

    # ARANGE
    # -------

    c3 = Coord.arange(1, 20.0001, 1, units="s", name="mycoord")
    assert c3.name == "mycoord"
    assert c3.size == 20
    assert c3[-1].data == 20
    assert c3[0].values == Quantity(1, "s")

    # EYE
    # ----

    ds1 = scp.NDDataset.eye(2, dtype=int)
    assert str(ds1) == "NDDataset: [float64] unitless (shape: (y:2, x:2))"
    ds = scp.eye(3, k=1, units="km")
    assert (ds.data == np.eye(3, k=1)).all()
    assert ds.units == ur.km

    # IDENTITY
    # --------

    ds = scp.identity(3, units="km")
    assert (ds.data == np.identity(3, )).all()
    assert ds.units == ur.km

    # RANDOM
    # ------

    ds = scp.random((3, 3), units="km")
    assert str(ds) == "NDDataset: [float64] km (shape: (y:3, x:3))"

    # adding coordset
    c1 = Coord.linspace(1, 20, 200, units="m", name="axe_x")
    ds = scp.random((200, ), units="km", coordset=scp.CoordSet(x=c1))

    # DIAGONAL
    # --------

    # extract diagonal
    nd = scp.full((2, 2), 0.5, units="s", title="initial")
    assert str(nd) == "NDDataset: [float64] s (shape: (y:2, x:2))"
    ndd = scp.diagonal(nd, title="diag")
    assert str(ndd) == "NDDataset: [float64] s (size: 2)"
    assert ndd.units == Unit("s")

    cx = scp.Coord([0, 1])
    cy = scp.Coord([2, 5])
    nd = NDDataset.full((2, 2),
                        0.5,
                        units="s",
                        coordset=scp.CoordSet(cx, cy),
                        title="initial")
    assert str(nd) == "NDDataset: [float64] s (shape: (y:2, x:2))"
    ndd = nd.diagonal(title="diag2")
    assert str(ndd) == "NDDataset: [float64] s (size: 2)"
    assert ndd.units == Unit("s")
    assert ndd.title == "diag2"

    cx = scp.Coord([0, 1, 2])
    cy = scp.Coord([2, 5])
    nd = NDDataset.full((2, 3),
                        0.5,
                        units="s",
                        coordset=scp.CoordSet(x=cx, y=cy),
                        title="initial")
    assert str(nd) == "NDDataset: [float64] s (shape: (y:2, x:3))"
    ndd = nd.diagonal(title="diag3")
    assert str(ndd) == "NDDataset: [float64] s (size: 2)"
    assert ndd.units == Unit("s")
    assert ndd.title == "diag3"
    assert_array_equal(nd.x.data[:ndd.x.size], ndd.x.data)

    ndd = nd.diagonal(title="diag4", dim="y")
    assert str(ndd) == "NDDataset: [float64] s (size: 2)"
    assert ndd.units == Unit("s")
    assert ndd.title == "diag4"
    assert_array_equal(nd.y.data[:ndd.y.size], ndd.y.data)

    # DIAG
    # ----

    ref = NDDataset(np.diag((3, 3.4, 2.3)), units="m", title="something")

    # Three forms should return the same NDDataset
    ds = scp.diag((3, 3.4, 2.3), units="m", title="something")
    assert_dataset_equal(ds, ref)

    ds = NDDataset.diag((3, 3.4, 2.3), units="m", title="something")
    assert_dataset_equal(ds, ref)

    ds = NDDataset((3, 3.4, 2.3)).diag(units="m", title="something")
    assert_dataset_equal(ds, ref)

    # and this too
    ds1 = NDDataset((3, 3.4, 2.3), units="s", title="another")

    ds = scp.diag(ds1, units="m", title="something")
    assert_dataset_equal(ds, ref)

    ds = ds1.diag(units="m", title="something")
    assert_dataset_equal(ds, ref)

    # BOOL : ALL and ANY
    # ------------------

    ds = NDDataset([[True, False], [True, True]])
    b = np.all(ds)
    assert not b

    b = scp.all(ds)
    assert not b

    b = ds.all()
    assert not b

    b = NDDataset.any(ds)
    assert b

    b = ds.all(dim="y")
    assert_array_equal(b, np.array([True, False]))

    b = ds.any(dim="y")
    assert_array_equal(b, np.array([True, True]))

    # ARGMAX, MAX
    # -----------

    nd1 = IR_dataset_1D.copy()
    nd1[1290.0:890.0] = MASKED
    assert nd1.is_masked
    assert str(nd1) == "NDDataset: [float64] a.u. (size: 5549)"

    idx = nd1.argmax()
    assert idx == 3122

    mx = nd1.max()
    # alternative
    mx = scp.max(nd1)
    mx = NDDataset.max(nd1)
    assert mx == Quantity(3.8080601692199707, "absorbance")

    mxk = nd1.max(keepdims=True)
    assert isinstance(mxk, NDDataset)
    assert str(mxk) == "NDDataset: [float64] a.u. (size: 1)"
    assert mxk.values == mx

    # test on a 2D NDDataset
    nd2 = IR_dataset_2D.copy()
    nd2[:, 1290.0:890.0] = MASKED

    mx = nd2.max()  # no axis specified
    assert mx == Quantity(3.8080601692199707, "absorbance")
    mxk = nd2.max(keepdims=True)
    assert str(mxk) == "NDDataset: [float64] a.u. (shape: (y:1, x:1))"

    nd2m = nd2.max("y")  # axis selected
    ax = nd2m.plot()
    nd2[0].plot(ax=ax, clear=False)

    nd2m2 = nd2.max("x")  # axis selected
    nd2m2.plot()

    nd2m = nd2.max("y", keepdims=True)
    assert nd2m.shape == (1, 5549)

    nd2m = nd2.max("x", keepdims=True)
    assert nd2m.shape == (55, 1)

    mx = nd2.min()  # no axis specified
    assert mx == Quantity(-0.022955093532800674, "absorbance")
    mxk = nd2.min(keepdims=True)
    assert str(mxk) == "NDDataset: [float64] a.u. (shape: (y:1, x:1))"

    nd2m = nd2.min("y")  # axis selected
    ax = nd2m.plot()
    nd2[0].plot(ax=ax, clear=False)

    nd2m2 = nd2.min("x")  # axis selected
    nd2m2.plot()

    nd2m = nd2.min("y", keepdims=True)
    assert nd2m.shape == (1, 5549)

    nd2m = nd2.min("x", keepdims=True)
    assert nd2m.shape == (55, 1)

    # CLIP
    # ----
    nd3 = nd2 - 2.0
    assert nd3.units == nd2.units
    nd3c = nd3.clip(-0.5, 1.0)
    assert nd3c.max().m == 1.0
    assert nd3c.min().m == -0.5

    # COORDMIN AND COORDMAX
    # ---------------------
    cm = nd2.coordmin()
    assert np.around(cm["x"], 3) == Quantity(1290.165, "cm^-1")

    cm = nd2.coordmin(dim="y")
    assert cm.size == 1

    cm = nd2.coordmax(dim="y")
    assert cm.size == 1

    cm = nd2.coordmax(dim="x")
    assert cm.size == 1

    # ABS
    # ----
    nd2a = scp.abs(nd2)
    mxa = nd2a.min()
    assert mxa > 0

    nd2a = NDDataset.abs(nd2)
    mxa = nd2a.min()
    assert mxa > 0

    nd2a = np.abs(nd2)
    mxa = nd2a.min()
    assert mxa > 0

    ndd = NDDataset([1.0, 2.0 + 1j, 3.0])
    val = np.abs(ndd)

    val = ndd[1] * 1.2 - 10.0
    val = np.abs(val)

    # FROMFUNCTION
    # ------------
    # 1D
    def func1(t, v):
        d = v * t
        return d

    time = Coord.linspace(
        0,
        9,
        10,
    )
    distance = NDDataset.fromfunction(func1, v=134, coordset=CoordSet(t=time))
    assert distance.dims == ["t"]
    assert_array_equal(distance.data, np.fromfunction(func1, (10, ), v=134))

    time = Coord.linspace(0, 90, 10, units="min")
    distance = NDDataset.fromfunction(func1,
                                      v=Quantity(134, "km/hour"),
                                      coordset=CoordSet(t=time))
    assert distance.dims == ["t"]
    assert_array_equal(distance.data,
                       np.fromfunction(func1, (10, ), v=134) * 10 / 60)

    # 2D
    def func2(x, y):
        d = x + 1 / y
        return d

    c0 = Coord.linspace(0, 9, 3)
    c1 = Coord.linspace(10, 20, 2)

    # implicit ordering of coords (y,x)
    distance = NDDataset.fromfunction(func2, coordset=CoordSet(c1, c0))
    assert distance.shape == (2, 3)
    assert distance.dims == ["y", "x"]

    # or equivalent
    distance = NDDataset.fromfunction(func2, coordset=[c1, c0])
    assert distance.shape == (2, 3)
    assert distance.dims == ["y", "x"]

    # explicit ordering of coords (y,x)  #
    distance = NDDataset.fromfunction(func2, coordset=CoordSet(u=c0, v=c1))

    assert distance.shape == (2, 3)
    assert distance.dims == ["v", "u"]
    assert distance[0, 2].data == distance.u[2].data + 1.0 / distance.v[0].data

    # with units
    def func3(x, y):
        d = x + y
        return d

    c0u = Coord.linspace(0, 9, 3, units="km")
    c1u = Coord.linspace(10, 20, 2, units="m")
    distance = NDDataset.fromfunction(func3, coordset=CoordSet(u=c0u, v=c1u))

    assert distance.shape == (2, 3)
    assert distance.dims == ["v", "u"]
    assert distance[0, 2].values == distance.u[2].values + distance.v[0].values

    c0u = Coord.linspace(0, 9, 3, units="km")
    c1u = Coord.linspace(10, 20, 2, units="m^-1")
    distance = NDDataset.fromfunction(func2, coordset=CoordSet(u=c0u, v=c1u))

    assert distance.shape == (2, 3)
    assert distance.dims == ["v", "u"]
    assert distance[
        0, 2].values == distance.u[2].values + 1.0 / distance.v[0].values

    # FROMITER
    # --------
    iterable = (x * x for x in range(5))
    nit = scp.fromiter(iterable, float, units="km")
    assert str(nit) == "NDDataset: [float64] km (size: 5)"
    assert_array_equal(nit.data, np.array([0, 1, 4, 9, 16]))

    # MEAN, AVERAGE
    # -------------

    nd = IR_dataset_2D.copy()

    m = scp.mean(nd)

    assert m.shape == ()
    assert m == Quantity(np.mean(nd.data), "absorbance")

    m = scp.average(nd)
    assert m.shape == ()
    assert m == Quantity(np.average(nd.data), "absorbance")

    mx = scp.mean(nd, keepdims=True)
    assert mx.shape == (1, 1)

    mxd = scp.mean(nd, dim="y")
    assert str(mxd) == "NDDataset: [float64] a.u. (size: 5549)"
    assert str(mxd.x) == "LinearCoord: [float64] cm⁻¹ (size: 5549)"

    # ----
    nd2 = NDDataset([[0, 1, 2], [3, 4, 5]])  # no coord (check issues)

    m = scp.mean(nd2)

    assert m.shape == ()
    assert m == np.mean(nd2.data)
    assert m == 2.5

    m = scp.mean(nd2, keepdims=True)
    assert m.shape == (1, 1)
    assert m.data == [[2.5]]

    m = scp.mean(nd2, dim="y")
    assert m.shape == (3, )
    assert_array_equal(m.data, [1.5, 2.5, 3.5])
    assert str(m) == "NDDataset: [float64] unitless (size: 3)"

    m = scp.mean(nd2, dim=0, keepdims=True)
    assert m.shape == (1, 3)
    assert_array_equal(m.data, [[1.5, 2.5, 3.5]])
    assert str(m) == "NDDataset: [float64] unitless (shape: (y:1, x:3))"

    m = nd2.mean(dim="y")
    assert m.shape == (3, )
    assert_array_equal(m.data, [1.5, 2.5, 3.5])
    assert str(m) == "NDDataset: [float64] unitless (size: 3)"
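A short sketch grouping the creation helpers exercised above (full_like, zeros, Coord.linspace, random); every call and keyword appears in the test, only the values differ.

import spectrochempy as scp

x = scp.NDDataset([1, 2, 3], units="km")
ds = scp.full_like(x, 2.0, units="m")        # same shape, new fill value and units
zs = scp.NDDataset.zeros((6,), units="km")   # six zeros with km units

c = scp.Coord.linspace(1, 20, 200, units="m", name="mycoord")
rnd = scp.random((200,), units="km", coordset=scp.CoordSet(x=c))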
Example #11
def test_round_docstring_example():
    ds = scp.read("wodger.spg")
    ds_transformed1 = np.round(ds, 3)
    ds_transformed2 = np.around(ds, 3)
    ds_transformed3 = scp.around(ds, 3)
    ds_transformed4 = scp.round(ds, 3)
    ds_transformed5 = ds.round(3)
    ds_transformed6 = NDDataset.round(ds, 3)

    assert_dataset_equal(ds_transformed1, ds_transformed2)
    assert_dataset_equal(ds_transformed1, ds_transformed3)
    assert_dataset_equal(ds_transformed1, ds_transformed4)
    assert_dataset_equal(ds_transformed1, ds_transformed5)
    assert_dataset_equal(ds_transformed1, ds_transformed6)

    ds[:, 3000.0:3500.0] = scp.MASKED
    dsm_transformed1 = np.ma.round(ds)
    dsm_transformed2 = np.around(ds)
    dsm_transformed3 = scp.around(ds)
    dsm_transformed4 = ds.round()

    assert_dataset_equal(dsm_transformed1, dsm_transformed2)
    assert_dataset_equal(dsm_transformed1, dsm_transformed3)
    assert_dataset_equal(dsm_transformed1, dsm_transformed4)
Example #12
def test_2D_NDDataset(arr):
    # 2D
    ds = scp.NDDataset(arr)
    assert ds.size == arr.size
    assert ds.shape == arr.shape
    if ds.size == 0:
        assert ds.dtype is None
        assert ds.dims == []
    else:
        assert ds.dtype == np.float64
        assert ds.dims == ['y', 'x'][-ds.ndim:]
    assert ds.title == "<untitled>"
    assert ds.mask == scp.NOMASK
    assert ds.meta == {}
    assert ds.name.startswith("NDDataset")
    assert ds.author == get_user_and_node()
    assert not ds.history
    assert ds.description == ""
    # force dtype
    ds = scp.NDDataset(arr, dtype=np.float32)
    if ds.size == 0:
        assert ds.dtype is None
    else:
        assert ds.dtype == np.float32
    if ds.shape[-1] % 2 == 0:  # must be even
        ds = scp.NDDataset(arr, dtype=np.complex128)  # np.complex was removed in recent numpy
        if ds.size == 0:
            assert ds.dtype is None
        else:
            assert ds.dtype == np.complex128
    else:
        with pytest.raises(ValueError):
            ds = scp.NDDataset(arr, dtype=np.complex128)
    if (arr.shape[-1] % 2) == 0 and (arr.shape[-2] % 2) == 0 and arr.ndim == 2:
        ds = scp.NDDataset(arr, dtype=np.quaternion)
        if ds.size == 0:
            assert ds.dtype is None
        else:
            assert ds.dtype == np.quaternion
    else:
        with pytest.raises(ValueError):
            ds = scp.NDDataset(arr, dtype=np.quaternion)
    # test units
    ds1 = scp.NDDataset(arr * scp.ur.km)
    ds2 = scp.NDDataset(arr, units=scp.ur.km)
    assert ds1.units == scp.ur.km
    assert ds2.units == scp.ur.km
    assert_dataset_equal(ds1, ds2)
    # masking
    ds1[0] = scp.MASKED
    assert ds1.is_masked
    # init with another dataset
    ds2 = scp.NDDataset(ds1)
    assert_dataset_equal(ds1, ds2)
    # check no coordinates
    assert not ds2.is_complex
    assert ds2.coordset is None  # no coordinates
    assert ds2.x is None  # no coordinates
    with pytest.raises(AttributeError):
        ds2.t
    # dim attributes
    ds2 = scp.NDDataset(arr, dims=['u', 'w'])
    assert ds2.ndim == 2
    assert ds2.dims == ['u', 'w']