Example #1
# Imports assumed for these test snippets; VERSIONS is presumed to live in
# spectrochempy.optional alongside import_optional_dependency (see Example #7).
import sys
import types
import pytest
from spectrochempy.optional import VERSIONS, import_optional_dependency


def test_no_version_raises(monkeypatch):
    name = "fakemodule"
    module = types.ModuleType(name)
    sys.modules[name] = module
    monkeypatch.setitem(VERSIONS, name, "1.0.0")

    with pytest.raises(ImportError, match="Can't determine .* fakemodule"):
        import_optional_dependency(name)
Example #2
def test_import_optional():
    match = "Missing optional dependency 'notapackage'.  Use conda or pip to install notapackage."
    with pytest.raises(ImportError, match=match) as exc_info:
        import_optional_dependency("notapackage")
    # The original exception should be there as context:
    assert isinstance(exc_info.value.__context__, ImportError)

    result = import_optional_dependency("notapackage", errors="ignore")
    assert result is None
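Together with Example #4 below, these tests pin down the failure modes of import_optional_dependency: the default errors="raise" chains the original ImportError, while errors="ignore" turns a missing package into None. A minimal calling sketch (import path taken from Example #7):

from spectrochempy.optional import import_optional_dependency

# Returns the module when it is importable, None when it is missing.
xlrd = import_optional_dependency("xlrd", errors="ignore")
if xlrd is None:
    pass  # e.g. fall back to a code path that does not need xlrd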
Example #3
def get_plotly_figure(clear=True, fig=None, **kwargs):
    """
    Get the figure in which to plot.

    Parameters
    ----------
    clear : bool
        If False, the figure provided via the `fig` parameter is reused.
    fig : plotly figure
        If provided, and `clear` is not True, this figure is used for plotting.
    kwargs : any
        Keyword arguments passed to the plotly figure constructor.

    Returns
    -------
    Plotly figure instance
    """
    go = import_optional_dependency("plotly.graph_objects", errors="ignore")

    if go is None:
        raise ImportError(
            "Plotly is not installed. Uee pip or conda to install it")

    if clear or fig is None:
        # create a new figure, forwarding any keyword arguments to the constructor
        return go.Figure(**kwargs)

    # a figure already exists and clear is False: reuse it
    return fig
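A minimal usage sketch (assuming plotly is installed):

fig = get_plotly_figure()                      # a brand-new, empty figure
fig = get_plotly_figure(clear=False, fig=fig)  # reuse the figure created above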
Example #4
def test_bad_version(monkeypatch):
    name = "fakemodule"
    module = types.ModuleType(name)
    module.__version__ = "0.9.0"
    sys.modules[name] = module
    monkeypatch.setitem(VERSIONS, name, "1.0.0")

    match = "SpectroChemPy requires .*1.0.0.* of .fakemodule.*'0.9.0'"
    with pytest.raises(ImportError, match=match):
        import_optional_dependency("fakemodule")

    # Test min_version parameter
    result = import_optional_dependency("fakemodule", min_version="0.8")
    assert result is module

    with tm.assert_produces_warning(UserWarning):
        result = import_optional_dependency("fakemodule", errors="warn")
    assert result is None

    module.__version__ = "1.0.0"  # exact match is OK
    result = import_optional_dependency("fakemodule")
    assert result is module
Example #5
def test_compare_scikit_learn():
    try:
        import_optional_dependency("scikit-learn")
    except ImportError:
        # scikit-learn is not installed: nothing to compare against
        return

    from sklearn.decomposition import PCA as sklPCA

    X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])

    pcas = sklPCA(n_components=2)
    pcas.fit(X)

    pca = PCA(NDDataset(X))
    pca.printev(n_pc=2)

    assert_array_almost_equal(pca._sv.data, pcas.singular_values_)
    assert_array_almost_equal(pca.ev_ratio.data,
                              pcas.explained_variance_ratio_ * 100.0)

    dataset = NDDataset.read("irdata/nh4y-activation.spg")
    X1 = dataset.copy().data

    pcas = sklPCA(n_components=5, svd_solver="full")
    pcas.fit(X1)

    X2 = NDDataset(dataset.copy())
    pca = PCA(X2)

    pca.printev(n_pc=5)

    assert_array_almost_equal(pca._sv.data[:5], pcas.singular_values_[:5], 4)
    assert_array_almost_equal(pca.ev_ratio.data[:5],
                              pcas.explained_variance_ratio_[:5] * 100.0, 4)

    show()
Example #6
def test_submodule(monkeypatch):
    # Create a fake module with a submodule
    name = "fakemodule"
    module = types.ModuleType(name)
    module.__version__ = "0.9.0"
    sys.modules[name] = module
    sub_name = "submodule"
    submodule = types.ModuleType(sub_name)
    setattr(module, sub_name, submodule)
    sys.modules[f"{name}.{sub_name}"] = submodule
    monkeypatch.setitem(VERSIONS, name, "1.0.0")

    match = "SpectroChemPy requires .*1.0.0.* of .fakemodule.*'0.9.0'"
    with pytest.raises(ImportError, match=match):
        import_optional_dependency("fakemodule.submodule")

    with tm.assert_produces_warning(UserWarning):
        result = import_optional_dependency("fakemodule.submodule",
                                            errors="warn")
    assert result is None

    module.__version__ = "1.0.0"  # exact match is OK
    result = import_optional_dependency("fakemodule.submodule")
    assert result is submodule
Example #7
from scipy.optimize import minimize, differential_evolution, least_squares

from spectrochempy.optional import import_optional_dependency
from spectrochempy.core import error_
from spectrochempy.core.dataset.nddataset import NDDataset, Coord

__all__ = [
    "coverages_vs_time",
    "concentrations_vs_time",
    "modify_rate",
    "modify_surface_kinetics",
    "fit_to_concentrations",
    "PFR",
]

ct = import_optional_dependency("cantera", errors="ignore")


def _cantera_is_not_available():
    if ct is None:
        error_(
            "Missing optional dependency 'cantera'.  Use conda or pip to install cantera."
        )
    return ct is None
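

# Hypothetical illustration (not part of the original module): how the guard
# above is typically used so that public functions fail fast without cantera.
def _example_guarded_function(surface, t):
    if _cantera_is_not_available():
        return None  # the error has already been logged via error_()
    return ct.one_atm  # placeholder for a real cantera-based computation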


def coverages_vs_time(surface, t, returnNDDataset=False):
    """
    Returns the surface coverages at time(s) t.

    Parameters
Example #8
    def to_xarray(self):
        """
        Convert an NDDataset instance to an `~xarray.DataArray` object.

        Warning: the xarray library must be available.

        Returns
        -------
        object
            An xarray.DataArray object.
        """
        # Information about DataArray from the DataArray docstring
        #
        # Attributes
        # ----------
        # dims: tuple
        #     Dimension names associated with this array.
        # values: np.ndarray
        #     Access or modify DataArray values as a numpy array.
        # coords: dict-like
        #     Dictionary of DataArray objects that label values along each dimension.
        # name: str or None
        #     Name of this array.
        # attrs: OrderedDict
        #     Dictionary for holding arbitrary metadata.
        # Init docstring
        #
        # Parameters
        # ----------
        # data: array_like
        #     Values for this array. Must be an ``numpy.ndarray``, ndarray like,
        #     or castable to an ``ndarray``.
        # coords: sequence or dict of array_like objects, optional
        #     Coordinates (tick labels) to use for indexing along each dimension.
        #     If dict-like, should be a mapping from dimension names to the
        #     corresponding coordinates. If sequence-like, should be a sequence
        #     of tuples where the first element is the dimension name and the
        #     second element is the corresponding coordinate array_like object.
        # dims: str or sequence of str, optional
        #     Name(s) of the data dimension(s). Must be either a string (only
        #     for 1D data) or a sequence of strings with length equal to the
        #     number of dimensions. If this argument is omitted, dimension names
        #     are taken from ``coords`` (if possible) and otherwise default to
        #     ``['dim_0', ... 'dim_n']``.
        # name: str or None, optional
        #     Name of this array.
        # attrs: dict_like or None, optional
        #     Attributes to assign to the new instance. By default, an empty
        #     attribute dictionary is initialized.
        # encoding: dict_like or None, optional
        #     Dictionary specifying how to encode this array's data into a
        #     serialized format like netCDF4. Currently used keys (for netCDF)
        #     include '_FillValue', 'scale_factor', 'add_offset', 'dtype',
        #     'units' and 'calendar' (the latter two only for datetime arrays).
        #     Unrecognized keys are ignored.

        xr = import_optional_dependency("xarray")
        if xr is None:
            return

        x, y = self.x, self.y
        tx = x.title
        if y:
            ty = y.title
            da = xr.DataArray(
                np.array(self.data, dtype=np.float64),
                coords=[(ty, y.data), (tx, x.data)],
            )
        else:
            da = xr.DataArray(
                np.array(self.data, dtype=np.float64),
                coords=[(tx, x.data)],
            )

        # attach units and title as DataArray metadata
        da.attrs["units"] = self.units
        da.attrs["title"] = self.title

        return da
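A minimal usage sketch (hypothetical names; assumes xarray is installed and `ds` is a 2D NDDataset):

da = ds.to_xarray()
print(da.dims)                               # dimension names from the coordinate titles
print(da.attrs["units"], da.attrs["title"])  # metadata attached above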
Example #9
def show_versions(file=sys.stdout):
    """
    Print the versions of SpectroChemPy and its dependencies.

    Parameters
    ----------
    file : file-like, optional
        Print to the given file-like object. Defaults to sys.stdout.
    """

    print("\nINSTALLED VERSIONS", file=file)
    print("------------------", file=file)

    for key, val in get_sys_info():
        print(f"{key}: {val}", file=file)
    print(file=file)

    # dependencies - TODO: update this list upon changes in env_template.yml
    deps = ("""
        quadprog,
        brukeropusreader,
        quaternion,
        cantera,
        colorama,
        dill,
        ipython,
        jinja2,
        matplotlib,
        numba,
        numpy,
        pint,
        requests,
        scipy,
        tqdm,
        traitlets,
        traittypes,
        xlrd,
        pyyaml,
        ipywidgets,
        ipympl,
        setuptools,
        setuptools_scm,
        git,
        jupyterlab,
        nodejs,
        pytest,
        pytest-doctestplus,
        pytest-flake8,
        pytest-mock,
        pyfakefs,
        scikit-image,
        coverage,
        black,
        pre-commit,
        cffconvert,
        mamba,
        jupytext,
        sphinx,
        sphinx_rtd_theme,
        autodocsumm,
        sphinx-gallery,
        nbsphinx,
        jupyter_sphinx,
        json5,
        sphinx-copybutton,
        numpydoc,
        pandoc,
        conda-build,
        conda-verify,
        anaconda-client,
        xarray,
        scikit-learn,
        dash,
        dash-bootstrap-components,
        dash_daq,
        jupyter-dash,
        plotly,
        pip,
        autodoc_traits,
        dash_defer_js_import,
        dash-ace,
        spectrochempy
        """.replace("\n", "").replace(" ", "").split(","))

    for dep in deps:
        mod = optional.import_optional_dependency(dep, errors="ignore")
        try:
            print(
                f"{dep}: "
                f"{optional.get_module_version(mod) if mod is not None else None}",
                file=file,
            )
        except ImportError:
            print(f"{dep}: (Can't determine version string)", file=file)
Example #10
from matplotlib import pyplot as plt

from mpl_toolkits.axes_grid1 import make_axes_locatable
from traitlets import Dict, HasTraits, Instance, Union, default, TraitError

from spectrochempy.utils.plots import get_figure, _Axes, _Axes3D
from spectrochempy.utils import pathclean
from spectrochempy.core.dataset.meta import Meta
from spectrochempy.core import preferences, plot_preferences, error_
from spectrochempy.core.plotters.plot1d import plot_1D
from spectrochempy.core.plotters.plot2d import plot_2D
from spectrochempy.core.plotters.plot3d import plot_3D

from spectrochempy.optional import import_optional_dependency

go = import_optional_dependency("plotly.graph_objects", errors="ignore")
HAS_PLOTLY = go is not None
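

# Hypothetical sketch (not in the original module): downstream code can gate
# plotly-only paths on the module-level HAS_PLOTLY flag computed above.
def _example_plotly_gate():
    if not HAS_PLOTLY:
        error_("Plotly is required for this feature.")
        return None
    return go.Figure()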

# from spectrochempy.utils import deprecated

# ======================================================================================================================
# Management of the preferences for datasets
# ======================================================================================================================


class PreferencesSet(Meta):
    """
    Preferences setting.
    """
    def __getitem__(self, key):
Example #11
def download_iris():
    """
    Download the classical `IRIS` dataset.

    The `IRIS` dataset is a classical example for machine learning. It is downloaded from
    the [UCI remote repository](https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data).

    Returns
    -------
    dataset
        The `IRIS` dataset.

    See Also
    --------
    read : Read data from experimental files.
    """
    url = "https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data"

    try:
        connection = True
        response = requests.get(url, stream=True, timeout=10)
    except OSError:
        error_(
            "OSError: Cannot connect to the UCI repository. Try Scikit-Learn")
        connection = False

    if connection:  # Download data
        txtdata = ""
        for rd in response.iter_content():
            txtdata += rd.decode("utf8")

        fil = StringIO(txtdata)
        try:
            data = np.loadtxt(fil, delimiter=",", usecols=range(4))
            fil.seek(0)
            labels = np.loadtxt(fil, delimiter=",", usecols=(4,), dtype="|S")
            labels = [lab.decode("utf8") for lab in labels]
        except Exception as exc:
            raise OSError("Cannot parse the downloaded `IRIS` data") from exc

        coordx = Coord(
            labels=[
                "sepal_length", "sepal width", "petal_length", "petal_width"
            ],
            title="features",
        )
        coordy = Coord(labels=labels, title="samples")

        new = NDDataset(
            data,
            coordset=[coordy, coordx],
            title="size",
            name="`IRIS` Dataset",
            units="cm",
        )

        new.history = "Loaded from UC Irvine machine learning repository"

        return new

    else:
        # Cannot download - use the scikit-learn dataset (if scikit-learn is installed)

        sklearn = import_optional_dependency("sklearn", errors="ignore")
        if sklearn is None:
            raise OSError("Failed in uploading the `IRIS` dataset!")
        else:
            from sklearn import datasets

        data = datasets.load_iris()

        coordx = Coord(
            labels=[
                "sepal_length", "sepal width", "petal_length", "petal_width"
            ],
            title="features",
        )
        labels = [data.target_names[i] for i in data.target]
        coordy = Coord(labels=labels, title="samples")

        new = NDDataset(
            data.data,
            coordset=[coordy, coordx],
            title="size",
            name="`IRIS` Dataset",
            units="cm",
        )

        new.history = "Loaded from scikit-learn datasets"

        return new
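A minimal usage sketch; whichever branch supplies the data, the result is a 150 x 4 NDDataset:

ds = download_iris()
print(ds.shape)  # (150, 4): 150 samples, 4 features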