Example #1
File: testing.py Project: noorbuchi/pygmt
def download_test_data():
    """
    Convenience function to download remote data files used in PyGMT tests and
    docs.
    """
    # List of datasets to download
    datasets = [
        # Earth relief grids
        "@earth_relief_01d_p",
        "@earth_relief_01d_g",
        "@earth_relief_30m_p",
        "@earth_relief_30m_g",
        "@earth_relief_10m_p",
        "@earth_relief_05m_p",
        "@earth_relief_05m_g",
        # List of tiles of 03s srtm data.
        # Names like @N35E135.earth_relief_03s_g.nc are for internal use only.
        # The naming scheme may change. DO NOT USE IT IN YOUR SCRIPTS.
        "@N35E135.earth_relief_03s_g.nc",
        # Other cache files
        "@fractures_06.txt",
        "@ridge.txt",
        "@srtm_tiles.nc",  # needed for 03s and 01s relief data
        "@Table_5_11.txt",
        "@test.dat.nc",
        "@tut_bathy.nc",
        "@tut_quakes.ngdc",
        "@tut_ship.xyz",
        "@usgs_quakes_22.txt",
    ]
    which(fname=datasets, download="a")
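A minimal sketch of the same caching pattern for a single file, assuming PyGMT is installed and an internet connection is available on the first call (the ``@`` prefix marks a GMT remote file):

import pygmt

# Download the file into the GMT cache directory (usually ~/.gmt/cache) if it
# is not already there, and return the local path.
local_path = pygmt.which(fname="@tut_quakes.ngdc", download="c")
print(local_path)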
Example #2
def load_sample_bathymetry(**kwargs):
    """
    (Deprecated) Load a table of ship observations of bathymetry off Baja
    California as a pandas.DataFrame.

    .. warning:: Deprecated since v0.6.0. This function has been replaced with
       ``load_sample_data(name="bathymetry")`` and will be removed in
       v0.9.0.

    This is the ``@tut_ship.xyz`` dataset used in the GMT tutorials.

    The data are downloaded to a cache directory (usually ``~/.gmt/cache``) the
    first time you invoke this function. Afterwards, it will load the data from
    the cache. So you'll need an internet connection the first time around.

    Returns
    -------
    data : pandas.DataFrame
        The data table. Columns are longitude, latitude, and bathymetry.
    """

    if "suppress_warning" not in kwargs:
        warnings.warn(
            "This function has been deprecated since v0.6.0 and will be "
            "removed in v0.9.0. Please use "
            "load_sample_data(name='bathymetry') instead.",
            category=FutureWarning,
            stacklevel=2,
        )
    fname = which("@tut_ship.xyz", download="c")
    data = pd.read_csv(fname,
                       sep="\t",
                       header=None,
                       names=["longitude", "latitude", "bathymetry"])
    return data
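Since the snippet above only silences the warning when a ``suppress_warning`` keyword is passed, the forward-looking call is the replacement named in the deprecation notice. A minimal sketch, assuming PyGMT >= v0.6.0 where ``load_sample_data`` is available:

from pygmt.datasets import load_sample_data

# Same bathymetry table, loaded through the unified sample-data interface
# (no FutureWarning is emitted).
data = load_sample_data(name="bathymetry")
print(data.head())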
Example #3
def load_japan_quakes():
    """
    Load a table of earthquakes around Japan as a pandas.DataFrame.

    Data is from the NOAA NGDC database. This is the ``@tut_quakes.ngdc``
    dataset used in the GMT tutorials.

    The data are downloaded to a cache directory (usually ``~/.gmt/cache``) the
    first time you invoke this function. Afterwards, it will load the data from
    the cache. So you'll need an internet connection the first time around.

    Returns
    -------
    data : pandas.DataFrame
        The data table. Columns are year, month, day, latitude, longitude,
        depth (in km), and magnitude of the earthquakes.
    """
    fname = which("@tut_quakes.ngdc", download="c")
    data = pd.read_csv(fname, header=1, sep=r"\s+")
    data.columns = [
        "year",
        "month",
        "day",
        "latitude",
        "longitude",
        "depth_km",
        "magnitude",
    ]
    return data
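Because the loader returns a plain pandas.DataFrame with named columns, ordinary pandas operations apply directly. A small usage sketch (the magnitude threshold is illustrative):

from pygmt.datasets import load_japan_quakes

quakes = load_japan_quakes()
# Keep only stronger events and summarize their depths.
strong = quakes[quakes["magnitude"] >= 6]
print(strong["depth_km"].describe())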
Example #4
def load_usgs_quakes(**kwargs):
    """
    (Deprecated) Load a table of global earthquakes from the USGS as a
    pandas.DataFrame.

    .. warning:: Deprecated since v0.6.0. This function has been replaced with
       ``load_sample_data(name="usgs_quakes")`` and will be removed in
       v0.9.0.

    This is the ``@usgs_quakes_22.txt`` dataset used in the GMT tutorials.

    The data are downloaded to a cache directory (usually ``~/.gmt/cache``) the
    first time you invoke this function. Afterwards, it will load the data from
    the cache. So you'll need an internet connection the first time around.

    Returns
    -------
    data : pandas.DataFrame
        The data table. Use ``print(data.describe())`` to see the available
        columns.
    """

    if "suppress_warning" not in kwargs:
        warnings.warn(
            "This function has been deprecated since v0.6.0 and will be "
            "removed in v0.9.0. Please use "
            "load_sample_data(name='usgs_quakes') instead.",
            category=FutureWarning,
            stacklevel=2,
        )
    fname = which("@usgs_quakes_22.txt", download="c")
    data = pd.read_csv(fname)
    return data
Example #5
def load_hotspots():
    """
    Load a table with the locations, names, and suggested symbol sizes of
    hotspots.

    This is the ``@hotspots.txt`` dataset used in the GMT tutorials, with data
    from Mueller, Royer, and Lawver, 1993, Geology, vol. 21, pp. 275-278. The
    main 5 hotspots used by Doubrovine et al. [2012] have symbol sizes twice
    the size of all other hotspots.

    The data are downloaded to a cache directory (usually ``~/.gmt/cache``) the
    first time you invoke this function. Afterwards, it will load the data from
    the cache. So you'll need an internet connection the first time around.

    Returns
    -------
    data : pandas.DataFrame
        The data table with columns "longitude", "latitude", "symbol_size", and
        "placename".
    """
    fname = which("@hotspots.txt", download="c")
    columns = ["longitude", "latitude", "symbol_size", "place_name"]
    data = pd.read_table(filepath_or_buffer=fname,
                         sep="\t",
                         skiprows=3,
                         names=columns)
    return data
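The docstring notes that the five main hotspots carry symbol sizes twice that of the others, so they can be picked out from the "symbol_size" column. A short sketch, assuming the column names used in the snippet above:

from pygmt.datasets import load_hotspots

hotspots = load_hotspots()
# The "main" hotspots are the rows with the largest symbol_size value.
main = hotspots[hotspots["symbol_size"] == hotspots["symbol_size"].max()]
print(main[["place_name", "longitude", "latitude"]])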
Example #6
File: testing.py Project: seisman/pygmt
def download_test_data():
    """
    Convenience function to download remote data files used in PyGMT tests and
    docs.
    """
    # List of datasets to download
    datasets = [
        # Earth relief grids
        "@earth_relief_01d_p",
        "@earth_relief_01d_g",
        "@earth_relief_30m_p",
        "@earth_relief_30m_g",
        "@earth_relief_10m_p",
        "@earth_relief_05m_p",
        "@earth_relief_05m_g",
        # List of tiles of 03s srtm data.
        # Names like @N35E135.earth_relief_03s_g.nc are for internal use only.
        # The naming scheme may change. DO NOT USE IT IN YOUR SCRIPTS.
        "@N30W120.earth_relief_15s_p.nc",
        "@N35E135.earth_relief_03s_g.nc",
        "@N37W120.earth_relief_03s_g.nc",
        "@N00W090.earth_relief_03m_p.nc",
        # Earth seafloor age grids
        "@earth_age_01d_g",
        "@S90W180.earth_age_05m_g.nc",  # Specific grid for 05m test
        # Other cache files
        "@EGM96_to_36.txt",
        "@MaunaLoa_CO2.txt",
        "@RidgeTest.shp",
        "@RidgeTest.shx",
        "@RidgeTest.dbf",
        "@RidgeTest.prj",
        "@Table_5_11.txt",
        "@Table_5_11_mean.xyz",
        "@fractures_06.txt",
        "@hotspots.txt",
        "@ridge.txt",
        "@mars370d.txt",
        "@srtm_tiles.nc",  # needed for 03s and 01s relief data
        "@static_earth_relief.nc",
        "@test.dat.nc",
        "@tut_bathy.nc",
        "@tut_quakes.ngdc",
        "@tut_ship.xyz",
        "@usgs_quakes_22.txt",
    ]
    which(fname=datasets, download="a")
Example #7
def fixture_table():
    """
    Load the Mauna Loa CO2 time series from the @MaunaLoa_CO2.txt cache file
    as a pandas.DataFrame.
    """
    fname = which("@MaunaLoa_CO2.txt", download="c")
    data = pd.read_csv(fname,
                       header=None,
                       skiprows=1,
                       sep=r"\s+",
                       names=["date", "co2_ppm"])
    return data
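In the PyGMT test modules a loader like this is normally registered as a module-scoped pytest fixture (the decorator is not shown in this listing). A hedged sketch of how it might be declared and consumed; the fixture and test names here are illustrative:

import pandas as pd
import pytest
from pygmt import which


@pytest.fixture(scope="module", name="table")
def fixture_table():
    """
    Load the Mauna Loa CO2 table from the GMT cache as a pandas.DataFrame.
    """
    fname = which("@MaunaLoa_CO2.txt", download="c")
    return pd.read_csv(fname, header=None, skiprows=1, sep=r"\s+",
                       names=["date", "co2_ppm"])


def test_co2_table_columns(table):
    # pytest injects the fixture's return value via the matching argument name.
    assert list(table.columns) == ["date", "co2_ppm"]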
Example #8
File: testing.py Project: weiji14/pygmt
def load_static_earth_relief():
    """
    Load the static_earth_relief file for internal testing.

    Returns
    -------
    data : xarray.DataArray
        A grid of Earth relief for internal tests.
    """
    fname = which("@static_earth_relief.nc", download="c")
    return load_dataarray(fname)
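The helper returns an xarray.DataArray, so standard xarray inspection works on it. A quick sketch, assuming the function is importable from pygmt.helpers.testing as the file path in the listing suggests:

from pygmt.helpers.testing import load_static_earth_relief

grid = load_static_earth_relief()
# Inspect the grid like any other xarray.DataArray.
print(grid.dims, grid.shape)
print(float(grid.min()), float(grid.max()))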
Example #9
def load_japan_quakes(**kwargs):
    """
    (Deprecated) Load a table of earthquakes around Japan as a
    pandas.DataFrame.

    .. warning:: Deprecated since v0.6.0. This function has been replaced with
       ``load_sample_data(name="japan_quakes")`` and will be removed in
       v0.9.0.

    Data is from the NOAA NGDC database. This is the ``@tut_quakes.ngdc``
    dataset used in the GMT tutorials.

    The data are downloaded to a cache directory (usually ``~/.gmt/cache``) the
    first time you invoke this function. Afterwards, it will load the data from
    the cache. So you'll need an internet connection the first time around.

    Returns
    -------
    data : pandas.DataFrame
        The data table. Columns are year, month, day, latitude, longitude,
        depth (in km), and magnitude of the earthquakes.
    """

    if "suppress_warning" not in kwargs:
        warnings.warn(
            "This function has been deprecated since v0.6.0 and will be "
            "removed in v0.9.0. Please use "
            "load_sample_data(name='japan_quakes') instead.",
            category=FutureWarning,
            stacklevel=2,
        )

    fname = which("@tut_quakes.ngdc", download="c")
    data = pd.read_csv(fname, header=1, sep=r"\s+")
    data.columns = [
        "year",
        "month",
        "day",
        "latitude",
        "longitude",
        "depth_km",
        "magnitude",
    ]

    return data
Example #10
def load_usgs_quakes():
    """
    Load a table of global earthquakes from the USGS as a pandas.DataFrame.

    This is the ``@usgs_quakes_22.txt`` dataset used in the GMT tutorials.

    The data are downloaded to a cache directory (usually ``~/.gmt/cache``) the
    first time you invoke this function. Afterwards, it will load the data from
    the cache. So you'll need an internet connection the first time around.

    Returns
    -------
    data : pandas.DataFrame
        The data table. Use ``print(data.describe())`` to see the available
        columns.
    """
    fname = which("@usgs_quakes_22.txt", download="c")
    data = pd.read_csv(fname)
    return data
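The docstring suggests inspecting the columns with ``describe()``; a minimal sketch of that workflow:

from pygmt.datasets import load_usgs_quakes

quakes = load_usgs_quakes()
# See which columns the USGS catalog provides and their summary statistics.
print(quakes.columns.tolist())
print(quakes.describe())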
Example #11
def load_hotspots(**kwargs):
    """
    (Deprecated) Load a table with the locations, names, and suggested symbol
    sizes of hotspots.

    .. warning:: Deprecated since v0.6.0. This function has been replaced with
       ``load_sample_data(name="hotspots")`` and will be removed in
       v0.9.0.

    This is the ``@hotspots.txt`` dataset used in the GMT tutorials, with data
    from Mueller, Royer, and Lawver, 1993, Geology, vol. 21, pp. 275-278. The
    main 5 hotspots used by Doubrovine et al. [2012] have symbol sizes twice
    the size of all other hotspots.

    The data are downloaded to a cache directory (usually ``~/.gmt/cache``) the
    first time you invoke this function. Afterwards, it will load the data from
    the cache. So you'll need an internet connection the first time around.

    Returns
    -------
    data : pandas.DataFrame
        The data table with columns "longitude", "latitude", "symbol_size", and
        "placename".
    """

    if "suppress_warning" not in kwargs:
        warnings.warn(
            "This function has been deprecated since v0.6.0 and will be "
            "removed in v0.9.0. Please use "
            "load_sample_data(name='hotspots') instead.",
            category=FutureWarning,
            stacklevel=2,
        )
    fname = which("@hotspots.txt", download="c")
    columns = ["longitude", "latitude", "symbol_size", "place_name"]
    data = pd.read_table(filepath_or_buffer=fname,
                         sep="\t",
                         skiprows=3,
                         names=columns)
    return data
Example #12
def load_sample_bathymetry():
    """
    Load a table of ship observations of bathymetry off Baja California as a
    pandas.DataFrame.

    This is the ``@tut_ship.xyz`` dataset used in the GMT tutorials.

    The data are downloaded to a cache directory (usually ``~/.gmt/cache``) the
    first time you invoke this function. Afterwards, it will load the data from
    the cache. So you'll need an internet connection the first time around.

    Returns
    -------
    data : pandas.DataFrame
        The data table. Columns are longitude, latitude, and bathymetry.
    """
    fname = which("@tut_ship.xyz", download="c")
    data = pd.read_csv(fname,
                       sep="\t",
                       header=None,
                       names=["longitude", "latitude", "bathymetry"])
    return data
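In the GMT tutorials this ship-track table is usually pre-processed before gridding. A hedged sketch using pygmt.blockmedian; the region and spacing values are illustrative, and the name of the data-table parameter can differ between PyGMT versions:

import pygmt
from pygmt.datasets import load_sample_bathymetry

ship = load_sample_bathymetry()
# Median-filter the scattered soundings onto 5-arc-minute blocks.
blocked = pygmt.blockmedian(data=ship, region=[245, 255, 20, 30], spacing="5m")
print(blocked.head())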
Example #13
def load_ocean_ridge_points():
    """
    Load a table of ocean ridge points for the entire world as a
    pandas.DataFrame.

    This is the ``@ridge.txt`` dataset used in the GMT tutorials.

    The data are downloaded to a cache directory (usually ``~/.gmt/cache``) the
    first time you invoke this function. Afterwards, it will load the data from
    the cache. So you'll need an internet connection the first time around.

    Returns
    -------
    data : pandas.DataFrame
        The data table. Columns are longitude and latitude.
    """
    fname = which("@ridge.txt", download="c")
    data = pd.read_csv(fname,
                       sep=r"\s+",
                       names=["longitude", "latitude"],
                       skiprows=1,
                       comment=">")
    return data
Example #14
def load_fractures_compilation():
    """
    Load a table of fracture lengths and azimuths as hypothetically digitized
    from geological maps as a pandas.DataFrame.

    This is the ``@fractures_06.txt`` dataset used in the GMT tutorials.

    The data are downloaded to a cache directory (usually ``~/.gmt/cache``) the
    first time you invoke this function. Afterwards, it will load the data from
    the cache. So you'll need an internet connection the first time around.

    Returns
    -------
    data : pandas.DataFrame
        The data table. Use ``print(data.describe())`` to see the available
        columns.
    """
    fname = which("@fractures_06.txt", download="c")
    data = pd.read_csv(fname,
                       header=None,
                       sep=r"\s+",
                       names=["azimuth", "length"])
    return data[["length", "azimuth"]]
Example #15
def load_ocean_ridge_points(**kwargs):
    """
    (Deprecated) Load a table of ocean ridge points for the entire world as a
    pandas.DataFrame.

    .. warning:: Deprecated since v0.6.0. This function has been replaced with
       ``load_sample_data(name="ocean_ridge_points")`` and will be removed in
       v0.9.0.

    This is the ``@ridge.txt`` dataset used in the GMT tutorials.

    The data are downloaded to a cache directory (usually ``~/.gmt/cache``) the
    first time you invoke this function. Afterwards, it will load the data from
    the cache. So you'll need an internet connection the first time around.

    Returns
    -------
    data : pandas.DataFrame
        The data table. Columns are longitude and latitude.
    """

    if "suppress_warning" not in kwargs:
        warnings.warn(
            "This function has been deprecated since v0.6.0 and will be removed "
            "in v0.9.0. Please use load_sample_data(name='ocean_ridge_points') "
            "instead.",
            category=FutureWarning,
            stacklevel=2,
        )

    fname = which("@ridge.txt", download="c")
    data = pd.read_csv(fname,
                       sep=r"\s+",
                       names=["longitude", "latitude"],
                       skiprows=1,
                       comment=">")
    return data
Example #16
def load_mars_shape(**kwargs):
    """
    (Deprecated) Load a table of data for the shape of Mars.

    .. warning:: Deprecated since v0.6.0. This function has been replaced with
       ``load_sample_data(name="mars_shape")`` and will be removed in
       v0.9.0.

    This is the ``@mars370d.txt`` dataset used in GMT examples, with data and
    information from Smith, D. E., and M. T. Zuber (1996), The shape of Mars
    and the topographic signature of the hemispheric dichotomy. Data columns
    are "longitude," "latitude", and "radius (meters)."

    The data are downloaded to a cache directory (usually ``~/.gmt/cache``) the
    first time you invoke this function. Afterwards, it will load the data from
    the cache. So you'll need an internet connection the first time around.

    Returns
    -------
    data : pandas.DataFrame
        The data table with columns "longitude", "latitude", and "radius(m)".
    """

    if "suppress_warning" not in kwargs:
        warnings.warn(
            "This function has been deprecated since v0.6.0 and will be "
            "removed in v0.9.0. Please use "
            "load_sample_data(name='mars_shape') instead.",
            category=FutureWarning,
            stacklevel=2,
        )
    fname = which("@mars370d.txt", download="c")
    data = pd.read_csv(fname,
                       sep="\t",
                       header=None,
                       names=["longitude", "latitude", "radius(m)"])
    return data
Example #17
def load_fractures_compilation(**kwargs):
    """
    (Deprecated) Load a table of fracture lengths and azimuths as
    hypothetically digitized from geological maps as a pandas.DataFrame.

    .. warning:: Deprecated since v0.6.0. This function has been replaced with
       ``load_sample_data(name="fractures")`` and will be removed in
       v0.9.0.

    This is the ``@fractures_06.txt`` dataset used in the GMT tutorials.

    The data are downloaded to a cache directory (usually ``~/.gmt/cache``) the
    first time you invoke this function. Afterwards, it will load the data from
    the cache. So you'll need an internet connection the first time around.

    Returns
    -------
    data : pandas.DataFrame
        The data table. Use ``print(data.describe())`` to see the available
        columns.
    """

    if "suppress_warning" not in kwargs:
        warnings.warn(
            "This function has been deprecated since v0.6.0 and will be "
            "removed in v0.9.0. Please use "
            "load_sample_data(name='fractures') instead.",
            category=FutureWarning,
            stacklevel=2,
        )
    fname = which("@fractures_06.txt", download="c")
    data = pd.read_csv(fname,
                       header=None,
                       sep=r"\s+",
                       names=["azimuth", "length"])
    return data[["length", "azimuth"]]
Example #18
def load_mars_shape():
    """
    Load a table of data for the shape of Mars.

    This is the ``@mars370d.txt`` dataset used in GMT examples, with data and
    information from Smith, D. E., and M. T. Zuber (1996), The shape of Mars
    and the topographic signature of the hemispheric dichotomy. Data columns
    are "longitude," "latitude", and "radius (meters)."

    The data are downloaded to a cache directory (usually ``~/.gmt/cache``) the
    first time you invoke this function. Afterwards, it will load the data from
    the cache. So you'll need an internet connection the first time around.

    Returns
    -------
    data : pandas.DataFrame
        The data table with columns "longitude", "latitude", and "radius(m)".
    """
    fname = which("@mars370d.txt", download="c")
    data = pd.read_csv(fname,
                       sep="\t",
                       header=None,
                       names=["longitude", "latitude", "radius(m)"])
    return data
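The radius column is in meters, so a quick pandas summary shows how the observations spread around their own mean. A small sketch using the column names from the snippet above:

from pygmt.datasets import load_mars_shape

mars = load_mars_shape()
# Deviation of each observation from the mean of the loaded radius values.
anomaly = mars["radius(m)"] - mars["radius(m)"].mean()
print(anomaly.describe())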
Example #19
def load_earth_relief(resolution="01d",
                      region=None,
                      registration=None,
                      use_srtm=False):
    r"""
    Load Earth relief grids (topography and bathymetry) in various resolutions.

    The grids are downloaded to a user data directory
    (usually ``~/.gmt/server/earth/earth_relief/``) the first time you invoke
    this function. Afterwards, it will load the grid from the data directory.
    So you'll need an internet connection the first time around.

    These grids can also be accessed by passing in the file name
    **@earth_relief**\_\ *res*\[_\ *reg*] to any grid plotting/processing
    function. *res* is the grid resolution (see below), and *reg* is grid
    registration type (**p** for pixel registration or **g** for gridline
    registration).

    Refer to :gmt-docs:`datasets/remote-data.html#global-earth-relief-grids`
    for more details.

    Parameters
    ----------
    resolution : str
        The grid resolution. The suffixes ``d``, ``m``, and ``s`` stand for
        arc-degrees, arc-minutes, and arc-seconds. It can be ``'01d'``, ``'30m'``,
        ``'20m'``, ``'15m'``, ``'10m'``, ``'06m'``, ``'05m'``, ``'04m'``,
        ``'03m'``, ``'02m'``, ``'01m'``, ``'30s'``, ``'15s'``, ``'03s'``,
        or ``'01s'``.

    region : str or list
        The subregion of the grid to load, in the forms of a list
        [*xmin*, *xmax*, *ymin*, *ymax*] or a string *xmin/xmax/ymin/ymax*.
        Required for Earth relief grids with resolutions of 5 arc-minutes
        (``05m``) or higher.

    registration : str
        Grid registration type. Either ``pixel`` for pixel registration or
        ``gridline`` for gridline registration. Default is ``None``, where
        a pixel-registered grid is returned unless only the
        gridline-registered grid is available.

    use_srtm : bool
        By default, the land-only SRTM tiles from NASA are used to generate the
        ``'03s'`` and ``'01s'`` grids, and the missing ocean values are filled
        by up-sampling the SRTM15+V2.1 tiles which have a resolution of 15
        arc-second (i.e., ``'15s'``). If True, will only load the original
        land-only SRTM tiles.

    Returns
    -------
    grid : :class:`xarray.DataArray`
        The Earth relief grid. Coordinates are latitude and longitude in
        degrees. Relief is in meters.

    Notes
    -----
    For Earth relief data with resolutions of ``05m`` or higher, which are
    stored as smaller tiles, the returned :class:`xarray.DataArray` grid
    doesn't support slice operations.

    Examples
    --------

    >>> # load the default grid (pixel-registered 01d grid)
    >>> grid = load_earth_relief()
    >>> # load the 30m grid with "gridline" registration
    >>> grid = load_earth_relief("30m", registration="gridline")
    >>> # load high-resolution grid for a specific region
    >>> grid = load_earth_relief(
    ...     "05m", region=[120, 160, 30, 60], registration="gridline"
    ... )
    >>> # load the original 3 arc-second land-only SRTM tiles from NASA
    >>> grid = load_earth_relief(
    ...     "03s",
    ...     region=[135, 136, 35, 36],
    ...     registration="gridline",
    ...     use_srtm=True,
    ... )
    """

    # earth relief data stored as single grids for low resolutions
    non_tiled_resolutions = ["01d", "30m", "20m", "15m", "10m", "06m"]
    # earth relief data stored as tiles for high resolutions
    tiled_resolutions = [
        "05m", "04m", "03m", "02m", "01m", "30s", "15s", "03s", "01s"
    ]
    # resolutions of original land-only SRTM tiles from NASA
    land_only_srtm_resolutions = ["03s", "01s"]

    if registration in ("pixel", "gridline", None):
        # If None, let GMT decide on Pixel/Gridline type
        reg = f"_{registration[0]}" if registration else ""
    else:
        raise GMTInvalidInput(
            f"Invalid grid registration: '{registration}', should be either "
            "'pixel', 'gridline' or None. Default is None, where a "
            "pixel-registered grid is returned unless only the "
            "gridline-registered grid is available.")

    if resolution not in non_tiled_resolutions + tiled_resolutions:
        raise GMTInvalidInput(
            f"Invalid Earth relief resolution '{resolution}'.")

    # Check combination of resolution and registration.
    if (resolution == "15s"
            and registration == "gridline") or (resolution in ("03s", "01s")
                                                and registration == "pixel"):
        raise GMTInvalidInput(
            f"{registration}-registered Earth relief data for "
            f"resolution '{resolution}' is not supported.")

    # Choose earth relief data prefix
    earth_relief_prefix = "earth_relief_"
    if use_srtm and resolution in land_only_srtm_resolutions:
        earth_relief_prefix = "srtm_relief_"

    # different ways to load tiled and non-tiled earth relief data
    # Known issue: tiled grids don't support slice operation
    # See https://github.com/GenericMappingTools/pygmt/issues/524
    if region is None:
        if resolution not in non_tiled_resolutions:
            raise GMTInvalidInput(
                f"'region' is required for Earth relief resolution '{resolution}'."
            )
        fname = which(f"@earth_relief_{resolution}{reg}", download="a")
        with xr.open_dataarray(fname) as dataarray:
            grid = dataarray.load()
            _ = grid.gmt  # load GMTDataArray accessor information
    else:
        grid = grdcut(f"@{earth_relief_prefix}{resolution}{reg}",
                      region=region)

    # Add some metadata to the grid
    grid.name = "elevation"
    grid.attrs["long_name"] = "elevation relative to the geoid"
    grid.attrs["units"] = "meters"
    grid.attrs["vertical_datum"] = "EMG96"
    grid.attrs["horizontal_datum"] = "WGS84"
    # Remove the actual range because it gets outdated when indexing the grid,
    # which causes problems when exporting it to netCDF for usage on the
    # command-line.
    grid.attrs.pop("actual_range")
    for coord in grid.coords:
        grid[coord].attrs.pop("actual_range")
    return grid
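The Notes above point out that tiled grids cannot be sliced; non-tiled resolutions return a single grid that supports ordinary xarray indexing. A sketch, assuming the grid's coordinates are named ``lat`` and ``lon`` as in the remote earth_relief files:

from pygmt.datasets import load_earth_relief

# Non-tiled resolutions (01d through 06m) come back as one grid.
grid = load_earth_relief(resolution="01d", registration="gridline")
# Ordinary xarray slicing works on non-tiled grids.
japan = grid.sel(lat=slice(30, 60), lon=slice(120, 160))
print(japan.shape)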
Example #20
def load_earth_age(resolution="01d", region=None, registration=None):
    r"""
    Load Earth seafloor crustal ages in various resolutions.

    The grids are downloaded to a user data directory
    (usually ``~/.gmt/server/earth/earth_age/``) the first time you invoke
    this function. Afterwards, it will load the grid from the data directory.
    So you'll need an internet connection the first time around.

    These grids can also be accessed by passing in the file name
    **@earth_age**\_\ *res*\[_\ *reg*] to any grid plotting/processing
    function. *res* is the grid resolution (see below), and *reg* is grid
    registration type (**p** for pixel registration or **g** for gridline
    registration).

    Refer to
    :gmt-docs:`datasets/remote-data.html#global-earth-seafloor-crustal-age-grids`
    for more details.

    Parameters
    ----------
    resolution : str
        The grid resolution. The suffixes ``d`` and ``m`` stand for
        arc-degrees and arc-minutes. It can be ``'01d'``, ``'30m'``,
        ``'20m'``, ``'15m'``, ``'10m'``, ``'06m'``, ``'05m'``, ``'04m'``,
        ``'03m'``, ``'02m'``, or ``'01m'``.

    region : str or list
        The subregion of the grid to load, in the forms of a list
        [*xmin*, *xmax*, *ymin*, *ymax*] or a string *xmin/xmax/ymin/ymax*.
        Required for grids with resolutions of 5 arc-minutes (``05m``) or
        higher.

    registration : str
        Grid registration type. Either ``pixel`` for pixel registration or
        ``gridline`` for gridline registration. Default is ``None``, where
        a pixel-registered grid is returned unless only the
        gridline-registered grid is available.

    Returns
    -------
    grid : :class:`xarray.DataArray`
        The Earth seafloor crustal age grid. Coordinates are latitude and
        longitude in degrees. Age is in millions of years (Myr).

    Notes
    -----
    For Earth seafloor crustal age data with resolutions of ``05m`` or higher,
    which are stored as smaller tiles, the returned :class:`xarray.DataArray`
    grid doesn't support slice operations.
    """

    # earth seafloor crust age data stored as single grids for low resolutions
    non_tiled_resolutions = ["01d", "30m", "20m", "15m", "10m", "06m"]
    # earth seafloor crust age data stored as tiles for high resolutions
    tiled_resolutions = ["05m", "04m", "03m", "02m", "01m"]

    if registration in ("pixel", "gridline", None):
        # If None, let GMT decide on Pixel/Gridline type
        reg = f"_{registration[0]}" if registration else ""
    else:
        raise GMTInvalidInput(
            f"Invalid grid registration: '{registration}', should be either "
            "'pixel', 'gridline' or None. Default is None, where a "
            "pixel-registered grid is returned unless only the "
            "gridline-registered grid is available.")

    if resolution not in non_tiled_resolutions + tiled_resolutions:
        raise GMTInvalidInput(
            f"Invalid Earth relief resolution '{resolution}'.")

    # Choose earth age data prefix
    earth_age_prefix = "earth_age_"

    # different ways to load tiled and non-tiled earth age data
    # Known issue: tiled grids don't support slice operation
    # See https://github.com/GenericMappingTools/pygmt/issues/524
    if region is None:
        if resolution not in non_tiled_resolutions:
            raise GMTInvalidInput(
                f"'region' is required for Earth age resolution '{resolution}'."
            )
        fname = which(f"@earth_age_{resolution}{reg}", download="a")
        grid = load_dataarray(fname, engine="netcdf4")
    else:
        grid = grdcut(f"@{earth_age_prefix}{resolution}{reg}", region=region)

    # Add some metadata to the grid
    grid.name = "seafloor_age"
    grid.attrs["long_name"] = "age of seafloor crust"
    grid.attrs["units"] = "Myr"
    grid.attrs["vertical_datum"] = "EMG96"
    grid.attrs["horizontal_datum"] = "WGS84"
    # Remove the actual range because it gets outdated when indexing the grid,
    # which causes problems when exporting it to netCDF for usage on the
    # command-line.
    grid.attrs.pop("actual_range")
    for coord in grid.coords:
        grid[coord].attrs.pop("actual_range")
    return grid
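Unlike ``load_earth_relief``, this docstring carries no Examples section; a hedged usage sketch mirroring the relief examples (the region values are illustrative):

from pygmt.datasets import load_earth_age

# Global 1-arc-degree grid: a single file, no region needed.
age = load_earth_age(resolution="01d")
print(age.name, age.attrs["units"])

# Tiled resolutions (05m and finer) require a region.
age_sub = load_earth_age(resolution="05m", region=[30, 60, -20, 20])
print(age_sub.shape)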