示例#1
0
def legend(self, spec=None, position="JTR+jTR+o0.2c", box="+gwhite+p1p", **kwargs):
    r"""
    Plot legends on maps.

    Makes legends that can be overlaid on maps. Reads specific
    legend-related information from an input file, or automatically creates
    legend entries from plotted symbols that have labels. Unless otherwise
    noted, annotations will be made using the primary annotation font and
    size in effect (i.e., FONT_ANNOT_PRIMARY).

    Full option list at :gmt-docs:`legend.html`

    {aliases}

    Parameters
    ----------
    spec : None or str
        Either ``None`` [default] for using the automatically generated legend
        specification file, or a *filename* pointing to the legend
        specification file.
    {J}
    {R}
    position : str
        [**g**\|\ **j**\|\ **J**\|\ **n**\|\ **x**]\ *refpoint*\
        **+w**\ *width*\ [/*height*]\ [**+j**\ *justify*]\ [**+l**\ *spacing*]\
        [**+o**\ *dx*\ [/*dy*]].
        Defines the reference point on the map for the
        legend. By default, uses **JTR**\ +\ **jTR**\ +\ **o**\ *0.2c* which
        places the legend at the top-right corner inside the map frame, with a
        0.2 cm offset.
    box : bool or str
        [**+c**\ *clearances*][**+g**\ *fill*][**+i**\ [[*gap*/]\ *pen*]]\
        [**+p**\ [*pen*]][**+r**\ [*radius*]][**+s**\ [[*dx*/*dy*/][*shade*]]].
        Without further arguments, draws a rectangular border around the legend
        using :gmt-term:`MAP_FRAME_PEN`. By default, uses
        **+g**\ white\ **+p**\ 1p which draws a box around the legend using a
        1p black pen and adds a white background.
    {V}
    {XY}
    {c}
    {p}
    {t}
    """
    kwargs = self._preprocess(**kwargs)  # pylint: disable=protected-access

    if "D" not in kwargs:
        kwargs["D"] = position

        if "F" not in kwargs:
            kwargs["F"] = box

    with Session() as lib:
        if spec is None:
            specfile = ""
        elif data_kind(spec) == "file":
            specfile = spec
        else:
            raise GMTInvalidInput("Unrecognized data type: {}".format(type(spec)))
        arg_str = " ".join([specfile, build_arg_string(kwargs)])
        lib.call_module("legend", arg_str)
示例#2
0
def test_x2sys_cross_invalid_tracks_input_type(tracks):
    """
    Ensure x2sys_cross raises GMTInvalidInput for tracks input that is
    neither a pandas.DataFrame (matrix) nor a str (file).
    """
    bad_tracks = tracks[0].to_xarray().z
    assert data_kind(bad_tracks) == "grid"
    with pytest.raises(GMTInvalidInput):
        x2sys_cross(tracks=[bad_tracks])
示例#3
0
def test_delaunay_triples_wrong_kind_of_input(dataframe):
    """
    Ensure triangulate.delaunay_triples raises GMTInvalidInput for grid
    input that is not file/matrix/vectors.
    """
    grid_input = dataframe.z.to_xarray()  # pandas.Series -> xarray.DataArray
    assert data_kind(grid_input) == "grid"
    with pytest.raises(GMTInvalidInput):
        triangulate.delaunay_triples(data=grid_input)
示例#4
0
def test_blockmean_wrong_kind_of_input_table_grid(dataframe):
    """
    Ensure blockmean raises GMTInvalidInput when the table input is a grid
    instead of a pandas.DataFrame or file.
    """
    grid_input = dataframe.bathymetry.to_xarray()
    assert data_kind(grid_input) == "grid"
    with pytest.raises(GMTInvalidInput):
        blockmean(data=grid_input, spacing="5m", region=[245, 255, 20, 30])
示例#5
0
def test_nearneighbor_wrong_kind_of_input(ship_data):
    """
    Ensure nearneighbor raises GMTInvalidInput for grid input that is not
    file/matrix/vectors.
    """
    grid_input = ship_data.bathymetry.to_xarray()  # Series -> xarray.DataArray
    assert data_kind(grid_input) == "grid"
    with pytest.raises(GMTInvalidInput):
        nearneighbor(
            data=grid_input, spacing="5m", region=[245, 255, 20, 30], search_radius="10m"
        )
示例#6
0
def test_blockmean_wrong_kind_of_input_table_matrix():
    """
    Ensure blockmean raises GMTInvalidInput for table input that is a bare
    matrix (numpy array) rather than a pandas.DataFrame.
    """
    bathymetry = load_sample_bathymetry()
    matrix_input = bathymetry.values
    assert data_kind(matrix_input) == "matrix"
    with pytest.raises(GMTInvalidInput):
        blockmean(table=matrix_input, spacing="5m", region=[245, 255, 20, 30])
示例#7
0
def test_grdview_wrong_kind_of_grid(xrgrid):
    """
    Ensure grdview raises GMTInvalidInput for grid input that is not an
    xarray.DataArray or a file.
    """
    # An xarray.Dataset is classified as "matrix", which grdview rejects.
    dataset = xrgrid.to_dataset()
    assert data_kind(dataset) == "matrix"

    fig = Figure()
    with pytest.raises(GMTInvalidInput):
        fig.grdview(grid=dataset)
示例#8
0
def test_surface_wrong_kind_of_input():
    """
    Ensure surface raises GMTInvalidInput for grid input that is not
    file/matrix/vectors.
    """
    ship_data = load_sample_bathymetry()
    # pandas.Series -> xarray.DataArray, which surface treats as "grid"
    grid_input = ship_data.bathymetry.to_xarray()
    assert data_kind(grid_input) == "grid"
    with pytest.raises(GMTInvalidInput):
        surface(data=grid_input, spacing="5m", region=[245, 255, 20, 30])
示例#9
0
def test_grdtrack_wrong_kind_of_points_input(dataarray, dataframe):
    """
    Ensure grdtrack raises GMTInvalidInput for ``points`` input that is
    neither a pandas.DataFrame (matrix) nor a file.
    """
    bad_points = dataframe.longitude.to_xarray()
    assert data_kind(bad_points) == "grid"
    with pytest.raises(GMTInvalidInput):
        grdtrack(points=bad_points, grid=dataarray, newcolname="bathymetry")
示例#10
0
def test_grdtrack_wrong_kind_of_grid_input(dataarray, dataframe):
    """
    Ensure grdtrack raises GMTInvalidInput for ``grid`` input that is
    neither an xarray.DataArray (grid) nor a file.
    """
    bad_grid = dataarray.to_dataset()
    assert data_kind(bad_grid) == "matrix"
    with pytest.raises(GMTInvalidInput):
        grdtrack(points=dataframe, grid=bad_grid, newcolname="bathymetry")
示例#11
0
def _blockm(block_method, table, outfile, **kwargs):
    r"""
    Block average (x,y,z) data tables by mean or median estimation.

    Reads arbitrarily located (x,y,z) triples [or optionally weighted
    quadruples (x,y,z,w)] from a table and writes to the output a mean or
    median (depending on ``block_method``) position and value for every
    non-empty block in a grid region defined by the ``region`` and ``spacing``
    parameters.

    Parameters
    ----------
    block_method : str
        Name of the GMT module to call. Must be "blockmean" or "blockmedian".
    table : pandas.DataFrame or str
        Either a table-like object exposing ``values`` and ``columns``
        attributes (e.g. a pandas.DataFrame), or a file name to an ASCII
        data table.
    outfile : str or None
        File name for the output ASCII table. Required when ``table`` is a
        file name; when ``None``, the result is read back and returned as a
        :class:`pandas.DataFrame`.
    kwargs : dict
        Extra GMT options, converted to a command string by
        ``build_arg_string``.

    Returns
    -------
    output : pandas.DataFrame or None
        Return type depends on whether the ``outfile`` parameter is set:

        - :class:`pandas.DataFrame` table with (x, y, z) columns if ``outfile``
          is not set
        - None if ``outfile`` is set (filtered output will be stored in file
          set by ``outfile``)
    """

    kind = data_kind(table)
    with GMTTempFile(suffix=".csv") as tmpfile:
        with Session() as lib:
            if kind == "matrix":
                # Only objects exposing a ``values`` array (e.g. DataFrame)
                # are accepted; bare numpy arrays are rejected here.
                if not hasattr(table, "values"):
                    raise GMTInvalidInput(
                        f"Unrecognized data type: {type(table)}")
                file_context = lib.virtualfile_from_matrix(table.values)
            elif kind == "file":
                # File input carries no column metadata to build a DataFrame
                # from, so an explicit output file is mandatory.
                if outfile is None:
                    raise GMTInvalidInput("Please pass in a str to 'outfile'")
                file_context = dummy_context(table)
            else:
                raise GMTInvalidInput(f"Unrecognized data type: {type(table)}")

            with file_context as infile:
                if outfile is None:
                    # No user outfile: redirect output to the temporary csv.
                    outfile = tmpfile.name
                arg_str = " ".join(
                    [infile, build_arg_string(kwargs), "->" + outfile])
                lib.call_module(module=block_method, args=arg_str)

        # Read temporary csv output to a pandas table
        if outfile == tmpfile.name:  # if user did not set outfile, return pd.DataFrame
            result = pd.read_csv(tmpfile.name, sep="\t", names=table.columns)
        elif outfile != tmpfile.name:  # return None if outfile set, output in outfile
            result = None

    return result
示例#12
0
def grdinfo(grid, **kwargs):
    """
    Get information about a grid.

    The grid may be given as the name of a grid file or as an
    xarray.DataArray.

    Full option list at :gmt-docs:`grdinfo.html`

    Parameters
    ----------
    grid : str or xarray.DataArray
        The file name of the input grid or the grid loaded as a DataArray.

    {V}

    Returns
    -------
    info : str
        A string with information about the grid.

    """
    kind = data_kind(grid, None, None)
    with GMTTempFile() as tmpfile:
        with Session() as lib:
            # Wrap the grid input in a context yielding a file name GMT can read.
            if kind == "grid":
                file_context = lib.virtualfile_from_grid(grid)
            elif kind == "file":
                file_context = dummy_context(grid)
            else:
                raise GMTInvalidInput(f"Unrecognized data type: {type(grid)}")
            with file_context as infile:
                lib.call_module(
                    "grdinfo",
                    " ".join(
                        [infile, build_arg_string(kwargs), "->" + tmpfile.name]),
                )
        # The module wrote its report to the temp file; hand it back as text.
        return tmpfile.read()
示例#13
0
def grdview(self, grid, **kwargs):
    r"""
    Create 3-D perspective image or surface mesh from a grid.

    Reads a 2-D grid file and produces a 3-D perspective plot by drawing a
    mesh, painting a colored/gray-shaded surface made up of polygons, or by
    scanline conversion of these polygons to a raster image. Options
    include draping a data set on top of a surface, plotting of contours on
    top of the surface, and apply artificial illumination based on
    intensities provided in a separate grid file.

    Full option list at :gmt-docs:`grdview.html`

    {aliases}

    Parameters
    ----------
    grid : str or xarray.DataArray
        The file name of the input relief grid or the grid loaded as a
        DataArray.
    region : str or list
        *xmin/xmax/ymin/ymax*\ [**+r**][**+u**\ *unit*].
        Specify the :doc:`region </tutorials/basics/regions>` of interest.
        When used with ``perspective``, optionally append */zmin/zmax* to
        indicate the range to use for the 3-D axes [Default is the region in
        the input grid].
    {J}
    zscale/zsize : float or str
        Set z-axis scaling or z-axis size.
    {B}
    cmap : str
        The name of the color palette table to use.
    drapegrid : str or xarray.DataArray
        The file name or a DataArray of the image grid to be draped on top
        of the relief provided by grid. [Default determines colors from
        grid]. Note that ``zscale`` and ``plane`` always refers to the grid.
        The drapegrid only provides the information pertaining to colors, which
        (if drapegrid is a grid) will be looked-up via the CPT (see ``cmap``).
    plane : float or str
        *level*\ [**+g**\ *fill*].
        Draws a plane at this z-level. If the optional color is provided
        via the **+g** modifier, and the projection is not oblique, the frontal
        facade between the plane and the data perimeter is colored.
    surftype : str
        Specifies cover type of the grid.
        Select one of following settings:

        - **m** - mesh plot [Default].
        - **mx** or **my** - waterfall plots (row or column profiles).
        - **s** - surface plot, and optionally append **m** to have mesh lines
          drawn on top of the surface.
        - **i** - image plot.
        - **c** - Same as **i** but will make nodes with z = NaN transparent.

        For any of these choices, you may force a monochrome image by
        appending the modifier **+m**.
    contourpen : str
        Draw contour lines on top of surface or mesh (not image). Append
        pen attributes used for the contours.
    meshpen : str
        Sets the pen attributes used for the mesh. You must also select
        ``surftype`` of **m** or **sm** for meshlines to be drawn.
    facadepen :str
        Sets the pen attributes used for the facade. You must also select
        ``plane`` for the facade outline to be drawn.
    shading : str
        Provide the name of a grid file with intensities in the (-1,+1)
        range, or a constant intensity to apply everywhere (affects the
        ambient light). Alternatively, derive an intensity grid from the
        input data grid reliefgrid via a call to ``grdgradient``; append
        **+a**\ *azimuth*, **+n**\ *args*, and **+m**\ *ambient* to specify
        azimuth, intensity, and ambient arguments for that method, or just give
        **+d** to select the default arguments
        [Default is **+a**\ -45\ **+nt**\ 1\ **+m**\ 0].
    {V}
    {XY}
    {c}
    {f}
    {n}
    {p}
    {t}
    """
    kwargs = self._preprocess(**kwargs)  # pylint: disable=protected-access
    with Session() as lib:
        # The relief grid itself: a file name or DataArray, wrapped as a
        # virtual file so GMT can read it.
        file_context = lib.virtualfile_from_data(check_kind="raster",
                                                 data=grid)

        # ExitStack manages the (possibly two) nested virtual-file contexts.
        with contextlib.ExitStack() as stack:
            if kwargs.get("G") is not None:
                # deal with kwargs["G"] if drapegrid is xr.DataArray
                drapegrid = kwargs["G"]
                if data_kind(drapegrid) in ("file", "grid"):
                    if data_kind(drapegrid) == "grid":
                        # Swap the DataArray for a virtual-file name; plain
                        # file names are passed through unchanged.
                        drape_context = lib.virtualfile_from_grid(drapegrid)
                        kwargs["G"] = stack.enter_context(drape_context)
                else:
                    raise GMTInvalidInput(
                        f"Unrecognized data type for drapegrid: {type(drapegrid)}"
                    )
            fname = stack.enter_context(file_context)
            lib.call_module(module="grdview",
                            args=build_arg_string(kwargs, infile=fname))
示例#14
0
File: plot3d.py  Project: seisman/pygmt
def plot3d(self,
           data=None,
           x=None,
           y=None,
           z=None,
           size=None,
           direction=None,
           **kwargs):
    r"""
    Plot lines, polygons, and symbols in 3-D.

    Takes a matrix, (x,y,z) triplets, or a file name as input and plots
    lines, polygons, or symbols at those locations in 3-D.

    Must provide either ``data`` or ``x``/``y``/``z``.

    If providing data through ``x/y/z``, ``color`` can be a 1d array
    that will be mapped to a colormap.

    If a symbol is selected and no symbol size given, then plot3d will
    interpret the fourth column of the input data as symbol size. Symbols
    whose size is <= 0 are skipped. If no symbols are specified then the
    symbol code (see ``style`` below) must be present as last column in the
    input. If ``style`` is not used, a line connecting the data points will
    be drawn instead. To explicitly close polygons, use ``close``. Select a
    fill with ``color``. If ``color`` is set, ``pen`` will control whether the
    polygon outline is drawn or not. If a symbol is selected, ``color`` and
    ``pen`` determines the fill and outline/no outline, respectively.

    Full option list at :gmt-docs:`plot3d.html`

    {aliases}

    Parameters
    ----------
    data : str or {table-like}
        Either a data file name, a 2d {table-classes}.
        Optionally, use parameter ``incols`` to specify which columns are x, y,
        z, color, and size, respectively.
    x/y/z : float or 1d arrays
        The x, y, and z coordinates, or arrays of x, y and z coordinates of
        the data points
    size : 1d array
        The size of the data points in units specified in ``style``.
        Only valid if using ``x``/``y``/``z``.
    direction : list of two 1d arrays
        If plotting vectors (using ``style='V'`` or ``style='v'``), then
        should be a list of two 1d arrays with the vector directions. These
        can be angle and length, azimuth and length, or x and y components,
        depending on the style options chosen.
    {J}
    zscale/zsize : float or str
        Set z-axis scaling or z-axis size.
    {R}
    straight_line : bool or str
        [**m**\|\ **p**\|\ **x**\|\ **y**].
        By default, geographic line segments are drawn as great circle
        arcs. To draw them as straight lines, use ``straight_line``.
        Alternatively, add **m** to draw the line by first following a
        meridian, then a parallel. Or append **p** to start following a
        parallel, then a meridian. (This can be practical to draw a line
        along parallels, for example). For Cartesian data, points are
        simply connected, unless you append **x** or **y** to draw
        stair-case curves that whose first move is along *x* or *y*,
        respectively. **Note**: The ``straight_line`` parameter requires
        constant *z*-coordinates.
    {B}
    {CPT}
    offset : str
        *dx*/*dy*\ [/*dz*].
        Offset the plot symbol or line locations by the given amounts
        *dx*/*dy*\ [/*dz*] [Default is no offset].
    {G}
        *color* can be a 1d array, but it is only valid if using ``x``/``y``
        and ``cmap=True`` is also required.
    intensity : float or bool or 1d array
        Provide an *intensity* value (nominally in the -1 to +1 range) to
        modulate the fill color by simulating illumination. If using
        ``intensity=True``, we will instead read *intensity* from the first
        data column after the symbol parameters (if given). *intensity* can
        also be a 1d array to set varying intensity for symbols, but it is only
        valid for ``x``/``y``/``z``.

    close : str
        [**+b**\|\ **d**\|\ **D**][**+xl**\|\ **r**\|\ *x0*]\
        [**+yl**\|\ **r**\|\ *y0*][**+p**\ *pen*].
        Force closed polygons. Full documentation is at
        :gmt-docs:`plot3d.html#l`.
    no_clip : bool or str
        [**c**\|\ **r**].
        Do NOT clip symbols that fall outside map border [Default plots
        points whose coordinates are strictly inside the map border only].
        This parameter does not apply to lines and polygons which are always
        clipped to the map region. For periodic (360-longitude) maps we
        must plot all symbols twice in case they are clipped by the
        repeating boundary. ``no_clip=True`` will turn off clipping and not
        plot repeating symbols. Use ``no_clip="r"`` to turn off clipping
        but retain the plotting of such repeating symbols, or use
        ``no_clip="c"`` to retain clipping but turn off plotting of
        repeating symbols.
    no_sort : bool
        Turn off the automatic sorting of items based on their distance
        from the viewer. The default is to sort the items so that items in
        the foreground are plotted after items in the background.
    style : str
        Plot symbols. Full documentation is at :gmt-docs:`plot3d.html#s`.
    {U}
    {V}
    {W}
    {XY}
    zvalue : str
        *value*\|\ *file*.
        Instead of specifying a symbol or polygon fill and outline color
        via ``color`` and ``pen``, give both a *value* via **zvalue** and a
        color lookup table via ``cmap``.  Alternatively, give the name of a
        *file* with one z-value (read from the last column) for each
        polygon in the input data. To apply it to the fill color, use
        ``color='+z'``. To apply it to the pen color, append **+z** to
        ``pen``.
    {a}
    {b}
    {c}
    {d}
    {e}
    {f}
    {g}
    {h}
    {i}
    {l}
    {p}
    {t}
        *transparency* can also be a 1d array to set varying transparency
        for symbols, but this option is only valid if using x/y/z.
    {w}
    """
    # pylint: disable=too-many-locals
    kwargs = self._preprocess(**kwargs)  # pylint: disable=protected-access

    # Classify the input so later branches know what was passed in
    # (e.g. "file", "geojson", "vectors").
    kind = data_kind(data, x, y, z)

    # Columns appended to the x/y/z data (directions, colors, sizes, ...).
    extra_arrays = []
    # Vector styles (v/V) read their direction components as extra columns.
    if kwargs.get("S") is not None and kwargs["S"][
            0] in "vV" and direction is not None:
        extra_arrays.extend(direction)
    elif (
            kwargs.get("S") is None and kind == "geojson"
            and data.geom_type.isin(["Point", "MultiPoint"]).all()
    ):  # checking if the geometry of a geoDataFrame is Point or MultiPoint
        kwargs["S"] = "u0.2c"
    elif kwargs.get("S") is None and kind == "file" and str(data).endswith(
            ".gmt"):
        # checking that the data is a file path to set default style
        try:
            # Peek at the OGR/GMT header line to detect point geometry.
            with open(which(data), mode="r", encoding="utf8") as file:
                line = file.readline()
            if "@GMULTIPOINT" in line or "@GPOINT" in line:
                # if the file is gmt style and geometry is set to Point
                kwargs["S"] = "u0.2c"
        except FileNotFoundError:
            pass
    if kwargs.get("G") is not None and is_nonstr_iter(kwargs["G"]):
        # Array-valued fill colors only work with x/y/z vector input.
        if kind != "vectors":
            raise GMTInvalidInput(
                "Can't use arrays for color if data is matrix or file.")
        extra_arrays.append(kwargs["G"])
        del kwargs["G"]
    if size is not None:
        if kind != "vectors":
            raise GMTInvalidInput(
                "Can't use arrays for 'size' if data is a matrix or a file.")
        extra_arrays.append(size)

    # Intensity (I) and transparency (t) may also be per-symbol arrays.
    for flag in ["I", "t"]:
        if kwargs.get(flag) is not None and is_nonstr_iter(kwargs[flag]):
            if kind != "vectors":
                raise GMTInvalidInput(
                    f"Can't use arrays for {plot3d.aliases[flag]} if data is matrix or file."
                )
            extra_arrays.append(kwargs[flag])
            # Keep the flag set but empty; values come from the extra column.
            kwargs[flag] = ""

    with Session() as lib:
        # Choose how data will be passed in to the module
        file_context = lib.virtualfile_from_data(
            check_kind="vector",
            data=data,
            x=x,
            y=y,
            z=z,
            extra_arrays=extra_arrays,
            required_z=True,
        )

        with file_context as fname:
            lib.call_module(module="plot3d",
                            args=build_arg_string(kwargs, infile=fname))
示例#15
0
def blockmedian(table, outfile=None, **kwargs):
    r"""
    Block average (x,y,z) data tables by median estimation.

    Reads arbitrarily located (x,y,z) triples [or optionally weighted
    quadruples (x,y,z,w)] from ``table`` and writes out the median position
    and value for every non-empty block of a grid defined by the ``region``
    and ``spacing`` arguments.

    Full option list at :gmt-docs:`blockmedian.html`

    {aliases}

    Parameters
    ----------
    table : pandas.DataFrame or str
        Either a pandas dataframe with (x, y, z) or (longitude, latitude,
        elevation) values in the first three columns, or a file name to an
        ASCII data table.

    spacing : str
        *xinc*\[\ *unit*\][**+e**\|\ **n**]
        [/*yinc*\ [*unit*][**+e**\|\ **n**]].
        *xinc* [and optionally *yinc*] is the grid spacing.

    region : str or list
        *xmin/xmax/ymin/ymax*\[\ **+r**\][**+u**\ *unit*].
        Specify the region of interest.

    outfile : str
        Required if ``table`` is a file. The file name for the output ASCII
        file.

    {V}

    Returns
    -------
    output : pandas.DataFrame or None
        Return type depends on whether the ``outfile`` parameter is set:

        - :class:`pandas.DataFrame` table with (x, y, z) columns if ``outfile``
          is not set
        - None if ``outfile`` is set (filtered output will be stored in file
          set by ``outfile``)
    """
    kind = data_kind(table)
    with GMTTempFile(suffix=".csv") as tmpfile:
        with Session() as lib:
            if kind == "file":
                # File input requires an explicit output file name.
                if outfile is None:
                    raise GMTInvalidInput("Please pass in a str to 'outfile'")
                file_context = dummy_context(table)
            elif kind == "matrix":
                # Only table-like objects exposing ``values`` are accepted.
                if not hasattr(table, "values"):
                    raise GMTInvalidInput(
                        f"Unrecognized data type: {type(table)}")
                file_context = lib.virtualfile_from_matrix(table.values)
            else:
                raise GMTInvalidInput(f"Unrecognized data type: {type(table)}")

            with file_context as infile:
                if outfile is None:
                    # Redirect output to the temporary csv when no outfile given.
                    outfile = tmpfile.name
                lib.call_module(
                    module="blockmedian",
                    args=" ".join(
                        [infile, build_arg_string(kwargs), "->" + outfile]),
                )

        # Only build a DataFrame when the output went to our temp file;
        # otherwise the caller's outfile holds the result and we return None.
        result = None
        if outfile == tmpfile.name:
            result = pd.read_csv(tmpfile.name, sep="\t", names=table.columns)

    return result
示例#16
0
File: plot.py  Project: noorbuchi/pygmt
def plot(self, x=None, y=None, data=None, size=None, direction=None, **kwargs):
    r"""
    Plot lines, polygons, and symbols in 2-D.

    Takes a matrix, (x,y) pairs, or a file name as input and plots lines,
    polygons, or symbols at those locations on a map.

    Must provide either ``data`` or ``x``/``y``.

    If providing data through ``x``/``y``, ``color`` can be a 1d array that
    will be mapped to a colormap.

    If a symbol is selected and no symbol size given, then plot will
    interpret the third column of the input data as symbol size. Symbols
    whose size is <= 0 are skipped. If no symbols are specified then the
    symbol code (see ``style`` below) must be present as last column in the
    input. If ``style`` is not used, a line connecting the data points will
    be drawn instead. To explicitly close polygons, use ``close``. Select a
    fill with ``color``. If ``color`` is set, ``pen`` will control whether the
    polygon outline is drawn or not. If a symbol is selected, ``color`` and
    ``pen`` determines the fill and outline/no outline, respectively.

    Full parameter list at :gmt-docs:`plot.html`

    {aliases}

    Parameters
    ----------
    x/y : float or 1d arrays
        The x and y coordinates, or arrays of x and y coordinates of the
        data points
    data : str or {table-like}
        Pass in either a file name to an ASCII data table, a 2D
        {table-classes}.
        Use parameter ``columns`` to choose which columns are x, y, color, and
        size, respectively.
    size : 1d array
        The size of the data points in units specified using ``style``.
        Only valid if using ``x``/``y``.
    direction : list of two 1d arrays
        If plotting vectors (using ``style='V'`` or ``style='v'``), then
        should be a list of two 1d arrays with the vector directions. These
        can be angle and length, azimuth and length, or x and y components,
        depending on the style options chosen.
    {J}
    {R}
    straight_line : bool or str
        [**m**\|\ **p**\|\ **x**\|\ **y**].
        By default, geographic line segments are drawn as great circle
        arcs. To draw them as straight lines, use ``straight_line``.
        Alternatively, add **m** to draw the line by first following a
        meridian, then a parallel. Or append **p** to start following a
        parallel, then a meridian. (This can be practical to draw a line
        along parallels, for example). For Cartesian data, points are
        simply connected, unless you append **x** or **y** to draw
        stair-case curves that whose first move is along *x* or *y*,
        respectively.
    {B}
    {CPT}
    offset : str
        *dx*/*dy*.
        Offset the plot symbol or line locations by the given amounts
        *dx/dy* [Default is no offset]. If *dy* is not given it is set
        equal to *dx*.
    error_bar : bool or str
        [**+b**\|\ **d**\|\ **D**][**+xl**\|\ **r**\|\ *x0*]\
        [**+yl**\|\ **r**\|\ *y0*][**+p**\ *pen*].
        Draw symmetrical error bars. Full documentation is at
        :gmt-docs:`plot.html#e`.
    connection : str
        [**c**\|\ **n**\|\ **r**]\
        [**a**\|\ **f**\|\ **s**\|\ **r**\|\ *refpoint*].
        Alter the way points are connected (by specifying a *scheme*) and
        data are grouped (by specifying a *method*). Append one of three
        line connection schemes:

        - **c** : Draw continuous line segments for each group [Default].
        - **r** : Draw line segments from a reference point reset for each
          group.
        - **n** : Draw networks of line segments between all points in
          each group.

        Optionally, append the one of four segmentation methods to define
        the group:

        - **a** : Ignore all segment headers, i.e., let all points belong
          to a single group, and set group reference point to the very
          first point of the first file.
        - **f** : Consider all data in each file to be a single separate
          group and reset the group reference point to the first point of
          each group.
        - **s** : Segment headers are honored so each segment is a group;
          the group reference point is reset to the first point of each
          incoming segment [Default].
        - **r** : Same as **s**, but the group reference point is reset
          after each record to the previous point (this method is only
          available with the ``connection='r'`` scheme).

        Instead of the codes **a**\|\ **f**\|\ **s**\|\ **r** you may append
        the coordinates of a *refpoint* which will serve as a fixed external
        reference point for all groups.
    {G}
        *color* can be a 1d array, but it is only valid if using ``x``/``y``
        and ``cmap=True`` is also required.
    intensity : float or bool or 1d array
        Provide an *intensity* value (nominally in the -1 to +1 range) to
        modulate the fill color by simulating illumination. If using
        ``intensity=True``, we will instead read *intensity* from the first
        data column after the symbol parameters (if given). *intensity* can
        also be a 1d array to set varying intensity for symbols, but it is only
        valid for ``x``/``y`` pairs.
    close : str
        [**+b**\|\ **d**\|\ **D**][**+xl**\|\ **r**\|\ *x0*]\
        [**+yl**\|\ **r**\|\ *y0*][**+p**\ *pen*].
        Force closed polygons. Full documentation is at
        :gmt-docs:`plot.html#l`.
    no_clip : bool or str
        [**c**\|\ **r**].
        Do NOT clip symbols that fall outside map border [Default plots
        points whose coordinates are strictly inside the map border only].
        The parameter does not apply to lines and polygons which are always
        clipped to the map region. For periodic (360-longitude) maps we
        must plot all symbols twice in case they are clipped by the
        repeating boundary. ``no_clip=True`` will turn off clipping and not
        plot repeating symbols. Use ``no_clip="r"`` to turn off clipping
        but retain the plotting of such repeating symbols, or use
        ``no_clip="c"`` to retain clipping but turn off plotting of
        repeating symbols.
    style : str
        Plot symbols (including vectors, pie slices, fronts, decorated or
        quoted lines).
    {W}
    {U}
    {V}
    {XY}
    zvalue : str
        *value*\|\ *file*.
        Instead of specifying a symbol or polygon fill and outline color
        via ``color`` and ``pen``, give both a *value* via ``zvalue`` and a
        color lookup table via ``cmap``.  Alternatively, give the name of a
        *file* with one z-value (read from the last column) for each
        polygon in the input data. To apply it to the fill color, use
        ``color='+z'``. To apply it to the pen color, append **+z** to
        ``pen``.
    {a}
    {c}
    {f}
    {i}
    label : str
        Add a legend entry for the symbol or line being plotted.

    {p}
    {t}
        *transparency* can also be a 1d array to set varying transparency
        for symbols, but this option is only valid if using x/y.
    """
    kwargs = self._preprocess(**kwargs)  # pylint: disable=protected-access

    # Classify the input so later branches know what was passed in
    # (e.g. "file", "matrix", "vectors").
    kind = data_kind(data, x, y)

    # Columns appended to the x/y data (directions, colors, sizes, ...).
    extra_arrays = []
    # Vector styles (v/V) read their direction components as extra columns.
    if "S" in kwargs and kwargs["S"][0] in "vV" and direction is not None:
        extra_arrays.extend(direction)
    if "G" in kwargs and not isinstance(kwargs["G"], str):
        # Array-valued fill colors only work with x/y vector input.
        if kind != "vectors":
            raise GMTInvalidInput(
                "Can't use arrays for color if data is matrix or file."
            )
        extra_arrays.append(kwargs["G"])
        del kwargs["G"]
    if size is not None:
        if kind != "vectors":
            raise GMTInvalidInput(
                "Can't use arrays for 'size' if data is a matrix or file."
            )
        extra_arrays.append(size)

    # Intensity (I) and transparency (t) may also be per-symbol arrays.
    for flag in ["I", "t"]:
        if flag in kwargs and is_nonstr_iter(kwargs[flag]):
            if kind != "vectors":
                raise GMTInvalidInput(
                    f"Can't use arrays for {plot.aliases[flag]} if data is matrix or file."
                )
            extra_arrays.append(kwargs[flag])
            # Keep the flag set but empty; values come from the extra column.
            kwargs[flag] = ""

    with Session() as lib:
        # Choose how data will be passed in to the module
        file_context = lib.virtualfile_from_data(
            check_kind="vector", data=data, x=x, y=y, extra_arrays=extra_arrays
        )

        with file_context as fname:
            arg_str = " ".join([fname, build_arg_string(kwargs)])
            lib.call_module("plot", arg_str)
示例#17
0
def x2sys_cross(tracks=None, outfile=None, **kwargs):
    r"""
    Calculate crossovers between track data files.

    Determines all intersections between ("external cross-overs") or within
    ("internal cross-overs") tracks (Cartesian or geographic), and report the
    time, position, distance along track, heading and speed along each track
    segment, and the crossover error (COE) and mean values for all observables.
    By default, :meth:`pygmt.x2sys_cross` will look for both external and
    internal COEs. As an option, you may choose to project all data using one
    of the map projections prior to calculating the COE.

    Full option list at :gmt-docs:`supplements/x2sys/x2sys_cross.html`

    {aliases}

    Parameters
    ----------
    tracks : pandas.DataFrame or str or list
        A table or a list of tables with (x, y) or (lon, lat) values in the
        first two columns. Track(s) can be provided as pandas DataFrame tables
        or file names. Supported file formats are ASCII, native binary, or
        COARDS netCDF 1-D data. More columns may also be present.

        If the filenames are missing their file extension, we will append the
        suffix specified for this TAG. Track files will be searched for first
        in the current directory and second in all directories listed in
        $X2SYS_HOME/TAG/TAG_paths.txt (if it exists). [If $X2SYS_HOME is not
        set it will default to $GMT_SHAREDIR/x2sys]. (Note: MGD77 files will
        also be looked for via $MGD77_HOME/mgd77_paths.txt and .gmt files
        will be searched for via $GMT_SHAREDIR/mgg/gmtfile_paths).

    outfile : str
        Optional. The file name for the output ASCII txt file to store the
        table in.

    tag : str
        Specify the x2sys TAG which identifies the attributes of this data
        type.

    combitable : str
        Only process the pair-combinations found in the file *combitable*
        [Default process all possible combinations among the specified files].
        The file *combitable* is created by :gmt-docs:`x2sys_get's -L option
        <supplements/x2sys/x2sys_get.html#l>`.

    runtimes : bool or str
        Compute and append the processing run-time for each pair to the
        progress message (use ``runtimes=True``). Pass in a filename (e.g.
        ``runtimes="file.txt"``) to save these run-times to file. The idea here
        is to use the knowledge of run-times to split the main process in a
        number of sub-processes that can each be launched in a different
        processor of your multi-core machine. See the MATLAB function
        `split_file4coes.m
        <https://github.com/GenericMappingTools/gmt/blob/master/src/x2sys/>`_.

    override : bool or str
        **S**\|\ **N**.
        Control how geographic coordinates are handled (Cartesian data are
        unaffected). By default, we determine if the data are closer to one
        pole than the other, and then we use a cylindrical polar conversion to
        avoid problems with longitude jumps. You can turn this off entirely
        with ``override`` and then the calculations uses the original data (we
        have protections against longitude jumps). However, you can force the
        selection of the pole for the projection by appending **S** or **N**
        for the south or north pole, respectively. The conversion is used
        because the algorithm used to find crossovers is inherently a
        Cartesian algorithm that can run into trouble with data that has large
        longitudinal range at higher latitudes.

    interpolation : str
        **l**\|\ **a**\|\ **c**.
        Sets the interpolation mode for estimating values at the crossover.
        Choose among:

        - **l** - Linear interpolation [Default].
        - **a** - Akima spline interpolation.
        - **c** - Cubic spline interpolation.

    coe : str
        Use **e** for external COEs only, and **i** for internal COEs only
        [Default is all COEs].

    {R}

    speed : str or list
        **l**\|\ **u**\|\ **h**\ *speed*.
        Defines window of track speeds. If speeds are outside this window we do
        not calculate a COE. Specify:

        - **l** sets lower speed [Default is 0].
        - **u** sets upper speed [Default is infinity].
        - **h** does not limit the speed but sets a lower speed below which
          headings will not be computed (i.e., set to NaN) [Default
          calculates headings regardless of speed].

        For example, you can use ``speed=["l0", "u10", "h5"]`` to set a lower
        speed of 0, upper speed of 10, and disable heading calculations for
        speeds below 5.

    {V}

    numpoints : int
        Give the maximum number of data points on either side of the crossover
        to use in the spline interpolation [Default is 3].

    trackvalues : bool
        Report the values of each track at the crossover [Default reports the
        crossover value and the mean value].

    Returns
    -------
    crossover_errors : :class:`pandas.DataFrame` or None
        Table containing crossover error information.
        Return type depends on whether the ``outfile`` parameter is set:

        - :class:`pandas.DataFrame` with (x, y, ..., etc) if ``outfile`` is not
          set
        - None if ``outfile`` is set (track output will be stored in the set in
          ``outfile``)
    """
    with Session() as lib:
        # Resolve each track into a context manager that yields the name of a
        # file readable by GMT's x2sys_cross module. Plain filenames pass
        # through unchanged; DataFrame tracks are dumped to temporary files.
        file_contexts = []
        for track in tracks:
            kind = data_kind(track)
            if kind == "file":
                file_contexts.append(dummy_context(track))
            elif kind == "matrix":
                # find suffix (-E) of trackfiles used (e.g. xyz, csv, etc) from
                # $X2SYS_HOME/TAGNAME/TAGNAME.tag file
                # NOTE(review): raises KeyError if $X2SYS_HOME or the "T"
                # (tag) keyword argument is unset — both are required here.
                lastline = (Path(
                    os.environ["X2SYS_HOME"], kwargs["T"],
                    f"{kwargs['T']}.tag").read_text().strip().split("\n")[-1]
                            )  # e.g. "-Dxyz -Etsv -I1/1"
                # Because "-D..." sorts before "-E...", a later -E match
                # overwrites an earlier -D match, so -E wins when both exist.
                for item in sorted(
                        lastline.split()):  # sort list alphabetically
                    if item.startswith(
                        ("-E", "-D")):  # prefer -Etsv over -Dxyz
                        suffix = item[
                            2:]  # e.g. tsv (1st choice) or xyz (2nd choice)

                # Save pandas.DataFrame track data to temporary file
                file_contexts.append(
                    tempfile_from_dftrack(track=track, suffix=suffix))
            else:
                raise GMTInvalidInput(f"Unrecognized data type: {type(track)}")

        with GMTTempFile(suffix=".txt") as tmpfile:
            # Enter all track file contexts at once so every temporary file
            # stays alive for the duration of the single x2sys_cross call.
            with contextlib.ExitStack() as stack:
                fnames = [stack.enter_context(c) for c in file_contexts]
                if outfile is None:
                    outfile = tmpfile.name
                arg_str = " ".join(
                    [*fnames,
                     build_arg_string(kwargs), "->" + outfile])
                lib.call_module(module="x2sys_cross", args=arg_str)

            # Read temporary csv output to a pandas table
            if outfile == tmpfile.name:  # if outfile isn't set, return pd.DataFrame
                # Read the tab-separated ASCII table
                table = pd.read_csv(
                    tmpfile.name,
                    sep="\t",
                    header=2,  # Column names are on 2nd row
                    comment=">",  # Skip the 3rd row with a ">"
                    parse_dates=[2, 3],  # Datetimes on 3rd and 4th column
                )
                # Remove the "# " from "# x" in the first column
                table = table.rename(
                    columns={table.columns[0]: table.columns[0][2:]})
            elif outfile != tmpfile.name:  # if outfile is set, output in outfile only
                table = None

    return table
示例#18
0
def info(table, **kwargs):
    r"""
    Get information about data tables.

    Reads from files and finds the extreme values in each of the columns
    reported as min/max pairs. It recognizes NaNs and will print warnings if
    the number of columns vary from record to record. As an option, it will
    find the extent of the first two columns rounded up and down to the nearest
    multiple of the supplied increments given by ``spacing``. Such output will
    be in a numpy.ndarray form [*w*, *e*, *s*, *n*], which can be used
    directly as the ``region`` parameter for other modules (hence only *dx*
    and *dy* are needed). If the ``per_column`` parameter is combined with
    ``spacing``, then the numpy.ndarray output will be rounded up/down for as
    many columns as there are increments provided in ``spacing``. A similar
    parameter ``nearest_multiple`` will provide a numpy.ndarray in the form
    of [*zmin*, *zmax*, *dz*] for makecpt.

    Full option list at :gmt-docs:`gmtinfo.html`

    {aliases}

    Parameters
    ----------
    table : str or np.ndarray or pandas.DataFrame or xarray.Dataset
        Pass in either a file name to an ASCII data table, a 1D/2D numpy array,
        a pandas dataframe, or an xarray dataset made up of 1D xarray.DataArray
        data variables.
    per_column : bool
        Report the min/max values per column in separate columns.
    spacing : str
        [**b**\|\ **p**\|\ **f**\|\ **s**]\ *dx*\[/*dy*\[/*dz*...]].
        Report the min/max of the first n columns to the nearest multiple of
        the provided increments and output results in the form
        ``[w, e, s, n]``.
    nearest_multiple : str
        **dz**\[\ **+c**\ *col*].
        Report the min/max of the first (0'th) column to the nearest multiple
        of dz and output this in the form ``[zmin, zmax, dz]``.

    {V}

    Returns
    -------
    output : np.ndarray or str
        Return type depends on whether any of the ``per_column``,
        ``spacing``, or ``nearest_multiple`` parameters are set.

        - :class:`numpy.ndarray` if either of the above parameters are used.
        - str if none of the above parameters are used.
    """
    kind = data_kind(table)
    with Session() as lib:
        if kind == "file":
            # A filename on disk: hand the name straight to the module.
            file_context = dummy_context(table)
        elif kind == "matrix":
            try:
                # pandas.DataFrame and xarray.Dataset expose .items(),
                # yielding one 1-D column/variable at a time.
                columns = [column for _, column in table.items()]
            except AttributeError:
                # Fall back for Python lists, tuples, and numpy ndarrays.
                columns = np.atleast_2d(np.asanyarray(table).T)
            file_context = lib.virtualfile_from_vectors(*columns)
        else:
            raise GMTInvalidInput(f"Unrecognized data type: {type(table)}")

        with GMTTempFile() as tmpfile:
            with file_context as infile:
                lib.call_module(
                    "info",
                    " ".join(
                        [infile,
                         build_arg_string(kwargs), "->" + tmpfile.name]))
            result = tmpfile.read()

        if any(flag in kwargs for flag in ["C", "I", "T"]):
            # These flags produce numeric output; convert the raw string to a
            # numpy array. Strip a leading "-R"/"-T" and turn "/" separators
            # into whitespace first, e.g. -R0/1/2/3 becomes "0 1 2 3".
            if result.startswith(("-R", "-T")):
                result = result[2:].replace("/", " ")
            result = np.loadtxt(result.splitlines())

        return result
示例#19
0
def contour(self, x=None, y=None, z=None, data=None, **kwargs):
    r"""
    Contour table data by direct triangulation.

    Takes a matrix, (x,y,z) pairs, or a file name as input and plots lines,
    polygons, or symbols at those locations on a map.

    Must provide either ``data`` or ``x``/``y``/``z``.

    Full option list at :gmt-docs:`contour.html`

    {aliases}

    Parameters
    ----------
    x/y/z : 1d arrays
        Arrays of x and y coordinates and values z of the data points.
    data : str or 2d array
        Either a data file name or a 2d numpy array with the tabular data.
    {J}
    {R}
    annotation : str or int
        Specify or disable annotated contour levels, modifies annotated
        contours specified in ``interval``.

        - Specify a fixed annotation interval *annot_int* or a
          single annotation level +\ *annot_int*.
    {B}
    levels : str or int
        Specify the contour lines to generate.

        - The filename of a CPT file where the color boundaries will
          be used as contour levels.
        - The filename of a 2 (or 3) column file containing the contour
          levels (col 1), (**C**)ontour or (**A**)nnotate (col 2), and optional
          angle (col 3)
        - A fixed contour interval *cont_int* or a single contour with
          +\ *cont_int*
    D : str
        Dump contour coordinates.
    E : str
        Network information.
    label_placement : str
        Placement of labels.
    I : bool
        Color the triangles using CPT.
    triangular_mesh_pen : str
        Pen to draw the underlying triangulation [Default is none].
    no_clip : bool
        Do NOT clip contours or image at the boundaries [Default will clip
        to fit inside region].
    Q : float or str
        [*cut*][**+z**].
        Do not draw contours with less than cut number of points.
    skip : bool or str
        [**p**\|\ **t**].
        Skip input points outside region.
    {W}
    label : str
        Add a legend entry for the contour being plotted. Normally, the
        annotated contour is selected for the legend. You can select the
        regular contour instead, or both of them, by considering the label
        to be of the format [*annotcontlabel*][/*contlabel*]. If either
        label contains a slash (/) character then use ``|`` as the
        separator for the two labels instead.
    {V}
    {XY}
    {c}
    {p}
    {t}

    Raises
    ------
    GMTInvalidInput
        If ``x``/``y`` are given without ``z``, or if the data kind cannot
        be mapped to a GMT input (e.g. a grid was passed in).
    """
    kwargs = self._preprocess(**kwargs)  # pylint: disable=protected-access

    kind = data_kind(data, x, y, z)
    # x/y alone are not enough for triangulation; a z value is required.
    if kind == "vectors" and z is None:
        raise GMTInvalidInput("Must provide x, y, and z.")

    with Session() as lib:
        # Choose how data will be passed in to the module
        if kind == "file":
            file_context = dummy_context(data)
        elif kind == "matrix":
            file_context = lib.virtualfile_from_matrix(data)
        elif kind == "vectors":
            file_context = lib.virtualfile_from_vectors(x, y, z)
        else:
            # Previously fell through and crashed with NameError on
            # file_context; raise an explicit error instead (matches the
            # pattern used by sibling modules such as grdfilter).
            raise GMTInvalidInput(f"Unrecognized data type: {type(data)}")

        with file_context as fname:
            arg_str = " ".join([fname, build_arg_string(kwargs)])
            lib.call_module("contour", arg_str)
示例#20
0
def grdfilter(grid, **kwargs):
    r"""
    Filter a grid in the space (or time) domain.

    Filter a grid file in the time domain using one of the selected convolution
    or non-convolution isotropic or rectangular filters and compute distances
    using Cartesian or Spherical geometries. The output grid file can
    optionally be generated as a sub-region of the input (via ``region``)
    and/or with new increment (via ``spacing``) or registration
    (via ``toggle``). In this way, one may have "extra space" in the input
    data so that the edges will not be used and the output can be within one
    half-width of the input edges. If the filter is low-pass, then the output
    may be less frequently sampled than the input.

    Full option list at :gmt-docs:`grdfilter.html`

    {aliases}

    Parameters
    ----------
    grid : str or xarray.DataArray
        The file name of the input grid or the grid loaded as a DataArray.
    outgrid : str or None
        The name of the output netCDF file with extension .nc to store the grid
        in.
    filter : str
        **b**\|\ **c**\|\ **g**\|\ **o**\|\ **m**\|\ **p**\|\ **h**\ *xwidth*\
        [/*width2*\][*modifiers*].
        Name of filter type you which to apply, followed by the width:

        b: Box Car

        c: Cosine Arch

        g: Gaussian

        o: Operator

        m: Median

        p: Maximum Likelihood probability

        h: histogram
    distance : str
        Distance *flag* tells how grid (x,y) relates to filter width as
        follows:

        p: grid (px,py) with *width* an odd number of pixels; Cartesian
        distances.

        0: grid (x,y) same units as *width*, Cartesian distances.

        1: grid (x,y) in degrees, *width* in kilometers, Cartesian distances.

        2: grid (x,y) in degrees, *width* in km, dx scaled by cos(middle y),
        Cartesian distances.

        The above options are fastest because they allow weight matrix to be
        computed only once. The next three options are slower because they
        recompute weights for each latitude.

        3: grid (x,y) in degrees, *width* in km, dx scaled by cosine(y),
        Cartesian distance calculation.

        4: grid (x,y) in degrees, *width* in km, Spherical distance
        calculation.

        5: grid (x,y) in Mercator ``projection='m1'`` img units, *width* in km,
        Spherical distance calculation.

    spacing : str
        *xinc*\[\ *unit*\][**+e**\|\ **n**]
        [/*yinc*\ [*unit*][**+e**\|\ **n**]].
        *xinc* [and optionally *yinc*] is the grid spacing.
    nans : str or float
        **i**\|\ **p**\|\ **r**.
        Determine how NaN-values in the input grid affects the filtered output.
    {R}
    toggle : bool
        Toggle the node registration for the output grid so as to become the
        opposite of the input grid. [Default gives the same registration as the
        input grid].
    {V}

    Returns
    -------
    ret: xarray.DataArray or None
        Return type depends on whether the ``outgrid`` parameter is set:

        - :class:`xarray.DataArray` if ``outgrid`` is not set
        - None if ``outgrid`` is set (grid output will be stored in file set by
          ``outgrid``)

    Examples
    --------
    >>> import os
    >>> import pygmt

    >>> # Apply a filter of 600km (full width) to the @earth_relief_30m file
    >>> # and return a filtered field (saved as netcdf)
    >>> pygmt.grdfilter(
    ...     grid="@earth_relief_30m",
    ...     filter="m600",
    ...     distance="4",
    ...     region=[150, 250, 10, 40],
    ...     spacing=0.5,
    ...     outgrid="filtered_pacific.nc",
    ... )
    >>> os.remove("filtered_pacific.nc")  # cleanup file

    >>> # Apply a gaussian smoothing filter of 600 km in the input data array,
    >>> # and returns a filtered data array with the smoothed field.
    >>> grid = pygmt.datasets.load_earth_relief()
    >>> smooth_field = pygmt.grdfilter(grid=grid, filter="g600", distance="4")
    """
    kind = data_kind(grid)

    with GMTTempFile(suffix=".nc") as tmpfile:
        with Session() as lib:
            if kind == "file":
                # Grid given as a file name: pass the name through unchanged.
                file_context = dummy_context(grid)
            elif kind == "grid":
                # xarray.DataArray grid: expose it to GMT via a virtual file.
                file_context = lib.virtualfile_from_grid(grid)
            else:
                # f-string for consistency with the other modules' messages
                # (was str.format).
                raise GMTInvalidInput(f"Unrecognized data type: {type(grid)}")

            with file_context as infile:
                if "G" not in kwargs:  # if outgrid is unset, output to tempfile
                    kwargs["G"] = tmpfile.name
                outgrid = kwargs["G"]
                arg_str = " ".join([infile, build_arg_string(kwargs)])
                lib.call_module("grdfilter", arg_str)

        if outgrid == tmpfile.name:  # if user did not set outgrid, return DataArray
            with xr.open_dataarray(outgrid) as dataarray:
                result = dataarray.load()
                _ = result.gmt  # load GMTDataArray accessor information
        else:
            result = None  # if user sets an outgrid, return None

        return result
示例#21
0
def grdimage(self, grid, **kwargs):
    r"""
    Project and plot grids or images.

    Reads a 2-D grid file and produces a gray-shaded (or colored) map by
    building a rectangular image and assigning pixels a gray-shade (or color)
    based on the z-value and the CPT file. Optionally, illumination may be
    added by providing a file with intensities in the (-1,+1) range or
    instructions to derive intensities from the input data grid. Values outside
    this range will be clipped. Such intensity files can be created from the
    grid using :meth:`pygmt.grdgradient` and, optionally, modified by
    ``grdmath`` or ``grdhisteq``. If GMT is built with GDAL support, ``grid``
    can be an image file (geo-referenced or not). In this case the image can
    optionally be illuminated with the file provided via the ``shading``
    parameter. Here, if image has no coordinates then those of the intensity
    file will be used.

    When using map projections, the grid is first resampled on a new
    rectangular grid with the same dimensions. Higher resolution images can
    be obtained by using the ``dpi`` parameter. To obtain the resampled value
    (and hence shade or color) of each map pixel, its location is inversely
    projected back onto the input grid after which a value is interpolated
    between the surrounding input grid values. By default bi-cubic
    interpolation is used. Aliasing is avoided by also forward projecting
    the input grid nodes. If two or more nodes are projected onto the same
    pixel, their average will dominate in the calculation of the pixel
    value. Interpolation and aliasing is controlled with the
    ``interpolation`` parameter.

    The ``region`` parameter can be used to select a map region larger or
    smaller than that implied by the extent of the grid.

    Full parameter list at :gmt-docs:`grdimage.html`

    {aliases}

    Parameters
    ----------
    grid : str or xarray.DataArray
        The file name or a DataArray containing the input 2-D gridded data
        set or image to be plotted (See GRID FILE FORMATS at
        :gmt-docs:`grdimage.html#grid-file-formats`).
    img_out : str
        *out_img*\[=\ *driver*].
        Save an image in a raster format instead of PostScript. Use
        extension .ppm for a Portable Pixel Map format which is the only
        raster format GMT can natively write. For GMT installations
        configured with GDAL support there are more choices: Append
        *out_img* to select the image file name and extension. If the
        extension is one of .bmp, .gif, .jpg, .png, or .tif then no driver
        information is required. For other output formats you must append
        the required GDAL driver. The *driver* is the driver code name used
        by GDAL; see your GDAL installation's documentation for available
        drivers. Append a **+c**\ *args* string where *args* is a list
        of one or more concatenated number of GDAL **-co** arguments. For
        example, to write a GeoPDF with the TerraGo format use
        ``=PDF+cGEO_ENCODING=OGC_BP``. Notes: (1) If a tiff file (.tif) is
        selected then we will write a GeoTiff image if the GMT projection
        syntax translates into a PROJ syntax, otherwise a plain tiff file
        is produced. (2) Any vector elements will be lost.
    {B}
    {CPT}
    img_in : str
        [**r**].
        GMT will automatically detect standard image files (Geotiff, TIFF,
        JPG, PNG, GIF, etc.) and will read those via GDAL. For very obscure
        image formats you may need to explicitly set ``img_in``, which
        specifies that the grid is in fact an image file to be read via
        GDAL. Append **r** to assign the region specified by ``region``
        to the image. For example, if you have used ``region='d'`` then the
        image will be assigned a global domain. This mode allows you to
        project a raw image (an image without referencing coordinates).
    dpi : int
        [**i**\|\ *dpi*].
        Sets the resolution of the projected grid that will be created if a
        map projection other than Linear or Mercator was selected [100]. By
        default, the projected grid will be of the same size (rows and
        columns) as the input file. Specify **i** to use the PostScript
        image operator to interpolate the image at the device resolution.
    bit_color : str
        *color*\ [**+b**\|\ **f**\].
        This parameter only applies when a resulting 1-bit image otherwise
        would consist of only two colors: black (0) and white (255). If so,
        this parameter will instead use the image as a transparent mask and
        paint the mask with the given color. Append **+b** to paint the
        background pixels (1) or **+f** for the foreground pixels
        [Default is **+f**].
    shading : str or xarray.DataArray
        [*intensfile*\|\ *intensity*\|\ *modifiers*].
        Give the name of a grid file or a DataArray with intensities in the
        (-1,+1) range, or a constant intensity to apply everywhere (affects the
        ambient light). Alternatively, derive an intensity grid from the input
        data grid via a call to :meth:`pygmt.grdgradient`; append
        **+a**\ *azimuth*, **+n**\ *args*, and **+m**\ *ambient* to specify
        azimuth, intensity, and ambient arguments for that module, or just give
        **+d** to select the default arguments (``+a-45+nt1+m0``). If you want
        a more specific intensity scenario then run :meth:`pygmt.grdgradient`
        separately first. If we should derive intensities from another file
        than grid, specify the file with suitable modifiers [Default is no
        illumination]. Note: If the input data is an *image* then an
        *intensfile* or constant *intensity* must be provided.
    {J}
    monochrome : bool
        Force conversion to monochrome image using the (television) YIQ
        transformation. Cannot be used with ``nan_transparent``.
    no_clip : bool
        Do not clip the image at the map boundary (only relevant for
        non-rectangular maps).
    nan_transparent : bool
        Make grid nodes with z = NaN transparent, using the color-masking
        feature in PostScript Level 3 (the PS device must support PS Level
        3).
    {R}
    {V}
    {XY}
    {c}
    {f}
    {n}
    {p}
    {t}
    {x}
    """
    kwargs = self._preprocess(**kwargs)  # pylint: disable=protected-access
    with Session() as lib:
        grid_context = lib.virtualfile_from_data(check_kind="raster",
                                                 data=grid)
        with contextlib.ExitStack() as stack:
            # When shading is supplied as an xarray grid, expose it to GMT
            # through a virtual file and substitute that file name into -I.
            if "I" in kwargs and data_kind(kwargs["I"]) == "grid":
                kwargs["I"] = stack.enter_context(
                    lib.virtualfile_from_grid(kwargs["I"]))

            infile = stack.enter_context(grid_context)
            lib.call_module("grdimage",
                            " ".join([infile, build_arg_string(kwargs)]))
示例#22
0
def meca(
    self,  # pylint: disable=unused-argument
    spec,
    scale,
    longitude=None,
    latitude=None,
    depth=None,
    convention=None,
    component="full",
    plot_longitude=None,
    plot_latitude=None,
    **kwargs,
):
    """
    Plot focal mechanisms.

    Full option list at :gmt-docs:`supplements/seis/meca.html`

    Note
    ----
        Currently, labeling of beachballs with text strings is only supported
        via providing a file to `spec` as input.

    {aliases}

    Parameters
    ----------
    spec: dict, 1D array, 2D array, pd.DataFrame, or str
        Either a filename containing focal mechanism parameters as columns, a
        1- or 2-D array with the same, or a dictionary. If a filename or array,
        `convention` is required so we know how to interpret the
        columns/entries. If a dictionary, the following combinations of keys
        are supported; these determine the convention. Dictionary may contain
        values for a single focal mechanism or lists of values for many focal
        mechanisms. A Pandas DataFrame may optionally contain columns latitude,
        longitude, depth, plot_longitude, and/or plot_latitude instead of
        passing them to the meca method.

        - ``"aki"`` — *strike, dip, rake, magnitude*
        - ``"gcmt"`` — *strike1, dip1, rake1, strike2, dip2, rake2, mantissa,
          exponent*
        - ``"mt"`` — *mrr, mtt, mff, mrt, mrf, mtf, exponent*
        - ``"partial"`` — *strike1, dip1, strike2, fault_type, magnitude*
        - ``"principal_axis"`` — *t_exponent, t_azimuth, t_plunge, n_exponent,
          n_azimuth, n_plunge, p_exponent, p_azimuth, p_plunge, exponent*

    scale: str
        Adjusts the scaling of the radius of the beachball, which is
        proportional to the magnitude. Scale defines the size for magnitude = 5
        (i.e. scalar seismic moment M0 = 4.0E23 dynes-cm)
    longitude: int, float, list, or 1d numpy array
        Longitude(s) of event location. Ignored if `spec` is not a dictionary.
        List must be the length of the number of events. Ignored if `spec` is a
        DataFrame and contains a 'longitude' column.
    latitude: int, float, list, or 1d numpy array
        Latitude(s) of event location. Ignored if `spec` is not a dictionary.
        List must be the length of the number of events. Ignored if `spec` is a
        DataFrame and contains a 'latitude' column.
    depth: int, float, list, or 1d numpy array
        Depth(s) of event location in kilometers. Ignored if `spec` is not a
        dictionary. List must be the length of the number of events. Ignored if
        `spec` is a DataFrame and contains a 'depth' column.
    convention: str
        ``"aki"`` (Aki & Richards), ``"gcmt"`` (global CMT), ``"mt"`` (seismic
        moment tensor), ``"partial"`` (partial focal mechanism), or
        ``"principal_axis"`` (principal axis). Ignored if `spec` is a
        dictionary or dataframe.
    component: str
        The component of the seismic moment tensor to plot. ``"full"`` (the
        full seismic moment tensor), ``"dc"`` (the closest double couple with
        zero trace and zero determinant), ``"deviatoric"`` (zero trace)
    plot_longitude: int, float, list, or 1d numpy array
        Longitude(s) at which to place beachball, only used if `spec` is a
        dictionary. List must be the length of the number of events. Ignored if
        `spec` is a DataFrame and contains a 'plot_longitude' column.
    plot_latitude: int, float, list, or 1d numpy array
        Latitude(s) at which to place beachball, only used if `spec` is a
        dictionary. List must be the length of the number of events. Ignored if
        `spec` is a DataFrame and contains a 'plot_latitude' column.
    offset: bool or str
        Offsets beachballs to the longitude, latitude specified in the last two
        columns of the input file or array, or by `plot_longitude` and
        `plot_latitude` if provided. A small circle is plotted at the initial
        location and a line connects the beachball to the circle. Specify pen
        and optionally append ``+ssize`` to change the line style and/or size
        of the circle.
    no_clip : bool
        Does NOT skip symbols that fall outside frame boundary specified by
        *region* [Default is False, i.e. plot symbols inside map frame only].
    {J}
    {R}
    {B}
    {V}
    {XY}
    {p}
    {t}
    """

    # pylint warnings that need to be fixed
    # pylint: disable=too-many-locals
    # pylint: disable=too-many-nested-blocks
    # pylint: disable=too-many-branches
    # pylint: disable=too-many-statements

    def set_pointer(data_pointers, spec):
        """
        Set optional parameter pointers based on DataFrame or dict, if those
        parameters are present in the DataFrame or dict.
        """
        for param in list(data_pointers.keys()):
            if param in spec:
                # set pointer based on param name
                data_pointers[param] = spec[param]

    def update_pointers(data_pointers):
        """
        Updates variables based on the location of data, as the following data
        can be passed as parameters or it can be contained in `spec`.
        """
        # update all pointers
        longitude = data_pointers["longitude"]
        latitude = data_pointers["latitude"]
        depth = data_pointers["depth"]
        plot_longitude = data_pointers["plot_longitude"]
        plot_latitude = data_pointers["plot_latitude"]
        return (longitude, latitude, depth, plot_longitude, plot_latitude)

    # Check the spec and parse the data according to the specified
    # convention
    if isinstance(spec, (dict, pd.DataFrame)):
        # dicts and DataFrames are handed similarly but not identically
        # NOTE(review): this guard can never fire — it additionally requires
        # spec NOT to be a dict/DataFrame, but we are inside the branch where
        # it is one. Kept for backward compatibility; locations are instead
        # validated via the data_pointers mechanism below.
        if (longitude is None or latitude is None
                or depth is None) and not isinstance(spec,
                                                     (dict, pd.DataFrame)):
            raise GMTError("Location not fully specified.")

        # Mapping of convention name to the ordered focal parameters GMT
        # expects after the longitude/latitude/depth columns. Order matters:
        # it defines the column order of the assembled array.
        param_conventions = {
            "AKI": ["strike", "dip", "rake", "magnitude"],
            # Fixed: this list previously read
            # ["strike1", "dip1", "dip2", "rake2", "mantissa", "exponent"],
            # missing "rake1" and "strike2", so a valid GCMT dict/DataFrame
            # (per the docstring above) could never match any convention.
            "GCMT": [
                "strike1", "dip1", "rake1", "strike2", "dip2", "rake2",
                "mantissa", "exponent",
            ],
            "MT": ["mrr", "mtt", "mff", "mrt", "mrf", "mtf", "exponent"],
            "PARTIAL":
            ["strike1", "dip1", "strike2", "fault_type", "magnitude"],
            "PRINCIPAL_AXIS": [
                "t_exponent",
                "t_azimuth",
                "t_plunge",
                "n_exponent",
                "n_azimuth",
                "n_plunge",
                "p_exponent",
                "p_azimuth",
                "p_plunge",
                "exponent",
            ],
        }

        # to keep track of where optional parameters exist
        data_pointers = {
            "longitude": longitude,
            "latitude": latitude,
            "depth": depth,
            "plot_longitude": plot_longitude,
            "plot_latitude": plot_latitude,
        }

        # make a DataFrame copy to check convention if it contains
        # other parameters
        if isinstance(spec, (dict, pd.DataFrame)):
            # check if a copy is necessary
            copy = False
            drop_list = []
            for pointer in data_pointers:
                if pointer in spec:
                    copy = True
                    drop_list.append(pointer)
            if copy:
                spec_conv = spec.copy()
                # delete optional parameters from copy for convention check
                for item in drop_list:
                    del spec_conv[item]
            else:
                spec_conv = spec

        # set convention and focal parameters based on spec convention:
        # the remaining keys must exactly match one convention's parameters
        convention_assigned = False
        for conv in param_conventions:
            if set(spec_conv.keys()) == set(param_conventions[conv]):
                convention = conv.lower()
                foc_params = param_conventions[conv]
                convention_assigned = True
                break
        if not convention_assigned:
            raise GMTError("Parameters in spec dictionary do not match known "
                           "conventions.")

        # create a dict type pointer for easier to read code: the type of the
        # first value tells us whether spec holds scalars, lists, or (for a
        # DataFrame) a 2D ndarray of values
        if isinstance(spec, dict):
            dict_type_pointer = list(spec.values())[0]
        elif isinstance(spec, pd.DataFrame):
            # use df.values as pointer for DataFrame behavior
            dict_type_pointer = spec.values

        # assemble the 1D array for the case of floats and ints as values
        if isinstance(dict_type_pointer, (int, float)):
            # update pointers
            set_pointer(data_pointers, spec)
            # look for optional parameters in the right place
            (
                longitude,
                latitude,
                depth,
                plot_longitude,
                plot_latitude,
            ) = update_pointers(data_pointers)

            # Construct the array (order matters)
            spec = [longitude, latitude, depth
                    ] + [spec[key] for key in foc_params]

            # Add in plotting options, if given, otherwise add 0s
            for arg in plot_longitude, plot_latitude:
                if arg is None:
                    spec.append(0)
                else:
                    # -C tells GMT to offset the beachball to the plot
                    # location given in the trailing columns
                    if "C" not in kwargs:
                        kwargs["C"] = True
                    spec.append(arg)

        # or assemble the 2D array for the case of lists as values
        elif isinstance(dict_type_pointer, list):
            # update pointers
            set_pointer(data_pointers, spec)
            # look for optional parameters in the right place
            (
                longitude,
                latitude,
                depth,
                plot_longitude,
                plot_latitude,
            ) = update_pointers(data_pointers)

            # before constructing the 2D array lets check that each key
            # of the dict has the same quantity of values to avoid bugs
            list_length = len(list(spec.values())[0])
            for value in list(spec.values()):
                if len(value) != list_length:
                    raise GMTError("Unequal number of focal mechanism "
                                   "parameters supplied in 'spec'.")
                # lets also check the inputs for longitude, latitude,
                # and depth if it is a list or array
                if (isinstance(longitude, (list, np.ndarray))
                        or isinstance(latitude, (list, np.ndarray))
                        or isinstance(depth, (list, np.ndarray))):
                    if (len(longitude) != len(latitude)) or (len(longitude) !=
                                                             len(depth)):
                        raise GMTError("Unequal number of focal mechanism "
                                       "locations supplied.")

            # values are ok, so build the 2D array
            spec_array = []
            for index in range(list_length):
                # Construct the array one row at a time (note that order
                # matters here, hence the list comprehension!)
                row = [longitude[index], latitude[index], depth[index]
                       ] + [spec[key][index] for key in foc_params]

                # Add in plotting options, if given, otherwise add 0s as
                # required by GMT
                for arg in plot_longitude, plot_latitude:
                    if arg is None:
                        row.append(0)
                    else:
                        if "C" not in kwargs:
                            kwargs["C"] = True
                        row.append(arg[index])
                spec_array.append(row)
            spec = spec_array

        # or assemble the array for the case of pd.DataFrames
        elif isinstance(dict_type_pointer, np.ndarray):
            # update pointers
            set_pointer(data_pointers, spec)
            # look for optional parameters in the right place
            (
                longitude,
                latitude,
                depth,
                plot_longitude,
                plot_latitude,
            ) = update_pointers(data_pointers)

            # lets also check the inputs for longitude, latitude, and depth
            # just in case the user entered different length lists
            if (isinstance(longitude, (list, np.ndarray))
                    or isinstance(latitude, (list, np.ndarray))
                    or isinstance(depth, (list, np.ndarray))):
                if (len(longitude) != len(latitude)) or (len(longitude) !=
                                                         len(depth)):
                    raise GMTError(
                        "Unequal number of focal mechanism locations supplied."
                    )

            # values are ok, so build the 2D array in the correct order
            spec_array = []
            for index in range(len(spec)):
                # Construct the array one row at a time (note that order
                # matters here, hence the list comprehension!)
                row = [longitude[index], latitude[index], depth[index]
                       ] + [spec[key][index] for key in foc_params]

                # Add in plotting options, if given, otherwise add 0s as
                # required by GMT
                for arg in plot_longitude, plot_latitude:
                    if arg is None:
                        row.append(0)
                    else:
                        if "C" not in kwargs:
                            kwargs["C"] = True
                        row.append(arg[index])
                spec_array.append(row)
            spec = spec_array

        else:
            raise GMTError(
                "Parameter 'spec' contains values of an unsupported type.")

    # Map the convention (plus, for tensors, the requested component) onto
    # the single-letter code used by the GMT -S flag
    if convention == "aki":
        data_format = "a"
    elif convention == "gcmt":
        data_format = "c"
    elif convention == "mt":
        # Check which component of mechanism the user wants plotted
        if component == "deviatoric":
            data_format = "z"
        elif component == "dc":
            data_format = "d"
        else:  # component == 'full'
            data_format = "m"
    elif convention == "partial":
        data_format = "p"
    elif convention == "principal_axis":
        # Check which component of mechanism the user wants plotted
        if component == "deviatoric":
            data_format = "t"
        elif component == "dc":
            data_format = "y"
        else:  # component == 'full'
            data_format = "x"
    # Support old-school GMT format options
    elif convention in ["a", "c", "m", "d", "z", "p", "x", "y", "t"]:
        data_format = convention
    else:
        raise GMTError("Convention not recognized.")

    # Assemble -S flag
    kwargs["S"] = data_format + scale

    kind = data_kind(spec)
    with Session() as lib:
        if kind == "matrix":
            file_context = lib.virtualfile_from_matrix(np.atleast_2d(spec))
        elif kind == "file":
            file_context = dummy_context(spec)
        else:
            raise GMTInvalidInput("Unrecognized data type: {}".format(
                type(spec)))
        with file_context as fname:
            arg_str = " ".join([fname, build_arg_string(kwargs)])
            lib.call_module("meca", arg_str)
示例#23
0
def surface(x=None, y=None, z=None, data=None, **kwargs):
    r"""
    Grids table data using adjustable tension continuous curvature splines.

    Surface reads randomly-spaced (x,y,z) triples and produces gridded values
    z(x,y) by solving:

    .. math::    (1 - t)\nabla^2(z)+t\nabla(z) = 0

    where :math:`t` is a tension factor between 0 and 1, and :math:`\nabla`
    indicates the Laplacian operator.

    Input may be a matrix, xyz triples, or a file name; provide either
    ``data`` or the triple ``x``, ``y``, and ``z``.

    Full option list at :gmt-docs:`surface.html`

    {aliases}

    Parameters
    ----------
    x/y/z : 1d arrays
        Arrays of x and y coordinates and values z of the data points.
    data : str or 2d array
        Either a data file name or a 2d numpy array with the tabular data.

    spacing : str
        *xinc*\[\ *unit*\][**+e**\|\ **n**]\
        [/*yinc*\ [*unit*][**+e**\|\ **n**]].
        *xinc* [and optionally *yinc*] is the grid spacing.

    region : str or list
        *xmin/xmax/ymin/ymax*\[**+r**][**+u**\ *unit*].
        Specify the region of interest.

    outfile : str
        Optional. The file name for the output netcdf file with extension .nc
        to store the grid in.

    {V}
    {a}
    {f}
    {r}

    Returns
    -------
    ret: xarray.DataArray or None
        Return type depends on whether the ``outfile`` parameter is set:

        - :class:`xarray.DataArray`: if ``outfile`` is not set
        - None if ``outfile`` is set (grid output will be stored in file set by
          ``outfile``)
    """
    input_kind = data_kind(data, x, y, z)
    if input_kind == "vectors" and z is None:
        raise GMTInvalidInput("Must provide z with x and y.")

    with GMTTempFile(suffix=".nc") as tmpfile:
        with Session() as lib:
            # Wrap the input in the appropriate (virtual) file context
            if input_kind == "file":
                source = dummy_context(data)
            elif input_kind == "matrix":
                source = lib.virtualfile_from_matrix(data)
            elif input_kind == "vectors":
                source = lib.virtualfile_from_vectors(x, y, z)
            else:
                raise GMTInvalidInput(f"Unrecognized data type: {type(data)}")

            with source as infile:
                # Route output to the temporary file unless the caller chose
                # an explicit output grid (alias G / ``outfile``)
                kwargs.setdefault("G", tmpfile.name)
                outfile = kwargs["G"]
                lib.call_module(
                    module="surface",
                    args=" ".join([infile, build_arg_string(kwargs)]),
                )

        if outfile == tmpfile.name:
            # No user-supplied outfile: load the result as a DataArray
            with xr.open_dataarray(outfile) as dataarray:
                result = dataarray.load()
                _ = result.gmt  # load GMTDataArray accessor information
        else:
            # Grid was written to the user-supplied outfile
            result = None

    return result
示例#24
0
文件: text.py 项目: weiji14/pygmt
def text_(
    self,
    textfiles=None,
    x=None,
    y=None,
    position=None,
    text=None,
    angle=None,
    font=None,
    justify=None,
    **kwargs,
):
    r"""
    Plot or typeset text strings of variable size, font type, and orientation.

    At least one of the following input combinations is required:

    - ``textfiles``
    - ``x``/``y``, and ``text``
    - ``position`` and ``text``

    Full parameter list at :gmt-docs:`text.html`

    {aliases}

    Parameters
    ----------
    textfiles : str or list
        A text data file name, or a list of filenames containing 1 or more
        records with (x, y[, angle, font, justify], text).
    x/y : float or 1d arrays
        The x and y coordinates, or an array of x and y coordinates to plot
        the text
    position : str
        Sets reference point on the map for the text by using x,y
        coordinates extracted from ``region`` instead of providing them
        through ``x``/``y``. Specify with a two letter (order independent)
        code, chosen from:

        * Horizontal: **L**\ (eft), **C**\ (entre), **R**\ (ight)
        * Vertical: **T**\ (op), **M**\ (iddle), **B**\ (ottom)

        For example, ``position="TL"`` plots the text at the Upper Left corner
        of the map.
    text : str or 1d array
        The text string, or an array of strings to plot on the figure
    angle: int, float, str or bool
        Set the angle measured in degrees counter-clockwise from
        horizontal (e.g. 30 sets the text at 30 degrees). If no angle is
        explicitly given (i.e. ``angle=True``) then the input to ``textfiles``
        must have this as a column.
    font : str or bool
        Set the font specification with format *size*\ ,\ *font*\ ,\ *color*
        where *size* is text size in points, *font* is the font to use, and
        *color* sets the font color. For example,
        ``font="12p,Helvetica-Bold,red"`` selects a 12p, red, Helvetica-Bold
        font. If no font info is explicitly given (i.e. ``font=True``), then
        the input to ``textfiles`` must have this information in one of its
        columns.
    justify : str or bool
        Set the alignment which refers to the part of the text string that
        will be mapped onto the (x,y) point. Choose a 2 character
        combination of **L**, **C**, **R** (for left, center, or right) and
        **T**, **M**, **B** for top, middle, or bottom. E.g., **BL** for lower
        left. If no justification is explicitly given (i.e. ``justify=True``),
        then the input to ``textfiles`` must have this as a column.
    {J}
    {R}
        *Required if this is the first plot command.*
    clearance : str
        [*dx/dy*][**+to**\|\ **O**\|\ **c**\|\ **C**].
        Adjust the clearance between the text and the surrounding box
        [Default is 15% of the font size]. Only used if ``pen`` or ``fill`` are
        specified. Append the unit you want (*c* for cm, *i* for inch, or *p*
        for point; if not given we consult **PROJ_LENGTH_UNIT**) or *%* for a
        percentage of the font size. Optionally, use modifier **+t** to set
        the shape of the textbox when using ``fill`` and/or ``pen``. Append
        lower case **o** to get a straight rectangle [Default is **o**]. Append
        upper case **O** to get a rounded rectangle. In paragraph mode
        (*paragraph*) you can also append lower case **c** to get a concave
        rectangle or append upper case **C** to get a convex rectangle.
    fill : str
        Sets the shade or color used for filling the text box [Default is
        no fill].
    offset : str
        [**j**\|\ **J**]\ *dx*\[/*dy*][**+v**\[*pen*]].
        Offsets the text from the projected (x,y) point by *dx*,\ *dy* [0/0].
        If *dy* is not specified then it is set equal to *dx*. Use **j** to
        offset the text away from the point instead (i.e., the text
        justification will determine the direction of the shift). Using
        **J** will shorten diagonal offsets at corners by sqrt(2).
        Optionally, append **+v** which will draw a line from the original
        point to the shifted point; append a pen to change the attributes
        for this line.
    pen : str
        Sets the pen used to draw a rectangle around the text string
        (see ``clearance``) [Default is width = default, color = black,
        style = solid].
    no_clip : bool
        Do NOT clip text at map boundaries [Default is will clip].
    {U}
    {V}
    {XY}
    {a}
    {c}
    {e}
    {f}
    {h}
    {i}
    {p}
    {t}
        *transparency* can also be a 1d array to set varying transparency
        for texts, but this option is only valid if using x/y/text.
    {w}
    """

    # pylint: disable=too-many-locals
    # pylint: disable=too-many-branches

    kwargs = self._preprocess(**kwargs)  # pylint: disable=protected-access

    # Figure out which of the three input combinations the caller used
    if position is None:
        kind = data_kind(textfiles, x, y, text)
    else:
        if x is not None or y is not None:
            raise GMTInvalidInput(
                "Provide either position only, or x/y pairs, not both")
        kind = "vectors"

    if kind == "vectors" and text is None:
        raise GMTInvalidInput("Must provide text with x/y pairs or position")

    # Seed an empty -F option when any placement modifier was given and the
    # caller did not supply -F directly; modifiers are appended below in a
    # fixed order (+a, +f, +j, +c).
    if "F" not in kwargs and any(
            arg is not None for arg in (position, angle, font, justify)):
        kwargs.update({"F": ""})

    if angle is True:
        kwargs["F"] += "+a"
    elif isinstance(angle, (int, float, str)):
        kwargs["F"] += f"+a{angle}"

    if font is True:
        kwargs["F"] += "+f"
    elif isinstance(font, str):
        kwargs["F"] += f"+f{font}"

    if justify is True:
        kwargs["F"] += "+j"
    elif isinstance(justify, str):
        kwargs["F"] += f"+j{justify}"

    if isinstance(position, str):
        kwargs["F"] += f'+c{position}+t"{text}"'

    extra_arrays = []
    # An array of transparencies is passed as the last numerical column of
    # each data record rather than via the -t flag value
    if "t" in kwargs and is_nonstr_iter(kwargs["t"]):
        extra_arrays.append(kwargs["t"])
        kwargs["t"] = ""

    with Session() as lib:
        if kind == "file":
            file_context = dummy_context(textfiles)
        elif kind == "vectors":
            if position is not None:
                # Position mode reads coordinates from the region, so GMT
                # gets an empty input file
                file_context = dummy_context("")
            else:
                file_context = lib.virtualfile_from_vectors(
                    np.atleast_1d(x),
                    np.atleast_1d(y),
                    *extra_arrays,
                    # text must be in str type, see issue #706
                    np.atleast_1d(text).astype(str),
                )
        else:
            file_context = ""
        with file_context as fname:
            lib.call_module("text",
                            " ".join([fname, build_arg_string(kwargs)]))
示例#25
0
文件: gridops.py 项目: jlmaurer/pygmt
def grdcut(grid, **kwargs):
    """
    Extract subregion from a grid.

    Produce a new *outgrid* file which is a subregion of *grid*. The
    subregion is specified with *region*; the specified range must not exceed
    the range of *grid* (but see *extend*). If in doubt, run
    :meth:`pygmt.grdinfo` to check range. Alternatively, define the subregion
    indirectly via a range check on the node values or via distances from a
    given point. Finally, you can give *projection* for oblique projections to
    determine the corresponding rectangular *region* setting that will give a
    grid that fully covers the oblique domain.

    Full option list at :gmt-docs:`grdcut.html`

    {aliases}

    Parameters
    ----------
    grid : str or xarray.DataArray
        The file name of the input grid or the grid loaded as a DataArray.
    outgrid : str or None
        The name of the output netCDF file with extension .nc to store the grid
        in.
    {J}
    {R}
    extend : bool or int or float
        Allow grid to be extended if new *region* exceeds existing boundaries.
        Give a value to initialize nodes outside current region.
    circ_subregion : str
        ``'lon/lat/radius[unit][+n]'``.
        Specify an origin (*lon* and *lat*) and *radius*; append a distance
        *unit* and we determine the corresponding rectangular region so that
        all grid nodes on or inside the circle are contained in the subset.
        If **+n** is appended we set all nodes outside the circle to NaN.
    z_subregion : str
        ``'[min/max][+n|N|r]'``.
        Determine a new rectangular region so that all nodes outside this
        region are also outside the given z-range [-inf/+inf]. To indicate no
        limit on *min* or *max* only, specify a hyphen (-). Normally, any NaNs
        encountered are simply skipped and not considered in the
        range-decision. Append **+n** to consider a NaN to be outside the given
        z-range. This means the new subset will be NaN-free. Alternatively,
        append **+r** to consider NaNs to be within the data range. In this
        case we stop shrinking the boundaries once a NaN is found [Default
        simply skips NaNs when making the range decision]. Finally, if your
        core subset grid is surrounded by rows and/or columns that are all
        NaNs, append **+N** to strip off such columns before (optionally)
        considering the range of the core subset for further reduction of the
        area.

    {V}

    Returns
    -------
    ret: xarray.DataArray or None
        Return type depends on whether the *outgrid* parameter is set:

        - xarray.DataArray if *outgrid* is not set
        - None if *outgrid* is set (grid output will be stored in *outgrid*)
    """
    input_kind = data_kind(grid)

    with GMTTempFile(suffix=".nc") as tmpfile:
        with Session() as lib:
            # Wrap the input grid in the appropriate (virtual) file context
            if input_kind == "grid":
                source = lib.virtualfile_from_grid(grid)
            elif input_kind == "file":
                source = dummy_context(grid)
            else:
                raise GMTInvalidInput(f"Unrecognized data type: {type(grid)}")

            with source as infile:
                # Route output to the temporary file unless the caller chose
                # an explicit output grid (alias G / ``outgrid``)
                kwargs.setdefault("G", tmpfile.name)
                outgrid = kwargs["G"]
                lib.call_module("grdcut",
                                " ".join([infile, build_arg_string(kwargs)]))

        if outgrid == tmpfile.name:
            # No user-supplied outgrid: load the result as a DataArray
            with xr.open_dataarray(outgrid) as dataarray:
                result = dataarray.load()
                _ = result.gmt  # load GMTDataArray accessor information
        else:
            # Grid was written to the user-supplied outgrid
            result = None

        return result
示例#26
0
def grdtrack(points, grid, newcolname=None, outfile=None, **kwargs):
    """
    Sample grids at specified (x,y) locations.

    Grdtrack reads one or more grid files and a table with (x,y) [or (lon,lat)]
    positions in the first two columns (more columns may be present). It
    interpolates the grid(s) at the positions in the table and writes out the
    table with the interpolated values added as (one or more) new columns. A
    bicubic [Default], bilinear, B-spline or nearest-neighbor interpolation is
    used, requiring boundary conditions at the limits of the region (see
    ``interpolation``; Default uses "natural" conditions (second partial
    derivative normal to edge is zero) unless the grid is automatically
    recognized as periodic.)

    Full option list at :gmt-docs:`grdtrack.html`

    {aliases}

    Parameters
    ----------
    points : pandas.DataFrame or str
        Either a table with (x, y) or (lon, lat) values in the first two
        columns, or a filename (e.g. csv, txt format). More columns may be
        present.

    grid : xarray.DataArray or str
        Gridded array from which to sample values from, or a filename (netcdf
        format).

    newcolname : str
        Required if ``points`` is a :class:`pandas.DataFrame`. The name for the
        new column in the track :class:`pandas.DataFrame` table where the
        sampled values will be placed.

    outfile : str
        The file name for the output ASCII file.

    {V}
    {f}
    {n}

    Returns
    -------
    track: pandas.DataFrame or None
        Return type depends on whether the ``outfile`` parameter is set:

        - :class:`pandas.DataFrame` table with (x, y, ..., newcolname) if
          ``outfile`` is not set
        - None if ``outfile`` is set (track output will be stored in file set
          by ``outfile``)
    """
    # A DataFrame result needs a name for the sampled-values column
    if data_kind(points) == "matrix" and newcolname is None:
        raise GMTInvalidInput("Please pass in a str to 'newcolname'")

    with GMTTempFile(suffix=".csv") as tmpfile:
        with Session() as lib:
            # Wrap the points table and the grid in virtual files
            table_context = lib.virtualfile_from_data(check_kind="vector",
                                                      data=points)
            grid_context = lib.virtualfile_from_data(check_kind="raster",
                                                     data=grid)

            # Sample the grid along the track, writing to outfile (or to the
            # temporary csv when the caller did not request a file)
            with table_context as csvfile, grid_context as grdfile:
                kwargs["G"] = grdfile
                if outfile is None:
                    outfile = tmpfile.name
                lib.call_module(
                    module="grdtrack",
                    args=" ".join(
                        [csvfile,
                         build_arg_string(kwargs), "->" + outfile]),
                )

        if outfile == tmpfile.name:
            # No user-supplied outfile: read the sampled track back in
            try:
                # DataFrame input: keep its column names, append newcolname
                column_names = points.columns.to_list() + [newcolname]
                result = pd.read_csv(tmpfile.name,
                                     sep="\t",
                                     names=column_names)
            except AttributeError:  # 'str' object has no attribute 'columns'
                result = pd.read_csv(tmpfile.name,
                                     sep="\t",
                                     header=None,
                                     comment=">")
        else:
            # Track was written to the user-supplied outfile
            result = None

    return result
示例#27
0
def plot3d(self,
           x=None,
           y=None,
           z=None,
           data=None,
           sizes=None,
           direction=None,
           **kwargs):
    r"""
    Plot lines, polygons, and symbols in 3-D.

    Takes a matrix, (x,y,z) triplets, or a file name as input and plots
    lines, polygons, or symbols at those locations in 3-D.

    Must provide either ``data`` or ``x``/``y``/``z``.

    If providing data through ``x/y/z``, ``color`` can be a 1d array
    that will be mapped to a colormap.

    If a symbol is selected and no symbol size given, then plot3d will
    interpret the fourth column of the input data as symbol size. Symbols
    whose size is <= 0 are skipped. If no symbols are specified then the
    symbol code (see ``style`` below) must be present as last column in the
    input. If ``style`` is not used, a line connecting the data points will
    be drawn instead. To explicitly close polygons, use ``close``. Select a
    fill with ``color``. If ``color`` is set, ``pen`` will control whether the
    polygon outline is drawn or not. If a symbol is selected, ``color`` and
    ``pen`` determines the fill and outline/no outline, respectively.

    Full parameter list at :gmt-docs:`plot3d.html`

    {aliases}

    Parameters
    ----------
    x/y/z : float or 1d arrays
        The x, y, and z coordinates, or arrays of x, y and z coordinates of
        the data points
    data : str or 2d array
        Either a data file name or a 2d numpy array with the tabular data.
        Use parameter ``columns`` to choose which columns are x, y, z,
        color, and size, respectively.
    sizes : 1d array
        The sizes of the data points in units specified in ``style``.
        Only valid if using ``x``/``y``/``z``.
    direction : list of two 1d arrays
        If plotting vectors (using ``style='V'`` or ``style='v'``), then
        should be a list of two 1d arrays with the vector directions. These
        can be angle and length, azimuth and length, or x and y components,
        depending on the style options chosen.
    {J}
    zscale/zsize : float or str
        Set z-axis scaling or z-axis size.
    {R}
    straight_line : bool or str
        [**m**\|\ **p**\|\ **x**\|\ **y**].
        By default, geographic line segments are drawn as great circle
        arcs. To draw them as straight lines, use ``straight_line``.
        Alternatively, add **m** to draw the line by first following a
        meridian, then a parallel. Or append **p** to start following a
        parallel, then a meridian. (This can be practical to draw a line
        along parallels, for example). For Cartesian data, points are
        simply connected, unless you append **x** or **y** to draw
        stair-case curves that whose first move is along *x* or *y*,
        respectively. **Note**: The ``straight_line`` parameter requires
        constant *z*-coordinates.
    {B}
    {CPT}
    offset : str
        *dx*/*dy*\ [/*dz*].
        Offset the plot symbol or line locations by the given amounts
        *dx*/*dy*\ [/*dz*] [Default is no offset].
    {G}
    intensity : float or bool
        Provide an *intens* value (nominally in the -1 to +1 range) to
        modulate the fill color by simulating illumination [Default is None].
        If using ``intensity=True``, we will instead read *intens* from the
        first data column after the symbol parameters (if given).
    close : str
        [**+b**\|\ **d**\|\ **D**][**+xl**\|\ **r**\|\ *x0*]\
        [**+yl**\|\ **r**\|\ *y0*][**+p**\ *pen*].
        Force closed polygons. Full documentation is at
        :gmt-docs:`plot3d.html#l`.
    no_clip : bool or str
        [**c**\|\ **r**].
        Do NOT clip symbols that fall outside map border [Default plots
        points whose coordinates are strictly inside the map border only].
        This parameter does not apply to lines and polygons which are always
        clipped to the map region. For periodic (360-longitude) maps we
        must plot all symbols twice in case they are clipped by the
        repeating boundary. ``no_clip=True`` will turn off clipping and not
        plot repeating symbols. Use ``no_clip="r"`` to turn off clipping
        but retain the plotting of such repeating symbols, or use
        ``no_clip="c"`` to retain clipping but turn off plotting of
        repeating symbols.
    no_sort : bool
        Turn off the automatic sorting of items based on their distance
        from the viewer. The default is to sort the items so that items in
        the foreground are plotted after items in the background.
    style : str
        Plot symbols. Full documentation is at :gmt-docs:`plot3d.html#s`.
    {U}
    {V}
    {W}
    {XY}
    zvalue : str
        *value*\|\ *file*.
        Instead of specifying a symbol or polygon fill and outline color
        via ``color`` and ``pen``, give both a *value* via **zvalue** and a
        color lookup table via ``cmap``.  Alternatively, give the name of a
        *file* with one z-value (read from the last column) for each
        polygon in the input data. To apply it to the fill color, use
        ``color='+z'``. To apply it to the pen color, append **+z** to
        ``pen``.
    {c}
    label : str
        Add a legend entry for the symbol or line being plotted.
    {p}
    {t}
        *transparency* can also be a 1d array to set varying transparency
        for symbols.

    Raises
    ------
    GMTInvalidInput
        If array-like ``color``/``sizes`` are used together with matrix or
        file input, or if the data kind cannot be determined.
    """
    kwargs = self._preprocess(**kwargs)  # pylint: disable=protected-access

    kind = data_kind(data, x, y, z)

    # Optional extra columns appended to the x/y/z vectors: vector
    # directions, per-point color values, sizes, and transparencies.
    extra_arrays = []
    if "S" in kwargs and kwargs["S"][0] in "vV" and direction is not None:
        extra_arrays.extend(direction)
    if "G" in kwargs and not isinstance(kwargs["G"], str):
        # Array-like color only makes sense when passing vectors, since
        # it must line up with the x/y/z columns.
        if kind != "vectors":
            raise GMTInvalidInput(
                "Can't use arrays for color if data is matrix or file.")
        extra_arrays.append(kwargs["G"])
        del kwargs["G"]
    if sizes is not None:
        if kind != "vectors":
            raise GMTInvalidInput(
                "Can't use arrays for sizes if data is matrix or file.")
        extra_arrays.append(sizes)

    if "t" in kwargs and is_nonstr_iter(kwargs["t"]):
        # Per-symbol transparency: pass the values as a data column and
        # leave -t without an argument so GMT reads them from the input.
        extra_arrays.append(kwargs["t"])
        kwargs["t"] = ""

    with Session() as lib:
        # Choose how data will be passed in to the module
        if kind == "file":
            file_context = dummy_context(data)
        elif kind == "matrix":
            file_context = lib.virtualfile_from_matrix(data)
        elif kind == "vectors":
            file_context = lib.virtualfile_from_vectors(
                np.atleast_1d(x), np.atleast_1d(y), np.atleast_1d(z),
                *extra_arrays)
        else:
            # Fail early with a clear message instead of an
            # UnboundLocalError on file_context below.
            raise GMTInvalidInput(f"Unrecognized data type: {type(data)}")

        with file_context as fname:
            arg_str = " ".join([fname, build_arg_string(kwargs)])
            lib.call_module(module="plot3d", args=arg_str)
示例#28
0
def surface(x=None, y=None, z=None, data=None, **kwargs):
    """
    Grids table data using adjustable tension continuous curvature splines.

    Surface reads randomly-spaced (x,y,z) triples and produces gridded values
    z(x,y) by solving:

        (1 - T) * L (L (z)) + T * L (z) = 0

    where T is a tension factor between 0 and 1, and L indicates the Laplacian
    operator.

    Takes a matrix, xyz triples, or a file name as input.

    Must provide either *data* or *x*, *y*, and *z*.

    Full option list at :gmt-docs:`surface.html`

    {aliases}

    Parameters
    ----------
    x/y/z : 1d arrays
        Arrays of x and y coordinates and values z of the data points.
    data : str or 2d array
        Either a data file name or a 2d numpy array with the tabular data.

    spacing : str
        ``'xinc[unit][+e|n][/yinc[unit][+e|n]]'``.
        x_inc [and optionally y_inc] is the grid spacing.

    region : str or list
        ``'xmin/xmax/ymin/ymax[+r][+uunit]'``.
        Specify the region of interest.

    outfile : str
        Optional. The file name for the output netcdf file with extension .nc
        to store the grid in.

    {V}

    Returns
    -------
    ret: xarray.DataArray or None
        Return type depends on whether the outfile (G) parameter is set:

        - xarray.DataArray if outfile (G) is not set
        - None if outfile (G) is set (grid output will be stored in outfile)

    Raises
    ------
    GMTInvalidInput
        If only x and y are given without z, or if the data kind cannot be
        determined.
    """
    kind = data_kind(data, x, y, z)
    if kind == "vectors" and z is None:
        raise GMTInvalidInput("Must provide z with x and y.")

    with GMTTempFile(suffix=".nc") as tmpfile:
        with Session() as lib:
            # Choose how data will be passed into the module
            if kind == "file":
                file_context = dummy_context(data)
            elif kind == "matrix":
                file_context = lib.virtualfile_from_matrix(data)
            elif kind == "vectors":
                file_context = lib.virtualfile_from_vectors(x, y, z)
            else:
                raise GMTInvalidInput(f"Unrecognized data type: {type(data)}")
            with file_context as infile:
                # If the user did not request an output file, grid into the
                # temporary file so we can read it back as a DataArray.
                if "G" not in kwargs:
                    kwargs.update({"G": tmpfile.name})
                outfile = kwargs["G"]
                arg_str = " ".join([infile, build_arg_string(kwargs)])
                lib.call_module(module="surface", args=arg_str)

        if outfile == tmpfile.name:  # if user did not set outfile, return DataArray
            with xr.open_dataarray(outfile) as dataarray:
                result = dataarray.load()
                _ = result.gmt  # load GMTDataArray accessor information
        elif outfile != tmpfile.name:  # if user sets an outfile, return None
            result = None

    return result
示例#29
0
def grdtrack(points, grid, newcolname=None, outfile=None, **kwargs):
    """
    Sample grids at specified (x,y) locations.

    Grdtrack reads one or more grid files and a table with (x,y) [or (lon,lat)]
    positions in the first two columns (more columns may be present). It
    interpolates the grid(s) at the positions in the table and writes out the
    table with the interpolated values added as (one or more) new columns. A
    bicubic [Default], bilinear, B-spline or nearest-neighbor interpolation is
    used, requiring boundary conditions at the limits of the region (see
    *interpolation*; Default uses “natural” conditions (second partial
    derivative normal to edge is zero) unless the grid is automatically
    recognized as periodic.)

    Full option list at :gmt-docs:`grdtrack.html`

    {aliases}

    Parameters
    ----------
    points : pandas.DataFrame or str
        Either a table with (x, y) or (lon, lat) values in the first two
        columns, or a filename (e.g. csv, txt format). More columns may be
        present.

    grid : xarray.DataArray or str
        Gridded array from which to sample values from, or a filename (netcdf
        format).

    newcolname : str
        Required if 'points' is a pandas.DataFrame. The name for the new column
        in the track pandas.DataFrame table where the sampled values will be
        placed.

    outfile : str
        Required if 'points' is a file. The file name for the output ASCII
        file.

    {V}

    {n}

    Returns
    -------
    track: pandas.DataFrame or None
        Return type depends on whether the outfile parameter is set:

        - pandas.DataFrame table with (x, y, ..., newcolname) if outfile is not
          set
        - None if outfile is set (track output will be stored in outfile)

    Raises
    ------
    GMTInvalidInput
        If 'newcolname'/'outfile' is missing for the given input kind, or if
        the kind of 'points' or 'grid' cannot be determined.
    """
    # Determine the input kinds once up front instead of re-evaluating them
    # in each branch below.
    points_kind = data_kind(points)
    grid_kind = data_kind(grid)

    with GMTTempFile(suffix=".csv") as tmpfile:
        with Session() as lib:
            # Store the pandas.DataFrame points table in virtualfile
            if points_kind == "matrix":
                if newcolname is None:
                    raise GMTInvalidInput(
                        "Please pass in a str to 'newcolname'")
                table_context = lib.virtualfile_from_matrix(points.values)
            elif points_kind == "file":
                if outfile is None:
                    raise GMTInvalidInput("Please pass in a str to 'outfile'")
                table_context = dummy_context(points)
            else:
                raise GMTInvalidInput(f"Unrecognized data type {type(points)}")

            # Store the xarray.DataArray grid in virtualfile
            if grid_kind == "grid":
                grid_context = lib.virtualfile_from_grid(grid)
            elif grid_kind == "file":
                grid_context = dummy_context(grid)
            else:
                raise GMTInvalidInput(f"Unrecognized data type {type(grid)}")

            # Run grdtrack on the temporary (csv) points table
            # and (netcdf) grid virtualfile
            with table_context as csvfile:
                with grid_context as grdfile:
                    kwargs.update({"G": grdfile})
                    if outfile is None:  # Output to tmpfile if outfile is not set
                        outfile = tmpfile.name
                    arg_str = " ".join(
                        [csvfile,
                         build_arg_string(kwargs), "->" + outfile])
                    lib.call_module(module="grdtrack", args=arg_str)

        # Read temporary csv output to a pandas table
        if outfile == tmpfile.name:  # if user did not set outfile, return pd.DataFrame
            column_names = points.columns.to_list() + [newcolname]
            result = pd.read_csv(tmpfile.name, sep="\t", names=column_names)
        elif outfile != tmpfile.name:  # return None if outfile set, output in outfile
            result = None

    return result
示例#30
0
def test_data_kind_fails(data, x, y):
    """
    Verify that data_kind rejects invalid input combinations.
    """
    with pytest.raises(GMTInvalidInput):
        _ = data_kind(data=data, x=x, y=y)