Example no. 1
from weakref import WeakValueDictionary


def init_chan_data(conn):
    # Reset the per-connection channel map; DataDict is defined elsewhere
    # in the original module this snippet was taken from.
    chan_data = conn.memory.setdefault("chan_data", DataDict())
    chan_data.clear()

    # Hold users weakly so entries vanish once nothing else references them.
    users = conn.memory.setdefault("users", WeakValueDictionary())
    users.clear()
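
# Minimal usage sketch (comment-only, since DataDict lives in the original
# module; `conn` stands for any object exposing a dict-like `memory`):
#
#     init_chan_data(conn)   # resets the channel map and the weak user map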
Example no. 2
from weakref import WeakValueDictionary
from copy import copy
from traceback import format_exc
from twisted.internet.defer import inlineCallbacks, returnValue
from django.conf import settings
from evennia.comms.channelhandler import CHANNELHANDLER
from evennia.utils import logger, utils
from evennia.commands.cmdparser import at_multimatch_cmd
from evennia.utils.utils import string_suggestions, to_unicode

from django.utils.translation import ugettext as _

__all__ = ("cmdhandler", )
_GA = object.__getattribute__
_CMDSET_MERGE_CACHE = WeakValueDictionary()

# This decides which command parser is to be used.
# You have to restart the server for changes to take effect.
_COMMAND_PARSER = utils.variable_from_module(
    *settings.COMMAND_PARSER.rsplit('.', 1))
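
# For example, with COMMAND_PARSER = "evennia.commands.cmdparser.cmdparser"
# (shown here as an illustrative value), the rsplit above separates the
# module path from the callable name "cmdparser".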

# System command names - import these variables rather than trying to
# remember the actual string constants. If not defined, Evennia
# hard-coded defaults are used instead.

# command to call if user just presses <return> with no input
CMD_NOINPUT = "__noinput_command"
# command to call if no command match was found
CMD_NOMATCH = "__nomatch_command"
# command to call if multiple command matches were found
CMD_MULTIMATCH = "__multimatch_command"
Example no. 3
class Path:
    """
    A series of possibly disconnected, possibly closed, line and curve
    segments.

    The underlying storage is made up of two parallel numpy arrays:

    - *vertices*: an Nx2 float array of vertices
    - *codes*: an N-length uint8 array of vertex types, or None

    These two arrays always have the same length in the first
    dimension.  For example, to represent a cubic curve, you must
    provide three vertices and three ``CURVE4`` codes.

    The code types are:

    - ``STOP``   :  1 vertex (ignored)
        A marker for the end of the entire path (currently not required and
        ignored)

    - ``MOVETO`` :  1 vertex
        Pick up the pen and move to the given vertex.

    - ``LINETO`` :  1 vertex
        Draw a line from the current position to the given vertex.

    - ``CURVE3`` :  1 control point, 1 endpoint
        Draw a quadratic Bezier curve from the current position, with the given
        control point, to the given end point.

    - ``CURVE4`` :  2 control points, 1 endpoint
        Draw a cubic Bezier curve from the current position, with the given
        control points, to the given end point.

    - ``CLOSEPOLY`` : 1 vertex (ignored)
        Draw a line segment to the start point of the current polyline.

    If *codes* is None, it is interpreted as a ``MOVETO`` followed by a series
    of ``LINETO``.

    Users of Path objects should not access the vertices and codes arrays
    directly.  Instead, they should use `iter_segments` or `cleaned` to get the
    vertex/code pairs.  This helps, in particular, to consistently handle the
    case of *codes* being None.

    Some behavior of Path objects can be controlled by rcParams. See the
    rcParams whose keys start with 'path.'.

    .. note::

        The vertices and codes arrays should be treated as
        immutable -- there are a number of optimizations and assumptions
        made up front in the constructor that will not change when the
        data changes.
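
    Examples
    --------
    A minimal sketch of building a closed triangle and iterating over its
    segments, using only the codes described above::

        from matplotlib.path import Path

        triangle = Path([(0, 0), (1, 0), (0, 1), (0, 0)],
                        [Path.MOVETO, Path.LINETO, Path.LINETO,
                         Path.CLOSEPOLY])
        for verts, code in triangle.iter_segments():
            print(verts, code)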
    """

    code_type = np.uint8

    # Path codes
    STOP = code_type(0)  # 1 vertex
    MOVETO = code_type(1)  # 1 vertex
    LINETO = code_type(2)  # 1 vertex
    CURVE3 = code_type(3)  # 2 vertices
    CURVE4 = code_type(4)  # 3 vertices
    CLOSEPOLY = code_type(79)  # 1 vertex

    #: A dictionary mapping Path codes to the number of vertices that the
    #: code expects.
    NUM_VERTICES_FOR_CODE = {
        STOP: 1,
        MOVETO: 1,
        LINETO: 1,
        CURVE3: 2,
        CURVE4: 3,
        CLOSEPOLY: 1
    }

    def __init__(self,
                 vertices,
                 codes=None,
                 _interpolation_steps=1,
                 closed=False,
                 readonly=False):
        """
        Create a new path with the given vertices and codes.

        Parameters
        ----------
        vertices : array-like
            The ``(N, 2)`` float array, masked array or sequence of pairs
            representing the vertices of the path.

            If *vertices* contains masked values, they will be converted
            to NaNs which are then handled correctly by the Agg
            PathIterator and other consumers of path data, such as
            :meth:`iter_segments`.
        codes : array-like or None, optional
            N-length array of integers representing the codes of the path.
            If not None, codes must be the same length as vertices.
            If None, *vertices* will be treated as a series of line segments.
        _interpolation_steps : int, optional
            Used as a hint to certain projections, such as Polar, that this
            path should be linearly interpolated immediately before drawing.
            This attribute is primarily an implementation detail and is not
            intended for public use.
        closed : bool, optional
            If *codes* is None and closed is True, vertices will be treated as
            line segments of a closed polygon.  Note that the last vertex will
            then be ignored (as the corresponding code will be set to
            CLOSEPOLY).
        readonly : bool, optional
            Makes the path behave in an immutable way and sets the vertices
            and codes as read-only arrays.
        """
        vertices = _to_unmasked_float_array(vertices)
        _api.check_shape((None, 2), vertices=vertices)

        if codes is not None:
            codes = np.asarray(codes, self.code_type)
            if codes.ndim != 1 or len(codes) != len(vertices):
                raise ValueError("'codes' must be a 1D list or array with the "
                                 "same length of 'vertices'. "
                                 f"Your vertices have shape {vertices.shape} "
                                 f"but your codes have shape {codes.shape}")
            if len(codes) and codes[0] != self.MOVETO:
                raise ValueError("The first element of 'code' must be equal "
                                 f"to 'MOVETO' ({self.MOVETO}).  "
                                 f"Your first code is {codes[0]}")
        elif closed and len(vertices):
            codes = np.empty(len(vertices), dtype=self.code_type)
            codes[0] = self.MOVETO
            codes[1:-1] = self.LINETO
            codes[-1] = self.CLOSEPOLY

        self._vertices = vertices
        self._codes = codes
        self._interpolation_steps = _interpolation_steps
        self._update_values()

        if readonly:
            self._vertices.flags.writeable = False
            if self._codes is not None:
                self._codes.flags.writeable = False
            self._readonly = True
        else:
            self._readonly = False

    @classmethod
    def _fast_from_codes_and_verts(cls, verts, codes, internals_from=None):
        """
        Create a Path instance without the expense of calling the constructor.

        Parameters
        ----------
        verts : numpy array
        codes : numpy array
        internals_from : Path or None
            If not None, another `Path` from which the attributes
            ``should_simplify``, ``simplify_threshold``, and
            ``interpolation_steps`` will be copied.  Note that ``readonly`` is
            never copied, and always set to ``False`` by this constructor.
        """
        pth = cls.__new__(cls)
        pth._vertices = _to_unmasked_float_array(verts)
        pth._codes = codes
        pth._readonly = False
        if internals_from is not None:
            pth._should_simplify = internals_from._should_simplify
            pth._simplify_threshold = internals_from._simplify_threshold
            pth._interpolation_steps = internals_from._interpolation_steps
        else:
            pth._should_simplify = True
            pth._simplify_threshold = mpl.rcParams['path.simplify_threshold']
            pth._interpolation_steps = 1
        return pth

    def _update_values(self):
        self._simplify_threshold = mpl.rcParams['path.simplify_threshold']
        self._should_simplify = (self._simplify_threshold > 0
                                 and mpl.rcParams['path.simplify']
                                 and len(self._vertices) >= 128
                                 and (self._codes is None
                                      or np.all(self._codes <= Path.LINETO)))

    @property
    def vertices(self):
        """
        The list of vertices in the `Path` as an Nx2 numpy array.
        """
        return self._vertices

    @vertices.setter
    def vertices(self, vertices):
        if self._readonly:
            raise AttributeError("Can't set vertices on a readonly Path")
        self._vertices = vertices
        self._update_values()

    @property
    def codes(self):
        """
        The list of codes in the `Path` as a 1D numpy array.  Each
        code is one of `STOP`, `MOVETO`, `LINETO`, `CURVE3`, `CURVE4`
        or `CLOSEPOLY`.  For codes that correspond to more than one
        vertex (`CURVE3` and `CURVE4`), that code will be repeated so
        that the length of `self.vertices` and `self.codes` is always
        the same.
        """
        return self._codes

    @codes.setter
    def codes(self, codes):
        if self._readonly:
            raise AttributeError("Can't set codes on a readonly Path")
        self._codes = codes
        self._update_values()

    @property
    def simplify_threshold(self):
        """
        The fraction of a pixel difference below which vertices will
        be simplified out.
        """
        return self._simplify_threshold

    @simplify_threshold.setter
    def simplify_threshold(self, threshold):
        self._simplify_threshold = threshold

    @property
    def should_simplify(self):
        """
        `True` if the vertices array should be simplified.
        """
        return self._should_simplify

    @should_simplify.setter
    def should_simplify(self, should_simplify):
        self._should_simplify = should_simplify

    @property
    def readonly(self):
        """
        `True` if the `Path` is read-only.
        """
        return self._readonly

    def __copy__(self):
        """
        Return a shallow copy of the `Path`, which will share the
        vertices and codes with the source `Path`.
        """
        import copy
        return copy.copy(self)

    copy = __copy__

    def __deepcopy__(self, memo=None):
        """
        Return a deepcopy of the `Path`.  The `Path` will not be
        readonly, even if the source `Path` is.
        """
        try:
            codes = self.codes.copy()
        except AttributeError:
            codes = None
        return self.__class__(self.vertices.copy(),
                              codes,
                              _interpolation_steps=self._interpolation_steps)

    deepcopy = __deepcopy__

    @classmethod
    def make_compound_path_from_polys(cls, XY):
        """
        Make a compound path object to draw a number of polygons with
        equal numbers of sides.  *XY* is a (numpolys x numsides x 2)
        numpy array of vertices.  The returned object is a :class:`Path`.

        .. plot:: gallery/misc/histogram_path.py

        """

        # for each poly: 1 for the MOVETO, (numsides-1) for the LINETO, 1 for
        # the CLOSEPOLY; the vert for the closepoly is ignored but we still
        # need it to keep the codes aligned with the vertices
        numpolys, numsides, two = XY.shape
        if two != 2:
            raise ValueError("The third dimension of 'XY' must be 2")
        stride = numsides + 1
        nverts = numpolys * stride
        verts = np.zeros((nverts, 2))
        codes = np.full(nverts, cls.LINETO, dtype=cls.code_type)
        codes[0::stride] = cls.MOVETO
        codes[numsides::stride] = cls.CLOSEPOLY
        for i in range(numsides):
            verts[i::stride] = XY[:, i]

        return cls(verts, codes)

    @classmethod
    def make_compound_path(cls, *args):
        """
        Make a compound path from a list of Path objects. Blindly removes all
        Path.STOP control points.
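
        For example, a sketch combining two of the unit-shape helpers
        defined further down in this class::

            combined = Path.make_compound_path(Path.unit_rectangle(),
                                               Path.unit_circle())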
        """
        # Handle an empty list in args (i.e. no args).
        if not args:
            return Path(np.empty([0, 2], dtype=np.float32))
        vertices = np.concatenate([x.vertices for x in args])
        codes = np.empty(len(vertices), dtype=cls.code_type)
        i = 0
        for path in args:
            if path.codes is None:
                codes[i] = cls.MOVETO
                codes[i + 1:i + len(path.vertices)] = cls.LINETO
            else:
                codes[i:i + len(path.codes)] = path.codes
            i += len(path.vertices)
        # remove STOPs, since internal STOPs are a bug
        not_stop_mask = codes != cls.STOP
        vertices = vertices[not_stop_mask, :]
        codes = codes[not_stop_mask]

        return cls(vertices, codes)

    def __repr__(self):
        return "Path(%r, %r)" % (self.vertices, self.codes)

    def __len__(self):
        return len(self.vertices)

    def iter_segments(self,
                      transform=None,
                      remove_nans=True,
                      clip=None,
                      snap=False,
                      stroke_width=1.0,
                      simplify=None,
                      curves=True,
                      sketch=None):
        """
        Iterate over all curve segments in the path.

        Each iteration returns a pair ``(vertices, code)``, where ``vertices``
        is a sequence of 1-3 coordinate pairs, and ``code`` is a `Path` code.

        Additionally, this method can provide a number of standard cleanups and
        conversions to the path.

        Parameters
        ----------
        transform : None or :class:`~matplotlib.transforms.Transform`
            If not None, the given affine transformation will be applied to the
            path.
        remove_nans : bool, optional
            Whether to remove all NaNs from the path and skip over them using
            MOVETO commands.
        clip : None or (float, float, float, float), optional
            If not None, must be a four-tuple (x1, y1, x2, y2)
            defining a rectangle in which to clip the path.
        snap : None or bool, optional
            If True, snap all nodes to pixels; if False, don't snap them.
            If None, snap if the path contains only segments
            parallel to the x or y axes, and no more than 1024 of them.
        stroke_width : float, optional
            The width of the stroke being drawn (used for path snapping).
        simplify : None or bool, optional
            Whether to simplify the path by removing vertices
            that do not affect its appearance.  If None, use the
            :attr:`should_simplify` attribute.  See also :rc:`path.simplify`
            and :rc:`path.simplify_threshold`.
        curves : bool, optional
            If True, curve segments will be returned as curve segments.
            If False, all curves will be converted to line segments.
        sketch : None or sequence, optional
            If not None, must be a 3-tuple of the form
            (scale, length, randomness), representing the sketch parameters.
        """
        if not len(self):
            return

        cleaned = self.cleaned(transform=transform,
                               remove_nans=remove_nans,
                               clip=clip,
                               snap=snap,
                               stroke_width=stroke_width,
                               simplify=simplify,
                               curves=curves,
                               sketch=sketch)

        # Cache these object lookups for performance in the loop.
        NUM_VERTICES_FOR_CODE = self.NUM_VERTICES_FOR_CODE
        STOP = self.STOP

        vertices = iter(cleaned.vertices)
        codes = iter(cleaned.codes)
        for curr_vertices, code in zip(vertices, codes):
            if code == STOP:
                break
            extra_vertices = NUM_VERTICES_FOR_CODE[code] - 1
            if extra_vertices:
                for i in range(extra_vertices):
                    next(codes)
                    curr_vertices = np.append(curr_vertices, next(vertices))
            yield curr_vertices, code

    def iter_bezier(self, **kwargs):
        """
        Iterate over each bezier curve (lines included) in a Path.

        Parameters
        ----------
        **kwargs
            Forwarded to `.iter_segments`.

        Yields
        ------
        B : matplotlib.bezier.BezierSegment
            The bezier curves that make up the current path. Note in particular
            that freestanding points are bezier curves of order 0, and lines
            are bezier curves of order 1 (with two control points).
        code : Path.code_type
            The code describing what kind of curve is being returned.
            Path.MOVETO, Path.LINETO, Path.CURVE3, Path.CURVE4 correspond to
            bezier curves with 1, 2, 3, and 4 control points (respectively).
            Path.CLOSEPOLY is a Path.LINETO with the control points correctly
            chosen based on the start/end points of the current stroke.
        """
        first_vert = None
        prev_vert = None
        for verts, code in self.iter_segments(**kwargs):
            if first_vert is None:
                if code != Path.MOVETO:
                    raise ValueError("Malformed path, must start with MOVETO.")
            if code == Path.MOVETO:  # a point is like "CURVE1"
                first_vert = verts
                yield BezierSegment(np.array([first_vert])), code
            elif code == Path.LINETO:  # "CURVE2"
                yield BezierSegment(np.array([prev_vert, verts])), code
            elif code == Path.CURVE3:
                yield BezierSegment(np.array([prev_vert, verts[:2],
                                              verts[2:]])), code
            elif code == Path.CURVE4:
                yield BezierSegment(
                    np.array([prev_vert, verts[:2], verts[2:4],
                              verts[4:]])), code
            elif code == Path.CLOSEPOLY:
                yield BezierSegment(np.array([prev_vert, first_vert])), code
            elif code == Path.STOP:
                return
            else:
                raise ValueError("Invalid Path.code_type: " + str(code))
            prev_vert = verts[-2:]

    @cbook._delete_parameter("3.3", "quantize")
    def cleaned(self,
                transform=None,
                remove_nans=False,
                clip=None,
                quantize=False,
                simplify=False,
                curves=False,
                stroke_width=1.0,
                snap=False,
                sketch=None):
        """
        Return a new Path with vertices and codes cleaned according to the
        parameters.

        See Also
        --------
        Path.iter_segments : for details of the keyword arguments.
        """
        vertices, codes = _path.cleanup_path(self, transform, remove_nans,
                                             clip, snap, stroke_width,
                                             simplify, curves, sketch)
        pth = Path._fast_from_codes_and_verts(vertices, codes, self)
        if not simplify:
            pth._should_simplify = False
        return pth

    def transformed(self, transform):
        """
        Return a transformed copy of the path.

        See Also
        --------
        matplotlib.transforms.TransformedPath
            A specialized path class that will cache the transformed result and
            automatically update when the transform changes.
        """
        return Path(transform.transform(self.vertices), self.codes,
                    self._interpolation_steps)

    def contains_point(self, point, transform=None, radius=0.0):
        """
        Return whether the area enclosed by the path contains the given point.

        The path is always treated as closed; i.e. if the last code is not
        CLOSEPOLY an implicit segment connecting the last vertex to the first
        vertex is assumed.

        Parameters
        ----------
        point : (float, float)
            The point (x, y) to check.
        transform : `matplotlib.transforms.Transform`, optional
            If not ``None``, *point* will be compared to ``self`` transformed
            by *transform*; i.e. for a correct check, *transform* should
            transform the path into the coordinate system of *point*.
        radius : float, default: 0
            Add an additional margin on the path in coordinates of *point*.
            The path is extended tangentially by *radius/2*; i.e. if you would
            draw the path with a linewidth of *radius*, all points on the line
            would still be considered to be contained in the area. Conversely,
            negative values shrink the area: Points on the imaginary line
            will be considered outside the area.

        Returns
        -------
        bool

        Notes
        -----
        The current algorithm has some limitations:

        - The result is undefined for points exactly at the boundary
          (i.e. at the path shifted by *radius/2*).
        - The result is undefined if there is no enclosed area, i.e. all
          vertices are on a straight line.
        - If bounding lines start to cross each other due to *radius* shift,
          the result is not guaranteed to be correct.
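
        For example, with the unit rectangle defined later in this class::

            Path.unit_rectangle().contains_point((0.5, 0.5))  # True: inside
            Path.unit_rectangle().contains_point((2.0, 2.0))  # False: outside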
        """
        if transform is not None:
            transform = transform.frozen()
        # `point_in_path` does not handle nonlinear transforms, so we
        # transform the path ourselves.  If *transform* is affine, letting
        # `point_in_path` handle the transform avoids allocating an extra
        # buffer.
        if transform and not transform.is_affine:
            self = transform.transform_path(self)
            transform = None
        return _path.point_in_path(point[0], point[1], radius, self, transform)

    def contains_points(self, points, transform=None, radius=0.0):
        """
        Return whether the area enclosed by the path contains the given points.

        The path is always treated as closed; i.e. if the last code is not
        CLOSEPOLY an implicit segment connecting the last vertex to the first
        vertex is assumed.

        Parameters
        ----------
        points : (N, 2) array
            The points to check. Columns contain x and y values.
        transform : `matplotlib.transforms.Transform`, optional
            If not ``None``, *points* will be compared to ``self`` transformed
            by *transform*; i.e. for a correct check, *transform* should
            transform the path into the coordinate system of *points*.
        radius : float, default: 0
            Add an additional margin on the path in coordinates of *points*.
            The path is extended tangentially by *radius/2*; i.e. if you would
            draw the path with a linewidth of *radius*, all points on the line
            would still be considered to be contained in the area. Conversely,
            negative values shrink the area: Points on the imaginary line
            will be considered outside the area.

        Returns
        -------
        length-N bool array

        Notes
        -----
        The current algorithm has some limitations:

        - The result is undefined for points exactly at the boundary
          (i.e. at the path shifted by *radius/2*).
        - The result is undefined if there is no enclosed area, i.e. all
          vertices are on a straight line.
        - If bounding lines start to cross each other due to *radius* shift,
          the result is not guaranteed to be correct.
        """
        if transform is not None:
            transform = transform.frozen()
        result = _path.points_in_path(points, radius, self, transform)
        return result.astype('bool')

    def contains_path(self, path, transform=None):
        """
        Return whether this (closed) path completely contains the given path.

        If *transform* is not ``None``, the path will be transformed before
        checking for containment.
        """
        if transform is not None:
            transform = transform.frozen()
        return _path.path_in_path(self, None, path, transform)

    def get_extents(self, transform=None, **kwargs):
        """
        Get Bbox of the path.

        Parameters
        ----------
        transform : matplotlib.transforms.Transform, optional
            Transform to apply to path before computing extents, if any.
        **kwargs
            Forwarded to `.iter_bezier`.

        Returns
        -------
        matplotlib.transforms.Bbox
            The extents of the path Bbox([[xmin, ymin], [xmax, ymax]])
        """
        from .transforms import Bbox
        if transform is not None:
            self = transform.transform_path(self)
        if self.codes is None:
            xys = self.vertices
        elif len(np.intersect1d(self.codes, [Path.CURVE3, Path.CURVE4])) == 0:
            xys = self.vertices[self.codes != Path.CLOSEPOLY]
        else:
            xys = []
            for curve, code in self.iter_bezier(**kwargs):
                # places where the derivative is zero can be extrema
                _, dzeros = curve.axis_aligned_extrema()
                # as can the ends of the curve
                xys.append(curve([0, *dzeros, 1]))
            xys = np.concatenate(xys)
        if len(xys):
            return Bbox([xys.min(axis=0), xys.max(axis=0)])
        else:
            return Bbox.null()

    def intersects_path(self, other, filled=True):
        """
        Return whether this path intersects another given path.

        If *filled* is True, then this also returns True if one path completely
        encloses the other (i.e., the paths are treated as filled).
        """
        return _path.path_intersects_path(self, other, filled)

    def intersects_bbox(self, bbox, filled=True):
        """
        Return whether this path intersects a given `~.transforms.Bbox`.

        If *filled* is True, then this also returns True if the path completely
        encloses the `.Bbox` (i.e., the path is treated as filled).

        The bounding box is always considered filled.
        """
        return _path.path_intersects_rectangle(self, bbox.x0, bbox.y0, bbox.x1,
                                               bbox.y1, filled)

    def interpolated(self, steps):
        """
        Return a new path resampled to length N x steps.

        Codes other than LINETO are not handled correctly.
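
        For example, ``path.interpolated(2)`` inserts one midpoint into
        every segment, turning N vertices into 2N - 1.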
        """
        if steps == 1:
            return self

        vertices = simple_linear_interpolation(self.vertices, steps)
        codes = self.codes
        if codes is not None:
            new_codes = np.full((len(codes) - 1) * steps + 1,
                                Path.LINETO,
                                dtype=self.code_type)
            new_codes[0::steps] = codes
        else:
            new_codes = None
        return Path(vertices, new_codes)

    def to_polygons(self, transform=None, width=0, height=0, closed_only=True):
        """
        Convert this path to a list of polygons or polylines.  Each
        polygon/polyline is an Nx2 array of vertices.  In other words,
        each polygon has no ``MOVETO`` instructions or curves.  This
        is useful for displaying in backends that do not support
        compound paths or Bezier curves.

        If *width* and *height* are both non-zero then the lines will
        be simplified so that vertices outside of (0, 0), (width,
        height) will be clipped.

        If *closed_only* is `True` (default), only closed polygons,
        with the last point being the same as the first point, will be
        returned.  Any unclosed polylines in the path will be
        explicitly closed.  If *closed_only* is `False`, any unclosed
        polygons in the path will be returned as unclosed polygons,
        and the closed polygons will be returned explicitly closed by
        setting the last point to the same as the first point.
        """
        if len(self.vertices) == 0:
            return []

        if transform is not None:
            transform = transform.frozen()

        if self.codes is None and (width == 0 or height == 0):
            vertices = self.vertices
            if closed_only:
                if len(vertices) < 3:
                    return []
                elif np.any(vertices[0] != vertices[-1]):
                    vertices = [*vertices, vertices[0]]

            if transform is None:
                return [vertices]
            else:
                return [transform.transform(vertices)]

        # Deal with the case where there are curves and/or multiple
        # subpaths (using extension code)
        return _path.convert_path_to_polygons(self, transform, width, height,
                                              closed_only)

    _unit_rectangle = None

    @classmethod
    def unit_rectangle(cls):
        """
        Return a `Path` instance of the unit rectangle from (0, 0) to (1, 1).
        """
        if cls._unit_rectangle is None:
            cls._unit_rectangle = cls([[0, 0], [1, 0], [1, 1], [0, 1], [0, 0]],
                                      closed=True,
                                      readonly=True)
        return cls._unit_rectangle

    _unit_regular_polygons = WeakValueDictionary()

    @classmethod
    def unit_regular_polygon(cls, numVertices):
        """
        Return a :class:`Path` instance for a unit regular polygon with the
        given *numVertices* such that the circumscribing circle has radius 1.0,
        centered at (0, 0).
        """
        if numVertices <= 16:
            path = cls._unit_regular_polygons.get(numVertices)
        else:
            path = None
        if path is None:
            theta = (
                (2 * np.pi / numVertices) * np.arange(numVertices + 1)
                # This initial rotation is to make sure the polygon always
                # "points-up".
                + np.pi / 2)
            verts = np.column_stack((np.cos(theta), np.sin(theta)))
            path = cls(verts, closed=True, readonly=True)
            if numVertices <= 16:
                cls._unit_regular_polygons[numVertices] = path
        return path

    _unit_regular_stars = WeakValueDictionary()

    @classmethod
    def unit_regular_star(cls, numVertices, innerCircle=0.5):
        """
        Return a :class:`Path` for a unit regular star with the given
        numVertices and radius of 1.0, centered at (0, 0).
        """
        if numVertices <= 16:
            path = cls._unit_regular_stars.get((numVertices, innerCircle))
        else:
            path = None
        if path is None:
            ns2 = numVertices * 2
            theta = (2 * np.pi / ns2 * np.arange(ns2 + 1))
            # This initial rotation is to make sure the polygon always
            # "points-up"
            theta += np.pi / 2.0
            r = np.ones(ns2 + 1)
            r[1::2] = innerCircle
            verts = (r * np.vstack((np.cos(theta), np.sin(theta)))).T
            path = cls(verts, closed=True, readonly=True)
            if numVertices <= 16:
                cls._unit_regular_stars[(numVertices, innerCircle)] = path
        return path

    @classmethod
    def unit_regular_asterisk(cls, numVertices):
        """
        Return a :class:`Path` for a unit regular asterisk with the given
        numVertices and radius of 1.0, centered at (0, 0).
        """
        return cls.unit_regular_star(numVertices, 0.0)

    _unit_circle = None

    @classmethod
    def unit_circle(cls):
        """
        Return the readonly :class:`Path` of the unit circle.

        For most cases, :func:`Path.circle` will be what you want.
        """
        if cls._unit_circle is None:
            cls._unit_circle = cls.circle(center=(0, 0),
                                          radius=1,
                                          readonly=True)
        return cls._unit_circle

    @classmethod
    def circle(cls, center=(0., 0.), radius=1., readonly=False):
        """
        Return a `Path` representing a circle of a given radius and center.

        Parameters
        ----------
        center : (float, float), default: (0, 0)
            The center of the circle.
        radius : float, default: 1
            The radius of the circle.
        readonly : bool
            Whether the created path should have the "readonly" argument
            set when creating the Path instance.

        Notes
        -----
        The circle is approximated using 8 cubic Bezier curves, as described in

          Lancaster, Don.  `Approximating a Circle or an Ellipse Using Four
          Bezier Cubic Splines <https://www.tinaja.com/glib/ellipse4.pdf>`_.
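
        A quick usage sketch (the center and radius here are arbitrary)::

            halo = Path.circle(center=(2.0, 3.0), radius=0.5)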
        """
        MAGIC = 0.2652031
        SQRTHALF = np.sqrt(0.5)
        MAGIC45 = SQRTHALF * MAGIC

        vertices = np.array([
            [0.0, -1.0],
            [MAGIC, -1.0],
            [SQRTHALF - MAGIC45, -SQRTHALF - MAGIC45],
            [SQRTHALF, -SQRTHALF],
            [SQRTHALF + MAGIC45, -SQRTHALF + MAGIC45],
            [1.0, -MAGIC],
            [1.0, 0.0],
            [1.0, MAGIC],
            [SQRTHALF + MAGIC45, SQRTHALF - MAGIC45],
            [SQRTHALF, SQRTHALF],
            [SQRTHALF - MAGIC45, SQRTHALF + MAGIC45],
            [MAGIC, 1.0],
            [0.0, 1.0],
            [-MAGIC, 1.0],
            [-SQRTHALF + MAGIC45, SQRTHALF + MAGIC45],
            [-SQRTHALF, SQRTHALF],
            [-SQRTHALF - MAGIC45, SQRTHALF - MAGIC45],
            [-1.0, MAGIC],
            [-1.0, 0.0],
            [-1.0, -MAGIC],
            [-SQRTHALF - MAGIC45, -SQRTHALF + MAGIC45],
            [-SQRTHALF, -SQRTHALF],
            [-SQRTHALF + MAGIC45, -SQRTHALF - MAGIC45],
            [-MAGIC, -1.0],
            [0.0, -1.0],
            [0.0, -1.0]], dtype=float)

        codes = [cls.CURVE4] * 26
        codes[0] = cls.MOVETO
        codes[-1] = cls.CLOSEPOLY
        return Path(vertices * radius + center, codes, readonly=readonly)

    _unit_circle_righthalf = None

    @classmethod
    def unit_circle_righthalf(cls):
        """
        Return a `Path` of the right half of a unit circle.

        See `Path.circle` for the reference on the approximation used.
        """
        if cls._unit_circle_righthalf is None:
            MAGIC = 0.2652031
            SQRTHALF = np.sqrt(0.5)
            MAGIC45 = SQRTHALF * MAGIC

            vertices = np.array([[0.0, -1.0], [MAGIC, -1.0],
                                 [SQRTHALF - MAGIC45, -SQRTHALF - MAGIC45],
                                 [SQRTHALF, -SQRTHALF],
                                 [SQRTHALF + MAGIC45, -SQRTHALF + MAGIC45],
                                 [1.0, -MAGIC], [1.0, 0.0], [1.0, MAGIC],
                                 [SQRTHALF + MAGIC45, SQRTHALF - MAGIC45],
                                 [SQRTHALF, SQRTHALF],
                                 [SQRTHALF - MAGIC45, SQRTHALF + MAGIC45],
                                 [MAGIC, 1.0], [0.0, 1.0], [0.0, -1.0]], float)

            codes = np.full(14, cls.CURVE4, dtype=cls.code_type)
            codes[0] = cls.MOVETO
            codes[-1] = cls.CLOSEPOLY

            cls._unit_circle_righthalf = cls(vertices, codes, readonly=True)
        return cls._unit_circle_righthalf

    @classmethod
    def arc(cls, theta1, theta2, n=None, is_wedge=False):
        """
        Return the unit circle arc from angles *theta1* to *theta2* (in
        degrees).

        *theta2* is unwrapped to produce the shortest arc within 360 degrees.
        That is, if *theta2* > *theta1* + 360, the arc will be from *theta1* to
        *theta2* - 360 and not a full circle plus some extra overlap.

        If *n* is provided, it is the number of spline segments to make.
        If *n* is not provided, the number of spline segments is
        determined based on the delta between *theta1* and *theta2*.

           Masionobe, L.  2003.  `Drawing an elliptical arc using
           polylines, quadratic or cubic Bezier curves
           <http://www.spaceroots.org/documents/ellipse/index.html>`_.
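
        For example, ``Path.arc(0, 90)`` is the quarter arc running
        counterclockwise from (1, 0) to (0, 1).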
        """
        halfpi = np.pi * 0.5

        eta1 = theta1
        eta2 = theta2 - 360 * np.floor((theta2 - theta1) / 360)
        # Ensure 2pi range is not flattened to 0 due to floating-point errors,
        # but don't try to expand existing 0 range.
        if theta2 != theta1 and eta2 <= eta1:
            eta2 += 360
        eta1, eta2 = np.deg2rad([eta1, eta2])

        # number of curve segments to make
        if n is None:
            n = int(2**np.ceil((eta2 - eta1) / halfpi))
        if n < 1:
            raise ValueError("n must be >= 1 or None")

        deta = (eta2 - eta1) / n
        t = np.tan(0.5 * deta)
        alpha = np.sin(deta) * (np.sqrt(4.0 + 3.0 * t * t) - 1) / 3.0

        steps = np.linspace(eta1, eta2, n + 1, True)
        cos_eta = np.cos(steps)
        sin_eta = np.sin(steps)

        xA = cos_eta[:-1]
        yA = sin_eta[:-1]
        xA_dot = -yA
        yA_dot = xA

        xB = cos_eta[1:]
        yB = sin_eta[1:]
        xB_dot = -yB
        yB_dot = xB

        if is_wedge:
            length = n * 3 + 4
            vertices = np.zeros((length, 2), float)
            codes = np.full(length, cls.CURVE4, dtype=cls.code_type)
            vertices[1] = [xA[0], yA[0]]
            codes[0:2] = [cls.MOVETO, cls.LINETO]
            codes[-2:] = [cls.LINETO, cls.CLOSEPOLY]
            vertex_offset = 2
            end = length - 2
        else:
            length = n * 3 + 1
            vertices = np.empty((length, 2), float)
            codes = np.full(length, cls.CURVE4, dtype=cls.code_type)
            vertices[0] = [xA[0], yA[0]]
            codes[0] = cls.MOVETO
            vertex_offset = 1
            end = length

        vertices[vertex_offset:end:3, 0] = xA + alpha * xA_dot
        vertices[vertex_offset:end:3, 1] = yA + alpha * yA_dot
        vertices[vertex_offset + 1:end:3, 0] = xB - alpha * xB_dot
        vertices[vertex_offset + 1:end:3, 1] = yB - alpha * yB_dot
        vertices[vertex_offset + 2:end:3, 0] = xB
        vertices[vertex_offset + 2:end:3, 1] = yB

        return cls(vertices, codes, readonly=True)

    @classmethod
    def wedge(cls, theta1, theta2, n=None):
        """
        Return the unit circle wedge from angles *theta1* to *theta2* (in
        degrees).

        *theta2* is unwrapped to produce the shortest wedge within 360 degrees.
        That is, if *theta2* > *theta1* + 360, the wedge will be from *theta1*
        to *theta2* - 360 and not a full circle plus some extra overlap.

        If *n* is provided, it is the number of spline segments to make.
        If *n* is not provided, the number of spline segments is
        determined based on the delta between *theta1* and *theta2*.

        See `Path.arc` for the reference on the approximation used.
        """
        return cls.arc(theta1, theta2, n, True)

    @staticmethod
    @lru_cache(8)
    def hatch(hatchpattern, density=6):
        """
        Given a hatch specifier, *hatchpattern*, generates a Path that
        can be used in a repeated hatching pattern.  *density* is the
        number of lines per unit square.
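
        For example, ``Path.hatch('/')`` returns the path for diagonal
        hatching, while ``Path.hatch(None)`` returns None.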
        """
        from matplotlib.hatch import get_path
        return (get_path(hatchpattern, density)
                if hatchpattern is not None else None)

    def clip_to_bbox(self, bbox, inside=True):
        """
        Clip the path to the given bounding box.

        The path must be made up of one or more closed polygons.  This
        algorithm will not behave correctly for unclosed paths.

        If *inside* is `True`, clip to the inside of the box, otherwise
        to the outside of the box.
        """
        # Use make_compound_path_from_polys
        verts = _path.clip_path_to_rect(self, bbox, inside)
        paths = [Path(poly) for poly in verts]
        return self.make_compound_path(*paths)
Example no. 4
import asyncio
import functools
import operator
import threading
from typing import Callable, Union, Optional, List, Awaitable, Tuple
from weakref import WeakValueDictionary

from streamlit import StopException
from .base_connection import _BaseConnection, _TimeBuffering
from .callbacks import _get_loop, _wrapper
from streamlit.script_runner import RerunException
from tornado.websocket import websocket_connect, WebSocketClosedError, WebSocketClientConnection

_ws_connections = WeakValueDictionary()
_ws_connections_lock = threading.Lock()
reconnect_time_seconds = 1.0
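
# Sketch of how the weak registry above is meant to be used (the key scheme
# is an assumption for illustration, not part of this module's API):
#
#     with _ws_connections_lock:
#         _ws_connections[endpoint_url] = conn
#     # the entry disappears automatically once `conn` is garbage collected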


class _WebsocketConnection(_BaseConnection):
    @staticmethod
    def get_reconnect_time_seconds():
        return reconnect_time_seconds

    def _get_event(self, connection) -> Awaitable:
        return connection.read_message()

    def _handle_event(self, connection, data):
        if data is None:
            raise ConnectionError("Websocket sent EOF")

        self._call_all_callback(connection, data)
Example no. 5
class Registry(Mapping):
    """ Model registry for a particular database.

    The registry is essentially a mapping between model names and model classes.
    There is one registry instance per database.
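
    Typical lookup, as a sketch (``'res.partner'`` is just an example
    model name)::

        registry = Registry(db_name)      # fetch or create the registry
        Model = registry['res.partner']   # mapping lookup by model name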

    """
    _lock = threading.RLock()
    _saved_lock = None

    # a cache for model classes, indexed by their base classes
    model_cache = WeakValueDictionary()

    @lazy_classproperty
    def registries(cls):
        """ A mapping from database names to registries. """
        size = config.get('registry_lru_size', None)
        if not size:
            # Size the LRU depending on the memory limits
            if os.name != 'posix':
                # cannot specify the soft memory limit on Windows...
                size = 42
            else:
                # A registry takes 10MB of memory on average, so we reserve
                # 10MB (registry) + 5MB (working memory) per registry
                avgsz = 15 * 1024 * 1024
                size = int(config['limit_memory_soft'] / avgsz)
        return LRU(size)

    def __new__(cls, db_name):
        """ Return the registry for the given database name."""
        with cls._lock:
            try:
                return cls.registries[db_name]
            except KeyError:
                return cls.new(db_name)
            finally:
                # set db tracker - cleaned up at the WSGI dispatching phase in
                # odoo.service.wsgi_server.application
                threading.current_thread().dbname = db_name

    @classmethod
    def new(cls, db_name, force_demo=False, status=None, update_module=False):
        """ Create and return a new registry for the given database name. """
        with cls._lock:
            with odoo.api.Environment.manage():
                registry = object.__new__(cls)
                registry.init(db_name)

                # Initializing a registry will call general code which will in
                # turn call Registry() to obtain the registry being initialized.
                # Make it available in the registries dictionary then remove it
                # if an exception is raised.
                cls.delete(db_name)
                cls.registries[db_name] = registry
                try:
                    registry.setup_signaling()
                    # This should be a method on Registry
                    try:
                        odoo.modules.load_modules(registry._db, force_demo,
                                                  status, update_module)
                    except Exception:
                        odoo.modules.reset_modules_state(db_name)
                        raise
                except Exception:
                    _logger.error('Failed to load registry')
                    del cls.registries[db_name]
                    raise

                # load_modules() above can replace the registry by calling
                # indirectly new() again (when modules have to be uninstalled).
                # Yeah, crazy.
                registry = cls.registries[db_name]

            registry._init = False
            registry.ready = True
            registry.registry_invalidated = bool(update_module)

        return registry

    def init(self, db_name):
        self.models = {}  # model name/model instance mapping
        self._sql_constraints = set()
        self._init = True
        self._assertion_report = assertion_report.assertion_report()
        self._fields_by_model = None
        self._ordinary_tables = None
        self._constraint_queue = deque()

        # modules fully loaded (maintained during init phase by `loading` module)
        self._init_modules = set()
        self.updated_modules = []  # installed/updated modules
        self.loaded_xmlids = set()

        self.db_name = db_name
        self._db = odoo.sql_db.db_connect(db_name)

        # cursor for test mode; None means "normal" mode
        self.test_cr = None
        self.test_lock = None

        # Registry load state:
        self.loaded = False  # whether all modules are loaded
        self.ready = False  # whether everything is set up

        # Inter-process signaling:
        # The `base_registry_signaling` sequence indicates the whole registry
        # must be reloaded.
        # The `base_cache_signaling` sequence indicates all caches must be
        # invalidated (i.e. cleared).
        self.registry_sequence = None
        self.cache_sequence = None

        # Flags indicating invalidation of the registry or the cache.
        self.registry_invalidated = False
        self.cache_invalidated = False

        with closing(self.cursor()) as cr:
            self.has_unaccent = odoo.modules.db.has_unaccent(cr)

    @classmethod
    def delete(cls, db_name):
        """ Delete the registry linked to a given database. """
        with cls._lock:
            if db_name in cls.registries:
                del cls.registries[db_name]

    @classmethod
    def delete_all(cls):
        """ Delete all the registries. """
        with cls._lock:
            cls.registries.clear()

    #
    # Mapping abstract methods implementation
    # => mixin provides methods keys, items, values, get, __eq__, and __ne__
    #
    def __len__(self):
        """ Return the size of the registry. """
        return len(self.models)

    def __iter__(self):
        """ Return an iterator over all model names. """
        return iter(self.models)

    def __getitem__(self, model_name):
        """ Return the model with the given name or raise KeyError if it doesn't exist."""
        return self.models[model_name]

    def __call__(self, model_name):
        """ Same as ``self[model_name]``. """
        return self.models[model_name]

    def __setitem__(self, model_name, model):
        """ Add or replace a model in the registry."""
        self.models[model_name] = model

    def __delitem__(self, model_name):
        """ Remove a (custom) model from the registry. """
        del self.models[model_name]
        # the custom model can inherit from mixins ('mail.thread', ...)
        for Model in self.models.values():
            Model._inherit_children.discard(model_name)

    def descendants(self, model_names, *kinds):
        """ Return the models corresponding to ``model_names`` and all those
        that inherit/inherits from them.
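
        For example, ``registry.descendants(['res.partner'], '_inherit')``
        (an illustrative call) yields 'res.partner' together with every
        model whose ``_inherit`` chain reaches it.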
        """
        assert all(kind in ('_inherit', '_inherits') for kind in kinds)
        funcs = [attrgetter(kind + '_children') for kind in kinds]

        models = OrderedSet()
        queue = deque(model_names)
        while queue:
            model = self[queue.popleft()]
            models.add(model._name)
            for func in funcs:
                queue.extend(func(model))
        return models

    def load(self, cr, module):
        """ Load a given module in the registry, and return the names of the
        modified models.

        At the Python level, the modules are already loaded, but not yet on a
        per-registry level. This method populates a registry with the given
        module, i.e. it instantiates all the classes of the given module
        and registers them in the registry.

        """
        from .. import models

        lazy_property.reset_all(self)

        # Instantiate registered classes (via the MetaModel automatic discovery
        # or via explicit constructor call), and add them to the pool.
        model_names = []
        for cls in models.MetaModel.module_to_models.get(module.name, []):
            # models register themselves in self.models
            model = cls._build_model(self, cr)
            model_names.append(model._name)

        return self.descendants(model_names, '_inherit', '_inherits')

    def setup_models(self, cr):
        """ Complete the setup of models.
            This must be called after loading modules and before using the ORM.
        """
        lazy_property.reset_all(self)
        self.registry_invalidated = True
        env = odoo.api.Environment(cr, SUPERUSER_ID, {})

        if env.all.tocompute:
            _logger.error(
                "Remaining fields to compute before setting up registry: %s",
                env.all.tocompute,
                stack_info=True,
            )

        # add manual models
        if self._init_modules:
            env['ir.model']._add_manual_models()

        # prepare the setup on all models
        models = list(env.values())
        for model in models:
            model._prepare_setup()

        # do the actual setup from a clean state
        self._m2m = defaultdict(list)
        for model in models:
            model._setup_base()

        for model in models:
            model._setup_fields()

        for model in models:
            model._setup_complete()

        self.registry_invalidated = True

    @lazy_property
    def field_computed(self):
        """ Return a dict mapping each field to the fields computed by the same method. """
        computed = {}
        for model_name, Model in self.models.items():
            groups = defaultdict(list)
            for field in Model._fields.values():
                if field.compute:
                    computed[field] = group = groups[field.compute]
                    group.append(field)
            for fields in groups.values():
                if len({field.compute_sudo for field in fields}) > 1:
                    _logger.warning(
                        "%s: inconsistent 'compute_sudo' for computed fields: %s",
                        model_name, ", ".join(field.name for field in fields))
        return computed

    @lazy_property
    def field_triggers(self):
        # determine field dependencies
        dependencies = {}
        for Model in self.models.values():
            if Model._abstract:
                continue
            for field in Model._fields.values():
                # dependencies of custom fields may not exist; ignore that case
                exceptions = (Exception, ) if field.base_field.manual else ()
                with ignore(*exceptions):
                    dependencies[field] = set(field.resolve_depends(self))

        # determine transitive dependencies
        def transitive_dependencies(field, seen=[]):
            if field in seen:
                return
            for seq1 in dependencies[field]:
                yield seq1
                exceptions = (Exception, ) if field.base_field.manual else ()
                with ignore(*exceptions):
                    for seq2 in transitive_dependencies(
                            seq1[-1], seen + [field]):
                        yield concat(seq1[:-1], seq2)

        def concat(seq1, seq2):
            if seq1 and seq2:
                f1, f2 = seq1[-1], seq2[0]
                if f1.type == 'one2many' and f2.type == 'many2one' and \
                        f1.model_name == f2.comodel_name and f1.inverse_name == f2.name:
                    return concat(seq1[:-1], seq2[1:])
            return seq1 + seq2

        # determine triggers based on transitive dependencies
        triggers = {}
        for field in dependencies:
            for path in transitive_dependencies(field):
                if path:
                    tree = triggers
                    for label in reversed(path):
                        tree = tree.setdefault(label, {})
                    tree.setdefault(None, set()).add(field)

        return triggers

    def post_init(self, func, *args, **kwargs):
        """ Register a function to call at the end of :meth:`~.init_models`. """
        self._post_init_queue.append(partial(func, *args, **kwargs))

    def post_constraint(self, func, *args, **kwargs):
        """ Call the given function, and delay it if it fails during an upgrade. """
        try:
            func(*args, **kwargs)
        except Exception as e:
            if self._is_install:
                _schema.error(*e.args)
            else:
                _schema.info(*e.args)
                self._constraint_queue.append(partial(func, *args, **kwargs))

    def finalize_constraints(self):
        """ Call the delayed functions from above. """
        while self._constraint_queue:
            func = self._constraint_queue.popleft()
            try:
                func()
            except Exception as e:
                _schema.error(*e.args)

    def init_models(self, cr, model_names, context, install=True):
        """ Initialize a list of models (given by their name). Call methods
            ``_auto_init`` and ``init`` on each model to create or update the
            database tables supporting the models.

            The ``context`` may contain the following items:
             - ``module``: the name of the module being installed/updated, if any;
             - ``update_custom_fields``: whether custom fields should be updated.
        """
        if not model_names:
            return

        if 'module' in context:
            _logger.info('module %s: creating or updating database tables',
                         context['module'])
        elif context.get('models_to_check', False):
            _logger.info("verifying fields for every extended model")

        env = odoo.api.Environment(cr, SUPERUSER_ID, context)
        models = [env[model_name] for model_name in model_names]

        try:
            self._post_init_queue = deque()
            self._foreign_keys = {}
            self._is_install = install

            for model in models:
                model._auto_init()
                model.init()

            env['ir.model']._reflect_models(model_names)
            env['ir.model.fields']._reflect_fields(model_names)
            env['ir.model.fields.selection']._reflect_selections(model_names)
            env['ir.model.constraint']._reflect_constraints(model_names)

            self._ordinary_tables = None

            while self._post_init_queue:
                func = self._post_init_queue.popleft()
                func()

            self.check_indexes(cr, model_names)
            self.check_foreign_keys(cr)

            env['base'].flush()

            # make sure all tables are present
            self.check_tables_exist(cr)

        finally:
            del self._post_init_queue
            del self._foreign_keys
            del self._is_install

    def check_indexes(self, cr, model_names):
        """ Create or drop column indexes for the given models. """
        expected = [("%s_%s_index" % (Model._table, field.name), Model._table,
                     field.name, field.index) for model_name in model_names
                    for Model in [self.models[model_name]]
                    if Model._auto and not Model._abstract
                    for field in Model._fields.values()
                    if field.column_type and field.store]
        if not expected:
            return

        cr.execute("SELECT indexname FROM pg_indexes WHERE indexname IN %s",
                   [tuple(row[0] for row in expected)])
        existing = {row[0] for row in cr.fetchall()}

        for indexname, tablename, columnname, index in expected:
            if index and indexname not in existing:
                try:
                    with cr.savepoint(flush=False):
                        sql.create_index(cr, indexname, tablename,
                                         ['"%s"' % columnname])
                except psycopg2.OperationalError:
                    _schema.error("Unable to add index for %s", self)
            elif not index and indexname in existing:
                sql.drop_index(cr, indexname, tablename)

    def add_foreign_key(self,
                        table1,
                        column1,
                        table2,
                        column2,
                        ondelete,
                        model,
                        module,
                        force=True):
        """ Specify an expected foreign key. """
        key = (table1, column1)
        val = (table2, column2, ondelete, model, module)
        if force:
            self._foreign_keys[key] = val
        else:
            self._foreign_keys.setdefault(key, val)

    def check_foreign_keys(self, cr):
        """ Create or update the expected foreign keys. """
        if not self._foreign_keys:
            return

        # determine existing foreign keys on the tables
        query = """
            SELECT fk.conname, c1.relname, a1.attname, c2.relname, a2.attname, fk.confdeltype
            FROM pg_constraint AS fk
            JOIN pg_class AS c1 ON fk.conrelid = c1.oid
            JOIN pg_class AS c2 ON fk.confrelid = c2.oid
            JOIN pg_attribute AS a1 ON a1.attrelid = c1.oid AND fk.conkey[1] = a1.attnum
            JOIN pg_attribute AS a2 ON a2.attrelid = c2.oid AND fk.confkey[1] = a2.attnum
            WHERE fk.contype = 'f' AND c1.relname IN %s
        """
        cr.execute(query,
                   [tuple({table
                           for table, column in self._foreign_keys})])
        existing = {(table1, column1): (name, table2, column2, deltype)
                    for name, table1, column1, table2, column2, deltype in
                    cr.fetchall()}

        # create or update foreign keys
        for key, val in self._foreign_keys.items():
            table1, column1 = key
            table2, column2, ondelete, model, module = val
            conname = '%s_%s_fkey' % key
            deltype = sql._CONFDELTYPES[ondelete.upper()]
            spec = existing.get(key)
            if spec is None:
                sql.add_foreign_key(cr, table1, column1, table2, column2,
                                    ondelete)
                model.env['ir.model.constraint']._reflect_constraint(
                    model, conname, 'f', None, module)
            elif spec != (conname, table2, column2, deltype):
                sql.drop_constraint(cr, table1, spec[0])
                sql.add_foreign_key(cr, table1, column1, table2, column2,
                                    ondelete)
                model.env['ir.model.constraint']._reflect_constraint(
                    model, conname, 'f', None, module)

    def check_tables_exist(self, cr):
        """
        Verify that all tables are present and try to initialize those that are missing.
        """
        env = odoo.api.Environment(cr, SUPERUSER_ID, {})
        table2model = {
            model._table: name
            for name, model in env.items() if not model._abstract
        }
        missing_tables = set(table2model).difference(
            existing_tables(cr, table2model))

        if missing_tables:
            missing = {table2model[table] for table in missing_tables}
            _logger.info("Models have no table: %s.", ", ".join(missing))
            # recreate missing tables
            for name in missing:
                _logger.info("Recreate table of model %s.", name)
                env[name].init()
            env['base'].flush()
            # check again, and log errors if tables are still missing
            missing_tables = set(table2model).difference(
                existing_tables(cr, table2model))
            for table in missing_tables:
                _logger.error("Model %s has no table.", table2model[table])

    @lazy_property
    def cache(self):
        """ A cache for model methods. """
        # this lazy_property is automatically reset by lazy_property.reset_all()
        return LRU(8192)

    def _clear_cache(self):
        """ Clear the cache and mark it as invalidated. """
        self.cache.clear()
        self.cache_invalidated = True

    def clear_caches(self):
        """ Clear the caches associated to methods decorated with
        ``tools.ormcache`` or ``tools.ormcache_multi`` for all the models.
        """
        for model in self.models.values():
            model.clear_caches()

    def is_an_ordinary_table(self, model):
        """ Return whether the given model has an ordinary table. """
        if self._ordinary_tables is None:
            cr = model.env.cr
            query = """
                SELECT c.relname
                  FROM pg_class c
                  JOIN pg_namespace n ON (n.oid = c.relnamespace)
                 WHERE c.relname IN %s
                   AND c.relkind = 'r'
                   AND n.nspname = 'public'
            """
            tables = tuple(m._table for m in self.models.values())
            cr.execute(query, [tables])
            self._ordinary_tables = {row[0] for row in cr.fetchall()}

        return model._table in self._ordinary_tables

    def setup_signaling(self):
        """ Setup the inter-process signaling on this registry. """
        if self.in_test_mode():
            return

        with self.cursor() as cr:
            # The `base_registry_signaling` sequence indicates when the registry
            # must be reloaded.
            # The `base_cache_signaling` sequence indicates when all caches must
            # be invalidated (i.e. cleared).
            cr.execute(
                "SELECT sequence_name FROM information_schema.sequences WHERE sequence_name='base_registry_signaling'"
            )
            if not cr.fetchall():
                cr.execute(
                    "CREATE SEQUENCE base_registry_signaling INCREMENT BY 1 START WITH 1"
                )
                cr.execute("SELECT nextval('base_registry_signaling')")
                cr.execute(
                    "CREATE SEQUENCE base_cache_signaling INCREMENT BY 1 START WITH 1"
                )
                cr.execute("SELECT nextval('base_cache_signaling')")

            cr.execute(""" SELECT base_registry_signaling.last_value,
                                  base_cache_signaling.last_value
                           FROM base_registry_signaling, base_cache_signaling"""
                       )
            self.registry_sequence, self.cache_sequence = cr.fetchone()
            _logger.debug(
                "Multiprocess load registry signaling: [Registry: %s] [Cache: %s]",
                self.registry_sequence, self.cache_sequence)

    def check_signaling(self):
        """ Check whether the registry has changed, and performs all necessary
        operations to update the registry. Return an up-to-date registry.
        """
        if self.in_test_mode():
            return self

        with closing(self.cursor()) as cr:
            cr.execute(""" SELECT base_registry_signaling.last_value,
                                  base_cache_signaling.last_value
                           FROM base_registry_signaling, base_cache_signaling"""
                       )
            r, c = cr.fetchone()
            _logger.debug(
                "Multiprocess signaling check: [Registry - %s -> %s] [Cache - %s -> %s]",
                self.registry_sequence, r, self.cache_sequence, c)
            # Check if the model registry must be reloaded
            if self.registry_sequence != r:
                _logger.info(
                    "Reloading the model registry after database signaling.")
                self = Registry.new(self.db_name)
            # Check if the model caches must be invalidated.
            elif self.cache_sequence != c:
                _logger.info(
                    "Invalidating all model caches after database signaling.")
                self.clear_caches()
                self.cache_invalidated = False
            self.registry_sequence = r
            self.cache_sequence = c

        return self

    def signal_changes(self):
        """ Notifies other processes if registry or cache has been invalidated. """
        if self.registry_invalidated and not self.in_test_mode():
            _logger.info("Registry changed, signaling through the database")
            with closing(self.cursor()) as cr:
                cr.execute("select nextval('base_registry_signaling')")
                self.registry_sequence = cr.fetchone()[0]

        # no need to notify cache invalidation in case of registry invalidation,
        # because reloading the registry implies starting with an empty cache
        elif self.cache_invalidated and not self.in_test_mode():
            _logger.info(
                "At least one model cache has been invalidated, signaling through the database."
            )
            with closing(self.cursor()) as cr:
                cr.execute("select nextval('base_cache_signaling')")
                self.cache_sequence = cr.fetchone()[0]

        self.registry_invalidated = False
        self.cache_invalidated = False

    def reset_changes(self):
        """ Reset the registry and cancel all invalidations. """
        if self.registry_invalidated:
            with closing(self.cursor()) as cr:
                self.setup_models(cr)
                self.registry_invalidated = False
        if self.cache_invalidated:
            self.cache.clear()
            self.cache_invalidated = False

    @contextmanager
    def manage_changes(self):
        """ Context manager to signal/discard registry and cache invalidations. """
        try:
            yield self
            self.signal_changes()
        except Exception:
            self.reset_changes()
            raise

    def in_test_mode(self):
        """ Test whether the registry is in 'test' mode. """
        return self.test_cr is not None

    def enter_test_mode(self, cr):
        """ Enter the 'test' mode, where one cursor serves several requests. """
        assert self.test_cr is None
        self.test_cr = cr
        self.test_lock = threading.RLock()
        assert Registry._saved_lock is None
        Registry._saved_lock = Registry._lock
        Registry._lock = DummyRLock()

    def leave_test_mode(self):
        """ Leave the test mode. """
        assert self.test_cr is not None
        self.test_cr = None
        self.test_lock = None
        assert Registry._saved_lock is not None
        Registry._lock = Registry._saved_lock
        Registry._saved_lock = None

    def cursor(self):
        """ Return a new cursor for the database. The cursor itself may be used
            as a context manager to commit/rollback and close automatically.
        """
        if self.test_cr is not None:
            # When in test mode, we use a proxy object that uses 'self.test_cr'
            # underneath.
            return TestCursor(self.test_cr, self.test_lock)
        return self._db.cursor()
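
The two Postgres sequences created in setup_signaling() act as cheap cross-process signals: one worker bumps a sequence with nextval(), and every other worker notices on its next last_value poll. A minimal sketch of that pattern over a plain psycopg2 connection (names here are illustrative, not Odoo's API):

def bump_cache_signal(conn):
    # announce an invalidation to every other process
    with conn.cursor() as cr:
        cr.execute("SELECT nextval('base_cache_signaling')")
        return cr.fetchone()[0]

def cache_signal_changed(conn, last_seen):
    # cheap poll: compare the sequence's current value with the last one seen
    with conn.cursor() as cr:
        cr.execute("SELECT last_value FROM base_cache_signaling")
        (current,) = cr.fetchone()
    return current != last_seen, current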
Example no. 6
0
def emptyObjectCache(self):
    if self.setting('CacheObjectsForever', False):
        return {}
    else:
        return WeakValueDictionary()
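
Why a WeakValueDictionary works as a drop-in object cache here: an entry disappears as soon as the last strong reference to its value goes away, so the cache can never keep objects alive on its own. A small self-contained illustration (CPython reclaims immediately via reference counting; other interpreters may delay):

from weakref import WeakValueDictionary

class Obj:
    """Cached values must be weak-referenceable; plain ints/strs are not."""

cache = WeakValueDictionary()
obj = Obj()
cache['key'] = obj
assert 'key' in cache
del obj                      # drop the only strong reference
assert 'key' not in cache    # the entry evicted itself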
Example no. 7
0
class ServiceRegistry(metaclass=Singleton):
    _history = WeakValueDictionary()
    _registry = WeakValueDictionary()

    class History(list):
        # subclass the list type to enable weakref of histories.
        pass

    @classmethod
    def register(cls, service, func, base=None, force=False):
        # take a given business process function and implement via given user
        #  function
        named_service = None
        if (not isinstance(func, types.MethodType)) and \
                (not isinstance(func, types.FunctionType)):
            raise RuntimeError('Expected a function or method. Got %s' %
                               type(func))
        if isinstance(service, types.MethodType) or \
                isinstance(service, types.FunctionType):
            named_service = service.__name__
        elif isinstance(service, str):
            named_service = service
        elif isinstance(service, Sequence):
            # validate if the service is of type Enum and BusinessProcessEnum
            if len(service) == 3 and all(service):
                named_service = '%s%d%d' % tuple(service)
        if named_service is None:
            raise RuntimeError('Unable to derive a service name from %r'
                               % (service,))
        service_hash = sha512(str.encode(named_service)).hexdigest()
        if service_hash in cls._registry and not force:
            return
        if hasattr(base, named_service):
            previous = getattr(base, named_service)
            history = cls._history.get(service_hash, cls.History())
            history.append((copy.copy(previous), datetime.utcnow()))
            cls._history.update({service_hash: history})
            setattr(base, named_service, func)
            cls._registry[service_hash] = (named_service, func)
        elif not (base and isinstance(service, Sequence)):
            # override the function in globals while keeping previous
            # Caution! Avoid using functions only class methods should be
            # used as updating globals may have side effects
            globals().update({named_service: func})
        elif isinstance(service, tuple) and hasattr(service, '_fields') \
                and not base:
            # isinstance(service, namedtuple) is a bug: namedtuple is a
            # factory function, not a class, so namedtuples are duck-typed
            # here via their _fields attribute instead.
            business_process = next(
                (x for x in service._fields if x == 'process'), None)
            if business_process:
                cls._registry[service_hash] = (named_service, func)

    @classmethod
    def history(cls, service_hash=None, service=None):
        # return the history of a given service hash or named function;
        # _history is keyed by hash, so entries are looked up directly
        # (the original enumerate()-based lookup compared list indexes
        # against hash strings and could never match)
        history = None
        if service_hash:
            history = cls._history.get(service_hash)
        elif service and (isinstance(service, types.MethodType)
                          or isinstance(service, types.FunctionType)):
            service_hash = sha512(str.encode(service.__name__)).hexdigest()
            history = cls._history.get(service_hash)
        return history if history else cls._history

    @property
    def registry(self):
        return self._registry

    @registry.setter
    def registry(self, value):
        raise RuntimeError('Registry cannot be modified')

    @classmethod
    def get(cls, service_hash=None, service=None):
        # return the value of the service_hash or service if registered
        if isinstance(service_hash, str) and not service:
            return cls._registry.get(service_hash, None)
        elif (isinstance(service, types.MethodType) or
                isinstance(service, types.FunctionType)) and not service_hash:
            service_hash = sha512(str.encode(service.__name__)).hexdigest()
            return cls._registry.get(service_hash, None)
        elif isinstance(service_hash, str) and \
                (isinstance(service, types.MethodType) or
                 isinstance(service, types.FunctionType)):
            cal_hash = sha512(str.encode(service.__name__)).hexdigest()
            if not cal_hash == service_hash:
                raise RuntimeError('The hash or checksum provided does not '
                                   'match the specified function')
            return cls._registry.get(service_hash, None)
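
A hypothetical usage of the registry above, swapping a method on a base class while the previous implementation is recorded in the history (Base and greet are stand-ins, not part of the original module):

class Base:
    def greet(self):
        return 'hello'

def greet(self):
    return 'hi there'

ServiceRegistry.register('greet', greet, base=Base, force=True)
assert Base().greet() == 'hi there'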
Example no. 8
0
    def __init__(
        self,
        filename: Union[str, PathLike],
        *,
        map_size: int = 1024 * 1024 * 1024 * 1024,
        read_only: bool = False,
    ) -> None:
        """
        Creates a `TensorCache` by either opening an existing one on disk, or creating
        a new one. Its interface is almost exactly like a Python dictionary, where the
        keys are strings and the values are `torch.Tensor`.

        Parameters
        ----------
        filename: `str`
            Path to the location of the cache
        map_size: `int`, optional, defaults to 1TB
            This is the maximum size the cache will ever grow to. On reasonable operating
            systems, there is no penalty to making this a large value.
            `TensorCache` uses a memory-mapped file to store the data. When the file is
            first opened, we have to give the maximum size it can ever grow to. This is
            that number. Reasonable operating systems don't actually allocate that space
            until it is really needed.
        """
        self.lmdb_env: lmdb.Environment
        if hasattr(self, "lmdb_env"):
            # We're being initialized again after a cache hit in _active_tensor_caches, thanks
            # to __new__. In this case, we may have to upgrade to read/write, but other than
            # that we are good to go.
            if read_only:
                return
            if not self.read_only:
                return

            # Upgrade a read-only lmdb env to a read/write lmdb env.
            filename = self.lmdb_env.path()
            old_info = self.lmdb_env.info()

            self.lmdb_env.close()
            self.lmdb_env = lmdb.open(
                filename,
                map_size=old_info["map_size"],
                subdir=False,
                metasync=False,
                sync=True,
                readahead=False,
                meminit=False,
                readonly=False,
                lock=True,
            )
        else:
            filename = str(filename)

            cpu_count = os.cpu_count() or 1
            if os.path.exists(filename):
                if os.path.isfile(filename):
                    # If the file is not writable, set read_only to True, but issue a warning.
                    if not os.access(filename, os.W_OK):
                        if not read_only:
                            warnings.warn(
                                f"File '{filename}' is read-only, so cache will be read-only",
                                UserWarning,
                            )
                        read_only = True
                else:
                    # If it's not a file, raise an error.
                    raise ValueError(
                        "Expect a file, found a directory instead")

            use_lock = True
            if read_only:
                # Check if the lock file is writable. If it's not, then we won't be able to use the lock.

                # This is always how lmdb names the lock file.
                lock_filename = filename + "-lock"
                if os.path.isfile(lock_filename):
                    use_lock = os.access(lock_filename, os.W_OK)
                else:
                    # If the lock file doesn't exist yet, then the directory needs to be writable in
                    # order to create and use the lock file.
                    use_lock = os.access(os.path.dirname(lock_filename),
                                         os.W_OK)

            if not use_lock:
                warnings.warn(
                    f"Lacking permissions to use lock file on cache '{filename}'.\nUse at your own risk!",
                    UserWarning,
                )

            self.lmdb_env = lmdb.open(
                filename,
                subdir=False,
                map_size=map_size,
                max_readers=cpu_count * 4,
                max_spare_txns=cpu_count * 4,
                metasync=False,
                sync=True,
                readahead=False,
                meminit=False,
                readonly=read_only,
                lock=use_lock,
            )
            _active_tensor_caches[_unique_file_id(filename)] = self

            # We have another cache here that makes sure we return the same object for the same key. Without it,
            # you would get a different tensor, using different memory, every time you call __getitem__(), even
            # if you call it with the same key.
            # The downside is that we can't keep self.cache_cache up to date when multiple processes modify the
            # cache at the same time. We can guarantee though that it is up to date as long as processes either
            # write new values, or read existing ones.
            self.cache_cache: MutableMapping[str,
                                             Tensor] = WeakValueDictionary()
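
The comment above describes a read-through scheme: consult the weak dictionary first, fall back to LMDB, then remember the deserialized tensor for as long as the caller holds it. A hedged sketch of a __getitem__ under that scheme (_load_from_lmdb is a hypothetical helper, not the library's actual method):

    def __getitem__(self, key: str) -> Tensor:
        tensor = self.cache_cache.get(key)
        if tensor is None:
            # hypothetical: read the serialized bytes and deserialize them
            tensor = self._load_from_lmdb(key)
            self.cache_cache[key] = tensor
        return tensor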
Example no. 9
0
    def __init__(self, path, path_func=None):
        self._storage_path = os.path.join(path, "cache")
        self._path_func = path_func or (lambda _: _)

        self._file_locks = WeakValueDictionary()
        self._load_info_file()
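
Keeping the per-file locks in a WeakValueDictionary means a path's lock lives exactly as long as some caller still holds it; afterwards it is collected and recreated on demand. A minimal sketch of the pattern (bare _thread.lock objects are not weak-referenceable in CPython, hence the wrapper; a real implementation would also guard the lookup itself with a global lock):

import threading
from weakref import WeakValueDictionary

class _FileLock:
    """Weak-referenceable wrapper around a plain threading.Lock."""
    def __init__(self):
        self._lock = threading.Lock()
    def __enter__(self):
        self._lock.acquire()
        return self
    def __exit__(self, *exc):
        self._lock.release()

_locks = WeakValueDictionary()

def lock_for(path):
    lock = _locks.get(path)
    if lock is None:
        lock = _FileLock()
        _locks[path] = lock
    return lock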
Example no. 10
0
import logging
import applicationinsights
from weakref import WeakValueDictionary
from applicationinsights.channel import AsynchronousSender, AsynchronousQueue
from applicationinsights.channel import SynchronousSender, SynchronousQueue
from applicationinsights.channel import TelemetryChannel

enabled_instrumentation_keys = WeakValueDictionary()

def enable(instrumentation_key, *args, **kwargs):
    """Enables the Application Insights logging handler for the root logger for the supplied instrumentation key.
    Multiple calls to this function with different instrumentation keys result in multiple handler instances.

    .. code:: python

        import logging
        from applicationinsights.logging import enable

        # set up logging
        enable('<YOUR INSTRUMENTATION KEY GOES HERE>')

        # log something (this will be sent to the Application Insights service as a trace)
        logging.info('This is a message')

        # logging shutdown will cause a flush of all un-sent telemetry items
        # alternatively set up an async channel via enable('<YOUR INSTRUMENTATION KEY GOES HERE>', async_=True)

    Args:
        instrumentation_key (str): the instrumentation key to use while sending telemetry to the service.

    Keyword Args:
Example no. 11
0
The module acts as an intermediate layer that makes use of the jinja2 library
if it is available, but keeps running smoothly if it is not.
"""
try:
    # avoid exceptions if dependency is not yet satisfied
    from jinja2 import meta
    from jinja2 import Environment
    from jinja2 import TemplateSyntaxError
    from weakref import WeakValueDictionary

    from .utils import log_message

    # jinja environment to use to create templates
    _jinja_env = Environment()
    # template cache to reuse existing templates
    _templates_cache = WeakValueDictionary()

    def create(settings, key, simple_template):
        """Create a template from source and store a weak reference as cache.

        Instead of creating a `Template` per view, the source of the template
        is used to identify the template and reuse one `Template` object for
        each matching source read from the view's settings.

        If no custom (view-, syntax-, project-specific) template is set
        up anywhere this dictionary holds only one `Template` normally.

        Arguments:
            settings (Settings):
                A settings object which can be queried via `get` to read the
                source of the template.
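
Given that description, a plausible shape for the template lookup inside create() is the following hedged sketch (Environment.from_string is real jinja2 API; the surrounding names come from the snippet above):

def _template_for(source):
    template = _templates_cache.get(source)
    if template is None:
        template = _jinja_env.from_string(source)
        _templates_cache[source] = template
    return template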
Example no. 12
0
import decimal
import logging
import datetime
from weakref import WeakValueDictionary

from pyhdb.protocol.constants import type_codes
from pyhdb.exceptions import InterfaceError
from pyhdb.compat import PY26, PY2, PY3, with_metaclass, iter_range, int_types, \
    string_types, byte_type, text_type
from pyhdb.protocol.headers import WriteLobHeader

logger = logging.getLogger('pyhdb')
debug = logger.debug

# Dictionary: keys: numeric type_code, values: Type-(sub)classes (from below)
by_type_code = WeakValueDictionary()
# Dictionary: keys: Python type classes, values: Type-(sub)classes (from below)
by_python_type = WeakValueDictionary()


class TypeMeta(type):
    """
    Meta class for Type classes.
    """
    @staticmethod
    def _add_type_to_type_code_mapping(type_class, type_code):
        if not 0 <= type_code <= 127:
            raise InterfaceError(
                "%s type type_code must be between 0 and 127" %
                type_class.__name__)
        by_type_code[type_code] = type_class
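
Presumably TypeMeta.__new__ calls this helper so every Type subclass registers itself on definition. An illustrative reconstruction of that step (not pyhdb's actual code; the type_codes/python_type attribute names are assumptions):

class AutoRegisteringMeta(type):
    def __new__(mcs, name, bases, attrs):
        cls = super(AutoRegisteringMeta, mcs).__new__(mcs, name, bases, attrs)
        for code in attrs.get('type_codes', ()):
            by_type_code[code] = cls          # weak ref: dropped with the class
        if attrs.get('python_type') is not None:
            by_python_type[attrs['python_type']] = cls
        return cls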
Example no. 13
0
def __init__(self):
    self.lock = Lock()
    self._dict = WeakValueDictionary()
Example no. 14
0
class SearchEngine(object):
    """A search engine capable of performing multi-table searches."""

    _created_engines = WeakValueDictionary()

    @classmethod
    def get_created_engines(cls):
        """Returns all created search engines."""
        return list(cls._created_engines.items())

    def __init__(self,
                 engine_slug,
                 search_context_manager=search_context_manager):
        """Initializes the search engine."""
        # Check the slug is unique for this project.
        if engine_slug in SearchEngine._created_engines:
            raise SearchEngineError(
                "A search engine has already been created with the slug {engine_slug!r}"
                .format(engine_slug=engine_slug, ))
        # Initialize this engine.
        self._registered_models = {}
        self._engine_slug = engine_slug
        # Store the search context.
        self._search_context_manager = search_context_manager
        # Store a reference to this engine.
        self.__class__._created_engines[engine_slug] = self

    def is_registered(self, model):
        """Checks whether the given model is registered with this search engine."""
        return model in self._registered_models

    def register(self, model, adapter_cls=SearchAdapter, **field_overrides):
        """
        Registers the given model with this search engine.

        If the given model is already registered with this search engine, a
        RegistrationError will be raised.
        """
        # Add in custom live filters.
        if isinstance(model, QuerySet):
            live_queryset = model
            model = model.model
            field_overrides[
                "get_live_queryset"] = lambda self_: live_queryset.all()
        # Check for existing registration.
        if self.is_registered(model):
            raise RegistrationError(
                "{model!r} is already registered with this search engine".
                format(model=model, ))
        # Perform any customization.
        if field_overrides:
            # Conversion to str is needed because Python 2 doesn't accept unicode for class name
            adapter_cls = type(
                str("Custom") + adapter_cls.__name__, (adapter_cls, ),
                field_overrides)
        # Perform the registration.
        adapter_obj = adapter_cls(model)
        self._registered_models[model] = adapter_obj
        # Connect to the signalling framework.
        post_save.connect(self._post_save_receiver, model)
        pre_delete.connect(self._pre_delete_receiver, model)

    def unregister(self, model):
        """
        Unregisters the given model with this search engine.

        If the given model is not registered with this search engine, a RegistrationError
        will be raised.
        """
        # Add in custom live filters.
        if isinstance(model, QuerySet):
            model = model.model
        # Check for registration.
        if not self.is_registered(model):
            raise RegistrationError(
                "{model!r} is not registered with this search engine".format(
                    model=model, ))
        # Perform the unregistration.
        del self._registered_models[model]
        # Disconnect from the signalling framework.
        post_save.disconnect(self._post_save_receiver, model)
        pre_delete.disconnect(self._pre_delete_receiver, model)

    def get_registered_models(self):
        """Returns a sequence of models that have been registered with this search engine."""
        return list(self._registered_models.keys())

    def get_adapter(self, model):
        """Returns the adapter associated with the given model."""
        if self.is_registered(model):
            return self._registered_models[model]
        raise RegistrationError(
            "{model!r} is not registered with this search engine".format(
                model=model, ))

    def _get_entries_for_obj(self, obj):
        """Returns a queryset of entries associate with the given obj."""
        from django.contrib.contenttypes.models import ContentType
        from watson.models import SearchEntry, has_int_pk
        model = obj.__class__
        content_type = ContentType.objects.get_for_model(model)
        object_id = force_text(obj.pk)
        # Get the basic list of search entries.
        search_entries = SearchEntry.objects.filter(
            content_type=content_type,
            engine_slug=self._engine_slug,
        )
        if has_int_pk(model):
            # Do a fast indexed lookup.
            object_id_int = int(obj.pk)
            search_entries = search_entries.filter(
                object_id_int=object_id_int, )
        else:
            # Alas, have to do a slow unindexed lookup.
            object_id_int = None
            search_entries = search_entries.filter(object_id=object_id, )
        return object_id_int, search_entries

    def _update_obj_index_iter(self, obj):
        """Either updates the given object index, or yields an unsaved search entry."""
        from django.contrib.contenttypes.models import ContentType
        from watson.models import SearchEntry
        model = obj.__class__
        adapter = self.get_adapter(model)
        content_type = ContentType.objects.get_for_model(model)
        object_id = force_text(obj.pk)
        # Create the search entry data.
        search_entry_data = {
            "engine_slug": self._engine_slug,
            "title": adapter.get_title(obj),
            "description": adapter.get_description(obj),
            "content": adapter.get_content(obj),
            "url": adapter.get_url(obj),
            "meta_encoded": adapter.serialize_meta(obj),
        }
        # Try to get the existing search entry.
        object_id_int, search_entries = self._get_entries_for_obj(obj)
        # Attempt to update the search entries.
        update_count = search_entries.update(**search_entry_data)
        if update_count == 0:
            # This is the first time the entry was created.
            search_entry_data.update((
                ("content_type", content_type),
                ("object_id", object_id),
                ("object_id_int", object_id_int),
            ))
            yield SearchEntry(**search_entry_data)
        elif update_count > 1:
            # Oh no! Somehow we've got duplicated search entries!
            search_entries.exclude(id=search_entries[0].id).delete()

    def update_obj_index(self, obj):
        """Updates the search index for the given obj."""
        _bulk_save_search_entries(list(self._update_obj_index_iter(obj)))

    # Signalling hooks.

    def _post_save_receiver(self, instance, **kwargs):
        """Signal handler for when a registered model has been saved."""
        if self._search_context_manager.is_active():
            self._search_context_manager.add_to_context(self, instance)
        else:
            self.update_obj_index(instance)

    def _pre_delete_receiver(self, instance, **kwargs):
        """Signal handler for when a registered model has been deleted."""
        _, search_entries = self._get_entries_for_obj(instance)
        search_entries.delete()

    # Searching.

    def _create_model_filter(self, models, backend):
        """Creates a filter for the given model/queryset list."""
        from django.contrib.contenttypes.models import ContentType
        from watson.models import has_int_pk
        filters = Q()
        for model in models:
            filter = Q()
            # Process querysets.
            if isinstance(model, QuerySet):
                sub_queryset = model
                model = model.model
                queryset = sub_queryset.values_list("pk", flat=True)
                if has_int_pk(model):
                    filter &= Q(object_id_int__in=queryset, )
                else:
                    queryset = queryset.annotate(watson_pk_str=RawSQL(
                        backend.do_string_cast(
                            connections[queryset.db],
                            model._meta.pk.db_column or model._meta.pk.attname,
                        ), ()), ).values_list("watson_pk_str", flat=True)
                    filter &= Q(object_id__in=queryset, )
            # Add the model to the filter.
            content_type = ContentType.objects.get_for_model(model)
            filter &= Q(content_type=content_type, )
            # Combine with the other filters.
            filters |= filter
        return filters

    def _get_included_models(self, models):
        """Returns an iterable of models and querysets that should be included
        in the search query."""
        for model in models or self.get_registered_models():
            if isinstance(model, QuerySet):
                yield model
            else:
                adaptor = self.get_adapter(model)
                queryset = adaptor.get_live_queryset()
                if queryset is None:
                    yield model
                else:
                    yield queryset.all()

    def search(self,
               search_text,
               models=(),
               exclude=(),
               ranking=True,
               backend_name=None):
        """Performs a search using the given text, returning a queryset of SearchEntry."""
        from watson.models import SearchEntry
        backend = get_backend(backend_name=backend_name)
        # Check for blank search text.
        search_text = search_text.strip()
        if not search_text:
            return SearchEntry.objects.none()
        # Get the initial queryset.
        queryset = SearchEntry.objects.filter(engine_slug=self._engine_slug, )
        # Process the allowed models.
        queryset = queryset.filter(
            self._create_model_filter(
                self._get_included_models(models),
                backend)).exclude(self._create_model_filter(exclude, backend))
        # Perform the backend-specific full text match.
        queryset = backend.do_search(self._engine_slug, queryset, search_text)
        # Perform the backend-specific full-text ranking.
        if ranking:
            queryset = backend.do_search_ranking(self._engine_slug, queryset,
                                                 search_text)
        # Return the complete queryset.
        return queryset

    def filter(self, queryset, search_text, ranking=True, backend_name=None):
        """
        Filters the given model or queryset using the given text, returning the
        modified queryset.
        """
        # If the queryset is a model, get all of them.
        if isinstance(queryset, type) and issubclass(queryset, models.Model):
            queryset = queryset._default_manager.all()
        # Check for blank search text.
        search_text = search_text.strip()
        if not search_text:
            return queryset
        # Perform the backend-specific full text match.
        backend = get_backend(backend_name=backend_name)
        queryset = backend.do_filter(self._engine_slug, queryset, search_text)
        # Perform the backend-specific full-text ranking.
        if ranking:
            queryset = backend.do_filter_ranking(self._engine_slug, queryset,
                                                 search_text)
        # Return the complete queryset.
        return queryset
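
Typical use of the engine above, sketched with a hypothetical Article model (an illustration, not watson's documented quick-start):

engine = SearchEngine('articles')
engine.register(Article)                      # Article: a hypothetical Django model
results = engine.search('django')             # ranked queryset of SearchEntry
articles = engine.filter(Article, 'django')   # Article queryset, filtered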
Example no. 15
0
import hashlib
import json
from logging import getLogger
from weakref import WeakValueDictionary

import xarray as xr
import dask
import dask.array as da

from pyresample.ewa import fornav, ll2cr
from pyresample.geometry import SwathDefinition, AreaDefinition
from pyresample.kd_tree import XArrayResamplerNN
from pyresample.bilinear.xarr import XArrayResamplerBilinear
from satpy import CHUNK_SIZE
from satpy.config import config_search_paths, get_config_path

LOG = getLogger(__name__)

CACHE_SIZE = 10

resamplers_cache = WeakValueDictionary()


def hash_dict(the_dict, the_hash=None):
    if the_hash is None:
        the_hash = hashlib.sha1()
    the_hash.update(json.dumps(the_dict, sort_keys=True).encode('utf-8'))
    return the_hash
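
Because hash_dict returns the (possibly chained) hash object, it is convenient for building stable cache keys, e.g.:

key = hash_dict({'resampler': 'nearest', 'radius_of_influence': 50000})
key = hash_dict({'rows_per_scan': 16}, the_hash=key)  # fold in more parameters
resampler = resamplers_cache.get(key.hexdigest())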


def get_area_file():
    """Find area file(s) to use.

    The files are to be named `areas.yaml` or `areas.def`.
    """
    paths = config_search_paths('areas.yaml')
Example no. 16
0
import os
import threading
from weakref import WeakValueDictionary

from django.db.models.base import ModelBase

from evennia.utils import logger
from evennia.utils.utils import dbref, get_evennia_pids, to_str

from .manager import SharedMemoryManager

AUTO_FLUSH_MIN_INTERVAL = 60.0 * 5  # at least 5 mins between cache flushes

_GA = object.__getattribute__
_SA = object.__setattr__
_DA = object.__delattr__
_MONITOR_HANDLER = None

# References to db-updated objects are stored here so the
# main process can be informed to re-cache itself.
PROC_MODIFIED_COUNT = 0
PROC_MODIFIED_OBJS = WeakValueDictionary()

# get info about the current process and thread; determine if our
# current pid is different from the server PID (i.e.  # if we are in a
# subprocess or not)
_SELF_PID = os.getpid()
_SERVER_PID, _PORTAL_PID = get_evennia_pids()
_IS_SUBPROCESS = (_SERVER_PID and
                  _PORTAL_PID) and _SELF_PID not in (_SERVER_PID, _PORTAL_PID)
_IS_MAIN_THREAD = threading.currentThread().getName() == "MainThread"


class SharedMemoryModelBase(ModelBase):
    # CL: upstream had a __new__ method that skipped ModelBase's __new__ if
    # SharedMemoryModelBase was not in the model class's ancestors. It's not
    # clear what was the intended purpose, but skipping ModelBase.__new__
Example no. 17
0
    def on_poll_init(self, pool, w, hub):
        apply_after = hub.timer.apply_after
        apply_at = hub.timer.apply_at
        on_soft_timeout = pool.on_soft_timeout
        on_hard_timeout = pool.on_hard_timeout
        maintain_pool = pool.maintain_pool
        add_reader = hub.add_reader
        remove = hub.remove
        now = time.time
        cache = pool._pool._cache

        # did_start_ok will verify that pool processes were able to start,
        # but this will only work the first time we start, as
        # maxtasksperchild will mess up metrics.
        if not w.consumer.restart_count and not pool.did_start_ok():
            raise WorkerLostError('Could not start worker processes')

        # need to handle pool results before every task
        # since multiple tasks can be received in a single poll()
        hub.on_task.append(pool.maybe_handle_result)

        hub.update_readers(pool.readers)
        for handler, interval in pool.timers.iteritems():
            hub.timer.apply_interval(interval * 1000.0, handler)

        trefs = pool._tref_for_id = WeakValueDictionary()

        def _discard_tref(job):
            try:
                tref = trefs.pop(job)
                tref.cancel()
                del tref
            except (KeyError, AttributeError):
                pass  # out of scope

        def _on_hard_timeout(job):
            try:
                result = cache[job]
            except KeyError:
                pass  # job ready
            else:
                on_hard_timeout(result)
            finally:
                # remove tref
                _discard_tref(job)

        def _on_soft_timeout(job, soft, hard, hub):
            if hard:
                trefs[job] = apply_at(
                    now() + (hard - soft),
                    _on_hard_timeout,
                    (job, ),
                )
            try:
                result = cache[job]
            except KeyError:
                pass  # job ready
            else:
                on_soft_timeout(result)
            finally:
                if not hard:
                    # remove tref
                    _discard_tref(job)

        def on_timeout_set(R, soft, hard):
            if soft:
                trefs[R._job] = apply_after(
                    soft * 1000.0,
                    _on_soft_timeout,
                    (R._job, soft, hard, hub),
                )
            elif hard:
                trefs[R._job] = apply_after(hard * 1000.0, _on_hard_timeout,
                                            (R._job, ))

        def on_timeout_cancel(R):
            _discard_tref(R._job)

        pool.init_callbacks(
            on_process_up=lambda w: add_reader(w.sentinel, maintain_pool),
            on_process_down=lambda w: remove(w.sentinel),
            on_timeout_set=on_timeout_set,
            on_timeout_cancel=on_timeout_cancel,
        )
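
The WeakValueDictionary for _tref_for_id means a job's timer entry drops out of the map by itself once the timer wheel releases it, so _discard_tref never races a separate cleanup pass. A toy illustration of that bookkeeping (Entry stands in for the hub timer's entry objects, which must be weak-referenceable):

from weakref import WeakValueDictionary

class Entry:
    def __init__(self):
        self.cancelled = False
    def cancel(self):
        self.cancelled = True

_trefs = WeakValueDictionary()

def schedule(job):
    entry = Entry()          # real code: hub.timer.apply_after(...)
    _trefs[job] = entry
    return entry             # the timer wheel keeps the strong reference

def discard(job):
    try:
        _trefs.pop(job).cancel()
    except KeyError:
        pass                 # already fired or already collected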
Example no. 18
0
def _prepare(cls):
    super(WeakSharedMemoryModelBase, cls)._prepare()
    cls.__dbclass__.__instance_cache__ = WeakValueDictionary()
Example no. 19
0
class Path(object):
    """
    :class:`Path` represents a series of possibly disconnected,
    possibly closed, line and curve segments.

    The underlying storage is made up of two parallel numpy arrays:
      - *vertices*: an Nx2 float array of vertices
      - *codes*: an N-length uint8 array of vertex types

    These two arrays always have the same length in the first
    dimension.  For example, to represent a cubic curve, you must
    provide three vertices as well as three codes ``CURVE3``.

    The code types are:

       - ``STOP``   :  1 vertex (ignored)
           A marker for the end of the entire path (currently not
           required and ignored)

       - ``MOVETO`` :  1 vertex
            Pick up the pen and move to the given vertex.

       - ``LINETO`` :  1 vertex
            Draw a line from the current position to the given vertex.

       - ``CURVE3`` :  1 control point, 1 endpoint
          Draw a quadratic Bezier curve from the current position,
          with the given control point, to the given end point.

       - ``CURVE4`` :  2 control points, 1 endpoint
          Draw a cubic Bezier curve from the current position, with
          the given control points, to the given end point.

       - ``CLOSEPOLY`` : 1 vertex (ignored)
          Draw a line segment to the start point of the current
          polyline.

    Users of Path objects should not access the vertices and codes
    arrays directly.  Instead, they should use :meth:`iter_segments`
    or :meth:`cleaned` to get the vertex/code pairs.  This is important,
    since many :class:`Path` objects, as an optimization, do not store a
    *codes* array at all, but have a default one provided for them by
    :meth:`iter_segments`.

    Some behavior of Path objects can be controlled by rcParams. See
    the rcParams whose keys contain 'path.'.

    .. note::

        The vertices and codes arrays should be treated as
        immutable -- there are a number of optimizations and assumptions
        made up front in the constructor that will not change when the
        data changes.

    """

    # Path codes
    STOP = 0  # 1 vertex
    MOVETO = 1  # 1 vertex
    LINETO = 2  # 1 vertex
    CURVE3 = 3  # 2 vertices
    CURVE4 = 4  # 3 vertices
    CLOSEPOLY = 79  # 1 vertex

    #: A dictionary mapping Path codes to the number of vertices that the
    #: code expects.
    NUM_VERTICES_FOR_CODE = {
        STOP: 1,
        MOVETO: 1,
        LINETO: 1,
        CURVE3: 2,
        CURVE4: 3,
        CLOSEPOLY: 1
    }

    code_type = np.uint8

    def __init__(self,
                 vertices,
                 codes=None,
                 _interpolation_steps=1,
                 closed=False,
                 readonly=False):
        """
        Create a new path with the given vertices and codes.

        Parameters
        ----------
        vertices : array_like
            The ``(n, 2)`` float array, masked array or sequence of pairs
            representing the vertices of the path.

            If *vertices* contains masked values, they will be converted
            to NaNs which are then handled correctly by the Agg
            PathIterator and other consumers of path data, such as
            :meth:`iter_segments`.
        codes : {None, array_like}, optional
            n-length array of integers representing the codes of the path.
            If not None, codes must be the same length as vertices.
            If None, *vertices* will be treated as a series of line segments.
        _interpolation_steps : int, optional
            Used as a hint to certain projections, such as Polar, that this
            path should be linearly interpolated immediately before drawing.
            This attribute is primarily an implementation detail and is not
            intended for public use.
        closed : bool, optional
            If *codes* is None and closed is True, vertices will be treated as
            line segments of a closed polygon.
        readonly : bool, optional
            Makes the path behave in an immutable way and sets the vertices
            and codes as read-only arrays.
        """
        vertices = _to_unmasked_float_array(vertices)
        if vertices.ndim != 2 or vertices.shape[1] != 2:
            raise ValueError(
                "'vertices' must be a 2D list or array with shape Nx2")

        if codes is not None:
            codes = np.asarray(codes, self.code_type)
            if codes.ndim != 1 or len(codes) != len(vertices):
                raise ValueError("'codes' must be a 1D list or array with the "
                                 "same length of 'vertices'")
            if len(codes) and codes[0] != self.MOVETO:
                raise ValueError("The first element of 'code' must be equal "
                                 "to 'MOVETO' ({})".format(self.MOVETO))
        elif closed and len(vertices):
            codes = np.empty(len(vertices), dtype=self.code_type)
            codes[0] = self.MOVETO
            codes[1:-1] = self.LINETO
            codes[-1] = self.CLOSEPOLY

        self._vertices = vertices
        self._codes = codes
        self._interpolation_steps = _interpolation_steps
        self._update_values()

        if readonly:
            self._vertices.flags.writeable = False
            if self._codes is not None:
                self._codes.flags.writeable = False
            self._readonly = True
        else:
            self._readonly = False
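
    # Illustrative usage (not part of the class): a closed triangle built from
    # explicit codes; passing closed=True with codes=None builds the same array.
    #
    #   verts = [(0., 0.), (1., 0.), (0., 1.), (0., 0.)]
    #   codes = [Path.MOVETO, Path.LINETO, Path.LINETO, Path.CLOSEPOLY]
    #   tri = Path(verts, codes)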

    @classmethod
    def _fast_from_codes_and_verts(cls, verts, codes, internals=None):
        """
        Creates a Path instance without the expense of calling the constructor

        Parameters
        ----------
        verts : numpy array
        codes : numpy array
        internals : dict or None
            The attributes that the resulting path should have.
            Allowed keys are ``readonly``, ``should_simplify``,
            ``simplify_threshold``, ``has_nonfinite`` and
            ``interpolation_steps``.

        """
        internals = internals or {}
        pth = cls.__new__(cls)
        pth._vertices = _to_unmasked_float_array(verts)
        pth._codes = codes
        pth._readonly = internals.pop('readonly', False)
        pth.should_simplify = internals.pop('should_simplify', True)
        pth.simplify_threshold = (internals.pop(
            'simplify_threshold', rcParams['path.simplify_threshold']))
        pth._has_nonfinite = internals.pop('has_nonfinite', False)
        pth._interpolation_steps = internals.pop('interpolation_steps', 1)
        if internals:
            raise ValueError('Unexpected internals provided to '
                             '_fast_from_codes_and_verts: '
                             '{0}'.format('\n *'.join(internals)))
        return pth

    def _update_values(self):
        self._simplify_threshold = rcParams['path.simplify_threshold']
        self._should_simplify = (self._simplify_threshold > 0
                                 and rcParams['path.simplify']
                                 and len(self._vertices) >= 128
                                 and (self._codes is None
                                      or np.all(self._codes <= Path.LINETO)))
        self._has_nonfinite = not np.isfinite(self._vertices).all()

    @property
    def vertices(self):
        """
        The list of vertices in the `Path` as an Nx2 numpy array.
        """
        return self._vertices

    @vertices.setter
    def vertices(self, vertices):
        if self._readonly:
            raise AttributeError("Can't set vertices on a readonly Path")
        self._vertices = vertices
        self._update_values()

    @property
    def codes(self):
        """
        The list of codes in the `Path` as a 1-D numpy array.  Each
        code is one of `STOP`, `MOVETO`, `LINETO`, `CURVE3`, `CURVE4`
        or `CLOSEPOLY`.  For codes that correspond to more than one
        vertex (`CURVE3` and `CURVE4`), that code will be repeated so
        that the length of `self.vertices` and `self.codes` is always
        the same.
        """
        return self._codes

    @codes.setter
    def codes(self, codes):
        if self._readonly:
            raise AttributeError("Can't set codes on a readonly Path")
        self._codes = codes
        self._update_values()

    @property
    def simplify_threshold(self):
        """
        The fraction of a pixel difference below which vertices will
        be simplified out.
        """
        return self._simplify_threshold

    @simplify_threshold.setter
    def simplify_threshold(self, threshold):
        self._simplify_threshold = threshold

    @property
    def has_nonfinite(self):
        """
        `True` if the vertices array has nonfinite values.
        """
        return self._has_nonfinite

    @property
    def should_simplify(self):
        """
        `True` if the vertices array should be simplified.
        """
        return self._should_simplify

    @should_simplify.setter
    def should_simplify(self, should_simplify):
        self._should_simplify = should_simplify

    @property
    def readonly(self):
        """
        `True` if the `Path` is read-only.
        """
        return self._readonly

    def __copy__(self):
        """
        Returns a shallow copy of the `Path`, which will share the
        vertices and codes with the source `Path`.
        """
        import copy
        return copy.copy(self)

    copy = __copy__

    def __deepcopy__(self, memo=None):
        """
        Returns a deepcopy of the `Path`.  The `Path` will not be
        readonly, even if the source `Path` is.
        """
        try:
            codes = self.codes.copy()
        except AttributeError:
            codes = None
        return self.__class__(self.vertices.copy(),
                              codes,
                              _interpolation_steps=self._interpolation_steps)

    deepcopy = __deepcopy__

    @classmethod
    def make_compound_path_from_polys(cls, XY):
        """
        Make a compound path object to draw a number
        of polygons with equal numbers of sides XY is a (numpolys x
        numsides x 2) numpy array of vertices.  Return object is a
        :class:`Path`

        .. plot:: gallery/misc/histogram_path.py

        """

        # for each poly: 1 for the MOVETO, (numsides-1) for the LINETO, 1 for
        # the CLOSEPOLY; the vert for the closepoly is ignored but we still
        # need it to keep the codes aligned with the vertices
        numpolys, numsides, two = XY.shape
        if two != 2:
            raise ValueError("The third dimension of 'XY' must be 2")
        stride = numsides + 1
        nverts = numpolys * stride
        verts = np.zeros((nverts, 2))
        codes = np.ones(nverts, int) * cls.LINETO
        codes[0::stride] = cls.MOVETO
        codes[numsides::stride] = cls.CLOSEPOLY
        for i in range(numsides):
            verts[i::stride] = XY[:, i]

        return cls(verts, codes)

    @classmethod
    def make_compound_path(cls, *args):
        """Make a compound path from a list of Path objects."""
        # Handle an empty list in args (i.e. no args).
        if not args:
            return Path(np.empty([0, 2], dtype=np.float32))

        lengths = [len(x) for x in args]
        total_length = sum(lengths)

        vertices = np.vstack([x.vertices for x in args])
        # reshape returns a new array; the result must be rebound to take effect
        vertices = vertices.reshape((total_length, 2))

        codes = np.empty(total_length, dtype=cls.code_type)
        i = 0
        for path in args:
            if path.codes is None:
                codes[i] = cls.MOVETO
                codes[i + 1:i + len(path.vertices)] = cls.LINETO
            else:
                codes[i:i + len(path.codes)] = path.codes
            i += len(path.vertices)

        return cls(vertices, codes)
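
    # Illustrative usage (hedged): stitching two paths into one compound path.
    #
    #   both = Path.make_compound_path(path1, path2)   # path1/path2: any Paths
    #   assert len(both) == len(path1) + len(path2)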

    def __repr__(self):
        return "Path(%r, %r)" % (self.vertices, self.codes)

    def __len__(self):
        return len(self.vertices)

    def iter_segments(self,
                      transform=None,
                      remove_nans=True,
                      clip=None,
                      snap=False,
                      stroke_width=1.0,
                      simplify=None,
                      curves=True,
                      sketch=None):
        """
        Iterates over all of the curve segments in the path.  Each
        iteration returns a 2-tuple (*vertices*, *code*), where
        *vertices* is a sequence of 1 - 3 coordinate pairs, and *code* is
        one of the :class:`Path` codes.

        Additionally, this method can provide a number of standard
        cleanups and conversions to the path.

        Parameters
        ----------
        transform : None or :class:`~matplotlib.transforms.Transform` instance
            If not None, the given affine transformation will
            be applied to the path.
        remove_nans : {False, True}, optional
            If True, will remove all NaNs from the path and
            insert MOVETO commands to skip over them.
        clip : None or sequence, optional
            If not None, must be a four-tuple (x1, y1, x2, y2)
            defining a rectangle in which to clip the path.
        snap : None or bool, optional
            If None, auto-snap to pixels, to reduce
            fuzziness of rectilinear lines.  If True, force snapping, and
            if False, don't snap.
        stroke_width : float, optional
            The width of the stroke being drawn.  Needed
            as a hint for the snapping algorithm.
        simplify : None or bool, optional
            If True, perform simplification, to remove
            vertices that do not affect the appearance of the path.  If
            False, perform no simplification.  If None, use the
            should_simplify member variable.  See also the rcParams
            path.simplify and path.simplify_threshold.
        curves : {True, False}, optional
            If True, curve segments will be returned as curve
            segments.  If False, all curves will be converted to line
            segments.
        sketch : None or sequence, optional
            If not None, must be a 3-tuple of the form
            (scale, length, randomness), representing the sketch
            parameters.
        """
        if not len(self):
            return

        cleaned = self.cleaned(transform=transform,
                               remove_nans=remove_nans,
                               clip=clip,
                               snap=snap,
                               stroke_width=stroke_width,
                               simplify=simplify,
                               curves=curves,
                               sketch=sketch)
        vertices = cleaned.vertices
        codes = cleaned.codes
        len_vertices = vertices.shape[0]

        # Cache these object lookups for performance in the loop.
        NUM_VERTICES_FOR_CODE = self.NUM_VERTICES_FOR_CODE
        STOP = self.STOP

        i = 0
        while i < len_vertices:
            code = codes[i]
            if code == STOP:
                return
            else:
                num_vertices = NUM_VERTICES_FOR_CODE[code]
                curr_vertices = vertices[i:i + num_vertices].flatten()
                yield curr_vertices, code
                i += num_vertices
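
    # Illustrative usage (not part of the class): walking a path's segments.
    #
    #   for verts, code in path.iter_segments(curves=False):
    #       print(code, verts)   # curve segments flattened to LINETOs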

    def cleaned(self,
                transform=None,
                remove_nans=False,
                clip=None,
                quantize=False,
                simplify=False,
                curves=False,
                stroke_width=1.0,
                snap=False,
                sketch=None):
        """
        Cleans up the path according to the parameters returning a new
        Path instance.

        .. seealso::

            See :meth:`iter_segments` for details of the keyword arguments.

        Returns
        -------
        Path instance with cleaned up vertices and codes.

        """
        vertices, codes = _path.cleanup_path(self, transform, remove_nans,
                                             clip, snap, stroke_width,
                                             simplify, curves, sketch)
        internals = {
            'should_simplify': self.should_simplify and not simplify,
            'has_nonfinite': self.has_nonfinite and not remove_nans,
            'simplify_threshold': self.simplify_threshold,
            'interpolation_steps': self._interpolation_steps
        }
        return Path._fast_from_codes_and_verts(vertices, codes, internals)

    def transformed(self, transform):
        """
        Return a transformed copy of the path.

        .. seealso::

            :class:`matplotlib.transforms.TransformedPath`
                A specialized path class that will cache the
                transformed result and automatically update when the
                transform changes.
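
        For example, assuming an existing Path instance *path*::

            >>> from matplotlib.transforms import Affine2D
            >>> doubled = path.transformed(Affine2D().scale(2.0))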
        """
        return Path(transform.transform(self.vertices), self.codes,
                    self._interpolation_steps)

    def contains_point(self, point, transform=None, radius=0.0):
        """
        Returns whether the (closed) path contains the given point.

        If *transform* is not ``None``, the path will be transformed before
        performing the test.

        *radius* allows the path to be made slightly larger or smaller.
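
        For example, the unit rectangle contains its own center::

            >>> bool(Path.unit_rectangle().contains_point((0.5, 0.5)))
            True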
        """
        if transform is not None:
            transform = transform.frozen()
        # `point_in_path` does not handle nonlinear transforms, so we
        # transform the path ourselves.  If `transform` is affine, letting
        # `point_in_path` handle the transform avoids allocating an extra
        # buffer.
        if transform and not transform.is_affine:
            self = transform.transform_path(self)
            transform = None
        return _path.point_in_path(point[0], point[1], radius, self, transform)

    def contains_points(self, points, transform=None, radius=0.0):
        """
        Returns a bool array which is ``True`` if the (closed) path contains
        the corresponding point.

        If *transform* is not ``None``, the path will be transformed before
        performing the test.

        *radius* allows the path to be made slightly larger or smaller.
        """
        if transform is not None:
            transform = transform.frozen()
        result = _path.points_in_path(points, radius, self, transform)
        return result.astype('bool')

    def contains_path(self, path, transform=None):
        """
        Returns whether this (closed) path completely contains the given path.

        If *transform* is not ``None``, the path will be transformed before
        performing the test.
        """
        if transform is not None:
            transform = transform.frozen()
        return _path.path_in_path(self, None, path, transform)

    def get_extents(self, transform=None):
        """
        Returns the extents (*xmin*, *ymin*, *xmax*, *ymax*) of the
        path.

        Unlike computing the extents on the *vertices* alone, this
        algorithm will take into account the curves and deal with
        control points appropriately.
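
        For example, the extents of the unit rectangle span (0, 0) to
        (1, 1)::

            >>> bbox = Path.unit_rectangle().get_extents()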
        """
        from .transforms import Bbox
        path = self
        if transform is not None:
            transform = transform.frozen()
            if not transform.is_affine:
                path = self.transformed(transform)
                transform = None
        return Bbox(_path.get_path_extents(path, transform))

    def intersects_path(self, other, filled=True):
        """
        Returns *True* if this path intersects another given path.

        *filled*, when True, treats the paths as if they were filled.
        That is, if one path completely encloses the other,
        :meth:`intersects_path` will return True.
        """
        return _path.path_intersects_path(self, other, filled)

    def intersects_bbox(self, bbox, filled=True):
        """
        Returns *True* if this path intersects a given
        :class:`~matplotlib.transforms.Bbox`.

        *filled*, when True, treats the path as if it was filled.
        That is, if the path completely encloses the bounding box,
        :meth:`intersects_bbox` will return True.

        The bounding box is always considered filled.
        """
        return _path.path_intersects_rectangle(self, bbox.x0, bbox.y0, bbox.x1,
                                               bbox.y1, filled)

    def interpolated(self, steps):
        """
        Returns a new path resampled to length N x steps.  Does not
        currently handle interpolating curves.
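
        For example, a two-point line resampled with ``steps=3`` gains two
        intermediate vertices, giving ``(2 - 1) * 3 + 1 == 4`` in total::

            >>> len(Path([(0, 0), (1, 1)]).interpolated(3))
            4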
        """
        if steps == 1:
            return self

        vertices = simple_linear_interpolation(self.vertices, steps)
        codes = self.codes
        if codes is not None:
            new_codes = Path.LINETO * np.ones(((len(codes) - 1) * steps + 1, ))
            new_codes[0::steps] = codes
        else:
            new_codes = None
        return Path(vertices, new_codes)

    def to_polygons(self, transform=None, width=0, height=0, closed_only=True):
        """
        Convert this path to a list of polygons or polylines.  Each
        polygon/polyline is an Nx2 array of vertices.  In other words,
        each polygon has no ``MOVETO`` instructions or curves.  This
        is useful for displaying in backends that do not support
        compound paths or Bezier curves.

        If *width* and *height* are both non-zero then the lines will
        be simplified so that vertices outside of (0, 0), (width,
        height) will be clipped.

        If *closed_only* is `True` (default), only closed polygons,
        with the last point being the same as the first point, will be
        returned.  Any unclosed polylines in the path will be
        explicitly closed.  If *closed_only* is `False`, any unclosed
        polygons in the path will be returned as unclosed polygons,
        and the closed polygons will be returned explicitly closed by
        setting the last point to the same as the first point.
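
        For example, an open triangle comes back explicitly closed::

            >>> polys = Path([(0, 0), (1, 0), (0, 1)]).to_polygons()
            >>> len(polys[0])
            4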
        """
        if len(self.vertices) == 0:
            return []

        if transform is not None:
            transform = transform.frozen()

        if self.codes is None and (width == 0 or height == 0):
            vertices = self.vertices
            if closed_only:
                if len(vertices) < 3:
                    return []
                elif np.any(vertices[0] != vertices[-1]):
                    vertices = [*vertices, vertices[0]]

            if transform is None:
                return [vertices]
            else:
                return [transform.transform(vertices)]

        # Deal with the case where there are curves and/or multiple
        # subpaths (using extension code)
        return _path.convert_path_to_polygons(self, transform, width, height,
                                              closed_only)

    _unit_rectangle = None

    @classmethod
    def unit_rectangle(cls):
        """
        Return a :class:`Path` instance of the unit rectangle
        from (0, 0) to (1, 1).
        """
        if cls._unit_rectangle is None:
            cls._unit_rectangle = \
                cls([[0.0, 0.0], [1.0, 0.0], [1.0, 1.0], [0.0, 1.0],
                     [0.0, 0.0]],
                    [cls.MOVETO, cls.LINETO, cls.LINETO, cls.LINETO,
                     cls.CLOSEPOLY],
                    readonly=True)
        return cls._unit_rectangle

    _unit_regular_polygons = WeakValueDictionary()

    @classmethod
    def unit_regular_polygon(cls, numVertices):
        """
        Return a :class:`Path` instance for a unit regular polygon with the
        given *numVertices* and radius of 1.0, centered at (0, 0).
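
        For example, a triangle; the first vertex is repeated to close the
        polygon::

            >>> len(Path.unit_regular_polygon(3).vertices)
            4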
        """
        if numVertices <= 16:
            path = cls._unit_regular_polygons.get(numVertices)
        else:
            path = None
        if path is None:
            theta = (
                (2 * np.pi / numVertices) * np.arange(numVertices + 1)
                # This initial rotation is to make sure the polygon always
                # "points-up".
                + np.pi / 2)
            verts = np.column_stack((np.cos(theta), np.sin(theta)))
            codes = np.empty(numVertices + 1)
            codes[0] = cls.MOVETO
            codes[1:-1] = cls.LINETO
            codes[-1] = cls.CLOSEPOLY
            path = cls(verts, codes, readonly=True)
            if numVertices <= 16:
                cls._unit_regular_polygons[numVertices] = path
        return path

    _unit_regular_stars = WeakValueDictionary()

    @classmethod
    def unit_regular_star(cls, numVertices, innerCircle=0.5):
        """
        Return a :class:`Path` for a unit regular star with the given
        numVertices and radius of 1.0, centered at (0, 0).
        """
        if numVertices <= 16:
            path = cls._unit_regular_stars.get((numVertices, innerCircle))
        else:
            path = None
        if path is None:
            ns2 = numVertices * 2
            theta = (2 * np.pi / ns2 * np.arange(ns2 + 1))
            # This initial rotation is to make sure the polygon always
            # "points-up"
            theta += np.pi / 2.0
            r = np.ones(ns2 + 1)
            r[1::2] = innerCircle
            verts = np.vstack(
                (r * np.cos(theta), r * np.sin(theta))).transpose()
            codes = np.empty((ns2 + 1, ))
            codes[0] = cls.MOVETO
            codes[1:-1] = cls.LINETO
            codes[-1] = cls.CLOSEPOLY
            path = cls(verts, codes, readonly=True)
            if numVertices <= 16:
                cls._unit_regular_stars[(numVertices, innerCircle)] = path
        return path

    @classmethod
    def unit_regular_asterisk(cls, numVertices):
        """
        Return a :class:`Path` for a unit regular asterisk with the given
        numVertices and radius of 1.0, centered at (0, 0).
        """
        return cls.unit_regular_star(numVertices, 0.0)

    _unit_circle = None

    @classmethod
    def unit_circle(cls):
        """
        Return the readonly :class:`Path` of the unit circle.

        For most cases, :func:`Path.circle` will be what you want.

        """
        if cls._unit_circle is None:
            cls._unit_circle = cls.circle(center=(0, 0),
                                          radius=1,
                                          readonly=True)
        return cls._unit_circle

    @classmethod
    def circle(cls, center=(0., 0.), radius=1., readonly=False):
        """
        Return a Path representing a circle of a given radius and center.

        Parameters
        ----------
        center : pair of floats
            The center of the circle. Default ``(0, 0)``.
        radius : float
            The radius of the circle. Default is 1.
        readonly : bool
            Whether the created path should have the "readonly" argument
            set when creating the Path instance.

        Notes
        -----
        The circle is approximated using cubic Bezier curves.  This
        uses 8 splines around the circle using the approach presented
        here:

          Lancaster, Don.  `Approximating a Circle or an Ellipse Using Four
          Bezier Cubic Splines <http://www.tinaja.com/glib/ellipse4.pdf>`_.
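
        Examples
        --------
        A small off-origin circle::

            >>> dot = Path.circle(center=(0.5, 0.5), radius=0.25)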

        """
        MAGIC = 0.2652031
        SQRTHALF = np.sqrt(0.5)
        MAGIC45 = SQRTHALF * MAGIC

        vertices = np.array(
            [[0.0, -1.0],
             [MAGIC, -1.0],
             [SQRTHALF - MAGIC45, -SQRTHALF - MAGIC45],
             [SQRTHALF, -SQRTHALF],
             [SQRTHALF + MAGIC45, -SQRTHALF + MAGIC45],
             [1.0, -MAGIC],
             [1.0, 0.0],
             [1.0, MAGIC],
             [SQRTHALF + MAGIC45, SQRTHALF - MAGIC45],
             [SQRTHALF, SQRTHALF],
             [SQRTHALF - MAGIC45, SQRTHALF + MAGIC45],
             [MAGIC, 1.0],
             [0.0, 1.0],
             [-MAGIC, 1.0],
             [-SQRTHALF + MAGIC45, SQRTHALF + MAGIC45],
             [-SQRTHALF, SQRTHALF],
             [-SQRTHALF - MAGIC45, SQRTHALF - MAGIC45],
             [-1.0, MAGIC],
             [-1.0, 0.0],
             [-1.0, -MAGIC],
             [-SQRTHALF - MAGIC45, -SQRTHALF + MAGIC45],
             [-SQRTHALF, -SQRTHALF],
             [-SQRTHALF + MAGIC45, -SQRTHALF - MAGIC45],
             [-MAGIC, -1.0],
             [0.0, -1.0],
             [0.0, -1.0]],
            dtype=float)

        codes = [cls.CURVE4] * 26
        codes[0] = cls.MOVETO
        codes[-1] = cls.CLOSEPOLY
        return Path(vertices * radius + center, codes, readonly=readonly)

    _unit_circle_righthalf = None

    @classmethod
    def unit_circle_righthalf(cls):
        """
        Return a :class:`Path` of the right half
        of a unit circle. The circle is approximated using cubic Bezier
        curves.  This uses 4 splines around the circle using the approach
        presented here:

          Lancaster, Don.  `Approximating a Circle or an Ellipse Using Four
          Bezier Cubic Splines <http://www.tinaja.com/glib/ellipse4.pdf>`_.
        """
        if cls._unit_circle_righthalf is None:
            MAGIC = 0.2652031
            SQRTHALF = np.sqrt(0.5)
            MAGIC45 = SQRTHALF * MAGIC

            vertices = np.array([[0.0, -1.0], [MAGIC, -1.0],
                                 [SQRTHALF - MAGIC45, -SQRTHALF - MAGIC45],
                                 [SQRTHALF, -SQRTHALF],
                                 [SQRTHALF + MAGIC45, -SQRTHALF + MAGIC45],
                                 [1.0, -MAGIC], [1.0, 0.0], [1.0, MAGIC],
                                 [SQRTHALF + MAGIC45, SQRTHALF - MAGIC45],
                                 [SQRTHALF, SQRTHALF],
                                 [SQRTHALF - MAGIC45, SQRTHALF + MAGIC45],
                                 [MAGIC, 1.0], [0.0, 1.0], [0.0, -1.0]], float)

            codes = cls.CURVE4 * np.ones(14)
            codes[0] = cls.MOVETO
            codes[-1] = cls.CLOSEPOLY

            cls._unit_circle_righthalf = cls(vertices, codes, readonly=True)
        return cls._unit_circle_righthalf

    @classmethod
    def arc(cls, theta1, theta2, n=None, is_wedge=False):
        """
        Return an arc on the unit circle from angle
        *theta1* to angle *theta2* (in degrees).

        *theta2* is unwrapped to produce the shortest arc within 360 degrees.
        That is, if *theta2* > *theta1* + 360, the arc will be from *theta1* to
        *theta2* - 360 and not a full circle plus some extra overlap.

        If *n* is provided, it is the number of spline segments to make.
        If *n* is not provided, the number of spline segments is
        determined based on the delta between *theta1* and *theta2*.

           Masionobe, L.  2003.  `Drawing an elliptical arc using
           polylines, quadratic or cubic Bezier curves
           <http://www.spaceroots.org/documents/ellipse/index.html>`_.
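
        For example, a quarter circle from 0 to 90 degrees::

            >>> quarter = Path.arc(0, 90)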
        """
        halfpi = np.pi * 0.5

        eta1 = theta1
        eta2 = theta2 - 360 * np.floor((theta2 - theta1) / 360)
        # Ensure 2pi range is not flattened to 0 due to floating-point errors,
        # but don't try to expand existing 0 range.
        if theta2 != theta1 and eta2 <= eta1:
            eta2 += 360
        eta1, eta2 = np.deg2rad([eta1, eta2])

        # number of curve segments to make
        if n is None:
            n = int(2**np.ceil((eta2 - eta1) / halfpi))
        if n < 1:
            raise ValueError("n must be >= 1 or None")

        deta = (eta2 - eta1) / n
        t = np.tan(0.5 * deta)
        alpha = np.sin(deta) * (np.sqrt(4.0 + 3.0 * t * t) - 1) / 3.0

        steps = np.linspace(eta1, eta2, n + 1, True)
        cos_eta = np.cos(steps)
        sin_eta = np.sin(steps)

        xA = cos_eta[:-1]
        yA = sin_eta[:-1]
        xA_dot = -yA
        yA_dot = xA

        xB = cos_eta[1:]
        yB = sin_eta[1:]
        xB_dot = -yB
        yB_dot = xB

        if is_wedge:
            length = n * 3 + 4
            vertices = np.zeros((length, 2), float)
            codes = cls.CURVE4 * np.ones((length, ), cls.code_type)
            vertices[1] = [xA[0], yA[0]]
            codes[0:2] = [cls.MOVETO, cls.LINETO]
            codes[-2:] = [cls.LINETO, cls.CLOSEPOLY]
            vertex_offset = 2
            end = length - 2
        else:
            length = n * 3 + 1
            vertices = np.empty((length, 2), float)
            codes = cls.CURVE4 * np.ones((length, ), cls.code_type)
            vertices[0] = [xA[0], yA[0]]
            codes[0] = cls.MOVETO
            vertex_offset = 1
            end = length

        vertices[vertex_offset:end:3, 0] = xA + alpha * xA_dot
        vertices[vertex_offset:end:3, 1] = yA + alpha * yA_dot
        vertices[vertex_offset + 1:end:3, 0] = xB - alpha * xB_dot
        vertices[vertex_offset + 1:end:3, 1] = yB - alpha * yB_dot
        vertices[vertex_offset + 2:end:3, 0] = xB
        vertices[vertex_offset + 2:end:3, 1] = yB

        return cls(vertices, codes, readonly=True)

    @classmethod
    def wedge(cls, theta1, theta2, n=None):
        """
        Return a wedge of the unit circle from angle
        *theta1* to angle *theta2* (in degrees).

        *theta2* is unwrapped to produce the shortest wedge within 360 degrees.
        That is, if *theta2* > *theta1* + 360, the wedge will be from *theta1*
        to *theta2* - 360 and not a full circle plus some extra overlap.

        If *n* is provided, it is the number of spline segments to make.
        If *n* is not provided, the number of spline segments is
        determined based on the delta between *theta1* and *theta2*.
        """
        return cls.arc(theta1, theta2, n, True)

    @staticmethod
    @lru_cache(8)
    def hatch(hatchpattern, density=6):
        """
        Given a hatch specifier, *hatchpattern*, generates a Path that
        can be used in a repeated hatching pattern.  *density* is the
        number of lines per unit square.
        """
        from matplotlib.hatch import get_path
        return (get_path(hatchpattern, density)
                if hatchpattern is not None else None)

    def clip_to_bbox(self, bbox, inside=True):
        """
        Clip the path to the given bounding box.

        The path must be made up of one or more closed polygons.  This
        algorithm will not behave correctly for unclosed paths.

        If *inside* is `True`, clip to the inside of the box, otherwise
        to the outside of the box.
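
        For example, keeping only the first quadrant of the unit circle::

            >>> from matplotlib.transforms import Bbox
            >>> quadrant = Path.unit_circle().clip_to_bbox(Bbox([[0, 0], [1, 1]]))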
        """
        # Use make_compound_path_from_polys
        verts = _path.clip_path_to_rect(self, bbox, inside)
        paths = [Path(poly) for poly in verts]
        return self.make_compound_path(*paths)
Example n. 20
at_exit(server.shut_down)


class Monitor(object):
    def __init__(self, destructor):
        self.destructor = destructor

    def __del__(self):
        try:
            self.destructor()
        except Exception:
            # never let a finalizer raise
            pass
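

# Illustrative sketch (not part of the original module): a Monitor runs an
# arbitrary cleanup callback once the monitor instance itself is collected:
#
#     m = Monitor(lambda: print('released'))
#     del m  # the destructor fires when the instance is garbage-collected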


resource_types = {}
resources = WeakValueDictionary()
resource_ids = WeakKeyDictionary()
resource_monitors = WeakKeyDictionary()


def reference(resource):
    return resource_ids[resource]


def dereference(resource_id):
    if resource_id in resources:
        return resources[resource_id]
    type_names = resource_id[0].split(':')[::-1] + ['ID']
    best_type_name = next(n for n in type_names if n in resource_types)
    resource = object.__new__(resource_types[best_type_name])
    monitor = Monitor(lambda: server.release(resource_id))
Example n. 21
class ServiceRunner(metaclass=Singleton):
    _action_requests = WeakValueDictionary()
    _performance = WeakValueDictionary()

    @classmethod
    def register(cls,
                 action,
                 params,
                 requester,
                 performer,
                 conditions=None,
                 target=None):
        """
        Register a given action that will be executed when specified
        conditions are reached
        :param action: bespoke function to execute
        :param params: tuple of positional and keyword arguments needed by
        the action object
        :param requester: requesting entity or source address
        :param performer: performing entity or target address
        :param conditions: specified boolean conditions or flags for
        deciding if action will be performed or not. The evaluation uses a
        boolean AND operation for all conditions.
        Ensure target condition for executing function is written to be TRUE
        when duly compared
        :param target: target outcome if action was specified.
        The target may be provided as a means of asserting the action behaved
        as expected. An equality comparison is carried to assert the target
        :return: None
        """
        action_request = namedtuple(
            'ActionRequest',
            ('action', 'params', 'requester', 'performer', 'conditions',
             'target', 'timestamp', 'status', 'outcome'))
        if isinstance(action, (types.FunctionType, types.MethodType)):
            action_hash = sha512(str.encode(action.__name__)).hexdigest()
            # NOTE: WeakValueDictionary requires weakly-referenceable values;
            # tuple subclasses such as namedtuples do not support weak
            # references, so a regular class would be needed for the stored
            # records to actually persist here.
            cls._action_requests[action_hash] = \
                action_request(action, params, requester, performer,
                               conditions, target, datetime.now().timestamp(),
                               None, None)

    @classmethod
    def execute(cls, action):
        outcome = None
        execution = None
        target = None
        action_performed = namedtuple('ActionPerformed', ('status', 'outcome'))
        if isinstance(action, str):
            if action in cls._action_requests:
                execution = cls._action_requests[action]
                # no conditions means the action runs unconditionally
                if all(execution.conditions or ()):
                    outcome = execution.action(*execution.params)
                    target = execution.target
                    # assign a new action_performed record to the key
                    cls._performance[action] = action_performed(
                        status=None, outcome=outcome)
        elif isinstance(action, (types.FunctionType, types.MethodType)):
            action_hash = sha512(str.encode(action.__name__)).hexdigest()
            execution = cls._action_requests.get(action_hash, None)
            if execution:
                if all(execution.conditions or ()):
                    outcome = execution.action(*execution.params)
                    target = execution.target
                    cls._performance[action_hash] = action_performed(
                        status=None, outcome=outcome)
        if execution and (outcome == target):
            return outcome
        else:
            name = action if isinstance(action, str) else getattr(
                action, '__name__', repr(action))
            raise AssertionError('Target outcome not achieved upon execution '
                                 'of action %s' % name)
Example n. 22
from __future__ import unicode_literals
from builtins import range

import random
import string
from threading import RLock
from weakref import WeakValueDictionary

objects = WeakValueDictionary()
lock = RLock()


def next_id(object2add=None):
    with lock:
        if not objects:
            id = 1
        else:
            id = max(objects.keys()) + 1
        if object2add is not None:
            objects[id] = object2add
        return id


def change_id(old, new):
    with lock:
        if old not in objects:
            return
        if new in objects:
            raise ValueError("ID already exists in ID manager")
        objects[new] = objects[old]
        objects.pop(old)


# Note: these ids are only pseudo-unique; a freed id can be handed out again.
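

# A minimal usage sketch; ``Widget`` is a throwaway class invented for this
# demo (any weakly-referenceable object works):
if __name__ == '__main__':
    class Widget(object):
        pass

    w = Widget()
    wid = next_id(w)         # registers w under a fresh integer id
    change_id(wid, wid + 1)  # re-keys the same object
    assert objects[wid + 1] is w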
Example n. 23
try:
    from typing import AsyncGenerator
except ImportError:
    AsyncGenerator = None

try:
    from inspect import isasyncgenfunction
except ImportError:

    def isasyncgenfunction(func):
        return False


_type_hints_map = WeakKeyDictionary()  # type: Dict[FunctionType, Dict[str, Any]]
_functions_map = WeakValueDictionary()  # type: Dict[CodeType, FunctionType]

T_CallableOrType = TypeVar('T_CallableOrType', Callable, Type[Any])
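

# Illustrative sketch (not part of the original module): the weak mappings
# above let one recover a function from its code object without keeping the
# function alive artificially:
#
#     def sample():
#         pass
#
#     _functions_map[sample.__code__] = sample
#     assert _functions_map[sample.__code__] is sample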


class ForwardRefPolicy(Enum):
    """Defines how unresolved forward references are handled."""

    ERROR = 1  #: propagate the :exc:`NameError` from :func:`~typing.get_type_hints`
    WARN = 2  #: remove the annotation and emit a TypeHintWarning
    #: replace the annotation with the argument's class if the qualified name matches, else remove
    #: the annotation
    GUESS = 3


class TypeHintWarning(UserWarning):
Example n. 24
def __init__(self):
    self.TOPLEVEL = WeakValueDictionary()
    self.PENDING = set()  # set of references with pending callbacks
Example n. 25
class Registry(Mapping):
    """ Model registry for a particular database.

    The registry is essentially a mapping between model names and model classes.
    There is one registry instance per database.

    """
    _lock = threading.RLock()
    _saved_lock = None

    # a cache for model classes, indexed by their base classes
    model_cache = WeakValueDictionary()

    @lazy_classproperty
    def registries(cls):
        """ A mapping from database names to registries. """
        size = config.get('registry_lru_size', None)
        if not size:
            # Size the LRU depending of the memory limits
            if os.name != 'posix':
                # cannot specify the memory limit soft on windows...
                size = 42
            else:
                # A registry takes 10MB of memory on average, so we reserve
                # 10Mb (registry) + 5Mb (working memory) per registry
                avgsz = 15 * 1024 * 1024
                size = int(config['limit_memory_soft'] / avgsz)
        return LRU(size)

    def __new__(cls, db_name):
        """ Return the registry for the given database name."""
        with cls._lock:
            try:
                return cls.registries[db_name]
            except KeyError:
                return cls.new(db_name)
            finally:
                # set db tracker - cleaned up at the WSGI dispatching phase in
                # odoo.service.wsgi_server.application
                threading.current_thread().dbname = db_name

    @classmethod
    def new(cls, db_name, force_demo=False, status=None, update_module=False):
        """ Create and return a new registry for the given database name. """
        with cls._lock:
            with odoo.api.Environment.manage():
                registry = object.__new__(cls)
                registry.init(db_name)

                # Initializing a registry will call general code which will in
                # turn call Registry() to obtain the registry being initialized.
                # Make it available in the registries dictionary then remove it
                # if an exception is raised.
                cls.delete(db_name)
                cls.registries[db_name] = registry
                try:
                    registry.setup_signaling()
                    # This should be a method on Registry
                    try:
                        odoo.modules.load_modules(registry._db, force_demo,
                                                  status, update_module)
                    except Exception:
                        odoo.modules.reset_modules_state(db_name)
                        raise
                except Exception:
                    _logger.exception('Failed to load registry')
                    del cls.registries[db_name]
                    raise

                # load_modules() above can replace the registry by calling
                # indirectly new() again (when modules have to be uninstalled).
                # Yeah, crazy.
                registry = cls.registries[db_name]

            registry._init = False
            registry.ready = True
            registry.registry_invalidated = bool(update_module)

        return registry

    def init(self, db_name):
        self.models = {}  # model name/model instance mapping
        self._sql_error = {}
        self._init = True
        self._assertion_report = assertion_report.assertion_report()
        self._fields_by_model = None
        self._post_init_queue = deque()

        # modules fully loaded (maintained during init phase by `loading` module)
        self._init_modules = set()
        self.updated_modules = []  # installed/updated modules
        self.loaded_xmlids = set()

        self.db_name = db_name
        self._db = odoo.sql_db.db_connect(db_name)

        # cursor for test mode; None means "normal" mode
        self.test_cr = None
        self.test_lock = None

        # Flags indicating the registry's load state:
        self.loaded = False  # whether all modules are loaded
        self.ready = False  # whether everything is set up

        # Inter-process signaling:
        # The `base_registry_signaling` sequence indicates the whole registry
        # must be reloaded.
        # The `base_cache_signaling` sequence indicates all caches must be
        # invalidated (i.e. cleared).
        self.registry_sequence = None
        self.cache_sequence = None

        # Flags indicating invalidation of the registry or the cache.
        self.registry_invalidated = False
        self.cache_invalidated = False

        with closing(self.cursor()) as cr:
            has_unaccent = odoo.modules.db.has_unaccent(cr)
            if odoo.tools.config['unaccent'] and not has_unaccent:
                _logger.warning(
                    "The option --unaccent was given but no unaccent() function was found in database."
                )
            self.has_unaccent = odoo.tools.config['unaccent'] and has_unaccent

    @classmethod
    def delete(cls, db_name):
        """ Delete the registry linked to a given database. """
        with cls._lock:
            if db_name in cls.registries:
                cls.registries.pop(db_name)

    @classmethod
    def delete_all(cls):
        """ Delete all the registries. """
        with cls._lock:
            for db_name in list(cls.registries.keys()):
                cls.delete(db_name)

    #
    # Mapping abstract methods implementation
    # => mixin provides methods keys, items, values, get, __eq__, and __ne__
    #
    def __len__(self):
        """ Return the size of the registry. """
        return len(self.models)

    def __iter__(self):
        """ Return an iterator over all model names. """
        return iter(self.models)

    def __getitem__(self, model_name):
        """ Return the model with the given name or raise KeyError if it doesn't exist."""
        return self.models[model_name]

    def __call__(self, model_name):
        """ Same as ``self[model_name]``. """
        return self.models[model_name]

    def __setitem__(self, model_name, model):
        """ Add or replace a model in the registry."""
        self.models[model_name] = model

    @lazy_property
    def field_sequence(self):
        """ Return a function mapping a field to an integer. The value of a
            field is guaranteed to be strictly greater than the value of the
            field's dependencies.
        """
        # map fields on their dependents
        dependents = {
            field:
            set(dep for dep, _ in model._field_triggers[field] if dep != field)
            for model in self.values() for field in model._fields.values()
        }
        # sort them topologically, and associate a sequence number to each field
        mapping = {
            field: num
            for num, field in enumerate(reversed(topological_sort(dependents)))
        }
        return mapping.get

    def descendants(self, model_names, *kinds):
        """ Return the models corresponding to ``model_names`` and all those
        that inherit/inherits from them.
        """
        assert all(kind in ('_inherit', '_inherits') for kind in kinds)
        funcs = [attrgetter(kind + '_children') for kind in kinds]

        models = OrderedSet()
        queue = deque(model_names)
        while queue:
            model = self[queue.popleft()]
            models.add(model._name)
            for func in funcs:
                queue.extend(func(model))
        return models

    def load(self, cr, module):
        """ Load a given module in the registry, and return the names of the
        modified models.

        At the Python level, the modules are already loaded, but not yet on a
        per-registry level. This method populates the registry with the given
        module, i.e. it instantiates all the classes of the given module
        and registers them in the registry.

        """
        from .. import models

        lazy_property.reset_all(self)

        # Instantiate registered classes (via the MetaModel automatic discovery
        # or via explicit constructor call), and add them to the pool.
        model_names = []
        for cls in models.MetaModel.module_to_models.get(module.name, []):
            # models register themselves in self.models
            model = cls._build_model(self, cr)
            model_names.append(model._name)

        return self.descendants(model_names, '_inherit', '_inherits')

    def setup_models(self, cr):
        """ Complete the setup of models.
            This must be called after loading modules and before using the ORM.
        """
        lazy_property.reset_all(self)
        env = odoo.api.Environment(cr, SUPERUSER_ID, {})

        # add manual models
        if self._init_modules:
            env['ir.model']._add_manual_models()

        # prepare the setup on all models
        models = list(env.values())
        for model in models:
            model._prepare_setup()

        # do the actual setup from a clean state
        self._m2m = {}
        for model in models:
            model._setup_base()

        for model in models:
            model._setup_fields()

        for model in models:
            model._setup_complete()

        self.registry_invalidated = True

    def post_init(self, func, *args, **kwargs):
        """ Register a function to call at the end of :meth:`~.init_models`. """
        self._post_init_queue.append(partial(func, *args, **kwargs))

    def init_models(self, cr, model_names, context):
        """ Initialize a list of models (given by their name). Call methods
            ``_auto_init`` and ``init`` on each model to create or update the
            database tables supporting the models.

            The ``context`` may contain the following items:
             - ``module``: the name of the module being installed/updated, if any;
             - ``update_custom_fields``: whether custom fields should be updated.
        """
        if 'module' in context:
            _logger.info('module %s: creating or updating database tables',
                         context['module'])
        elif context.get('models_to_check', False):
            _logger.info("verifying fields for every extended model")

        env = odoo.api.Environment(cr, SUPERUSER_ID, context)
        models = [env[model_name] for model_name in model_names]

        # make sure the queue does not contain some leftover from a former call
        self._post_init_queue.clear()

        for model in models:
            model._auto_init()
            model.init()

        while self._post_init_queue:
            func = self._post_init_queue.popleft()
            func()

        if models:
            models[0].recompute()

        # make sure all tables are present
        self.check_tables_exist(cr)

    def check_tables_exist(self, cr):
        """
        Verify that all tables are present and try to initialize those that are missing.
        """
        env = odoo.api.Environment(cr, SUPERUSER_ID, {})
        table2model = {
            model._table: name
            for name, model in env.items() if not model._abstract
        }
        missing_tables = set(table2model).difference(
            existing_tables(cr, table2model))

        if missing_tables:
            missing = {table2model[table] for table in missing_tables}
            _logger.warning("Models have no table: %s.", ", ".join(missing))
            # recreate missing tables following model dependencies
            deps = {name: model._depends for name, model in env.items()}
            for name in topological_sort(deps):
                if name in missing:
                    _logger.info("Recreate table of model %s.", name)
                    env[name].init()
            # check again, and log errors if tables are still missing
            missing_tables = set(table2model).difference(
                existing_tables(cr, table2model))
            for table in missing_tables:
                _logger.error("Model %s has no table.", table2model[table])

    @lazy_property
    def cache(self):
        """ A cache for model methods. """
        # this lazy_property is automatically reset by lazy_property.reset_all()
        return LRU(8192)

    def _clear_cache(self):
        """ Clear the cache and mark it as invalidated. """
        self.cache.clear()
        self.cache_invalidated = True

    def clear_caches(self):
        """ Clear the caches associated to methods decorated with
        ``tools.ormcache`` or ``tools.ormcache_multi`` for all the models.
        """
        for model in self.models.values():
            model.clear_caches()

    def setup_signaling(self):
        """ Setup the inter-process signaling on this registry. """
        if self.in_test_mode():
            return

        with self.cursor() as cr:
            # The `base_registry_signaling` sequence indicates when the registry
            # must be reloaded.
            # The `base_cache_signaling` sequence indicates when all caches must
            # be invalidated (i.e. cleared).
            cr.execute(
                "SELECT sequence_name FROM information_schema.sequences WHERE sequence_name='base_registry_signaling'"
            )
            if not cr.fetchall():
                cr.execute(
                    "CREATE SEQUENCE base_registry_signaling INCREMENT BY 1 START WITH 1"
                )
                cr.execute("SELECT nextval('base_registry_signaling')")
                cr.execute(
                    "CREATE SEQUENCE base_cache_signaling INCREMENT BY 1 START WITH 1"
                )
                cr.execute("SELECT nextval('base_cache_signaling')")

            cr.execute(""" SELECT base_registry_signaling.last_value,
                                  base_cache_signaling.last_value
                           FROM base_registry_signaling, base_cache_signaling"""
                       )
            self.registry_sequence, self.cache_sequence = cr.fetchone()
            _logger.debug(
                "Multiprocess load registry signaling: [Registry: %s] [Cache: %s]",
                self.registry_sequence, self.cache_sequence)

    def check_signaling(self):
        """ Check whether the registry has changed, and performs all necessary
        operations to update the registry. Return an up-to-date registry.
        """
        if self.in_test_mode():
            return self

        with closing(self.cursor()) as cr:
            cr.execute(""" SELECT base_registry_signaling.last_value,
                                  base_cache_signaling.last_value
                           FROM base_registry_signaling, base_cache_signaling"""
                       )
            r, c = cr.fetchone()
            _logger.debug(
                "Multiprocess signaling check: [Registry - %s -> %s] [Cache - %s -> %s]",
                self.registry_sequence, r, self.cache_sequence, c)
            # Check if the model registry must be reloaded
            if self.registry_sequence != r:
                _logger.info(
                    "Reloading the model registry after database signaling.")
                self = Registry.new(self.db_name)
            # Check if the model caches must be invalidated.
            elif self.cache_sequence != c:
                _logger.info(
                    "Invalidating all model caches after database signaling.")
                # Bypass self.clear_caches() to avoid invalidation loops in multi-threaded
                # configs due to the `cache_invalidated` flag being set, causing more signaling.
                self.cache.clear()
            self.registry_sequence = r
            self.cache_sequence = c

        return self

    def signal_changes(self):
        """ Notifies other processes if registry or cache has been invalidated. """
        if self.registry_invalidated and not self.in_test_mode():
            _logger.info("Registry changed, signaling through the database")
            with closing(self.cursor()) as cr:
                cr.execute("select nextval('base_registry_signaling')")
                self.registry_sequence = cr.fetchone()[0]

        # no need to notify cache invalidation in case of registry invalidation,
        # because reloading the registry implies starting with an empty cache
        elif self.cache_invalidated and not self.in_test_mode():
            _logger.info(
                "At least one model cache has been invalidated, signaling through the database."
            )
            with closing(self.cursor()) as cr:
                cr.execute("select nextval('base_cache_signaling')")
                self.cache_sequence = cr.fetchone()[0]

        self.registry_invalidated = False
        self.cache_invalidated = False

    def reset_changes(self):
        """ Reset the registry and cancel all invalidations. """
        if self.registry_invalidated:
            with closing(self.cursor()) as cr:
                self.setup_models(cr)
                self.registry_invalidated = False
        if self.cache_invalidated:
            self.cache.clear()
            self.cache_invalidated = False

    @contextmanager
    def manage_changes(self):
        """ Context manager to signal/discard registry and cache invalidations. """
        try:
            yield self
            self.signal_changes()
        except Exception:
            self.reset_changes()
            raise

    def in_test_mode(self):
        """ Test whether the registry is in 'test' mode. """
        return self.test_cr is not None

    def enter_test_mode(self, cr):
        """ Enter the 'test' mode, where one cursor serves several requests. """
        assert self.test_cr is None
        self.test_cr = cr
        self.test_lock = threading.RLock()
        assert Registry._saved_lock is None
        Registry._saved_lock = Registry._lock
        Registry._lock = DummyRLock()

    def leave_test_mode(self):
        """ Leave the test mode. """
        assert self.test_cr is not None
        self.test_cr = None
        self.test_lock = None
        assert Registry._saved_lock is not None
        Registry._lock = Registry._saved_lock
        Registry._saved_lock = None

    def cursor(self):
        """ Return a new cursor for the database. The cursor itself may be used
            as a context manager to commit/rollback and close automatically.
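
            For example::

                with registry.cursor() as cr:
                    cr.execute("SELECT 1")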
        """
        if self.test_cr is not None:
            # When in test mode, we use a proxy object that uses 'self.test_cr'
            # underneath.
            return TestCursor(self.test_cr, self.test_lock)
        return self._db.cursor()
Example n. 26
class BridgeDriver:
    """Respond to scene object changes and handle messaging through the Unity bridge"""

    # Location of the Coherence DLL - relative to addon root
    DLL_PATH = 'lib/LibCoherence.dll'

    METABALLS_OBJECT_NAME = get_string_buffer("__Metaballs")

    MAX_TEXTURE_SLOTS = 64
    UNASSIGNED_TEXTURE_SLOT_NAME = '-- Unassigned --'

    running = False
    lib = None
    connection_name: str = None
    blender_version: str = None
    has_metaballs: bool = False

    image_editor_handle = None  # <capsule object RNA_HANDLE>
    image_buffer = None  # np.ndarray

    # Mapping between viewport IDs and RenderEngine instances.
    # Weakref is used so that we don't hold onto RenderEngine references
    # since Blender uses __del__ to release them after use
    viewports = WeakValueDictionary()

    # Tracked object names already synced to the DLL
    objects = set()

    def __init__(self):
        path = Path(__file__).parent.parent.joinpath(self.DLL_PATH).absolute()
        log('Loading DLL from {}'.format(path))
        self.lib = cdll.LoadLibrary(str(path))

        # Declare argtypes/restype for the API calls we actually use
        self.lib.Connect.restype = c_int
        self.lib.Disconnect.restype = c_int
        self.lib.Clear.restype = c_int
        self.lib.SetViewportCamera.argtypes = (c_int, InteropCamera)

        #self.lib.GetTextureSlots.argtypes = (
        #    POINTER(InteropString64),   # Target buffer
        #    c_int                       # size
        #)
        self.lib.GetTextureSlots.restype = c_int

        self.lib.UpdateTexturePixels.argtypes = (
            c_void_p,  # name
            c_int,  # width
            c_int,  # height
            c_void_p  # pixels
        )
        self.lib.UpdateTexturePixels.restype = c_int

        self.lib.CopyMeshDataNative.argtypes = (
            c_void_p,  # name
            c_void_p,  # loops
            c_uint,  # loopSize
            c_void_p,  # loopTris
            c_uint,  # loopTrisSize
            c_void_p,  # verts
            c_uint,  # verticesSize
            c_void_p,  # loopCols
            c_void_p,  # loopUVs
            c_void_p,  # loopUV2s
            c_void_p,  # loopUV3s
            c_void_p,  # loopUV4s
        )
        self.lib.CopyMeshDataNative.restype = c_int

        self.lib.GetRenderTexture.argtypes = (c_uint, )
        self.lib.GetRenderTexture.restype = RenderTextureData

        self.lib.ReleaseRenderTextureLock.argtypes = (c_uint, )
        self.lib.ReleaseRenderTextureLock.restype = c_int

        self.lib.AddObjectToScene.argtypes = (
            c_void_p,  # name
            c_uint,  # SceneObjectType
            InteropTransform,  # transform
        )
        self.lib.AddObjectToScene.restype = c_int

        self.lib.SetObjectTransform.argtypes = (
            c_void_p,  # name
            InteropTransform,  # transform
        )
        self.lib.SetObjectTransform.restype = c_int

        # bpy.types.SpaceView3D.draw_handler_add(post_view_draw, (), 'WINDOW', 'POST_PIXEL')

    def __del__(self):
        debug('__del__ on bridge')

        # bpy.types.SpaceView3D.draw_handler_remove(post_view_draw, 'WINDOW')

    def start(self):
        """Start trying to connect to Unity"""
        if self.is_running():
            return

        log('Starting the DCC')

        # TODO: Pull connection name from scene's coherence.connection_name
        self.connection_name = create_string_buffer("Coherence".encode())
        self.blender_version = create_string_buffer(
            bpy.app.version_string.encode())
        self.running = True

        # Register active viewports
        for render_engine in self.viewports.values():
            self.add_viewport(render_engine)

        # Register listeners for Blender events
        depsgraph_update_post.append(self.on_depsgraph_update)
        load_pre.append(self.on_load_pre)

        # Register timers for frequent updates
        bpy.app.timers.register(self.on_tick)
        bpy.app.timers.register(self.check_texture_sync)

        # Monitor updates in SpaceImageEditor for texture syncing
        self.image_editor_handle = bpy.types.SpaceImageEditor.draw_handler_add(
            self.on_image_editor_update, (bpy.context, ), 'WINDOW',
            'POST_PIXEL')

        # Sync the current scene state into the bridge
        self.sync_tracked_objects(bpy.context.scene,
                                  bpy.context.evaluated_depsgraph_get())

        self.tag_redraw_viewports()

    def stop(self):
        """Disconnect from Unity and cleanup synced objects"""
        if not self.is_running():
            return

        log('DCC teardown')
        self.lib.Disconnect()
        self.lib.Clear()

        # Clear local tracking
        self.objects = set()
        self.has_metaballs = False

        # Turning off `running` will also destroy the `on_tick` timer.
        self.running = False

        if self.on_depsgraph_update in depsgraph_update_post:
            depsgraph_update_post.remove(self.on_depsgraph_update)

        if self.on_load_pre in load_pre:
            load_pre.remove(self.on_load_pre)

        if self.image_editor_handle:
            bpy.types.SpaceImageEditor.draw_handler_remove(
                self.image_editor_handle, 'WINDOW')
            self.image_editor_handle = None

        self.tag_redraw_viewports()

    def free_lib(self):  # UNUSED
        pass
        # # Windows-specific handling for freeing the DLL.
        # # See: https://stackoverflow.com/questions/359498/how-can-i-unload-a-dll-using-ctypes-in-python
        # handle = self.lib._handle
        # del self.lib
        # self.lib = None

        # kernel32 = WinDLL('kernel32', use_last_error=True)
        # kernel32.FreeLibrary.argtypes = [wintypes.HMODULE]
        # kernel32.FreeLibrary(handle)

    def get_texture_slots(self) -> list:
        """Return all sync-able texture slot names exposed by Unity

        Returns:
            list[str]
        """
        if not self.is_connected():
            return []

        buffer = (InteropString64 * self.MAX_TEXTURE_SLOTS)()
        size = self.lib.GetTextureSlots(buffer, len(buffer))

        # Convert byte arrays to a list of strings.
        return [self.UNASSIGNED_TEXTURE_SLOT_NAME
                ] + [buffer[i].buffer.decode('utf-8') for i in range(size)]

    def sync_texture(self, image):
        """Send updated pixel data for a texture to Unity

        Args:
            image (bpy.types.Image): The image to sync
        """
        settings = image.coherence
        if settings.error or settings.texture_slot == self.UNASSIGNED_TEXTURE_SLOT_NAME:
            return

        # TODO: Optimize further (e.g. don't allocate
        # the numpy buffer each time, etc etc)
        w, h = image.size

        if self.image_buffer is None:
            self.image_buffer = np.empty(w * h * 4, dtype=np.float32)
        else:
            self.image_buffer.resize(w * h * 4, refcheck=False)

        image.pixels.foreach_get(self.image_buffer)
        pixels_ptr = self.image_buffer.ctypes.data

        self.lib.UpdateTexturePixels(get_string_buffer(settings.texture_slot),
                                     image.size[0], image.size[1], pixels_ptr)

    def add_viewport(self, render_engine):
        """Add a RenderEngine instance as a tracked viewport

        Args:
            render_engine (CoherenceRenderEngine)
        """
        log('Create Viewport {} from {}'.format(render_engine.viewport_id,
                                                id(render_engine)))

        self.viewports[render_engine.viewport_id] = render_engine
        self.lib.AddViewport(render_engine.viewport_id)

    def remove_viewport(self, uid):
        """Remove a RenderEngine instance as a tracked viewport

        Args:
            uid (int): Unique identifier for the viewport RenderEngine
        """
        log('***REMOVE VIEWPORT {}'.format(uid))

        del self.viewports[uid]
        self.lib.RemoveViewport(uid)

    def is_connected(self) -> bool:
        """Is the bridge currently connected to an instance of Unity

        Returns:
            bool
        """
        return self.running and self.lib.IsConnectedToUnity()

    def is_running(self) -> bool:
        """Is the driver actively trying to / is connected to Unity

        Returns:
            bool
        """
        return self.running

    def on_tick(self):
        """
            Timer registered through bpy.app.timers to handle
            connecting/reconnecting to Unity and processing messages

        Returns:
            float for next time to run the timer, or None to destroy it
        """
        if not self.running:
            log('Deactivating on_tick timer')
            return None

        # While actively connected to Unity, send typical IO,
        # get viewport renders, and run as fast as possible
        if self.is_connected():
            self.lib.Update()
            self.lib.ConsumeRenderTextures()

            self.tag_redraw_viewports()

            # If we lost connection while polling - flag a disconnect
            if not self.is_connected():
                self.on_disconnected_from_unity()

            #return 0.0001
            #return 0.016 # 60 FPS update rate
            return 0.008  # 120 FPS

        # Attempt to connect to shared memory if not already
        if not self.lib.IsConnectedToSharedMemory():
            response = self.lib.Connect(self.connection_name,
                                        self.blender_version)
            if response == 1:
                self.on_connected_to_shared_memory()
            elif response == -1:
                print('UNKNOWN ERROR WOO!')
                exit()
            # else the space doesn't exist.

        # Poll for updates from Unity until we get one.
        self.lib.Update()

        if self.is_connected():
            self.on_connected_to_unity()

        return 0.05

    def check_texture_sync(self) -> float:
        """Push image updates to Unity if we're actively drawing
            on an image bound to one of the synced texture slots

        Returns:
            float: Seconds until the next check (bpy timers use seconds)
        """
        delay = bpy.context.scene.coherence.texture_slot_update_frequency

        # Don't do anything if we're still not connected
        if bpy.context.mode != 'PAINT_TEXTURE' or not self.is_connected():
            return delay

        image = bpy.context.tool_settings.image_paint.canvas

        # Tool is active but we don't have an image assigned
        if image is None:
            return delay

        self.sync_texture(image)

        return delay

    def on_connected_to_unity(self):
        debug('on_connected_to_unity')
        self.tag_redraw_viewports()

    def on_connected_to_shared_memory(self):
        debug('on_connected_to_shared_memory')

    def on_disconnected_from_unity(self):
        debug('on_disconnected_from_unity')
        self.tag_redraw_viewports()

    def tag_redraw_viewports(self):
        """Tag all active render engines for a redraw"""
        for viewport in self.viewports.values():
            try:
                viewport.on_update()
            except Exception:
                error(sys.exc_info()[0])

    def sync_tracked_objects(self, scene, depsgraph):
        """Add/remove objects from the bridge to match the scene.

        Objects may be added/removed in case of undo, rename, etc.

        Args:
            scene (bpy.types.Scene)
            depsgraph (bpy.types.Depsgraph)
        """
        current = set()  # Set of tracked object names
        found_metaballs = False

        # Check for added objects
        for obj in scene.objects:
            if is_supported_object(obj):
                if obj.name not in self.objects:
                    self.on_add_object(obj, depsgraph)

                current.add(obj.name)
            elif obj.type == 'META':
                found_metaballs = True
                if not self.has_metaballs:
                    self.on_add_metaballs(obj, depsgraph)

        # Check for removed objects
        removed = self.objects - current
        for name in removed:
            self.on_remove_object(name)

        if not found_metaballs and self.has_metaballs:
            self.on_remove_metaballs()

        # Update current tracked list
        self.objects = current

    def on_image_editor_update(self, context):
        space = context.space_data

        # Only try to sync updates if we're actively painting
        # on an image. Any other action (masking, viewing) is ignored.
        if space.mode == 'PAINT' and space.image:
            self.sync_texture(space.image)

    def on_load_pre(self, *args, **kwargs):
        """Stop Coherence when our Blender file changes.

        This is to prevent Coherence from entering some invalid state where
        synced objects/viewports no longer exist in the Blender sync.
        """
        self.stop()

    def on_depsgraph_update(self, scene, depsgraph):
        """Sync the bridge with the scene's dependency graph on each update

        Args:
            scene (bpy.types.Scene)
            depsgraph (bpy.types.Depsgraph)
        """
        debug('on depsgraph update')

        # Only update metaballs as a whole once per tick
        has_metaball_updates = False
        geometry_updates = {}

        self.sync_tracked_objects(scene, depsgraph)

        # Check for updates to objects (geometry changes, transform changes, etc)
        for update in depsgraph.updates:
            if type(update.id) == bpy.types.Material:
                self.on_update_material(update.id)
            elif type(update.id) == bpy.types.Object:
                # Get the real object, not the copy in the update
                obj = bpy.data.objects.get(update.id.name)
                if obj.type == 'META':
                    has_metaball_updates = True
                elif update.id.name in self.objects:
                    # If it's a tracked object - update transform/geo/etc where appropriate
                    if update.is_updated_transform:
                        self.on_update_transform(obj)

                    if update.is_updated_geometry:
                        # Aggregate *unique* meshes that need to be updated this
                        # frame. This de-duplicates any instanced meshes that all
                        # fired the same is_updated_geometry update.
                        geometry_updates[get_mesh_uid(obj)] = obj

                    # Push any other updates we may be tracking for this object
                    self.on_update_properties(obj)

        # Handle all geometry updates
        for uid, obj in geometry_updates.items():
            debug('GEO UPDATE uid={}, obj={}'.format(uid, obj.name))
            self.on_update_geometry(obj, depsgraph)

        # A change to any metaball will trigger a re-evaluation of them all as one object
        if has_metaball_updates:
            self.on_update_metaballs(scene, depsgraph)

    def on_add_object(self, obj, depsgraph):
        """Notify the bridge that the object has been added to the scene

        Args:
            obj (bpy.types.Object):             The object that was added to the scene
            depsgraph (bpy.types.Depsgraph):    Dependency graph to use for generating a final mesh
        """
        mat_name = get_material_uid(obj.active_material)

        debug('on_add_object - name={}, mat_name={}'.format(
            obj.name, mat_name))

        # TODO: Other object types

        parent_name = ''
        if obj.parent_type == 'OBJECT' and obj.parent is not None:
            parent_name = obj.parent.name

        self.lib.AddObjectToScene(get_string_buffer(obj.name),
                                  to_interop_type(obj),
                                  to_interop_transform(obj))

        # When an object is renamed - it's treated as an add. But the rename
        # doesn't propagate any change events to children, so we need to manually
        # trigger a transform update for everything parented to this object
        # so they can all update their parent name to match.
        for child in obj.children:
            if child.name in self.objects:
                self.on_update_transform(child)

        # Send up initial state and geometry
        self.on_update_properties(obj)
        self.on_update_geometry(obj, depsgraph)

    def on_remove_object(self, name):
        """Notify the bridge that the object has been removed from the scene

        Args:
            name (str): Unique object name shared with the Bridge
        """
        debug('on_remove_object - name={}'.format(name))

        self.lib.RemoveObjectFromScene(get_string_buffer(name))

    def on_add_metaballs(self, obj, depsgraph):
        """Update our sync state when metaballs have been first added to the scene.

        We treat all metaballs as a single entity synced with Unity.

        Args:
            obj (bpy.types.Object): The first metaball object in the scene
            depsgraph (bpy.types.Depsgraph):    Dependency graph to use for generating a final mesh
        """
        self.has_metaballs = True
        debug('on_add_metaballs')

        mat_name = get_material_uid(obj.active_material)

        transform = to_interop_transform(obj)
        self.lib.AddObjectToScene(
            self.METABALLS_OBJECT_NAME,
            2,  # SceneObject.Metaball - TODO: Don't hardcode this
            transform)

        # The initial set of geometry appears to be sent by the subsequent
        # depsgraph update, so no explicit sync is done here.
        # self.on_update_metaballs(obj, depsgraph)

    def on_remove_metaballs(self):
        """Update our sync state when all metaballs have been removed from the scene"""
        self.has_metaballs = False
        debug('on_remove_metaballs')

        self.lib.RemoveObjectFromScene(self.METABALLS_OBJECT_NAME)

    def on_update_transform(self, obj):
        """Notify the bridge that the object has been transformed in the scene.

        Args:
            obj (bpy.types.Object): The object that was updated
        """
        debug('on_update_transform - name={}'.format(obj.name))

        transform = to_interop_transform(obj)
        self.lib.SetObjectTransform(get_string_buffer(obj.name), transform)

    def on_update_properties(self, obj):
        """Notify Unity that object props may have changed

        Args:
            obj (bpy.types.Object): The object that was updated
        """
        mesh_uid = get_mesh_uid(obj)
        mat_uid = get_material_uid(obj.active_material)

        debug('on_update_properties - name={}, mesh={}, mat={}'.format(
            obj.name, mesh_uid, mat_uid))

        self.lib.UpdateObjectProperties(get_string_buffer(obj.name),
                                        int(obj.coherence.display_mode),
                                        get_string_buffer(mesh_uid),
                                        get_string_buffer(mat_uid))

    def on_update_geometry(self, obj, depsgraph):
        """Notify Unity that mesh geometry may have changed

        An object is provided instead of the bpy.types.Mesh
        to evaluate modifiers that need to be applied before
        transferring mesh data to Unity.

        Args:
            obj (bpy.types.Object):             The object that received the update
            depsgraph (bpy.types.Depsgraph):    Dependency graph to use for generating a final mesh
        """
        mesh_uid = get_mesh_uid(obj)

        debug('on_update_geometry - name={}, mesh={}'.format(
            obj.name, mesh_uid))

        # We need to do both evaluated_get() and preserve_all_data_layers=True to ensure
        # that - if the mesh is instanced - modifiers are applied to the correct instances.
        eval_obj = obj.evaluated_get(depsgraph)
        mesh = eval_obj.to_mesh(preserve_all_data_layers=True,
                                depsgraph=depsgraph)

        # TODO: preserve_all_data_layers is only necessary if instanced and modifier
        # stacks change per instance. Might be cheaper to turn this off if a mesh is used only once.

        # Ensure triangulated faces are available
        mesh.calc_loop_triangles()

        # A single (optional) vertex color layer can be passed through
        cols_ptr = None
        if len(mesh.vertex_colors) > 0 and len(mesh.vertex_colors[0].data) > 0:
            cols_ptr = mesh.vertex_colors[0].data[0].as_pointer()

        # Up to 4 (optional) UV layers can be passed through
        uv_ptr = [None] * 4
        for layer in range(len(mesh.uv_layers)):
            if len(mesh.uv_layers[layer].data) > 0:
                uv_ptr[layer] = mesh.uv_layers[layer].data[0].as_pointer()

        self.lib.CopyMeshDataNative(
            get_string_buffer(mesh_uid), mesh.loops[0].as_pointer(),
            len(mesh.loops), mesh.loop_triangles[0].as_pointer(),
            len(mesh.loop_triangles), mesh.vertices[0].as_pointer(),
            len(mesh.vertices), cols_ptr, uv_ptr[0], uv_ptr[1], uv_ptr[2],
            uv_ptr[3])

        eval_obj.to_mesh_clear()

    def on_update_metaballs(self, scene, depsgraph):
        """Rebuild geometry from metaballs in the scene and send to Unity

        Args:
            scene (bpy.types.Scene):
            depsgraph (bpy.types.Depsgraph):    Dependency graph to use for generating a final mesh
        """
        # Use the first metaball found in the scene as the root
        obj = next((o for o in scene.objects if o.type == 'META'), None)
        if obj is None:
            return

        debug('on_update_metaballs obj={}'.format(obj))

        # Get the evaluated post-modifiers mesh
        eval_obj = obj.evaluated_get(depsgraph)
        mesh = eval_obj.to_mesh()

        # Ensure triangulated faces are available
        mesh.calc_loop_triangles()

        # TODO: Don't do this repeatedly - only when the root changes transform.
        # It seems to be lagging out the interop.
        transform = to_interop_transform(obj)
        self.lib.SetObjectTransform(self.METABALLS_OBJECT_NAME, transform)

        self.lib.CopyMeshDataNative(
            self.METABALLS_OBJECT_NAME,
            mesh.loops[0].as_pointer(),
            len(mesh.loops),
            mesh.loop_triangles[0].as_pointer(),
            len(mesh.loop_triangles),
            mesh.vertices[0].as_pointer(),
            len(mesh.vertices),
            # Metaballs don't have UV/Vertex Color information,
            # So we skip all that on upload
            None,  # loopCols
            None,  # uv
            None,  # uv2
            None,  # uv3
            None  # uv4
        )

        eval_obj.to_mesh_clear()

    def on_update_material(self, mat):
        """
        Args:
            mat (bpy.types.Material)
        """
        debug('on_update_material - name={}'.format(mat.name))

        # Fire off an update for all objects that are using it
        for obj in bpy.context.scene.objects:
            if obj.active_material == mat and obj.name in self.objects:
                self.on_update_properties(obj)
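
None of the handlers above run until they are attached to Blender's application handler lists. Below is a minimal wiring sketch, assuming `runtime` is an instance of the bridge class above (the handler list names are real `bpy.app.handlers` entries; `runtime` itself is illustrative):

import bpy

# Since Blender 2.91, depsgraph_update_post handlers receive both the
# scene and the evaluated depsgraph, matching on_depsgraph_update above.
bpy.app.handlers.depsgraph_update_post.append(runtime.on_depsgraph_update)

# load_pre fires before a new .blend file is opened, letting the bridge
# shut down cleanly via on_load_pre.
bpy.app.handlers.load_pre.append(runtime.on_load_pre)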
Example no. 27
0
from weakref import WeakValueDictionary

from pacu.core.andor.ctypes.callback import c_feat_cb
from pacu.core.andor.acquisition import helper

CONTEXTS = WeakValueDictionary()


@c_feat_cb
def exposure_start(handle, feature, context):
    self = CONTEXTS[context]
    if not self.inst.camera_acquiring:
        return 0
    self.inst.acquisition.queue_buffer(self.rawbuf)
    self.exposure_start()
    return 0


@c_feat_cb
def exposure_end(handle, feature, context):
    self = CONTEXTS[context]
    if not self.inst.camera_acquiring:
        return 0
    try:
        buf = self.inst.acquisition.wait_buffer(3000, matching_buf=self.rawbuf)
    except Exception:
        return 0
    ts = self.inst.acquisition.extract_timestamp(self.rawbuf)
    ts = self.inst.from_timestamp(ts)
    frame, pointer = helper.get_contigious(self.inst.aoi_height,
                                           self.inst.aoi_width)
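
The pattern to note here is the module-level CONTEXTS map: the C-level callback only receives an opaque integer token, and the WeakValueDictionary resolves it back to the owning Python object without keeping that object alive. A self-contained sketch of the idiom, with illustrative names not taken from pacu:

from weakref import WeakValueDictionary

CONTEXTS = WeakValueDictionary()


class Acquisition:
    """Stand-in for the camera-facing object the callbacks recover."""

    def __init__(self, token):
        self.token = token
        CONTEXTS[token] = self  # register under the opaque token


def callback(token):
    # What the C callback does: resolve the token back to the object.
    obj = CONTEXTS.get(token)
    return 0 if obj is None else 1


acq = Acquisition(42)
assert callback(42) == 1
del acq                    # drop the last strong reference (CPython: immediate)
assert callback(42) == 0   # the weak entry vanished with the object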
Example no. 28
0
def __init__(self, document: Document):
    self.standalone_instances = WeakValueDictionary()
    self.tables = {}
    self.registry = TypeRegistry.instance
    self.document = document
Example no. 29
0
from abc import ABCMeta, abstractmethod
from typing import AbstractSet, Set, Union
from weakref import WeakValueDictionary

import numpy as np

# ChannelID, TimeType, Comparable and FunctorWaveform are provided by the
# surrounding qupulse package.


class Waveform(Comparable, metaclass=ABCMeta):
    """Represents an instantiated PulseTemplate which can be sampled to retrieve arrays of voltage
    values for the hardware."""

    __sampled_cache = WeakValueDictionary()

    @property
    @abstractmethod
    def duration(self) -> TimeType:
        """The duration of the waveform in time units."""

    @abstractmethod
    def unsafe_sample(
            self,
            channel: ChannelID,
            sample_times: np.ndarray,
            output_array: Union[np.ndarray, None] = None) -> np.ndarray:
        """Sample the waveform at given sample times.

        "Unsafe" means that no sanity checks are performed. The provided sample times are assumed to be
        monotonically increasing and to lie in the range [0, waveform.duration].

        Args:
            sample_times: Times at which this Waveform will be sampled.
            output_array: Has to be either None or an array of the same size and type as sample_times. If
                not None, the sampled values will be written here and this array will be returned
        Result:
            The sampled values of this Waveform at the provided sample times. Has the same number of
            elements as sample_times.
        """

    def get_sampled(
            self,
            channel: ChannelID,
            sample_times: np.ndarray,
            output_array: Union[np.ndarray, None] = None) -> np.ndarray:
        """A wrapper to the unsafe_sample method which caches the result. This method enforces the constrains
        unsafe_sample expects and caches the result to save memory.

        Args:
            sample_times: Times at which this Waveform will be sampled.
            output_array: Has to be either None or an array of the same size and type as sample_times. If an array is
                given, the sampled values will be written into the given array and it will be returned. Otherwise, a new
                array will be created and cached to save memory.

        Result:
            The sampled values of this Waveform at the provided sample times. Is `output_array` if provided
        """
        if len(sample_times) == 0:
            if output_array is None:
                return np.zeros_like(sample_times)
            elif len(output_array) == len(sample_times):
                return output_array
            else:
                raise ValueError(
                    'Output array length and sample time length are different')

        if np.any(np.diff(sample_times) < 0):
            raise ValueError(
                'The sample times are not monotonically increasing')
        if sample_times[0] < 0 or sample_times[-1] > float(self.duration):
            raise ValueError(
                f'The sample times [{sample_times[0]}, ..., {sample_times[-1]}] are not in the range'
                f' [0, duration={float(self.duration)}]')
        if channel not in self.defined_channels:
            raise KeyError(
                'Channel not defined in this waveform: {}'.format(channel))

        if output_array is None:
            # cache the result to save memory
            result = self.unsafe_sample(channel, sample_times)
            result.flags.writeable = False
            key = hash(bytes(result))
            if key not in self.__sampled_cache:
                self.__sampled_cache[key] = result
            return self.__sampled_cache[key]
        else:
            if len(output_array) != len(sample_times):
                raise ValueError(
                    'Output array length and sample time length are different')
            # use the user provided memory
            return self.unsafe_sample(channel=channel,
                                      sample_times=sample_times,
                                      output_array=output_array)

    @property
    @abstractmethod
    def defined_channels(self) -> Set[ChannelID]:
        """The channels this waveform should played on. Use
            :func:`~qupulse.pulses.instructions.get_measurement_windows` to get a waveform for a subset of these."""

    @abstractmethod
    def unsafe_get_subset_for_channels(
            self, channels: AbstractSet[ChannelID]) -> 'Waveform':
        """Unsafe version of :func:`~qupulse.pulses.instructions.get_measurement_windows`."""

    def get_subset_for_channels(
            self, channels: AbstractSet[ChannelID]) -> 'Waveform':
        """Get a waveform that only describes the channels contained in `channels`.

        Args:
            channels: A channel set the return value should confine to.

        Raises:
            KeyError: If `channels` is not a subset of the waveform's defined channels.

        Returns:
            A waveform with waveform.defined_channels == `channels`
        """
        if not channels <= self.defined_channels:
            raise KeyError(
                'Channels not defined on waveform: {}'.format(channels))
        if channels == self.defined_channels:
            return self
        return self.unsafe_get_subset_for_channels(channels=channels)

    def __neg__(self):
        return FunctorWaveform(
            self, {ch: np.negative
                   for ch in self.defined_channels})

    def __pos__(self):
        return self
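
The __sampled_cache idiom above interns immutable result arrays by content hash: repeated sampling that produces identical values yields one shared, read-only array, and cache entries disappear as soon as no pulse program holds the array any more. A minimal sketch of the idiom in isolation (the `interned` helper is illustrative):

from weakref import WeakValueDictionary

import numpy as np

_cache = WeakValueDictionary()


def interned(result: np.ndarray) -> np.ndarray:
    """Return a cached array with the same bytes, adding `result` if new."""
    result.flags.writeable = False   # freeze it so the hash stays valid
    key = hash(bytes(result))        # content hash, as in get_sampled above
    cached = _cache.get(key)
    if cached is None:
        _cache[key] = result
        cached = result
    return cached


a = interned(np.linspace(0.0, 1.0, 5))
b = interned(np.linspace(0.0, 1.0, 5))
assert a is b  # identical content resolves to one shared array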
Example no. 30
0
import uuid
from threading import Lock
from weakref import WeakValueDictionary


class SerializableLock(object):
    """ A Serializable per-process Lock

    This wraps a normal ``threading.Lock`` object and satisfies the same
    interface.  However, this lock can also be serialized and sent to different
    processes.  It will not block concurrent operations between processes (for
    this you should look at ``multiprocessing.Lock`` or ``locket.lock_file``)
    but will consistently deserialize into the same lock.

    So if we make a lock in one process::

        lock = SerializableLock()

    And then send it over to another process multiple times::

        bytes = pickle.dumps(lock)
        a = pickle.loads(bytes)
        b = pickle.loads(bytes)

    Then the deserialized objects will operate as though they were the same
    lock, and collide as appropriate.

    This is useful for consistently protecting resources on a per-process
    level.

    The creation of locks is itself not threadsafe.
    """
    def __init__(self, token=None):
        self.token = token or str(uuid.uuid4())
        if self.token in SerializableLock._locks:
            self.lock = SerializableLock._locks[self.token]
        else:
            self.lock = Lock()
            SerializableLock._locks[self.token] = self.lock

    def acquire(self, *args):
        return self.lock.acquire(*args)

    def release(self, *args):
        return self.lock.release(*args)

    def __enter__(self):
        self.lock.__enter__()
        return self

    def __exit__(self, *args):
        self.lock.__exit__(*args)

    @property
    def locked(self):
        return self.lock.locked()

    def __getstate__(self):
        return self.token

    def __setstate__(self, token):
        self.__init__(token)

    def __str__(self):
        return "<%s: %s>" % (self.__class__.__name__, self.token)

    __repr__ = __str__
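
A quick check of the behaviour the docstring promises, assuming the class above is in scope: copies unpickled in the same process share one underlying ``threading.Lock`` per token.

import pickle

lock = SerializableLock()
a = pickle.loads(pickle.dumps(lock))
b = pickle.loads(pickle.dumps(lock))

assert a.token == b.token == lock.token
assert a.lock is b.lock is lock.lock  # one real lock per token per process

with a:                          # hold the lock through one copy...
    assert not b.acquire(False)  # ...and another copy sees it as taken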