Example #1
class Dispatcher(_DispatcherBase):
    """
    Implementation of user-facing dispatcher objects (i.e. created using
    the @jit decorator).
    This is an abstract base class. Subclasses should define the targetdescr
    class attribute.
    """
    _fold_args = True
    _impl_kinds = {
        'direct': _FunctionCompiler,
        'generated': _GeneratedFunctionCompiler,
    }
    # A {uuid -> instance} mapping, for deserialization
    _memo = weakref.WeakValueDictionary()
    # hold refs to last N functions deserialized, retaining them in _memo
    # regardless of whether there is another reference
    _recent = collections.deque(maxlen=config.FUNCTION_CACHE_SIZE)
    __uuid = None
    __numba__ = 'py_func'

    def __init__(self,
                 py_func,
                 locals={},
                 targetoptions={},
                 impl_kind='direct',
                 pipeline_class=compiler.Pipeline):
        """
        Parameters
        ----------
        py_func: function object to be compiled
        locals: dict, optional
            Mapping of local variable names to Numba types.  Used to override
            the types deduced by the type inference engine.
        targetoptions: dict, optional
            Target-specific config options.
        impl_kind: str
            Select the compiler mode for `@jit` and `@generated_jit`
        pipeline_class: type numba.compiler.BasePipeline
            The compiler pipeline type.
        """
        self.typingctx = self.targetdescr.typing_context
        self.targetctx = self.targetdescr.target_context

        pysig = utils.pysignature(py_func)
        arg_count = len(pysig.parameters)
        can_fallback = not targetoptions.get('nopython', False)
        _DispatcherBase.__init__(self, arg_count, py_func, pysig, can_fallback)

        functools.update_wrapper(self, py_func)

        self.targetoptions = targetoptions
        self.locals = locals
        self._cache = NullCache()
        compiler_class = self._impl_kinds[impl_kind]
        self._impl_kind = impl_kind
        self._compiler = compiler_class(py_func, self.targetdescr,
                                        targetoptions, locals, pipeline_class)
        self._cache_hits = collections.Counter()
        self._cache_misses = collections.Counter()

        self._type = types.Dispatcher(self)
        self.typingctx.insert_global(self, self._type)

    @property
    def _numba_type_(self):
        return types.Dispatcher(self)

    def enable_caching(self):
        self._cache = FunctionCache(self.py_func)

    def __get__(self, obj, objtype=None):
        '''Allow a JIT function to be bound as a method to an object'''
        if obj is None:  # Unbound method
            return self
        else:  # Bound method
            return create_bound_method(self, obj)

    def __reduce__(self):
        """
        Reduce the instance for pickling.  This will serialize
        the original function as well as the compilation options and
        compiled signatures, but not the compiled code itself.
        """
        if self._can_compile:
            sigs = []
        else:
            sigs = [cr.signature for cr in self.overloads.values()]
        globs = self._compiler.get_globals_for_reduction()
        return (serialize._rebuild_reduction,
                (self.__class__, str(self._uuid),
                 serialize._reduce_function(self.py_func, globs), self.locals,
                 self.targetoptions, self._impl_kind, self._can_compile, sigs))

    @classmethod
    def _rebuild(cls, uuid, func_reduced, locals, targetoptions, impl_kind,
                 can_compile, sigs):
        """
        Rebuild a Dispatcher instance after it was __reduce__'d.
        """
        try:
            return cls._memo[uuid]
        except KeyError:
            pass
        py_func = serialize._rebuild_function(*func_reduced)
        self = cls(py_func, locals, targetoptions, impl_kind)
        # Make sure this deserialization will be merged with subsequent ones
        self._set_uuid(uuid)
        for sig in sigs:
            self.compile(sig)
        self._can_compile = can_compile
        return self

    @property
    def _uuid(self):
        """
        An instance-specific UUID, to avoid multiple deserializations of
        a given instance.

        Note this is lazily-generated, for performance reasons.
        """
        u = self.__uuid
        if u is None:
            u = str(uuid.uuid1())
            self._set_uuid(u)
        return u

    def _set_uuid(self, u):
        assert self.__uuid is None
        self.__uuid = u
        self._memo[u] = self
        self._recent.append(self)

    @compiler.global_compiler_lock
    def compile(self, sig):
        if not self._can_compile:
            raise RuntimeError("compilation disabled")
        # Use counter to track recursion compilation depth
        with self._compiling_counter:
            args, return_type = sigutils.normalize_signature(sig)
            # Don't recompile if signature already exists
            existing = self.overloads.get(tuple(args))
            if existing is not None:
                return existing.entry_point

            # Try to load from disk cache
            cres = self._cache.load_overload(sig, self.targetctx)
            if cres is not None:
                self._cache_hits[sig] += 1
                # XXX fold this in add_overload()? (also see compiler.py)
                if not cres.objectmode and not cres.interpmode:
                    self.targetctx.insert_user_function(
                        cres.entry_point, cres.fndesc, [cres.library])
                self.add_overload(cres)
                return cres.entry_point

            self._cache_misses[sig] += 1
            cres = self._compiler.compile(args, return_type)
            self.add_overload(cres)
            self._cache.save_overload(sig, cres)
            return cres.entry_point

    def recompile(self):
        """
        Recompile all signatures afresh.
        """
        sigs = list(self.overloads)
        old_can_compile = self._can_compile
        # Ensure the old overloads are disposed of, including compiled functions.
        self._make_finalizer()()
        self._reset_overloads()
        self._cache.flush()
        self._can_compile = True
        try:
            for sig in sigs:
                self.compile(sig)
        finally:
            self._can_compile = old_can_compile

    @property
    def stats(self):
        return _CompileStats(
            cache_path=self._cache.cache_path,
            cache_hits=self._cache_hits,
            cache_misses=self._cache_misses,
        )
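
The _memo/_recent pair above is a reusable idiom: share deserialized instances through a WeakValueDictionary keyed by uuid, while a bounded deque keeps strong references to the most recently seen instances so they are not collected before a caller takes its own reference. A minimal standalone sketch of that idiom (the Registered class and its helpers are illustrative, not numba's API):

import collections
import uuid
import weakref


class Registered:
    _memo = weakref.WeakValueDictionary()    # uuid -> live instance
    _recent = collections.deque(maxlen=16)   # strong refs to the last N instances

    def __init__(self, payload, uid=None):
        self.payload = payload
        self.uid = uid or str(uuid.uuid1())
        self._memo[self.uid] = self
        self._recent.append(self)

    @classmethod
    def rebuild(cls, uid, payload):
        # Merge repeated deserializations of the same logical instance.
        try:
            return cls._memo[uid]
        except KeyError:
            return cls(payload, uid=uid)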
Example #2
from yt.geometry.polar_coordinates import \
    PolarCoordinateHandler
from yt.geometry.cylindrical_coordinates import \
    CylindricalCoordinateHandler
from yt.geometry.spherical_coordinates import \
    SphericalCoordinateHandler
from yt.geometry.geographic_coordinates import \
    GeographicCoordinateHandler
from yt.geometry.spec_cube_coordinates import \
    SpectralCubeCoordinateHandler

# We want to support the movie format in the future.
# When such a thing comes to pass, I'll move all the stuff that is constant up
# to here, and then have it instantiate EnzoDatasets as appropriate.

_cached_datasets = weakref.WeakValueDictionary()
_ds_store = ParameterFileStore()

def _unsupported_object(ds, obj_name):
    def _raise_unsupp(*args, **kwargs):
        raise YTObjectNotImplemented(ds, obj_name)
    return _raise_unsupp

class RegisteredDataset(type):
    def __init__(cls, name, b, d):
        type.__init__(cls, name, b, d)
        output_type_registry[name] = cls
        mylog.debug("Registering: %s as %s", name, cls)

class IndexProxy(object):
    # This is a simple proxy for Index objects.  It enables backwards
Example #3
    def __init__(self):
        """Constructor"""
        self.connections = weakref.WeakValueDictionary()
Example #4
class Base(ana.Storable):
    """
    This is the base class of all claripy ASTs. An AST tracks a tree of operations on arguments.

    This class should not be instantiated directly - instead, use one of the constructor functions (BVS, BVV, FPS,
    FPV...) to construct a leaf node and then build more complicated expressions using operations.

    AST objects have *hash identity*. This means that an AST that has the same hash as another AST will be the *same*
    object. This is critical for efficient memory usage. As an example, the following is true::

        a, b = two different ASTs
        c = b + a
        d = b + a
        assert c is d

    :ivar op:           The operation that is being done on the arguments
    :ivar args:         The arguments that are being used
    """

    __slots__ = [
        'op', 'args', 'variables', 'symbolic', '_hash', '_simplified',
        '_cache_key', '_errored', '_eager_backends', 'length', '_excavated',
        '_burrowed', '_uninitialized', '_uc_alloc_depth', 'annotations',
        'simplifiable', '_uneliminatable_annotations',
        '_relocatable_annotations'
    ]
    _hash_cache = weakref.WeakValueDictionary()

    FULL_SIMPLIFY = 1
    LITE_SIMPLIFY = 2
    UNSIMPLIFIED = 0

    def __new__(cls, op, args, **kwargs):
        """
        This is called when you create a new Base object, whether directly or through an operation.
        It finalizes the arguments (see the _finalize function, above) and then computes
        a hash. If an AST of this hash already exists, it returns that AST. Otherwise,
        it creates, initializes, and returns the AST.

        :param op:              The AST operation ('__add__', 'Or', etc)
        :param args:            The arguments to the AST operation (i.e., the objects to add)
        :param variables:       The symbolic variables present in the AST (default: empty set)
        :param symbolic:        A flag saying whether or not the AST is symbolic (default: False)
        :param length:          An integer specifying the length of this AST (default: None)
        :param collapsible:     A flag of whether or not Claripy can feel free to collapse this AST. This is mostly used
                                to keep Claripy from collapsing Reverse operations, so that they can be undone with
                                another Reverse.
        :param simplified:      A measure of how simplified this AST is. 0 means unsimplified, 1 means fast-simplified
                                (basically, just undoing the Reverse op), and 2 means simplified through z3.
        :param errored:         A set of backends that are known to be unable to handle this AST.
        :param eager_backends:  A list of backends with which to attempt eager evaluation
        :param annotations:     A frozenset of annotations applied onto this AST.
        """

        #if any(isinstance(a, BackendObject) for a in args):
        #   raise Exception('asdf')

        # fix up args and kwargs
        a_args = tuple((a.to_claripy() if isinstance(a, BackendObject) else a)
                       for a in args)
        if 'symbolic' not in kwargs:
            kwargs['symbolic'] = any(a.symbolic for a in a_args
                                     if isinstance(a, Base))
        if 'variables' not in kwargs:
            kwargs['variables'] = frozenset.union(
                frozenset(),
                *(a.variables for a in a_args if isinstance(a, Base)))
        elif type(kwargs['variables']) is not frozenset:  #pylint:disable=unidiomatic-typecheck
            kwargs['variables'] = frozenset(kwargs['variables'])
        if 'errored' not in kwargs:
            kwargs['errored'] = set.union(
                set(), *(a._errored for a in a_args if isinstance(a, Base)))

        if 'add_variables' in kwargs:
            kwargs['variables'] = kwargs['variables'] | kwargs['add_variables']

        eager_backends = (list(backends._eager_backends)
                          if 'eager_backends' not in kwargs
                          else kwargs['eager_backends'])

        if (not kwargs['symbolic'] and eager_backends is not None
                and op not in operations.leaf_operations):
            for eb in eager_backends:
                try:
                    r = operations._handle_annotations(
                        eb._abstract(eb.call(op, args)), args)
                    if r is not None:
                        return r
                    else:
                        eager_backends.remove(eb)
                except BackendError:
                    eager_backends.remove(eb)

        # if we can't be eager anymore, null out the eagerness
        kwargs['eager_backends'] = None

        # whether this guy is initialized or not
        if 'uninitialized' not in kwargs:
            kwargs['uninitialized'] = None

        if 'uc_alloc_depth' not in kwargs:
            kwargs['uc_alloc_depth'] = None

        if 'annotations' not in kwargs:
            kwargs['annotations'] = ()

        h = Base._calc_hash(op, a_args, kwargs)
        self = cls._hash_cache.get(h, None)
        if self is None:
            self = super(Base, cls).__new__(cls)
            self.__a_init__(op, a_args, **kwargs)
            self._hash = h
            cls._hash_cache[h] = self
        # else:
        #    if self.args != f_args or self.op != f_op or self.variables != f_kwargs['variables']:
        #        raise Exception("CRAP -- hash collision")

        return self

    def __init__(self, *args, **kwargs):
        pass

    @staticmethod
    def _calc_hash(op, args, keywords):
        """
        Calculates the hash of an AST, given the operation, args, and kwargs.

        :param op:          The operation.
        :param args:        The arguments to the operation.
        :param keywords:    A dict including the 'symbolic', 'variables', and 'length' items.
        :returns:           a hash.

        We do it using md5 to avoid hash collisions.
        (hash(-1) == hash(-2), for example)
        """
        args_tup = tuple(
            long(a) if type(a) is int and int is not long else (
                a if type(a) in (long, float) else hash(a)) for a in args)
        to_hash = (op, args_tup, keywords['symbolic'],
                   hash(keywords['variables']),
                   str(keywords.get('length', None)),
                   hash(keywords.get('annotations', None)))

        # Why do we use md5 when it's broken? Because speed is more important
        # than cryptographic integrity here. Then again, look at all those
        # allocations we're doing here... fast python is painful.
        hd = hashlib.md5(pickle.dumps(to_hash, -1)).digest()
        return md5_unpacker.unpack(hd)[0]  # 64 bits

    def _get_hashables(self):
        return (self.op,
                tuple(str(a) if isinstance(a, numbers.Number) else hash(a)
                      for a in self.args),
                self.symbolic, hash(self.variables), str(self.length))

    #pylint:disable=attribute-defined-outside-init
    def __a_init__(self,
                   op,
                   args,
                   variables=None,
                   symbolic=None,
                   length=None,
                   collapsible=None,
                   simplified=0,
                   errored=None,
                   eager_backends=None,
                   add_variables=None,
                   uninitialized=None,
                   uc_alloc_depth=None,
                   annotations=None):  #pylint:disable=unused-argument
        """
        Initializes an AST. Takes the same arguments as ``Base.__new__()``

        We use this instead of ``__init__`` due to python's undesirable behavior w.r.t. automatically calling it on
        return from ``__new__``.
        """
        self.op = op
        self.args = args
        self.length = length
        self.variables = frozenset(variables)
        self.symbolic = symbolic
        self._eager_backends = eager_backends

        self._errored = errored if errored is not None else set()

        self._simplified = simplified
        self._cache_key = ASTCacheKey(self)
        self._excavated = None
        self._burrowed = None

        self._uninitialized = uninitialized
        self._uc_alloc_depth = uc_alloc_depth
        self.annotations = annotations

        ast_args = tuple(a for a in self.args if isinstance(a, Base))
        self._uneliminatable_annotations = frozenset(
            itertools.chain(
                itertools.chain.from_iterable(a._uneliminatable_annotations
                                              for a in ast_args),
                tuple(a for a in self.annotations
                      if not a.eliminatable and not a.relocatable)))

        self._relocatable_annotations = collections.OrderedDict(
            (e, True) for e in tuple(
                itertools.chain(
                    itertools.chain.from_iterable(
                        a._relocatable_annotations for a in ast_args),
                    tuple(a for a in self.annotations
                          if not a.eliminatable and a.relocatable)))).keys()

        if len(args) == 0:
            raise ClaripyOperationError("AST with no arguments!")

        #if self.op != 'I':
        #    for a in args:
        #        if not isinstance(a, Base) and type(a) not in (int, long, bool, str, unicode):
        #            import ipdb; ipdb.set_trace()
        #            l.warning(ClaripyOperationError("Un-wrapped native object of type %s!" % type(a)))

    #pylint:enable=attribute-defined-outside-init

    def make_uuid(self, uuid=None):
        """
        This overrides the default ANA uuid with the hash of the AST. UUID generation is slow, and we'll soon replace
        it in ANA itself, at which point this will go away.

        :returns: a string representation of the AST hash.
        """
        u = getattr(self, '_ana_uuid', None)
        if u is None:
            u = str(self._hash) if uuid is None else uuid
            ana.get_dl().uuid_cache[u] = self
            setattr(self, '_ana_uuid', u)
        return u

    @property
    def uuid(self):
        return self.ana_uuid

    def __hash__(self):
        return self._hash

    @property
    def cache_key(self):
        """
        A key that refers to this AST - this value is appropriate for usage as a key in dictionaries.
        """
        return self._cache_key

    #
    # Serialization support
    #

    def _ana_getstate(self):
        """
        Support for ANA serialization.
        """
        return self.op, self.args, self.length, self.variables, self.symbolic, self._hash, self.annotations

    def _ana_setstate(self, state):
        """
        Support for ANA deserialization.
        """
        op, args, length, variables, symbolic, h, annotations = state
        Base.__a_init__(self,
                        op,
                        args,
                        length=length,
                        variables=variables,
                        symbolic=symbolic,
                        annotations=annotations)
        self._hash = h
        Base._hash_cache[h] = self

    #
    # Collapsing and simplification
    #

    #def _models_for(self, backend):
    #    for a in self.args:
    #        backend.convert_expr(a)
    #        else:
    #            yield backend.convert(a)

    def make_like(self, *args, **kwargs):
        all_operations = operations.leaf_operations_symbolic | {'union'}
        if 'annotations' not in kwargs:
            kwargs['annotations'] = self.annotations
        if 'variables' not in kwargs and self.op in all_operations:
            kwargs['variables'] = self.variables
        if 'uninitialized' not in kwargs:
            kwargs['uninitialized'] = self._uninitialized
        if 'symbolic' not in kwargs and self.op in all_operations:
            kwargs['symbolic'] = self.symbolic
        return type(self)(*args, **kwargs)

    def _rename(self, new_name):
        if self.op not in {'BVS', 'BoolS', 'FPS'}:
            raise ClaripyOperationError(
                "rename is only supported on leaf nodes")
        new_args = (new_name, ) + self.args[1:]
        return self.make_like(self.op,
                              new_args,
                              length=self.length,
                              variables={new_name})

    #
    # Annotations
    #

    def _apply_to_annotations(self, f):
        return self.make_like(self.op,
                              self.args,
                              annotations=f(self.annotations))

    def append_annotation(self, a):
        """
        Appends an annotation to this AST.

        :param a: the annotation to append
        :returns: a new AST, with the annotation added
        """
        return self._apply_to_annotations(lambda alist: alist + (a, ))

    def append_annotations(self, new_tuple):
        """
        Appends several annotations to this AST.

        :param new_tuple: the tuple of annotations to append
        :returns: a new AST, with the annotations added
        """
        return self._apply_to_annotations(lambda alist: alist + new_tuple)

    def annotate(self, *args):
        """
        Appends annotations to this AST.

        :param args: the tuple of annotations to append (variadic positional args)
        :returns: a new AST, with the annotations added
        """
        return self._apply_to_annotations(lambda alist: alist + args)

    def insert_annotation(self, a):
        """
        Inserts an annotation to this AST.

        :param a: the annotation to insert
        :returns: a new AST, with the annotation added
        """
        return self._apply_to_annotations(lambda alist: (a, ) + alist)

    def insert_annotations(self, new_tuple):
        """
        Inserts several annotations to this AST.

        :param new_tuple: the tuple of annotations to insert
        :returns: a new AST, with the annotations added
        """
        return self._apply_to_annotations(lambda alist: new_tuple + alist)

    def replace_annotations(self, new_tuple):
        """
        Replaces annotations on this AST.

        :param new_tuple: the tuple of annotations to replace the old annotations with
        :returns: a new AST, with the annotations added
        """
        return self._apply_to_annotations(lambda alist: new_tuple)

    def remove_annotation(self, a):
        """
        Removes an annotation from this AST.

        :param a: the annotation to remove
        :returns: a new AST, with the annotation removed
        """
        return self._apply_to_annotations(
            lambda alist: tuple(oa for oa in alist if oa != a))

    def remove_annotations(self, remove_sequence):
        """
        Removes several annotations from this AST.

        :param remove_sequence: a sequence/set of the annotations to remove
        :returns: a new AST, with the annotations removed
        """
        return self._apply_to_annotations(lambda alist: tuple(
            oa for oa in alist if oa not in remove_sequence))

    #
    # Viewing and debugging
    #

    def dbg_repr(self, prefix=None):
        """
        Returns a debug representation of this AST.
        """
        try:
            if prefix is not None:
                new_prefix = prefix + "    "
                s = prefix + "<%s %s (\n" % (type(self).__name__, self.op)
                for a in self.args:
                    s += "%s,\n" % (a.dbg_repr(
                        prefix=new_prefix) if hasattr(a, 'dbg_repr') else
                                    (new_prefix + repr(a)))
                s = s[:-2] + '\n'
                s += prefix + ")>"

                return s
            else:
                return "<%s %s (%s)>" % (
                    type(self).__name__, self.op, ', '.join(
                        a.dbg_repr() if hasattr(a, 'dbg_repr') else repr(a)
                        for a in self.args))
        except RuntimeError as e:
            raise_from(
                ClaripyRecursionError(
                    "Recursion limit reached during display. Sorry about that."
                ), e)

    def _type_name(self):
        return self.__class__.__name__

    def shallow_repr(self, max_depth=8):
        """
        Returns a string representation of this AST, but with a maximum depth to prevent floods of text being printed.

        :param max_depth:   The maximum depth to print
        :return:            A string representing the AST
        """
        return self.__repr__(max_depth=max_depth)

    def __repr__(self, inner=False, max_depth=None, explicit_length=False):
        if max_depth is not None and max_depth <= 0:
            return '<...>'

        if max_depth is not None:
            max_depth -= 1

        if WORKER:
            return '<AST something>'

        try:
            if self.op in operations.reversed_ops:
                op = operations.reversed_ops[self.op]
                args = self.args[::-1]
            else:
                op = self.op
                args = self.args

            if op == 'BVS' and inner:
                value = args[0]
            elif op == 'BVS':
                value = "%s" % args[0]
                extras = []
                if args[1] is not None:
                    extras.append("min=%s" % args[1])
                if args[2] is not None:
                    extras.append("max=%s" % args[2])
                if args[3] is not None:
                    extras.append("stride=%s" % args[3])
                if args[4] is True:
                    extras.append("UNINITIALIZED")
                if len(extras) != 0:
                    value += "{" + ", ".join(extras) + "}"
            elif op == 'BoolV':
                value = str(args[0])
            elif op == 'BVV':
                if self.args[0] is None:
                    value = '!'
                elif self.args[1] < 10:
                    value = format(self.args[0], '')
                else:
                    value = format(self.args[0], '#x')
                value += ('#' + str(self.length)) if explicit_length else ''
            elif op == 'If':
                value = 'if {} then {} else {}'.format(
                    _inner_repr(args[0], max_depth=max_depth),
                    _inner_repr(args[1], max_depth=max_depth),
                    _inner_repr(args[2], max_depth=max_depth))
                if inner:
                    value = '({})'.format(value)
            elif op == 'Not':
                value = '!{}'.format(_inner_repr(args[0], max_depth=max_depth))
            elif op == 'Extract':
                value = '{}[{}:{}]'.format(
                    _inner_repr(args[2], max_depth=max_depth), args[0],
                    args[1])
            elif op == 'ZeroExt':
                value = '0#{} .. {}'.format(
                    args[0], _inner_repr(args[1], max_depth=max_depth))
                if inner:
                    value = '({})'.format(value)
            elif op == 'Concat':
                value = ' .. '.join(
                    _inner_repr(a, explicit_length=True, max_depth=max_depth)
                    for a in self.args)
            elif len(args) == 2 and op in operations.infix:
                value = '{} {} {}'.format(
                    _inner_repr(args[0], max_depth=max_depth),
                    operations.infix[op],
                    _inner_repr(args[1], max_depth=max_depth))
                if inner:
                    value = '({})'.format(value)
            else:
                value = "{}({})".format(
                    op, ', '.join(
                        _inner_repr(a, max_depth=max_depth) for a in args))

            if not inner:
                value = '<{} {}>'.format(self._type_name(), value)

            return value
        except RuntimeError as e:
            raise_from(
                ClaripyRecursionError(
                    "Recursion limit reached during display. Sorry about that."
                ), e)

    @property
    def depth(self):
        """
        The depth of this AST. For example, an AST representing (a+(b+c)) would have a depth of 2.
        """
        return self._depth()

    def _depth(self, memoized=None):
        """
        :param memoized:    A dict of ast hashes to depths we've seen before
        :return:            The depth of the AST. For example, an AST representing (a+(b+c)) would have a depth of 2.
        """
        if memoized is None:
            memoized = dict()

        ast_args = [a for a in self.args if isinstance(a, Base)]
        max_depth = 0
        for a in ast_args:
            if a.cache_key not in memoized:
                memoized[a.cache_key] = a._depth(memoized)
            max_depth = max(memoized[a.cache_key], max_depth)

        return 1 + max_depth

    @property
    def recursive_children_asts(self):
        for a in self.args:
            if isinstance(a, Base):
                l.debug("Yielding AST %s with hash %s with %d children", a,
                        hash(a), len(a.args))
                yield a
                for b in a.recursive_children_asts:
                    yield b

    @property
    def recursive_leaf_asts(self):
        return self._recursive_leaf_asts()

    def _recursive_leaf_asts(self, seen=None):
        if self.depth == 1:
            yield self
            return

        seen = set() if seen is None else seen
        for a in self.args:
            if isinstance(a, Base) and a.cache_key not in seen:
                seen.add(a.cache_key)

                if a.depth == 1:
                    yield a
                else:
                    for b in a.recursive_leaf_asts:
                        yield b

    def dbg_is_looped(self, seen=None, checked=None):
        seen = set() if seen is None else seen
        checked = set() if checked is None else checked

        l.debug("Checking AST with hash %s for looping", hash(self))
        if hash(self) in seen:
            return self
        elif hash(self) in checked:
            return False
        else:
            seen.add(hash(self))

            for a in self.args:
                if not isinstance(a, Base):
                    continue

                r = a.dbg_is_looped(seen=set(seen), checked=checked)
                if r is not False:
                    return r

            checked.add(hash(self))
            return False

    #
    # Various AST modifications (replacements)
    #

    def _replace(self, replacements, variable_set=None, leaf_operation=None):
        """
        A helper for replace().

        :param variable_set: For optimization, ASTs without these variables are not checked for replacement.
        :param replacements: A dictionary of hashes to their replacements.
        """
        try:
            if variable_set is None:
                variable_set = set()

            hash_key = self.cache_key

            try:
                r = replacements[hash_key]
                return r
            except KeyError:
                pass

            if not self.variables.issuperset(variable_set):
                r = self
            elif leaf_operation is not None and self.op in operations.leaf_operations:
                r = leaf_operation(self)
                if r is not self:
                    replacements[hash_key] = r
                return r
            else:
                new_args = []
                replaced = False

                for a in self.args:
                    if isinstance(a, Base):
                        new_a = a._replace(replacements=replacements,
                                           variable_set=variable_set,
                                           leaf_operation=leaf_operation)
                        replaced |= new_a is not a
                    else:
                        new_a = a

                    new_args.append(new_a)

                if replaced:
                    r = self.make_like(self.op, tuple(new_args))
                    replacements[hash_key] = r
                else:
                    r = self

            return r
        except ClaripyReplacementError:
            l.error("Replacement error:", exc_info=True)
            return self

    def swap_args(self, new_args, new_length=None):
        """
        This returns the same AST, with the arguments swapped out for new_args.
        """

        if len(self.args) == len(new_args) and all(
                a is b for a, b in zip(self.args, new_args)):
            return self

        #symbolic = any(a.symbolic for a in new_args if isinstance(a, Base))
        #variables = frozenset.union(frozenset(), *(a.variables for a in new_args if isinstance(a, Base)))
        length = self.length if new_length is None else new_length
        a = self.__class__(self.op, new_args, length=length)
        #if a.op != self.op or a.symbolic != self.symbolic or a.variables != self.variables:
        #   raise ClaripyOperationError("major bug in swap_args()")
        return a

    #
    # Other helper functions
    #

    def split(self, split_on):
        """
        Splits the AST if its operation is in `split_on` (i.e., returns all the arguments). Otherwise, returns a list
        containing just this AST.
        """
        if self.op in split_on: return list(self.args)
        else: return [self]

    # we don't support iterating over Base objects
    def __iter__(self):
        """
        This prevents people from iterating over ASTs.
        """
        raise ClaripyOperationError(
            "Please don't iterate over, or split, AST nodes!")

    def __nonzero__(self):
        """
        This prevents people from accidentally using an AST as a condition. For
        example, the following was previously common::

            a,b = two ASTs
            if a == b:
                do something

        The problem is that `a == b` would return an AST, because an AST can be symbolic
        and there could be no way to actually know the value of that without a
        constraint solve. This caused tons of issues.
        """
        raise ClaripyOperationError(
            'testing Expressions for truthiness does not do what you want, as these expressions can be symbolic'
        )

    def structurally_match(self, o):
        """
        Structurally compares two A objects, and checks if their corresponding leaves are definitely the same A object
        (name-wise or hash-identity wise).

        :param o: the other claripy A object
        :return: True/False
        """

        # TODO: Convert a and b into canonical forms

        if self.op != o.op:
            return False

        if len(self.args) != len(o.args):
            return False

        for arg_a, arg_b in zip(self.args, o.args):
            if not isinstance(arg_a, Base):
                if type(arg_a) != type(arg_b):
                    return False
                # They are not ASTs
                if arg_a != arg_b:
                    return False
                else:
                    continue

            if arg_a.op in ('I', 'BVS', 'FPS'):
                # This is a leaf node in AST tree
                if arg_a is not arg_b:
                    return False

            else:
                if not arg_a.structurally_match(arg_b):
                    return False

        return True

    def replace(self, old, new):
        """
        Returns this AST but with the AST 'old' replaced with AST 'new' in its subexpressions.
        """
        self._check_replaceability(old, new)
        replacements = {old.cache_key: new}
        return self._replace(replacements, variable_set=old.variables)

    def replace_dict(self, replacements):
        """
        :param replacements:    A dictionary of asts to replace and their replacements.
        :return:                An AST with all instances of the ASTs in `replacements` replaced.
        """
        #for old, new in replacements.items():
        #   old = old.ast
        #   if not isinstance(old, Base) or not isinstance(new, Base):
        #       raise ClaripyOperationError('replacements must be AST nodes')
        #   if type(old) is not type(new):
        #       raise ClaripyOperationError('cannot replace type %s ast with type %s ast' % (type(old), type(new)))
        #   old._check_replaceability(new)

        if replacements is None or len(replacements) == 0:  # pylint:disable=len-as-condition
            return self

        return self._replace(replacements, variable_set=set())

    @staticmethod
    def _check_replaceability(old, new):
        if not isinstance(old, Base) or not isinstance(new, Base):
            raise ClaripyReplacementError('replacements must be AST nodes')
        if type(old) is not type(new):
            raise ClaripyReplacementError(
                'cannot replace type %s ast with type %s ast' %
                (type(old), type(new)))

    def _identify_vars(self, all_vars, counter):
        if self.op == 'BVS':
            if self.args not in all_vars:
                all_vars[self.args] = BV('BVS',
                                         self.args,
                                         length=self.length,
                                         explicit_name=True)
        elif self.op == 'BoolS':
            if self.args not in all_vars:
                all_vars[self.args] = BoolS('var_' + str(next(counter)))
        else:
            for arg in self.args:
                if isinstance(arg, Base):
                    arg._identify_vars(all_vars, counter)

    def canonicalize(self, var_map=None, counter=None):
        counter = itertools.count() if counter is None else counter
        var_map = {} if var_map is None else var_map

        for v in self._recursive_leaf_asts():
            if v.cache_key not in var_map and v.op in {'BVS', 'BoolS', 'FPS'}:
                new_name = 'canonical_%d' % next(counter)
                var_map[v.cache_key] = v._rename(new_name)

        return var_map, counter, self.replace_dict(var_map)

    #
    # This code handles burrowing ITEs deeper into the ast and excavating
    # them to shallower levels.
    #

    def _burrow_ite(self):
        if self.op != 'If':
            # print("i'm not an if")
            return self.swap_args([
                (a.ite_burrowed if isinstance(a, Base) else a)
                for a in self.args
            ])

        if not all(isinstance(a, Base) for a in self.args):
            # print("not all my args are bases")
            return self

        old_true = self.args[1]
        old_false = self.args[2]

        if old_true.op != old_false.op or len(old_true.args) != len(
                old_false.args):
            return self

        if old_true.op == 'If':
            # let's not go into this right now
            return self

        if any(a.op in {'BVS', 'BVV', 'FPS', 'FPV', 'BoolS', 'BoolV'}
               for a in self.args):
            # burrowing through these is pretty funny
            return self

        matches = [
            old_true.args[i] is old_false.args[i]
            for i in range(len(old_true.args))
        ]
        if matches.count(True) != 1 or all(matches):
            # TODO: handle multiple differences for multi-arg ast nodes
            # print("wrong number of matches:",matches,old_true,old_false)
            return self

        different_idx = matches.index(False)
        inner_if = If(self.args[0], old_true.args[different_idx],
                      old_false.args[different_idx])
        new_args = list(old_true.args)
        new_args[different_idx] = inner_if.ite_burrowed
        # print("replaced the",different_idx,"arg:",new_args)
        return old_true.__class__(old_true.op, new_args, length=self.length)

    def _excavate_ite(self):
        if self.op in {'BVS', 'I', 'BVV'}:
            return self

        excavated_args = [(a.ite_excavated if isinstance(a, Base) else a)
                          for a in self.args]
        ite_args = [
            isinstance(a, Base) and a.op == 'If' for a in excavated_args
        ]

        if self.op == 'If':
            # if we are an If, call the If handler so that we can take advantage of its simplifiers
            return If(*excavated_args)
        elif ite_args.count(True) == 0:
            # if there are no ifs that came to the surface, there's nothing more to do
            return self.swap_args(excavated_args)
        else:
            # this gets called when we're *not* in an If, but there are Ifs in the args.
            # it pulls those Ifs out to the surface.
            cond = excavated_args[ite_args.index(True)].args[0]
            new_true_args = []
            new_false_args = []

            for a in excavated_args:
                # print("OC", cond.dbg_repr())
                # print("NC", Not(cond).dbg_repr())

                if not isinstance(a, Base) or a.op != 'If':
                    new_true_args.append(a)
                    new_false_args.append(a)
                elif a.args[0] is cond:
                    # print("AC", a.args[0].dbg_repr())
                    new_true_args.append(a.args[1])
                    new_false_args.append(a.args[2])
                elif a.args[0] is Not(cond):
                    # print("AN", a.args[0].dbg_repr())
                    new_true_args.append(a.args[2])
                    new_false_args.append(a.args[1])
                else:
                    # print("AB", a.args[0].dbg_repr())
                    # weird conditions -- giving up!
                    return self.swap_args(excavated_args)

            return If(cond, self.swap_args(new_true_args),
                      self.swap_args(new_false_args))

    @property
    def ite_burrowed(self):
        """
        Returns an equivalent AST that "burrows" the ITE expressions as deep as possible into the ast, for simpler
        printing.
        """
        if self._burrowed is None:
            self._burrowed = self._burrow_ite()  #pylint:disable=attribute-defined-outside-init
            self._burrowed._burrowed = self._burrowed
        return self._burrowed

    @property
    def ite_excavated(self):
        """
        Returns an equivalent AST that "excavates" the ITE expressions out as far as possible toward the root of the
        AST, for processing in static analyses.
        """
        if self._excavated is None:
            self._excavated = self._excavate_ite()  #pylint:disable=attribute-defined-outside-init

            # we set the flag for the children so that we avoid re-excavating during
            # VSA backend evaluation (since the backend evaluation recursively works on
            # the excavated ASTs)
            self._excavated._excavated = self._excavated
        return self._excavated

    #
    # these are convenience operations
    #

    def _first_backend(self, what):
        for b in backends._all_backends:
            if b in self._errored:
                continue

            try:
                return getattr(b, what)(self)
            except BackendError:
                pass

    @property
    def singlevalued(self):
        return self._first_backend('singlevalued')

    @property
    def multivalued(self):
        return self._first_backend('multivalued')

    @property
    def cardinality(self):
        return self._first_backend('cardinality')

    @property
    def concrete(self):
        return backends.concrete.handles(self)

    @property
    def uninitialized(self):
        """
        Whether this AST comes from an uninitialized dereference or not. It's only used in under-constrained symbolic
        execution mode.

        :return: True/False/None (unspecified).
        """

        #TODO: It should definitely be moved to the proposed Annotation backend.

        return self._uninitialized

    @property
    def uc_alloc_depth(self):
        """
        The depth of allocation by lazy-initialization. It's only used in under-constrained symbolic execution mode.

        :return: An integer indicating the allocation depth, or None if it's not from lazy-initialization.
        """
        # TODO: It should definitely be moved to the proposed Annotation backend.

        return self._uc_alloc_depth

    #
    # Backwards compatibility crap
    #

    @property
    def model(self):
        l.critical(
            "DEPRECATION WARNING: do not use AST.model. It is deprecated, no longer does what is expected, and "
            "will soon be removed. If you *need* to access the model use AST._model_X where X is the backend "
            "that you are interested in.")
        print(
            "DEPRECATION WARNING: do not use AST.model. It is deprecated, no longer does what is expected, and will "
            "soon be removed. If you *need* to access the model use AST._model_X where X is the backend that you are "
            "interested in.")
        return self._model_concrete if self._model_concrete is not self else \
               self._model_vsa if self._model_vsa is not self else \
               self._model_z3 if self._model_z3 is not self else \
               self

    def __getattr__(self, a):
        if not a.startswith('_model_'):
            raise AttributeError(a)

        model_name = a[7:]
        if not hasattr(backends, model_name):
            raise AttributeError(a)

        try:
            return getattr(backends, model_name).convert(self)
        except BackendError:
            return self
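
The _hash_cache WeakValueDictionary above implements hash-consing: __new__ computes a structural hash, and if a live AST with that hash already exists it is returned instead of a new object, while dead nodes drop out of the cache on their own. A minimal sketch of the same idiom, with illustrative names rather than claripy's API:

import weakref


class Interned:
    _cache = weakref.WeakValueDictionary()   # structural hash -> live instance

    def __new__(cls, op, args):
        key = hash((cls, op, tuple(args)))
        self = cls._cache.get(key)
        if self is None:
            self = super().__new__(cls)
            self.op = op
            self.args = tuple(args)
            self._hash = key
            cls._cache[key] = self
        return self

    def __init__(self, *args, **kwargs):
        # __init__ runs on every construction, even cache hits, so keep it inert.
        pass

    def __hash__(self):
        return self._hash


# Structurally equal nodes are the same object; unused nodes are freed
# automatically because the cache holds only weak references to them.
assert Interned('add', (1, 2)) is Interned('add', (1, 2))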
Example #5
    @property
    def time_left(self):
        """
        The amount of time left until the subscription expires, in seconds.

        If the subscription is unsubscribed (or not yet subscribed), returns 0.

        """
        if self._timestamp is None:
            return 0
        else:
            time_left = self.timeout - (time.time() - self._timestamp)
            return time_left if time_left > 0 else 0


# pylint: disable=C0103
event_listener = EventListener()

# Thread safe mappings.
# Used to store a mapping of sids to event queues
_sid_to_event_queue = weakref.WeakValueDictionary()
# Used to store a mapping of sids to service instances
_sid_to_service = weakref.WeakValueDictionary()

# The locks to go with them
# You must only ever access the mapping in the context of this lock, eg:
#   with _sid_to_event_queue_lock:
#       queue = _sid_to_event_queue[sid]
_sid_to_event_queue_lock = threading.Lock()
_sid_to_service_lock = threading.Lock()
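
A short sketch of how such a locked weak mapping might be used (the register/dispatch helpers below are illustrative, not part of the events module above): once the last strong reference to a queue disappears, its entry vanishes from the mapping without any explicit unsubscription cleanup.

import threading
import weakref

_sid_to_event_queue = weakref.WeakValueDictionary()
_sid_to_event_queue_lock = threading.Lock()


def register(sid, event_queue):
    # event_queue is typically a queue.Queue owned by the subscriber.
    with _sid_to_event_queue_lock:
        _sid_to_event_queue[sid] = event_queue


def dispatch(sid, event):
    # Only the mapping access happens under the lock; the put() does not.
    with _sid_to_event_queue_lock:
        q = _sid_to_event_queue.get(sid)
    if q is not None:
        q.put(event)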
Example #6
    normdict = dict()
    normdict[foo] = bar

    print('normdict before:', normdict.keys())

    del foo  # Not `del data[foo]`
    print('normdict after:', normdict.keys())

    foo = lambda x: x  # I'm the key!

    weakdict = weakref.WeakKeyDictionary()
    weakdict[foo] = bar
    print('weakdict before:', weakdict.keys())

    del foo  # Not `del data[foo]`
    print('weakdict after:', weakdict.keys())

    bar = lambda x: x

    weakdict = weakref.WeakValueDictionary()

    weakdict['bar'] = bar
    print('weakdict before:keys() - 2:', weakdict.keys())
    print('weakdict before:values() - 2:', weakdict.values())

    del bar

    print('weakdict after:keys() - 2:', weakdict.keys())
    print('weakdict after:values() - 2:', weakdict.values())
Example #7
class SerializableLock:
    """ A Serializable per-process Lock

    This wraps a normal ``threading.Lock`` object and satisfies the same
    interface.  However, this lock can also be serialized and sent to different
    processes.  It will not block concurrent operations between processes (for
    this you should look at ``multiprocessing.Lock`` or ``locket.lock_file``)
    but will consistently deserialize into the same lock.

    So if we make a lock in one process::

        lock = SerializableLock()

    And then send it over to another process multiple times::

        bytes = pickle.dumps(lock)
        a = pickle.loads(bytes)
        b = pickle.loads(bytes)

    Then the deserialized objects will operate as though they were the same
    lock, and collide as appropriate.

    This is useful for consistently protecting resources on a per-process
    level.

    The creation of locks is itself not threadsafe.
    """
    _locks = weakref.WeakValueDictionary()

    def __init__(self, token=None):
        self.token = token or str(uuid.uuid4())
        if self.token in SerializableLock._locks:
            self.lock = SerializableLock._locks[self.token]
        else:
            self.lock = threading.Lock()
            SerializableLock._locks[self.token] = self.lock

    def acquire(self, *args, **kwargs):
        return self.lock.acquire(*args, **kwargs)

    def release(self, *args, **kwargs):
        return self.lock.release(*args, **kwargs)

    def __enter__(self):
        self.lock.__enter__()

    def __exit__(self, *args):
        self.lock.__exit__(*args)

    def locked(self):
        return self.lock.locked()

    def __getstate__(self):
        return self.token

    def __setstate__(self, token):
        self.__init__(token)

    def __str__(self):
        return '<%s: %s>' % (self.__class__.__name__, self.token)

    __repr__ = __str__
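
A short usage sketch for the class above: copies unpickled from the same bytes resolve to the same underlying ``threading.Lock`` through the ``_locks`` WeakValueDictionary, so they collide as expected within one process.

import pickle

lock = SerializableLock()
payload = pickle.dumps(lock)
a = pickle.loads(payload)
b = pickle.loads(payload)

assert a.token == b.token
assert a.lock is b.lock       # same threading.Lock behind both copies

a.acquire()
assert b.locked()             # b observes the lock held through a
a.release()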
Example #8
class FeatureArtist(matplotlib.artist.Artist):
    """
    A subclass of :class:`~matplotlib.artist.Artist` capable of
    drawing a :class:`cartopy.feature.Feature`.

    """

    _geom_key_to_geometry_cache = weakref.WeakValueDictionary()
    """
    A mapping from _GeomKey to geometry to assist with the caching of
    transformed Matplotlib paths.

    """
    _geom_key_to_path_cache = weakref.WeakKeyDictionary()
    """
    A nested mapping from geometry (converted to a _GeomKey) and target
    projection to the resulting transformed Matplotlib paths::

        {geom: {target_projection: list_of_paths}}

    This provides a significant boost when producing multiple maps of the
    same projection.

    """

    def __init__(self, feature, **kwargs):
        """
        Parameters
        ----------
        feature
            An instance of :class:`cartopy.feature.Feature` to draw.
        styler
            A callable that, given a geometry, returns matplotlib styling
            parameters.

        Other Parameters
        ----------------
        **kwargs
            Keyword arguments to be used when drawing the feature. These
            will override those shared with the feature.

        """
        super().__init__()

        if kwargs is None:
            kwargs = {}
        self._styler = kwargs.pop('styler', None)
        self._kwargs = dict(kwargs)

        if 'color' in self._kwargs:
            # We want the user to be able to override both face and edge
            # colours if the original feature already supplied it.
            color = self._kwargs.pop('color')
            self._kwargs['facecolor'] = self._kwargs['edgecolor'] = color

        # Set default zorder so that features are drawn before
        # lines e.g. contours but after images.
        # Note that the zorder of Patch, PatchCollection and PathCollection
        # are all 1 by default. Assuming equal zorder drawing takes place in
        # the following order: collections, patches, lines (default zorder=2),
        # text (default zorder=3), then other artists e.g. FeatureArtist.
        if self._kwargs.get('zorder') is not None:
            self.set_zorder(self._kwargs['zorder'])
        elif feature.kwargs.get('zorder') is not None:
            self.set_zorder(feature.kwargs['zorder'])
        else:
            # The class attribute matplotlib.collections.PathCollection.zorder
            # was removed after mpl v1.2.0, so the hard-coded value of 1 is
            # used instead.
            self.set_zorder(1)

        self._feature = feature

    @matplotlib.artist.allow_rasterization
    def draw(self, renderer, *args, **kwargs):
        """
        Draw the geometries of the feature that intersect with the extent of
        the :class:`cartopy.mpl.GeoAxes` instance to which this
        object has been added.

        """
        if not self.get_visible():
            return

        ax = self.axes
        feature_crs = self._feature.crs

        # Get geometries that we need to draw.
        extent = None
        try:
            extent = ax.get_extent(feature_crs)
        except ValueError:
            warnings.warn('Unable to determine extent. Defaulting to global.')
        geoms = self._feature.intersecting_geometries(extent)

        # Combine all the keyword args in priority order.
        prepared_kwargs = style_merge(self._feature.kwargs,
                                      self._kwargs,
                                      kwargs)

        # Freeze the kwargs so that we can use them as a dict key. We will
        # need to unfreeze this with dict(frozen) before passing to mpl.
        prepared_kwargs = _freeze(prepared_kwargs)

        # Project (if necessary) and convert geometries to matplotlib paths.
        stylised_paths = OrderedDict()
        key = ax.projection
        for geom in geoms:
            # As Shapely geometries cannot be relied upon to be
            # hashable, we have to use a WeakValueDictionary to manage
            # their weak references. The key can then be a simple,
            # "disposable", hashable geom-key object that just uses the
            # id() of a geometry to determine equality and hash value.
            # The only persistent, strong reference to the geom-key is
            # in the WeakValueDictionary, so when the geometry is
            # garbage collected so is the geom-key.
            # The geom-key is also used to access the WeakKeyDictionary
            # cache of transformed geometries. So when the geom-key is
            # garbage collected so are the transformed geometries.
            geom_key = _GeomKey(geom)
            FeatureArtist._geom_key_to_geometry_cache.setdefault(
                geom_key, geom)
            mapping = FeatureArtist._geom_key_to_path_cache.setdefault(
                geom_key, {})
            geom_paths = mapping.get(key)
            if geom_paths is None:
                if ax.projection != feature_crs:
                    projected_geom = ax.projection.project_geometry(
                        geom, feature_crs)
                else:
                    projected_geom = geom
                geom_paths = cpatch.geos_to_path(projected_geom)
                mapping[key] = geom_paths

            if not self._styler:
                style = prepared_kwargs
            else:
                # Unfreeze, then add the computed style, and then re-freeze.
                style = style_merge(dict(prepared_kwargs), self._styler(geom))
                style = _freeze(style)

            stylised_paths.setdefault(style, []).extend(geom_paths)

        transform = ax.projection._as_mpl_transform(ax)

        # Draw one PathCollection per style. We could instead pass an array
        # of style items through to a single PathCollection, but that
        # complexity does not yet justify the effort.
        for style, paths in stylised_paths.items():
            style = style_finalize(dict(style))
            # Build path collection and draw it.
            c = matplotlib.collections.PathCollection(paths,
                                                      transform=transform,
                                                      **style)
            c.set_clip_path(ax.patch)
            c.set_figure(ax.figure)
            c.draw(renderer)

        # n.b. matplotlib.collection.Collection.draw returns None
        return None
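
The two caches above follow a disposable-key idiom for objects that cannot be used as dict keys themselves. A hedged standalone sketch (KeyFor and cached_result are illustrative names, not cartopy's API): the stand-in key is kept alive only for as long as the real object lives, because its only persistent strong reference is as a key of the WeakValueDictionary whose value is that object; when the object is collected, the key follows, and the WeakKeyDictionary row of derived data keyed on it is dropped too.

import weakref


class KeyFor:
    """Disposable, hashable stand-in for an object that may not be hashable."""

    def __init__(self, obj):
        self._id = id(obj)

    def __hash__(self):
        return self._id

    def __eq__(self, other):
        return isinstance(other, KeyFor) and self._id == other._id


_key_to_object = weakref.WeakValueDictionary()   # KeyFor -> real object (weak value)
_key_to_results = weakref.WeakKeyDictionary()    # KeyFor -> cached derived data


def cached_result(obj, compute):
    key = KeyFor(obj)
    # setdefault keeps the first KeyFor created for this object alive for as
    # long as the object itself lives; later KeyFor(obj) instances compare
    # equal to it, so they hit the same cache rows.
    _key_to_object.setdefault(key, obj)
    per_obj = _key_to_results.setdefault(key, {})
    if 'result' not in per_obj:
        per_obj['result'] = compute(obj)
    return per_obj['result']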
Example #9
'''

@author: Frank
'''

import weakref
import threading
import functools
import log
import os
import fcntl

_internal_lock = threading.RLock()
_locks = weakref.WeakValueDictionary()

logger = log.get_logger(__name__)


def _get_lock(name):
    with _internal_lock:
        lock = _locks.get(name, threading.RLock())
        if name not in _locks:
            _locks[name] = lock
        return lock


class NamedLock(object):
    def __init__(self, name):
        self.name = name
        self.lock = None
Example #10
            if not condition(rw):
                continue
            new._rows.remove(rw)
            self.db._delete_row(self.name, rw >> key, rw)
        self.db._transaction_ns.current[self.name] = new
        self.db._check_db_constraints()

            
class DisconnectedPersistentRelation:
    pass


# There isn't likely to be much memory savings from doing this, but we need a
# place to construct the type anyway, so it might as well be a registry.

_persistent_type_registry = _weakref.WeakValueDictionary()

def _get_persistent_type(r):
    hsig = _hsig(r.header)
    cls = _persistent_type_registry.get(hsig)
    if cls is None:
        rcls = r.__class__
        dct = dict(rcls.__dict__)
        name = PersistentRelation.__name__ + '(' + rcls.__name__.split('(', 1)[1]
        cls = type(name, (PersistentRelation,), dct)
        _persistent_type_registry[hsig] = cls
    return cls


class _DBCon(_threading.local):
Example #11
0
          newdata = newdata2
          newdata.update(self.data)
        write = ((newdata2keys != set(self.datakeys)) or set(newdata2.keys()) != set(newdata.keys())) and not self._written
      else:
        write = (len(newdata) and not self._written)
      if write:
        if not os.path.exists(self.dir):
          os.mkdir(self.dir)            
        f = open(self.indexfile, "wb")
        pickle.dump(newdata, f)
        f.close()
      self._written = True  
  def __del__(self):    
    self.write_indexfile()

cache_indices = weakref.WeakValueDictionary()

def write_indexfiles():
  if cache_indices is None:
    return
  for k,v in cache_indices.items():
    v.write_indexfile()
    
atexit.register(write_indexfiles)

class TVHash(object):
   def __init__(self, array, hash=None):     
     self.shape = array.shape
     self.dtype = numpy.dtype(array.dtype).type
     self.hash = hash
     if hash is None:
Example #12
0
    ender = weakref.finalize(s1, bye)
    print(ender.alive)
    del s1
    print(ender.alive)
    s2 = 'spam'
    print(ender.alive)

    a_set = {0, 1}
    wref = weakref.ref(a_set)
    print(wref)
    print(wref())
    a_set = {2, 3, 4}
    print(wref())
    print(wref() is None)
    print(wref() is None)

    stock = weakref.WeakValueDictionary()
    catalog = [
        Cheese('Red Leicester'),
        Cheese('Tilsit'),
        Cheese('Brie'),
        Cheese('Parmesan')
    ]
    for cheese in catalog:
        stock[cheese.kind] = cheese
    print(sorted(stock.keys()))
    del catalog
    print(sorted(stock.keys()))
    del cheese
    print(sorted(stock.keys()))
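The demo above references a Cheese class that is not shown in the snippet. A minimal definition consistent with its usage (an assumption; only the kind attribute matters here) is:

# Minimal Cheese class assumed by the demo above; only `kind` is needed for
# the WeakValueDictionary keys.
class Cheese:
    def __init__(self, kind):
        self.kind = kind

    def __repr__(self):
        return 'Cheese(%r)' % self.kind

With this in place, the last print typically still shows ['Parmesan']: the loop variable cheese keeps a strong reference to the final item until it is explicitly deleted.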
Example #13
0
    elif isinstance(dataType, MapType):
        for k, v in obj.items():
            _verify_type(k, dataType.keyType)
            _verify_type(v, dataType.valueType)

    elif isinstance(dataType, StructType):
        if len(obj) != len(dataType.fields):
            raise ValueError("Length of object (%d) does not match with "
                             "length of fields (%d)" %
                             (len(obj), len(dataType.fields)))
        for v, f in zip(obj, dataType.fields):
            _verify_type(v, f.dataType)


_cached_cls = weakref.WeakValueDictionary()


def _restore_object(dataType, obj):
    """ Restore object during unpickling. """
    # use id(dataType) as key to speed up lookup in dict
    # Because of batched pickling, dataType will be the
    # same object in most cases.
    k = id(dataType)
    cls = _cached_cls.get(k)
    if cls is None or cls.__datatype is not dataType:
        # use dataType as key to avoid creating multiple classes
        cls = _cached_cls.get(dataType)
        if cls is None:
            cls = _create_cls(dataType)
            _cached_cls[dataType] = cls
Example #14
0
import logging
import time
import threading
import os
import weakref
from collections import OrderedDict
from weakref import WeakValueDictionary
lock = threading.Lock()

from scullery import mqtt, messagebus

all_devs = weakref.WeakValueDictionary()

mqttlock = threading.Lock()

import iot_devices.device as devices


def scan():
    while 1:
        time.sleep(10)
        with lock:
            try:
                for i in all_devs:
                    # If the last signal was very strong, we don't need to wait as long before considering
                    # it gone, because packet loss will be less
                    m = 3 if (all_devs[i].datapoints['rssi']
                              or -80) > -65 else 7

                    if all_devs[i].lastseen < time.monotonic() - (float(
                            all_devs[i].config.get('interval', 300) or 300) *
Example #15
0
A python library for working with data.world datasets

"""

from __future__ import absolute_import

import weakref

from datadotworld.config import FileConfig, ChainedConfig
from datadotworld.datadotworld import DataDotWorld, UriParam

__version__ = '1.4.3'

# Convenience top-level functions

__instances = weakref.WeakValueDictionary()


def _get_instance(profile):
    instance = __instances.get(profile)
    if instance is None:
        config_param = (ChainedConfig() if profile == 'default' else
                        FileConfig(profile=profile))
        instance = DataDotWorld(config=config_param)
        __instances[profile] = instance
    return instance


def load_dataset(dataset_key, force_update=False, profile='default'):
    """
    Load a dataset from the local filesystem, downloading it from data.world
Example #16
0
class FeatureArtist(matplotlib.artist.Artist):
    """
    A subclass of :class:`~matplotlib.artist.Artist` capable of
    drawing a :class:`cartopy.feature.Feature`.

    """

    _geom_key_to_geometry_cache = weakref.WeakValueDictionary()
    """
    A mapping from _GeomKey to geometry to assist with the caching of
    transformed matplotlib paths.

    """
    _geom_key_to_path_cache = weakref.WeakKeyDictionary()
    """
    A nested mapping from geometry (converted to a _GeomKey) and target
    projection to the resulting transformed matplotlib paths::

        {geom: {target_projection: list_of_paths}}

    This provides a significant boost when producing multiple maps of the
    same projection.

    """
    def __init__(self, feature, **kwargs):
        """
        Args:

        * feature:
            an instance of :class:`cartopy.feature.Feature` to draw.
        * kwargs:
            keyword arguments to be used when drawing the feature. These
            will override those shared with the feature.

        """
        super(FeatureArtist, self).__init__()

        if kwargs is None:
            kwargs = {}
        self._kwargs = dict(kwargs)

        # Set default zorder so that features are drawn before
        # lines e.g. contours but after images.
        # Note that the zorder of Patch, PatchCollection and PathCollection
        # are all 1 by default. Assuming equal zorder drawing takes place in
        # the following order: collections, patches, lines (default zorder=2),
        # text (default zorder=3), then other artists e.g. FeatureArtist.
        if self._kwargs.get('zorder') is not None:
            self.set_zorder(self._kwargs['zorder'])
        elif feature.kwargs.get('zorder') is not None:
            self.set_zorder(feature.kwargs['zorder'])
        else:
            # The class attribute matplotlib.collections.PathCollection.zorder
            # was removed after mpl v1.2.0, so the hard-coded value of 1 is
            # used instead.
            self.set_zorder(1)

        self._feature = feature

    @matplotlib.artist.allow_rasterization
    def draw(self, renderer, *args, **kwargs):
        """
        Draws the geometries of the feature that intersect with the extent of
        the :class:`cartopy.mpl.GeoAxes` instance to which this
        object has been added.

        """
        if not self.get_visible():
            return

        ax = self.get_axes()
        feature_crs = self._feature.crs

        # Get geometries that we need to draw.
        extent = None
        try:
            extent = ax.get_extent(feature_crs)
        except ValueError:
            warnings.warn('Unable to determine extent. Defaulting to global.')
        geoms = self._feature.intersecting_geometries(extent)

        # Project (if necessary) and convert geometries to matplotlib paths.
        paths = []
        key = ax.projection
        for geom in geoms:
            # As Shapely geometries cannot be relied upon to be
            # hashable, we have to use a WeakValueDictionary to manage
            # their weak references. The key can then be a simple,
            # "disposable", hashable geom-key object that just uses the
            # id() of a geometry to determine equality and hash value.
            # The only persistent, strong reference to the geom-key is
            # in the WeakValueDictionary, so when the geometry is
            # garbage collected so is the geom-key.
            # The geom-key is also used to access the WeakKeyDictionary
            # cache of transformed geometries. So when the geom-key is
            # garbage collected so are the transformed geometries.
            geom_key = _GeomKey(geom)
            FeatureArtist._geom_key_to_geometry_cache.setdefault(
                geom_key, geom)
            mapping = FeatureArtist._geom_key_to_path_cache.setdefault(
                geom_key, {})
            geom_paths = mapping.get(key)
            if geom_paths is None:
                if ax.projection != feature_crs:
                    projected_geom = ax.projection.project_geometry(
                        geom, feature_crs)
                else:
                    projected_geom = geom
                geom_paths = cpatch.geos_to_path(projected_geom)
                mapping[key] = geom_paths
            paths.extend(geom_paths)

        # Build path collection and draw it.
        transform = ax.projection._as_mpl_transform(ax)
        # Combine all the keyword args in priority order
        final_kwargs = dict(self._feature.kwargs)
        final_kwargs.update(self._kwargs)
        final_kwargs.update(kwargs)
        c = matplotlib.collections.PathCollection(paths,
                                                  transform=transform,
                                                  **final_kwargs)
        c.set_clip_path(ax.patch)
        c.set_figure(ax.figure)
        return c.draw(renderer)
Example #17
0

"""Module gathering all abstract base classes"""

from abc import ABCMeta, abstractmethod, abstractproperty
import re
import time
import weakref

from .catch23 import make_abc, BYTE_TYPES
from .conversion import MySQLConverterBase
from .constants import ClientFlag, CharacterSet, DEFAULT_CONFIGURATION
from .optionfiles import MySQLOptionsParser
from . import errors

NAMED_TUPLE_CACHE = weakref.WeakValueDictionary()

@make_abc(ABCMeta)
class MySQLConnectionAbstract(object):

    """Abstract class for classes connecting to a MySQL server"""

    def __init__(self, **kwargs):
        """Initialize"""
        self._client_flags = ClientFlag.get_default()
        self._charset_id = 45
        self._sql_mode = None
        self._time_zone = None
        self._autocommit = False
        self._server_version = None
        self._handshake = None
Example #18
0
class USBConnector(BasicConnector):
    _device_handle: Union[usb1.USBDeviceHandle, None]

    _LOCK_ = threading.Lock()
    _CACHE_ = weakref.WeakValueDictionary()

    def __init__(self,
                 setting: usb1.USBInterfaceSetting,
                 device: usb1.USBDevice,
                 usb_info=None,
                 timeout=None):
        self._usb_info = usb_info
        self._timeout = timeout
        self._device = device
        self._setting = setting
        self._port_path = None
        self._read_endpoint = None
        self._write_endpoint = None
        self._max_read_packet_len = 0
        self._device_handle = None
        self._interface_number = None
        self._flush_buffer = bytearray()

    @property
    def serial_number(self):
        return self._device.getSerialNumber()

    @property
    def usb_info(self):
        try:
            sn = self.serial_number
        except usb1.USBError:
            sn = ""
        if sn and sn != self._usb_info:
            return f"{self._usb_info} {sn} "
        return self._usb_info

    def open(self):
        assert self._port_path is not None, "require device port path"
        port_path = tuple(self._port_path)
        with self._LOCK_:
            old_handle = self._CACHE_.get(port_path)
            if old_handle is not None:
                old_handle.close()

        for endpoint in self._setting.iterEndpoints():
            address = endpoint.getAddress()
            if self.is_read_endpoint(address):
                self._read_endpoint = address
                self._max_read_packet_len = endpoint.getMaxPacketSize()
            else:
                self._write_endpoint = address

        self.check_endpoint()
        device_handle = self._device.open()
        iface_no = self._setting.getNumber()

        try:
            # Detach any active kernel driver before claiming the interface.
            if (System.get_current_os() != System.Windows
                    and device_handle.kernelDriverActive(iface_no)):
                device_handle.detachKernelDriver(iface_no)
        except usb1.USBError as e:
            if e.value == libusb1.LIBUSB_ERROR_NOT_FOUND:
                logging.warning(
                    f'Kernel driver not found for interface: {iface_no}.')
            else:
                raise

        device_handle.claimInterface(iface_no)
        self._device_handle = device_handle
        self._interface_number = iface_no

        with self._LOCK_:
            self._CACHE_[port_path] = self

        # When this object is deleted, make sure it's closed.
        # Because of the weak ref, close() can still be invoked even after
        # this object has been deleted.
        weakref.ref(self, self.close)

    def check_endpoint(self):
        assert self._write_endpoint is not None, "No Write endpoint found!"
        assert self._read_endpoint is not None, "No Read endpoint found!"

    @staticmethod
    def is_read_endpoint(address):
        return address & libusb1.USB_ENDPOINT_DIR_MASK

    def close(self):
        if self._device_handle is None:
            return

        assert self._interface_number is not None, "got None interface number when close device"
        try:
            self._device_handle.releaseInterface(self._interface_number)
            self._device_handle.close()
        except usb1.USBError:
            logging.info('USBError while closing handle %s: ',
                         self.usb_info,
                         exc_info=True)
        finally:
            self._device_handle = None

    def write(self, data: bytes, timeout=None):
        if self._device_handle is None:
            raise WriteFailedError(
                'This handle has been closed, probably due to another being opened.',
                None)
        timeout = self.timeout_second(timeout)
        try:
            self._device_handle.bulkWrite(self._write_endpoint, data, timeout)
        except usb1.USBError as e:
            raise WriteFailedError(
                'An error occurred when writing data to %s (timeout %sms)' %
                (self.usb_info, timeout), e)

    def read(self, length: int, timeout=None):
        if self._device_handle is None:
            raise ReadFailedError(
                'This handle has been closed, probably due to another being opened.',
                None)
        timeout = self.timeout_second(timeout)
        try:
            return bytearray(
                self._device_handle.bulkRead(self._read_endpoint, length,
                                             timeout))
        except usb1.USBError as e:
            raise ReadFailedError(
                'Could not receive data from %s (timeout %sms)' %
                (self.usb_info, timeout), e)

    def flush(self):
        while True:
            try:
                self._flush_buffer = self.read(self._max_read_packet_len,
                                               self._timeout)
            except ReadFailedError as e:
                if e.usb_error.value == libusb1.LIBUSB_ERROR_TIMEOUT:
                    break
                raise
Example #19
0
from .engine import Engine
import weakref

_activeEngines = weakref.WeakValueDictionary()


def init(driverName=None, debug=False):
    '''
    Constructs a new TTS engine instance or reuses the existing instance for
    the driver name.

    @param driverName: Name of the platform specific driver to use. If
        None, selects the default driver for the operating system.
    @type driverName: str
    @param debug: Debugging output enabled or not
    @type debug: bool
    @return: Engine instance
    @rtype: L{engine.Engine}
    '''
    try:
        eng = _activeEngines[driverName]
    except KeyError:
        eng = Engine(driverName, debug)
        _activeEngines[driverName] = eng
    return eng


def speak(text):
    engine = init()
    engine.say(text)
    engine.runAndWait()
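A short usage sketch for the init() helper above (the driver name is a placeholder): the cached Engine is reused while a strong reference to it exists; once all strong references are gone, the WeakValueDictionary entry is discarded and the next call builds a fresh engine.

# Usage sketch; 'dummy' is a placeholder driver name.
eng_a = init('dummy')
eng_b = init('dummy')
assert eng_a is eng_b          # same cached Engine while a reference exists

del eng_a, eng_b               # drop the strong references
eng_c = init('dummy')          # likely a newly constructed Engine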
Example #20
0
        info('created temp directory %s', tempdir)
        # keep a strong reference to shutil.rmtree(), since the finalizer
        # can be called late during Python shutdown
        Finalize(None,
                 _remove_temp_dir,
                 args=(shutil.rmtree, tempdir),
                 exitpriority=-100)
        process.current_process()._config['tempdir'] = tempdir
    return tempdir


#
# Support for reinitialization of objects when bootstrapping a child process
#

_afterfork_registry = weakref.WeakValueDictionary()
_afterfork_counter = itertools.count()


def _run_after_forkers():
    items = list(_afterfork_registry.items())
    items.sort()
    for (index, ident, func), obj in items:
        try:
            func(obj)
        except Exception as e:
            info('after forker raised exception %s', e)


def register_after_fork(obj, func):
    _afterfork_registry[(next(_afterfork_counter), id(obj), func)] = obj
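register_after_fork() stores the registered object only as the weak value, so a dead object simply drops out of the registry instead of being kept alive. A hedged usage sketch with a hypothetical class (not part of the module above):

# Hedged usage sketch (hypothetical class): re-create a lock in the child
# process after a fork; the registry holds the instance weakly.
import threading


def _reset_lock(obj):
    obj._lock = threading.Lock()


class ForkSafeLock:
    def __init__(self):
        self._lock = threading.Lock()
        register_after_fork(self, _reset_lock)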
Example #21
0
 def make_weak_valued_dict(self):
     dict = weakref.WeakValueDictionary()
     objects = list(map(Object, range(self.COUNT)))
     for o in objects:
         dict[o.arg] = o
     return dict, objects
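The helper above relies on an Object class with an arg attribute and a COUNT constant, both defined elsewhere in CPython's test_weakref (COUNT lives on the test case). Minimal stand-ins, shown only to make the pattern runnable on its own (assumptions, not the original definitions):

# Minimal stand-ins for the names the helper assumes.
class Object:
    def __init__(self, arg):
        self.arg = arg


COUNT = 10

Note that map() is lazy in Python 3, so the result has to be materialized (as done above with list()); otherwise the objects would be collected before the dictionary is returned.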
Example #22
0
  def __init__(self):
    self.accept = False
    self.children = {}


def Add(root, bytes, instr):
  node = root
  for byte in bytes:
    if byte not in node.children:
      new = Trie()
      node.children[byte] = new
    node = node.children[byte]
  node.accept = True


interned = weakref.WeakValueDictionary()

def MakeInterned(children, accept):
  key = (accept, tuple(sorted(children.items())))
  node = interned.get(key)
  if node is None:
    node = Trie()
    node.children = children
    node.accept = accept
    interned[key] = node
  return node


EmptyNode = MakeInterned({}, False)
AcceptNode = MakeInterned({}, True)
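MakeInterned hash-conses trie nodes through the WeakValueDictionary: structurally identical nodes resolve to a single shared instance for as long as anything still references it. A short usage sketch:

# Usage sketch: identical (children, accept) pairs intern to the same node.
n1 = MakeInterned({0x90: AcceptNode}, False)
n2 = MakeInterned({0x90: AcceptNode}, False)
assert n1 is n2
assert MakeInterned({}, True) is AcceptNode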
Example #23
0

class IndexNotUptodateError(Error):
	pass # TODO description here?


def assert_index_uptodate(method):
	def wrapper(notebook, *arg, **kwarg):
		if not notebook.index.is_uptodate:
			raise IndexNotUptodateError('Index not up to date')
		return method(notebook, *arg, **kwarg)

	return wrapper


_NOTEBOOK_CACHE = weakref.WeakValueDictionary()


from zim.plugins import ExtensionBase, extendable

class NotebookExtension(ExtensionBase):
	'''Base class for extending the notebook

	@ivar notebook: the L{Notebook} object
	'''

	def __init__(self, plugin, notebook):
		ExtensionBase.__init__(self, plugin, notebook)
		self.notebook = notebook

Example #24
0
class WgpuRenderer(Renderer):
    """Object used to render scenes using wgpu.

    The purpose of a renderer is to render (i.e. draw) a scene to a
    canvas or texture. It also provides picking, defines the
    anti-aliasing parameters, and any post processing effects.

    A renderer is directly associated with its target and can only render
    to that target. Different renderers can render to the same target though.

    It provides a ``.render()`` method that can be called one or more
    times to render scenes. This creates a visual representation that
    is stored internally, and is finally rendered into its render target
    (the canvas or texture).
                                  __________
                                 | blender  |
        [scenes] -- render() --> |  state   | -- flush() --> [target]
                                 |__________|

    The internal representation is managed by the blender object. The
    internal render textures are typically at a higher resolution to
    reduce aliasing (SSAA). The blender has auxiliary buffers such as a
    depth buffer, pick buffer, and buffers for transparent fragments.
    Depending on the blend mode, a single render call may consist of
    multiple passes (to deal with semi-transparent fragments).

    The flush-step resolves the internal representation into the target
    texture or canvas, averaging neighbouring fragments for anti-aliasing.

    Parameters:
        target (WgpuCanvas or Texture): The target to render to, and what
            determines the size of the render buffer.
        pixel_ratio (float, optional): How large the physical size of the render
            buffer is in relation to the target's physical size, for antialiasing.
            See the corresponding property for details.
        show_fps (bool): Whether to display the frames per second. Beware that
            depending on the GUI toolkit, the canvas may impose a frame rate limit.
    """

    _shared = None

    _wobject_pipelines_collection = weakref.WeakValueDictionary()

    def __init__(
        self,
        target,
        *,
        pixel_ratio=None,
        show_fps=False,
        blend_mode="default",
        sort_objects=False,
    ):

        # Check and normalize inputs
        if not isinstance(target, (Texture, TextureView, wgpu.gui.WgpuCanvasBase)):
            raise TypeError(
                f"Render target must be a canvas or texture (view), not a {target.__class__.__name__}"
            )
        self._target = target

        # Process other inputs
        self.pixel_ratio = pixel_ratio
        self._show_fps = bool(show_fps)

        # Make sure we have a shared object (the first renderer creates it)
        canvas = target if isinstance(target, wgpu.gui.WgpuCanvasBase) else None
        if WgpuRenderer._shared is None:
            WgpuRenderer._shared = SharedData(canvas)

        # Init counter to auto-clear
        self._renders_since_last_flush = 0

        # Get target format
        if isinstance(target, wgpu.gui.WgpuCanvasBase):
            self._canvas_context = self._target.get_context()
            self._target_tex_format = self._canvas_context.get_preferred_format(
                self._shared.adapter
            )
            # Also configure the canvas
            self._canvas_context.configure(
                device=self._shared.device,
                format=self._target_tex_format,
                usage=wgpu.TextureUsage.RENDER_ATTACHMENT,
            )
        else:
            self._target_tex_format = self._target.format
            # Also enable the texture for render and display usage
            self._target._wgpu_usage |= wgpu.TextureUsage.RENDER_ATTACHMENT
            self._target._wgpu_usage |= wgpu.TextureUsage.TEXTURE_BINDING

        # Prepare render targets.
        self.blend_mode = blend_mode
        self.sort_objects = sort_objects

        # Prepare object that performs the final render step into a texture
        self._flusher = RenderFlusher(self._shared.device)

        # Initialize a small buffer to read pixel info into
        # Make it 256 bytes just in case (for bytes_per_row)
        self._pixel_info_buffer = self._shared.device.create_buffer(
            size=16,
            usage=wgpu.BufferUsage.COPY_DST | wgpu.BufferUsage.MAP_READ,
        )

    @property
    def device(self):
        """A reference to the used wgpu device."""
        return self._shared.device

    @property
    def target(self):
        """The render target. Can be a canvas, texture or texture view."""
        return self._target

    @property
    def pixel_ratio(self):
        """The ratio between the number of internal pixels versus the logical pixels on the canvas.

        This can be used to configure the size of the render texture
        relative to the canvas' logical size. By default (value is None) the
        used pixel ratio follows the screen's pixel ratio on high-res
        displays, and is 2 otherwise.

        If the used pixel ratio causes the render texture to be larger
        than the physical size of the canvas, SSAA is applied, resulting
        in a smoother final image with less jagged edges. Alternatively,
        this value can be set to e.g. 0.5 to lower the resolution (e.g.
        for performance during interaction).
        """
        return self._pixel_ratio

    @pixel_ratio.setter
    def pixel_ratio(self, value):
        if value is None:
            self._pixel_ratio = None
        elif isinstance(value, (int, float)):
            self._pixel_ratio = None if value <= 0 else float(value)
        else:
            raise TypeError(
                f"Rendered.pixel_ratio expected None or number, not {value}"
            )

    @property
    def blend_mode(self):
        """The method for handling transparency:

        * "default" or None: Select the default: currently this is "ordered2".
        * "opaque": single-pass approach that consider every fragment opaque.
        * "ordered1": single-pass approach that blends fragments (using alpha blending).
          Can only produce correct results if fragments are drawn from back to front.
        * "ordered2": two-pass approach that first processes all opaque fragments and then
          blends transparent fragments (using alpha blending) with depth-write disabled. The
          visual results are usually better than ordered1, but still depend on the drawing order.
        * "weighted": two-pass approach that for order independent transparency,
          using alpha weights.
        * "weighted_depth": two-pass approach for order independent transparency,
          with weights based on alpha and depth (McGuire 2013). Note that the depth
          range affects the (quality of the) visual result.
        * "weighted_plus": three-pass approach for order independent transparency,
          in which the front-most transparent layer is rendered correctly, while
          transparent layers behind it are blended using alpha weights.
        """
        return self._blend_mode

    @blend_mode.setter
    def blend_mode(self, value):
        # Massage and check the input
        if value is None:
            value = "default"
        value = value.lower()
        if value == "default":
            value = "ordered2"
        # Map string input to a class
        m = {
            "opaque": blender_module.OpaqueFragmentBlender,
            "ordered1": blender_module.Ordered1FragmentBlender,
            "ordered2": blender_module.Ordered2FragmentBlender,
            "weighted": blender_module.WeightedFragmentBlender,
            "weighted_depth": blender_module.WeightedDepthFragmentBlender,
            "weighted_plus": blender_module.WeightedPlusFragmentBlender,
        }
        if value not in m:
            raise ValueError(
                f"Unknown blend_mode '{value}', use any of {set(m.keys())}"
            )
        # Set blender object
        self._blend_mode = value
        self._blender = m[value]()
        # If the blend mode has changed, we may need a new _wobject_pipelines
        self._set_wobject_pipelines()
        # If our target is a canvas, request a new draw
        if isinstance(self._target, wgpu.gui.WgpuCanvasBase):
            self._target.request_draw()

    @property
    def sort_objects(self):
        """Whether to sort world objects before rendering. Default False.

        * ``True``: the render order is defined by 1) the object's ``render_order``
          property; 2) the object's distance to the camera; 3) the object's position
          in the scene graph (based on a depth-first search).
        * ``False``: don't sort, the render order is defined by the scene graph alone.
        """
        return self._sort_objects

    @sort_objects.setter
    def sort_objects(self, value):
        self._sort_objects = bool(value)

    def _set_wobject_pipelines(self):
        # Each WorldObject has associated with it a wobject_pipeline:
        # a dict that contains the wgpu pipeline objects. This
        # wobject_pipeline is also associated with the blend_mode,
        # because the blend mode affects the pipelines.
        #
        # Each renderer has ._wobject_pipelines, a dict that maps
        # wobject -> wobject_pipeline. This dict is a WeakKeyDictionary -
        # when the wobject is destroyed, the associated pipeline is
        # collected as well.
        #
        # Renderers with the same blend mode can safely share these
        # wobject_pipeline dicts. Therefore, we make use of a global
        # collection. Since this global collection is a
        # WeakValueDictionary, if all renderers stop using a certain
        # blend mode, the associated pipelines are removed as well.
        #
        # In a diagram:
        #
        # _wobject_pipelines_collection -> _wobject_pipelines -> wobject_pipeline
        #        global                         renderer              wobject
        #   WeakValueDictionary              WeakKeyDictionary         dict

        # Below we set this renderer's _wobject_pipelines. Note that if the
        # blending has changed, we automatically invalidate all "our" pipelines.
        self._wobject_pipelines = WgpuRenderer._wobject_pipelines_collection.setdefault(
            self.blend_mode, weakref.WeakKeyDictionary()
        )

    def render(
        self,
        scene: WorldObject,
        camera: Camera,
        *,
        viewport=None,
        clear_color=None,
        flush=True,
    ):
        """Render a scene with the specified camera as the viewpoint.

        Parameters:
            scene (WorldObject): The scene to render, a WorldObject that
                optionally has child objects.
            camera (Camera): The camera object to use, which defines the
                viewpoint and view transform.
            viewport (tuple, optional): The rectangular region to draw into,
                expressed in logical pixels.
            clear_color (bool, optional): Whether to clear the color buffer
                before rendering. By default this is True on the first
                call to ``render()`` after a flush, and False otherwise.
            flush (bool, optional): Whether to flush the rendered result into
                the target (texture or canvas). Default True.
        """
        device = self.device

        now = time.perf_counter()  # noqa
        if self._show_fps:
            if not hasattr(self, "_fps"):
                self._fps = now, now, 1
            elif now > self._fps[0] + 1:
                print(f"FPS: {self._fps[2]/(now - self._fps[0]):0.1f}")
                self._fps = now, now, 1
            else:
                self._fps = self._fps[0], now, self._fps[2] + 1

        # Define whether to clear color.
        if clear_color is None:
            clear_color = self._renders_since_last_flush == 0
        clear_color = bool(clear_color)
        self._renders_since_last_flush += 1

        # We always clear the depth, because each render() should be "self-contained".
        # Any use-cases where you normally would control depth-clearing should
        # be covered by the blender. Also, this way the blender can better re-use internal
        # buffers. The only rule is that the color buffer behaves correctly on multiple renders.

        # todo: also note that the fragment shader is (should be) optional
        #      (e.g. depth only passes like shadow mapping or z prepass)

        # Get logical size (as two floats). This size is constant throughout
        # all post-processing render passes.
        target_size, logical_size = get_size_from_render_target(self._target)
        if not all(x > 0 for x in logical_size):
            return

        # Determine the physical size of the render texture
        target_pixel_ratio = target_size[0] / logical_size[0]
        if self._pixel_ratio:
            pixel_ratio = self._pixel_ratio
        else:
            pixel_ratio = target_pixel_ratio
            if pixel_ratio <= 1:
                pixel_ratio = 2.0  # use 2 on non-hidpi displays

        # Determine the physical size of the first and last render pass
        framebuffer_size = tuple(max(1, int(pixel_ratio * x)) for x in logical_size)

        # Update the render targets
        self._blender.ensure_target_size(device, framebuffer_size)

        # Get viewport in physical pixels
        if not viewport:
            scene_logical_size = logical_size
            scene_physical_size = framebuffer_size
            physical_viewport = 0, 0, framebuffer_size[0], framebuffer_size[1], 0, 1
        elif len(viewport) == 4:
            scene_logical_size = viewport[2], viewport[3]
            physical_viewport = [int(i * pixel_ratio + 0.4999) for i in viewport]
            physical_viewport = tuple(physical_viewport) + (0, 1)
            scene_physical_size = physical_viewport[2], physical_viewport[3]
        else:
            raise ValueError("The viewport must be None or 4 elements (x, y, w, h).")

        # Ensure that matrices are up-to-date
        scene.update_matrix_world()
        camera.set_view_size(*scene_logical_size)
        camera.update_matrix_world()  # camera may not be a member of the scene
        camera.update_projection_matrix()

        # Update stdinfo uniform buffer object that we'll use during this render call
        self._update_stdinfo_buffer(camera, scene_physical_size, scene_logical_size)

        # Get the list of objects to render, as they appear in the scene graph
        wobject_list = []
        scene.traverse(wobject_list.append, True)

        # Ensure each wobject has pipeline info, and filter objects that we cannot render
        wobject_tuples = []
        any_has_changed = False
        for wobject in wobject_list:
            if not wobject.material:
                continue
            wobject_pipeline, has_changed = ensure_pipeline(self, wobject)
            if wobject_pipeline:
                any_has_changed |= has_changed
                wobject_tuples.append((wobject, wobject_pipeline))

        # Command buffers cannot be reused. If we want some sort of re-use we should
        # look into render bundles. See https://github.com/gfx-rs/wgpu-native/issues/154
        # If we do get this to work, we should trigger a new recording
        # when the wobject's children, visible, render_order, or render_pass changes.

        # Sort objects
        if self.sort_objects:
            sort_func = _get_sort_function(camera)
            wobject_tuples.sort(key=sort_func)

        # Record the rendering of all world objects, or re-use previous recording
        command_buffers = []
        command_buffers += self._render_recording(
            wobject_tuples, physical_viewport, clear_color
        )
        command_buffers += self._blender.perform_combine_pass(self._shared.device)

        # Collect commands and submit
        device.queue.submit(command_buffers)

        if flush:
            self.flush()

    def flush(self):
        """Render the result into the target texture view. This method is
        called automatically unless you use ``.render(..., flush=False)``.
        """

        # Note: we could, in theory, allow specifying a custom target here.

        if isinstance(self._target, wgpu.gui.WgpuCanvasBase):
            raw_texture_view = self._canvas_context.get_current_texture()
        else:
            if isinstance(self._target, Texture):
                texture_view = self._target.get_view()
            elif isinstance(self._target, TextureView):
                texture_view = self._target
            update_texture(self._shared.device, texture_view.texture)
            update_texture_view(self._shared.device, texture_view)
            raw_texture_view = texture_view._wgpu_texture_view[1]

        # Reset counter (so we can auto-clear the first next draw)
        self._renders_since_last_flush = 0

        command_buffers = self._flusher.render(
            self._blender.color_view,
            None,
            raw_texture_view,
            self._target_tex_format,
        )
        self.device.queue.submit(command_buffers)

    def _render_recording(
        self,
        wobject_tuples,
        physical_viewport,
        clear_color,
    ):

        # You might think that this is slow for a large number of world
        # objects. But it is actually pretty good. It does iterate over
        # all world objects, and over stuff in each object. But that's
        # it, really.
        # todo: we may be able to speed this up with render bundles though

        command_encoder = self.device.create_command_encoder()
        blender = self._blender

        # ----- compute pipelines

        compute_pass = command_encoder.begin_compute_pass()

        for wobject, wobject_pipeline in wobject_tuples:
            for pinfo in wobject_pipeline.get("compute_pipelines", ()):
                compute_pass.set_pipeline(pinfo["pipeline"])
                for bind_group_id, bind_group in enumerate(pinfo["bind_groups"]):
                    compute_pass.set_bind_group(
                        bind_group_id, bind_group, [], 0, 999999
                    )
                compute_pass.dispatch(*pinfo["index_args"])

        compute_pass.end_pass()

        # ----- render pipelines

        for pass_index in range(blender.get_pass_count()):

            color_attachments = blender.get_color_attachments(pass_index, clear_color)
            depth_attachment = blender.get_depth_attachment(pass_index)
            render_mask = blender.passes[pass_index].render_mask
            if not color_attachments:
                continue

            render_pass = command_encoder.begin_render_pass(
                color_attachments=color_attachments,
                depth_stencil_attachment={
                    **depth_attachment,
                    "stencil_load_value": wgpu.LoadOp.load,
                    "stencil_store_op": wgpu.StoreOp.store,
                },
                occlusion_query_set=None,
            )
            render_pass.set_viewport(*physical_viewport)

            for wobject, wobject_pipeline in wobject_tuples:
                if not (render_mask & wobject_pipeline["render_mask"]):
                    continue
                for pinfo in wobject_pipeline["render_pipelines"]:
                    render_pass.set_pipeline(pinfo["pipelines"][pass_index])
                    for slot, vbuffer in pinfo["vertex_buffers"].items():
                        render_pass.set_vertex_buffer(
                            slot,
                            vbuffer._wgpu_buffer[1],
                            vbuffer.vertex_byte_range[0],
                            vbuffer.vertex_byte_range[1],
                        )
                    for bind_group_id, bind_group in enumerate(pinfo["bind_groups"]):
                        render_pass.set_bind_group(bind_group_id, bind_group, [], 0, 99)
                    # Draw with or without index buffer
                    if pinfo["index_buffer"] is not None:
                        ibuffer = pinfo["index_buffer"]
                        render_pass.set_index_buffer(ibuffer, 0, ibuffer.size)
                        render_pass.draw_indexed(*pinfo["index_args"])
                    else:
                        render_pass.draw(*pinfo["index_args"])

            render_pass.end_pass()

        return [command_encoder.finish()]

    def _update_stdinfo_buffer(self, camera, physical_size, logical_size):
        # Update the stdinfo buffer's data
        stdinfo_data = self._shared.stdinfo_buffer.data
        stdinfo_data["cam_transform"].flat = camera.matrix_world_inverse.elements
        stdinfo_data["cam_transform_inv"].flat = camera.matrix_world.elements
        stdinfo_data["projection_transform"].flat = camera.projection_matrix.elements
        stdinfo_data[
            "projection_transform_inv"
        ].flat = camera.projection_matrix_inverse.elements
        # stdinfo_data["ndc_to_world"].flat = np.linalg.inv(stdinfo_data["cam_transform"] @ stdinfo_data["projection_transform"])
        stdinfo_data["physical_size"] = physical_size
        stdinfo_data["logical_size"] = logical_size
        stdinfo_data["flipped_winding"] = camera.flips_winding
        # Upload to GPU
        self._shared.stdinfo_buffer.update_range(0, 1)
        update_buffer(self._shared.device, self._shared.stdinfo_buffer)

    # Picking

    def get_pick_info(self, pos):
        """Get information about the given window location. The given
        pos is a 2D point in logical pixels (with the origin at the
        top-left). Returns a dict with fields:

        * "ndc": The position in normalized device coordinates, the 3d element
            being the depth (0..1). Can be translated to the position
            in world coordinates using the camera transforms.
        * "rgba": The value in the color buffer. All zero's when rendering
          directly to the screen (bypassing post-processing).
        * "world_object": the object at that location (provided that
          the object supports picking).
        * Additional pick info may be available, depending on the type of
          object and its material. See the world-object classes for details.
        """

        # Make pos 0..1, so we can scale it to the render texture
        _, logical_size = get_size_from_render_target(self._target)
        float_pos = pos[0] / logical_size[0], pos[1] / logical_size[1]

        # Sample
        encoder = self.device.create_command_encoder()
        self._copy_pixel(encoder, self._blender.color_tex, float_pos, 0)
        self._copy_pixel(encoder, self._blender.pick_tex, float_pos, 8)
        queue = self.device.queue
        queue.submit([encoder.finish()])

        # Collect data from the buffer
        data = self._pixel_info_buffer.map_read()
        color = tuple(data[0:4].cast("B"))
        pick_value = tuple(data[8:16].cast("Q"))[0]
        wobject_id = pick_value & 1048575  # 2**20-1
        wobject = id_provider.get_object_from_id(wobject_id)
        # Note: the position in world coordinates is not included because
        # it depends on the camera, but we don't "own" the camera.

        info = {
            "rgba": color,
            "world_object": wobject,
        }

        if wobject:
            pick_info = wobject._wgpu_get_pick_info(pick_value)
            info.update(pick_info)
        return info

    def _copy_pixel(self, encoder, render_texture, float_pos, buf_offset):

        # Map position to the texture index
        w, h, d = render_texture.size
        x = max(0, min(w - 1, int(float_pos[0] * w)))
        y = max(0, min(h - 1, int(float_pos[1] * h)))

        # Note: bytes_per_row must be a multiple of 256.
        encoder.copy_texture_to_buffer(
            {
                "texture": render_texture,
                "mip_level": 0,
                "origin": (x, y, 0),
            },
            {
                "buffer": self._pixel_info_buffer,
                "offset": buf_offset,
                "bytes_per_row": 256,  # render_texture.bytes_per_pixel,
                "rows_per_image": 1,
            },
            copy_size=(1, 1, 1),
        )

    def snapshot(self):
        """Create a snapshot of the currently rendered image."""

        # Prepare
        device = self._shared.device
        texture = self._blender.color_tex
        size = texture.size
        bytes_per_pixel = 4

        # Note, with queue.read_texture the bytes_per_row limitation does not apply.
        data = device.queue.read_texture(
            {
                "texture": texture,
                "mip_level": 0,
                "origin": (0, 0, 0),
            },
            {
                "offset": 0,
                "bytes_per_row": bytes_per_pixel * size[0],
                "rows_per_image": size[1],
            },
            size,
        )

        return np.frombuffer(data, np.uint8).reshape(size[1], size[0], 4)
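The comment block in _set_wobject_pipelines above describes a two-level cache: a global WeakValueDictionary keyed by blend mode whose values are WeakKeyDictionaries keyed by world object. Below is a stripped-down sketch of just that shape, with hypothetical names (not the pygfx API):

# Stripped-down sketch of the two-level cache described in
# _set_wobject_pipelines (hypothetical names). The outer mapping drops a mode
# entry when no consumer holds the inner dict any more; the inner mapping
# drops an entry when the keyed object is garbage collected.
import weakref

_shared_caches = weakref.WeakValueDictionary()   # mode -> WeakKeyDictionary


class _Consumer:
    def set_mode(self, mode):
        # Keep a strong reference so the shared inner dict stays alive.
        self._cache = _shared_caches.setdefault(
            mode, weakref.WeakKeyDictionary())

    def state_for(self, obj):
        # Per-object state, shared between consumers using the same mode.
        return self._cache.setdefault(obj, {})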
Example #25
0
        ovSend = OVERLAPPED()
        c = ovSend.channel = stackless.channel()

        ret = WSASend(self._socket, cast(self.sendBuffer, POINTER(WSABUF)), 1, byref(bytesSent), 0, byref(ovSend), 0)
        if ret != 0:
            err = WSAGetLastError()
            # The operation was successful and is currently in progress.  Ignore this error...
            if err != ERROR_IO_PENDING:
                Cleanup()
                raise WinError(err)    

        # Return the number of bytes that were sent.
        activeIO[self._socket] = c
        return c.receive()

activeIO = weakref.WeakValueDictionary()

def _DispatchIOCP():
    numberOfBytes = DWORD()
    completionKey = c_ulong()
    ovCompletedPtr = POINTER(OVERLAPPED)()

    while True:
        while True:
            # Yield to give other tasklets a chance to be scheduled.
            stackless.schedule()

            ret = GetQueuedCompletionStatus(hIOCP, byref(numberOfBytes), byref(completionKey), byref(ovCompletedPtr), 50)
            if ret == FALSE:
                err = WSAGetLastError()
                if err == WAIT_TIMEOUT:
Example #26
0
    def __del__(self):
        # This should only be called after the background threads and other
        # processing has finished.
        # NOTE: This isn't super great, but there isn't a better way without
        # knowing what we've been mixed with.
        if self.is_loaded():
            try:
                data = self.load()
            except BaseException:
                pass
            else:
                self.free(data)


_asset_cache = weakref.WeakValueDictionary()


class Asset(BackgroundMixin, FreeingMixin, AbstractAsset):
    """
    A resource to be loaded from the filesystem and used.

    Meant to be subclassed, but in specific ways.
    """
    def __new__(cls, name):
        clsname = f"{cls.__module__}:{cls.__qualname__}"
        try:
            return _asset_cache[(clsname, name)]
        except KeyError:
            inst = super().__new__(cls)
            _asset_cache[(clsname, name)] = inst
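The __new__ above implements instance interning: constructing an Asset with the same class and name returns the cached instance while anything still holds a reference to it. The snippet is cut off before the return, so here is a generic, self-contained sketch of the same pattern (hypothetical names, not the original continuation):

# Generic sketch of the interning-via-__new__ pattern (hypothetical names).
import weakref

_instances = weakref.WeakValueDictionary()


class Interned:
    def __new__(cls, name):
        key = (cls.__qualname__, name)
        try:
            return _instances[key]
        except KeyError:
            inst = super().__new__(cls)
            inst.name = name
            _instances[key] = inst
            return inst


a = Interned("player")
b = Interned("player")
assert a is b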
Example #27
0
 def __init__(self):
     self.__axes = weakref.WeakValueDictionary()
Example #28
0
 def __init__(self):
     self.devices = set()
     self.port_lookup = weakref.WeakValueDictionary()
     self.pool_lookup = weakref.WeakValueDictionary()
Example #29
0
 def stop(self):
     """Stop the connections manager thread and dereferences all connections"""
     self.connections = weakref.WeakValueDictionary()
Example #30
0
 def __init__(self, *args, **kwargs):
     super().__init__(*args, **kwargs)
     self.__cache = weakref.WeakValueDictionary()