Example #1
    def gl_init(self):
        self.gl_vertex_shader_factory = functools.lru_cache(maxsize=None)(functools.partial(gl.Shader,GL_VERTEX_SHADER))
        self.gl_fragment_shader_factory = functools.lru_cache(maxsize=None)(functools.partial(gl.Shader,GL_FRAGMENT_SHADER))
        self.gl_program_factory = functools.lru_cache(maxsize=None)(GLProgram)
        self.gl_texture_factory = functools.lru_cache(maxsize=None)(gx.texture.GLTexture)

        array_table = {gx.VA_PTNMTXIDX:GLMatrixIndexArray()}
        array_table.update((attribute,array.gl_convert()) for attribute,array in self.array_table.items())

        for shape in self.shapes:
            shape.gl_init(array_table)

        for material in self.materials:
            material.gl_init()

        for texture in self.textures:
            texture.gl_init(self.gl_texture_factory)

        self.gl_joints = [copy.copy(joint) for joint in self.joints]
        self.gl_joint_matrices = numpy.empty((len(self.joints),3,4),numpy.float32)
        self.gl_matrix_table = gl.TextureBuffer(GL_DYNAMIC_DRAW,GL_RGBA32F,(len(self.matrix_descriptors),3,4),numpy.float32)
        self.gl_update_matrix_table()

        self.gl_draw_objects = list(self.gl_generate_draw_objects(self.scene_graph))
        self.gl_draw_objects.sort(key=lambda draw_object: draw_object.material.unknown0)
Example #2
 def __init__(self, server, group_base, group_suffix, bind_dn = None, bind_pass = None):
     self._group_suffix = group_suffix
     # Create a new base query
     self._query = Query(
         Server(server).authenticate(bind_dn, bind_pass), group_base
     ).filter(objectClass = 'posixGroup')
     # We want to cache the results of our methods for the duration of the
     # request
     self.orgs_for_user = lru_cache(maxsize = 16)(self.orgs_for_user)
     self.members_for_org = lru_cache(maxsize = 16)(self.members_for_org)
Example #3
 def __init__(self, server, group_base, group_suffix, bind_dn = None, bind_pass = None):
     self.__group_base = group_base
     self.__group_suffix = group_suffix
     # Create a new connection
     try:
         self.__conn = ldap3.Connection(
             server, user = bind_dn, password = bind_pass,
             auto_bind = ldap3.AUTO_BIND_TLS_BEFORE_BIND, raise_exceptions = True
         )
     except ldap3.LDAPException as e:
         raise LDAPError('Error opening LDAP connection') from e
     # We want to cache the results of our methods for the duration of the
     # request
     self.orgs_for_user = lru_cache(maxsize = 16)(self.orgs_for_user)
     self.members_for_org = lru_cache(maxsize = 16)(self.members_for_org)
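The comment in Examples #2 and #3 points at a recurring pattern on this page: wrapping a bound method with lru_cache inside __init__ so the cache is per instance (and per request) rather than global. A minimal sketch of that pattern, with illustrative names:

from functools import lru_cache

class OrgDirectory:
    """Illustrative class, not from the examples above."""

    def __init__(self, cache_size=16):
        # Rebinding the method on the instance gives each object its own
        # cache, created and discarded together with the instance.
        self.orgs_for_user = lru_cache(maxsize=cache_size)(self.orgs_for_user)

    def orgs_for_user(self, username):
        # Stand-in for the expensive LDAP query in the examples above.
        return ['org-of-' + username]

d = OrgDirectory()
d.orgs_for_user('alice')   # miss: runs the lookup
d.orgs_for_user('alice')   # hit: served from this instance's cache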
Example #4
def lru_jsonify_cache(**kwargs):
    # TODO: Implementation broken
    raise NotImplementedError
    @lru_cache(**kwargs)
    def f(*params, **kwargs):
        return jsonify()
    return f
Example #5
    def test_lru(self):
        def orig(x, y):
            return 3*x+y
        f = functools.lru_cache(maxsize=20)(orig)

        domain = range(5)
        for i in range(1000):
            x, y = choice(domain), choice(domain)
            actual = f(x, y)
            expected = orig(x, y)
            self.assertEqual(actual, expected)
        self.assertTrue(f.cache_hits > f.cache_misses)
        self.assertEqual(f.cache_hits + f.cache_misses, 1000)

        f.cache_clear()   # test clearing
        self.assertEqual(f.cache_hits, 0)
        self.assertEqual(f.cache_misses, 0)
        f(x, y)
        self.assertEqual(f.cache_hits, 0)
        self.assertEqual(f.cache_misses, 1)

        # Test bypassing the cache
        self.assertIs(f.__wrapped__, orig)
        f.__wrapped__(x, y)
        self.assertEqual(f.cache_hits, 0)
        self.assertEqual(f.cache_misses, 1)

        # test size zero (which means "never-cache")
        @functools.lru_cache(0)
        def f():
            nonlocal f_cnt
            f_cnt += 1
            return 20
        f_cnt = 0
        for i in range(5):
            self.assertEqual(f(), 20)
        self.assertEqual(f_cnt, 5)

        # test size one
        @functools.lru_cache(1)
        def f():
            nonlocal f_cnt
            f_cnt += 1
            return 20
        f_cnt = 0
        for i in range(5):
            self.assertEqual(f(), 20)
        self.assertEqual(f_cnt, 1)

        # test size two
        @functools.lru_cache(2)
        def f(x):
            nonlocal f_cnt
            f_cnt += 1
            return x*10
        f_cnt = 0
        for x in 7, 9, 7, 9, 7, 9, 8, 8, 8, 9, 9, 9, 8, 8, 8, 7:
            #    *  *              *                          *
            self.assertEqual(f(x), x*10)
        self.assertEqual(f_cnt, 4)
Example #6
    def set_cache_size(self, size):
        """Set the maximum size of the LRU cache for Entity lookup.

        Replaces the existing cache.
        """
        wrapped = self._get_entities.__wrapped__.__get__(self, World)
        self._get_entities = lru_cache(size)(wrapped)
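set_cache_size relies on the lru_cache wrapper exposing the original callable as __wrapped__, so a fresh cache of a different size can be installed over it at any time. A hedged, simplified sketch of the same move; unlike the snippet above it wraps the already-bound method, so no __get__ rebinding is needed:

from functools import lru_cache

class World:
    """Illustrative stand-in for the World class referenced above."""

    def __init__(self, cache_size=128):
        self._get_entities = lru_cache(cache_size)(self._get_entities)

    def _get_entities(self, kind):
        return [kind + '-entity']        # stand-in for an expensive lookup

    def set_cache_size(self, size):
        # __wrapped__ is the original bound method; re-wrap it to replace
        # the existing cache with a new, empty one of the requested size.
        self._get_entities = lru_cache(size)(self._get_entities.__wrapped__)

w = World()
w._get_entities('tree')
w.set_cache_size(16)     # previously cached results are dropped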
Example #7
    def __init__(self, *args, **kwargs):
        super(ChildSumTreeLSTM, self).__init__('LSTM', *args, **kwargs)

        # lru_cache is normally used as a decorator, but that usage
        # leads to a global cache, where we need an instance specific
        # cache
        self._get_parameters = lru_cache()(self._get_parameters)
Example #8
def _subdivide_interval(args):
    interval, f, norm_func, _quadrature = args
    old_err, a, b, old_int = interval

    c = 0.5 * (a + b)

    # Left-hand side
    if getattr(_quadrature, 'cache_size', 0) > 0:
        f = functools.lru_cache(_quadrature.cache_size)(f)

    s1, err1, round1 = _quadrature(a, c, f, norm_func)
    dneval = _quadrature.num_eval
    s2, err2, round2 = _quadrature(c, b, f, norm_func)
    dneval += _quadrature.num_eval
    if old_int is None:
        old_int, _, _ = _quadrature(a, b, f, norm_func)
        dneval += _quadrature.num_eval

    if getattr(_quadrature, 'cache_size', 0) > 0:
        dneval = f.cache_info().misses

    dint = s1 + s2 - old_int
    derr = err1 + err2 - old_err
    dround_err = round1 + round2

    subintervals = ((a, c, s1, err1), (c, b, s2, err2))
    return dint, derr, dround_err, subintervals, dneval
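When the quadrature rule advertises a cache_size, the snippet wraps the integrand in lru_cache and then reads cache_info().misses to count how many distinct points were actually evaluated, since repeated calls at shared endpoints are served from the cache. A standalone illustration of that bookkeeping:

import functools

def integrand(x):
    return x * x

f = functools.lru_cache(maxsize=None)(integrand)

# Two adjacent panels share the midpoint and the common endpoint.
for x in (0.0, 0.5, 1.0):
    f(x)
for x in (0.5, 1.0, 1.5):
    f(x)

print(f.cache_info().misses)   # 4 distinct evaluations, although f was called 6 times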
Example #9
    def _wrapper(f):
        maxsize = timed_cache_kwargs.pop('maxsize', 128)
        typed = timed_cache_kwargs.pop('typed', False)
        update_delta = timedelta(**timed_cache_kwargs)
        # nonlocal workaround to support Python 2
        # https://technotroph.wordpress.com/2012/10/01/python-closures-and-the-python-2-7-nonlocal-solution/
        d = {'next_update': datetime.utcnow() - update_delta}
        try:
            f = functools.lru_cache(maxsize=maxsize, typed=typed)(f)
        except AttributeError:
            print(
                "LRU caching is not available in Pyton 2.7, "
                "this will have no effect!"
            )
            pass

        @functools.wraps(f)
        def _wrapped(*args, **kwargs):
            now = datetime.utcnow()
            if now >= d['next_update']:
                try:
                    f.cache_clear()
                except AttributeError:
                    pass
                d['next_update'] = now + update_delta
            return f(*args, **kwargs)

        return _wrapped
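Only the inner _wrapper is shown above; it combines lru_cache with a whole-cache expiry driven by a timedelta built from the remaining keyword arguments. A hedged reconstruction of the enclosing decorator factory (the name timed_cache and its exact keyword interface are assumptions), with the Python 2 fallback dropped:

import functools
from datetime import datetime, timedelta

def timed_cache(**timed_cache_kwargs):
    def _wrapper(f):
        maxsize = timed_cache_kwargs.pop('maxsize', 128)
        typed = timed_cache_kwargs.pop('typed', False)
        update_delta = timedelta(**timed_cache_kwargs)   # e.g. minutes=5
        d = {'next_update': datetime.utcnow() - update_delta}
        f = functools.lru_cache(maxsize=maxsize, typed=typed)(f)

        @functools.wraps(f)
        def _wrapped(*args, **kwargs):
            now = datetime.utcnow()
            if now >= d['next_update']:
                f.cache_clear()                  # expire everything at once
                d['next_update'] = now + update_delta
            return f(*args, **kwargs)
        return _wrapped
    return _wrapper

@timed_cache(minutes=5, maxsize=256)
def fetch_rate(currency):
    return currency        # stand-in for an expensive lookup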
Example #10
def postprocess_install_function(install_file, func):
    decorators = [functools.lru_cache(maxsize=None)]
    if not getattr(func, 'is_utility', False):
        decorators.append(source_dir_decorator(install_file))
    decorators.append(state_decorator)
    for decorator in decorators:
        func = decorator(func)
    return func
Example #11
    def decorate(f):
        cache_wrapper = lru_cache(**kwargs)(f)
        LRU_CACHES.append(cache_wrapper)

        @wraps(cache_wrapper)
        def jsonify_wrapper(*params, **kwargs):
            return jsonify(data=cache_wrapper(*params, **kwargs))
        return jsonify_wrapper
Example #12
 def decorator(func):
     lru = functools.lru_cache(maxsize)(func)
     if groups:
         for group in groups:
             __lru_funcs_by_group[group].append(lru)
     else:
         # use lru_funcs_by_group[None] as a default group
         __lru_funcs_by_group[None].append(lru)
     return lru
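Each cached function is recorded in __lru_funcs_by_group so that related caches can later be flushed together; the registry and its clearing helper are not shown in the snippet. A hedged sketch of what they could look like (the registry name follows the snippet, the helper itself is an assumption):

import collections

# Module-level registry assumed by the decorator above.
__lru_funcs_by_group = collections.defaultdict(list)

def clear_lru_group(group=None):
    """Hypothetical helper: clear every cache registered under `group`
    (None being the default group used by the decorator)."""
    for lru in __lru_funcs_by_group[group]:
        lru.cache_clear()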
Example #13
def cached(func):
    """Like the `functools.lru_cache` decorator, but works (as a no-op)
    on Python < 3.2.
    """
    if hasattr(functools, 'lru_cache'):
        return functools.lru_cache(maxsize=128)(func)
    else:
        # Do nothing when lru_cache is not available.
        return func
Example #14
def method_cache(method):
	"""
	Wrap lru_cache to support storing the cache data in the object instances.

	Abstracts the common paradigm where the method explicitly saves an
	underscore-prefixed protected property on first call and returns that
	subsequently.

	>>> class MyClass:
	...     calls = 0
	...
	...     @method_cache
	...     def method(self, value):
	...         self.calls += 1
	...         return value

	>>> a = MyClass()
	>>> a.method(3)
	3
	>>> for x in range(75):
	...     res = a.method(x)
	>>> a.calls
	75

	Note that the apparent behavior will be exactly like that of lru_cache
	except that the cache is stored on each instance, so values in one
	instance will not flush values from another, and when an instance is
	deleted, so are the cached values for that instance.

	>>> b = MyClass()
	>>> for x in range(35):
	...     res = b.method(x)
	>>> b.calls
	35
	>>> a.method(0)
	0
	>>> a.calls
	75

	Note that if method had been decorated with ``functools.lru_cache()``,
	a.calls would have been 76 (due to the cached value of 0 having been
	flushed by the 'b' instance).

	See also
	http://code.activestate.com/recipes/577452-a-memoize-decorator-for-instance-methods/
	for another implementation and additional justification.
	"""
	# todo: allow the cache to be customized
	cache_wrapper = functools.lru_cache()
	def wrapper(self, *args, **kwargs):
		# it's the first call, replace the method with a cached, bound method
		bound_method = functools.partial(method, self)
		cached_method = cache_wrapper(bound_method)
		setattr(self, method.__name__, cached_method)
		return cached_method(*args, **kwargs)
	return wrapper
Example #15
    def __init__(self, parent=None):
        QtGui.QSortFilterProxyModel.__init__(self, parent)
        self._filter_strings = []
        self._cache = {}
        self._cache_fixed = {}
        self._cache_prefix = {}
        self._row_text = {}

        # Create a cached version of _filtered_rows
        self._filtered_rows_cached = lru_cache(100)(self._filtered_rows)
Example #16
    def __getattribute__(self, attr):
        if attr != 'transform':
            return object.__getattribute__(self, attr)

        if not hasattr(self, '_transform'):
            self._transform = lru_cache()(
                object.__getattribute__(self, attr))
            return self._transform
        else:
            return self._transform
Example #17
 def __init__(self, io, cache_size = 128):
     self._io = io
     io.seek(0)
     header_fields = HEADER_STRUCT.unpack(io.read(HEADER_STRUCT.size))
     if header_fields[:4] != MAGIC:
         raise MagicBytesError("the first 4 bytes {} should be {}".format(header_fields[:4], MAGIC))
     map_offset = header_fields[4]
     io.seek(map_offset)
     self._offset_map = pickle.load(io)
     self.get_vec_chunk = lru_cache(cache_size)(self._get_vec_chunk)
     self._added_items = {}
Example #18
def cachedmethod(maxsize: int = None):
    """
    A cache decorator for memoizing functions and methods.

    Args:
        maxsize: The max cache size. If set, a LRU (least recently used) cache
            is used.

    Returns:
        The cache decorated function
    """
    return functools.lru_cache(maxsize=maxsize)
Example #19
def memoize(f):
	from functools import lru_cache
	f = lru_cache()(f)
	# sigh
	def wrapper(*a,**kw):
		hits = f.cache_info().hits
		ret,created = f(*a,**kw)
		if created:
			if hits != f.cache_info().hits:
				created = False
		return ret, created
	return wrapper
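The wrapper assumes the memoized function returns a (result, created) pair and uses the hit counter to flip created back to False whenever the pair was replayed from the cache instead of being produced by a fresh call. A small usage sketch against the memoize defined above, with an illustrative get-or-create function:

registry = {}

@memoize
def get_or_create(name):
    if name in registry:
        return registry[name], False
    registry[name] = object()
    return registry[name], True

obj, created = get_or_create('a')    # created is True: the call really ran
obj, created = get_or_create('a')    # created is False: replayed from the cache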
Example #20
    def __init__(self, state_view):
        """Creates a SettingsView, given a StateView for merkle tree access.

        Args:
            state_view (:obj:`StateView`): a state view
        """
        self._state_view = state_view

        # The public method for get_settings should have its results memoized
        # via an lru_cache.  Typical use of the decorator results in the
        # cache being global, which can cause views to return incorrect
        # values across state root hash boundaries.
        self.get_setting = lru_cache(maxsize=128)(self._get_setting)
Example #21
    def __init__(self, maxsize=32, typed=True):
        """
        @keyword maxsize:  max number of objects to keep in the cache
                           Defaults to 32.
        @type    maxsize:  C{int} or C{None}

        @keyword typed:
        @type    typed:    C{bool}
        """
        super().__init__()
        cache = functools.lru_cache(maxsize=maxsize, typed=typed)
        self._get_object = cache(self._create_object)
        self.info = self._get_object.cache_info  # pylint: disable=E1101
Example #22
    def decorating_function(user_function):

        def remove_version(*args, **kwds):
            return user_function(*args[1:], **kwds)
        new_func = functools.lru_cache(maxsize=maxsize, typed=typed)(
            remove_version)

        def add_version(*args, **kwds):
            key = make_key(args, kwds, typed)
            return new_func(*((versions.setdefault(key, 0),) + args), **kwds)
        add_version.versions = versions
        add_version.cache_info = new_func.cache_info
        add_version.evict = evict
        return functools.update_wrapper(add_version, user_function)
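decorating_function implements per-argument invalidation by mixing a version counter into every cache key: bumping the counter for a key (via evict) makes the stale entry unreachable, so it simply ages out of the LRU. The surrounding factory (versions, evict, make_key) is not shown; a hedged, self-contained sketch of the same idea, using the private functools._make_key purely for illustration:

import functools

def versioned_lru_cache(maxsize=128, typed=False):
    versions = {}

    def decorating_function(user_function):
        def remove_version(_version, *args, **kwds):
            return user_function(*args, **kwds)

        new_func = functools.lru_cache(maxsize=maxsize, typed=typed)(remove_version)

        def add_version(*args, **kwds):
            key = functools._make_key(args, kwds, typed)
            return new_func(versions.setdefault(key, 0), *args, **kwds)

        def evict(*args, **kwds):
            # Invalidate the cached value for exactly these arguments.
            key = functools._make_key(args, kwds, typed)
            versions[key] = versions.get(key, 0) + 1

        add_version.versions = versions
        add_version.cache_info = new_func.cache_info
        add_version.evict = evict
        return functools.update_wrapper(add_version, user_function)

    return decorating_function

@versioned_lru_cache(maxsize=256)
def load(path):
    return path.upper()    # stand-in for reading a file

load('config.yml')         # cached
load.evict('config.yml')   # the next load('config.yml') recomputes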
Example #23
 def __init__(self, cdx_url="https://web.archive.org/cdx/search",
              maxsize=400, options=warcprox.Options()):
     """Initialize cdx server connection pool and related parameters.
     Use low timeout value and no retries to avoid blocking warcprox
     operation by a slow CDX server.
     """
     self.cdx_url = cdx_url
     self.options = options
     headers = {'User-Agent': 'warcprox', 'Accept-Encoding': 'gzip, deflate'}
     if options.cdxserver_dedup_cookies:
         headers['Cookie'] = options.cdxserver_dedup_cookies
     self.http_pool = urllib3.PoolManager(maxsize=maxsize, retries=0,
                                          timeout=2.0, headers=headers)
     self.cached_lookup = lru_cache(maxsize=1024)(self.lookup)
Example #24
File: utils.py Project: omh/yoshimi
def run_once(func):
    """ Caches the output of `func`

    A useful scenario for this function is when you want to provide a function
    to a template that returns some expensive result (e.g a SQL query) and
    allow the user to call the function many times.

    Internally this function simply wraps the provided function in Python's
    :func:`~functools.lru_cache`.

    :param function func: Function to cache return value of.
    :return function: Decorated version of `func`
    """
    return functools.lru_cache(maxsize=1)(func)
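With maxsize=1 the wrapper only remembers the most recent call, which is enough for the docstring's scenario of a template calling the same helper repeatedly with the same arguments. A tiny usage sketch against the run_once defined above:

def count_products():
    print('running expensive query')
    return 42

cached_count = run_once(count_products)   # run_once as defined above
cached_count()    # prints once, returns 42
cached_count()    # returns the cached 42 without re-running the query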
Example #25
File: util.py Project: lwzm/bb
def patch_eval():
    if eval is not eval_org:
        return False

    from functools import lru_cache
    c = lru_cache(maxsize=None)(compile)
    e = eval

    def _eval(expr, globals=None, locals=None):
        return e(c(expr, expr, "eval"), globals, locals)

    import builtins
    builtins.eval = _eval

    return True
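patch_eval speeds up repeated eval calls on identical expression strings by memoizing compile, so only the cheap evaluation of an already-compiled code object remains. A minimal standalone sketch of the same idea that avoids monkey-patching builtins:

from functools import lru_cache

_compile_cached = lru_cache(maxsize=None)(compile)

def cached_eval(expr, globals=None, locals=None):
    # Compile each distinct expression string once, then reuse the code object.
    return eval(_compile_cached(expr, expr, 'eval'), globals, locals)

for _ in range(3):
    cached_eval('1 + 2 * 3')   # compiled once, evaluated three times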
Example #26
def indexed_cache(func):

    func = functools.lru_cache()(func)

    @functools.wraps(func)
    @utils.catch(IndexError, return_value=lex.generics.index_error)
    def inner(inp, *, index, **kwargs):
        results = func(**kwargs)
        if isinstance(results, list):
            tools.save_results(inp, range(len(results)), results.__getitem__)
            return results[index - 1 if index else 0]
        else:
            return results

    return inner
Example #27
def main():
    target = 2000
    cnt = 2  # n == 1 in layer 0 and n == 2 in layer 1 satisfy PD(n) = 3
    is_prime = lru_cache(maxsize=None)(utils.is_prime)

    for layer in count(2):
        if (is_prime(layer * 6 - 1) and is_prime(layer * 6 + 1) and
                is_prime(layer * 12 + 5)):
            cnt += 1
            if cnt == target:
                return layer * (layer - 1) * 3 + 2
        if (is_prime(layer * 6 - 1) and is_prime(layer * 6 + 5) and
                is_prime(layer * 12 - 7)):
            cnt += 1
            if cnt == target:
                return layer * (layer + 1) * 3 + 1
Example #28
def default_cached_db_client(db_client: AbstractStockDatabaseClient) -> AbstractStockDatabaseClient:
    '''
    This method adds an lru_cache wrapper onto the `db_client`'s #get_historical_data_for_stocks_in_exchange
    function handle.

    Args:

    * `db_client` - an instance of an AbstractStockDatabaseClient

    Returns:

    > A cached AbstractStockDatabaseClient instance
    '''
    cache = lru_cache(maxsize=50)
    db_client.get_historical_data_for_stocks_in_exchange = cache(
        db_client.get_historical_data_for_stocks_in_exchange)
    return db_client
Example #29
    def __init__(self, token, logger=None):
        assert isinstance(token, str), "`token` must be a valid Slack API token"
        assert logger is None or not isinstance(logger, logging.Logger), "`logger` must be `None` or a logging function"

        self.client = SlackClient(token)
        if logger is None: self.logger = logging.getLogger(self.__class__.__name__)
        else: self.logger = logger

        # cached versions of methods
        self.get_user_info_by_id_cached = lru_cache(maxsize=256)(self.get_user_info_by_id)

        # incoming message fields
        self.unprocessed_incoming_messages = deque() # store unprocessed messages to allow message peeking

        # outgoing message fields
        self.max_message_id = 1 # every message sent over RTM needs a unique positive integer ID - this should technically be handled by the Slack library, but that's broken as of now
        self.last_say_time = 0 # store last message send timestamp to rate limit sending
        self.bot_user_id = None # ID of this bot user
Example #30
    def as_view(cls, **initkwargs):
        view = patched_view = super().as_view(**initkwargs)
        last_modified_evaluator = functools.lru_cache()(cls.last_modified)
        patched_view = last_modified(last_modified_evaluator)(patched_view)
        patched_view = cache_page(
            lazy(cls.expires, int)(),
            key_prefix=last_modified_evaluator,
        )(patched_view)
        view = conditional(cls.cache_headers_allowed, patched_view)(view)

        @functools.wraps(cls.as_view)
        def logging_view(request, *args, **kwargs):

            request_logger.debug(
                'request_method: %(request_method)s, '
                'request_path: %(request_path)s, '
                'request_headers: %(request_headers)s, '
                'request_params: %(request_params)s, '
                'request_data: %(request_data)s, ',
                dict(
                    request_method=request.method,
                    request_path=request.path,
                    request_headers=request.META,
                    request_params=request.GET,
                    request_data=request.POST,
                ),
            )

            response = view(request, *args, **kwargs)

            response_logger.debug(
                'response_code: %(response_code)s, '
                'response_headers: %(response_headers)s, '
                'response_data: %(response_data)s',
                dict(
                    response_code=response.status_code,
                    response_headers=response.items(),
                    response_data=response.content,
                ),
            )

            return response

        return logging_view
Example #31
def assign_readmap(qryque:    list,
                   subque:    list,
                   data:      dict,
                   rank:       str,
                   sample:     str,
                   assigners: dict,
                   cache:      int = 1024,
                   rank2dir:  dict = None,
                   outzip:     str = None,
                   tree:      dict = None,
                   rankdic:   dict = None,
                   namedic:   dict = None,
                   root:       str = None,
                   uniq:      bool = False,
                   major:    float = None,
                   above:     bool = False,
                   subok:     bool = False,
                   unasgd:    bool = False,
                   strata:    dict = None):
    """Assign query sequences in a query-to-subjects map to classification
    units based on their subjects.

    Parameters
    ----------
    qryque : iterable of str
        Query queue to assign.
    subque : iterable of frozenset
        Subject(s) queue for assignment.
    data : dict
        Master data structure.
    rank : str
        Target rank to assign to.
    sample : str
        Sample ID.
    assigners : dict of callable
        Per-rank assigners.
    cache : int, optional
        LRU cache size for classification results at each rank.
    rank2dir : dict, optional
        Directory of output maps per rank.
    outzip : str, optional
        Output read map compression method (gz, bz2, xz or None).
    tree : dict, optional
        Hierarchical classification system.
    rankdic : dict, optional
        Rank dictionary.
    namedic : dict, optional
        Taxon name dictionary.
    root : str, optional
        Root identifier.
    uniq : bool, optional
        Assignment must be unique. Otherwise, report all possible assignments
        and normalize counts (for none- and fixed-rank assignments).
    major : float, optional
        In given-rank classification, perform majority-rule assignment based on
        this fraction threshold.
    above : bool, optional
        In given-rank classification, assignment above the specified rank is
        acceptable.
    subok : bool, optional
        In free-rank classification, allow assigning sequences to their direct
        subjects instead of higher classification units, if applicable.
    unasgd : bool, optional
        Report unassigned sequences.
    strata : dict, optional
        Read-to-feature map for stratification.
    """
    # determine assigner and initiate (if not already)
    if rank is None or rank == 'none' or tree is None:
        if 'none' not in assigners:
            assigners['none'] = lru_cache(maxsize=cache)(partial(
                assign_none, uniq=uniq))
        assigner = assigners['none']
    elif rank == 'free':
        if 'free' not in assigners:
            assigners['free'] = lru_cache(maxsize=cache)(partial(
                assign_free, tree=tree, root=root, subok=subok))
        assigner = assigners['free']
    else:
        if rank not in assigners:
            assigners[rank] = lru_cache(maxsize=cache)(partial(
                assign_rank, rank=rank, tree=tree, rankdic=rankdic, root=root,
                major=major, above=above, uniq=uniq))
        assigner = assigners[rank]

    # call assigner on subject(s) per query
    resque = map(assigner, subque)

    # report or drop unassigned
    if unasgd:
        resque = [x or 'Unassigned' for x in resque]
    else:
        resque = list(resque)
        keep = list(map(None.__ne__, resque))
        qryque, resque = list(compress(qryque, keep)), list(compress(
            resque, keep))

    # write classification map
    if rank2dir is not None:
        outfp = join(rank2dir[rank], f'{sample}.txt')
        with openzip(f'{outfp}.{outzip}' if outzip else outfp, 'at') as fh:
            write_readmap(fh, qryque, resque, namedic)

    # count taxa
    counts = count_strata(qryque, resque, strata) if strata else count(resque)

    # combine old and new counts
    if sample in data[rank]:
        sum_dict(data[rank][sample], counts)
    else:
        data[rank][sample] = counts
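Each assigner above is built once per rank by freezing the rank-specific options into a functools.partial and wrapping that partial in lru_cache, so the only thing that varies between calls, the frozenset of subjects, becomes the cache key. A stripped-down sketch of this pattern with an illustrative assign function:

from functools import lru_cache, partial

def assign(subjects, rank=None, uniq=False):
    # Illustrative stand-in for assign_none / assign_free / assign_rank.
    return '{}:{}:{}'.format(rank, sorted(subjects), uniq)

assigners, cache = {}, 1024
rank = 'genus'
if rank not in assigners:
    assigners[rank] = lru_cache(maxsize=cache)(partial(assign, rank=rank, uniq=True))
assigner = assigners[rank]

assigner(frozenset({'s1', 's2'}))   # computed and cached
assigner(frozenset({'s2', 's1'}))   # equal frozenset -> cache hit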
Example #32
#!/usr/bin/python3

# @begin:license
#
# Copyright (c) 2015-2019, Benjamin Niemann <*****@*****.**>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @end:license

import functools

memoize = functools.lru_cache(maxsize=None, typed=True)
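Here memoize is simply lru_cache pre-configured as an unbounded, type-sensitive cache, ready to use as a plain decorator. A one-line usage sketch:

@memoize
def fib(n):
    return n if n < 2 else fib(n - 1) + fib(n - 2)

fib(200)      # fast thanks to memoization
fib(200.0)    # typed=True caches the float argument separately from the int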
Example #33
 def wrapped_func(self, *args, **kwargs):
     cached_method = functools.lru_cache(*lru_args, **lru_kwargs)(func)
     cached_method = cached_method.__get__(self, self.__class__)
     setattr(self, func.__name__, cached_method)
     return cached_method(*args, **kwargs)
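wrapped_func is the body of a per-instance method cache: on first use it wraps the undecorated function in lru_cache, binds the wrapper to the instance with __get__, and shadows the method on that instance so subsequent lookups bypass the descriptor entirely. A hedged sketch of the enclosing decorator (its name and the surrounding class are assumptions):

import functools

def lru_cache_method(*lru_args, **lru_kwargs):
    """Hypothetical enclosing decorator for the wrapped_func shown above."""
    def decorator(func):
        @functools.wraps(func)
        def wrapped_func(self, *args, **kwargs):
            cached_method = functools.lru_cache(*lru_args, **lru_kwargs)(func)
            cached_method = cached_method.__get__(self, self.__class__)
            setattr(self, func.__name__, cached_method)
            return cached_method(*args, **kwargs)
        return wrapped_func
    return decorator

class Report:
    @lru_cache_method(maxsize=32)
    def totals(self, year):
        return year * 2      # stand-in for an expensive aggregation

r = Report()
r.totals(2020)   # installs r.totals as a cached bound method
r.totals(2020)   # cache hit, scoped to this instance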
Example #34
def is_opentype_cff_font(filename):
    """
    Return whether the given font is a Postscript Compact Font Format Font
    embedded in an OpenType wrapper.  Used by the PostScript and PDF backends
    that can not subset these fonts.
    """
    if os.path.splitext(filename)[1].lower() == '.otf':
        with open(filename, 'rb') as fd:
            return fd.read(4) == b"OTTO"
    else:
        return False


_fmcache = os.path.join(mpl.get_cachedir(),
                        'fontlist-v{}.json'.format(FontManager.__version__))
fontManager = None

_get_font = lru_cache(64)(ft2font.FT2Font)
# FT2Font objects cannot be used across fork()s because they reference the same
# FT_Library object.  While invalidating *all* existing FT2Fonts after a fork
# would be too complicated to be worth it, the main way FT2Fonts get reused is
# via the cache of _get_font, which we can empty upon forking (in Py3.7+).
if hasattr(os, "register_at_fork"):
    os.register_at_fork(after_in_child=_get_font.cache_clear)


def get_font(filename, hinting_factor=None):
    if hinting_factor is None:
        hinting_factor = rcParams['text.hinting_factor']
    return _get_font(os.fspath(filename), hinting_factor)


def _rebuild():
Example #35
class TimedeltaElement:
    class Field:
        QUANTITY = "quantity"
        UNIT = "unit"
        SPAN = "span"

    @classmethod
    def element2quantity(cls, element):
        return element[cls.Field.QUANTITY]

    @classmethod
    def element2unit(cls, element):
        return element[cls.Field.UNIT]

    @classmethod
    def element2span(cls, element):
        return element[cls.Field.SPAN]

    @classmethod
    @WARMER.add(cond=not HenriqueEnv.is_skip_warmup())
    @FunctionTool.wrapper2wraps_applied(lru_cache(maxsize=2))
    def pattern_number(cls):
        rstr_leftbound = RegexTool.rstr2left_bounded(
            r"\d{1,2}", RegexTool.left_wordbounds())

        rstr_bound_right_list = lchain(
            RegexTool.right_wordbounds(),
            lchain(*TimedeltaEntityUnit.gazetteer_all().values()),
        )
        rstr_bound = RegexTool.rstr2right_bounded(rstr_leftbound,
                                                  rstr_bound_right_list)
        return re.compile(rstr_bound, re.I)

    @classmethod
    @FunctionTool.wrapper2wraps_applied(
        lru_cache(maxsize=HenriqueEntity.Cache.DEFAULT_SIZE))
    def text2element_list(cls, text_in, lang):
        logger = HenriqueLogger.func_level2logger(cls.text2element_list,
                                                  logging.DEBUG)

        langs = HenriqueLocale.lang2langs_recognizable(lang)
        logger.debug({"langs": langs})

        match_list_number = list(cls.pattern_number().finditer(text_in))
        span_list_number = lmap(lambda m: m.span(), match_list_number)

        matcher = TimedeltaEntityUnit.langs2matcher(langs)
        span_value_list_timedelta_unit = list(
            matcher.text2span_value_iter(text_in))

        spans_list = [
            span_list_number,
            lmap(ig(0), span_value_list_timedelta_unit),
        ]

        gap2is_valid = partial(StringTool.str_span2match_blank_or_nullstr,
                               text_in)
        indextuple_list = ContextfreeTool.spans_list2reducible_indextuple_list(
            spans_list, gap2is_valid)

        def indextuple2element(indextuple):
            i, j = indextuple

            quantity = int(match_list_number[i].group())
            unit = span_value_list_timedelta_unit[j][1]

            span = (
                span_list_number[i][0],
                span_value_list_timedelta_unit[j][0][1],
            )

            element = {
                cls.Field.QUANTITY: quantity,
                cls.Field.UNIT: unit,
                cls.Field.SPAN: span,
            }

            return element

        element_list = lmap(indextuple2element, indextuple_list)
        return element_list

    @classmethod
    def element2relativedelta(cls, element):
        logger = HenriqueLogger.func_level2logger(cls.element2relativedelta,
                                                  logging.DEBUG)

        unit = cls.element2unit(element)
        quantity = cls.element2quantity(element)
        kwargs = {TimedeltaEntityUnit.unit2plural(unit): quantity}
        # logger.debug({"kwargs":kwargs})
        return relativedelta(**kwargs)
Example #36
T = t.TypeVar("T")
U = t.TypeVar("U")
T_NNF = t.TypeVar("T_NNF", bound="NNF")
U_NNF = t.TypeVar("U_NNF", bound="NNF")
T_NNF_co = t.TypeVar("T_NNF_co", bound="NNF", covariant=True)

# Bottom type with no values
# This works in mypy but not pytype
# t.Union[()] works too but not at runtime
# NoReturn doesn't exist in some Python releases, hence the guard
if t.TYPE_CHECKING:
    Bottom = t.NoReturn
else:
    Bottom = None

memoize = t.cast(t.Callable[[T], T], functools.lru_cache(maxsize=None))


def weakref_memoize(
        func: t.Callable[[T_NNF], T]) -> "_WeakrefMemoized[T_NNF, T]":
    """Make a function cache its return values using weakrefs.

    This makes it possible to remember sentences' properties without keeping
    them in memory forever.

    To keep memory use reasonable, this decorator should only be used on
    methods that will only be called on full sentences, not individual nodes
    within a sentence.

    The current implementation has a problem: WeakKeyDictionary operates on
    object equality, not identity. Consider the following:
Example #37
# Python 3.x Utility Functions - From Peter Norvig's Advent of Code 2017 ipynb with some small modifications (https://github.com/norvig/pytudes/blob/master/ipynb/Advent%202017.ipynb)

import random
import re
from collections import defaultdict, deque, abc
from functools import lru_cache
from heapq import heappop, heappush
from itertools import (combinations, chain, islice, takewhile, zip_longest)

import aocd
import os
from string import ascii_lowercase

letters = ascii_lowercase

cache = lru_cache(None)

cat = ''.join

Ø = frozenset()  # Empty set
inf = float('inf')
BIG = 10**999


def Input(day, test=False):
    "Open this day's input file."
    directory = '/home/didrik/git/advent-of-code-2018/input/'
    is_test = "_test" if test else ""
    filename = directory + 'input{}{}.txt'.format(day, is_test)
    try:
        return open(filename)
Example #38
def cache(user_function, /):
    return lru_cache(maxsize=None)(user_function)
Example #39
def cache(func):
    decorated = lru_cache()(func)
    cache_store.append(decorated)
    return decorated
Example #40
    def __init__(
        self,
        sounds_path=None,
        min_snr_in_db=0,
        max_snr_in_db=24,
        min_time_between_sounds=4.0,
        max_time_between_sounds=16.0,
        burst_probability=0.22,
        min_pause_factor_during_burst=0.1,
        max_pause_factor_during_burst=1.1,
        min_fade_in_time=0.005,
        max_fade_in_time=0.08,
        min_fade_out_time=0.01,
        max_fade_out_time=0.1,
        p=0.5,
        lru_cache_size=64,
    ):
        """
        :param sounds_path: Path to a folder that contains sound files to randomly mix in. These
            files can be flac, mp3, ogg or wav.
        :param min_snr_in_db: Minimum signal-to-noise ratio in dB. A lower value means the added
            sounds/noises will be louder.
        :param max_snr_in_db: Maximum signal-to-noise ratio in dB. A lower value means the added
            sounds/noises will be louder.
        :param min_time_between_sounds: Minimum pause time between the added sounds/noises
        :param max_time_between_sounds: Maximum pause time between the added sounds/noises
        :param burst_probability: The probability of adding an extra sound/noise that overlaps
        :param min_pause_factor_during_burst: Min value of how far into the current sound (as
            fraction) the burst sound should start playing. The value must be greater than 0.
        :param max_pause_factor_during_burst: Max value of how far into the current sound (as
            fraction) the burst sound should start playing. The value must be greater than 0.
        :param min_fade_in_time: Min sound/noise fade in time in seconds. Use a value larger
            than 0 to avoid a "click" at the start of the sound/noise.
        :param max_fade_in_time: Max sound/noise fade in time in seconds. Use a value larger
            than 0 to avoid a "click" at the start of the sound/noise.
        :param min_fade_out_time: Min sound/noise fade out time in seconds. Use a value larger
            than 0 to avoid a "click" at the end of the sound/noise.
        :param max_fade_out_time: Max sound/noise fade out time in seconds. Use a value larger
            than 0 to avoid a "click" at the end of the sound/noise.
        :param p: The probability of applying this transform
        :param lru_cache_size: Maximum size of the LRU cache for storing noise files in memory
        """
        super().__init__(p)
        self.sound_file_paths = get_file_paths(sounds_path)
        self.sound_file_paths = [str(p) for p in self.sound_file_paths]
        assert len(self.sound_file_paths) > 0
        assert min_snr_in_db <= max_snr_in_db
        assert min_time_between_sounds <= max_time_between_sounds
        assert 0.0 < burst_probability <= 1.0
        if burst_probability == 1.0:
            assert (min_pause_factor_during_burst > 0.0
                    )  # or else an infinite loop will occur
        assert 0.0 < min_pause_factor_during_burst <= 1.0
        assert max_pause_factor_during_burst > 0.0
        assert max_pause_factor_during_burst >= min_pause_factor_during_burst
        assert min_fade_in_time >= 0.0
        assert max_fade_in_time >= 0.0
        assert min_fade_in_time <= max_fade_in_time
        assert min_fade_out_time >= 0.0
        assert max_fade_out_time >= 0.0
        assert min_fade_out_time <= max_fade_out_time

        self.min_snr_in_db = min_snr_in_db
        self.max_snr_in_db = max_snr_in_db
        self.min_time_between_sounds = min_time_between_sounds
        self.max_time_between_sounds = max_time_between_sounds
        self.burst_probability = burst_probability
        self.min_pause_factor_during_burst = min_pause_factor_during_burst
        self.max_pause_factor_during_burst = max_pause_factor_during_burst
        self.min_fade_in_time = min_fade_in_time
        self.max_fade_in_time = max_fade_in_time
        self.min_fade_out_time = min_fade_out_time
        self.max_fade_out_time = max_fade_out_time
        self._load_sound = functools.lru_cache(maxsize=lru_cache_size)(
            AddShortNoises.__load_sound)
Example #41
0
# Patch requests_match in cassette for speed-up


def requests_match(r1, r2, matchers):
    """Skip logging and speed-up maching."""
    return all(m(r1, r2) for m in matchers)


vcr.cassette.requests_match = requests_match

# Patch urlparse to speed it up for Python 3
try:
    from functools import lru_cache
    from urllib.parse import urlparse
    vcr.request.urlparse = lru_cache(maxsize=None)(urlparse)
except ImportError:
    pass


# patch uuid4 for consistent keys
def fake_uuid4():
    x = 0
    while 1:
        yield 'fakeuuid-%s-' % x
        x += 1


uuid.uuid4 = functools.partial(next, fake_uuid4())

Example #42
        if len(result) == 32 and re.match(r'[a-f0-9]{32}', result):
            return 'data'
        else:
            if result[0] == '<':
                result = result.strip('<>').split()[0].split('.')[-1]
            return result
    except Exception:
        return 'Other'


try:
    from functools import lru_cache
except ImportError:
    pass
else:
    key_split = lru_cache(100000)(key_split)

if PY3:
    def key_split_group(x):
        """A more fine-grained version of key_split

        >>> key_split_group('x')
        'x'
        >>> key_split_group('x-1')
        'x-1'
        >>> key_split_group('x-1-2-3')
        'x-1-2-3'
        >>> key_split_group(('x-2', 1))
        'x-2'
        >>> key_split_group("('x-2', 1)")
        'x-2'
Example #43
    def __init__(
            self,
            mongo_client,
            database='stk',
            molecule_collection='molecules',
            constructed_molecule_collection='constructed_molecules',
            position_matrix_collection='position_matrices',
            jsonizer=ConstructedMoleculeJsonizer(),
            dejsonizer=ConstructedMoleculeDejsonizer(),
            lru_cache_size=128,
            indices=('InChIKey', ),
    ):
        """
        Initialize a :class:`.ConstructedMoleculeMongoDb`.

        Parameters
        ----------
        mongo_client : :class:`pymongo.MongoClient`
            The database client.

        database : :class:`str`
            The name of the database to use.

        molecule_collection : :class:`str`
            The name of the collection which stores molecular
            information.

        constructed_molecule_collection : :class:`str`
            The name of the collection which stored constructed
            molecule information, that does not belong in the
            `molecule_collection`.

        position_matrix_collection : :class:`str`
            The name of the collection which stores the position
            matrices of the molecules put into and retrieved from
            the database.

        jsonizer : :class:`.ConstructedMoleculeJsonizer`
            Used to create the JSON representations of molecules
            stored in the database.

        dejsonizer : :class:`.ConstructedMoleculeDejsonizer`
            Used to create :class:`.Molecule` instances from their
            JSON representations.

        lru_cache_size : :class:`int`, optional
            A RAM-based least recently used cache is used to avoid
            reading and writing to the database repeatedly. This sets
            the number of molecules which fit into the LRU cache. If
            ``None``, the cache size will be unlimited.

        indices : :class:`tuple` of :class:`str`, optional
            The names of molecule keys, on which an index should be
            created, in order to minimize lookup time.

        """

        database = mongo_client[database]
        self._molecules = database[molecule_collection]
        self._constructed_molecules = database[constructed_molecule_collection]
        self._position_matrices = database[position_matrix_collection]
        self._jsonizer = jsonizer
        self._dejsonizer = dejsonizer

        self._get = lru_cache(maxsize=lru_cache_size)(self._get)
        self._put = lru_cache(maxsize=lru_cache_size)(self._put)

        for index in indices:
            # Do not create the same index twice.
            if f'{index}_1' not in self._molecules.index_information():
                self._molecules.create_index(index)
            if (f'{index}_1'
                    not in self._constructed_molecules.index_information()):
                self._constructed_molecules.create_index(index)
            if (f'{index}_1'
                    not in self._position_matrices.index_information()):
                self._position_matrices.create_index(index)
Example #44
class TimedeltaEntity:
    @classmethod
    def entity_type(cls):
        return ClassTool.class2fullpath(cls)

    @classmethod
    def text2entity_list(cls, text_in, config=None):
        locale = HenriqueEntity.Config.config2locale(
            config) or HenriqueLocale.DEFAULT
        lang = LocaleTool.locale2lang(locale) or LocaleTool.locale2lang(
            HenriqueLocale.DEFAULT)

        return cls._text2entity_list(text_in, lang)

    @classmethod
    @FunctionTool.wrapper2wraps_applied(
        lru_cache(maxsize=HenriqueEntity.Cache.DEFAULT_SIZE))
    def _text2entity_list(cls, text_in, lang):
        element_list = TimedeltaElement.text2element_list(text_in, lang)
        if not element_list:
            return []

        span_list_element = lmap(TimedeltaElement.element2span, element_list)

        def timedelta_list2indexes_group():
            gap2is_valid = partial(StringTool.str_span2match_blank_or_nullstr,
                                   text_in)

            n = len(element_list)
            i_list_sorted = sorted(range(n),
                                   key=lambda i: span_list_element[i])

            indexes_continuous = [i_list_sorted[0]]
            for j in range(1, n):
                i_prev, i_this = i_list_sorted[j - 1], i_list_sorted[j]

                span_gap = (
                    span_list_element[i_prev][1],
                    span_list_element[i_this][0],
                )
                if gap2is_valid(span_gap):
                    indexes_continuous.append(i_this)
                    continue

                yield indexes_continuous
                indexes_continuous = [i_this]

            yield indexes_continuous

        indexes_list = list(timedelta_list2indexes_group())

        def indexes2entity(indexes):
            span = (
                span_list_element[indexes[0]][0],
                span_list_element[indexes[-1]][1],
            )

            value = ListTool.indexes2filtered(element_list, indexes)

            entity = {
                FoxylibEntity.Field.SPAN: span,
                FoxylibEntity.Field.TEXT:
                StringTool.str_span2substr(text_in, span),
                FoxylibEntity.Field.VALUE: value,
                FoxylibEntity.Field.TYPE: cls.entity_type(),
            }
            return entity

        entity_list = lmap(indexes2entity, indexes_list)
        return entity_list

    @classmethod
    def entity2relativedelta(cls, entity):
        logger = HenriqueLogger.func_level2logger(cls.entity2relativedelta,
                                                  logging.DEBUG)

        element_list = FoxylibEntity.entity2value(entity)
        relativedelta_list = lmap(TimedeltaElement.element2relativedelta,
                                  element_list)
        logger.debug({"relativedelta_list": relativedelta_list})

        return sum(relativedelta_list, relativedelta(days=0))
Example #45
def lru_cache_typesafe(func: Callable[..., T]) -> T:
    return functools.lru_cache(maxsize=None)(func)  # type: ignore
Example #46
# You should have received a copy of the GNU Affero General Public License
# along with OpenQuake.  If not, see <http://www.gnu.org/licenses/>.
import collections
import operator
import logging
import io
import requests
from h5py._hl.dataset import Dataset
from h5py._hl.group import Group
import numpy
try:
    from functools import lru_cache
except ImportError:
    from openquake.risklib.utils import memoized
else:
    memoized = lru_cache(100)
from openquake.baselib import datastore, config
from openquake.baselib.hdf5 import ArrayWrapper, vstr
from openquake.baselib.general import group_array, deprecated
from openquake.baselib.python3compat import encode, decode
from openquake.calculators import getters
from openquake.calculators.export.loss_curves import get_loss_builder
from openquake.commonlib import calc, util, oqvalidation

U32 = numpy.uint32
F32 = numpy.float32
F64 = numpy.float64
TWO32 = 2**32


def cast(loss_array, loss_dt):
Example #47
    def __init__(self,
                 glob,
                 storage_options=None,
                 batch_size=None,
                 dtype=None,
                 iterations=None,
                 eager_load_batches=None,
                 fits_in_node_memory=True,
                 fits_in_cluster_memory=True,
                 replicas=1,
                 worker=0,
                 cache_dir=None,
                 tensor_cache_size=1,
                 partition_cache_size=None,
                 batch_cache_size=1):

        self.glob = glob
        self.dtype = dtype

        # set the platform-specific temporary directory
        cache_dir = cache_dir if cache_dir else tempfile.gettempdir()

        # specify cache allocation for raw tensor data in instances
        self.tensor_cache_size = tensor_cache_size
        if self.tensor_cache_size:
            self.__tensor_by_df_idx = functools.lru_cache(
                maxsize=self.tensor_cache_size)(self.__tensor_by_df_idx)

        # specify cache allocation for data partitions in instances, None means unlimited
        self.partition_cache_size = partition_cache_size
        if self.partition_cache_size:
            self.__df_by_obj = functools.lru_cache(
                maxsize=self.partition_cache_size)(self.__df_by_obj)
        else:
            self.__df_by_obj = functools.lru_cache(maxsize=None)(
                self.__df_by_obj)

        # specify cache allocation per batch in instances
        self.batch_cache_size = batch_cache_size
        if self.batch_cache_size:
            self.__df_by_objs_tuple = functools.lru_cache(
                maxsize=self.batch_cache_size)(self.__df_by_objs_tuple)

        # find out the protocol of the glob, e.g. s3, gs, hdfs, etc
        protocol, _ = fsspec.core.split_protocol(glob)
        eager_load_batches = True if protocol in (
            'file') and eager_load_batches is None else eager_load_batches

        # use anonymous connection unless specified otherwise
        storage_options = storage_options if storage_options else {
            'anon': True
        }

        # setup a caching filesystem
        self.fs = fsspec.filesystem("filecache",
                                    target_protocol=protocol,
                                    target_options=storage_options,
                                    cache_storage=cache_dir)

        # get the object paths matching the glob
        self.objs = self.fs.glob(glob)
        self.objs_indicies = [0]

        self.iterations = iterations if iterations else float('nan')

        if fits_in_node_memory:

            if eager_load_batches:
                self.objs_indicies = self.__expand_obj_idx_in_full(
                    self.objs_indicies, self.objs)
                self.dataset_size = self.__max_batch_idx(self.objs_indicies)
                self.batch_size = batch_size if batch_size else self.dataset_size
            else:
                assert batch_size and type(
                    batch_size
                ) is int and batch_size > 0, "Eager loading batches of batches is disabled, so the batch size must be specified as a positive (greater than 0) integer"
                self.batch_size = batch_size

        elif fits_in_cluster_memory:

            assert type(
                replicas
            ) is int and replicas > 0, "The number of workers must be a positive integer"
            assert type(
                worker
            ) is int and worker > -1 and worker < replicas, "The worker must be in the range [0, replicas)"

            self.worker = worker
            self.replicas = replicas

            self.objects_per_worker = int(
                math.ceil(len(self.objs) / self.replicas))

            self.objs = self.objs[worker *
                                  self.objects_per_worker:(worker + 1) *
                                  self.objects_per_worker]

            assert batch_size and type(
                batch_size
            ) is int and batch_size > 0, "The batch size must be specified as a positive (greater than 0) integer"
            self.batch_size = batch_size
            if eager_load_batches:
                self.objs_indicies = self.__expand_obj_idx_in_full(
                    self.objs_indicies, self.objs)

        else:
            assert batch_size and type(
                batch_size
            ) is int and batch_size > 0, "The batch size must be specified as a positive (greater than 0) integer"
            self.batch_size = batch_size
            if eager_load_batches:
                print(
                    "Warning: the batch does not fit in node or in cluster memory but eager loading is enabled (eager_load_batches=True), so you may experience out of memory conditions during eager loading."
                )
                self.objs_indicies = self.__expand_obj_idx_in_full(
                    self.objs_indicies, self.objs)
Example #48
    if ('bfile' not in values
            and app.url_map.is_endpoint_expecting(endpoint, 'bfile')):
        values['bfile'] = g.beancount_file_slug
    if endpoint in ['static', 'index']:
        return
    if 'interval' not in values:
        values['interval'] = request.args.get('interval')
    if 'conversion' not in values:
        values['conversion'] = request.args.get('conversion')
    for filter_name in ['account', 'filter', 'time']:
        if filter_name not in values:
            values[filter_name] = g.filters[filter_name]


app.add_template_global(datetime.date.today, 'today')
CACHED_URL_FOR = functools.lru_cache(2048)(flask.url_for)


@app.template_global()
def url_for(endpoint, **values):
    """A wrapper around flask.url_for that uses a cache."""
    _inject_filters(endpoint, values)
    return CACHED_URL_FOR(endpoint, **values)


@app.template_global()
def url_for_current(**kwargs):
    """URL for current page with updated request args."""
    if not kwargs:
        return url_for(request.endpoint, **request.view_args)
    args = request.view_args.copy()
Example #49
CACHED = Bool(1)
del Bool

def _opener_open(req, encoding):
    global _opener
    if _opener is None:
        install_default_handlers()
    try:
        response = HTTPResponse(req, _opener.open(req), encoding)
    finally:
        for r in req.responses:
            del r.request.responses  # clear circular reference
    return response

_opener_open_cached = functools.lru_cache(maxsize=None)(_opener_open)

def add_default_handler(handler):
    '''Added handlers will be used via install_default_handlers().

    Notice:
        this is use to setting GLOBAL (urllib) HTTP proxy and HTTPS verify,
        use it carefully.
    '''
    global _cookiejar
    if isinstance(handler, type):
        handler = handler()
    if isinstance(handler, _HTTPRedirectHandler):
        logger.warning('HTTPRedirectHandler is not custom!')
        return
    remove_default_handler(handler, True)
Example #50
def _make_selector(pattern_parts):
    pat = pattern_parts[0]
    child_parts = pattern_parts[1:]
    if pat == '**':
        cls = _RecursiveWildcardSelector
    elif '**' in pat:
        raise ValueError("Invalid pattern: '**' can only be an entire path component")
    elif _is_wildcard_pattern(pat):
        cls = _WildcardSelector
    else:
        cls = _PreciseSelector
    return cls(pat, child_parts)

if hasattr(functools, "lru_cache"):
    _make_selector = functools.lru_cache()(_make_selector)


class _Selector:
    """A selector matches a specific glob pattern part against the children
    of a given path."""

    def __init__(self, child_parts):
        self.child_parts = child_parts
        if child_parts:
            self.successor = _make_selector(child_parts)
            self.dironly = True
        else:
            self.successor = _TerminatingSelector()
            self.dironly = False
Example #51
 def functools_cache(user_function):
     return functools.lru_cache(maxsize=None)(user_function)
Example #52
import pygame
import pyaudio
import numpy as np
import time
import wave
import scipy.fftpack
from functools import lru_cache

from video import make_video
pygame.init()

surface_size = 1000
main_surface = pygame.display.set_mode((1000, 1000), pygame.DOUBLEBUF)
my_clock = pygame.time.Clock()

lru_cache(maxsize=None)  # note: as written this creates a decorator but never applies it, so it has no effect


def draw_tree(inord,
              order,
              theta,
              thetab,
              sz,
              posn,
              heading,
              color=(0, 0, 0),
              depth=0):

    trunk_ratio = (1 + 5**0.5) / 2
    trunk = sz * trunk_ratio
    delta_y = trunk * np.sin((heading * 400 / 400).real)
Example #53
from functools import lru_cache
from copy import deepcopy
from functools import partial, wraps
from concurrent.futures import ThreadPoolExecutor

from alephclient.api import AlephAPI, EntityResultSet, AlephException
from tqdm.autonotebook import tqdm
import requests

from followthemoney import model
from followthemoney.exc import InvalidData

log = logging.getLogger(__name__)

alephclient = AlephAPI(timeout=60)
alephclient._request = lru_cache(2048)(alephclient._request)


def aleph_initializer(initializer=None):
    global alephclient
    alephclient = AlephAPI(timeout=60)
    adapter = requests.adapters.HTTPAdapter(pool_connections=52)
    alephclient.session.mount("http://", adapter)
    alephclient.session.mount("https://", adapter)
    if initializer is not None:
        initializer()


@wraps(ThreadPoolExecutor)
def AlephPool(*args, **kwargs):
    kwargs["initializer"] = aleph_initializer(
Example #54
            flag = False
    if flag:
        ans += 1
print(ans)

129 ☆dp
[Analysis]
Looking at it from the end, there is only one way to get from step n-1 to step n.
From step n-2, the number of ways is the sum of the ways from n-1 and from n; continuing like this,
the recurrence f(x) = f(x-1) + f(x-2) holds. When you want to compute information that depends on
information that has already been determined, DP (dynamic programming) is the right tool.
In this problem, build the table with dp[n] = 1 and dp[a[i]] = 0.

[Reference]
Apparently this can also be solved with memoized recursion, but I don't know that solution yet.
To use memoized recursion in Python, put @lru_cache() in front of the recursive function.
from functools import lru_cache
@lru_cache()
def fibo(n):
    if n == 0:
        return 0
    if n == 1:
        return 1
    return fibo(n-1) + fibo(n-2)
print(fibo(2)*fibo(2))
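For the staircase problem the note describes (one or two steps at a time, the steps in a are broken, answer mod 10**9+7), the memoized-recursion solution the note says it is missing could look like the following sketch, written in the equivalent forward direction (ways to reach step x):

import sys
from functools import lru_cache

sys.setrecursionlimit(10**6)
MOD = 10**9 + 7

def count_ways(n, broken):
    broken = set(broken)

    @lru_cache(maxsize=None)
    def f(x):
        if x in broken:
            return 0
        if x <= 1:
            return 1
        return (f(x - 1) + f(x - 2)) % MOD

    return f(n)

print(count_ways(4, [2]))   # 1: the only route is 0 -> 1 -> 3 -> 4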

TLE
n,m = map(int, input().split())
a = [int(input()) for i in range(m)]
mod = (10**9)+7
dp = [1] + [0]*(n)
Example #55
from openquake.baselib import config, hdf5, general
from openquake.baselib.hdf5 import ArrayWrapper
from openquake.baselib.general import group_array, println
from openquake.baselib.python3compat import encode, decode
from openquake.hazardlib.gsim.base import ContextMaker
from openquake.calculators import getters
from openquake.commonlib import calc, util, oqvalidation

U16 = numpy.uint16
U32 = numpy.uint32
F32 = numpy.float32
F64 = numpy.float64
TWO32 = 2**32
ALL = slice(None)
CHUNKSIZE = 4 * 1024**2  # 4 MB
memoized = lru_cache()


class NotFound(Exception):
    pass


def lit_eval(string):
    """
    `ast.literal_eval` the string if possible, otherwise returns it unchanged
    """
    try:
        return ast.literal_eval(string)
    except (ValueError, SyntaxError):
        return string
Example #56
def cache(max_size=4096):
    return functools.lru_cache(maxsize=max_size)
Example #57
    def test_lru(self):
        def orig(x, y):
            return 3 * x + y

        f = functools.lru_cache(maxsize=20)(orig)
        hits, misses, maxsize, currsize = f.cache_info()
        self.assertEqual(maxsize, 20)
        self.assertEqual(currsize, 0)
        self.assertEqual(hits, 0)
        self.assertEqual(misses, 0)

        domain = range(5)
        for i in range(1000):
            x, y = choice(domain), choice(domain)
            actual = f(x, y)
            expected = orig(x, y)
            self.assertEqual(actual, expected)
        hits, misses, maxsize, currsize = f.cache_info()
        self.assertTrue(hits > misses)
        self.assertEqual(hits + misses, 1000)
        self.assertEqual(currsize, 20)

        f.cache_clear()  # test clearing
        hits, misses, maxsize, currsize = f.cache_info()
        self.assertEqual(hits, 0)
        self.assertEqual(misses, 0)
        self.assertEqual(currsize, 0)
        f(x, y)
        hits, misses, maxsize, currsize = f.cache_info()
        self.assertEqual(hits, 0)
        self.assertEqual(misses, 1)
        self.assertEqual(currsize, 1)

        # Test bypassing the cache
        self.assertIs(f.__wrapped__, orig)
        f.__wrapped__(x, y)
        hits, misses, maxsize, currsize = f.cache_info()
        self.assertEqual(hits, 0)
        self.assertEqual(misses, 1)
        self.assertEqual(currsize, 1)

        # test size zero (which means "never-cache")
        @functools.lru_cache(0)
        def f():
            nonlocal f_cnt
            f_cnt += 1
            return 20

        self.assertEqual(f.cache_info().maxsize, 0)
        f_cnt = 0
        for i in range(5):
            self.assertEqual(f(), 20)
        self.assertEqual(f_cnt, 5)
        hits, misses, maxsize, currsize = f.cache_info()
        self.assertEqual(hits, 0)
        self.assertEqual(misses, 5)
        self.assertEqual(currsize, 0)

        # test size one
        @functools.lru_cache(1)
        def f():
            nonlocal f_cnt
            f_cnt += 1
            return 20

        self.assertEqual(f.cache_info().maxsize, 1)
        f_cnt = 0
        for i in range(5):
            self.assertEqual(f(), 20)
        self.assertEqual(f_cnt, 1)
        hits, misses, maxsize, currsize = f.cache_info()
        self.assertEqual(hits, 4)
        self.assertEqual(misses, 1)
        self.assertEqual(currsize, 1)

        # test size two
        @functools.lru_cache(2)
        def f(x):
            nonlocal f_cnt
            f_cnt += 1
            return x * 10

        self.assertEqual(f.cache_info().maxsize, 2)
        f_cnt = 0
        for x in 7, 9, 7, 9, 7, 9, 8, 8, 8, 9, 9, 9, 8, 8, 8, 7:
            #    *  *              *                          *
            self.assertEqual(f(x), x * 10)
        self.assertEqual(f_cnt, 4)
        hits, misses, maxsize, currsize = f.cache_info()
        self.assertEqual(hits, 12)
        self.assertEqual(misses, 4)
        self.assertEqual(currsize, 2)
Example #58
                new_lhs = new_lhs[1:]
            else:
                out.append(new_rhs[0])
                new_rhs = new_rhs[1:]
        assert not new_rhs
        assert not new_lhs
        return out

    return lhs, rhs, merge


def cache(max_size=4096):
    return functools.lru_cache(maxsize=max_size)


memoize = functools.lru_cache(maxsize=None)


def prod(xs):
    out = 1
    for x in xs:
        out *= x
    return out


class WrapHashably(object):
    __slots__ = ["val"]

    def __init__(self, val):
        self.val = val
Example #59
File: cache.py Project: zx9r/jesse
 def decorated(self, *args, **kwargs):
     cached_method = self._cached_methods.get(method)
     if cached_method is None:
         cached_method = lru_cache()(method)
         self._cached_methods[method] = cached_method
     return cached_method(self, *args, **kwargs)
Example #60
import uuid
from graphql import GraphQLError
from flask import session, render_template, g
from flask_babel import _, get_locale, refresh
from flask_login import login_required, login_user, current_user, logout_user
from functools import lru_cache
from sqlalchemy import func, distinct
from sqlalchemy.orm import aliased
from datetime import datetime

# workaround from https://github.com/graphql-python/graphene-sqlalchemy/issues/211
# without this workaround, graphene complains that there are multiple
# types with the same name when using the same enum in different places
# i.e. AssertionError:
# Found different types with the same name in the schema: Stat, Stat.
graphene.Enum.from_enum = lru_cache(maxsize=None)(graphene.Enum.from_enum)


class GlobalNode(graphene.Interface):
    id = graphene.UUID(required=True)

    def resolve_id(self, info):
        return self.uuid


# https://github.com/graphql-python/graphene/issues/968#issuecomment-537328256
class NonNullConnection(relay.Connection, abstract=True):
    @classmethod
    def __init_subclass_with_meta__(cls, node, **kwargs):
        if not hasattr(cls, "Edge"):
            _node = node