Example 1
    def __init__(self,
                 *args,
                 enter_reconnect: Optional[bool] = None,
                 exit_close: Optional[bool] = None,
                 **kwargs):
        self.ins_enter_reconnect = empty_if(enter_reconnect,
                                            self.adapter_enter_reconnect)
        self.ins_exit_close = empty_if(exit_close, self.adapter_exit_close)
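Throughout these examples, empty_if(value, fallback, ...) acts as an "empty coalescing" helper: it returns the fallback when the value is empty, and the value itself otherwise. Below is a minimal sketch of the assumed semantics (simplified; the real privex.helpers implementation handles more cases and extra keyword arguments):

def empty_if_sketch(value, fallback, itr=False, zero=False):
    # None and '' always count as empty; 0 only counts when zero=True,
    # and zero-length iterables only when itr=True.
    if value is None or value == '':
        return fallback
    if zero and value == 0:
        return fallback
    if itr and hasattr(value, '__len__') and len(value) == 0:
        return fallback
    return value

assert empty_if_sketch(None, 'default') == 'default'
assert empty_if_sketch('x', 'default') == 'x'
assert empty_if_sketch([], 'default', itr=True) == 'default'
assert empty_if_sketch(0, 5, zero=True) == 5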
Example 2
def get_ssl_context(verify_cert: bool = False,
                    check_hostname: Optional[bool] = None,
                    verify_mode: Optional[int] = None,
                    **kwargs) -> ssl.SSLContext:
    check_hostname = empty_if(check_hostname, is_true(verify_cert))
    verify_mode = empty_if(verify_mode,
                           ssl.CERT_REQUIRED if verify_cert else ssl.CERT_NONE)
    
    ctx = ssl.create_default_context()
    ctx.check_hostname = check_hostname
    ctx.verify_mode = verify_mode
    return ctx
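A hypothetical usage sketch for the function above, wrapping a plain TCP socket with the returned context (the host and port here are placeholders):

import socket

ctx = get_ssl_context(verify_cert=True)   # check_hostname=True, verify_mode=CERT_REQUIRED
with socket.create_connection(('example.com', 443), timeout=10) as raw_sock:
    with ctx.wrap_socket(raw_sock, server_hostname='example.com') as tls_sock:
        print(tls_sock.version())         # e.g. 'TLSv1.3'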
Example 3
    def __init__(self,
                 db_file: str = None,
                 memory_persist=False,
                 use_pickle: bool = None,
                 connection_kwargs: dict = None,
                 *args,
                 **kwargs):
        """
        :class:`.AsyncSqliteCache` uses an auto-generated database filename / path by default, based on the name of the
        currently running script (retrieved from ``sys.argv[0]``). This allows for persistent caching without any manual
        configuration of the adapter, and without requiring background services such as ``redis`` / ``memcached``.

        :param str db_file:     (Optional) Name of / path to Sqlite3 database file to create/use for the cache.
        
        :param bool memory_persist: Use a shared in-memory database which can be accessed by other instances of this class
                                    (within this process), and which is cleared once all in-memory connections are closed.
                                    Shortcut for ``db_file='file::memory:?cache=shared'``

        :param bool use_pickle: (Default: ``True``) Use the built-in ``pickle`` to serialise values before
                                storing in Sqlite3, and un-serialise when loading from Sqlite3
        
        :param dict connection_kwargs: (Optional) Additional / overriding kwargs to pass to :meth:`sqlite3.connect` when
                                        :class:`.AsyncSqliteCacheManager` initialises its sqlite3 connection.
        
        :keyword int purge_every: (Default: 300) Expired + abandoned cache records are purged using the DB manager method
                                  :meth:`.AsyncSqliteCacheManager.purge_expired` during :meth:`.get` / :meth:`.set` calls. To avoid
                                  performance issues, the actual :meth:`.AsyncSqliteCacheManager.purge_expired` method is only called
                                  if at least ``purge_every`` seconds have passed since the last purge was
                                  triggered ( :attr:`.last_purged_expired` )

        """
        from privex.helpers.cache.post_deps import AsyncSqliteCacheManager
        super().__init__(*args, **kwargs)
        self.db_file: str = empty_if(db_file,
                                     AsyncSqliteCacheManager.DEFAULT_DB)
        self.db_folder = None
        if ':memory:' not in self.db_file:
            if not isabs(self.db_file):
                self.db_file = join(AsyncSqliteCacheManager.DEFAULT_DB_FOLDER,
                                    self.db_file)
            self.db_folder = dirname(self.db_file)
            if not exists(self.db_folder):
                log.debug("Folder for database doesn't exist. Creating: %s",
                          self.db_folder)
                makedirs(self.db_folder)
        self.connection_kwargs = empty_if(connection_kwargs, {}, itr=True)
        self.memory_persist = is_true(memory_persist)
        self._wrapper = None
        self.purge_every = kwargs.get('purge_every', 300)
        self.use_pickle = self.pickle_default if use_pickle is None else use_pickle
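An illustrative usage sketch based on the constructor documented above. The get / set coroutine calls below are assumptions based on the generic cache adapter interface, not verified signatures:

import asyncio

async def main():
    # Shared in-memory DB, purged at most once every 60 seconds
    cache = AsyncSqliteCache(memory_persist=True, purge_every=60)
    await cache.set('greeting', 'hello', timeout=120)   # assumed signature
    print(await cache.get('greeting'))                  # assumed signature

asyncio.run(main())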
Example 4
    def _connect(self,
                 db=None,
                 *args,
                 connection_kwargs=None,
                 memory_persist=None,
                 **kwargs):
        c_kwargs = dict(connection_kwargs=empty_if(connection_kwargs,
                                                   self.connection_kwargs),
                        memory_persist=empty_if(memory_persist,
                                                self.memory_persist))
        c_kwargs = {**c_kwargs, **kwargs}
        from privex.helpers.cache.post_deps import AsyncSqliteCacheManager
        self._wrapper = AsyncSqliteCacheManager(empty_if(db, self.db_file),
                                                *args, **c_kwargs)
        return self._wrapper
Example 5
def _get_threadstore(name=None, fallback=None, thread_id=None) -> Any:
    thread_id = empty_if(thread_id, threading.get_ident())
    if thread_id not in __STORE['threads']:
        __STORE['threads'][thread_id] = {}
    thread_store: dict = __STORE['threads'][thread_id]
    if name is None:
        return thread_store

    return thread_store.get(name, fallback)
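An illustrative sketch of how the per-thread store behaves: each thread gets its own dict, so instances stashed in one thread are invisible to others. There is no separate setter shown here; mutating the returned dict directly has the same effect:

import threading

def worker():
    store = _get_threadstore()          # this thread's private dict
    store['conn'] = object()            # stash a per-thread instance
    print(_get_threadstore('conn'))     # retrievable within the same thread

t = threading.Thread(target=worker)
t.start()
t.join()
print(_get_threadstore('conn', 'not set in the main thread'))   # fallback is returned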
Example 6
    def reset_adapter(cls,
                      default: CLSCacheAdapter = default_adapter,
                      *args,
                      **kwargs) -> INSCacheAdapter:
        """
        Re-create the adapter instance at :attr:`.cache_instance` with the same adapter class (assuming it's set)
        """
        adp = cls.get_adapter(default, *args, **kwargs)
        n_args = empty_if(args, cls.instance_args, itr=True)
        n_kwargs = {**cls.instance_kwargs, **kwargs}
        c = adp.__class__
        # Build a fresh adapter instance of the same class with the merged args/kwargs
        cls.cache_instance = c(*n_args, **n_kwargs)
        return cls.cache_instance
Example 7
    def _pfx_key(cls,
                 key: Union[str, Callable[[Any], str]],
                 auto_prefix: bool = True,
                 _auto_cache=True,
                 call_args: list = None,
                 call_kwargs: dict = None,
                 _lock: ANY_LCK = None) -> str:
        """
        Add this class's cache key prefix to ``key`` if it isn't already prefixed

        :param str|callable key:   The key to prepend the cache key prefix onto - if not already prefixed.
                                   This may optionally be a function (e.g. a lambda) which returns a cache key to be auto-prefixed,
                                   and any necessary positional/keyword arguments for the function may be specified using the
                                   arguments ``call_args`` and ``call_kwargs``

        :param bool auto_prefix:   This argument is mainly used by internal methods to reduce the need to copy/paste handling code
                                   which allows users to request that a method does not attempt to auto-prefix the key they entered.

        :param bool _auto_cache:   A boolean flag which controls whether or not keys are automatically logged to the cache key log,
                                   which is a list of all known cache keys for a given class. Uses :meth:`.log_cache_key`

        :param list call_args:     If ``key`` is a callable (e.g. a lambda), ``call_args`` can be set to a list of positional arguments
                                   to pass to the callable function ``key``

        :param dict call_kwargs:   If ``key`` is a callable (e.g. a lambda), ``call_kwargs`` can be set to a :class:`.dict` of
                                   keyword arguments to pass to the callable function ``key``

        :param ANY_LCK _lock:      This method itself does not use a lock, but it calls upon :meth:`.log_cache_key` which does use
                                   a lock. You may optionally pass a :class:`.Lock` instance if needed, e.g. to prevent a conflict
                                   where the calling function/method has already acquired the class-level lock.
                                   It can also be set to the dummy type :class:`.NO_LOCK` to prevent using a lock.
        :return str new_key:       The original ``key`` after it may or may not have had a prefix prepended to it.
        """
        if callable(key):
            call_args = auto_list(call_args)
            call_kwargs = empty_if(call_kwargs, {}, itr=True, zero=True)
            key = key(*call_args, **call_kwargs)
        if auto_prefix and not key.startswith(cls.cache_prefix):
            key = cls.cache_sep.join([cls.cache_prefix, key])
        if _auto_cache:  # By default, _auto_cache is enabled, which means we log all created keys to the cache key log
            cls.log_cache_key(key, _lock=_lock)
        return key
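Illustrative behaviour of the method above, assuming the hypothetical values cache_prefix = 'pvx' and cache_sep = ':' on a hypothetical MyClass:

# MyClass._pfx_key('user:1')                                 -> 'pvx:user:1'
# MyClass._pfx_key('pvx:user:1')                             -> 'pvx:user:1'  (already prefixed)
# MyClass._pfx_key('user:1', auto_prefix=False)              -> 'user:1'
# MyClass._pfx_key(lambda uid: f'user:{uid}', call_args=[1]) -> 'pvx:user:1'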
Example 8
    async def get_current_commit(self,
                                 version: str = None,
                                 repo: str = None) -> str:
        """
        Get current commit hash. Optionally specify ``version`` to get the current commit hash for a branch or tag.

        **Examples**::

            >>> g = Git()
            >>> await g.get_current_commit()
            'ac52b28f551825160785f9ea7e96f86ccc869cc1'
            >>> await g.get_current_commit('2.0.0')    # Get the commit hash for the tag 2.0.0
            '598584a447ba63212ac3fe798c01941badf1c194'

        :param str version:  Optionally specify a branch / tag to get the current commit hash for.
        :param str repo:     Optionally specify a specific local repository path to run ``git`` within.
        :return str commit_hash: The current Git commit hash
        """
        return await self.git("rev-parse",
                              empty_if(version, self.default_version),
                              repo=repo)
Example 9
    async def get_current_tag(self,
                              version: str = None,
                              repo: str = None) -> str:
        """
        Get the latest tag on this branch - useful for detecting current version of your python application.

        **Examples**::

            >>> g = Git()
            >>> await g.get_current_tag()          # Get the latest tag on the active branch
            '2.5.0'
            >>> await g.get_current_tag('develop') # Get the latest tag on the branch 'develop'
            '2.5.3'

        :param str version:  Optionally specify a branch / tag to get the latest tag for.
        :param str repo:     Optionally specify a specific local repository path to run ``git`` within.
        :return str current_tag: The name of the latest tag on this branch.
        """
        return await self.git("describe",
                              "--abbrev=0",
                              "--tags",
                              empty_if(version, self.default_version),
                              repo=repo)
Example 10
    def set_sleep_time(secs: int = None):
        CacheManagerExample.sleep_multiplier = float(
            empty_if(secs, CacheManagerExample.default_sleep_multiplier))
Example 11
def test_hosts(hosts: List[str] = None,
               ipver: str = 'any',
               timeout: AnyNum = None,
               **kwargs) -> bool:
    randomise = is_true(kwargs.get('randomise', True))
    max_hosts = kwargs.get('max_hosts', settings.NET_CHECK_HOST_COUNT_TRY)
    if max_hosts is not None: max_hosts = int(max_hosts)
    timeout = empty_if(timeout,
                       empty_if(socket.getdefaulttimeout(), 4, zero=True),
                       zero=True)

    v4h, v6h = list(settings.V4_TEST_HOSTS), list(settings.V6_TEST_HOSTS)
    if randomise: random.shuffle(v4h)
    if randomise: random.shuffle(v6h)

    if empty(hosts, True, True):
        if isinstance(ipver, str): ipver = ipver.lower()
        if ipver in [4, '4', 'v4', 'ipv4']:
            hosts = v4h
            ipver = 4
        elif ipver in [6, '6', 'v6', 'ipv6']:
            hosts = v6h
            ipver = 6
        else:
            ipver = 'any'
            if max_hosts:
                half = int(ceil(max_hosts / 2))
                hosts = v4h[:half] + v6h[:half]
            else:
                hosts = v4h + v6h

    if max_hosts: hosts = hosts[:max_hosts]

    min_hosts_pos = int(kwargs.get('required_positive', settings.NET_CHECK_HOST_COUNT))

    hosts = list(hosts)

    if randomise: random.shuffle(hosts)

    if max_hosts and len(hosts) > max_hosts: hosts = hosts[:max_hosts]

    total_hosts = len(hosts)
    total_working, total_broken = 0, 0

    log.debug("Testing %s hosts with IP version '%s' - timeout: %s",
              total_hosts, ipver, timeout)
    port = 80

    for h in hosts:
        try:
            nh = h.split(':')
            if len(nh) > 1:
                port = int(nh[-1])
                h = ':'.join(nh[:-1])
            else:
                log.warning(
                    "Host is missing port: %s - falling back to port 80", h)
                port = 80

            log.debug("Checking host %s via port %s + IP version '%s'", h,
                      port, ipver)

            if port == 80:
                res = check_host_http(h,
                                      port,
                                      ipver,
                                      throw=False,
                                      timeout=timeout)
            else:
                res = check_host(h, port, ipver, throw=False, timeout=timeout)
            if res:
                total_working += 1
                log.debug(
                    "check_host for %s came back true. incremented working hosts: %s",
                    h, total_working)
            else:
                total_broken += 1
                log.debug(
                    "check_host for %s came back false. incremented broken hosts: %s",
                    h, total_broken)

        except Exception as e:
            log.warning("Exception while checking host %s port %s: %s", h, port, e)

    working = total_working >= min_hosts_pos

    log.info(
        "test_hosts - proto: %s - protocol working? %s || total hosts: %s || working hosts: %s || broken hosts: %s",
        ipver, working, total_hosts, total_working, total_broken)

    return working
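A hypothetical call to the function above. max_hosts and required_positive are read from **kwargs as shown in the body; their defaults come from privex.helpers settings:

if test_hosts(ipver=4, timeout=3, max_hosts=4, required_positive=2):
    print('IPv4 connectivity looks OK')
else:
    print('IPv4 appears broken or unavailable')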
Example 12
async def test_hosts_async(hosts: List[str] = None,
                           ipver: str = 'any',
                           timeout: AnyNum = None,
                           **kwargs) -> bool:
    randomise = is_true(kwargs.get('randomise', True))
    max_hosts = kwargs.get('max_hosts', settings.NET_CHECK_HOST_COUNT_TRY)
    if max_hosts is not None: max_hosts = int(max_hosts)
    timeout = empty_if(timeout,
                       empty_if(socket.getdefaulttimeout(), 4, zero=True),
                       zero=True)

    v4h, v6h = list(settings.V4_TEST_HOSTS), list(settings.V6_TEST_HOSTS)
    if randomise: random.shuffle(v4h)
    if randomise: random.shuffle(v6h)

    if empty(hosts, True, True):
        if isinstance(ipver, str): ipver = ipver.lower()
        if ipver in [4, '4', 'v4', 'ipv4']:
            hosts = v4h
            ipver = 4
        elif ipver in [6, '6', 'v6', 'ipv6']:
            hosts = v6h
            ipver = 6
        else:
            ipver = 'any'
            if max_hosts:
                half = int(ceil(max_hosts / 2))
                hosts = v4h[:half] + v6h[:half]
            else:
                hosts = v4h + v6h

    if max_hosts: hosts = hosts[:max_hosts]

    min_hosts_pos = int(kwargs.get('required_positive', settings.NET_CHECK_HOST_COUNT))

    hosts = list(hosts)

    if randomise: random.shuffle(hosts)

    if max_hosts and len(hosts) > max_hosts: hosts = hosts[:max_hosts]

    total_hosts = len(hosts)
    total_working, total_broken = 0, 0
    working_list, broken_list = [], []
    log.debug("Testing %s hosts with IP version '%s' - timeout: %s",
              total_hosts, ipver, timeout)

    host_checks = []
    host_checks_hosts = []
    for h in hosts:
        host_checks.append(
            asyncio.create_task(
                run_coro_thread_async(_test_host_async,
                                      h,
                                      ipver=ipver,
                                      timeout=timeout)))
        host_checks_hosts.append(h)

    host_checks_res = await asyncio.gather(*host_checks,
                                           return_exceptions=True)
    for i, _res in enumerate(host_checks_res):
        h = host_checks_hosts[i]
        if isinstance(_res, Exception):
            log.warning("Exception while checking host %s - %s", h, _res)
            total_broken += 1
            continue

        res, h, port = _res

        if res:
            total_working += 1
            working_list.append(f"{h}:{port}")
            log.debug(
                "check_host for %s (port %s) came back True (WORKING). incremented working hosts: %s",
                h, port, total_working)
        else:
            total_broken += 1
            broken_list.append(f"{h}:{port}")
            log.debug(
                "check_host for %s (port %s) came back False (! BROKEN !). incremented broken hosts: %s",
                h, port, total_broken)


    working = total_working >= min_hosts_pos

    log.info(
        "test_hosts - proto: %s - protocol working? %s || total hosts: %s || working hosts: %s || broken hosts: %s",
        ipver, working, total_hosts, total_working, total_broken)
    log.debug("working hosts: %s", working_list)
    log.debug("broken hosts: %s", broken_list)

    return working
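A hypothetical usage sketch for the async variant, driven from synchronous code:

import asyncio

ok = asyncio.run(test_hosts_async(ipver='any', timeout=3))
print('network reachable?', ok)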
Example 13
    def _unpfx_key(cls, key: str, prefix: str = None, sep: str = None):
        """Remove the cache key prefix from a given cache key"""
        prefix = empty_if(prefix, cls.cache_prefix)
        sep = empty_if(sep, cls.cache_sep)
        j = key.split(f"{prefix}{sep}")
        return j[0] if len(j) == 1 else ''.join(j[1:])
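Illustrative behaviour, again assuming the hypothetical values cache_prefix = 'pvx' and cache_sep = ':':

# MyClass._unpfx_key('pvx:user:1')  -> 'user:1'
# MyClass._unpfx_key('user:1')      -> 'user:1'  (no prefix found, returned unchanged)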
Example 14
def sock_ver(version) -> Optional[int]:
    version = empty_if(version, 'any', zero=True, itr=True)
    version = str(version).lower()
    if ip_ver_to_int(version) == 4: return socket.AF_INET
    if ip_ver_to_int(version) == 6: return socket.AF_INET6
    return None
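Quick sanity checks for the function above (assuming ip_ver_to_int maps 'v4' / 'v6' style strings to 4 / 6; socket.AF_INET and AF_INET6 are IntEnum members, so the Optional[int] annotation holds):

assert sock_ver(4) == socket.AF_INET
assert sock_ver('v6') == socket.AF_INET6
assert sock_ver('any') is None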
Example 15
def clean_threadstore(thread_id=None,
                      name=None,
                      clean_all: bool = False) -> bool:
    """
    Remove the per-thread instance storage in :attr:`.__STORE`, usually called when a thread is exiting.

    Can also be used to clear a certain key in all thread stores, or completely clear every key from every thread store.

    Example::

        >>> def some_thread():
        ...     r = get_redis()
        ...     print('doing something')
        ...     print('cleaning up...')
        ...     clean_threadstore()       # With no arguments, it cleans the thread store for the thread that called it.
        >>> t = threading.Thread(target=some_thread)
        >>> t.start()
        >>> t.join()

    Usage outside of a thread::

        >>> t = threading.Thread(target=some_thread)
        >>> t.start()
        >>> thread_id = t.ident                      # Get the thread ID for the started thread
        >>> t.join()                                 # Wait for the thread to finish
        >>> if thread_id is not None:                # Make sure the thread ID isn't None
        ...     clean_threadstore(thread_id)         # Cleanup any leftover instances, if there are any.
        ...

    Removing an individual item from thread store::
        
        >>> def some_thread():
        ...     r = get_redis()
        ...     print('doing something')
        ...     print('cleaning up...')
        ...     clean_threadstore(name='redis')   # Delete only the key 'redis' from the thread store
    
    Removing an individual item from the thread store for **ALL thread IDs**::
    
        >>> # Remove the redis instance from every thread store
        >>> clean_threadstore(name='redis', clean_all=True)
    
    Clearing the entire thread store for every thread ID::
    
        >>> clean_threadstore(clean_all=True)
    
    
    :param thread_id: The ID of the thread (usually from :func:`threading.get_ident`) to clean the storage for.
                      If left as None, will use the ID returned by :func:`threading.get_ident`.
    
    :param name:      If specified, then only the key ``name`` will be deleted from the thread store, instead of the entire thread store.
    :param bool clean_all: (default: ``False``) If ``True``: when ``name`` is non-empty, that key is removed from every
                           thread's store; when ``name`` is empty, every thread's entire store is cleared.
    """
    thread_id = empty_if(thread_id, threading.get_ident())

    if clean_all:
        for t_id, ts in __STORE['threads'].items():
            ts: dict
            if empty(name):
                log.debug(
                    "[clean_threadstore] (clean_all True) Cleaning entire thread store for thread ID '%s'",
                    t_id)
                # Iterate over a copied key list - deleting from a dict while
                # iterating its live view would raise a RuntimeError.
                for n in list(ts.keys()):
                    log.debug(
                        "[clean_threadstore] Cleaning '%s' key for thread ID '%s'",
                        n, t_id)
                    del ts[n]
                continue
            log.debug(
                "[clean_threadstore] (clean_all True) Cleaning '%s' key for thread ID '%s'",
                name, t_id)
            if name in ts:
                log.debug(
                    "[clean_threadstore] Found %s in thread ID %s - deleting...",
                    name, t_id)
                del ts[name]
        return True

    if thread_id in __STORE['threads']:
        ts = __STORE['threads'][thread_id]
        if name is None:
            del __STORE['threads'][thread_id]
            return True
        elif name in ts:
            del ts[name]
            return True
    return False