def _handle_backports(self):
    import django
    if version_eq_gt(self.django_min_backport, django.VERSION):
        return True
    auto_backport = is_true(getattr(settings, 'AUTO_BACKPORT', True))
    if not auto_backport:
        log.debug(" [!!!] settings.AUTO_BACKPORT is false - not checking if backports loaded...")
        return True
    if _app_installed('privex.adminplus.backports'):
        log.debug(" [+++] backports already loaded. skipping auto-backport.")
        return True
    self.lwarn(" [...] PrivexAdminPlusConfig.ready :: Django version is < 3.1.0 :: Ver is: %s", django.get_version())
    self.lwarn(" [...] 'privex.adminplus.backports' not in INSTALLED_APPS... Dynamically injecting into INSTALLED_APPS ...")
    # settings.INSTALLED_APPS += ['privex.adminplus.backports']
    _prepend_app('privex.adminplus.backports')
    return False
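# Example (sketch): the auto-backport behaviour above is controlled from Django settings.
# AUTO_BACKPORT is read via getattr() with a default of True, so it is optional; the values
# shown here are illustrative, not required.
#
#     # settings.py
#     AUTO_BACKPORT = False    # disable dynamic injection of 'privex.adminplus.backports'
#
#     # ...or pin the backports app yourself, so auto-injection is skipped:
#     INSTALLED_APPS = [
#         'privex.adminplus.backports',
#         # ... your other apps ...
#     ]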
def convert_int_bool(d, if_empty=False, fail_empty=False) -> bool:
    """Convert an integer ``d`` into a boolean (``0`` for ``False``, ``1`` for ``True``)"""
    if empty(d):
        if fail_empty:
            raise AttributeError(f"Error converting '{d}' into a boolean. Parameter 'd' was empty!")
        return if_empty
    return is_true(d)
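# Example usage (sketch) - non-empty values are passed through is_true(), so common truthy
# strings also work. Outputs assume privex.helpers semantics for empty() / is_true().
#
#     >>> convert_int_bool(1)
#     True
#     >>> convert_int_bool(0)
#     False
#     >>> convert_int_bool(None)                     # empty -> returns if_empty (False by default)
#     False
#     >>> convert_int_bool(None, fail_empty=True)    # empty -> raises AttributeError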
def _setup_admin(self):
    auto_admin = is_true(getattr(settings, 'AUTO_SETUP_ADMIN', True))
    if not auto_admin:
        self.lwarn(
            " [!!!] settings.AUTO_SETUP_ADMIN is false - not registering privex-adminplus "
            "by calling setup_admin(admin)..."
        )
        return False
    from privex.adminplus.admin import setup_admin
    from django.contrib import admin as dj_admin
    setup_admin(dj_admin)
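# Example (sketch): with AUTO_SETUP_ADMIN disabled, registration can be done manually using
# the same call that _setup_admin makes. Any extra setup_admin() parameters are not shown,
# as they aren't documented in this snippet.
#
#     # settings.py
#     AUTO_SETUP_ADMIN = False
#
#     # somewhere in your project's startup code:
#     from django.contrib import admin
#     from privex.adminplus.admin import setup_admin
#     setup_admin(admin)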
def convert_bool_int(d, if_empty=0, fail_empty=False) -> int:
    """Convert a boolean ``d`` into an integer (``0`` for ``False``, ``1`` for ``True``)"""
    if type(d) is int:
        return 1 if d >= 1 else 0
    if empty(d):
        if fail_empty:
            raise AttributeError(f"Error converting '{d}' into an integer. Parameter 'd' was empty!")
        return if_empty
    return 1 if is_true(d) else 0
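# Example usage (sketch):
#
#     >>> convert_bool_int(True)
#     1
#     >>> convert_bool_int('false')
#     0
#     >>> convert_bool_int(5)                 # ints are clamped: >= 1 -> 1, otherwise 0
#     1
#     >>> convert_bool_int(None, if_empty=0)  # empty -> returns if_empty
#     0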
def get_ssl_context(verify_cert: bool = False, check_hostname: Optional[bool] = None,
                    verify_mode: Optional[int] = None, **kwargs) -> ssl.SSLContext:
    check_hostname = empty_if(check_hostname, is_true(verify_cert))
    verify_mode = empty_if(verify_mode, ssl.CERT_REQUIRED if verify_cert else ssl.CERT_NONE)
    ctx = ssl.create_default_context()
    ctx.check_hostname = check_hostname
    ctx.verify_mode = verify_mode
    return ctx
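# Example usage (sketch): an unverified context for a quick TLS connectivity probe, using only
# the standard library. The host/port are illustrative.
#
#     import socket
#     ctx = get_ssl_context(verify_cert=False)
#     with socket.create_connection(('example.com', 443), timeout=5) as sock:
#         with ctx.wrap_socket(sock, server_hostname='example.com') as ssock:
#             print(ssock.version())   # e.g. 'TLSv1.3'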
def __init__(self, db_file: str = None, memory_persist=False, use_pickle: bool = None,
             connection_kwargs: dict = None, *args, **kwargs):
    """
    :class:`.AsyncSqliteCache` uses an auto-generated database filename / path by default, based on the name
    of the currently running script (retrieved from ``sys.argv[0]``), allowing for persistent caching - without
    any manual configuration of the adapter, nor the requirement for any running background services such as
    ``redis`` / ``memcached``.

    :param str db_file: (Optional) Name of / path to the Sqlite3 database file to create/use for the cache.
    :param bool memory_persist: Use a shared in-memory database, which can be accessed by other instances
        of this class (in this process) - and is cleared after all memory connections are closed.
        Shortcut for ``db_file='file::memory:?cache=shared'``
    :param bool use_pickle: (Default: ``True``) Use the built-in ``pickle`` to serialise values before storing
        them in Sqlite3, and un-serialise them when loading from Sqlite3
    :param dict connection_kwargs: (Optional) Additional / overriding kwargs to pass to :meth:`sqlite3.connect`
        when :class:`.AsyncSqliteCacheManager` initialises its sqlite3 connection.

    :keyword int purge_every: (Default: 300) Expired + abandoned cache records are purged using the DB manager
        method :meth:`.AsyncSqliteCacheManager.purge_expired` during :meth:`.get` / :meth:`.set` calls.
        To avoid performance issues, the actual :meth:`.AsyncSqliteCacheManager.purge_expired` method is only
        called if at least ``purge_every`` seconds have passed since the last purge was triggered
        (:attr:`.last_purged_expired`)
    """
    from privex.helpers.cache.post_deps import AsyncSqliteCacheManager
    super().__init__(*args, **kwargs)
    self.db_file: str = empty_if(db_file, AsyncSqliteCacheManager.DEFAULT_DB)
    self.db_folder = None
    if ':memory:' not in self.db_file:
        if not isabs(self.db_file):
            self.db_file = join(AsyncSqliteCacheManager.DEFAULT_DB_FOLDER, self.db_file)
        self.db_folder = dirname(self.db_file)
        if not exists(self.db_folder):
            log.debug("Folder for database doesn't exist. Creating: %s", self.db_folder)
            makedirs(self.db_folder)
    self.connection_kwargs = empty_if(connection_kwargs, {}, itr=True)
    self.memory_persist = is_true(memory_persist)
    self._wrapper = None
    self.purge_every = kwargs.get('purge_every', 300)
    self.use_pickle = self.pickle_default if use_pickle is None else use_pickle
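# Example usage (sketch) - assumes AsyncSqliteCache exposes the usual async get()/set() adapter
# methods from privex-helpers' cache layer; the key, value and timeout are illustrative.
#
#     import asyncio
#
#     async def main():
#         cache = AsyncSqliteCache(memory_persist=True)   # shared in-memory DB, nothing on disk
#         await cache.set('hello', 'world', timeout=60)
#         print(await cache.get('hello'))                 # -> 'world'
#
#     asyncio.run(main())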
from collections import OrderedDict
from typing import Iterable, List, Union

from django.apps import AppConfig
from django.apps.registry import Apps
from django.core.exceptions import ImproperlyConfigured
from privex.helpers.common import empty, is_true, inject_items
from privex.adminplus import VERSION
from django.contrib.admin.apps import AdminConfig
from django.conf import settings
import logging

log = logging.getLogger(__name__)

QUIET = is_true(getattr(settings, 'ADMINPLUS_QUIET', False))


# def ap_quiet():
#     return is_true(getattr(settings, 'ADMINPLUS_QUIET', False))


def _app_installed(entry: str) -> bool:
    return entry in settings.INSTALLED_APPS


def _inject_app(entry: str, index: int = None, after: str = None):
    if index is None:
        if after is None:
            raise ValueError("_inject_app expects either 'index' or 'after' to be specified")
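# Example usage (sketch): these helpers operate on settings.INSTALLED_APPS. Note that the body
# of _inject_app is truncated above (only its argument validation is shown), so the 'after'
# behaviour in this sketch - inserting directly after a named app - is assumed, not confirmed.
#
#     if not _app_installed('privex.adminplus.backports'):
#         _inject_app('privex.adminplus.backports', after='django.contrib.admin')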
def test_hosts(hosts: List[str] = None, ipver: str = 'any', timeout: AnyNum = None, **kwargs) -> bool:
    randomise = is_true(kwargs.get('randomise', True))
    max_hosts = kwargs.get('max_hosts', settings.NET_CHECK_HOST_COUNT_TRY)
    if max_hosts is not None:
        max_hosts = int(max_hosts)
    timeout = empty_if(timeout, empty_if(socket.getdefaulttimeout(), 4, zero=True), zero=True)
    v4h, v6h = list(settings.V4_TEST_HOSTS), list(settings.V6_TEST_HOSTS)
    if randomise:
        random.shuffle(v4h)
        random.shuffle(v6h)
    if empty(hosts, True, True):
        # if empty(ipver, True, True) or ipver in ['any', 'all', 'both', 10, '10', '46', 46]:
        #     settings.V4_CHECKED_AT
        if isinstance(ipver, str):
            ipver = ipver.lower()
        if ipver in [4, '4', 'v4', 'ipv4']:
            hosts = v4h
            ipver = 4
        elif ipver in [6, '6', 'v6', 'ipv6']:
            hosts = v6h
            ipver = 6
        else:
            ipver = 'any'
            if max_hosts:
                hosts = v4h[:int(ceil(max_hosts / 2))] + v6h[:int(ceil(max_hosts / 2))]
            else:
                hosts = v4h + v6h
        if max_hosts:
            hosts = hosts[:max_hosts]

    # st4_empty = any([empty(settings.HAS_WORKING_V4, True, True), empty(settings.V4_CHECKED_AT, True, True)])
    # st6_empty = any([empty(settings.HAS_WORKING_V6, True, True), empty(settings.V6_CHECKED_AT, True, True)])
    # if ipver == 6 and not st6_empty and settings.V6_CHECKED_AT > datetime.utcnow():
    #     log.debug("Returning cached IPv6 status: working = %s", settings.HAS_WORKING_V6)
    #     return settings.HAS_WORKING_V6
    # if ipver == 4 and not st4_empty and settings.V4_CHECKED_AT > datetime.utcnow():
    #     log.debug("Returning cached IPv4 status: working = %s", settings.HAS_WORKING_V4)
    #     return settings.HAS_WORKING_V4
    # if ipver == 'any' and any([not st4_empty, not st6_empty]) and settings.V4_CHECKED_AT > datetime.utcnow():
    #     if st4_empty:
    #         log.debug("test_hosts being requested for 'any' ip ver. IPv6 status cached, but not IPv4 status. Checking IPv4 status...")
    #         check_v4()
    #     if st6_empty:
    #         log.debug("test_hosts being requested for 'any' ip ver. IPv4 status cached, but not IPv6 status. Checking IPv6 status...")
    #         check_v6()
    #     if not st4_empty and not st6_empty:
    #         log.debug(
    #             "Returning status %s based on: Working IPv4 = %s || Working IPv6 = %s",
    #             settings.HAS_WORKING_V4 or settings.HAS_WORKING_V6, settings.HAS_WORKING_V4, settings.HAS_WORKING_V6
    #         )
    #         return settings.HAS_WORKING_V4 or settings.HAS_WORKING_V6

    # max_hosts = int(kwargs.get('max_hosts', settings.NET_CHECK_HOST_COUNT_TRY))
    min_hosts_pos = int(kwargs.get('required_positive', settings.NET_CHECK_HOST_COUNT))
    # hosts = empty_if(hosts, settings.V4_TEST_HOSTS, itr=True)
    hosts = list(hosts)
    if randomise:
        random.shuffle(hosts)
    if len(hosts) > max_hosts:
        hosts = hosts[:max_hosts]
    total_hosts = len(hosts)
    total_working, total_broken = 0, 0
    log.debug("Testing %s hosts with IP version '%s' - timeout: %s", total_hosts, ipver, timeout)
    port = 80
    for h in hosts:
        try:
            # Split off an explicit ':port' suffix if the host entry has one, otherwise fall back to port 80
            nh = h.split(':')
            if len(nh) > 1:
                port = int(nh[-1])
                h = ':'.join(nh[:-1])
            else:
                h = ':'.join(nh)
                log.warning("Host is missing port: %s - falling back to port 80", h)
                port = 80
            log.debug("Checking host %s via port %s + IP version '%s'", h, port, ipver)
            if port == 80:
                res = check_host_http(h, port, ipver, throw=False, timeout=timeout)
            else:
                res = check_host(h, port, ipver, throw=False, timeout=timeout)
            if res:
                total_working += 1
                log.debug("check_host for %s came back true. incremented working hosts: %s", h, total_working)
            else:
                total_broken += 1
                log.debug("check_host for %s came back false. incremented broken hosts: %s", h, total_broken)
        except Exception as e:
            log.warning("Exception while checking host %s port %s", h, port)
    working = total_working >= min_hosts_pos
    log.info(
        "test_hosts - proto: %s - protocol working? %s || total hosts: %s || working hosts: %s || broken hosts: %s",
        ipver, working, total_hosts, total_working, total_broken
    )
    return working
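# Example usage (sketch): check general IPv4 connectivity against the module's default host list.
# The keyword arguments shown (max_hosts / required_positive / randomise) are the ones read from
# **kwargs above; the thresholds are illustrative.
#
#     if test_hosts(ipver='v4', timeout=3, max_hosts=4, required_positive=2):
#         print("IPv4 appears to be working")
#     else:
#         print("IPv4 looks broken or unreachable")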
async def test_hosts_async(hosts: List[str] = None, ipver: str = 'any', timeout: AnyNum = None, **kwargs) -> bool:
    randomise = is_true(kwargs.get('randomise', True))
    max_hosts = kwargs.get('max_hosts', settings.NET_CHECK_HOST_COUNT_TRY)
    if max_hosts is not None:
        max_hosts = int(max_hosts)
    timeout = empty_if(timeout, empty_if(socket.getdefaulttimeout(), 4, zero=True), zero=True)
    v4h, v6h = list(settings.V4_TEST_HOSTS), list(settings.V6_TEST_HOSTS)
    if randomise:
        random.shuffle(v4h)
        random.shuffle(v6h)
    if empty(hosts, True, True):
        # if empty(ipver, True, True) or ipver in ['any', 'all', 'both', 10, '10', '46', 46]:
        #     settings.V4_CHECKED_AT
        if isinstance(ipver, str):
            ipver = ipver.lower()
        if ipver in [4, '4', 'v4', 'ipv4']:
            hosts = v4h
            ipver = 4
        elif ipver in [6, '6', 'v6', 'ipv6']:
            hosts = v6h
            ipver = 6
        else:
            ipver = 'any'
            if max_hosts:
                hosts = v4h[:int(ceil(max_hosts / 2))] + v6h[:int(ceil(max_hosts / 2))]
            else:
                hosts = v4h + v6h
        if max_hosts:
            hosts = hosts[:max_hosts]

    # st4_empty = any([empty(settings.HAS_WORKING_V4, True, True), empty(settings.V4_CHECKED_AT, True, True)])
    # st6_empty = any([empty(settings.HAS_WORKING_V6, True, True), empty(settings.V6_CHECKED_AT, True, True)])
    # if ipver == 6 and not st6_empty and settings.V6_CHECKED_AT > datetime.utcnow():
    #     log.debug("Returning cached IPv6 status: working = %s", settings.HAS_WORKING_V6)
    #     return settings.HAS_WORKING_V6
    # if ipver == 4 and not st4_empty and settings.V4_CHECKED_AT > datetime.utcnow():
    #     log.debug("Returning cached IPv4 status: working = %s", settings.HAS_WORKING_V4)
    #     return settings.HAS_WORKING_V4
    # if ipver == 'any' and any([not st4_empty, not st6_empty]) and settings.V4_CHECKED_AT > datetime.utcnow():
    #     if st4_empty:
    #         log.debug("test_hosts being requested for 'any' ip ver. IPv6 status cached, but not IPv4 status. Checking IPv4 status...")
    #         await check_v4_async()
    #     if st6_empty:
    #         log.debug("test_hosts being requested for 'any' ip ver. IPv4 status cached, but not IPv6 status. Checking IPv6 status...")
    #         await check_v6_async(hosts)
    #     if not st4_empty and not st6_empty:
    #         log.debug(
    #             "Returning status %s based on: Working IPv4 = %s || Working IPv6 = %s",
    #             settings.HAS_WORKING_V4 or settings.HAS_WORKING_V6, settings.HAS_WORKING_V4, settings.HAS_WORKING_V6
    #         )
    #         return settings.HAS_WORKING_V4 or settings.HAS_WORKING_V6

    # max_hosts = int(kwargs.get('max_hosts', settings.NET_CHECK_HOST_COUNT_TRY))
    min_hosts_pos = int(kwargs.get('required_positive', settings.NET_CHECK_HOST_COUNT))
    # hosts = empty_if(hosts, settings.V4_TEST_HOSTS, itr=True)
    hosts = list(hosts)
    if randomise:
        random.shuffle(hosts)
    if len(hosts) > max_hosts:
        hosts = hosts[:max_hosts]
    # port = empty_if(port, 80, zero=True)
    total_hosts = len(hosts)
    total_working, total_broken = 0, 0
    working_list, broken_list = [], []
    log.debug("Testing %s hosts with IP version '%s' - timeout: %s", total_hosts, ipver, timeout)

    # Fan the host checks out concurrently, then collect the results (exceptions included) via gather()
    host_checks = []
    host_checks_hosts = []
    for h in hosts:
        # host_checks.append(asyncio.create_task(_test_host_async(h, ipver=ipver, timeout=timeout)))
        host_checks.append(asyncio.create_task(
            run_coro_thread_async(_test_host_async, h, ipver=ipver, timeout=timeout)
        ))
        host_checks_hosts.append(h)
    host_checks_res = await asyncio.gather(*host_checks, return_exceptions=True)

    for i, _res in enumerate(host_checks_res):
        h = host_checks_hosts[i]
        if isinstance(_res, Exception):
            log.warning("Exception while checking host %s", h)
            total_broken += 1
            continue
        res, h, port = _res
        if res:
            total_working += 1
            working_list.append(f"{h}:{port}")
            log.debug("check_host for %s (port %s) came back True (WORKING). incremented working hosts: %s",
                      h, port, total_working)
        else:
            total_broken += 1
            broken_list.append(f"{h}:{port}")
            log.debug("check_host for %s (port %s) came back False (! BROKEN !). incremented broken hosts: %s",
                      h, port, total_broken)
    # port = 80
    # for h in hosts:
    #     try:
    #         h, port, res = await _test_host_async(h, ipver, timeout)
    #         if res:
    #             total_working += 1
    #             log.debug("check_host for %s came back true. incremented working hosts: %s", h, total_working)
    #         else:
    #             total_broken += 1
    #             log.debug("check_host for %s came back false. incremented broken hosts: %s", h, total_broken)
    #     except Exception as e:
    #         log.warning("Exception while checking host %s port %s", h, port)
    working = total_working >= min_hosts_pos
    log.info(
        "test_hosts - proto: %s - protocol working? %s || total hosts: %s || working hosts: %s || broken hosts: %s",
        ipver, working, total_hosts, total_working, total_broken
    )
    log.debug("working hosts: %s", working_list)
    log.debug("broken hosts: %s", broken_list)
    return working
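# Example usage (sketch): the async variant fans the host checks out concurrently via
# asyncio.gather, so total runtime is roughly one timeout rather than one per host.
#
#     import asyncio
#
#     async def main():
#         ok = await test_hosts_async(ipver='any', timeout=3)
#         print("internet reachable?", ok)
#
#     asyncio.run(main())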
def retry_on_err(max_retries: int = 3, delay: Union[int, float] = 3, **retry_conf):
    """
    Decorates a function or class method, wraps the function/method with a try/catch block, and will automatically
    re-run the function with the same arguments up to ``max_retries`` times after any exception is raised, with a
    ``delay`` second delay between retries.

    If it still throws an exception after ``max_retries`` retries, it will log the exception details with
    ``fail_msg``, and then re-raise it.

    Usage (retry up to 5 times, 1 second between retries, fail immediately if IOError is detected)::

        >>> @retry_on_err(5, 1, fail_on=[IOError])
        ... def my_func(some=None, args=None):
        ...     if some == 'io': raise IOError()
        ...     raise FileExistsError()

    This will be re-run 5 times, 1 second apart after each exception is raised, before giving up::

        >>> my_func()

    Whereas this one will immediately re-raise the caught IOError on the first attempt, as it's passed in
    ``fail_on``::

        >>> my_func('io')

    .. attention:: For safety reasons, by default ``max_ignore`` is set to ``100``. This means after 100 retries
                   where an exception was ignored, the decorator will give up and raise the last exception.
                   This is to prevent the risk of infinite loops hanging your application. If you are 100% certain
                   that the function you've wrapped, and/or the exceptions passed in ``ignore`` cannot cause an
                   infinite retry loop, then you can pass ``max_ignore=False`` to the decorator to disable failure
                   after ``max_ignore`` ignored exceptions.

    :param int max_retries:  Maximum total retry attempts before giving up
    :param float delay:      Amount of time in seconds to sleep before re-trying the wrapped function
    :param retry_conf:       Less frequently used arguments, pass in as keyword args (see below)

    :key list fail_on:  A list() of Exception types that should result in immediate failure (don't retry, raise)

    :key list ignore:   A list() of Exception types that should be ignored (will retry, but without incrementing
        the failure counter)

    :key int|bool max_ignore: (Default: ``100``) If an exception is raised while retrying, and more than this many
        exceptions (listed in ``ignore``) have been ignored during retry attempts, then give up and raise the last
        exception. This feature is designed to prevent "ignored" exceptions causing an infinite retry loop. By
        default ``max_ignore`` is set to ``100``, but you can increase/decrease this as needed. You can also set it
        to ``False`` to disable raising when too many exceptions are ignored - however, it's strongly recommended
        NOT to disable ``max_ignore``, especially if you have ``instance_match=True``, as that could cause an
        infinite retry loop which hangs your application.

    :key bool instance_match: (Default: ``False``) If this is set to ``True``, then the exception type comparisons
        for ``fail_on`` and ``ignore`` will compare using ``isinstance(e, x)`` instead of ``type(e) is x``.
        If this is enabled, then exceptions listed in ``fail_on`` and ``ignore`` will also **match sub-classes**
        of the listed exceptions, instead of exact matches.

    :key str retry_msg: Override the log message used for retry attempts. First message param %s is func name,
        second message param %d is retry attempts remaining

    :key str fail_msg:  Override the log message used after all retry attempts are exhausted. First message param
        %s is func name, and second param %d is the number of times retried.
    """
    retry_msg: str = retry_conf.get('retry_msg', DEF_RETRY_MSG)
    fail_msg: str = retry_conf.get('fail_msg', DEF_FAIL_MSG)
    instance_match: bool = is_true(retry_conf.get('instance_match', False))
    fail_on: List[type] = list(retry_conf.get('fail_on', []))
    ignore_ex: List[type] = list(retry_conf.get('ignore', []))
    max_ignore: Union[bool, int] = retry_conf.get('max_ignore', 100)

    def _decorator(f):
        @functools.wraps(f)
        def wrapper(*args, **kwargs):
            retries = int(kwargs.pop('retry_attempts', 0))
            ignore_count = int(kwargs.pop('ignore_count', 0))
            try:
                return f(*args, **kwargs)
            except Exception as e:
                _fail = isinstance(e, tuple(fail_on)) if instance_match else type(e) in fail_on
                if _fail:
                    log.warning('Giving up. Re-raising exception %s (as requested by `fail_on` arg)', type(e))
                    raise e
                if max_ignore is not False and ignore_count > max_ignore:
                    log.warning(
                        'Giving up. Ignored too many exceptions (max_ignore: %d, ignore_count: %d). '
                        'Re-raising exception %s.', max_ignore, ignore_count, type(e)
                    )
                    raise e
                if retries < max_retries:
                    log.info('%s - %s', type(e), str(e))
                    log.info(retry_msg, f.__name__, max_retries - retries)
                    sleep(delay)
                    # If 'instance_match' is enabled, we check if the exception was an instance of any of the
                    # passed exception types, otherwise we use exact exception type comparison against the list.
                    # _ignore is True if we should ignore this exception (don't increment retries),
                    # or False if we should increment.
                    _ignore = isinstance(e, tuple(ignore_ex)) if instance_match else type(e) in ignore_ex
                    if _ignore:
                        log.debug(
                            " >> (?) Ignoring exception '%s' as exception is in 'ignore' list. Ignore Count: %d // "
                            "Max Ignores: %d // Instance Match: %s", type(e), ignore_count, max_ignore, instance_match
                        )
                    kwargs['retry_attempts'] = retries if _ignore else retries + 1
                    kwargs['ignore_count'] = ignore_count + 1 if _ignore else ignore_count
                    return wrapper(*args, **kwargs)
                log.exception(fail_msg, f.__name__, max_retries)
                raise e
        return wrapper
    return _decorator