Example #1
        def wrapper(*args, **kwargs):

            # Extract r_cache and r_cache_key from the wrapped function's kwargs if they're specified,
            # then remove them from the kwargs so they don't interfere with the wrapped function.
            enable_cache, rk = kwargs.get('r_cache', True), kwargs.get(
                'r_cache_key', cache_key)
            if 'r_cache' in kwargs: del kwargs['r_cache']
            if 'r_cache_key' in kwargs: del kwargs['r_cache_key']

            if callable(rk):
                rk = rk(*args, **kwargs)
            elif not empty(fmt_args, itr=True) or not whitelist:
                # If the cache key contains a format placeholder, e.g. {somevar} - then attempt to replace the
                # placeholders using the function's kwargs
                log.debug(
                    'Format_args not empty (or whitelist=False), formatting cache_key "%s"',
                    cache_key)
                rk = format_key(args, kwargs)
            log.debug('Trying to load "%s" from cache', rk)
            data = r.get(rk)

            if empty(data) or not enable_cache:
                log.debug(
                    'Not found in cache, or "r_cache" set to false. Calling wrapped function.'
                )
                data = f(*args, **kwargs)
                r.set(rk, data, timeout=cache_time)

            return data
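
The wrapper above pulls the reserved r_cache and r_cache_key keyword arguments off every call before delegating to the wrapped function. A minimal usage sketch, assuming the wrapper belongs to an r_cache-style caching decorator - the decorator arguments, get_user and expensive_lookup are purely illustrative:

@r_cache('user:{username}', cache_time=300)
def get_user(username):
    return expensive_lookup(username)            # only runs on a cache miss

get_user(username='bob')                         # first call: executes the function and caches the result
get_user(username='bob')                         # second call: served from the cache
get_user(username='bob', r_cache=False)          # skip the cache lookup for this call
get_user(username='bob', r_cache_key='user:x')   # override the cache key for this call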
Example #2
def geolocate_ips(
        *addrs,
        throw=False
) -> Generator[Tuple[str, Optional[GeoIPResult]], None, None]:
    """
    Same as :func:`.geolocate_ip` but accepts multiple IP addresses, and returns the results as a generator.
    
    Usage::
    
        >>> for ip, g in geolocate_ips('185.130.44.5', '8.8.4.4', '2a07:e00::333'):
        ...     print(f"{ip:<20} -> {str(g.city):<15} {str(g.country):<15} ({g.as_number} {g.as_name})")
        185.130.44.5         -> Stockholm       Sweden          (210083 Privex Inc.)
        8.8.4.4              -> None            United States   (15169 Google LLC)
        2a07:e00::333        -> Stockholm       Sweden          (210083 Privex Inc.)
        >>> data = dict(geolocate_ips('185.130.44.5', '8.8.4.4', '2a07:e00::333'))
        >>> data['8.8.4.4'].country
        'United States'
        >>> data['2a07:e00::333'].as_name
        'Privex Inc.'
    
    
    :param IP_OR_STR addrs: One or more IPv4 or IPv6 addresses to geo-locate
    :param bool throw: (Default: ``False``) If ``True``, will raise :class:`.GeoIPAddressNotFound` if an IP address isn't found
                       in the GeoIP database. If ``False``, will simply return ``None`` if it's not found.
    :raises GeoIPAddressNotFound: When ``throw`` is ``True`` and one of the ``addrs`` can't be found in a GeoIP database.
    :raises ValueError: When ``throw`` is ``True`` and one of the ``addrs`` is not a valid IP address.
    
    :return Tuple[str, Optional[GeoIPResult]] res: A generator which returns tuples containing the matching IP
                        address, and the :class:`.GeoIPResult` object containing the GeoIP data for the IP - or
                        ``None`` if ``throw`` is ``False``, and the IP address wasn't found in the database.
    """
    for addr in addrs:
        try:
            res = geolocate_ip(addr, throw=throw)
        except Exception as e:
            if throw:
                raise e
            log.warning("Ignoring exception while geo-locating IP '%s': %s %s",
                        addr, type(e), str(e))
            res = None
        if empty(res):
            yield (addr, None)
            continue
        # If every geolocation field came back empty, treat the result as "not found" too.
        if all([
                empty(res.country), empty(res.country_code), empty(res.as_name),
                empty(res.as_number), empty(res.lat), empty(res.long)
        ]):
            yield (addr, None)
            continue
        yield (addr, res)
Example #3
    def export_private(self, **kwargs) -> bytes:
        """
        Serialize the cryptography private key instance loaded into :class:`.KeyManager` into storable bytes.
        
        This method requires that you've instantiated KeyManager with the private key. It will raise a
        :class:`.EncryptionError` exception if the :py:attr:`.private_key` instance attribute is empty.
        
        Example::

            >>> km = KeyManager.load_keyfile('id_ed25519')
            >>> print(km.export_private().decode())
            -----BEGIN PRIVATE KEY-----
            MC4CAQAwBQYDK2VwBCIEIOeLS2XOcQz11VUnzh6KIZaNtT10YfzHv779zjm95XSy
            -----END PRIVATE KEY-----


        :keyword dict format: Override some or all of the default format/encoding for the keys.
                              Dict Keys: private_format,public_format,private_encoding,public_encoding
        :keyword Format format: If passed a :class:`.Format` instance, then this instance will be used for serialization
                                instead of merging defaults from :py:attr:`.default_formats`
        :return bytes key: The serialized key.
        """
        if empty(self.private_key):
            raise EncryptionError(
                'Cannot export private key as self.private_key is missing!')
        # alg = self.identify_algorithm(self.private_key)
        return self.export_key(self.private_key, **kwargs)
Example #4
def _repo(repo: str = None) -> str:
    """
    A very simple private helper function which ensures the passed ``repo`` is an absolute directory path. If ``repo`` is blank,
    then simply returns the current working directory. Converts relative paths into absolute paths by joining them to :func:`.getcwd`
    """
    repo = getcwd() if empty(repo) else repo
    return join(getcwd(), repo) if not isabs(repo) else repo
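Example #5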
    def get(self, key: str, default: Any = None, fail: bool = False) -> Any:
        key = str(key)
        res = self.redis.get(key)
        if empty(res):
            if fail: raise CacheNotFound(f'Cache key "{key}" was not found.')
            return default
        return pickle.loads(res) if self.use_pickle else res
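
A short usage sketch for a get() implementation like the one above, assuming a matching adapter class (called RedisCache here purely for illustration) with a corresponding set() method:

c = RedisCache()
c.set('greeting', 'hello world')
c.get('greeting')                    # 'hello world' (unpickled automatically when use_pickle is enabled)
c.get('missing_key')                 # None - the `default` argument
c.get('missing_key', default='n/a')  # 'n/a'
c.get('missing_key', fail=True)      # raises CacheNotFound instead of returning the default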
Example #6
def geoip_manager(geo_type: str = None) -> Optional[geoip2.database.Reader]:
    if not empty(geo_type):
        yield plugin.get_geoip(geo_type)
        cleanup()
    else:
        yield None
        cleanup()
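
geoip_manager() yields a geoip2.database.Reader (or None when no geo_type is given) and runs cleanup() afterwards, which suggests it is wrapped as a context manager elsewhere (e.g. via contextlib.contextmanager). A usage sketch under that assumption - the 'city' database type and the IP are illustrative:

with geoip_manager('city') as reader:
    if reader is not None:
        rec = reader.city('185.130.44.5')   # standard geoip2 Reader lookup
        print(rec.country.name, rec.city.name)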
Example #7
def _get_all_threadstore(
    name=None
) -> Generator[Tuple[Union[int, str], Union[Dict[str, Any], Any]], None, None]:
    """
    
    Get the ``'redis'`` key from every thread in the thread store::
    
        >>> for t_id, inst in _get_all_threadstore('redis'):
        ...     inst.close()
        ...
        >>> clean_threadstore(name='redis')
    
    Get the thread store for every single thread::
        
        >>> for t_id, ts in _get_all_threadstore():
        ...     if 'redis' in ts: print(t_id, 'has redis instance!')
        ...     if 'aiomemcached' in ts: print(t_id, 'has asyncio memcached instance!')
    
    
    :param str name: Yield only this key from each thread store (if it exists)
    :return Generator store_gen: A generator of tuples containing either ``(thread_id, thread_store: dict)``, or ``(t_id, value)``
                                 depending on whether ``name`` is empty or not.
    """
    for t_id, ts in __STORE['threads'].items():
        if empty(name):
            yield t_id, ts
        elif name in ts:
            yield t_id, ts[name]
Example #8
    def _calc_expires(self,
                      expires_at: Union[Number, datetime] = None,
                      expires_secs: Number = None) -> Optional[float]:
        if not empty(expires_at, zero=True):
            if isinstance(expires_at, datetime):
                return float(self._datetime_to_unix(expires_at))
            if isinstance(expires_at, (int, str, Decimal)):
                return self._calc_expires(expires_at=float(expires_at))
            if isinstance(expires_at, float):
                # Values smaller than ~5 years worth of seconds are treated as relative offsets from now,
                # anything larger is assumed to already be an absolute unix timestamp.
                return float(time.time() + expires_at) if expires_at < float(YEAR * 5) else expires_at
            raise ValueError(
                f"{self.__class__.__name__}._calc_expires expected expires_at to be a datetime or numeric object. "
                f"object passed was type: {type(expires_at)} || repr: {repr(expires_at)}"
            )
        return time.time() + float(expires_secs) if not empty(expires_secs, zero=True) else None
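Example #9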
def convert_int_bool(d, if_empty=False, fail_empty=False) -> bool:
    """Convert an integer ``d`` into a boolean (``0`` for ``False``, ``1`` for ``True``)"""
    if empty(d):
        if fail_empty:
            raise AttributeError(
                f"Error converting '{d}' into a boolean. Parameter 'd' was empty!"
            )
        return if_empty
    return is_true(d)
Example #10
async def get_rdns_async(host: IP_OR_STR,
                         throw=True,
                         version='any',
                         name_port=80) -> Optional[str]:
    """
    AsyncIO version of :func:`.get_rdns` - get the reverse DNS for a given host (IP address or domain)
    
        >>> from privex.helpers import get_rdns_async
        >>> await get_rdns_async('185.130.44.10')
        'web-se1.privex.io'
        >>> await get_rdns_async('2a07:e00::333')
        'se.dns.privex.io'
        >>> await get_rdns_async('privex.io')
        'web-se1.privex.io'
    
    :param str|IPv4Address|IPv6Address host: An IPv4/v6 address, or domain to lookup reverse DNS for.
    :param bool throw: (Default: ``True``) When ``True``, will raise :class:`.ReverseDNSNotFound` or :class:`.InvalidHost` when no
                       rDNS records can be found for ``host``, or when ``host`` is an invalid IP / non-existent domain.
                       When ``False``, will simply return ``None`` when ``host`` is invalid, or no rDNS records are found.
    
    :param str|int version: IP version to use when looking up a domain/hostname (default: ``'any'``)
    
    :param int name_port: This generally isn't important. This port is passed to :func:`loop.getnameinfo` when looking
                          up the reverse DNS for ``host``. Usually there's no reason to change this from the default.
    
    :raises ReverseDNSNotFound: When ``throw`` is True and no rDNS records were found for ``host``
    :raises InvalidHost: When ``throw`` is True and ``host`` is an invalid IP address or non-existent domain/hostname
    :return Optional[str] rDNS: The reverse DNS hostname for ``host`` (value of PTR record)
    """
    loop = asyncio.get_event_loop()
    host = str(host)
    try:
        if not is_ip(host):
            orig_host = host
            host = await resolve_ip_async(host, version=version)
            if empty(host):
                if throw:
                    raise InvalidHost(
                        f"Host '{orig_host}' is not a valid IP address, nor an existent domain"
                    )
                return None

        res = await loop.getnameinfo((host, name_port))
        rdns = res[0]
        if is_ip(rdns):
            if throw:
                raise ReverseDNSNotFound(
                    f"No reverse DNS records found for host '{host}' - result was: {rdns}"
                )
            return None
        return rdns
    except socket.gaierror as e:
        if throw:
            raise InvalidHost(
                f"Host '{host}' is not a valid IP address, nor an existent domain: {type(e)} {str(e)}"
            )
    return None
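Example #11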
def convert_bool_int(d, if_empty=0, fail_empty=False) -> int:
    """Convert a boolean ``d`` into an integer (``0`` for ``False``, ``1`` for ``True``)"""
    if type(d) is int: return 1 if d >= 1 else 0
    if empty(d):
        if fail_empty:
            raise AttributeError(
                f"Error converting '{d}' into a boolean. Parameter 'd' was empty!"
            )
        return if_empty
    return 1 if is_true(d) else 0
Example #12
    def get(self, key: Union[bytes, str], default: Any = None, fail: bool = False) -> Any:
        key = str(stringify(key))
        res = self.mcache.get(key)
        if empty(res):
            if fail: raise CacheNotFound(f'Cache key "{key}" was not found.')
            return default
        return pickle.loads(res) if self.use_pickle else res
Example #13
    def get_all_cache_keys(cls) -> Set[str]:
        """
        Retrieve the list of cache keys as a :class:`.set` from the cache key ``'query_cache_keys'``, which
        stores the list of ``query_hidden:xxx:xxx:xxx`` keys, allowing for easy clearing of those cache keys when needed.

        :return Set[str] keys: The set of tracked cache keys.
        """
        lk = cls.cache_sep.join([cls.cache_prefix, cls.cache_key_log_name])
        qk = cached.get_or_set(lk, set(), cls.default_cache_key_time)
        qk = set() if empty(qk, True, True) else qk
        if isinstance(qk, (list, tuple)): qk = set(qk)
        return qk
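Example #14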
    def get_fernet(self, key: Union[str, bytes] = None) -> Fernet:
        """
        Used internally for getting Fernet instance with auto-fallback to :py:attr:`.encrypt_key` and exception handling.

        :param str key: Base64 Fernet symmetric key for en/decrypting data. If empty, will fallback to :py:attr:`.encrypt_key`
        :raises EncryptKeyMissing: Either no key was passed, or something is wrong with the key.
        :return Fernet f: Instance of Fernet using passed ``key`` or self.encrypt_key for encryption.
        """
        if empty(key) and empty(self.encrypt_key):
            raise EncryptKeyMissing(
                'No key argument passed, and ENCRYPT_KEY is empty. Cannot encrypt/decrypt.'
            )

        key = self.encrypt_key if empty(key) else key
        try:
            f = Fernet(key)
            return f
        except (binascii.Error, ValueError):
            raise EncryptKeyMissing(
                'The passed ``key`` or self.encrypt_key is not a valid Fernet key'
            )
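
For reference, the Fernet object returned by a helper like get_fernet() is the symmetric-encryption primitive from the cryptography package; a valid key is 32 url-safe base64-encoded bytes, which is why a malformed encrypt_key triggers the error above. A small standalone sketch of the underlying API:

from cryptography.fernet import Fernet

key = Fernet.generate_key()         # 32 url-safe base64-encoded bytes - a valid encrypt_key value
f = Fernet(key)
token = f.encrypt(b'hello world')   # opaque Fernet token (bytes)
f.decrypt(token)                    # b'hello world'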
Example #15
        async def wrapper(*args, **kwargs):

            # Extract r_cache and r_cache_key from the wrapped function's kwargs if they're specified,
            # then remove them from the kwargs so they don't interfere with the wrapped function.
            enable_cache, rk = kwargs.get('r_cache', True), kwargs.get(
                'r_cache_key', cache_key)
            if 'r_cache' in kwargs: del kwargs['r_cache']
            if 'r_cache_key' in kwargs: del kwargs['r_cache_key']

            if not isinstance(rk, str):
                rk = await await_if_needed(rk, *args, **kwargs)
            elif not empty(fmt_args, itr=True) or not whitelist:
                # If the cache key contains a format placeholder, e.g. {somevar} - then attempt to replace the
                # placeholders using the function's kwargs
                log.debug(
                    'Format_args not empty (or whitelist=False), formatting cache_key "%s"',
                    cache_key)
                rk = _format_key(args,
                                 kwargs,
                                 cache_key=cache_key,
                                 whitelist=whitelist,
                                 fmt_opt=format_opt,
                                 fmt_args=format_args)
            # To ensure no event loop / thread cache instance conflicts, we use the cache adapter as a context manager, which
            # is supposed to disconnect + destroy the connection library instance, and re-create it in the current loop/thread.
            async with cache_adapter as r:
                # If using an async cache adapter, r.get might be async...
                log.debug('Trying to load "%s" from cache', rk)
                data = await await_if_needed(r.get, rk)

                if empty(data) or not enable_cache:
                    log.debug(
                        'Not found in cache, or "r_cache" set to false. Calling wrapped async function.'
                    )
                    data = await await_if_needed(f, *args, **kwargs)

                    # If using an async cache adapter, r.get might be async...
                    await await_if_needed(r.set, rk, data, timeout=cache_time)

            return data
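Example #16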
    def get(self,
            key: str,
            default: Any = None,
            fail: bool = False,
            _auto_purge=True) -> Any:
        key = str(key)
        _not_found_msg = f'Cache key "{key}" was not found.'
        if _auto_purge: self.purge_expired()
        res = self.wrapper.find_cache_key(key)
        if _cache_result_expired(res, _auto_purge=_auto_purge):
            log.debug(
                "Caller attempted to retrieve expired key '%s', but _auto_purge is True - auto-removing expired key %s",
                key, key)
            self.remove(key)
            res = None
            _not_found_msg += ' (key was expired - auto-removed)'
        if empty(res):
            if fail: raise CacheNotFound(_not_found_msg)
            return default
        return pickle.loads(res.value) if self.use_pickle else res.value
Example #17
def version_eq_gt(min_version: Union[tuple, list],
                  current_version: Union[tuple, list]) -> bool:
    """
    Check that ``current_version`` is either equal to, or greater than ``min_version``.
    
    Examples::
    
        >>> mv = (3, 1)
        >>> version_eq_gt(mv, (3, 0, 0))
        False
        >>> version_eq_gt(mv, (4, 0, 0))
        True
        >>> version_eq_gt(mv, (3, 1, 0))
        True
        >>> version_eq_gt(mv, (3, 3))
        True
        >>> version_eq_gt(mv, (3,))
        False
        >>> version_eq_gt((3, 0, 0), (3,))
        True
    
    :param tuple|list min_version:      The minimum version to check for, as a list/tuple of version segments, e.g. ``(1, 1, 0)``
    :param tuple|list current_version:  The version to check ``min_version`` against, as a list/tuple of version
                                        segments, e.g. ``(1, 1, 1)``
    :return bool version_eq_or_gt: ``True`` if ``current_version`` is equal to or greater than ``min_version``, otherwise ``False``
    """
    v = current_version
    for i, b in enumerate(min_version):
        # If we've reached the end of current_version but min_version still has values left,
        # then we'll assume they're equal only if the current min_version segment is empty (including value 0)
        if (len(v) - 1) < i: return empty(b, True, True)

        if v[i] > b:
            return True  # If the current version segment is higher than the min segment, we're fine.
        if v[i] == b:
            continue  # If the current version segment is equal, then we should check the next segment.
        if v[i] < b:
            return False  # If the segment is lower, this version is older than the minimum.

    # If we didn't return True or False by now, the version is equal. So return True.
    return True
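Example #18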
def convert_unixtime_datetime(d: Union[str, int, float, Decimal],
                              if_empty=None,
                              fail_empty=False) -> datetime:
    """Convert a unix timestamp into a :class:`datetime.datetime` object"""
    from dateutil.tz import tzutc
    if empty(d):
        if fail_empty:
            raise AttributeError(
                "Error converting datetime. Parameter 'd' was empty!")
        return if_empty
    if isinstance(d, datetime):
        return d
    d = int(d)
    # If the timestamp is larger than NOW + 50 years in seconds, then it's probably milliseconds.
    if d > datetime.utcnow().timestamp() + (DECADE * 5):
        t = datetime.utcfromtimestamp(d // 1000)
    else:
        t = datetime.utcfromtimestamp(d)

    t = t.replace(tzinfo=tzutc())
    return t
Example #19
    def export_public(self, **kwargs) -> bytes:
        """
        Serialize the cryptography public key instance loaded into :class:`.KeyManager` into storable bytes.
        
        This method works whether you've instantiated KeyManager with the public key directly, or the private key,
        as the public key is automatically interpolated from the private key by :py:meth:`.__init__`

        Example::
        
            >>> km = KeyManager.load_keyfile('id_ed25519.pub')
            >>> km.export_public()
            b'ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIA6vtKgeNSBERSY1xmr47Ve3uyRALxPR+qOeFeUHrUaf'
            
        :keyword dict format: Override some or all of the default format/encoding for the keys.
                              Dict Keys: private_format,public_format,private_encoding,public_encoding
        :keyword Format format: If passed a :class:`.Format` instance, then this instance will be used for serialization
                                instead of merging defaults from :py:attr:`.default_formats`
        :return bytes key: The serialized key.
        """
        if empty(self.public_key):
            raise EncryptionError(
                'Cannot export public key as self.public_key is missing!')
        # alg = self.identify_algorithm(self.public_key)
        return self.export_key(self.public_key, **kwargs)
Example #20
def clean_threadstore(thread_id=None,
                      name=None,
                      clean_all: bool = False) -> bool:
    """
    Remove the per-thread instance storage in :attr:`.__STORE`, usually called when a thread is exiting.

    Can also be used to clear a certain key in all thread stores, or completely clear every key from every thread store.

    Example::

        >>> def some_thread():
        ...     r = get_redis()
        ...     print('doing something')
        ...     print('cleaning up...')
        ...     clean_threadstore()       # With no arguments, it cleans the thread store for the thread that called it.
        >>> t = threading.Thread(target=some_thread)
        >>> t.start()
        >>> t.join()

    Usage outside of a thread::

        >>> t = threading.Thread(target=some_thread)
        >>> t.start()
        >>> thread_id = t.ident                      # Get the thread ID for the started thread
        >>> t.join()                                 # Wait for the thread to finish
        >>> if thread_id is not None:                # Make sure the thread ID isn't None
        ...     clean_threadstore(thread_id)         # Cleanup any leftover instances, if there are any.
        ...

    Removing an individual item from thread store::
        
        >>> def some_thread():
        ...     r = get_redis()
        ...     print('doing something')
        ...     print('cleaning up...')
        ...     clean_threadstore(name='redis')   # Delete only the key 'redis' from the thread store
    
    Removing an individual item from the thread store for **ALL thread ID's**::
    
        >>> # Remove the redis instance from every thread store
        >>> clean_threadstore(name='redis', clean_all=True)
    
    Clearing the entire thread store for every thread ID::
    
        >>> clean_threadstore(clean_all=True)
    
    
    :param thread_id: The ID of the thread (usually from :func:`threading.get_ident`) to clean the storage for.
                      If left as None, will use the ID returned by :func:`threading.get_ident`.
    
    :param name:      If specified, then only the key ``name`` will be deleted from the thread store, instead of the entire thread store.
    :param bool clean_all: (default: ``False``) If ``True`` - when ``name`` is non-empty - that key will be removed from every
                           thread ID's thread store - while if ``name`` is empty, every single thread ID's thread store is cleared.
    """
    thread_id = empty_if(thread_id, threading.get_ident())

    if clean_all:
        for t_id, ts in __STORE['threads'].items():
            ts: dict
            if empty(name):
                log.debug(
                    "[clean_threadstore] (clean_all True) Cleaning entire thread store for thread ID '%s'",
                    t_id)
                for n in list(ts.keys()):
                    log.debug(
                        "[clean_threadstore] Cleaning '%s' key for thread ID '%s'",
                        n, t_id)
                    del ts[n]
                continue
            log.debug(
                "[clean_threadstore] (clean_all True) Cleaning '%s' key for thread ID '%s'",
                name, t_id)
            if name in ts:
                log.debug(
                    "[clean_threadstore] Found %s in thread ID %s - deleting...",
                    name, t_id)
                del ts[name]
        return True

    if thread_id in __STORE['threads']:
        ts = __STORE['threads'][thread_id]
        if name is None:
            del __STORE['threads'][thread_id]
            return True
        elif name in ts:
            del ts[name]
            return True
    return False
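Example #21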
def _cache_result_expired(res, _auto_purge=True) -> bool:
    if empty(res): return False
    if empty(res.expires_at, zero=True) or not _auto_purge: return False
    return float(res.expires_at) <= time.time()
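Example #22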
    def purge_due(self) -> bool:
        lpe = AsyncSqliteCache.last_purged_expired
        return empty(lpe, zero=True) or (time.time() - lpe) >= self.purge_every
Example #23
    def _repo(self, repo=None) -> str:
        return self.repo if empty(repo) and not empty(self.repo) else _repo(repo)
Example #24
def check_host(host: IP_OR_STR,
               port: AnyNum,
               version='any',
               throw=False,
               **kwargs) -> bool:
    """
    Test if the service on port ``port`` for host ``host`` is working. AsyncIO version: :func:`.check_host_async`
    
    Basic usage (services which send the client data immediately after connecting)::
    
        >>> check_host('hiveseed-se.privex.io', 2001)
        True
        >>> check_host('hiveseed-se.privex.io', 9991)
        False
    
    For some services, such as HTTP - it's necessary to transmit some data to the host before it will
    send a response. Using the ``send`` kwarg, you can transmit an arbitrary string/bytes upon connection.
    
    Sending data to ``host`` after connecting::
    
        >>> check_host('files.privex.io', 80, send=b"GET / HTTP/1.1\\n\\n")
        True
    
    
    :param str|IPv4Address|IPv6Address host: Hostname or IP to test
    :param int|str port: Port number on ``host`` to connect to
    :param str|int version: When connecting to a hostname, this can be set to ``'v4'``, ``'v6'`` or similar
                            to ensure the connection is via that IP version
    
    :param bool throw: (default: ``False``) When ``True``, will raise exceptions instead of returning ``False``
    :param kwargs: Additional configuration options (see below)
    
    :keyword int receive: (default: ``100``) Amount of bytes to attempt to receive from the server (``0`` to disable)
    :keyword bytes|str send: If ``send`` is specified, the data in ``send`` will be transmitted to the server before receiving.
    :keyword int stype: Socket type, e.g. :attr:`socket.SOCK_STREAM`
    
    :keyword float|int timeout: Socket timeout. If not passed, uses the default from :func:`socket.getdefaulttimeout`.
                                If the global default timeout is ``None``, then falls back to ``10.0``
    
    :raises socket.timeout: When ``throw=True`` and a timeout occurs.
    :raises socket.gaierror: When ``throw=True`` and various errors occur
    :raises ConnectionRefusedError: When ``throw=True`` and the connection was refused
    :raises ConnectionResetError: When ``throw=True`` and the connection was reset
    
    :return bool success: ``True`` if successfully connected + sent/received data. Otherwise ``False``.
    """
    receive, stype = int(kwargs.get('receive', 100)), kwargs.get('stype', socket.SOCK_STREAM)
    timeout, send = kwargs.get('timeout', 'n/a'), kwargs.get('send')
    use_ssl = kwargs.get('ssl', kwargs.get('use_ssl'))
    ssl_params = kwargs.get('ssl_params', dict(verify_cert=False, check_hostname=False))
    if timeout == 'n/a':
        t = socket.getdefaulttimeout()
        timeout = 10.0 if not t else t

    try:
        s_ver = socket.AF_INET
        ip = resolve_ip(host, version)

        if ip_is_v6(ip): s_ver = socket.AF_INET6

        if port == 443 and use_ssl is None:
            log.warning(
                "check_host: automatically setting use_ssl=True as port is 443 and use_ssl was not specified."
            )
            use_ssl = True
        with socket.socket(s_ver, stype) as s:
            orig_sock = s
            if timeout: s.settimeout(float(timeout))
            if use_ssl:
                ctx = get_ssl_context(**ssl_params)
                s = ctx.wrap_socket(
                    s,
                    server_hostname=kwargs.get('server_hostname'),
                    session=kwargs.get('session'),
                    do_handshake_on_connect=kwargs.get(
                        'do_handshake_on_connect', True),
                )

            s.connect((ip, int(port)))
            if not empty(send):
                s.sendall(byteify(send))
            if receive > 0:
                s.recv(int(receive))
            if use_ssl:
                s.close()
        return True
    except (socket.timeout, TimeoutError, ConnectionRefusedError,
            ConnectionResetError, socket.gaierror) as e:
        if throw:
            raise e
    return False
Example #25
def test_hosts(hosts: List[str] = None,
               ipver: str = 'any',
               timeout: AnyNum = None,
               **kwargs) -> bool:
    randomise = is_true(kwargs.get('randomise', True))
    max_hosts = kwargs.get('max_hosts', settings.NET_CHECK_HOST_COUNT_TRY)
    if max_hosts is not None: max_hosts = int(max_hosts)
    timeout = empty_if(timeout,
                       empty_if(socket.getdefaulttimeout(), 4, zero=True),
                       zero=True)

    v4h, v6h = list(settings.V4_TEST_HOSTS), list(settings.V6_TEST_HOSTS)
    if randomise: random.shuffle(v4h)
    if randomise: random.shuffle(v6h)

    if empty(hosts, True, True):
        # if empty(ipver, True, True) or ipver in ['any', 'all', 'both', 10, '10', '46', 46]:
        #     settings.V4_CHECKED_AT
        if isinstance(ipver, str): ipver = ipver.lower()
        if ipver in [4, '4', 'v4', 'ipv4']:
            hosts = v4h
            ipver = 4
        elif ipver in [6, '6', 'v6', 'ipv6']:
            hosts = v6h
            ipver = 6
        else:
            ipver = 'any'
            if max_hosts:
                hosts = v4h[:int(ceil(max_hosts / 2))] + v6h[:int(ceil(max_hosts / 2))]
            else:
                hosts = v4h + v6h

    if max_hosts: hosts = hosts[:max_hosts]

    # st4_empty = any([empty(settings.HAS_WORKING_V4, True, True), empty(settings.V4_CHECKED_AT, True, True)])
    # st6_empty = any([empty(settings.HAS_WORKING_V6, True, True), empty(settings.V6_CHECKED_AT, True, True)])

    # if ipver == 6 and not st6_empty and settings.V6_CHECKED_AT > datetime.utcnow():
    #     # if settings.V6_CHECKED_AT > datetime.utcnow()
    #     log.debug("Returning cached IPv6 status: working = %s", settings.HAS_WORKING_V6)
    #     return settings.HAS_WORKING_V6
    # if ipver == 4 and not st4_empty and settings.V4_CHECKED_AT > datetime.utcnow():
    #     # if settings.V6_CHECKED_AT > datetime.utcnow()
    #     log.debug("Returning cached IPv4 status: working = %s", settings.HAS_WORKING_V4)
    #     return settings.HAS_WORKING_V4

    # if ipver == 'any' and any([not st4_empty, not st6_empty]) and settings.V4_CHECKED_AT > datetime.utcnow():
    #     # if settings.V6_CHECKED_AT > datetime.utcnow()
    #     if st4_empty:
    #         log.debug("test_hosts being requested for 'any' ip ver. IPv6 status cached, but not IPv4 status. Checking IPv4 status...")
    #         check_v4()
    #     if st6_empty:
    #         log.debug("test_hosts being requested for 'any' ip ver. IPv4 status cached, but not IPv6 status. Checking IPv6 status...")
    #         check_v6()
    #     # if not st4_empty and not st6_empty:
    #     log.debug(
    #         "Returning status %s based on: Working IPv4 = %s || Working IPv6 = %s",
    #         settings.HAS_WORKING_V4 or settings.HAS_WORKING_V6, settings.HAS_WORKING_V4, settings.HAS_WORKING_V6
    #     )
    #     return settings.HAS_WORKING_V4 or settings.HAS_WORKING_V6

    # max_hosts = int(kwargs.get('max_hosts', settings.NET_CHECK_HOST_COUNT_TRY))
    min_hosts_pos = int(
        kwargs.get('required_positive', settings.NET_CHECK_HOST_COUNT))

    # hosts = empty_if(hosts, settings.V4_TEST_HOSTS, itr=True)
    hosts = [x for x in hosts]

    if randomise: random.shuffle(hosts)

    if max_hosts and len(hosts) > max_hosts: hosts = hosts[:max_hosts]

    total_hosts = len(hosts)
    total_working, total_broken = 0, 0

    log.debug("Testing %s hosts with IP version '%s' - timeout: %s",
              total_hosts, ipver, timeout)
    port = 80

    for h in hosts:
        try:
            nh = h.split(':')
            if len(nh) > 1:
                port = int(nh[-1])
                h = ':'.join(nh[:-1])
            else:
                h = ':'.join(nh)
                log.warning(
                    "Host is missing port: %s - falling back to port 80", h)
                port = 80

            log.debug("Checking host %s via port %s + IP version '%s'", h,
                      port, ipver)

            if port == 80:
                res = check_host_http(h,
                                      port,
                                      ipver,
                                      throw=False,
                                      timeout=timeout)
            else:
                res = check_host(h, port, ipver, throw=False, timeout=timeout)
            if res:
                total_working += 1
                log.debug(
                    "check_host for %s came back true. incremented working hosts: %s",
                    h, total_working)
            else:
                total_broken += 1
                log.debug(
                    "check_host for %s came back false. incremented broken hosts: %s",
                    h, total_broken)

        except Exception as e:
            log.warning("Exception while checking host %s port %s", h, port)

    working = total_working >= min_hosts_pos

    log.info(
        "test_hosts - proto: %s - protocol working? %s || total hosts: %s || working hosts: %s || broken hosts: %s",
        ipver, working, total_hosts, total_working, total_broken)

    return working
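
test_hosts() expects hosts as 'host:port' strings (falling back to port 80 when no port is given) and returns True once at least required_positive hosts respond. A usage sketch - the host list and numbers are purely illustrative:

test_hosts()                                              # test the default IPv4 + IPv6 host lists
test_hosts(ipver='v4', timeout=3)                         # only IPv4 test hosts, 3 second timeout
test_hosts(['files.privex.io:80', 'hiveseed-se.privex.io:2001'],
           max_hosts=2, required_positive=1)              # custom hosts - working if at least 1 responds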
Example #26
async def test_hosts_async(hosts: List[str] = None,
                           ipver: str = 'any',
                           timeout: AnyNum = None,
                           **kwargs) -> bool:
    randomise = is_true(kwargs.get('randomise', True))
    max_hosts = kwargs.get('max_hosts', settings.NET_CHECK_HOST_COUNT_TRY)
    if max_hosts is not None: max_hosts = int(max_hosts)
    timeout = empty_if(timeout,
                       empty_if(socket.getdefaulttimeout(), 4, zero=True),
                       zero=True)

    v4h, v6h = list(settings.V4_TEST_HOSTS), list(settings.V6_TEST_HOSTS)
    if randomise: random.shuffle(v4h)
    if randomise: random.shuffle(v6h)

    if empty(hosts, True, True):
        # if empty(ipver, True, True) or ipver in ['any', 'all', 'both', 10, '10', '46', 46]:
        #     settings.V4_CHECKED_AT
        if isinstance(ipver, str): ipver = ipver.lower()
        if ipver in [4, '4', 'v4', 'ipv4']:
            hosts = v4h
            ipver = 4
        elif ipver in [6, '6', 'v6', 'ipv6']:
            hosts = v6h
            ipver = 6
        else:
            ipver = 'any'
            if max_hosts:
                hosts = v4h[:int(ceil(max_hosts / 2))] + v6h[:int(ceil(max_hosts / 2))]
            else:
                hosts = v4h + v6h

    if max_hosts: hosts = hosts[:max_hosts]

    # st4_empty = any([empty(settings.HAS_WORKING_V4, True, True), empty(settings.V4_CHECKED_AT, True, True)])
    # st6_empty = any([empty(settings.HAS_WORKING_V6, True, True), empty(settings.V6_CHECKED_AT, True, True)])

    # if ipver == 6 and not st6_empty and settings.V6_CHECKED_AT > datetime.utcnow():
    #     # if settings.V6_CHECKED_AT > datetime.utcnow()
    #     log.debug("Returning cached IPv6 status: working = %s", settings.HAS_WORKING_V6)
    #     return settings.HAS_WORKING_V6
    # if ipver == 4 and not st4_empty and settings.V4_CHECKED_AT > datetime.utcnow():
    #     # if settings.V6_CHECKED_AT > datetime.utcnow()
    #     log.debug("Returning cached IPv4 status: working = %s", settings.HAS_WORKING_V4)
    #     return settings.HAS_WORKING_V4
    #
    # if ipver == 'any' and any([not st4_empty, not st6_empty]) and settings.V4_CHECKED_AT > datetime.utcnow():
    #     # if settings.V6_CHECKED_AT > datetime.utcnow()
    #     if st4_empty:
    #         log.debug("test_hosts being requested for 'any' ip ver. IPv6 status cached, but not IPv4 status. Checking IPv4 status...")
    #         await check_v4_async()
    #     if st6_empty:
    #         log.debug("test_hosts being requested for 'any' ip ver. IPv4 status cached, but not IPv6 status. Checking IPv6 status...")
    #         await check_v6_async(hosts)
    #     # if not st4_empty and not st6_empty:
    #     log.debug(
    #         "Returning status %s based on: Working IPv4 = %s || Working IPv6 = %s",
    #         settings.HAS_WORKING_V4 or settings.HAS_WORKING_V6, settings.HAS_WORKING_V4, settings.HAS_WORKING_V6
    #     )
    #     return settings.HAS_WORKING_V4 or settings.HAS_WORKING_V6

    # max_hosts = int(kwargs.get('max_hosts', settings.NET_CHECK_HOST_COUNT_TRY))
    min_hosts_pos = int(
        kwargs.get('required_positive', settings.NET_CHECK_HOST_COUNT))

    # hosts = empty_if(hosts, settings.V4_TEST_HOSTS, itr=True)
    hosts = [x for x in hosts]

    if randomise: random.shuffle(hosts)

    if max_hosts and len(hosts) > max_hosts: hosts = hosts[:max_hosts]

    # port = empty_if(port, 80, zero=True)

    total_hosts = len(hosts)
    total_working, total_broken = 0, 0
    working_list, broken_list = [], []
    log.debug("Testing %s hosts with IP version '%s' - timeout: %s",
              total_hosts, ipver, timeout)

    host_checks = []
    host_checks_hosts = []
    for h in hosts:
        # host_checks.append(
        #     asyncio.create_task(_test_host_async(h, ipver=ipver, timeout=timeout))
        # )
        host_checks.append(
            asyncio.create_task(
                run_coro_thread_async(_test_host_async,
                                      h,
                                      ipver=ipver,
                                      timeout=timeout)))
        host_checks_hosts.append(h)

    host_checks_res = await asyncio.gather(*host_checks,
                                           return_exceptions=True)
    for i, _res in enumerate(host_checks_res):
        h = host_checks_hosts[i]
        if isinstance(_res, Exception):
            log.warning("Exception while checking host %s", h)
            total_broken += 1
            continue

        res, h, port = _res

        if res:
            total_working += 1
            working_list.append(f"{h}:{port}")
            log.debug(
                "check_host for %s (port %s) came back True (WORKING). incremented working hosts: %s",
                h, port, total_working)
        else:
            total_broken += 1
            broken_list.append(f"{h}:{port}")
            log.debug(
                "check_host for %s (port %s) came back False (! BROKEN !). incremented broken hosts: %s",
                h, port, total_broken)

    # port = 80
    # for h in hosts:
    #     try:
    #         h, port, res = await _test_host_async(h, ipver, timeout)
    #         if res:
    #             total_working += 1
    #             log.debug("check_host for %s came back true. incremented working hosts: %s", h, total_working)
    #         else:
    #             total_broken += 1
    #             log.debug("check_host for %s came back false. incremented broken hosts: %s", h, total_broken)
    #
    #     except Exception as e:
    #         log.warning("Exception while checking host %s port %s", h, port)

    working = total_working >= min_hosts_pos

    log.info(
        "test_hosts - proto: %s - protocol working? %s || total hosts: %s || working hosts: %s || broken hosts: %s",
        ipver, working, total_hosts, total_working, total_broken)
    log.debug("working hosts: %s", working_list)
    log.debug("broken hosts: %s", broken_list)

    return working
Example #27
async def check_host_async(host: IP_OR_STR,
                           port: AnyNum,
                           version='any',
                           throw=False,
                           **kwargs) -> bool:
    """
    AsyncIO version of :func:`.check_host`. Test if the service on port ``port`` for host ``host`` is working.

    Basic usage (services which send the client data immediately after connecting)::

        >>> await check_host_async('hiveseed-se.privex.io', 2001)
        True
        >>> await check_host_async('hiveseed-se.privex.io', 9991)
        False

    For some services, such as HTTP - it's necessary to transmit some data to the host before it will
    send a response. Using the ``send`` kwarg, you can transmit an arbitrary string/bytes upon connection.

    Sending data to ``host`` after connecting::

        >>> await check_host_async('files.privex.io', 80, send=b"GET / HTTP/1.1\\n\\n")
        True


    :param str|IPv4Address|IPv6Address host: Hostname or IP to test
    :param int|str port: Port number on ``host`` to connect to
    :param str|int version: When connecting to a hostname, this can be set to ``'v4'``, ``'v6'`` or similar
                            to ensure the connection is via that IP version

    :param bool throw: (default: ``False``) When ``True``, will raise exceptions instead of returning ``False``
    :param kwargs: Additional configuration options (see below)

    :keyword int receive: (default: ``16``) Amount of bytes to attempt to receive from the server (``0`` to disable)
    :keyword bytes|str send: If ``send`` is specified, the data in ``send`` will be transmitted to the server before receiving.
    :keyword int stype: Socket type, e.g. :attr:`socket.SOCK_STREAM`

    :keyword float|int timeout: Socket timeout. If not passed, uses the default from :func:`socket.getdefaulttimeout`.
                                If the global default timeout is ``None``, then falls back to ``settings.DEFAULT_SOCKET_TIMEOUT``

    :raises socket.timeout: When ``throw=True`` and a timeout occurs.
    :raises socket.gaierror: When ``throw=True`` and various errors occur
    :raises ConnectionRefusedError: When ``throw=True`` and the connection was refused
    :raises ConnectionResetError: When ``throw=True`` and the connection was reset

    :return bool success: ``True`` if successfully connected + sent/received data. Otherwise ``False``.
    """
    receive, stype = int(kwargs.get('receive', 16)), kwargs.get('stype', socket.SOCK_STREAM)
    timeout, send = kwargs.get('timeout', 'n/a'), kwargs.get('send')
    http_test, use_ssl = kwargs.get('http_test', False), kwargs.get('use_ssl', False)
    if timeout == 'n/a':
        t = socket.getdefaulttimeout()
        timeout = settings.DEFAULT_SOCKET_TIMEOUT if not t else t

    # loop = asyncio.get_event_loop()
    s_ver = socket.AF_INET
    ip = await resolve_ip_async(host, version)

    if ip_is_v6(ip): s_ver = socket.AF_INET6

    try:
        aw = AsyncSocketWrapper(host,
                                int(port),
                                family=s_ver,
                                use_ssl=use_ssl,
                                timeout=timeout)
        await aw.connect()
        if http_test:
            log.info("Sending HTTP request to %s", host)
            log.info("Response from %s : %s", host, await aw.http_request())

        elif not empty(send) and receive > 0:
            log.info(
                "Sending query data '%s' and trying to receive data from %s",
                send, host)
            log.info("Response from %s : %s", host,
                     await aw.query(send, receive, read_timeout=kwargs.get('read_timeout', AUTO)))

        elif not empty(send):
            log.info("Sending query data '%s' to %s", send, host)
            await aw.sendall(send)
        else:
            log.info("Receiving data from %s", host)

            log.info(
                "Response from %s : %s", host, await aw.read_eof(
                    receive,
                    strip=False,
                    read_timeout=kwargs.get('read_timeout', AUTO),
                ))

        # with socket.socket(s_ver, stype) as s:
        #     if timeout: s.settimeout(float(timeout))
        #     await loop.sock_connect(s, (ip, int(port)))
        #     if not empty(send):
        #         await loop.sock_sendall(s, byteify(send))
        #     if receive > 0:
        #         await loop.sock_recv(s, int(receive))
        return True
    except (socket.timeout, TimeoutError, ConnectionRefusedError,
            ConnectionResetError, socket.gaierror) as e:
        if throw:
            raise e
    return False
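Example #28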
def convert_datetime(d,
                     if_empty=None,
                     fail_empty=False,
                     **kwargs) -> Optional[datetime]:
    """
    Convert the object ``d`` into a :class:`datetime.datetime` object.
    
    If ``d`` is a string or bytes, then it will be parsed using :func:`dateutil.parser.parse`
    
    If ``d`` is an int/float/Decimal, then it will be assumed to be a unix epoch timestamp.
    
    **Examples**::
        
        >>> convert_datetime("2019-01-01T00:00:00Z")          # ISO date/time
        datetime.datetime(2019, 1, 1, 0, 0, tzinfo=tzutc())
        
        >>> convert_datetime("01/JAN/2019 00:00:00.0000")     # Human date/time with month name
        datetime.datetime(2019, 1, 1, 0, 0, tzinfo=tzutc())
        
        >>> convert_datetime(1546300800)                      # Unix timestamp as integer
        datetime.datetime(2019, 1, 1, 0, 0, tzinfo=tzutc())
        
        >>> convert_datetime(1546300800000)                   # Unix timestamp (milliseconds) as integer
        datetime.datetime(2019, 1, 1, 0, 0, tzinfo=tzutc())
    
    :param d: Object to convert into a datetime
    :param if_empty: If ``d`` is empty / None, return this value
    :param bool fail_empty: (Def: ``False``) If this is True, then if ``d`` is empty, raises :class:`AttributeError`
    
    :key datetime.tzinfo tzinfo: (Default: :class:`dateutil.tz.tzutc`) If no timezone was detected by the parser,
                                 use this timezone. Set this to ``None`` to disable forcing timezone-aware dates.
    
    :raises AttributeError: When ``d`` is empty and ``fail_empty`` is set to True.
    :raises dateutil.parser.ParserError: When ``d`` could not be parsed into a date.
    :return datetime converted: The converted :class:`datetime.datetime` object.
    """
    from dateutil.tz import tzutc
    _tzinfo = kwargs.pop('tzinfo', tzutc())
    if isinstance(d, datetime):
        if d.tzinfo is None and _tzinfo is not None:
            d = d.replace(tzinfo=_tzinfo)
        return d

    # For datetime.date objects, we first convert them into a string, then we can parse them into a datetime + set tzinfo
    if isinstance(d, date):
        d = str(d)

    d = stringify(d) if isinstance(d, bytes) else d

    if isinstance(d, (int, float)):
        return convert_unixtime_datetime(d)

    if isinstance(d, str):
        from dateutil.parser import parse, ParserError
        try:
            t = parse(d)
            if t.tzinfo is None and _tzinfo is not None:
                t = t.replace(tzinfo=_tzinfo)
            return t
        except (ParserError, ValueError) as e:
            log.info(
                "Failed to parse string. Attempting to parse as unix time")
            try:
                t = convert_unixtime_datetime(d)
                return t
            except (BaseException, Exception, ParserError) as _err:
                log.warning(
                    "Failed to parse unix time. Re-raising original parser error. Unixtime error was: %s %s",
                    type(_err), str(_err))
                raise e
        except ImportError as e:
            msg = "ERROR: Could not import 'parse' from 'dateutil.parser'. Please " \
                  f"make sure 'python-dateutil' is installed. Exception: {type(e)} - {str(e)}"

            log.exception(msg)
            raise ImportError(msg)
    if empty(d):
        if fail_empty:
            raise AttributeError(
                "Error converting datetime. Parameter 'd' was empty!")
        return if_empty

    try:
        log.debug(
            "Passed object is not a supported type. Object type: %s || object repr: %s",
            type(d), repr(d))
        log.debug("Calling convert_datetime with object casted to string: %s",
                  str(d))
        _d = convert_datetime(str(d), fail_empty=True)
        d = _d
    except Exception as e:
        log.info(
            "Converted passed object with str() to try and parse string version, but failed."
        )
        log.info("Exception thrown from convert_datetime(str(d)) was: %s %s",
                 type(e), str(e))
        d = None  # By setting d to None, it will trigger the ValueError code below.

    if not isinstance(d, datetime):
        raise ValueError(
            'Timestamp must be either a datetime object, or an ISO8601 string...'
        )
    return d