Esempio n. 1
0
 def __post_init__(self):
     """Coerce string-ish dataclass fields into native types after init."""
     # (attribute, converter) pairs; a field is only converted when it is
     # non-empty (zero=True presumably makes 0/'0' count as empty - confirm
     # against the project's `empty()` helper).
     for attr, cast in (('id', int), ('is_owner', is_true), ('is_premium', is_true)):
         val = getattr(self, attr)
         if not empty(val, zero=True):
             setattr(self, attr, cast(val))
Esempio n. 2
0
def test_domains_dns_delHost():
    """Set two DNS hosts, delete one, then verify only the other remains."""
    api = get_api()
    domain_name = test_register_domain()

    # Create a URL record for '@' plus an A record for 'test'.
    url_host = {'HostName': '@', 'RecordType': 'URL',
                'Address': 'http://news.ycombinator.com', 'TTL': '200'}
    a_host = {'HostName': 'test', 'RecordType': 'A', 'Address': '1.2.3.4'}
    res = api.domains_dns_setHosts(domain_name, url_host, a_host)
    assert_equal(res['Domain'], domain_name)
    ok_(is_true(res['IsSuccess']))

    # Now remove the A record again.
    res = api.domains_dns_delHost(domain_name, record_type='A',
                                  value='1.2.3.4', hostname='test')
    assert_equal(res['Domain'], domain_name)
    ok_(is_true(res['IsSuccess']))

    # Only the '@' URL record should be left; mx_pref 10 appears to be the
    # provider default since it was never set here - confirm against the API.
    host = api.domains_dns_getHosts(domain_name)[0]

    assert_equal(host.name, '@')
    assert_equal(host.address, 'http://news.ycombinator.com')
    assert_equal(host.ttl, 200)
    assert_equal(host.type, 'URL')
    assert_equal(host.mx_pref, 10)
Esempio n. 3
0
 def str_section(self, name: str):
     """
     Render the config section ``name`` as a string of the form
     ``"<name><section_split><zones>[ 'unsafe-eval'][ 'unsafe-inline'];"``.

     Returns ``None`` when the section is missing or falsy.
     """
     self.autoclean()
     section = self.config_dict.get(name, None)
     if not section:
         return None
     parts = [f"{name}{self.section_split}{section.get('zones', '')}"]
     for flag in ('unsafe-eval', 'unsafe-inline'):
         if is_true(section.get(flag, False)):
             parts.append(f"'{flag}'")
     return ' '.join(parts) + ';'
Esempio n. 4
0
    def __post_init__(self):
        """Normalise raw Namecheap API string fields into native Python types."""
        # Namecheap returns dates in US format - MM/DD/YYYY - so we carefully
        # convert them using that format.
        for attr in ('created', 'expires'):
            raw = getattr(self, attr)
            if not empty(raw) and isinstance(raw, str):
                setattr(self, attr, datetime.strptime(raw, "%m/%d/%Y"))

        # Boolean-ish flags are coerced unconditionally.
        for attr in ('is_expired', 'is_locked', 'auto_renew', 'is_premium', 'is_our_dns'):
            setattr(self, attr, is_true(getattr(self, attr)))
Esempio n. 5
0
def handle_args(args: Namespace):
    """
    Apply parsed CLI arguments to the global ``settings`` module and adjust
    logging verbosity accordingly.

    Expected usage::

        >>> parser = ArgumentParser(description="my arg parser")
        >>> add_arguments(parser, 'verbose', 'quiet', 'nodefile')
        >>> args = parser.parse_args()
        >>> handle_args(args)

    :param Namespace args: The parsed arguments from :meth:`ArgumentParser.parse_args`
    :return: ``None`` - this function only mutates ``settings`` and logging state.
    """
    # NOTE: 'x' in args works because argparse.Namespace implements __contains__.
    if 'skip_apis' in args and not empty(args.skip_apis, itr=True):
        settings.SKIP_API_LIST = parse_csv(args.skip_apis)
    
    # --quiet takes priority over --verbose when both flags are set.
    if args.quiet:
        settings.quiet = True
        settings.verbose = False
        clear_handlers('rpcscanner', None)
        set_logging_level(logging.CRITICAL, None)
    elif args.verbose:
        settings.quiet = False
        settings.verbose = True
        clear_handlers('rpcscanner', None)
        set_logging_level(logging.DEBUG, None)
    
    # NOTE(review): when both attributes exist, 'node_file' silently overrides
    # 'nodefile' - presumably deliberate alias precedence; confirm with callers.
    if 'nodefile' in args: settings.node_file = args.nodefile
    if 'node_file' in args: settings.node_file = args.node_file
    if 'plugins' in args: settings.plugins = is_true(args.plugins)
Esempio n. 6
0
    def __post_init__(self):
        """
        Coerce boolean-ish and integer-ish string fields (as returned by the
        upstream API) into real ``bool`` / ``int`` values after dataclass init.
        """
        conv_bool_keys = [
            'non_real_time', 'is_api_registerable', 'is_api_renewable',
            'is_api_transferable', 'is_epp_required', 'is_disable_mod_contact',
            'is_disable_wg_allot', 'is_include_in_extended_search_only',
            'is_supports_idn', 'supports_registrar_lock', 'whois_verification',
            'provider_api_delete'
        ]
        conv_int_keys = [
            'min_register_years',
            # BUGFIX: a missing trailing comma here previously triggered implicit
            # string concatenation, silently merging 'max_register_years' and
            # 'min_renew_years' into one bogus key - so neither was converted.
            'max_register_years',
            'min_renew_years',
            'max_renew_years',
            'min_transfer_years',
            'max_transfer_years',
            'reactivate_max_days',
            'add_grace_period_days',
            'renewal_min_days',
            'renewal_max_days',
            'sequence_number',
        ]

        # Only convert fields that exist and are non-empty; ints that are
        # already ints are left untouched.
        for k in conv_bool_keys:
            v = getattr(self, k, None)
            if not empty(v):
                setattr(self, k, is_true(v))
        for k in conv_int_keys:
            v = getattr(self, k, None)
            if not empty(v) and not isinstance(v, int):
                setattr(self, k, int(v))
Esempio n. 7
0
def convert_tables(opts):
    """
    CLI handler: convert one or more tables (or every table with --all-tables)
    to the requested charset/collation, optionally converting their columns too.

    :param opts: Parsed CLI options (db, tables, all_tables, conv_columns,
                 outer_tx, skip_indexed, charset, collation)
    """
    db = empty_if(opts.db, settings.DB_NAME, itr=True)
    tables = empty_if(opts.tables, [], itr=True)
    all_tables = is_true(opts.all_tables)
    conv_columns = is_true(opts.conv_columns)
    outer_tx = is_true(opts.outer_tx)
    skip_indexed = is_true(opts.skip_indexed)
    
    # Switch the active DB connection if a database was specified / defaulted.
    if not empty(db):
        core.reconnect(database=db)
    
    charset, collation = empty_if(opts.charset, 'utf8mb4', itr=True), empty_if(opts.collation, 'utf8mb4_unicode_ci', itr=True)
    # table = empty_if(opts.table, None, itr=True)
    
    # Require an explicit table list unless --all-tables was passed.
    if empty(tables, itr=True) and not all_tables:
        parser.error(f"\n{RED}ERROR: You must specify a table to 'convert_tables' or pass --all-tables / -a{RESET}\n")
        return sys.exit(1)

    if settings.QUIET:
        core.set_logging_level()
    else:
        core.set_logging_level(env('LOG_LEVEL', 'INFO'))
    
    if all_tables:
        # Resolve every table in the database into table result objects.
        tables = core.get_tables(database=db)
        tnames = [t.table for t in tables]
        print(YELLOW)
        print(f" >>> --all-tables was specified. Converting {len(tables)} tables! The tables are: {', '.join(tnames)}")
        print(RESET)
    else:
        # Resolve each user-named table individually.
        tables = [core.get_tables(database=db, table=t)[0] for t in tables]
        tnames = [t.table for t in tables]
    
    for t in tables:
        print(f"\n{YELLOW} [...] Converting table {t.table} to charset {charset} and collation {collation}{RESET}\n")
        core.convert_table(t.table, charset=charset, collation=collation)
        print(f"\n{GREEN} [+++] Successfully converted table {t.table}{RESET}\n")

    # Optionally convert the individual columns of every processed table too.
    if conv_columns:
        print(f"\n{BLUE} >>> Converting COLUMNS to charset {charset} and collation {collation} for tables: {', '.join(tnames)}{RESET}\n")
        _convert_columns(
            tables, charset=charset, collation=collation, outer_tx=outer_tx, skip_indexed=skip_indexed
        )
        print(f"\n{GREEN} [+++] Successfully converted COLUMNS inside of tables: {', '.join(tnames)}{RESET}\n")
    
    print(f"\n{GREEN} ++++++ Successfully converted {len(tables)} tables ++++++ {RESET}\n")
Esempio n. 8
0
def convert_columns(opts):
    """
    CLI handler: convert columns of a single table (or of every table with
    --all-tables) to the requested charset/collation.

    :param opts: Parsed CLI options (db, table, columns, all_tables, all_columns,
                 outer_tx, skip_indexed, charset, collation)
    """
    db = empty_if(opts.db, settings.DB_NAME, itr=True)
    table = empty_if(opts.table, None, itr=True)
    charset, collation = empty_if(opts.charset, 'utf8mb4', itr=True), empty_if(opts.collation, 'utf8mb4_unicode_ci', itr=True)
    columns = empty_if(opts.columns, [], itr=True)
    outer_tx = is_true(opts.outer_tx)
    skip_indexed = is_true(opts.skip_indexed)
    all_tables = is_true(opts.all_tables)
    all_cols = is_true(opts.all_columns)
    
    if not empty(db):
        core.reconnect(database=db)
    
    if empty(table) and not all_tables:
        parser.error(f"\n{RED}ERROR: You must specify a table to 'convert_columns' without -a / --all-tables{RESET}\n")
        return sys.exit(1)
    
    # BUGFIX: this previously used 'or', which rejected valid usage such as
    # "-a -c somecol" (columns supplied, --all-columns absent). Per the error
    # message, only ONE of -c / -k is required alongside -a.
    if all_tables and empty(columns, itr=True) and not all_cols:
        parser.error(f"\n{RED}ERROR: You must either specify columns using '-c' or pass --all-columns / -k  when using -a / --all-tables{RESET}\n")
        return sys.exit(1)

    if settings.QUIET:
        core.set_logging_level()
    else:
        core.set_logging_level(env('LOG_LEVEL', 'INFO'))
    if all_tables:
        tables = core.get_tables(db)
        _convert_columns(
            tables, all_cols, charset=charset, collation=collation,
            db=db, columns=columns, outer_tx=outer_tx, skip_indexed=skip_indexed
        )
        return
    
    print(f"\n >>> Converting columns in table {table} to charset {charset} and collation {collation}\n")

    try:
        core.convert_columns(
            table, *columns, conv_all=all_cols, charset=charset, collation=collation,
            use_tx=outer_tx, skip_indexed=skip_indexed, database=db
        )
        print(f"\n [+++] Finished converting {table}.\n")
    except Exception as e:
        log.exception("Error while converting columns in table %s - %s - %s", table, type(e), str(e))
        return sys.exit(1)
Esempio n. 9
0
def conv_bool(obj: Optional[Union[str, bool, int]]) -> Optional[bool]:
    """
    Convert a truthy/falsey value into a real :class:`bool`.

    :param obj: The value to convert (string, bool, int, or None)
    :return: ``None`` if ``obj`` is empty or the conversion fails (the failure
             is logged as a warning); otherwise the boolean interpretation.
    """
    if empty(obj):
        return None
    try:
        return is_true(obj)
    except Exception as e:
        # BUGFIX: the message previously said "to integer" even though this
        # function converts to a boolean.
        log.warning(
            "Error while converting object '%s' to boolean. Reason: %s %s",
            obj, type(e), str(e))
        return None
Esempio n. 10
0
def _configure(opts):
    """Copy parsed CLI options into the global DB settings, then reconnect."""
    settings.DB_HOST, settings.DB_USER = opts.host, opts.user
    settings.DB_PASS, settings.DB_NAME = opts.password, opts.database
    settings.DB_PORT = int(opts.port)
    settings.QUIET = is_true(opts.quiet)
    if settings.QUIET:
        # Quiet mode: restrict output to errors (LOG_LEVEL env can override the setting).
        settings.LOG_LEVEL = env('LOG_LEVEL', 'ERROR')
        core.set_logging_level('ERROR')
    core.reconnect()
Esempio n. 11
0
 def __init__(self, rule_type: str = IPT_TYPE.INPUT.value, table='filter', strict=False):
     """Initialise the rule builder for ``table`` and start with a fresh rule."""
     self.table, self.rule_type = table, str(rule_type)
     self.default_action = IPT_ACTION.ALLOW
     self.rule = None
     # Copy the default chain map so later mutations never touch the shared config.
     self.chains = dict(conf.DEFAULT_CHAINS[self.table])
     self.strict = is_true(strict)
     self.has_v4 = self.has_v6 = False
     self.reset_rule()
Esempio n. 12
0
 def __init__(self, network, private_key, public_key=None, account=None, key_type=None, **kwargs):
     """Store key-pair details, plus optional metadata (balance / used / id) from kwargs."""
     self.network = network
     self.private_key = private_key
     self.public_key = public_key
     self.account = account
     self.key_type = key_type
     # Optional extras with safe defaults when not supplied.
     self.balance = Decimal(kwargs.get('balance', '0'))
     self.used = is_true(kwargs.get('used', False))
     self.id = kwargs.get('id')
Esempio n. 13
0
def test_domains_dns_setHosts():
    """Register a throwaway domain and set a single '@' URL host on it."""
    api = get_api()
    domain_name = test_register_domain()
    record = {'HostName': '@', 'RecordType': 'URL',
              'Address': 'http://news.ycombinator.com',
              'MXPref': '10', 'TTL': '100'}
    res = api.domains_dns_setHosts(domain_name, record)
    assert_equal(res['Domain'], domain_name)
    ok_(is_true(res['IsSuccess']))
Esempio n. 14
0
 def __init__(self, src: str, version: str, package: str = None, **kwargs):
     """
     Describe a versioned file to be placed into the package output folder.

     :param str src: Absolute path to the source file to copy from
     :param str version: The version of the file
     :param str package: The sub-package/component the file belongs to, if applicable
     :key str dest_folder: The sub-folder to place this file into
     :key bool link_root: (Default: ``False``) If True, symlink this file to the root of the package output folder
     """
     super().__init__(src=src, version=version, package=package, **kwargs)
     self.src = src
     self.version = version
     self.package = package
     self.dest_folder = kwargs.get('dest_folder')
     self.link_root = is_true(kwargs.get('link_root', False))
Esempio n. 15
0
def _convert_columns(tables: List[core.TableResult], all_cols=True, charset="utf8mb4", collation="utf8mb4_unicode_ci", **kwargs):
    """
    Convert the columns of each table in ``tables`` to ``charset``/``collation``.

    Exits the whole process (code 1) on the first table that fails.

    :param tables: Table result objects to process
    :param bool all_cols: Convert every column instead of just ``columns``
    :key db: Database name (defaults to ``settings.DB_NAME``)
    :key columns: Specific column names to convert (used when not ``all_cols``)
    :key outer_tx: (Default True) Wrap the conversion in a transaction
    :key skip_indexed: (Default True) Skip columns that carry an index
    """
    db = empty_if(kwargs.get('db'), settings.DB_NAME, itr=True)
    columns = empty_if(kwargs.get('columns'), [], itr=True)
    outer_tx = is_true(kwargs.get('outer_tx', True))
    skip_indexed = is_true(kwargs.get('skip_indexed', True))
    # all_cols = is_true(opts.all_columns)
    
    tnames = [t.table for t in tables]
    print(f"{YELLOW} >>> Converting columns in {len(tables)} tables. Tables are: {', '.join(tnames)}{RESET}\n")
    for t in tables:
        print(f"{CYAN}    [-] Converting columns in table {t.table} to charset {charset} and collation {collation}{RESET}")
        try:
            core.convert_columns(
                t.table, *columns, conv_all=all_cols, charset=charset, collation=collation,
                use_tx=outer_tx, skip_indexed=skip_indexed, database=db
            )
            print(f"{GREEN}    [+] Finished converting columns in table {t.table}{RESET}\n")

        except Exception as e:
            # Abort the whole run on first failure - partial conversions are surfaced in the log.
            log.exception("Error while converting columns in table %s - %s - %s", t.table, type(e), str(e))
            return sys.exit(1)
    
    print(f"\n{GREEN} [+++] Finished converting {len(tables)} tables. Tables were: {', '.join(tnames)}{RESET}\n")
Esempio n. 16
0
    def renew(self, lock: Union[str, Lock] = None, expires: int = 120, add_time: bool = True, **kwargs) -> Lock:
        """
        Renew (or re-create) a lock, adding ``expires`` seconds to its expiry time.

        Thin alias for :py:func:`.renew_lock` - but with ``add_time`` and ``create``
        defaulting to ``True`` instead of ``False``. Called with no arguments, it
        renews this instance's main lock :py:attr:`.main_lock` for an additional
        2 minutes (re-creating it with a 2 minute expiry if it already expired).

        **Example usage**::

            >>> with LockMgr('mylock', expires=30) as l:
            ...     sleep(10)
            ...     l.renew(expires=60)                  # Add 60 seconds more time to 'mylock' expiration
            ...     l.renew('lockx', expires=60)         # Add 60 seconds more time to 'lockx' expiration

        :param lock:           Lock name (``str``) or :class:`.Lock` object to renew; defaults to :py:attr:`.main_lock`
        :param int expires:    (Default: 120) Seconds added to the lock's expiry - or, when ``add_time`` is
                               False, the new expiration time in seconds from now
        :param bool add_time:  (Default: ``True``) Add ``expires`` to the existing expiry instead of replacing it
        :key bool create:      (Default: ``True``) Create a new lock if it doesn't exist / already expired
        :key str locked_by:    (Default: system hostname) What server/app is trying to obtain this lock?
        :key int lock_process: (Optional) The process ID requesting the lock
        :raises LockNotFound:  If ``lock`` doesn't exist / is already expired and ``create`` is False
        :return Lock lock:     The :class:`.Lock` object which was renewed
        """
        if lock is None:
            lock = self.main_lock
        create = is_true(kwargs.pop('create', True))
        return renew_lock(lock=lock, expires=expires, add_time=add_time, create=create, **kwargs)
Esempio n. 17
0
def j_gen_tx(account="", address=None, amount=None, category=None, **kwargs):
    """
    Generate a Bitcoin transaction and return it as a dict.

    If any transaction attributes aren't specified, fake data will be automatically generated using :py:mod:`random` or
    :py:mod:`faker` to fill the attributes.

    :param account: Wallet account to label the transaction under
    :param address: **Our** address, that we're sending from or receiving into.
    :param amount: The amount of BTC transferred
    :param category: Either ``'receive'`` or ``'send'``
    :param kwargs: Any additional dict keys to put into the TX data
    :return dict tx: The generated TX
    """
    if address is None:
        address = random.choice(internal["addresses"])
    if category is None:
        category = random.choice(['receive', 'send'])
    # BUGFIX: this previously called Decimal(random.random(), 7) - the second
    # positional argument of Decimal() is a decimal Context, so passing 7
    # raised TypeError. The precision is handled by dec_round(dp=8) below.
    amount = Decimal(random.random()) if amount is None else Decimal(amount)
    amount = dec_round(amount, dp=8)
    # If an amount is being sent, then the amount becomes negative.
    # If an amount is being received, the amount must be positive.
    if (category == 'send' and amount > 0) or (category == 'receive' and amount < 0):
        amount = -amount

    tx = dict(
        account=account,
        address=address,
        amount=amount,
        category=category,
    )
    # Caller-supplied keys override the generated base fields.
    tx = {**tx, **kwargs}

    # Fill any remaining attributes with plausible fake values.
    tx['txid'] = tx.get('txid', fake.sha256())
    tx['confirmations'] = tx.get('confirmations', random.randint(1, 30))

    if 'time' not in tx:
        # Random unix timestamp within the last 5 days.
        tx['time'] = int(fake.unix_time(start_datetime=datetime.utcnow() - timedelta(days=5)))

    tx['label'] = tx.get('label', '')
    tx['vout'] = tx.get('vout', 0)
    tx['generated'] = is_true(tx.get('generated', False))

    return tx
Esempio n. 18
0
def get_prefix(prefix: str, cidr: int = None):
    """
    API view: look up the prefix(es) matching ``prefix``/``cidr`` and return a
    JSON response.

    Query string parameters read from the request: ``exact`` (default True),
    ``asn``, ``limit`` and ``skip``.

    :param str prefix: An IP address, or the network part of a CIDR subnet
    :param int cidr: The CIDR prefix length; when omitted, ``prefix`` is
                     treated as a single IP (/32 for v4, /128 for v6)
    :raises InvalidIP: If ``prefix`` isn't a valid IP address
    :return: A Flask JSON response (single result when ``exact``, otherwise a
             paginated list), or a JSON error for unknown prefixes
    """
    # If there's no CIDR number, then we treat 'prefix' as a singular IP address
    is_single = False
    if empty(cidr):
        try:
            cidr = 32 if ip_is_v4(prefix) else 128
        except ValueError:
            raise InvalidIP(f"IP / Prefix '{prefix}' is invalid.")
        is_single = True

    v = request.values
    exact, asn = is_true(v.get('exact', True)), v.get('asn', None)
    asn = int(asn) if not empty(asn, zero=True) else None
    limit, skip = validate_limits(v.get('limit'), v.get('skip'))

    # For individual IPs, we search for the prefix(es) that contains the IP.
    # For normal CIDR subnets, we search for the matching prefix and any sub-prefixes within that subnet.
    _filter = IPFilter.CONTAINS_EQUAL if is_single else IPFilter.WITHIN_EQUAL
    _pfx = f"{prefix}" if is_single else f"{prefix}/{cidr}"

    p: Union[Prefix, BaseQuery] = Prefix.filter_prefix(_pfx,
                                                       exact=exact,
                                                       asn=asn,
                                                       op=_filter)

    # If the 'exact' parameter is set to True (default), we return just the matching prefix, if it's found.
    if exact:
        p = p.first()
        if not p:
            return json_err('NOT_FOUND')
        return jsonify(error=False, result=p.to_dict())

    # Drop prefixes not seen recently - presumably stale routes; the cutoff is
    # relative to the most recently seen prefix minus PREFIX_TIMEOUT seconds.
    latest_prefix: Prefix = Prefix.latest_seen_prefixes(limit=1, single=True)
    latest_last_seen = latest_prefix.last_seen

    p = p.filter(Prefix.last_seen > (latest_last_seen -
                                     timedelta(seconds=PREFIX_TIMEOUT)))
    # For non-exact searches, we return a list of prefixes that match the query
    total = p.count()
    p: List[Prefix] = list(p.slice(skip, skip + limit))
    return jsonify(error=False,
                   count=len(p),
                   total=total,
                   pages=int(total / limit),
                   result=[k.to_dict() for k in p])
Esempio n. 19
0
 def __post_init__(self):
     """Convert raw API string fields into ints, bools and 4dp Decimals."""
     self.error_no = None if empty(self.error_no) else int(self.error_no)
     # Boolean-ish flags - converted only when present and non-empty.
     for attr in ('available', 'is_premium_name'):
         raw = getattr(self, attr, None)
         if not empty(raw):
             setattr(self, attr, is_true(raw))
     # Price / fee fields -> Decimal rounded to 4 decimal places.
     dec_attrs = (
         'premium_registration_price',
         'premium_renewal_price',
         'premium_restore_price',
         'premium_transfer_price',
         'icann_fee',
         'eap_fee',
     )
     for attr in dec_attrs:
         raw = getattr(self, attr, None)
         if empty(raw):
             log.debug("Skipping %s - is empty", attr)
             continue
         log.debug("Converting %s to decimal", attr)
         setattr(self, attr, dec_round(Decimal(raw), dp=4))
Esempio n. 20
0
def test_register_domain():
    """Register a random domain with dummy contact details and return its name."""
    api = get_api()

    # Try registering a random domain. Fails if exception raised.
    domain_name = random_domain_name()
    contact = dict(
        FirstName='Jack',
        LastName='Trotter',
        Address1='Ridiculously Big Mansion, Yellow Brick Road',
        City='Tokushima',
        StateProvince='Tokushima',
        PostalCode='771-0144',
        Country='Japan',
        Phone='+81.123123123',
        EmailAddress='*****@*****.**',
    )
    res = api.domains_create(DomainName=domain_name, **contact)

    assert_equal(res.domain, domain_name)
    ok_(int(res.domain_id) > 0)
    ok_(int(res.transaction_id) > 0)
    ok_(is_true(res.registered))
    return domain_name
Esempio n. 21
0
 def address_valid(self, address) -> bool:
     """Ask the RPC node whether ``address`` is valid; returns a real bool."""
     result = self.rpc.validate_address(address=address)
     return is_true(result['valid'])
Esempio n. 22
0
 def test_istrue_falsey(self):
     """Test :py:func:`.is_true` with falsey values"""
     for f in self.falsey_empty:
         # BUGFIX: the closing parenthesis was missing from the assertion message.
         self.assertFalse(helpers.is_true(f), msg=f"!is_true({repr(f)})")
Esempio n. 23
0
 def test_istrue_truthy(self):
     """Test :py:func:`.is_true` with truthy values"""
     for f in self.truthy:
         # BUGFIX: the closing parenthesis was missing from the assertion message.
         self.assertTrue(helpers.is_true(f), msg=f"is_true({repr(f)})")
Esempio n. 24
0
    def handle(self, *args, **options):
        """
        Management command entry point: create and/or renew the named locks,
        then print a per-lock status report.

        Options read: fail, no_renew, only_renew, no_timeout, locks,
        process_id, locked_by, timeout. Exits with code 2 (via LockFail) when
        --fail is set and an existing lock is encountered.
        """
        # Console-only logging for this command (don't propagate to root logger).
        _lh = LogHelper(__name__,
                        formatter=LOG_FORMATTER,
                        handler_level=logging.INFO)
        _lh.add_console_handler()
        _lh.get_logger().propagate = False
        lockmgr.clean_locks()  # Clean up any locks due for expiration.

        fail = is_true(options['fail'])
        no_renew = is_true(options['no_renew'])
        only_renew = is_true(options['only_renew'])
        no_timeout = is_true(options['no_timeout'])

        locks: list = options['locks']
        process_id: int = int(options['process_id']
                              ) if options['process_id'] is not None else None
        locked_by: str = options['locked_by']
        # --no-timeout means the locks never expire (timeout None).
        timeout = None if no_timeout else int(options['timeout'])
        lock_args = dict(expires=timeout,
                         locked_by=locked_by,
                         lock_process=process_id)
        if len(locks) == 0:
            print('No lock names specified.')
            return

        # --only-renew disables creation; --no-renew disables renewal.
        _create = False if only_renew else True
        _renew = False if no_renew else True

        try:
            res = set_lock(*locks,
                           timeout=timeout,
                           locked_by=locked_by,
                           process_id=process_id,
                           fail=fail,
                           create=_create,
                           renew=_renew)
            print(f"Finished creating / renewing {len(locks)} locks.\n")

            # Per-lock status table followed by summary counters.
            print("\n====================Status Report=====================\n")
            print(f"  Per-lock:\n")
            print("\t\t{:<20}{:<20}{:<20}{:<20}\n".format(
                "Name", "Was Locked?", "Now Locked?", "Status"))
            for lck_name, lres in res.statuses:
                print("\t\t{:<20}{:<20}{:<20}{:<20}".format(
                    lck_name, 'YES' if lres.was_locked else 'NO',
                    'YES' if lres.locked else 'NO', lres.status))
            print(
                "\n========================================================\n")
            print("  Summary:\n")
            print(f"    Locks Created:      {res.counts['created']}")
            print(f"    Locks Renewed:      {res.counts['renewed']}")
            print(f"    Renewals Skipped:   {res.counts['skip_renew']}")
            print(f"    Creations Skipped:  {res.counts['skip_create']}")

        except LockFail as e:
            # --fail / -e: set_lock rolled everything back; report and exit 2.
            print(
                "\n---------------------------------------------------------------------------\n"
            )
            print(
                " [lockmgr.management.commands.set_lock] Caught exception LockFail while creating/setting locks..."
            )
            print(
                " [lockmgr.management.commands.set_lock] The following existing lock was encountered:\n"
            )
            print(f"\t{e.lock}\n")
            print(
                " >>> As you have set -e / --fail, this means that any lock creations or updates triggered during "
                "this run of set_lock should have been rolled back.")
            print(
                " >>> If in doubt, run './manage.py list_locks' to view all current locks.\n"
            )
            print(" !!! Now exiting with return code 2...\n")
            return sys.exit(2)

        print("")
        print("\n=========================================================\n")
        print("Finished creating / renewing locks.")
        print("\n=========================================================\n")
Esempio n. 25
0
    def host_sorter(
        self,
        host: str,
        key: str,
        fallback=USE_ORIG_VAR
    ) -> Union[str, int, float, bool, datetime, Decimal]:
        """
        Usage::
            
            >>> scanner = RPCScanner(['https://hived.privex.io', 'https://anyx.io', 'https://hived.hive-engine.com'])
            >>> await scanner.scan_nodes()
            >>> sorted(scanner.node_objs, key=lambda el: scanner.host_sorter(host=el.host, key='online_status'))
        
        Useful notes about string sorting:
            
            * ``!`` appears to be the most preferred (comes first when sorting) ASCII character
            
            * ``~`` appears to be the least preferred (comes last when sorting) ASCII character
            
            * Symbols are not linearly grouped together. Some symbols will be sorted first, some will be sorted after numbers,
              some will be sorted after uppercase letters, and some will be sorted after lowercase letters.
            
            * Uppercase and lowercase letters are not grouped together. As per the previous bulletpoint - both uppercase and
              lowercase letters have symbols before + after their preference group.
            
            * The Python string ASCII sorting order seems to be:
                * Certain common symbols such as exclamation ``!``, quote ``"``, hash ``#``, dollar ``$`` and others
                * Numbers ``0`` to ``9``
                * Less common symbols such as colon ``:``, semi-colon ``;``, lessthan ``<``, equals ``=`` and greaterthan ``=`` (and more)
                * Uppercase alphabet characters ``A`` to ``Z``
                * Even less common symbols such as open square bracket ``[``, backslash ``\``, close square bracket ``]`` and others.
                * Lowercase alphabet characters ``a`` to ``z``
                * Rarer symbols such as curly braces ``{`` ``}``, pipe ``|``, tilde ``~`` and more.
        
        The tilde '~' character appears to be one of the least favorable string characters, coming in last
        place when I did some basic testing in the REPL on Python 3.8, with the following character set (sorted)::
        
            >>> x = list('!"#$%&\\'()*+,-./0123456789:;<=>?@ABC[\\]^_`abc{|}~')
            >>> x = list(set(x))   # Remove any potential duplicate characters
        
        Tested with::
        
            >>> print(''.join(sorted(list(set(x)), key=lambda el: str(el))))
            !"#$%&'()*+,-./0123456789:;<=>?@ABC[\\]^_`abc{|}~
        
        Note: extra backslashes have been added to the character set example above, due to IDEs thinking it's an escape
        for the docs - and thus complaining.
        
        :param str host: The host being sorted, e.g. ``https://hived.privex.io``
        :param str key: The key being sorted / special sort code, e.g. ``head_block`` or ``online_status``
        :param fallback: Value returned for error/empty content instead of the '!'/'~' defaults
        :return: A sortable value (str/int/float/bool/datetime/Decimal) for the requested key
        """
        # Resolve the host into its node object, and map any sort-key alias onto the real key name.
        node = self.get_node(host)
        key = self.table_sort_aliases.get(key, key)
        real_key = str(key)
        # Plugin coverage: fraction of the tested plugin list this node responded to (0 when none).
        if key in ['api_tests', 'plugins']:
            if empty(node.plugins, True, True): return 0
            return len(node.plugins) / len(TEST_PLUGINS_LIST)
        if '_status' in key:
            content = node.status + 1
            # When sorting by a specific status key, we handle out-of-sync nodes by simply emitting
            # status 0 (best) if we're prioritising out-of-sync nodes, or status 2 (unreliable) when
            # sorting by any other status.
            if node.time_behind:
                if node.time_behind.total_seconds() > 60:
                    return 0 if key == 'outofsync_status' else 2
            if key == 'online_status' and node.status >= 3:
                return 0
            if key == 'dead_status' and node.status <= 0:
                return 0
            if key == 'outofsync_status': pass
            return content

        # All '*_network' pseudo-keys read the node's 'network' attribute underneath.
        if key.endswith('_network'):
            real_key = 'network'

        if real_key not in node:
            log.error(
                f"RPCScanner.host_sorter called with non-existent key '{key}'. Falling back to sorting by 'status'."
            )
            key, real_key = 'status', 'status'

        content = node.get(real_key, '')
        strcont = str(content)
        # Content stringifying to 'error'/'none' is treated as unusable -> fallback handling below.
        has_err = 'error' in strcont.lower() or 'none' in strcont.lower()
        def_reverse = real_key in self.table_default_reverse
        log.debug(
            f"Key: {key} || Real Key: {real_key} has_err: {has_err} || def_reverse: {def_reverse} "
            f"|| content: {content} || strcont: {strcont}")
        # If a specific network sort type is given, then return '!' if this node matches that network.
        # The exclamation mark symbol '!' is very high ranking with python string sorts (higher than numbers and letters)
        if key == "hive_network":
            return '!' if 'hive' in strcont.lower() else strcont
        if key == "steem_network":
            return '!' if 'steem' in strcont.lower() else strcont
        if key == "whaleshares_network":
            return '!' if 'whaleshares' in strcont.lower() else strcont
        if key == "golos_network":
            return '!' if 'golos' in strcont.lower() else strcont

        # If 'table_types' tells us that the column we're sorting by - should be handled as a certain type,
        # then we need to change how we handle the default fallback value for errors, and any casting
        # we should use.
        if key in self.table_types:
            tt = self.table_types[key]
            log.info(f"Key {key} has table type: {tt}")
            if tt is bool:
                if has_err or empty(content):
                    return False if def_reverse else True
                return is_true(content)
            if tt is datetime:
                # Default fallback: far-future datetime (~5 years ahead) so broken
                # entries sink to the bottom; far-past when the table is reversed.
                fallback = (datetime.utcnow() +
                            timedelta(weeks=260, hours=12)).replace(
                                tzinfo=pytz.UTC)
                if def_reverse:
                    fallback = datetime(1980,
                                        1,
                                        1,
                                        1,
                                        1,
                                        1,
                                        1,
                                        tzinfo=pytz.UTC)
                if has_err or empty(content): return fallback
                return convert_datetime(content, if_empty=fallback)
            if tt is float:
                if has_err and isinstance(fallback, float): return fallback
                if has_err or empty(content):
                    return float(0.0) if def_reverse else float(
                        999999999.99999)
                return float(content)
            if tt is Decimal:
                if has_err and isinstance(fallback, Decimal): return fallback
                if has_err or empty(content):
                    return Decimal('0') if def_reverse else Decimal(
                        '999999999')
                return Decimal(content)
            if tt is int:
                if has_err and isinstance(fallback, int): return fallback
                if has_err or empty(content):
                    return int(0) if def_reverse else int(999999999)
                return int(content)

        if has_err or empty(content):
            # We use the placeholder type 'USE_ORIG_VAR' instead of 'None' or 'False', allowing us the user to specify
            # 'None', 'False', '""' etc. as fallbacks without conflict
            if fallback is not USE_ORIG_VAR: return fallback
            # The '!' character is used as the default fallback value if the table is reversed by default,
            # since '!' appears to be the most preferred string character, and thus would be at the
            # bottom of a reversed list.
            if def_reverse: return '!'
            # The tilde '~' character appears to be one of the least favorable string characters, coming in last
            # place when I did some basic testing in the REPL on Python 3.8 (see pydoc block for this method),
            # so it's used as the default for ``fallback``.
            return '~'
        # If we don't have a known type for this column, or any special handling needed like for 'api_tests', then we
        # simply return the stringified content of the key on the node object.
        return strcont
Esempio n. 26
0
def set_lock(*locks,
             timeout=600,
             fail=False,
             renew=True,
             create=True,
             **options) -> LockSetResult:
    """
    This function is for advanced users, offering multiple lock creation, renewing, along with "all or nothing"
    locking with database rollback via the argument ``fail``.
    
    Unlike other lock management functions, set_lock returns a :class:`.LockSetResult` object, which is designed
    to allow you to see clearly as to what locks were created, renewed, or skipped.
    
    **Example Usage**
    
    Let's set two locks, ``hello`` and ``world``.
    
        >>> res = set_lock('hello', 'world')
        >>> res['locks']
        [<Lock name='hello' locked_by='example.org' locked_until='2019-11-22 02:01:55.439390+00:00'>,
         <Lock name='world' locked_by='example.org' locked_until='2019-11-22 02:01:55.442734+00:00'>]
        >>> res['counts']
        {'created': 2, 'renewed': 0, 'skip_create': 0, 'skip_renew': 0}
    
    If we run ``set_lock`` again with the same arguments, we'll still get the locks list, but we'll see the counts
    show that they were renewed instead of created.
    
        >>> x = set_lock('hello', 'world')
        >>> x['locks']
        [<Lock name='hello' locked_by='example.org' locked_until='2019-11-22 02:03:06.762620+00:00'>,
         <Lock name='world' locked_by='example.org' locked_until='2019-11-22 02:03:06.766804+00:00'>]
        >>> x['counts']
        {'created': 0, 'renewed': 2, 'skip_create': 0, 'skip_renew': 0}
    
    Since the result is an object, you can also access attributes via dot notation, as well as dict-like notation.
    
    We can see inside of the ``statuses`` list - the action that was taken on each lock we specified, so we can see
    what locks were created, renewed, or skipped etc.
    
        >>> x.statuses[0]
        ('hello', {'was_locked': True, 'status': 'extend', 'locked': True})
        >>> x.statuses[1]
        ('world', {'was_locked': True, 'status': 'extend', 'locked': True})
    
    
    :param str locks:    One or more lock names, as positional arguments, to create or renew.
    :param int timeout:  On existing locks, update locked_until to ``now + timeout`` (seconds)
    :param bool fail:    (Default: False) If ``True``, all lock creations will be rolled back if an existing lock
                         is encountered, and :class:`.LockFail` will be raised.
    :param bool renew:   (Default: True) If ``True``, any existing locks in ``locks`` will be renewed to
                         ``now + timeout`` (seconds). If False, existing locks will just be skipped.
    :param bool create:  (Default: True) If ``True``, any names in ``locks`` which aren't yet locked, will have a lock
                         created for them, with their expiry set to ``timeout`` seconds from now.
    :key str locked_by:    (Default: system hostname) What server/app is trying to obtain this lock?
    :key int process_id: (Optional) The process ID requesting the lock
    :return LockSetResult results: A :class:`.LockSetResult` object containing the results of the set_lock operation.
    """
    # Normalise flexible truthy values ("1", "true", True, ...) into a plain bool.
    fail = is_true(fail)

    timeout = int(timeout)
    # Optional identity metadata, forwarded unchanged to get_lock() / renew_lock().
    process_id = options.get('process_id')
    process_id: int = int(process_id) if process_id is not None else None
    locked_by: str = options.get('locked_by')
    lock_args = dict(expires=timeout,
                     locked_by=locked_by,
                     lock_process=process_id)

    # Accumulator returned to the caller: every Lock object touched, per-action
    # counters, and a (name, LockSetStatus) tuple for each requested lock name.
    result = LockSetResult(locks=[],
                           counts=dict(created=0,
                                       renewed=0,
                                       skip_create=0,
                                       skip_renew=0),
                           statuses=[])

    try:
        with transaction.atomic():
            # A savepoint lets us undo every lock created during this call if
            # fail=True and an existing lock is encountered part-way through.
            sid = transaction.savepoint()
            for l in locks:
                try:
                    # create=False means "renew-only mode": never make new locks.
                    if not create and not is_locked(l):
                        log.debug(
                            f" > The lock '{l}' doesn't exist, but create=False was specified. Not locking."
                        )
                        result['statuses'] += [(l,
                                                LockSetStatus(was_locked=False,
                                                              status='skip',
                                                              locked=False))]
                        # result['locks'] += [lck]
                        result.counts['skip_create'] += 1
                        continue
                    # get_lock() creates the lock, or raises Locked if it already exists.
                    lck = get_lock(l, **lock_args)
                    log.info(
                        f" > Lock {l} did not yet exist. Successfully locked '{l}' - expiry: {lck.locked_until}"
                    )
                    result.statuses += [(l,
                                         LockSetStatus(was_locked=False,
                                                       status='create',
                                                       locked=True))]
                    result.locks += [lck]
                    result.counts['created'] += 1
                except Locked:
                    # The lock already exists - depending on fail/renew we abort, skip, or extend it.
                    lck = Lock.objects.get(name=l)
                    if fail:
                        # Undo any locks created earlier in this loop, then abort.
                        transaction.savepoint_rollback(sid)
                        raise LockFail(
                            f"Lock '{l}' already existed. Aborting!", lock=lck)
                    if not renew:
                        # Existing lock, renew disabled: record it untouched and move on.
                        result['statuses'] += [(l,
                                                LockSetStatus(was_locked=True,
                                                              status='skip',
                                                              locked=True))]
                        result['locks'] += [lck]
                        log.debug(
                            f" > The lock '{l}' already exists, but renew=False - not renewing this lock."
                        )
                        # NOTE(review): the f-prefix here is redundant - the string has no
                        # braces and the %s is filled lazily by the logging module.
                        log.debug(f"\tLock: %s\t", lck)
                        log.debug(" > Skipping this lock...\n")
                        result.counts['skip_renew'] += 1
                        continue
                    # Extend the existing lock's expiry to now + timeout.
                    lck = renew_lock(lck, **lock_args)
                    result['locks'] += [lck]
                    result['statuses'] += [(l,
                                            LockSetStatus(was_locked=True,
                                                          status='extend',
                                                          locked=True))]
                    result.counts['renewed'] += 1
                    log.info(
                        f" > The lock '{l}' already exists. Renewed it's expiry to: {lck.locked_until}"
                    )
    except LockFail as e:
        # fail=True and an existing lock was hit: the savepoint rollback above already
        # removed locks created this session; log loudly and re-raise for the caller.
        log.error(
            "Error: An existing lock was found while fail=True"
            "\n !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n"
            " !!! An existing lock was found:\n"
            f" !!! \t{e.lock}\n"
            " !!! As you have specified fail=True, any locks created during this session will now be\n"
            " !!! rolled back for your safety.\n"
            " !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n"
        )
        transaction.rollback()
        log.error(
            "Any locks created during this session should now have been removed..."
        )
        raise e
    return result
Esempio n. 27
0
def test_domains_dns_setDefault_on_existing_domain():
    # Register a fresh domain, then reset its DNS servers to Namecheap's defaults
    # and verify the API confirms both the domain name and the update flag.
    client = get_api()
    registered = test_register_domain()
    response = client.domains_dns_setDefault(registered)
    assert_equal(response['Domain'], registered)
    ok_(is_true(response['Updated']))
Esempio n. 28
0
def renew_lock(lock: Union[str, Lock],
               expires: int = 600,
               add_time: bool = False,
               **kwargs) -> Lock:
    """
    Renew an existing lock for more expiry time.
    
    **Note:** This function will NOT reduce a lock's expiry time, only lengthen. If ``add_time`` is ``False``,
    and the new expiration time ``expires`` is shorter than the lock's existing expiration time, then the lock's
    expiry time will be left untouched.
    
    **Example - Renew an existing lock**::
    
        >>> lk = get_lock('my_app:somelock', expires=10)
        >>> sleep(5)
        >>> lk = renew_lock(lk, 20)   # Change the expiry time to 20 seconds from now
        >>> sleep(15)
        >>> is_locked('my_app:somelock') # 15 seconds later, the lock is still locked
        True
    
    **Example - Try to renew, but get a new lock if it's already been released**::
    
        >>> lk = get_lock('my_app:somelock', expires=5)
        >>> sleep(10)
        >>> lk = renew_lock(lk, 20, create=True)   # If the lock is expired/non-existant, make a new lock
        >>> sleep(15)
        >>> is_locked('my_app:somelock') # 15 seconds later, the lock is still locked
        True

    :param str lock:       Name of the lock to renew
    :param Lock lock:      A :class:`.Lock` object to renew
    :param int expires:    (Default: 600) If not add_time, then this is the new expiration time in seconds from now.
                           If add_time, then this many seconds will be added to the expiration time of the lock.
    :param bool add_time:  (Default: ``False``) If True, then ``expires`` seconds will be added to the existing
                           lock expiration time, instead of setting the expiration time to ``now + expires``
    
    :key bool create:      (Default: ``False``) If True, then create a new lock if it doesn't exist / already expired.
    :key str locked_by:    (Default: system hostname) What server/app is trying to obtain this lock?
    :key int lock_process: (Optional) The process ID requesting the lock

    :raises LockNotFound: Raised if the requested ``lock`` doesn't exist / is already expired and ``create`` is False.
    
    :return Lock lock: The :class:`.Lock` object which was renewed
    """
    # `lk` starts as whatever was passed (name or Lock object); resolved to a Lock below.
    create, lk = is_true(kwargs.get('create', False)), lock
    # None/False expiry is coerced to 0, which means "never expires" further down.
    expires = 0 if expires in [None, False] else int(expires)
    # Purge expired locks first, so an expired lock correctly triggers the create/raise path.
    clean_locks()
    with transaction.atomic():
        if type(lock) is str:
            try:
                # Row-lock the record so concurrent renewals serialise on this lock.
                lk = Lock.objects.select_for_update().get(name=lock)
            except Lock.DoesNotExist:
                # If we can't find an existing lock under the given name, then depending on `create`, we
                # either raise an exception, or simply make a new lock.
                if not create:
                    raise LockNotFound(
                        f'Lock with name {lock} does not exist, cannot renew (set create=True).'
                    )
                return get_lock(lock,
                                expires=expires,
                                locked_by=kwargs.get('locked_by'),
                                lock_process=kwargs.get('lock_process'))
        # Preserve the lock name in-case the lock is expired, so we don't lose it when we refresh from DB.
        lock_name = str(lk.name)
        try:
            # A Lock object may have been deleted (e.g. by clean_locks) since the caller obtained it.
            lk.refresh_from_db()
        except Lock.DoesNotExist:
            if not create:
                raise LockNotFound(
                    f'Lock with name {lock} does not exist, cannot renew (set create=True).'
                )
            return get_lock(lock_name,
                            expires=expires,
                            locked_by=kwargs.get('locked_by'),
                            lock_process=kwargs.get('lock_process'))

        if lk.expired:  # If the passed `Lock` object is already expired, update locked_until to now before extending.
            lk.locked_until = timezone.now()
        # If expires is 0 or lower, then we set locked_until to None (never expires)
        if expires <= 0:
            lk.locked_until = None
            lk.save()
            return lk
        # If add_time, add `expires` seconds to the existing expiry, otherwise replace it with `now + expires`
        # NOTE(review): if the lock's locked_until is already None (never expires) and expires > 0,
        # the add_time branch and the comparison below would raise TypeError (None + timedelta /
        # None comparison) - presumably never-expiring locks aren't renewed this way; confirm.
        ex = lk.locked_until + timedelta(
            seconds=expires) if add_time else timezone.now() + timedelta(
                seconds=expires)
        # We only want to update the lock expiry time if the new expiry time is actually longer than it's existing
        # expiration time. This ensures we don't have a renewal where it causes a 10 minute lock to be reduced to
        # 1 minute unknowingly.
        if ex > lk.locked_until:
            lk.locked_until = ex
        lk.save()
    return lk
Esempio n. 29
0
    def handle(self, *args, **options):
        """
        Management-command entry point: validate deposits in state 'new', convert
        deposits in state 'mapped' into their destination coins, then reset the
        ``funds_low`` flag on any Coin that no longer has pending mapped deposits.

        :param options: Django management options. ``options['dry']`` is forwarded
                        to :meth:`.convert_deposit` as a dry-run flag.
        """
        # Load all "new" deposits, max of 200 in memory at a time to avoid memory leaks.
        new_deposits = Deposit.objects.filter(status='new').iterator(200)
        log.info('Coin converter and deposit validator started')

        # ----------------------------------------------------------------
        # Validate deposits and map them to a destination coin / address
        # ----------------------------------------------------------------
        log.info('Validating deposits that are in state "new"')
        for d in new_deposits:
            try:
                log.debug('Validating and mapping deposit %s', d)
                # Each deposit is processed in its own transaction so one failure
                # never rolls back the others.
                with transaction.atomic():
                    try:
                        self.detect_deposit(d)
                    except ConvertError as e:
                        # Something went very wrong while processing this deposit. Log the error, store the reason
                        # onto the deposit, and then save it.
                        log.error(
                            'ConvertError while validating deposit "%s" !!! Message: %s',
                            d, str(e))
                        try:
                            mgr = get_manager(
                                d.coin.symbol)  # type: SettingsMixin
                            auto_refund = mgr.settings.get(
                                d.coin.symbol, {}).get('auto_refund', False)
                            if is_true(auto_refund):
                                log.info(
                                    f'Auto refund is enabled for coin {d.coin}. Attempting return to sender.'
                                )
                                ConvertCore.refund_sender(deposit=d)
                            else:
                                d.status = 'err'
                                d.error_reason = str(e)
                                d.save()
                        except Exception as e:
                            log.exception(
                                'An exception occurred while checking if auto_refund was enabled...'
                            )
                            d.status = 'err'
                            d.error_reason = f'Auto refund failure: {str(e)}'
                            d.save()
                    except ConvertInvalid as e:
                        # This exception usually means the sender didn't read the instructions properly, or simply
                        # that the transaction wasn't intended to be exchanged.
                        log.error(
                            'ConvertInvalid (user mistake) while validating deposit "%s" Message: %s',
                            d, str(e))
                        d.status = 'inv'
                        d.error_reason = str(e)
                        d.save()
            # Narrowed from a bare `except:` - a bare except would also swallow
            # SystemExit / KeyboardInterrupt, preventing clean shutdown of the command.
            except Exception:
                log.exception(
                    'UNHANDLED EXCEPTION. Deposit could not be validated/detected... %s',
                    d)
                d.status = 'err'
                d.error_reason = 'Unknown error while validating deposit. An admin must manually check the error logs.'
                d.save()
        log.info('Finished validating new deposits for conversion')

        # ----------------------------------------------------------------
        # Convert any validated deposits into their destination coin
        # ----------------------------------------------------------------
        conv_deposits = Deposit.objects.filter(status='mapped').iterator(200)
        log.info('Converting deposits that are in state "mapped"...')
        for d in conv_deposits:
            try:
                log.debug('Converting deposit %s', d)
                with transaction.atomic():
                    try:
                        self.convert_deposit(d, options['dry'])
                    except ConvertError as e:
                        # Something went very wrong while processing this deposit. Log the error, store the reason
                        # onto the deposit, and then save it.
                        log.error(
                            'ConvertError while converting deposit "%s" !!! Message: %s',
                            d, str(e))
                        d.status = 'err'
                        d.error_reason = str(e)
                        d.save()
                    except ConvertInvalid as e:
                        # This exception usually means the sender didn't read the instructions properly, or simply
                        # that the transaction wasn't intended to be exchanged.
                        log.error(
                            'ConvertInvalid (user mistake) while converting deposit "%s" Message: %s',
                            d, str(e))
                        d.status = 'inv'
                        d.error_reason = str(e)
                        d.save()
            # Narrowed from a bare `except:` for the same reason as above.
            except Exception:
                log.exception(
                    'UNHANDLED EXCEPTION. Conversion error for deposit... %s',
                    d)
                d.status = 'err'
                d.error_reason = 'Unknown error while converting. An admin must manually check the error logs.'
                d.save()
        log.info('Finished converting deposits.')

        log.debug(
            'Resetting any Coins "funds_low" if they have no "mapped" deposits'
        )
        for c in Coin.objects.filter(funds_low=True):
            log.debug(' -> Coin %s currently has low funds', c)
            map_deps = c.deposit_converts.filter(status='mapped').count()
            if map_deps == 0:
                log.debug(
                    ' +++ Coin %s has no mapped deposits, resetting funds_low to false',
                    c)
                c.funds_low = False
                c.save()
            else:
                log.debug(
                    ' !!! Coin %s still has %d mapped deposits. Ignoring.', c,
                    map_deps)
        log.debug(
            'Finished resetting coins with "funds_low" that have been resolved.'
        )
Esempio n. 30
0
    def post(self, request, *args, **kwargs):
        """
        Handle the "easy add pair" admin form.

        Builds two Coin objects from the POSTed ``*_one`` / ``*_two`` fields, then
        creates a CoinPair in each direction with a placeholder 1:1 exchange rate.
        Always redirects back to 'admin:easy_add_pair', reporting validation errors
        or successes through the Django messages framework.

        :param request: The Django HTTP request carrying the form POST data.
        """
        p = request.POST
        one = dict(
            symbol=p.get('symbol_one'),
            symbol_id=p.get('symbol_id_one'),
            can_issue=is_true(p.get('issue_one')),
            coin_type=p.get('coin_type_one'),
            our_account=p.get('our_account_one'),
            display_name=p.get('display_one'),
        )
        two = dict(
            symbol=p.get('symbol_two'),
            symbol_id=p.get('symbol_id_two'),
            can_issue=is_true(p.get('issue_two')),
            coin_type=p.get('coin_type_two'),
            our_account=p.get('our_account_two'),
            display_name=p.get('display_two'),
        )

        if empty(one['symbol']):
            messages.add_message(request, messages.ERROR,
                                 'Unique symbol not specified for Coin One.')
            return redirect('admin:easy_add_pair')
        if empty(one['coin_type']) or one['coin_type'] not in dict(
                settings.COIN_TYPES):
            # BUGFIX: this branch validates Coin One, but previously reported
            # 'Invalid coin type for Coin Two.' - corrected to name Coin One.
            messages.add_message(request, messages.ERROR,
                                 'Invalid coin type for Coin One.')
            return redirect('admin:easy_add_pair')
        if empty(two['symbol']):
            messages.add_message(request, messages.ERROR,
                                 'Unique symbol not specified for Coin Two.')
            return redirect('admin:easy_add_pair')
        if empty(two['coin_type']) or two['coin_type'] not in dict(
                settings.COIN_TYPES):
            messages.add_message(request, messages.ERROR,
                                 'Invalid coin type for Coin Two.')
            return redirect('admin:easy_add_pair')

        c_one = Coin(**one)
        c_one.save()
        messages.add_message(request, messages.SUCCESS,
                             f'Created Coin object {c_one}.')

        c_two = Coin(**two)
        c_two.save()
        messages.add_message(request, messages.SUCCESS,
                             f'Created Coin object {c_two}.')

        # Create the pair in both directions so conversion works either way;
        # the 1:1 exchange_rate is a placeholder the admin is expected to adjust.
        p_one = CoinPair(from_coin=c_one,
                         to_coin=c_two,
                         exchange_rate=Decimal('1'))
        p_two = CoinPair(from_coin=c_two,
                         to_coin=c_one,
                         exchange_rate=Decimal('1'))
        p_one.save()
        p_two.save()
        messages.add_message(request, messages.SUCCESS,
                             f'Created CoinPair object {p_one}.')
        messages.add_message(request, messages.SUCCESS,
                             f'Created CoinPair object {p_two}.')

        return redirect('admin:easy_add_pair')