Example #1
0
    ('django_filters.rest_framework.DjangoFilterBackend', )
}

EOS_NODE = env('EOS_NODE', 'https://eos.greymass.com')

# Read from the environment like every other EOS_* setting here. Previously this
# was hard-coded to "relative", which made the documented env override impossible.
EOS_START_TYPE = env('EOS_START_TYPE', 'relative')
"""
EOS_START_TYPE can be either ``"relative"`` (meaning EOS_START_BLOCK is relative to the head block),
or ``"exact"`` (meaning EOS_START_BLOCK specifies an exact block number to start from).

This only really matters for the first run, as once there are some blocks in the database, the sync tasks will
determine its current position from the last database block number.
"""

# Default is 1,210,000 blocks = 605,000 seconds = approx. 7 days of blocks.
EOS_START_BLOCK = env_int('EOS_START_BLOCK', 1210000)
"""
NOTE: See :py:attr:`.EOS_START_TYPE` as this number can mean different things based on EOS_START_TYPE.

EOS has one block every 500 milliseconds on average. This means 100,000 blocks behind head would be 50,000 seconds
behind, or just under 14 hrs.

For an entire week behind head, you'd enter ``1210000`` (the estd. amount of blocks produced over 605,000 seconds) and
ensure ``EOS_START_TYPE`` is set to 'relative'.
"""

EOS_SYNC_MAX_QUEUE = env_int('EOS_SYNC_MAX_QUEUE', 500)
"""
Queue no more than this many blocks to be imported at a time.

After ``EOS_SYNC_MAX_QUEUE`` blocks are queued, the block import queue function will wait for blocks
Example #2
0
EOS_NODE = env_csv('EOS_NODE', [
    'https://eos.greymass.com', 'https://api.eosdetroit.io',
])

EOS_START_TYPE = env('EOS_START_TYPE', 'relative')
"""
EOS_START_TYPE can be either ``"relative"`` (meaning EOS_START_BLOCK is relative to the head block),
or ``"exact"`` (meaning EOS_START_BLOCK specifies an exact block number to start from).

This only really matters for the first run, as once there are some blocks in the database, the sync tasks will
determine it's current position from the last database block number.
"""

# Default is 1,210,000 blocks = 605,000 seconds = approx. 7 days of blocks.
EOS_START_BLOCK = env_int('EOS_START_BLOCK', 1210000)
"""
NOTE: See :py:attr:`.EOS_START_TYPE` as this number can mean different things based on EOS_START_TYPE.

EOS has one block every 500 milliseconds on average. This means 100,000 blocks behind head would be 50,000 seconds
behind, or just under 14 hrs.

For an entire week behind head, you'd enter ``1210000`` (the estd. amount of blocks produced over 605,000 seconds) and
ensure ``EOS_START_TYPE`` is set to 'relative'.
"""

EOS_SYNC_MAX_QUEUE = env_int('EOS_SYNC_MAX_QUEUE', 500)
"""
Queue no more than this many blocks to be imported at a time.

After ``EOS_SYNC_MAX_QUEUE`` blocks are queued, the block import queue function will wait for blocks
Example #3
0
    ]

"""

# GoBGP protobuf host + port to connect to
GBGP_HOST = env('GBGP_HOST', 'localhost:50051')

# Use env_int for consistency with the other integer settings in this file
# (behaviour unchanged: read CHUNK_SIZE from the environment and cast to int).
CHUNK_SIZE = env_int('CHUNK_SIZE', 300)
"""
Amount of prefixes to commit as a chunk while running `./manage.py prefixes`

Affects how often it displays the current progress, i.e. `Saved 1200 out of 4548 prefixes` as well as how many
prefixes are committed per each TX. Numbers lower than 20 may result in performance issues.
"""

PREFIX_TIMEOUT = env_int('PREFIX_TIMEOUT', 1800)
"""
Prefixes with a ``last_seen`` more than PREFIX_TIMEOUT seconds ago from the newest prefix in the database
will be considered stale, and thus not shown on the ASN summary page, nor the individual prefix list for an ASN.

Default: ``1800`` seconds = 30 minutes.

We compare against the newest last_seen timestamp in the database, allowing you to run import_prefixes
as often as you like, e.g. once per 30-60 mins, without having prefixes go stale due to import_prefixes
being run occasionally.
"""

PREFIX_TIMEOUT_WARN = env_int('PREFIX_TIMEOUT_WARN', 1800)
"""
Prefixes with a ``last_seen`` more than PREFIX_TIMEOUT_WARN seconds ago from the newest prefix in the database
will be marked in yellow, to signify that they're potentially stale / no longer being advertised.
Example #4
0
# Load environment variables from a .env file (if present) before reading settings
load_dotenv()


# 'cf' collects a subset of settings into a plain dict. Its consumer is not
# visible in this chunk - presumably exported to the frontend; confirm elsewhere.
cf = {}

DEBUG = cf['DEBUG'] = env_bool('DEBUG', False)
# Project root: two directories above this settings file
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

ENABLE_LG = env_bool('ENABLE_LG', True)
"""Enable the looking glass (mtr / ping) application (lookingglass) - Default: True (enabled)"""

ENABLE_PEERAPP = env_bool('ENABLE_PEERAPP', True)
"""Enable the peer information application (peerapp) - Default: True (enabled)"""

DEFAULT_API_LIMIT = env_int('VUE_APP_DEFAULT_API_LIMIT', 100)
"""Default for ``limit`` field on API queries."""

MAX_API_LIMIT = env_int('VUE_APP_MAX_API_LIMIT', 10000)
"""Max value allowed for ``limit`` field on API queries."""

# Hot-reload dev server integration (disabled by default); the VUE_APP_* env
# names above suggest a Vue frontend - confirm against the project frontend.
HOT_LOADER = env_bool('HOT_LOADER', False)
HOT_LOADER_URL = env('HOT_LOADER_URL', 'http://localhost:8080')

#######################################
#
# Logging Configuration
#
#######################################

# Log to console with CONSOLE_LOG_LEVEL, as well as output logs >=info / >=warning to respective files
Example #5
0
# Use rich's print for prettier console output when available; otherwise keep
# the builtin print (best-effort, so ImportError is deliberately ignored).
try:
    from rich import print
except ImportError:
    pass

# Silence all but errors from the privex.steem library logger
log = logging.getLogger('privex.steem')
log.setLevel(logging.ERROR)

# Nanoseconds per second as a Decimal - used below to convert time.time_ns()
# timestamps into seconds without float rounding.
SECS_NS = Decimal('1000000000')

# Hive RPC nodes passed to SteemAsync (override via comma-separated HIVE_NODES env var)
HIVE_NODES = env_csv('HIVE_NODES', [
    'https://direct.hived.privex.io', 'https://anyx.io',
    'https://api.deathwing.me'
])
BATCH_SIZE = env_int('BATCH_SIZE', 100)   # passed to SteemAsync's 'batch_size' config in main()
NUM_BLOCKS = env_int('NUM_BLOCKS', 1000)  # number of recent blocks main() loads


async def main():
    ss = SteemAsync(HIVE_NODES)
    ss.config_set('batch_size', BATCH_SIZE)
    print(
        f"\n [{datetime.utcnow()!s}] Loading last {NUM_BLOCKS} blocks using steem-async ... \n\n"
    )
    start_time = time.time_ns()
    blks = await ss.get_blocks(-NUM_BLOCKS)
    end_time = time.time_ns()
    print(f"\n [{datetime.utcnow()!s}] Total blocks:", len(blks), "\n")
    start_time, end_time = Decimal(start_time), Decimal(end_time)
    start_secs = start_time / SECS_NS
Example #6
0
    |          (+)  Chris (@someguy123) [Privex]        |
    |          (+)  Kale (@kryogenic) [Privex]          |
    |                                                   |
    +===================================================+
    
    Official Repo: https://github.com/Privex/collation-fixer


"""
from dotenv import load_dotenv
from os import getenv as env
from privex.helpers import env_int, env_bool

load_dotenv()

# Core behaviour toggles, read from the environment (.env supported via load_dotenv)
DEBUG = env_bool('DEBUG', False)
QUIET = env_bool('QUIET', False)

# Default log level: DEBUG while debugging, WARNING otherwise; QUIET lowers the
# default to ERROR. An explicitly set LOG_LEVEL env var always takes priority.
if QUIET:
    LOG_LEVEL = env('LOG_LEVEL', 'ERROR')
else:
    LOG_LEVEL = env('LOG_LEVEL', 'DEBUG' if DEBUG else 'WARNING')

# MySQL/MariaDB connection settings. DB_USERNAME / DB_PASSWORD are accepted
# as fallback aliases for DB_USER / DB_PASS.
DB_HOST = env('DB_HOST', 'localhost')
DB_USER = env('DB_USER', env('DB_USERNAME', 'root'))
DB_PASS = env('DB_PASS', env('DB_PASSWORD', ''))
DB_PORT = env_int('DB_PORT', 3306)

# No default - DB_NAME is None when unset (presumably validated later; not visible here)
DB_NAME = env('DB_NAME')

Example #7
0
# 'cf' collects selected settings for dict-style access (attribute-accessible DictObject)
cf = DictObject()
# Directory containing this config module, fully resolved (~ and symlinks expanded)
APP_DIR = Path(__file__).parent.expanduser().resolve()
TEMPLATES_DIR = APP_DIR / 'templates'
BASE_DIR = APP_DIR.parent

#######################################
#
# General configuration
#
#######################################
# DEBUG defaults to on when Flask's development mode is active (FLASK_ENV=development)
cf['DEBUG'] = DEBUG = env_bool(
    'DEBUG', True if env('FLASK_ENV') == 'development' else False)

# Host / port for the application to bind to
HOST = cf['HOST'] = env('HOST', '127.0.0.1')
PORT = cf['PORT'] = env_int('PORT', 5111)

cf['API_ONLY'] = env_bool('API_ONLY', False)
"""If set to ``True``, will always return JSON, never HTML pages."""

USE_IP_HEADER = cf['USE_IP_HEADER'] = env_bool('USE_IP_HEADER', True)
"""If set to False, will obtain the IP from request.remote_addr instead of the header set in IP_HEADER"""
IP_HEADER = cf['IP_HEADER'] = env('IP_HEADER', 'X-REAL-IP')
"""The name of the header that will be passed to Flask containing the IP address of the user"""

# Defaults to DEBUG, so fake client IPs are used automatically during development
USE_FAKE_IPS = env_bool('USE_FAKE_IPS', DEBUG)
"""
USE_FAKE_IPS causes the app to always use FAKE_V4 and FAKE_V6 as the detected client's v4/v6 IPs, which aids
testing the app when running it locally during development.
"""
FAKE_V4 = empty_if(env('FAKE_V4', '185.130.44.140' if USE_FAKE_IPS else ''),
Example #8
0
    print(
        f" [!!!] Setting LOG_DIR to original value - may be fixed when log folder + containing folders are auto-created.",
        file=sys.stderr)
    LOG_DIR = _LOG_DIR

# Valid environment log levels (from least to most severe) are:
# DEBUG, INFO, WARNING, ERROR, FATAL, CRITICAL
LOG_LEVEL = env('LOG_LEVEL', None)
LOG_LEVEL = logging.getLevelName(
    str(LOG_LEVEL).upper()) if LOG_LEVEL is not None else None

if LOG_LEVEL is None:
    LOG_LEVEL = logging.DEBUG if DEBUG or verbose else logging.INFO
    LOG_LEVEL = logging.CRITICAL if quiet else LOG_LEVEL

RPC_TIMEOUT = env_int('RPC_TIMEOUT', 3)
MAX_TRIES = env_int('MAX_TRIES', 3)
RETRY_DELAY = env_cast('RETRY_DELAY', cast=float, env_default=2.0)
PUB_PREFIX = env(
    'PUB_PREFIX', 'STM'
)  # Used as part of the thorough plugin tests for checking correct keys are returned

TEST_PLUGINS_LIST = env_csv('TEST_PLUGIN_LIST', [])
"""
Controls which plugins are tested by :class:`.RPCScanner` when :attr:`rpcscanner.settings.plugins` is
set to ``True``.

If the TEST_PLUGINS_LIST is empty, it will be populated automatically when the module container :class:`.MethodTests`
is loaded, which will replace it with a tuple containing :attr:`rpcscanner.MethodTests.METHOD_MAP`.
"""
Example #9
0
from privex.helpers import DictObject, env_int, env_cast
# from exchanges import get_target_value
# from exceptions import PairNotFound
from decimal import Decimal, ROUND_DOWN, getcontext
from threading import Thread, Lock
# from time import sleep
from privex.exchange import ExchangeManager
from privex.loghelper import LogHelper
from privex.helpers.cache import AsyncMemoryCache, adapter_set
from dotenv import load_dotenv
from os import getenv as env

# Load .env config, then patch asyncio so event loops can be nested
# (nest_asyncio allows run_until_complete inside an already-running loop)
load_dotenv()
nest_asyncio.apply()

CACHE_TIMEOUT = env_int('CACHE_TIMEOUT', 300)     # cache expiry (presumably seconds - confirm usage)
LOOP_SLEEP = env_cast('LOOP_SLEEP', float, 60.0)  # delay between main-loop iterations

# Default conversion pair and amount used when none is specified
DEFAULT_FROM = env('DEFAULT_FROM', 'hive')
DEFAULT_TO = env('DEFAULT_TO', 'usd')
DEFAULT_AMOUNT = env_cast('DEFAULT_AMOUNT', float, 1)

# Keep global logging quiet; attach a console handler only for the 'steemvalue' logger
LogHelper(level=logging.ERROR, handler_level=logging.ERROR)
_lh = LogHelper('steemvalue')
h = _lh.add_console_handler()

# LogHelper('privex.exchange', handler_level=logging.DEBUG, clear_handlers=False)

logging.basicConfig()

log = _lh.get_logger()
Example #10
0
# Commented-out hard-coded node list, superseded by the env_csv call below:
# HIVE_NODES = [
#     'https://hived.privex.io',
#     'https://api.deathwing.me',
#     # 'https://hived.hive-engine.com',
#     'https://anyx.io',
#     'https://rpc.ausbit.dev',
#     'https://rpc.esteem.app',
#     'https://techcoderx.com',
#     'https://api.pharesim.me',
#     'https://direct.hived.privex.io',
#     'https://api.openhive.network'
#     # 'https://api.hivekings.com'
# ]
# Hive RPC nodes passed to beem's Hive() client (override via comma-separated HIVE_NODES env var)
HIVE_NODES = env_csv('HIVE_NODES', ['https://direct.hived.privex.io', 'https://anyx.io', 'https://api.deathwing.me'])
NUM_BLOCKS = env_int('NUM_BLOCKS', 1000)  # number of recent blocks main() loads


async def main():
    blocks = []
    hive = Hive(HIVE_NODES)
    chain = Blockchain(blockchain_instance=hive)
    print(f"\n [{datetime.utcnow()!s}] Loading last {NUM_BLOCKS} blocks using beem ... \n\n")
    start_time = time.time_ns()
    current_num = chain.get_current_block_num()
    for block in chain.blocks(start=current_num - NUM_BLOCKS, stop=current_num):
        blocks.append(block)
    end_time = time.time_ns()

    print(f"\n [{datetime.utcnow()!s}] Total blocks:", len(blocks), "\n")
    start_time, end_time = Decimal(start_time), Decimal(end_time)
Example #11
0
# The email address used by default when sending outgoing emails
SERVER_EMAIL = env('SERVER_EMAIL', '*****@*****.**')

# Email backend: console in DEBUG (emails printed to stdout), SMTP otherwise -
# unless explicitly overridden via the EMAIL_BACKEND env var.
EMAIL_BACKEND = env('EMAIL_BACKEND', None)
if not EMAIL_BACKEND:
    _backend_suffix = 'console.EmailBackend' if DEBUG else 'smtp.EmailBackend'
    EMAIL_BACKEND = 'django.core.mail.backends.' + _backend_suffix

# Hostname / IP of SMTP server, must be set in production for outgoing SMTP emails
EMAIL_HOST = env('EMAIL_HOST', None)

if empty(EMAIL_HOST) and not DEBUG:
    EMAIL_HOST = 'smtp.privex.io'

if EMAIL_HOST is None:
    # No SMTP host available. If the SMTP backend would have been used in production,
    # and you don't have an email server hostname/ip set, downgrade it to the dummy
    # backend so outgoing emails are simply dropped instead of erroring.
    if EMAIL_BACKEND == 'django.core.mail.backends.smtp.EmailBackend':
        EMAIL_BACKEND = 'django.core.mail.backends.dummy.EmailBackend'
else:
    EMAIL_PORT = env_int('EMAIL_PORT', 587)            # Port number to connect to email server
    EMAIL_HOST_USER = env('EMAIL_USER', None)          # Username for email server login
    EMAIL_HOST_PASSWORD = env('EMAIL_PASSWORD', None)  # Password for email server login
    # Only enable ONE of these, if required
    EMAIL_USE_TLS = env_bool('EMAIL_USE_TLS', True)    # Use TLS encryption or not
    EMAIL_USE_SSL = env_bool('EMAIL_USE_SSL', False)   # Use SSL encryption or not
Example #12
0
    |        Core Developer(s):                         |
    |                                                   |
    |          (+)  Chris (@someguy123) [Privex]        |
    |                                                   |
    +===================================================+
    

"""
import asyncio
import time
from privex.helpers import env_int
from privex.loghelper import LogHelper

from privex.eos.lib import Api

# Number of blocks main() fetches, counting back from the current head block
# (override via the BLOCK_COUNT env var)
BLOCK_COUNT = env_int('BLOCK_COUNT', 500)

# Attach a console log handler for the privex.eos library
LogHelper('privex.eos').add_console_handler()


async def main():
    api = Api()
    start_time = time.time()
    info = await api.get_info()
    head_block = info['head_block_num']
    res = await api.get_block_range(head_block - BLOCK_COUNT, head_block)
    end_time = time.time()
    completed_in = round(end_time - start_time, 3)
    blocks_loaded = len(res.keys())
    bps = blocks_loaded / completed_in
    print(f"Loaded {blocks_loaded} blocks in {completed_in} seconds.")