def process_config(self):
    """Build the service config, applying ethereum-related environment overrides.

    ETHEREUM_NODE_URL replaces the 'ethereum' section wholesale;
    DEFAULT_GASPRICE and ETHEREUM_NETWORK_ID override individual keys.
    When no network id is supplied, the node itself is queried for one.
    """
    config = super().process_config()

    node_url = os.environ.get('ETHEREUM_NODE_URL')
    if node_url is not None:
        config['ethereum'] = {'url': node_url}

    gasprice = os.environ.get('DEFAULT_GASPRICE')
    if gasprice is not None:
        config.setdefault('ethereum', {})['default_gasprice'] = gasprice

    if 'ethereum' in config:
        network_id = os.environ.get('ETHEREUM_NETWORK_ID')
        if network_id is None:
            # no explicit override: ask the configured node for its network id
            # NOTE(review): assumes config['ethereum']['url'] is present here —
            # true whenever the section came from the env override; confirm for
            # file-provided configs
            network_id = self.asyncio_loop.run_until_complete(
                to_asyncio_future(
                    JsonRPCClient(config['ethereum']['url']).net_version()))
        config['ethereum']['network_id'] = network_id

    configure_logger(services_log)
    return config
def __init__(self, *args, listener_id="block_monitor", **kwargs):
    """Set up the block-monitor listener, its RPC client and task dispatcher."""
    # DatabaseMixin expects an `application` attribute; point it at ourselves
    self.application = self
    super().__init__([], *args, listener_id=listener_id, **kwargs)
    configure_logger(log)

    # prefer a dedicated 'monitor' node, falling back to the general one
    if 'monitor' not in self.config:
        log.warning("monitor using config['ethereum'] node")
        node_url = self.config['ethereum']['url']
    else:
        node_url = self.config['monitor']['url']
    self.eth = JsonRPCClient(node_url)

    # schedule handles and background-process markers, idle until started
    self._check_schedule = None
    self._poll_schedule = None
    self._block_checking_process = None
    self._filter_poll_process = None
    self._lastlog = 0

    self.tasks = TaskDispatcher(self.task_listener)
def __init__(self):
    """Set up the monitor's RPC clients and its bookkeeping state."""
    configure_logger(log)

    # prefer a dedicated 'monitor' node, falling back to the general one
    if 'monitor' not in config:
        log.warning("monitor using config['ethereum'] node")
        node_url = config['ethereum']['url']
    else:
        node_url = config['monitor']['url']

    # primary client: short timeouts so a dead node is noticed quickly
    self.eth = JsonRPCClient(node_url,
                             connect_timeout=5.0,
                             request_timeout=10.0)
    # filter health processes depend on some of the calls failing on the first time
    # so we have a separate client to handle those
    self.filter_eth = JsonRPCClient(node_url,
                                    force_instance=True,
                                    connect_timeout=10.0,
                                    request_timeout=60.0)

    # schedule handles and background-process markers, idle until started
    self._check_schedule = None
    self._poll_schedule = None
    self._sanity_check_schedule = None
    self._block_checking_process = None
    self._filter_poll_process = None
    self._sanity_check_process = None
    self._process_unconfirmed_transactions_process = None
    self._new_pending_transaction_filter_id = None

    # liveness / logging / stats state
    self._last_saw_new_block = asyncio.get_event_loop().time()
    self._shutdown = False
    self._lastlog = 0
    self._blocktimes = []
def __init__(self, *args, **kwargs):
    """Initialize the transaction-queue manager task listener."""
    handlers = [(TransactionQueueHandler, )]
    super().__init__(handlers, *args, listener_id="manager", **kwargs)
    configure_logger(log)
    # per-key processing state for queued transaction work
    self.task_listener.processing_queue = {}
def __init__(self):
    """Create a retrying RPC client pointed at the monitor (or default) node."""
    configure_logger(log)
    # prefer a dedicated 'monitor' node, falling back to the general one
    if 'monitor' not in config:
        log.warning("monitor using config['ethereum'] node")
        node_url = config['ethereum']['url']
    else:
        node_url = config['monitor']['url']
    self.eth = JsonRPCClient(node_url, should_retry=True)
def __init__(self, *, config=None, connection_pool=None, delay=DEFAULT_DELAY):
    """Set up event loops, configuration, database pool and scheduling state.

    When `config` or `connection_pool` is not supplied, it is built here
    (via process_config / prepare_databases respectively).
    """
    self.ioloop = IOLoop.current()
    # build the config only when the caller did not provide one
    self.config = config if config is not None else self.process_config()
    self.asyncio_loop = asyncio.get_event_loop()
    if connection_pool is not None:
        self.connection_pool = connection_pool
    else:
        # no migrations here; this service only consumes the schema
        self.prepare_databases(handle_migration=False)
    self._schedule = None
    self._delay = delay
    configure_logger(log)
def __init__(self):
    """Set up a non-retrying RPC client and the monitor's idle state."""
    configure_logger(log)
    # prefer a dedicated 'monitor' node, falling back to the general one
    if 'monitor' not in config:
        log.warning("monitor using config['ethereum'] node")
        node_url = config['ethereum']['url']
    else:
        node_url = config['monitor']['url']
    self.eth = JsonRPCClient(node_url, should_retry=False)

    # schedule handles and background-process markers, idle until started
    self._check_schedule = None
    self._poll_schedule = None
    self._sanity_check_schedule = None
    self._block_checking_process = None
    self._filter_poll_process = None
    self._sanity_check_process = None
    self._process_unconfirmed_transactions_process = None
    self._lastlog = 0
def process_config(self):
    """Build the service config, applying push/GCM environment overrides.

    When set, PUSH_URL / PUSH_PASSWORD / PUSH_USERNAME override options in
    the 'pushserver' section and GCM_SERVER_KEY overrides 'gcm'.  Sections
    are created (as SectionProxy) when first needed.

    Returns:
        The processed config object.
    """
    config = super().process_config()
    # (env var, config section, option) triples replace four copy-pasted
    # branches; iteration order preserves the original override order
    overrides = (
        ('PUSH_URL', 'pushserver', 'url'),
        ('PUSH_PASSWORD', 'pushserver', 'password'),
        ('PUSH_USERNAME', 'pushserver', 'username'),
        ('GCM_SERVER_KEY', 'gcm', 'server_key'),
    )
    for env_key, section, option in overrides:
        if env_key in os.environ:
            # setdefault mirrors the original: the SectionProxy default is
            # built eagerly but only kept when the section is missing
            config.setdefault(
                section, SectionProxy(config, section))[option] = os.environ[env_key]
    configure_logger(log)
    return config
def __init__(self):
    """Initialize the base class, logging, and the in-flight work map."""
    super().__init__()
    configure_logger(log)
    # tracks items currently being processed, keyed per item
    self._processing = {}
def __init__(self):
    """Register the transaction-queue handler on the 'manager' queue."""
    handlers = [(TransactionQueueHandler, )]
    super().__init__(handlers, queue_name="manager")
    configure_logger(log)
def __init__(self):
    """Configure logging; this service keeps no other instance state."""
    configure_logger(log)
def __init__(self, *args, **kwargs):
    """Initialize the base class, then apply extra service configuration."""
    super().__init__(*args, **kwargs)
    configure_logger(services_log)
    # project hook for any additional per-service setup
    extra_service_config()
def __init__(self, *args, **kwargs):
    """Register the ERC20 update handler under the 'erc20manager' listener."""
    handlers = [(ERC20UpdateHandler, )]
    super().__init__(handlers, *args, listener_id="erc20manager", **kwargs)
    configure_logger(log)
def __init__(self):
    """Register the ERC20 update handler on the 'erc20' queue."""
    handlers = [(ERC20UpdateHandler, )]
    super().__init__(handlers, queue_name="erc20")
    configure_logger(log)
def __init__(self, *args, **kwargs):
    """Initialize the base class, logging, and the in-flight work map."""
    super().__init__(*args, **kwargs)
    configure_logger(log)
    # tracks items currently being processed, keyed per item
    self._processing = {}
from asyncpg.exceptions import UniqueViolationError
from sanic import Sanic
from sanic.exceptions import SanicException
from sanic.log import log as sanic_log
from sanic.config import Config
from sanic.response import html, json as json_response, redirect
from sanic.request import Request
from jinja2 import Environment, FileSystemLoader

# allow long-running admin requests (default sanic timeout is much shorter)
Config.REQUEST_TIMEOUT = 300

from toshi.utils import parse_int

# verbose logging for the admin service; route sanic's logger through the
# project-wide logging configuration
toshi_log.setLevel(logging.DEBUG)
configure_logger(sanic_log)

# service endpoints and database DSNs, all read from the environment;
# any of these may be None when the corresponding service is not configured
ADMIN_SERVICE_DATABASE_URL = os.getenv("DATABASE_URL")
ID_SERVICE_LOGIN_URL = os.getenv("ID_SERVICE_LOGIN_URL")
MAINNET_ETHEREUM_NODE_URL = os.getenv("MAINNET_ETHEREUM_NODE_URL")
MAINNET_ETH_SERVICE_DATABASE_URL = os.getenv(
    "MAINNET_ETH_SERVICE_DATABASE_URL")
MAINNET_ID_SERVICE_DATABASE_URL = os.getenv("MAINNET_ID_SERVICE_DATABASE_URL")
MAINNET_DIR_SERVICE_DATABASE_URL = os.getenv(
    "MAINNET_DIR_SERVICE_DATABASE_URL")
MAINNET_REP_SERVICE_DATABASE_URL = os.getenv(
    "MAINNET_REP_SERVICE_DATABASE_URL")
MAINNET_ID_SERVICE_URL = os.getenv("MAINNET_ID_SERVICE_URL")
MAINNET_ETH_SERVICE_URL = os.getenv("MAINNET_ETH_SERVICE_URL")
MAINNET_DIR_SERVICE_URL = os.getenv("MAINNET_DIR_SERVICE_URL")
def __init__(self, *, delay=DEFAULT_DELAY):
    """Initialize scheduling state with the given inter-run delay."""
    # no work scheduled yet; set when the service starts
    self._schedule = None
    self._delay = delay
    configure_logger(log)