def test_namespaces():
    """Per-namespace levels: root at INFO, 'a' at TRACE, 'a.b' at DEBUG."""
    handler = setup_logging(config_string=':inFO,a:trace,a.b:debug')
    root_log = slogging.get_logger()
    a_log = slogging.get_logger('a')
    ab_log = slogging.get_logger('a.b')
    # root honours INFO, filters DEBUG
    assert handler.does_log(root_log.info)
    assert not handler.does_log(root_log.debug)
    # 'a' is fully verbose down to TRACE
    assert handler.does_log(a_log.trace)
    # 'a.b' overrides its parent: DEBUG passes, TRACE does not
    assert handler.does_log(ab_log.debug)
    assert not handler.does_log(ab_log.trace)
def test_howto_use_in_tests():
    """Example: select the verbosity you want to see in a test run."""
    # e.g. TRACE from the vm except for pre_state, DEBUG otherwise
    slogging.configure(config_string=':DEBUG,eth.vm:TRACE,vm.pre_state:INFO')
    log = slogging.get_logger('tests.logging')
    log.info('test starts')
def test_tracebacks():
    """stack_info/exc_info kwargs attach stack traces to emitted records.

    Fixes two defects: the inner `div` helper was never called, so the
    test exercised nothing, and `except Exception, e` is Py2-only syntax
    (and `e` was unused).
    """
    th = setup_logging()
    log = slogging.get_logger()

    def div(a, b):
        try:
            a / b
            # success path: log with the current stack attached
            log.error('heres the stack', stack_info=True)
        except Exception:
            # failure path: log with the exception traceback attached
            log.error('an Exception trace should preceed this msg',
                      exc_info=True)

    div(1, 2)   # exercises the stack_info branch
    div(1, 0)   # ZeroDivisionError exercises the exc_info branch
def test_kvprinter():
    """The key/value printer must include the event name in the output."""
    # formatting itself is not testable here, only that the event got through
    handler = setup_logging(config_string=':inFO,a:trace,a.b:debug')
    log = slogging.get_logger('foo')
    log.info('baz', arg=2)
    output = handler.logged
    assert 'baz' in output
def test_testhandler():
    """The test handler exposes the last log line via .logged, consumed on read.

    Fixes `th.logged == None` to the idiomatic identity test `is None`.
    """
    th = get_test_handler()
    assert th.logged is None  # was `== None`; identity test is the correct idiom
    th = setup_logging()
    assert th.logged is None
    log = slogging.get_logger('a')
    log.warn('abc')
    assert 'abc' in th.logged
    # reading .logged consumes it
    assert th.logged is None
    # same with does_log
    assert th.does_log(log.warn)
    assert not th.does_log(log.debug)
def test_incremental():
    """bind() accumulates context across successively derived loggers."""
    handler = setup_logging(config_string=':trace')
    log = slogging.get_logger()
    # first bound key shows up in the output
    log = log.bind(first='one')
    log.error('nice', a=1, b=2)
    assert 'first' in handler.logged
    # a second bind() keeps the earlier context as well
    log = log.bind(second='two')
    log.error('nice', a=1, b=2)
    output = handler.logged
    assert 'first' in output
    assert 'two' in output
def test_baseconfig():
    """Default loglevel is INFO: critical..info are logged, debug/trace are not.

    Removes a duplicated `assert th.does_log(log.warn)`.
    """
    th = setup_logging()
    log = slogging.get_logger()
    assert th.does_log(log.error)
    assert th.does_log(log.critical)
    assert th.does_log(log.warn)
    assert th.does_log(log.info)
    assert not th.does_log(log.debug)
    assert not th.does_log(log.trace)
    # reconfiguring with an explicit config string must not raise
    config_string = ':inFO,a:trace,a.b:debug'
    th = setup_logging(config_string=config_string)
def test_is_active():
    """is_active() reflects the configured level; listeners force trace on."""
    setup_logging()
    log = slogging.get_logger()
    for level, expected in (('trace', False), ('debug', False),
                            ('info', True), ('warn', True)):
        assert log.is_active(level) == expected
    # an attached listener activates everything down to trace
    slogging.log_listeners.listeners.append(lambda event_dict: event_dict)
    assert log.is_active('trace')
    slogging.log_listeners.listeners.pop()
    assert not log.is_active('trace')
def test_how_to_use_as_vm_logger():
    """Record all vm logs, but only emit them once an error occurred.

    Narrows a bare `except:` (which would also swallow KeyboardInterrupt
    and SystemExit) to `except Exception:`.
    """
    config_string = ':DEBUG,eth.vm:INFO'
    slogging.configure(config_string=config_string)
    log = slogging.get_logger('eth.vm')

    # record all logs
    def run_vm(raise_error=False):
        log.trace('op', pc=1)
        log.trace('op', pc=2)
        if raise_error:
            raise Exception

    recorder = slogging.LogRecorder()
    try:
        run_vm(raise_error=True)
    except Exception:
        # replay the recorded trace-level records at INFO on failure
        log = slogging.get_logger('eth.vm')
        for x in recorder.pop_records():
            log.info(x.pop('event'), **x)
def test_lazy_log():
    """Payload rendering must be lazy: only emitted messages are formatted.

    __structlog__ is used for json output, __repr__ for plain output;
    e.g. class LogState / class LogMemory rely on this laziness.
    """
    json_calls = []
    repr_calls = []

    class Expensive(object):
        def __structlog__(self):
            json_calls.append(1)
            return 'expensive data preparation'

        def __repr__(self):
            repr_calls.append(1)
            return 'expensive data preparation'

    # json mode: only emitted messages trigger __structlog__
    setup_logging(log_json=True)
    log = slogging.get_logger()
    log.trace('no', data=Expensive())   # filtered -> never rendered
    assert not repr_calls
    assert not json_calls
    log.info('yes', data=Expensive())   # emitted -> rendered via __structlog__
    assert json_calls.pop()
    assert not repr_calls

    # plain mode: only emitted messages trigger __repr__
    setup_logging()
    log = slogging.get_logger()
    log.trace('no', data=Expensive())
    assert not repr_calls
    assert not json_calls
    log.info('yes', data=Expensive())
    assert not json_calls
    assert repr_calls.pop()
def test_recorder():
    """LogRecorder captures structured records and detaches on pop_records()."""
    th = setup_logging()
    log = slogging.get_logger()

    # info level: the record is both logged and captured
    recorder = slogging.LogRecorder()
    assert len(slogging.log_listeners.listeners) == 1
    log.info('a', v=1)
    assert th.logged
    records = recorder.pop_records()
    assert records[0] == dict(event='a', v=1)
    assert len(slogging.log_listeners.listeners) == 0

    # trace level: filtered from the handler, still captured by the recorder
    recorder = slogging.LogRecorder()
    assert len(slogging.log_listeners.listeners) == 1
    log.trace('a', v=1)
    assert not th.logged
    records = recorder.pop_records()
    assert records[0] == dict(event='a', v=1)
    assert len(slogging.log_listeners.listeners) == 0
def test_listeners():
    """Registered listeners receive every event dict, even filtered ones."""
    th = setup_logging()
    log = slogging.get_logger()

    seen = []

    def log_cb(event_dict):
        seen.append(event_dict)

    # activate listener
    slogging.log_listeners.listeners.append(log_cb)
    log.error('test listener', abc='thislistener')
    assert 'thislistener' in th.logged
    assert seen.pop() == dict(event='test listener', abc='thislistener')
    # a trace message is filtered from the handler but still reaches the listener
    log.trace('trace is usually filtered', abc='thislistener')
    assert th.logged is None
    assert 'abc' in seen.pop()

    # deactivate listener
    slogging.log_listeners.listeners.remove(log_cb)
    log.error('test listener', abc='nolistener')
    assert 'nolistener' in th.logged
    assert not seen
def test_logger_names():
    """Every name handed to get_logger() shows up in get_logger_names()."""
    setup_logging()
    wanted = set(['a', 'b', 'c'])
    for name in wanted:
        slogging.get_logger(name)
    assert wanted.issubset(set(slogging.get_logger_names()))
import time import socket import json import os from dispatch import receiver from stoppable import StoppableLoopThread import rlp import signals import bloom from peer import Peer from pyethereum.slogging import get_logger log_net = get_logger('net') log_p2p = get_logger('p2p') DEFAULT_SOCKET_TIMEOUT = 0.01 CONNECT_SOCKET_TIMEOUT = .5 CHECK_PEERCOUNT_INTERVAL = 1. def is_valid_ip(ip): # FIXME, IPV6 return ip.count('.') == 3 class PeerManager(StoppableLoopThread): max_silence = 10 # how long before pinging a peer max_ping_wait = 15 # how long to wait before disconenctiong after ping max_ask_for_peers_elapsed = 30 # how long before asking for peers
import logging

# slogging is optional here; fall back to stdlib logging below if missing
try:
    from pyethereum.slogging import get_logger, configure
except ImportError:
    print 'could not import slogging'

# patch logging to support kargs
_log_orig = logging.Logger._log


def _kargs_log(self, level, msg, args, exc_info=None, extra=None, **kargs):
    """Append extra keyword arguments as ' key=value' pairs to the message,
    then delegate to the original Logger._log."""
    msg += ' ' + ' '.join('%s=%r' % (k, v) for k, v in kargs.items())
    _log_orig(self, level, msg, args, exc_info, extra)

# NOTE: module-level monkey-patch — affects every stdlib Logger once imported
logging.Logger._log = _kargs_log
get_logger = logging.getLogger

if __name__ == '__main__':
    logging.basicConfig()
    log = get_logger('test')
    # demo: kwargs are rendered into the message by the patch above
    log.warn('miner.new_block', block_hash='abcdef123', nonce=2234231)
import blocks import transactions import trie import sys import json import fastvm import copy import specials import bloom import vm from exceptions import * sys.setrecursionlimit(100000) from pyethereum.slogging import get_logger log_tx = get_logger('eth.tx') log_msg = get_logger('eth.msg') log_state = get_logger('eth.msg.state') TT255 = 2 ** 255 TT256 = 2 ** 256 TT256M1 = 2 ** 256 - 1 OUT_OF_GAS = -1 # contract creating transactions send to an empty address CREATE_CONTRACT_ADDRESS = '' def verify(block, parent): try:
import pytest import json import pyethereum.processblock as pb import pyethereum.blocks as blocks import pyethereum.transactions as transactions import pyethereum.utils as u import os import sys import pyethereum.vm as vm from tests.utils import new_db from pyethereum.slogging import get_logger, configure_logging logger = get_logger() # customize VM log output to your needs # hint: use 'py.test' with the '-s' option to dump logs to the console configure_logging(':trace') def check_testdata(data_keys, expected_keys): assert set(data_keys) == set(expected_keys), \ "test data changed, please adjust tests" @pytest.fixture(scope="module") def vm_tests_fixtures(): """ Read vm tests from fixtures fixtures/VMTests/* """ # FIXME: assert that repo is uptodate # cd fixtures; git pull origin develop; cd ..; git commit fixtures
import sys import signals import rlp from pyethereum.utils import big_endian_to_int as idec from pyethereum.utils import int_to_big_endian4 as ienc4 from pyethereum.utils import recursive_int_to_big_endian from pyethereum.utils import sha3 from pyethereum import dispatch from .version import __version__ from pyethereum.slogging import get_logger log = get_logger('net.wire') def lrlp_decode(data): "always return a list" d = rlp.decode(data) if isinstance(d, str): d = [d] return d def load_packet(packet): return Packeter.load_packet(packet) class Packeter(object): """ Translates between the network and the local data https://github.com/ethereum/wiki/wiki/%5BEnglish%5D-Wire-Protocol https://github.com/ethereum/cpp-ethereum/wiki/%C3%90%CE%9EVP2P-Networking
from stoppable import StoppableLoopThread import signals from db import DB import utils import rlp import blocks import processblock import peermanager import config from transactions import Transaction from miner import Miner from synchronizer import Synchronizer from peer import MAX_GET_CHAIN_SEND_HASHES from peer import MAX_GET_CHAIN_REQUEST_BLOCKS from pyethereum.slogging import get_logger log = get_logger('eth.chain') rlp_hash_hex = lambda data: utils.sha3(rlp.encode(data)).encode('hex') NUM_BLOCKS_PER_REQUEST = 256 # MAX_GET_CHAIN_REQUEST_BLOCKS class Index(object): """" Collection of indexes children: - needed to get the uncles of a block blocknumbers:
import threading
from pyethereum.slogging import get_logger
log = get_logger('net')


class StoppableLoopThread(threading.Thread):
    """Thread that repeatedly runs loop_body() until stop() is requested."""

    def __init__(self):
        super(StoppableLoopThread, self).__init__()
        # guarded by self.lock; checked once per loop iteration
        self._stopped = False
        self.lock = threading.Lock()

    def stop(self):
        # request termination; the loop exits after the current iteration
        with self.lock:
            self._stopped = True
            log.debug('Thread is requested to stop', name=self)

    def stopped(self):
        with self.lock:
            return self._stopped

    def pre_loop(self):
        log.debug('Thread start to run', name=self)

    def post_loop(self):
        log.debug('Thread stopped', name=self)

    def run(self):
        self.pre_loop()
        while not self.stopped():
            # subclasses are expected to provide loop_body()
            self.loop_body()
        self.post_loop()
import socket import time import sys import traceback from dispatch import receiver from stoppable import StoppableLoopThread import signals from pyethereum.slogging import get_logger log_net = get_logger('net') def get_public_ip(): try: # for python3 from urllib.request import urlopen except ImportError: # for python2 from urllib import urlopen return urlopen('http://icanhazip.com/').read().strip() def upnp_add(port): ''' :param port: local port :return: `None` if failed, `external_ip, external_port` if succeed ''' log_net.debug('Setting UPNP') import miniupnpc
from operator import attrgetter import sys import blocks from pyethereum.slogging import get_logger log = get_logger('eth.sync') class HashChainTask(object): """ - get hashes chain until we see a known block hash """ NUM_HASHES_PER_REQUEST = 2000 def __init__(self, chain_manager, peer, block_hash): self.chain_manager = chain_manager self.peer = peer self.hash_chain = [] # [youngest, ..., oldest] self.request(block_hash) def request(self, block_hash): log.debug('requesting block_hashes', peer=self.peer, start=block_hash.encode('hex')) self.peer.send_GetBlockHashes(block_hash, self.NUM_HASHES_PER_REQUEST) def received_block_hashes(self, block_hashes): log.debug('HashChainTask.received_block_hashes', num=len(block_hashes)) if block_hashes and self.chain_manager.genesis.hash == block_hashes[-1]: log.debug('has different chain starting from genesis', peer=self.peer)
import utils import copy import opcodes import json from pyethereum.slogging import get_logger log_log = get_logger('eth.vm.log') log_vm_exit = get_logger('eth.vm.exit') log_vm_op = get_logger('eth.vm.op') log_vm_op_stack = get_logger('eth.vm.op.stack') log_vm_op_memory = get_logger('eth.vm.op.memory') log_vm_op_storage = get_logger('eth.vm.op.storage') TT256 = 2**256 TT256M1 = 2**256 - 1 TT255 = 2**255 class CallData(object): def __init__(self, parent_memory, offset=0, size=None): self.data = parent_memory self.offset = offset self.size = len(self.data) if size is None else size self.rlimit = self.offset + self.size def extract_all(self): d = self.data[self.offset:self.offset + self.size] d += [0] * (self.size - len(d)) return ''.join([chr(x) for x in d]) def extract32(self, i):
import os import leveldb import threading import compress from pyethereum.slogging import get_logger log = get_logger('db') databases = {} class DB(object): def __init__(self, dbfile): self.dbfile = os.path.abspath(dbfile) if dbfile in databases: self.db = databases[dbfile] else: self.db = leveldb.LevelDB(dbfile) databases[dbfile] = self.db self.uncommitted = dict() self.lock = threading.Lock() def get(self, key): if key in self.uncommitted: if self.uncommitted[key] is None: raise Exception("key not in db") return self.uncommitted[key] o = compress.decompress(self.db.Get(key)) self.uncommitted[key] = o return o
import threading from pyethereum.slogging import get_logger log = get_logger('net') class StoppableLoopThread(threading.Thread): def __init__(self): super(StoppableLoopThread, self).__init__() self._stopped = False self.lock = threading.Lock() def stop(self): with self.lock: self._stopped = True log.debug('Thread is requested to stop', name=self) def stopped(self): with self.lock: return self._stopped def pre_loop(self): log.debug('Thread start to run', name=self) def post_loop(self): log.debug('Thread stopped', name=self) def run(self): self.pre_loop() while not self.stopped(): self.loop_body()
import time import Queue import socket import signals from stoppable import StoppableLoopThread from packeter import packeter from utils import big_endian_to_int as idec from utils import recursive_int_to_big_endian import rlp import blocks from pyethereum.slogging import get_logger log_net = get_logger('net') log_p2p = get_logger('p2p') log_packet = get_logger('p2p.packet') log_eth = get_logger('eth.wire') # Maximum number of send hashes GetChain will accept MAX_GET_CHAIN_ACCEPT_HASHES = 2048 # Maximum number of hashes GetChain will ever send MAX_GET_CHAIN_SEND_HASHES = 2048 # Maximum number of blocks GetChain will ever ask for MAX_GET_CHAIN_ASK_BLOCKS = 512 # Maximum number of requested blocks GetChain will accept MAX_GET_CHAIN_REQUEST_BLOCKS = 512 # Maximum number of blocks Blocks will ever send MAX_BLOCKS_SEND = MAX_GET_CHAIN_REQUEST_BLOCKS # Maximum number of blocks Blocks will ever accept MAX_BLOCKS_ACCEPTED = MAX_BLOCKS_SEND
import bottle from pyethereum.chainmanager import chain_manager from pyethereum.peermanager import peer_manager import pyethereum.dispatch as dispatch from pyethereum.blocks import block_structure, Block import pyethereum.signals as signals from pyethereum.transactions import Transaction import pyethereum.processblock as processblock import pyethereum.utils as utils import rlp from pyethereum.slogging import get_logger, LogRecorder from ._version import get_versions log = get_logger('api') app = bottle.Bottle() app.config['autojson'] = False app.install(bottle.JSONPlugin(json_dumps=lambda s: json.dumps(s, sort_keys=True))) class ApiServer(threading.Thread): def __init__(self): super(ApiServer, self).__init__() self.daemon = True self.listen_host = '127.0.0.1' self.port = 30203 def configure(self, config):
from stoppable import StoppableLoopThread import signals from db import DB import utils import rlp import blocks import processblock import peermanager import config from transactions import Transaction from miner import Miner from synchronizer import Synchronizer from peer import MAX_GET_CHAIN_SEND_HASHES from peer import MAX_GET_CHAIN_REQUEST_BLOCKS from pyethereum.slogging import get_logger log = get_logger('eth.chain') rlp_hash_hex = lambda data: utils.sha3(rlp.encode(data)).encode('hex') NUM_BLOCKS_PER_REQUEST = 256 # MAX_GET_CHAIN_REQUEST_BLOCKS class Index(object): """" Collection of indexes children: - needed to get the uncles of a block blocknumbers: - needed to mark the longest chain (path to top) transactions:
import time import struct import blocks import processblock import utils from pyethereum.slogging import get_logger log = get_logger('eth.miner') class Miner(): """ Mines on the current head Stores received transactions The process of finalising a block involves four stages: 1) Validate (or, if mining, determine) uncles; 2) validate (or, if mining, determine) transactions; 3) apply rewards; 4) verify (or, if mining, compute a valid) state and nonce. """ def __init__(self, parent, uncles, coinbase): self.nonce = 0 ts = max(int(time.time()), parent.timestamp + 1) self.block = blocks.Block.init_from_parent(parent, coinbase, timestamp=ts, uncles=[u.list_header() for u in uncles]) self.pre_finalize_state_root = self.block.state_root self.block.finalize() log.debug('mining', block_number=self.block.number, block_hash=self.block.hex_hash(), block_difficulty=self.block.difficulty)
import time import rlp import trie import utils import processblock import transactions import bloom import copy import sys from repoze.lru import lru_cache from exceptions import * from pyethereum.slogging import get_logger from pyethereum.genesis_allocation import GENESIS_INITIAL_ALLOC log = get_logger('eth.block') log_state = get_logger('eth.msg.state') # Genesis block difficulty GENESIS_DIFFICULTY = 2 ** 17 # Genesis block gas limit GENESIS_GAS_LIMIT = 10 ** 6 # Genesis block prevhash, coinbase, nonce GENESIS_PREVHASH = '\00' * 32 GENESIS_COINBASE = "0" * 40 GENESIS_NONCE = utils.sha3(chr(42)) # Minimum gas limit MIN_GAS_LIMIT = 125000 # Gas limit adjustment algo: # block.gas_limit = block.parent.gas_limit * 1023/1024 + # (block.gas_used * 6 / 5) / 1024 GASLIMIT_EMA_FACTOR = 1024 BLKLIM_FACTOR_NOM = 6
def test_lvl_trace():
    """A ':trace' root config enables both debug and trace output."""
    handler = setup_logging(config_string=':trace')
    log = slogging.get_logger()
    assert handler.does_log(log.debug)
    assert handler.does_log(log.trace)
import blocks import transactions import trie import sys import json import fastvm import copy import specials import bloom import vm from exceptions import * sys.setrecursionlimit(100000) from pyethereum.slogging import get_logger log_tx = get_logger('eth.tx') log_msg = get_logger('eth.msg') log_state = get_logger('eth.msg.state') TT255 = 2**255 TT256 = 2**256 TT256M1 = 2**256 - 1 OUT_OF_GAS = -1 # contract creating transactions send to an empty address CREATE_CONTRACT_ADDRESS = '' def verify(block, parent): try:
def test_jsonconfig():
    """log_json=True emits JSON with the event prefixed by the logger name."""
    handler = setup_logging(log_json=True)
    log = slogging.get_logger('prefix')
    log.warn('abc', a=1)
    decoded = json.loads(handler.logged)
    assert decoded == dict(event='prefix.abc', a=1)
import sys import signals from pyethereum import rlp from pyethereum.utils import big_endian_to_int as idec from pyethereum.utils import int_to_big_endian4 as ienc4 from pyethereum.utils import recursive_int_to_big_endian from pyethereum.utils import sha3 from pyethereum import dispatch from .version import __version__ from pyethereum.slogging import get_logger log = get_logger('net.wire') def lrlp_decode(data): "always return a list" d = rlp.decode(data) if isinstance(d, str): d = [d] return d def load_packet(packet): return Packeter.load_packet(packet) class Packeter(object): """ Translates between the network and the local data https://github.com/ethereum/wiki/wiki/%5BEnglish%5D-Wire-Protocol https://github.com/ethereum/cpp-ethereum/wiki/%C3%90%CE%9EVP2P-Networking
from operator import attrgetter import sys import blocks from pyethereum.slogging import get_logger log = get_logger('eth.sync') class HashChainTask(object): """ - get hashes chain until we see a known block hash """ NUM_HASHES_PER_REQUEST = 2000 def __init__(self, chain_manager, peer, block_hash): self.chain_manager = chain_manager self.peer = peer self.hash_chain = [] # [youngest, ..., oldest] self.request(block_hash) def request(self, block_hash): log.debug('requesting block_hashes', peer=self.peer, start=block_hash.encode('hex')) self.peer.send_GetBlockHashes(block_hash, self.NUM_HASHES_PER_REQUEST) def received_block_hashes(self, block_hashes): log.debug('HashChainTask.received_block_hashes', num=len(block_hashes)) if block_hashes and self.chain_manager.genesis.hash == block_hashes[-1]: log.debug('has different chain starting from genesis', peer=self.peer) for bh in block_hashes:
import pytest import json import pyethereum.processblock as pb import pyethereum.vm as vm import pyethereum.blocks as blocks import pyethereum.transactions as transactions import pyethereum.utils as u import pyethereum.bloom as bloom import rlp import os import sys from tests.utils import new_db from pyethereum.slogging import get_logger, configure_logging logger = get_logger() # customize VM log output to your needs # hint: use 'py.test' with the '-s' option to dump logs to the console configure_logging(':trace') MAX_TESTS_PER_FILE = 200 def check_testdata(data_keys, expected_keys): assert set(data_keys) == set(expected_keys), \ "test data changed, please adjust tests" vm_fixture_cache = {} @pytest.fixture(scope="module") def vm_tests_fixtures():
import os import leveldb import threading import compress from pyethereum.slogging import get_logger log = get_logger('db') databases = {} class DB(object): def __init__(self, dbfile): self.dbfile = os.path.abspath(dbfile) if dbfile in databases: self.db = databases[dbfile] else: self.db = leveldb.LevelDB(dbfile) databases[dbfile] = self.db self.uncommitted = dict() self.lock = threading.Lock() def get(self, key): if key in self.uncommitted: if self.uncommitted[key] is None: raise Exception("key not in db") return self.uncommitted[key] o = compress.decompress(self.db.Get(key)) self.uncommitted[key] = o return o def put(self, key, value):