Example #1
def test_howto_use_in_tests():
    # select what you want to see:
    # TRACE from eth.vm, INFO for vm.pre_state, DEBUG for everything else
    config_string = ':DEBUG,eth.vm:TRACE,vm.pre_state:INFO'
    slogging.configure(config_string=config_string)
    log = slogging.get_logger('tests.logging')
    log.info('test starts')
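Note: the config string used throughout these examples is a comma-separated list of logger:LEVEL pairs, where an empty logger name addresses the root logger. A rough sketch of that grammar as a parser; parse_config_string is a hypothetical helper, not part of slogging:

def parse_config_string(config_string):
    """Parse ':DEBUG,eth.vm:TRACE' into {'': 'DEBUG', 'eth.vm': 'TRACE'}."""
    levels = {}
    for item in config_string.split(','):
        name, _, level = item.partition(':')
        levels[name] = level.upper()  # '' means the root logger
    return levels

assert parse_config_string(':DEBUG,eth.vm:TRACE,vm.pre_state:INFO') == {
    '': 'DEBUG', 'eth.vm': 'TRACE', 'vm.pre_state': 'INFO'}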
Example #2
def app(ctx, log_config, log_file, data_dir, unlock, password):
    slogging.configure(log_config, log_file=log_file)
    ctx.obj = {
        'log_config': log_config,
        'log_file': log_file,
        'config': {
            'node': {
                'data_dir': data_dir
            },
            'casper': {
                'network_id': 0,
                'validator_id': 0,
                'privkey': '',
                'epoch_length': 5,
                # mainnet #3570000
                'genesis_hash': '0xa9f4dc448a0b06a26ef0e4fd003e30d8450b87ab0c8e74ee50fc8f4d011a7c63'
            },
            'discovery': {
                'listen_host': '0.0.0.0',
                'listen_port': 20170,
                'bootstrap_nodes': [
                    'enode://%s@127.0.0.1:20170' % pubkeys[0]
                ]
            },
            'p2p': {
                'listen_host': '0.0.0.0',
                'listen_port': 20170,
                'max_peers': 4,
                'min_peers': 4
            }
        },
        'unlock': unlock,
        'password': password.read().rstrip() if password else None
    }
Example #3
def app(ctx, alt_config, config_values, data_dir, log_config):

    # configure logging
    log_config = log_config or ':info'
    slogging.configure(log_config)

    # data dir default or from cli option
    data_dir = data_dir or konfig.default_data_dir
    konfig.setup_data_dir(data_dir)  # if not available, sets up data_dir and required config
    log.info('using data in', path=data_dir)

    # prepare configuration
    # config files only contain required config (privkeys) and config different from the default
    if alt_config:  # specified config file
        config = konfig.load_config(alt_config)
    else:  # load config from default or set data_dir
        config = konfig.load_config(data_dir)

    config['data_dir'] = data_dir

    # add default config
    konfig.update_config_with_defaults(config, konfig.get_default_config([EthApp] + services))

    # override values with values from cmd line
    for config_value in config_values:
        try:
            konfig.set_config_param(config, config_value)
            # check if this is part of the default config
        except ValueError:
            raise BadParameter('Config parameter must be of the form "a.b.c=d" where "a.b.c" '
                               'specifies the parameter to set and d is a valid yaml value '
                               '(example: "-c jsonrpc.port=5000")')
    ctx.obj = {'config': config}
Example #4
def main():
    slogging.configure(':DEBUG')
    logging.basicConfig(level=logging.DEBUG)

    logger = logging.getLogger()
    logger.setLevel(logging.DEBUG)
    test_webui()
Example #5
def test_logging_source_file(caplog, log_method):
    slogging.configure(":trace")
    logger = slogging.getLogger("test")
    getattr(logger, log_method)("testmessage")

    v = caplog.records[0]
    print(v.pathname, v.module, v.name)
    assert caplog.records[0].module == "test_logging"
Example #6
def test_basic(caplog, level_name):
    slogging.configure(":trace")
    log = slogging.get_logger()
    with caplog.at_level("TRACE"):
        getattr(log, level_name)(level_name)

    assert len(caplog.records) == 1
    assert caplog.records[0].levelname == level_name.upper()
    assert level_name in caplog.records[0].msg
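Note: caplog.at_level("TRACE") works above because slogging registers a TRACE level below DEBUG. Plain stdlib logging can be given a comparable level; a minimal sketch, where the numeric value 5 and the monkey-patch are assumptions for illustration, not slogging's actual code:

import logging

TRACE = 5  # below DEBUG (10); the exact value here is an assumption
logging.addLevelName(TRACE, "TRACE")

def _trace(self, msg, *args, **kwargs):
    if self.isEnabledFor(TRACE):
        self._log(TRACE, msg, args, **kwargs)

logging.Logger.trace = _trace  # monkey-patch, for illustration only

logging.basicConfig(level=TRACE)
logging.getLogger("demo").trace("visible at TRACE")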
Example #7
def app(ctx, profile, alt_config, config_values, data_dir, log_config, bootstrap_node, log_json,
        mining_pct, unlock, password):

    # configure logging
    slogging.configure(log_config, log_json=log_json)

    # data dir default or from cli option
    data_dir = data_dir or konfig.default_data_dir
    konfig.setup_data_dir(data_dir)  # if not available, sets up data_dir and required config
    log.info('using data in', path=data_dir)

    # prepare configuration
    # config files only contain required config (privkeys) and config different from the default
    if alt_config:  # specified config file
        config = konfig.load_config(alt_config)
    else:  # load config from default or set data_dir
        config = konfig.load_config(data_dir)

    config['data_dir'] = data_dir

    # add default config
    konfig.update_config_with_defaults(config, konfig.get_default_config([EthApp] + services))

    log.DEV("Move to EthApp.default_config")
    konfig.update_config_with_defaults(config, {'eth': {'block': blocks.default_config}})

    # Set config values based on profile selection
    merge_dict(config, PROFILES[profile])

    # override values with values from cmd line
    for config_value in config_values:
        try:
            konfig.set_config_param(config, config_value)
            # check if this is part of the default config
            if config_value.startswith("eth.genesis"):
                del config['eth']['genesis_hash']
        except ValueError:
            raise BadParameter('Config parameter must be of the form "a.b.c=d" where "a.b.c" '
                               'specifies the parameter to set and d is a valid yaml value '
                               '(example: "-c jsonrpc.port=5000")')

    # Load genesis config
    update_config_from_genesis_json(config, config['eth']['genesis'])

    if bootstrap_node:
        config['discovery']['bootstrap_nodes'] = [bytes(bootstrap_node)]
    if mining_pct > 0:
        config['pow']['activated'] = True
        config['pow']['cpu_pct'] = int(min(100, mining_pct))
    if not config['pow']['activated']:
        config['deactivated_services'].append(PoWService.name)

    ctx.obj = {'config': config,
               'unlock': unlock,
               'password': password.read().rstrip() if password else None}
    # a password file, if provided, must not be empty
    assert not password or ctx.obj['password'], "empty password file"
Example #8
def test_logger_filter(caplog, logger_name, filter, should_log):
    slogging.configure()
    log = slogging.get_logger(logger_name)
    if filter:
        log.addFilter(logging.Filter(filter))
    log.info("testlogmessage", v=1)
    if should_log:
        assert "testlogmessage" in caplog.text
    else:
        assert "testlogmessage" not in caplog.text
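Note: Example #8 leans on stdlib filter semantics: logging.Filter("a.b") passes records from logger "a.b" and its descendants and rejects everything else. A self-contained check of exactly that behavior (logger names are illustrative):

import logging

f = logging.Filter("a.b")

def record(name):
    return logging.LogRecord(name, logging.INFO, "demo.py", 0, "msg", (), None)

assert f.filter(record("a.b"))       # exact match passes
assert f.filter(record("a.b.c"))     # child logger passes
assert not f.filter(record("a.bc"))  # a mere name prefix is rejected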
Example #9
def test_configuration():
    config_string = ":inFO,a:trace,a.b:debug"
    slogging.configure(config_string=config_string)
    log = slogging.get_logger()
    log_a = slogging.get_logger("a")
    log_a_b = slogging.get_logger("a.b")
    assert log.is_active("info")
    assert not log.is_active("debug")
    assert log_a.is_active("trace")
    assert log_a_b.is_active("debug")
    assert not log_a_b.is_active("trace")
Example #10
def test_configuration():
    config_string = ':inFO,a:trace,a.b:debug'
    slogging.configure(config_string=config_string)
    log = slogging.get_logger()
    log_a = slogging.get_logger('a')
    log_a_b = slogging.get_logger('a.b')
    assert log.is_active('info')
    assert not log.is_active('debug')
    assert log_a.is_active('trace')
    assert log_a_b.is_active('debug')
    assert not log_a_b.is_active('trace')
Example #11
def test(env, regex, endowment, log_config, log_json):
    slogging.configure(log_config, log_json=log_json)

    build = dapple.plugins.load('core.build')(env)

    abi, binary = None, None
    suite = {}

    for typename, info in build.iteritems():
        binary = ""
        if "binary" in info.keys():
            binary = info["binary"]
        else:
            binary = info["bin"]
        if regex is not None:
            if not re.match(".*"+regex+".*", typename, flags=re.IGNORECASE):
                continue
        if typename == "Test": # base test matches too often
            continue

        if binary == "": # Abstract classes
            continue
        abi = ""
        if "json-abi" in info.keys():
            abi = info["json-abi"]
        else:
            abi = info["abi"]
        jabi = json.loads(abi)
        is_test = False
        for item in jabi:
            if "name" in item.keys() and item["name"] == "IS_TEST":
                is_test = True
        if not is_test:
            continue

        print("Testing", typename)
        binary = binary.decode('hex')
        tmp = EvmContract(abi, binary, typename, [], gas=10**9)

        for func in dir(tmp):
            if func.startswith("test"):
                print("  " + func)
                contract = EvmContract(
                    abi, binary, typename, [], gas=10**9,
                    endowment=endowment, log_listener=LogEventLogger())
                if hasattr(contract, "setUp"):
                    contract.setUp()
                getattr(contract, func)()
                if contract.failed():
                    print("    Fail!")
Example #12
def app(privatekey, eth_rpc_endpoint, registry_contract_address,
        discovery_contract_address, listen_address, external_listen_address, logging):

    slogging.configure(logging)

    if not external_listen_address:
        # notify('if you are behind a NAT, you should set
        # `external_listen_address` and configure port forwarding on your router')
        external_listen_address = listen_address

    # config_file = args.config_file
    rpc_connection = split_endpoint(eth_rpc_endpoint)
    (listen_host, listen_port) = split_endpoint(listen_address)

    config = App.default_config.copy()
    config['host'] = listen_host
    config['port'] = listen_port
    config['privatekey_hex'] = privatekey

    jsonrpc_client = JSONRPCClient(
        privkey=privatekey,
        host=rpc_connection[0],
        port=rpc_connection[1],
        print_communication=False,
    )

    blockchain_service = BlockChainService(
        jsonrpc_client,
        registry_contract_address.decode('hex'),
    )
    discovery = ContractDiscovery(jsonrpc_client, discovery_contract_address.decode('hex'))  # FIXME: double encoding

    app = App(config, blockchain_service, discovery)

    discovery.register(app.raiden.address, *split_endpoint(external_listen_address))

    app.raiden.register_registry(blockchain_service.default_registry)

    # TODO:
    # - Ask for confirmation to quit if there are any locked transfers that did
    # not timeout.

    console = Console(app)
    console.start()

    # wait for interrupt
    event = gevent.event.Event()
    gevent.signal(signal.SIGQUIT, event.set)
    gevent.signal(signal.SIGTERM, event.set)
    gevent.signal(signal.SIGINT, event.set)
    event.wait()

    app.stop()
Example #13
def test_get_configuration():
    root_logger = slogging.getLogger()
    root_logger.manager.loggerDict = {}  # clear old loggers
    config_string = ":INFO,a:TRACE,a.b:DEBUG"
    log_json = False
    slogging.configure(config_string=config_string, log_json=log_json)
    config = slogging.get_configuration()
    assert config["log_json"] == log_json
    assert set(config["config_string"].split(",")) == set(config_string.split(","))

    log_json = True
    slogging.configure(config_string=config_string, log_json=log_json)
    config = slogging.get_configuration()
    assert config["log_json"] == log_json
    assert set(config["config_string"].split(",")) == set(config_string.split(","))

    # set config differently
    slogging.configure(config_string=":TRACE", log_json=False)
    config2 = slogging.get_configuration()

    # test whether we get original config
    slogging.configure(**config)
    config = slogging.get_configuration()
    assert config["log_json"] == log_json
    assert set(config["config_string"].split(",")) == set(config_string.split(","))
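Note: the save/reconfigure/restore dance in this test suggests a small helper. A sketch built only on the get_configuration()/configure(**config) round-trip demonstrated above:

from contextlib import contextmanager

from ethereum import slogging

@contextmanager
def preserved_slogging_config():
    saved = slogging.get_configuration()  # {'config_string': ..., 'log_json': ...}
    try:
        yield
    finally:
        slogging.configure(**saved)  # restore the snapshot

# usage: temporarily switch everything to TRACE
# with preserved_slogging_config():
#     slogging.configure(config_string=':TRACE')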
Example #14
def test_tracebacks(caplog):
    slogging.configure()
    log = slogging.get_logger()

    def div(a, b):
        try:
            _ = a / b
            log.error('here is the stack', stack_info=True)
        except Exception as e:
            log.error('an Exception trace should precede this msg', exc_info=True)
    div(1, 0)
    assert 'an Exception trace' in caplog.text()
    assert 'Traceback' in caplog.text()
    div(1, 1)
    assert 'the stack' in caplog.text()
Example #15
def test_bound_logger(caplog):
    slogging.configure(config_string=":trace")
    real_log = slogging.getLogger()

    bound_log_1 = real_log.bind(key1="value1")
    with caplog.at_level(slogging.TRACE):
        bound_log_1.info("test1")
        assert "test1" in caplog.text
        assert "key1=value1" in caplog.text

    bound_log_2 = bound_log_1.bind(key2="value2")
    with caplog.at_level(slogging.TRACE):
        bound_log_2.info("test2")
        assert "test2" in caplog.text
        assert "key1=value1" in caplog.text
        assert "key2=value2" in caplog.text
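Note: bind() returns a logger that attaches fixed key=value context to every message and can itself be re-bound. A minimal stand-alone sketch of that pattern; this is an illustration, not slogging's actual implementation:

class BoundLogger(object):
    """Carry fixed key=value context into every log call (sketch)."""

    def __init__(self, logger, context=None):
        self._logger = logger
        self._context = dict(context or {})

    def bind(self, **kwargs):
        # merge rather than mutate, so bound loggers never contaminate their parent
        return BoundLogger(self._logger, dict(self._context, **kwargs))

    def info(self, event, **kwargs):
        kv = dict(self._context, **kwargs)
        pairs = ' '.join('%s=%s' % item for item in sorted(kv.items()))
        self._logger.info('%s %s', event, pairs)

# usage with any stdlib logger:
# import logging
# log = BoundLogger(logging.getLogger("demo")).bind(key1="value1")
# log.info("test1")  # -> "test1 key1=value1"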
Example #16
def test_tracebacks(caplog):
    slogging.configure()
    log = slogging.get_logger()

    def div(a, b):
        try:
            _ = a // b
            log.error("here is the stack", stack_info=True)
        except Exception as e:
            log.error("an Exception trace should precede this msg", exc_info=True)

    div(1, 0)
    assert "an Exception trace" in caplog.text
    assert "Traceback" in caplog.text
    div(1, 1)
    assert "the stack" in caplog.text
Example #17
def app(ctx, alt_config, config_values, data_dir, log_config, bootstrap_node, log_json,
        mining_pct, unlock, password):

    # configure logging
    log_config = log_config or ':info'
    slogging.configure(log_config, log_json=log_json)

    # data dir default or from cli option
    data_dir = data_dir or konfig.default_data_dir
    konfig.setup_data_dir(data_dir)  # if not available, sets up data_dir and required config
    log.info('using data in', path=data_dir)

    # prepare configuration
    # config files only contain required config (privkeys) and config different from the default
    if alt_config:  # specified config file
        config = konfig.load_config(alt_config)
    else:  # load config from default or set data_dir
        config = konfig.load_config(data_dir)

    config['data_dir'] = data_dir

    # add default config
    konfig.update_config_with_defaults(config, konfig.get_default_config([EthApp] + services))

    # override values with values from cmd line
    for config_value in config_values:
        try:
            konfig.set_config_param(config, config_value)
            # check if this is part of the default config
        except ValueError:
            raise BadParameter('Config parameter must be of the form "a.b.c=d" where "a.b.c" '
                               'specifies the parameter to set and d is a valid yaml value '
                               '(example: "-c jsonrpc.port=5000")')
    if bootstrap_node:
        config['discovery']['bootstrap_nodes'] = [bytes(bootstrap_node)]
    if mining_pct > 0:
        config['pow']['activated'] = True
        config['pow']['cpu_pct'] = int(min(100, mining_pct))
    if not config['pow']['activated']:
        config['deactivated_services'].append(PoWService.name)

    ctx.obj = {'config': config,
               'unlock': unlock,
               'password': password.read().rstrip() if password else None}
Example #18
def logging_level(request):
    """ Configure the logging level.

    For integration tests this also sets the geth verbosity.
    """
    if request.config.option.log_config is not None:
        slogging.configure(request.config.option.log_config)

    elif request.config.option.verbose > 5:
        slogging.configure(':TRACE')

    elif request.config.option.verbose > 3:
        slogging.configure(':DEBUG')

    elif request.config.option.verbose > 1:
        slogging.configure(':INFO')

    else:
        slogging.configure(':WARNING')
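Note: the fixture maps pytest's -v count onto config strings with the thresholds >5, >3 and >1. The same mapping as a plain function; verbosity_to_config is a hypothetical name:

def verbosity_to_config(verbose):
    """Map a pytest -v count to a slogging config string (same thresholds as above)."""
    if verbose > 5:
        return ':TRACE'
    if verbose > 3:
        return ':DEBUG'
    if verbose > 1:
        return ':INFO'
    return ':WARNING'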
Example #19
def test_lazy_log():
    """
    test lazy evaluation of json log data
    e.g.
    class LogState
    class LogMemory
    """

    called_print = []

    class Expensive(object):
        def __repr__(self):
            called_print.append(1)
            return "expensive data preparation"

    slogging.configure(log_json=True)
    log = slogging.get_logger()
    log.trace("no", data=Expensive())
    assert not called_print
    log.info("yes", data=Expensive())  # above the configured level: repr() is evaluated
    assert called_print.pop()
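Note: the laziness comes from repr() running only when a record is actually formatted. Stdlib %-style logging defers in the same way, as this self-contained sketch shows:

import logging

class Expensive(object):
    def __repr__(self):
        print("expensive repr ran")
        return "<expensive>"

logging.basicConfig(level=logging.INFO)
log = logging.getLogger("lazy")
log.debug("dropped: %r", Expensive())  # below INFO: repr() never runs
log.info("kept: %r", Expensive())      # emitted: repr() runs during formatting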
Example #20
def test_bound_logger_isolation(caplog):
    """
    Ensure bound loggers don't "contaminate" their parent
    """
    slogging.configure(config_string=":trace")
    real_log = slogging.getLogger()

    bound_log_1 = real_log.bind(key1="value1")
    with caplog.at_level(slogging.TRACE):
        bound_log_1.info("test1")
        records = caplog.records
        assert len(records) == 1
        assert "test1" in records[0].msg
        assert "key1=value1" in records[0].msg

    with caplog.at_level(slogging.TRACE):
        real_log.info("test2")
        records = caplog.records
        assert len(records) == 2
        assert "test2" in records[1].msg
        assert "key1=value1" not in records[1].msg
Example #21
def test_how_to_use_as_vm_logger():
    """
    don't log unless there was an error
    """
    slogging.configure(":DEBUG,eth.vm:INFO")
    log = slogging.get_logger("eth.vm")

    # record all logs
    def run_vm(raise_error=False):
        log.trace("op", pc=1)
        log.trace("op", pc=2)
        if raise_error:
            raise Exception

    recorder = slogging.LogRecorder()
    try:
        run_vm(raise_error=True)
    except Exception:
        log = slogging.get_logger("eth.vm")
        for x in recorder.pop_records():
            log.info(x.pop("event"), **x)
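Note: the LogRecorder pattern (record cheaply, replay only on error) has a stdlib analogue in logging.handlers.MemoryHandler, which buffers records and flushes them to a target handler once a record at flushLevel arrives. A sketch:

import logging
import logging.handlers

buffered = logging.handlers.MemoryHandler(
    capacity=10000,                # maximum records to hold
    flushLevel=logging.ERROR,      # an ERROR record flushes the buffer
    target=logging.StreamHandler(),
)

vm_log = logging.getLogger("eth.vm.demo")
vm_log.setLevel(logging.DEBUG)
vm_log.addHandler(buffered)

vm_log.debug("op pc=1")    # buffered, not written yet
vm_log.debug("op pc=2")    # buffered
vm_log.error("vm failed")  # flushes both buffered records plus this one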
Example #22
def test_recorder(caplog):
    slogging.configure(log_json=True)
    log = slogging.get_logger()

    # test info
    recorder = slogging.LogRecorder()
    assert len(slogging.log_listeners) == 1
    log.info("a", v=1)
    assert "a" in caplog.text
    r = recorder.pop_records()
    assert r[0] == dict(event="a", v=1)
    assert len(slogging.log_listeners) == 0

    # test trace
    log.setLevel(logging.TRACE)
    recorder = slogging.LogRecorder()
    assert len(slogging.log_listeners) == 1
    log.trace("a", v=2)
    assert '"v": 2' in caplog.text
    r = recorder.pop_records()
    assert r[0] == dict(event="a", v=2)
    assert len(slogging.log_listeners) == 0
Example #23
def test_how_to_use_as_vm_logger():
    """
    don't log unless there was an error
    """

    config_string = ':DEBUG,eth.vm:INFO'
    slogging.configure(config_string=config_string)
    log = slogging.get_logger('eth.vm')

    # record all logs
    def run_vm(raise_error=False):
        log.trace('op', pc=1)
        log.trace('op', pc=2)
        if raise_error:
            raise Exception

    recorder = slogging.LogRecorder()
    try:
        run_vm(raise_error=True)
    except Exception:
        log = slogging.get_logger('eth.vm')
        for x in recorder.pop_records():
            log.info(x.pop('event'), **x)
Example #24
def test_listeners(caplog):
    slogging.configure()
    log = slogging.get_logger()

    called = []

    def log_cb(event_dict):
        called.append(event_dict)

    # activate listener
    slogging.log_listeners.append(log_cb)  # register the listener callback
    log.error("test listener", abc="thislistener")
    assert "thislistener" in caplog.text
    r = called.pop()
    assert r == dict(event="test listener", abc="thislistener")

    log.trace("trace is usually filtered", abc="thislistener")  # filtered at this level, so log_cb is not invoked
    assert "trace is usually filtered" not in caplog.text

    # deactivate listener
    slogging.log_listeners.remove(log_cb)
    log.error("test listener", abc="nolistener")
    assert "nolistener" in caplog.text
    assert not called
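Note: slogging's log_listeners hands each event dict to a callback. With plain stdlib logging, the closest equivalent is a tiny Handler that forwards records to a callback; a sketch:

import logging

class ListenerHandler(logging.Handler):
    """Invoke a callback for every record that passes level and filters (sketch)."""

    def __init__(self, callback):
        logging.Handler.__init__(self)
        self.callback = callback

    def emit(self, record):
        self.callback(record)

called = []
log = logging.getLogger("listener.demo")
log.addHandler(ListenerHandler(called.append))
log.error("test listener")
assert called and called[0].getMessage() == "test listener"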
Example #25
def test_get_configuration():
    config_string = ':INFO,a:TRACE,a.b:DEBUG'
    log_json = False
    slogging.configure(config_string=config_string, log_json=log_json)
    config = slogging.get_configuration()
    assert config['log_json'] == log_json
    assert set(config['config_string'].split(',')) == set(config_string.split(','))

    log_json = True
    slogging.configure(config_string=config_string, log_json=log_json)
    config = slogging.get_configuration()
    assert config['log_json'] == log_json
    assert set(config['config_string'].split(',')) == set(config_string.split(','))

    # set config differently
    slogging.configure(config_string=':TRACE', log_json=False)
    config2 = slogging.get_configuration()

    # test whether we get original config
    slogging.configure(**config)
    config = slogging.get_configuration()
    assert config['log_json'] == log_json
    assert set(config['config_string'].split(',')) == set(config_string.split(','))
Example #26
def test_logger_names():
    slogging.configure()
    names = {'a', 'b', 'c'}
    for n in names:
        slogging.get_logger(n)
    assert names.issubset(set(slogging.get_logger_names()))
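Note: get_logger_names() mirrors the stdlib logger registry that Example #13 also resets; in plain logging the same information lives in logging.Logger.manager.loggerDict:

import logging

for n in ('a', 'b', 'c'):
    logging.getLogger(n)

assert {'a', 'b', 'c'}.issubset(set(logging.Logger.manager.loggerDict))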
Example #27
def test_jsonconfig(caplog):
    slogging.configure(log_json=True)
    log = slogging.get_logger('prefix')
    log.warn('abc', a=1)
    assert json.loads(caplog.records()[0].msg) == dict(event='prefix.abc', a=1)
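Note: with log_json=True each event is serialized as a JSON object whose 'event' key is 'loggername.message'. A stdlib-only approximation using a custom Formatter; the exact field layout here is an assumption:

import json
import logging

class JsonFormatter(logging.Formatter):
    def format(self, record):
        return json.dumps({'event': '%s.%s' % (record.name, record.getMessage())})

handler = logging.StreamHandler()
handler.setFormatter(JsonFormatter())
log = logging.getLogger('prefix')
log.addHandler(handler)
log.warning('abc')  # emits: {"event": "prefix.abc"}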
Example #28
def test(env, regex, endowment, log_config, log_json, build_file):
    slogging.configure(log_config, log_json=log_json)

    if build_file == "":
        build = dapple.plugins.load('core.build')(env)

    elif not os.path.exists(build_file):
        print('Could not find file: %s' % build_file)
        exit(1)

    else:
        try:
            with open(build_file, 'r') as f:
                build = json.loads(f.read())

        except Exception:
            print('Could not parse JSON!', file=sys.stderr)
            exit(1)

    abi, binary = None, None
    suite = {}

    for typename, info in build.iteritems():
        binary = ""
        if "binary" in info.keys():
            binary = info["binary"]
        else:
            binary = info["bin"]
        if regex is not None:
            if not re.match(".*"+regex+".*", typename, flags=re.IGNORECASE):
                continue
        if typename == "Test": # base test matches too often
            continue

        if binary == "": # Abstract classes
            continue
        abi = ""
        if "json-abi" in info.keys():
            abi = info["json-abi"]
        else:
            abi = info["abi"]
        jabi = json.loads(abi)
        is_test = False
        for item in jabi:
            if "name" in item.keys() and item["name"] == "IS_TEST":
                is_test = True
        if not is_test:
            continue

        print("Testing", typename)
        binary = binary.decode('hex')
        tmp = EvmContract(abi, binary, typename, [], gas=10**9)

        for func in dir(tmp):
            if func.startswith("test"):
                print("  " + func)
                contract = EvmContract(
                    abi, binary, typename, [], gas=10**9,
                    endowment=endowment, log_listener=LogEventLogger())
                if hasattr(contract, "setUp"):
                    contract.setUp()
                getattr(contract, func)()
                if contract.failed():
                    print("    Fail!")
Example #29
from ethereum import tester
from ethereum import utils
from ethereum._solidity import get_solidity

SOLIDITY_AVAILABLE = get_solidity() is not None

import bitcoin

# Logging
from ethereum import slogging

slogging.configure(':INFO,eth.vm:INFO')
#slogging.configure(':DEBUG')
#slogging.configure(':DEBUG,eth.vm:TRACE')

xor = lambda (x, y): chr(ord(x) ^ ord(y))
xors = lambda x, y: ''.join(map(xor, zip(x, y)))
zfill = lambda s: (32 - len(s)) * '\x00' + s
flatten = lambda x: [z for y in x for z in y]


def broadcast(p, r, h, sig):
    print 'player[%d]' % p.i, 'broadcasts', r, h.encode('hex'), sig


def broadcastCommitment(p, r, m):
    print 'player[%d]' % p.i, 'opens', r, m.encode('hex')


def sign(h, priv):
    assert len(h) == 32
Example #30
def test_howto_use_in_tests():
    # select what you want to see:
    # TRACE from eth.vm, INFO for vm.pre_state, DEBUG for everything else
    slogging.configure(':DEBUG,eth.vm:TRACE,vm.pre_state:INFO')
    log = slogging.get_logger('tests.logging')
    log.info('test starts')
Example #31
def run(privatekey, registry_contract_address, discovery_contract_address,
        listen_address, logging, logfile, scenario, stage_prefix):  # pylint: disable=unused-argument

    # TODO: only enable logging on "initiators"
    slogging.configure(logging, log_file=logfile)

    (listen_host, listen_port) = split_endpoint(listen_address)

    config = App.DEFAULT_CONFIG.copy()
    config['host'] = listen_host
    config['port'] = listen_port
    config['privatekey_hex'] = privatekey

    privatekey_bin = decode_hex(privatekey)

    rpc_client = JSONRPCClient(
        '127.0.0.1',
        8545,
        privatekey_bin,
    )

    blockchain_service = BlockChainService(
        privatekey_bin,
        rpc_client,
        GAS_PRICE,
    )

    discovery = ContractDiscovery(blockchain_service,
                                  decode_hex(discovery_contract_address))

    registry = blockchain_service.registry(registry_contract_address)

    throttle_policy = TokenBucket(config['protocol']['throttle_capacity'],
                                  config['protocol']['throttle_fill_rate'])

    transport = UDPTransport(
        discovery,
        server._udp_socket((listen_host, listen_port)),
        throttle_policy,
        config['protocol'],
        dict(),
    )

    app = App(
        config,
        blockchain_service,
        registry,
        transport,
        discovery,
    )

    app.discovery.register(
        app.raiden.address,
        listen_host,
        listen_port,
    )

    app.raiden.register_payment_network(app.raiden.default_registry.address)

    if scenario:
        script = json.load(scenario)

        tools = ConsoleTools(
            app.raiden,
            app.discovery,
            app.config['settle_timeout'],
            app.config['reveal_timeout'],
        )

        transfers_by_peer = {}

        tokens = script['tokens']
        token_address = None
        peer = None
        our_node = hexlify(app.raiden.address)
        log.warning("our address is {}".format(our_node))
        for token in tokens:
            # skip tokens that we're not part of
            nodes = token['channels']
            if our_node not in nodes:
                continue

            partner_nodes = [node for node in nodes if node != our_node]

            # allow for prefunded tokens
            if 'token_address' in token:
                token_address = token['token_address']
            else:
                token_address = tools.create_token(registry_contract_address)

            transfers_with_amount = token['transfers_with_amount']

            # FIXME: in order to do bidirectional channels, only one side
            # (i.e. only token['channels'][0]) should
            # open; others should join by calling
            # raiden.api.deposit, AFTER the channel came alive!

            # NOTE: leaving unidirectional for now because it most
            #       probably will get to higher throughput

            log.warning("Waiting for all nodes to come online")

            api = RaidenAPI(app.raiden)

            for node in partner_nodes:
                api.start_health_check_for(node)

            while True:
                all_reachable = all(
                    api.get_node_network_state(node) == NODE_NETWORK_REACHABLE
                    for node in partner_nodes)

                if all_reachable:
                    break

                gevent.sleep(5)

            log.warning("All nodes are online")

            if our_node != nodes[-1]:
                our_index = nodes.index(our_node)
                peer = nodes[our_index + 1]

                tools.token_network_register(
                    app.raiden.default_registry.address, token_address)
                amount = transfers_with_amount[nodes[-1]]

                while True:
                    try:
                        app.discovery.get(peer.decode('hex'))
                        break
                    except KeyError:
                        log.warning(
                            "Error: peer {} not found in discovery".format(
                                peer))
                        time.sleep(random.randrange(30))

                while True:
                    try:
                        log.warning("Opening channel with {} for {}".format(
                            peer, token_address))
                        api.channel_open(app.raiden.default_registry.address,
                                         token_address, peer)
                        break
                    except KeyError:
                        log.warning(
                            "Error: could not open channel with {}".format(
                                peer))
                        time.sleep(random.randrange(30))

                while True:
                    try:
                        log.warning("Funding channel with {} for {}".format(
                            peer, token_address))
                        api.channel_deposit(
                            app.raiden.default_registry.address,
                            token_address,
                            peer,
                            amount,
                        )
                        break
                    except Exception:
                        log.warning(
                            "Error: could not deposit {} for {}".format(
                                amount, peer))
                        time.sleep(random.randrange(30))

                if our_index == 0:
                    last_node = nodes[-1]
                    transfers_by_peer[last_node] = int(amount)
            else:
                peer = nodes[-2]

        if stage_prefix is not None:
            open('{}.stage1'.format(stage_prefix), 'a').close()
            log.warning("Done with initialization, waiting to continue...")
            event = gevent.event.Event()
            gevent.signal(signal.SIGUSR2, event.set)
            event.wait()

        transfer_results = {'total_time': 0, 'timestamps': []}

        def transfer(token_address, amount_per_transfer, total_transfers, peer,
                     is_async):
            def transfer_():
                log.warning("Making {} transfers to {}".format(
                    total_transfers, peer))
                initial_time = time.time()
                times = [0] * total_transfers
                for index in range(total_transfers):
                    RaidenAPI(app.raiden).transfer(
                        app.raiden.default_registry.address,
                        token_address.decode('hex'),
                        amount_per_transfer,
                        peer,
                    )
                    times[index] = time.time()

                transfer_results['total_time'] = time.time() - initial_time
                transfer_results['timestamps'] = times

                log.warning("Making {} transfers took {}".format(
                    total_transfers, transfer_results['total_time']))
                log.warning("Times: {}".format(times))

            if is_async:
                return gevent.spawn(transfer_)
            else:
                transfer_()

        # If sending to multiple targets, do it asynchronously, otherwise
        # keep it simple and just send to the single target on my thread.
        if len(transfers_by_peer) > 1:
            greenlets = []
            for peer_, amount in transfers_by_peer.items():
                greenlet = transfer(token_address, 1, amount, peer_, True)
                if greenlet is not None:
                    greenlets.append(greenlet)

            gevent.joinall(greenlets)

        elif len(transfers_by_peer) == 1:
            for peer_, amount in transfers_by_peer.items():
                transfer(token_address, 1, amount, peer_, False)

        log.warning("Waiting for termination")

        open('{}.stage2'.format(stage_prefix), 'a').close()
        log.warning("Waiting for transfers to finish, will write results...")
        event = gevent.event.Event()
        gevent.signal(signal.SIGUSR2, event.set)
        event.wait()

        open('{}.stage3'.format(stage_prefix), 'a').close()
        event = gevent.event.Event()
        gevent.signal(signal.SIGQUIT, event.set)
        gevent.signal(signal.SIGTERM, event.set)
        gevent.signal(signal.SIGINT, event.set)
        event.wait()

    else:
        log.warning("No scenario file supplied, doing nothing!")

        open('{}.stage2'.format(stage_prefix), 'a').close()
        event = gevent.event.Event()
        gevent.signal(signal.SIGQUIT, event.set)
        gevent.signal(signal.SIGTERM, event.set)
        gevent.signal(signal.SIGINT, event.set)
        event.wait()

    app.stop()
Example #32
def config_logging(suffix='', datadir=None, loglevel=None, config_desc=None):
    """Config logger"""
    try:
        from loggingconfig_local import LOGGING
    except ImportError:
        from loggingconfig import LOGGING

    if datadir is None:
        datadir = simpleenv.get_local_datadir("default")
    logdir_path = os.path.join(datadir, 'logs')

    for handler_name, handler in LOGGING.get('handlers', {}).items():
        if 'filename' in handler:
            handler['filename'] %= {
                'logdir': str(logdir_path),
                'suffix': suffix,
            }
        skip_handler_names = (
            'error-file',
            'sentry',
            'sentry-metrics',
        )
        if handler_name in skip_handler_names:
            # Don't modify loglevel in this handler
            continue
        if loglevel:
            handler['level'] = loglevel

    if loglevel:
        for _logger in LOGGING.get('loggers', {}).values():
            _logger['level'] = loglevel
        LOGGING['root']['level'] = loglevel
        if config_desc and not config_desc.debug_third_party:
            LOGGING['loggers']['golem.rpc.crossbar']['level'] = 'WARNING'
            LOGGING['loggers']['twisted']['level'] = 'WARNING'

    try:
        if not os.path.exists(logdir_path):
            os.makedirs(logdir_path)

        logging.config.dictConfig(LOGGING)
    except (ValueError, PermissionError) as e:
        sys.stderr.write("Can't configure logging in: {} Got: {}\n".format(
            logdir_path, e))
        return  # Avoid consequent errors
    logging.captureWarnings(True)

    from golem.tools.talkback import enable_sentry_logger
    enable_sentry_logger(False)
    import txaio
    txaio.use_twisted()
    from ethereum import slogging
    if config_desc and config_desc.debug_third_party:
        slogging.configure(':debug')
    else:
        slogging.configure(':warning')

    from twisted.python import log
    observer = log.PythonLoggingObserver(loggerName='twisted')
    observer.start()

    crossbar_log_lvl = logging.getLevelName(
        logging.getLogger('golem.rpc.crossbar').level).lower()
    # txaio spells the level 'warn' where stdlib logging uses 'warning'
    if crossbar_log_lvl == 'warning':
        crossbar_log_lvl = 'warn'

    txaio.set_global_log_level(crossbar_log_lvl)  # pylint: disable=no-member
Example #33
def test_is_active():
    slogging.configure()
    tester = slogging.get_logger('tester')
    assert tester.is_active(level_name='info')
    assert not tester.is_active(level_name='trace')
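Note: is_active(level_name) is slogging's spelling of the stdlib's isEnabledFor(); the equivalent check with plain logging:

import logging

logging.basicConfig(level=logging.INFO)
tester = logging.getLogger('tester')
assert tester.isEnabledFor(logging.INFO)
assert not tester.isEnabledFor(logging.DEBUG)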
Example #34
def test_logging_reconfigure_levels(config, logger, level):
    slogging.configure(config)
    assert slogging.getLogger(logger).level == getattr(logging, level)
Example #35
def app(
        address,  # pylint: disable=too-many-arguments,too-many-locals
        keystore_path,
        eth_rpc_endpoint,
        registry_contract_address,
        discovery_contract_address,
        listen_address,
        socket,
        logging,
        logfile,
        max_unresponsive_time,
        send_ping_time,
        api_port,
        rpc,
        console):

    slogging.configure(logging, log_file=logfile)

    # config_file = args.config_file
    (listen_host, listen_port) = split_endpoint(listen_address)

    config = App.default_config.copy()
    config['host'] = listen_host
    config['port'] = listen_port
    config['max_unresponsive_time'] = max_unresponsive_time
    config['send_ping_time'] = send_ping_time
    config['console'] = console
    config['rpc'] = rpc
    config['api_port'] = api_port
    config['socket'] = socket

    accmgr = AccountManager(keystore_path)
    if not accmgr.accounts:
        raise RuntimeError('No Ethereum accounts found in the user\'s system')

    if not accmgr.address_in_keystore(address):
        addresses = list(accmgr.accounts.keys())
        formatted_addresses = [
            '[{:3d}] - 0x{}'.format(idx, addr)
            for idx, addr in enumerate(addresses)
        ]

        should_prompt = True

        print('The following accounts were found in your machine:')
        print('')
        print('\n'.join(formatted_addresses))
        print('')

        while should_prompt:
            idx = click.prompt('Select one of them by index to continue',
                               type=int)

            if idx >= 0 and idx < len(addresses):
                should_prompt = False
            else:
                print("\nError: Provided index '{}' is out of bounds\n".format(
                    idx))

        address = addresses[idx]

    unlock_tries = 3
    while True:
        try:
            privatekey_bin = accmgr.get_privkey(address)
            break
        except ValueError as e:
            # ValueError exception raised if the password is incorrect
            if unlock_tries == 0:
                print(
                    'Exhausted passphrase unlock attempts for {}. Aborting ...'
                    .format(address))
                sys.exit(1)

            print(
                'Incorrect passphrase to unlock the private key. {} tries remaining. '
                'Please try again or kill the process to quit. '
                'Usually Ctrl-c.'.format(unlock_tries))
            unlock_tries -= 1

    privatekey_hex = privatekey_bin.encode('hex')
    config['privatekey_hex'] = privatekey_hex

    endpoint = eth_rpc_endpoint

    if eth_rpc_endpoint.startswith("http://"):
        endpoint = eth_rpc_endpoint[len("http://"):]
        rpc_port = 80
    elif eth_rpc_endpoint.startswith("https://"):
        endpoint = eth_rpc_endpoint[len("https://"):]
        rpc_port = 443

    if ':' not in endpoint:  # no port was given in url
        rpc_host = endpoint
    else:
        rpc_host, rpc_port = split_endpoint(endpoint)

    # user may have provided registry and discovery contracts with leading 0x
    registry_contract_address = address_decoder(registry_contract_address)
    discovery_contract_address = address_decoder(discovery_contract_address)

    try:
        blockchain_service = BlockChainService(
            privatekey_bin,
            registry_contract_address,
            host=rpc_host,
            port=rpc_port,
        )
    except ValueError as e:
        # ValueError exception raised if:
        # - The registry contract address doesn't have code, this might happen
        # if the connected geth process is not synced or if the wrong address
        # is provided (e.g. using the address from a smart contract deployed on
        # ropsten with a geth node connected to morden)
        print(e.message)
        sys.exit(1)

    discovery = ContractDiscovery(
        blockchain_service.node_address,
        blockchain_service.discovery(discovery_contract_address))

    return App(config, blockchain_service, discovery)
Example #36
def runner(ctx, **kwargs):
    """ Start a raiden Echo Node that will send received transfers back to the initiator. """
    # This is largely a copy&paste job from `raiden.ui.cli::run`, with the difference that
    # an `EchoNode` is instantiated from the App's `RaidenAPI`.
    print('Welcome to Raiden, version {} [Echo Node]'.format(
        get_system_spec()['raiden']))
    slogging.configure(kwargs['logging'],
                       log_json=kwargs['log_json'],
                       log_file=kwargs['logfile'])
    if kwargs['logfile']:
        # Disable stream logging
        root = slogging.getLogger()
        for handler in root.handlers:
            if isinstance(handler, slogging.logging.StreamHandler):
                root.handlers.remove(handler)
                break

    token_address = kwargs.pop('token_address')

    (listen_host, listen_port) = split_endpoint(kwargs['listen_address'])
    with SocketFactory(listen_host, listen_port,
                       strategy=kwargs['nat']) as mapped_socket:
        kwargs['mapped_socket'] = mapped_socket

        app_ = ctx.invoke(app, **kwargs)

        domain_list = []
        if kwargs['rpccorsdomain']:
            if ',' in kwargs['rpccorsdomain']:
                for domain in kwargs['rpccorsdomain'].split(','):
                    domain_list.append(str(domain))
            else:
                domain_list.append(str(kwargs['rpccorsdomain']))

        raiden_api = RaidenAPI(app_.raiden)
        if ctx.params['rpc']:
            rest_api = RestAPI(raiden_api)
            api_server = APIServer(
                rest_api,
                cors_domain_list=domain_list,
                web_ui=ctx.params['web_ui'],
            )
            (api_host, api_port) = split_endpoint(kwargs["api_address"])
            api_server.start(api_host, api_port)

            print(
                "The Raiden API RPC server is now running at http://{}:{}/.\n\n"
                "See the Raiden documentation for all available endpoints at\n"
                "http://raiden-network.readthedocs.io/en/stable/rest_api.html".
                format(
                    api_host,
                    api_port,
                ))

        # This will install the EchoNode callback in the alarm task:
        echo = EchoNode(raiden_api, token_address)

        event = gevent.event.Event()
        gevent.signal(signal.SIGQUIT, event.set)
        gevent.signal(signal.SIGTERM, event.set)
        gevent.signal(signal.SIGINT, event.set)
        event.wait()

        # This will remove the EchoNode callback from the alarm task:
        echo.stop()

        try:
            api_server.stop()
        except NameError:
            pass
    app_.stop(leave_channels=False)
Example #37
import gevent
import gevent.wsgi
import gevent.queue
import rlp
from tinyrpc.dispatch import RPCDispatcher
from tinyrpc.dispatch import public as public_
from tinyrpc.exc import BadRequestError, MethodNotFoundError
from tinyrpc.protocols.jsonrpc import JSONRPCProtocol, JSONRPCInvalidParamsError
from tinyrpc.server.gevent import RPCServerGreenlets
from tinyrpc.transports.wsgi import WsgiServerTransport
from devp2p.service import BaseService
from eth_protocol import ETHProtocol
from ethereum.utils import denoms
from ethereum import slogging

log = slogging.get_logger('jsonrpc')
slogging.configure(config_string=':debug')

# defaults
default_startgas = 100 * 1000
default_gasprice = 100 * 1000 * denoms.wei

# route logging messages


class WSGIServerLogger(object):

    _log = slogging.get_logger('jsonrpc.wsgi')

    @classmethod
    def log(cls, msg):
        cls._log.debug(msg.strip())
Example #38
from collections import namedtuple

from ethereum import slogging

from raiden.tests.fixtures.tester import (
    tester_blockgas_limit,
    tester_channelmanager_library_address,
    tester_nettingchannel_library_address,
    tester_registry_address,
    tester_chain,
)

try:
    from termcolor import colored
except ImportError:
    def colored(text, color=None, on_color=None, attrs=None):
        return text


slogging.configure(':CRITICAL')

ProfileLine = namedtuple(
    'ProfileLine',
    (
        'recursion',
        'name',
        'calls',
        'cumulative',
        'total',
        'average',
    )
)

FORMAT_LINE = '{total:>6.4f} {cumulative:>6.4f} {avg:>6.4f} {align}{name} [{calls} calls]'
Example #39
def test_logging_reconfigure():
    config_string = ':WARNING'
    config_string1 = ':DEBUG,eth:INFO'
    config_string2 = ':DEBUG,eth.vm:INFO'
    main_logger = slogging.getLogger()

    slogging.configure(config_string)
    assert len(main_logger.handlers) == 2  # pytest-capturelog adds its own handler
    slogging.configure(config_string)
    assert len(main_logger.handlers) == 2  # pytest-capturelog adds its own handler

    eth_logger = slogging.getLogger('eth')
    slogging.configure(config_string1)
    assert len(eth_logger.handlers) == 0
    slogging.configure(config_string1)
    assert len(eth_logger.handlers) == 0

    eth_vm_logger = slogging.getLogger('eth.vm')
    slogging.configure(config_string2)
    assert len(eth_vm_logger.handlers) == 0
    slogging.configure(config_string2)
    assert len(eth_vm_logger.handlers) == 0
Example #40
    filepath = os.path.abspath(
        os.path.join(os.path.abspath(output_dir), filename))

    with open(filepath, "w") as f:
        json.dump(key, f, cls=BytesJSONEncoder)

    print("0x{}".format(key['address']))


def privatekey_to_address(private_key_bin):
    return sha3(
        PrivateKey(private_key_bin).public_key.format(
            compressed=False)[1:])[12:]


def make_keystore_json_patched(private_key, password):
    # Fix py3 bytes/string incompatibility in `make_keystore_json()`
    # See: https://github.com/ethereum/pyethereum/issues/758
    _encode_hex = keys.encode_hex
    setattr(keys, 'encode_hex',
            lambda *args: _encode_hex(*args).encode('ASCII'))
    try:
        return make_keystore_json(private_key, password)
    finally:
        setattr(keys, 'encode_hex', _encode_hex)


if __name__ == "__main__":
    slogging.configure(":ERROR")
    main()
Example #41
def test_highlight(caplog):
    slogging.configure()
    log = slogging.getLogger()

    log.DEV('testmessage')
    assert "\033[91mtestmessage \033[0m" in caplog.records()[0].msg
Example #42
import time
from raiden.transport import UDPTransport
from raiden.app import create_network
from raiden.tasks import TransferTask
import gevent
from ethereum import slogging
slogging.configure(
    "encoding:debug,protocol:debug,service:debug,tasks:debug,transport:debug")


def test_mediated_transfer(num_transfers=100,
                           num_nodes=10,
                           num_assets=1,
                           channels_per_node=2):

    apps = create_network(num_nodes=num_nodes,
                          num_assets=num_assets,
                          channels_per_node=channels_per_node,
                          transport_class=UDPTransport)

    def start_transfers(idx, num_transfers):
        a0 = apps[idx]

        # channels
        assets = sorted(a0.raiden.assetmanagers.keys())
        asset = assets[idx]
        am0 = a0.raiden.assetmanagers[asset]

        # search for a path of length=2 A > B > C
        num_hops = 2
        source = a0.raiden.address
Example #43
def test_initial_config():
    slogging.getLogger().handlers = []
    slogging.configure()
    assert len(slogging.getLogger().handlers) == 1
    assert isinstance(slogging.getLogger().handlers[0], logging.StreamHandler)
Example #44
# -*- coding: utf8 -*-
from __future__ import print_function

import gevent
import pytest
from ethereum import slogging

from raiden.messages import decode, Ack, DirectTransfer, CancelTransfer
from raiden.tasks import MediatedTransferTask
from raiden.tests.utils.messages import setup_messages_cb, MessageLogger
from raiden.tests.utils.network import create_network, create_sequential_network
from raiden.tests.utils.transfer import assert_synched_channels, channel, direct_transfer, transfer
from raiden.utils import pex, sha3

# pylint: disable=too-many-locals,too-many-statements,line-too-long
slogging.configure(':debug')

# set shorter timeout for testing
MediatedTransferTask.timeout_per_hop = 0.3


def teardown_module(module):  # pylint: disable=unused-argument
    from raiden.tests.utils.tests import cleanup_tasks
    cleanup_tasks()


def test_transfer():
    apps = create_network(num_nodes=2, num_assets=1, channels_per_node=1)
    app0, app1 = apps  # pylint: disable=unbalanced-tuple-unpacking

    messages = setup_messages_cb()
Example #45
def app(
        address,
        keystore_path,
        eth_rpc_endpoint,
        registry_contract_address,
        discovery_contract_address,
        listen_address,
        rpccorsdomain,  # pylint: disable=unused-argument
        socket,
        logging,
        logfile,
        max_unresponsive_time,
        send_ping_time,
        api_address,
        rpc,
        console,
        password_file):

    from raiden.app import App
    from raiden.network.rpc.client import BlockChainService

    slogging.configure(logging, log_file=logfile)

    # config_file = args.config_file
    (listen_host, listen_port) = split_endpoint(listen_address)
    (api_host, api_port) = split_endpoint(api_address)

    config = App.DEFAULT_CONFIG.copy()
    config['host'] = listen_host
    config['port'] = listen_port
    config['console'] = console
    config['rpc'] = rpc
    config['api_host'] = api_host
    config['api_port'] = api_port
    config['socket'] = socket

    retries = max_unresponsive_time / DEFAULT_NAT_KEEPALIVE_RETRIES
    config['protocol']['nat_keepalive_retries'] = retries
    config['protocol']['nat_keepalive_timeout'] = send_ping_time

    accmgr = AccountManager(keystore_path)
    if not accmgr.accounts:
        raise RuntimeError('No Ethereum accounts found in the user\'s system')

    if not accmgr.address_in_keystore(address):
        addresses = list(accmgr.accounts.keys())
        formatted_addresses = [
            '[{:3d}] - 0x{}'.format(idx, addr)
            for idx, addr in enumerate(addresses)
        ]

        should_prompt = True

        print('The following accounts were found in your machine:')
        print('')
        print('\n'.join(formatted_addresses))
        print('')

        while should_prompt:
            idx = click.prompt('Select one of them by index to continue',
                               type=int)

            if idx >= 0 and idx < len(addresses):
                should_prompt = False
            else:
                print("\nError: Provided index '{}' is out of bounds\n".format(
                    idx))

        address = addresses[idx]

    password = None
    if password_file:
        password = password_file.read().splitlines()[0]
    if password:
        try:
            privatekey_bin = accmgr.get_privkey(address, password)
        except ValueError as e:
            # ValueError exception raised if the password is incorrect
            print('Incorrect password for {} in file. Aborting ...'.format(
                address))
            sys.exit(1)
    else:
        unlock_tries = 3
        while True:
            try:
                privatekey_bin = accmgr.get_privkey(address)
                break
            except ValueError as e:
                # ValueError exception raised if the password is incorrect
                if unlock_tries == 0:
                    print(
                        'Exhausted passphrase unlock attempts for {}. Aborting ...'
                        .format(address))
                    sys.exit(1)

                print(
                    'Incorrect passphrase to unlock the private key. {} tries remaining. '
                    'Please try again or kill the process to quit. '
                    'Usually Ctrl-c.'.format(unlock_tries))
                unlock_tries -= 1

    privatekey_hex = privatekey_bin.encode('hex')
    config['privatekey_hex'] = privatekey_hex

    endpoint = eth_rpc_endpoint

    if eth_rpc_endpoint.startswith("http://"):
        endpoint = eth_rpc_endpoint[len("http://"):]
        rpc_port = 80
    elif eth_rpc_endpoint.startswith("https://"):
        endpoint = eth_rpc_endpoint[len("https://"):]
        rpc_port = 443

    if ':' not in endpoint:  # no port was given in url
        rpc_host = endpoint
    else:
        rpc_host, rpc_port = split_endpoint(endpoint)

    # user may have provided registry and discovery contracts with leading 0x
    registry_contract_address = address_decoder(registry_contract_address)
    discovery_contract_address = address_decoder(discovery_contract_address)

    try:
        blockchain_service = BlockChainService(
            privatekey_bin,
            registry_contract_address,
            host=rpc_host,
            port=rpc_port,
        )
    except ValueError as e:
        # ValueError exception raised if:
        # - The registry contract address doesn't have code, this might happen
        # if the connected geth process is not synced or if the wrong address
        # is provided (e.g. using the address from a smart contract deployed on
        # ropsten with a geth node connected to morden)
        print(e.message)
        sys.exit(1)

    discovery = ContractDiscovery(
        blockchain_service.node_address,
        blockchain_service.discovery(discovery_contract_address))

    # default database directory
    raiden_directory = os.path.join(os.path.expanduser('~'), '.raiden')
    if not os.path.exists(raiden_directory):
        os.makedirs(raiden_directory)
    database_path = os.path.join(raiden_directory, 'log.db')
    config['database_path'] = database_path

    return App(config, blockchain_service, discovery)
Example #46
def smoketest(ctx, debug, **kwargs):
    """ Test that the raiden installation is sane.
    """
    from raiden.api.python import RaidenAPI
    from raiden.blockchain.abi import get_static_or_compile
    from raiden.utils import get_contract_path

    # Check the solidity compiler early in the smoketest.
    #
    # Binary distributions don't need the solidity compiler but source
    # distributions do. Since this is checked by `get_static_or_compile`
    # function, use it as a proxy for validating the setup.
    get_static_or_compile(
        get_contract_path('HumanStandardToken.sol'),
        'HumanStandardToken',
    )

    report_file = tempfile.mktemp(suffix='.log')
    open(report_file, 'w+')

    def append_report(subject, data):
        with open(report_file, 'a') as handler:
            handler.write('{:=^80}'.format(' %s ' % subject.upper()) + os.linesep)
            if data is not None:
                handler.writelines([(data + os.linesep).encode('utf-8')])

    append_report('raiden version', json.dumps(get_system_spec()))
    append_report('raiden log', None)

    print('[1/5] getting smoketest configuration')
    smoketest_config = load_or_create_smoketest_config()

    print('[2/5] starting ethereum')
    ethereum, ethereum_config = start_ethereum(smoketest_config['genesis'])

    print('[3/5] starting raiden')

    # setup logging to log only into our report file
    slogging.configure(':DEBUG', log_file=report_file)
    root = slogging.getLogger()
    for handler in root.handlers:
        if isinstance(handler, slogging.logging.StreamHandler):
            root.handlers.remove(handler)
            break
    # setup cli arguments for starting raiden
    args = dict(
        discovery_contract_address=smoketest_config['contracts']['discovery_address'],
        registry_contract_address=smoketest_config['contracts']['registry_address'],
        eth_rpc_endpoint='http://127.0.0.1:{}'.format(ethereum_config['rpc']),
        keystore_path=ethereum_config['keystore'],
        address=ethereum_config['address'],
    )
    for option in app.params:
        if option.name in args.keys():
            args[option.name] = option.process_value(ctx, args[option.name])
        else:
            args[option.name] = option.default

    password_file = os.path.join(args['keystore_path'], 'password')
    with open(password_file, 'w') as handler:
        handler.write('password')

    args['mapped_socket'] = None
    args['password_file'] = click.File()(password_file)
    args['datadir'] = args['keystore_path']
    args['api_address'] = 'localhost:' + str(next(get_free_port('127.0.0.1', 5001)))
    args['sync_check'] = False

    # invoke the raiden app
    app_ = ctx.invoke(app, **args)

    raiden_api = RaidenAPI(app_.raiden)
    rest_api = RestAPI(raiden_api)
    api_server = APIServer(rest_api)
    (api_host, api_port) = split_endpoint(args['api_address'])
    api_server.start(api_host, api_port)

    success = False
    try:
        print('[4/5] running smoketests...')
        error = run_smoketests(app_.raiden, smoketest_config, debug=debug)
        if error is not None:
            append_report('smoketest assertion error', error)
        else:
            success = True
    finally:
        app_.stop()
        ethereum.send_signal(2)  # 2 == SIGINT, ask the geth process to exit

        out, err = ethereum.communicate()  # Popen.communicate() returns (stdout, stderr)
        append_report('geth init stdout', ethereum_config['init_log_out'].decode('utf-8'))
        append_report('geth init stderr', ethereum_config['init_log_err'].decode('utf-8'))
        append_report('ethereum stdout', out)
        append_report('ethereum stderr', err)
        append_report('smoketest configuration', json.dumps(smoketest_config))
    if success:
        print('[5/5] smoketest successful, report was written to {}'.format(report_file))
    else:
        print('[5/5] smoketest had errors, report was written to {}'.format(report_file))
        sys.exit(1)
Example #47
0
import sys
import random
from devp2p.app import BaseApp
from devp2p.protocol import BaseProtocol
from devp2p.service import WiredService
from devp2p.crypto import privtopub as privtopub_raw, sha3
from devp2p.utils import colors, COLOR_END
from devp2p import app_helper
import rlp
from rlp.utils import encode_hex, decode_hex, is_integer
import gevent
try:
    import ethereum.slogging as slogging
    slogging.configure(
        config_string=':info,p2p.protocol:info,p2p.peer:info,p2p.full_app:info'
    )
except ImportError:
    import devp2p.slogging as slogging
log = slogging.get_logger('full_app')


class Token(rlp.Serializable):
    "Object with the information to update a decentralized counter"
    fields = [('counter', rlp.sedes.big_endian_int),
              ('sender', rlp.sedes.binary)]

    def __init__(self, counter=0, sender=''):
        assert is_integer(counter)
        assert isinstance(sender, bytes)
        super(Token, self).__init__(counter, sender)
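# A hedged usage sketch for the Token class above, assuming the pyrlp version
# used by devp2p round-trips Serializable subclasses through encode/decode:
token = Token(counter=7, sender=b'\x00' * 20)
assert rlp.decode(rlp.encode(token), Token).counter == 7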
Example #48
0
def run(ctx, **kwargs):
    # pylint: disable=too-many-locals,too-many-branches,too-many-statements

    if ctx.invoked_subcommand is None:
        print('Welcome to Raiden, version {}!'.format(get_system_spec()['raiden']))
        from raiden.ui.console import Console
        from raiden.api.python import RaidenAPI

        slogging.configure(
            kwargs['logging'],
            log_json=kwargs['log_json'],
            log_file=kwargs['logfile']
        )
        if kwargs['logfile']:
            # Disable stream logging
            root = slogging.getLogger()
            for handler in root.handlers:
                if isinstance(handler, slogging.logging.StreamHandler):
                    root.handlers.remove(handler)
                    break

        # TODO:
        # - Ask for confirmation to quit if there are any locked transfers that did
        # not timeout.
        (listen_host, listen_port) = split_endpoint(kwargs['listen_address'])
        try:
            with SocketFactory(listen_host, listen_port, strategy=kwargs['nat']) as mapped_socket:
                kwargs['mapped_socket'] = mapped_socket

                app_ = ctx.invoke(app, **kwargs)

                domain_list = []
                if kwargs['rpccorsdomain']:
                    if ',' in kwargs['rpccorsdomain']:
                        for domain in kwargs['rpccorsdomain'].split(','):
                            domain_list.append(str(domain))
                    else:
                        domain_list.append(str(kwargs['rpccorsdomain']))

                if ctx.params['rpc']:
                    raiden_api = RaidenAPI(app_.raiden)
                    rest_api = RestAPI(raiden_api)
                    api_server = APIServer(
                        rest_api,
                        cors_domain_list=domain_list,
                        web_ui=ctx.params['web_ui'],
                        eth_rpc_endpoint=ctx.params['eth_rpc_endpoint'],
                    )
                    (api_host, api_port) = split_endpoint(kwargs['api_address'])
                    api_server.start(api_host, api_port)

                    print(
                        'The Raiden API RPC server is now running at http://{}:{}/.\n\n'
                        'See the Raiden documentation for all available endpoints at\n'
                        'http://raiden-network.readthedocs.io/en/stable/rest_api.html'.format(
                            api_host,
                            api_port,
                        )
                    )

                if ctx.params['console']:
                    console = Console(app_)
                    console.start()

                # wait for interrupt
                event = gevent.event.Event()
                gevent.signal(signal.SIGQUIT, event.set)
                gevent.signal(signal.SIGTERM, event.set)
                gevent.signal(signal.SIGINT, event.set)

                gevent.signal(signal.SIGUSR1, toggle_cpu_profiler)
                gevent.signal(signal.SIGUSR2, toggle_trace_profiler)

                event.wait()

                try:
                    api_server.stop()
                except NameError:
                    pass
        except socket.error as v:
            if v.args[0] == errno.EADDRINUSE:
                print('ERROR: Address %s:%s is in use. '
                      'Use --listen-address <host:port> to specify port to listen on.' %
                      (listen_host, listen_port))
                sys.exit(1)
            raise
        app_.stop(leave_channels=False)
    else:
        # Pass parsed args on to subcommands.
        ctx.obj = kwargs
Example #49
0
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function

import json

from ethereum import tester
from ethereum import slogging
from raiden.utils import privatekey_to_address, get_contract_path, safe_lstrip_hex

slogging.configure(":INFO")
log = slogging.getLogger(__name__)

TARGETS = dict(
    registry='Registry.sol',
    discovery='EndpointRegistry.sol',
    token='HumanStandardToken.sol',
)

DEFAULT_KEY = ('1' * 64).decode('hex')
DEFAULT_ACCOUNT = privatekey_to_address(DEFAULT_KEY)


def deploy_all(token_groups=None):
    if not token_groups:
        token_groups = dict()

    log.DEV(  # pylint: disable=no-member
        'default key',
        raw=tester.DEFAULT_KEY,
        enc=tester.DEFAULT_KEY.encode('hex'),
    )
Example #50
0
import os
from pyethapp import monkeypatches
from ethereum.db import EphemDB
from pyethapp import eth_service
from pyethapp import leveldb_service
from pyethapp import codernitydb_service
from pyethapp import eth_protocol
from ethereum import slogging
from ethereum import config as eth_config
import rlp
import tempfile
slogging.configure(config_string=':info')

empty = object()


class AppMock(object):

    config = dict(
        app=dict(dir=tempfile.mkdtemp()),
        db=dict(path='_db'),
        eth=dict(pruning=-1, network_id=1, block=eth_config.default_config),
    )

    class Services(dict):
        class accounts:
            coinbase = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"

        class peermanager:
            @classmethod
            def broadcast(*args, **kwargs):
Example #51
0
# -*- coding: utf8 -*-
import time

import gevent

from raiden.transport import UDPTransport
from raiden.app import create_network
from raiden.tasks import TransferTask
from ethereum import slogging

log = slogging.getLogger('test.speed')
slogging.configure(":error")


def test_mediated_transfer(num_transfers=100, num_nodes=10, num_assets=1, channels_per_node=2):

    apps = create_network(
        num_nodes=num_nodes,
        num_assets=num_assets,
        channels_per_node=channels_per_node,
        transport_class=UDPTransport)

    def start_transfers(idx, num_transfers):
        a0 = apps[idx]

        # channels
        assets = sorted(a0.raiden.assetmanagers.keys())
        asset = assets[idx]
        am0 = a0.raiden.assetmanagers[asset]

        # search for a path of length=2 A > B > C
Example #52
0
    tester_blockgas_limit,
    tester_channelmanager_library_address,
    tester_nettingchannel_library_address,
    tester_registry_address,
    tester_state,
)

try:
    from termcolor import colored
except ImportError:

    def colored(text, color=None, on_color=None, attrs=None):
        return text


slogging.configure(':CRITICAL')

ProfileLine = namedtuple('ProfileLine', (
    'recursion',
    'name',
    'calls',
    'cumulative',
    'total',
    'average',
))

FORMAT_LINE = '{total:>6.4f} {cumulative:>6.4f} {avg:>6.4f} {align}{name} [{calls} calls]'


def print_stats(stat_list, total_time):
    # The GreenletProfiler is based on YAPPI, the YAPPI implementation does
Example #53
0
# -*- coding: utf8 -*-
from __future__ import print_function

import gevent
import pytest
from ethereum import slogging

from raiden.messages import decode, Ack, DirectTransfer, CancelTransfer
from raiden.tasks import MediatedTransferTask
from raiden.tests.utils.messages import setup_messages_cb, MessageLogger
from raiden.tests.utils.network import create_network, create_sequential_network
from raiden.tests.utils.transfer import assert_synched_channels, channel, direct_transfer, transfer
from raiden.utils import pex, sha3

# pylint: disable=too-many-locals,too-many-statements,line-too-long
slogging.configure(':debug')

# set shorter timeout for testing
MediatedTransferTask.timeout_per_hop = 0.1


def test_transfer():
    apps = create_network(num_nodes=2, num_assets=1, channels_per_node=1)
    app0, app1 = apps  # pylint: disable=unbalanced-tuple-unpacking

    messages = setup_messages_cb()
    mlogger = MessageLogger()

    a0_address = pex(app0.raiden.address)
    a1_address = pex(app1.raiden.address)
Example #54
0
def run(privatekey, registry_contract_address, discovery_contract_address,
        listen_address, logging, logfile, scenario, stage_prefix,
        results_filename):  # pylint: disable=unused-argument

    # TODO: only enabled logging on "initiators"
    slogging.configure(logging, log_file=logfile)

    (listen_host, listen_port) = split_endpoint(listen_address)

    config = App.default_config.copy()
    config['host'] = listen_host
    config['port'] = listen_port
    config['privatekey_hex'] = privatekey

    blockchain_service = BlockChainService(
        decode_hex(privatekey),
        decode_hex(registry_contract_address),
        host="127.0.0.1",
        port="8545",
    )

    discovery = ContractDiscovery(blockchain_service,
                                  decode_hex(discovery_contract_address))

    app = App(config, blockchain_service, discovery)

    app.discovery.register(
        app.raiden.address,
        listen_host,
        listen_port,
    )

    app.raiden.register_registry(app.raiden.chain.default_registry.address)

    if scenario:
        script = json.load(scenario)

        tools = ConsoleTools(
            app.raiden,
            app.discovery,
            app.config['settle_timeout'],
            app.config['reveal_timeout'],
        )

        transfers_by_peer = {}

        tokens = script['tokens']
        token_address = None
        peer = None
        our_node = app.raiden.address.encode('hex')
        log.warning("our address is {}".format(our_node))
        for token in tokens:
            # skip tokens that we're not part of
            nodes = token['channels']
            if our_node not in nodes:
                continue

            # allow for prefunded tokens
            if 'token_address' in token:
                token_address = token['token_address']
            else:
                token_address = tools.create_token()

            transfers_with_amount = token['transfers_with_amount']

            # FIXME: in order to do bidirectional channels, only one side
            # (i.e. only token['channels'][0]) should
            # open; others should join by calling
            # raiden.api.deposit, AFTER the channel came alive!

            # NOTE: leaving unidirectional for now because it most
            #       probably will get to higher throughput

            log.warning("Waiting for all nodes to come online")

            while not all(
                    tools.ping(node) for node in nodes if node != our_node):
                gevent.sleep(5)

            log.warning("All nodes are online")

            if our_node != nodes[-1]:
                our_index = nodes.index(our_node)
                peer = nodes[our_index + 1]

                tools.register_token(token_address)
                amount = transfers_with_amount[nodes[-1]]

                while True:
                    try:
                        app.discovery.get(peer.decode('hex'))
                        break
                    except KeyError:
                        log.warning(
                            "Error: peer {} not found in discovery".format(
                                peer))
                        time.sleep(random.randrange(30))

                while True:
                    try:
                        log.warning("Opening channel with {} for {}".format(
                            peer, token_address))
                        app.raiden.api.open(token_address, peer)
                        break
                    except KeyError:
                        log.warning(
                            "Error: could not open channel with {}".format(
                                peer))
                        time.sleep(random.randrange(30))

                while True:
                    try:
                        log.warning("Funding channel with {} for {}".format(
                            peer, token_address))
                        app.raiden.api.deposit(token_address, peer, amount)
                        break
                    except Exception:
                        log.warning(
                            "Error: could not deposit {} for {}".format(
                                amount, peer))
                        time.sleep(random.randrange(30))

                if our_index == 0:
                    last_node = nodes[-1]
                    transfers_by_peer[last_node] = int(amount)
            else:
                peer = nodes[-2]

        if stage_prefix is not None:
            open('{}.stage1'.format(stage_prefix), 'a').close()
            log.warning("Done with initialization, waiting to continue...")
            event = gevent.event.Event()
            gevent.signal(signal.SIGUSR2, event.set)
            event.wait()

        transfer_results = {'total_time': 0, 'timestamps': []}

        def transfer(token_address, amount_per_transfer, total_transfers, peer,
                     is_async):
            def transfer_():
                log.warning("Making {} transfers to {}".format(
                    total_transfers, peer))
                initial_time = time.time()
                times = [0] * total_transfers
                for index in xrange(total_transfers):
                    app.raiden.api.transfer(
                        token_address.decode('hex'),
                        amount_per_transfer,
                        peer,
                    )
                    times[index] = time.time()

                transfer_results['total_time'] = time.time() - initial_time
                transfer_results['timestamps'] = times

                log.warning("Making {} transfers took {}".format(
                    total_transfers, transfer_results['total_time']))
                log.warning("Times: {}".format(times))

            if is_async:
                return gevent.spawn(transfer_)
            else:
                transfer_()

        # If sending to multiple targets, do it asynchronously, otherwise
        # keep it simple and just send to the single target on my thread.
        if len(transfers_by_peer) > 1:
            greenlets = []
            for peer_, amount in transfers_by_peer.items():
                greenlet = transfer(token_address, 1, amount, peer_, True)
                if greenlet is not None:
                    greenlets.append(greenlet)

            gevent.joinall(greenlets)

        elif len(transfers_by_peer) == 1:
            for peer_, amount in transfers_by_peer.items():
                transfer(token_address, 1, amount, peer_, False)

        log.warning("Waiting for termination")

        open('{}.stage2'.format(stage_prefix), 'a').close()
        log.warning("Waiting for transfers to finish, will write results...")
        event = gevent.event.Event()
        gevent.signal(signal.SIGUSR2, event.set)
        event.wait()

        results = tools.channel_stats_for(token_address, peer)
        if transfer_results['total_time'] != 0:
            results['total_time'] = transfer_results['total_time']
        if len(transfer_results['timestamps']) > 0:
            results['timestamps'] = transfer_results['timestamps']
        results['channel'] = repr(results['channel'])  # FIXME

        log.warning("Results: {}".format(results))

        with open(results_filename, 'w') as fp:
            json.dump(results, fp, indent=2)

        open('{}.stage3'.format(stage_prefix), 'a').close()
        event = gevent.event.Event()
        gevent.signal(signal.SIGQUIT, event.set)
        gevent.signal(signal.SIGTERM, event.set)
        gevent.signal(signal.SIGINT, event.set)
        event.wait()

    else:
        log.warning("No scenario file supplied, doing nothing!")

        open('{}.stage2'.format(stage_prefix), 'a').close()
        event = gevent.event.Event()
        gevent.signal(signal.SIGQUIT, event.set)
        gevent.signal(signal.SIGTERM, event.set)
        gevent.signal(signal.SIGINT, event.set)
        event.wait()

    app.stop()
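# A minimal sketch of the stage-file / signal rendezvous used above (the
# helper name is hypothetical): touch a marker file so an external coordinator
# can see this node's progress, then block until SIGUSR2 arrives.
def wait_for_stage(stage_prefix, stage_number):
    open('{}.stage{}'.format(stage_prefix, stage_number), 'a').close()
    event = gevent.event.Event()
    gevent.signal(signal.SIGUSR2, event.set)
    event.wait()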
Example #55
0
def run(privatekey,
        registry_contract_address,
        discovery_contract_address,
        listen_address,
        logging,
        logfile,
        scenario,
        stage_prefix,
        results_filename):  # pylint: disable=unused-argument

    # TODO: only enabled logging on "initiators"
    slogging.configure(logging, log_file=logfile)

    (listen_host, listen_port) = split_endpoint(listen_address)

    config = App.default_config.copy()
    config['host'] = listen_host
    config['port'] = listen_port
    config['privatekey_hex'] = privatekey

    blockchain_service = BlockChainService(
        decode_hex(privatekey),
        decode_hex(registry_contract_address),
        host="127.0.0.1",
        port="8545",
    )

    discovery = ContractDiscovery(
        blockchain_service,
        decode_hex(discovery_contract_address)
    )

    app = App(config, blockchain_service, discovery)

    app.discovery.register(
        app.raiden.address,
        listen_host,
        listen_port,
    )

    app.raiden.register_registry(app.raiden.chain.default_registry)

    if scenario:
        script = json.load(scenario)

        tools = ConsoleTools(
            app.raiden,
            app.discovery,
            app.config['settle_timeout'],
            app.config['reveal_timeout'],
        )

        transfers_by_peer = {}

        tokens = script['assets']
        token_address = None
        peer = None
        our_node = app.raiden.address.encode('hex')
        log.warning("our address is {}".format(our_node))
        for token in tokens:
            # skip tokens/assets that we're not part of
            nodes = token['channels']
            if our_node not in nodes:
                continue

            # allow for prefunded tokens
            if 'token_address' in token:
                token_address = token['token_address']
            else:
                token_address = tools.create_token()

            transfers_with_amount = token['transfers_with_amount']

            # FIXME: in order to do bidirectional channels, only one side
            # (i.e. only token['channels'][0]) should
            # open; others should join by calling
            # raiden.api.deposit, AFTER the channel came alive!

            # NOTE: leaving unidirectional for now because it most
            #       probably will get to higher throughput

            log.warning("Waiting for all nodes to come online")

            while not all(tools.ping(node) for node in nodes if node != our_node):
                gevent.sleep(5)

            log.warning("All nodes are online")

            if our_node != nodes[-1]:
                our_index = nodes.index(our_node)
                peer = nodes[our_index + 1]

                channel_manager = tools.register_asset(token_address)
                amount = transfers_with_amount[nodes[-1]]

                while True:
                    try:
                        app.discovery.get(peer.decode('hex'))
                        break
                    except KeyError:
                        log.warning("Error: peer {} not found in discovery".format(peer))
                        time.sleep(random.randrange(30))

                while True:
                    try:
                        log.warning("Opening channel with {} for {}".format(peer, token_address))
                        app.raiden.api.open(token_address, peer)
                        break
                    except KeyError:
                        log.warning("Error: could not open channel with {}".format(peer))
                        time.sleep(random.randrange(30))

                while True:
                    try:
                        log.warning("Funding channel with {} for {}".format(peer, token_address))
                        channel = app.raiden.api.deposit(token_address, peer, amount)
                        break
                    except Exception:
                        log.warning("Error: could not deposit {} for {}".format(amount, peer))
                        time.sleep(random.randrange(30))

                if our_index == 0:
                    last_node = nodes[-1]
                    transfers_by_peer[last_node] = int(amount)
            else:
                peer = nodes[-2]

        if stage_prefix is not None:
            open('{}.stage1'.format(stage_prefix), 'a').close()
            log.warning("Done with initialization, waiting to continue...")
            event = gevent.event.Event()
            gevent.signal(signal.SIGUSR2, event.set)
            event.wait()

        transfer_results = {'total_time': 0, 'timestamps': []}

        def transfer(token_address, amount_per_transfer, total_transfers, peer, is_async):
            def transfer_():
                log.warning("Making {} transfers to {}".format(total_transfers, peer))
                initial_time = time.time()
                times = [0] * total_transfers
                for index in xrange(total_transfers):
                    app.raiden.api.transfer(
                        token_address.decode('hex'),
                        amount_per_transfer,
                        peer,
                    )
                    times[index] = time.time()

                transfer_results['total_time'] = time.time() - initial_time
                transfer_results['timestamps'] = times

                log.warning("Making {} transfers took {}".format(
                    total_transfers, transfer_results['total_time']))
                log.warning("Times: {}".format(times))

            if is_async:
                return gevent.spawn(transfer_)
            else:
                transfer_()

        # If sending to multiple targets, do it asynchronously, otherwise
        # keep it simple and just send to the single target on my thread.
        if len(transfers_by_peer) > 1:
            greenlets = []
            for peer_, amount in transfers_by_peer.items():
                greenlet = transfer(token_address, 1, amount, peer_, True)
                if greenlet is not None:
                    greenlets.append(greenlet)

            gevent.joinall(greenlets)

        elif len(transfers_by_peer) == 1:
            for peer_, amount in transfers_by_peer.items():
                transfer(token_address, 1, amount, peer_, False)

        log.warning("Waiting for termination")

        open('{}.stage2'.format(stage_prefix), 'a').close()
        log.warning("Waiting for transfers to finish, will write results...")
        event = gevent.event.Event()
        gevent.signal(signal.SIGUSR2, event.set)
        event.wait()

        results = tools.channel_stats_for(token_address, peer)
        if transfer_results['total_time'] != 0:
            results['total_time'] = transfer_results['total_time']
        if len(transfer_results['timestamps']) > 0:
            results['timestamps'] = transfer_results['timestamps']
        results['channel'] = repr(results['channel'])  # FIXME

        log.warning("Results: {}".format(results))

        with open(results_filename, 'w') as fp:
            json.dump(results, fp, indent=2)

        open('{}.stage3'.format(stage_prefix), 'a').close()
        event = gevent.event.Event()
        gevent.signal(signal.SIGQUIT, event.set)
        gevent.signal(signal.SIGTERM, event.set)
        gevent.signal(signal.SIGINT, event.set)
        event.wait()

    else:
        log.warning("No scenario file supplied, doing nothing!")

        open('{}.stage2'.format(stage_prefix), 'a').close()
        event = gevent.event.Event()
        gevent.signal(signal.SIGQUIT, event.set)
        gevent.signal(signal.SIGTERM, event.set)
        gevent.signal(signal.SIGINT, event.set)
        event.wait()

    app.stop()
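# A minimal sketch of the retry-with-backoff pattern repeated above (helper
# and argument names are illustrative): retry the action, sleeping a random
# interval between attempts, until it succeeds.
def retry_forever(action, description, max_backoff=30):
    while True:
        try:
            return action()
        except Exception:
            log.warning("Error: {} failed, retrying".format(description))
            time.sleep(random.randrange(max_backoff))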
Example #56
0
def main():
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('--transfers', default=100, type=int)
    parser.add_argument('--nodes', default=10, type=int)
    parser.add_argument('--assets', default=1, type=int)
    parser.add_argument('--channels-per-node', default=2, type=int)
    parser.add_argument('-p', '--profile', default=False, action='store_true')
    parser.add_argument('--pdb', default=False, action='store_true')
    parser.add_argument('--throughput',
                        dest='throughput',
                        action='store_true',
                        default=True)
    parser.add_argument('--latency', dest='throughput', action='store_false')
    parser.add_argument('--log', action='store_true', default=False)
    args = parser.parse_args()

    if args.log:
        slogging.configure(':DEBUG')

    if args.profile:
        import GreenletProfiler
        GreenletProfiler.set_clock_type('cpu')
        GreenletProfiler.start()

    assets = [
        sha3('asset:{}'.format(number))[:20] for number in range(args.assets)
    ]

    amount = 10
    apps = setup_apps(
        amount,
        assets,
        args.transfers,
        args.nodes,
        args.channels_per_node,
    )

    if args.pdb:
        from pyethapp.utils import enable_greenlet_debugger
        enable_greenlet_debugger()

        try:
            if args.throughput:
                test_throughput(apps, assets, args.transfers, amount)
            else:
                test_latency(apps, assets, args.transfers, amount)
        except Exception:
            import pdb
            pdb.xpm()  # post-mortem on the active exception; xpm() comes from pdb++ (pdbpp)
    else:
        if args.throughput:
            test_throughput(apps, assets, args.transfers, amount)
        else:
            test_latency(apps, assets, args.transfers, amount)

    if args.profile:
        GreenletProfiler.stop()
        stats = GreenletProfiler.get_func_stats()
        pstats = GreenletProfiler.convert2pstats(stats)

        print_serialization(pstats)
        print_slow_path(pstats)
        print_slow_function(pstats)

        pstats.sort_stats('time').print_stats()
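# A minimal sketch of the GreenletProfiler lifecycle used above, built only
# from the calls the example itself makes; `profile_call` is illustrative.
def profile_call(func):
    import GreenletProfiler
    GreenletProfiler.set_clock_type('cpu')
    GreenletProfiler.start()
    try:
        func()
    finally:
        GreenletProfiler.stop()
    return GreenletProfiler.convert2pstats(GreenletProfiler.get_func_stats())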
Example #57
0
from pyethapp.db_service import DBService
from pyethapp.profiles import PROFILES
from pyethapp.jsonrpc import JSONRPCServer
from pyethapp.accounts import AccountsService, Account
import ethereum.slogging as slogging
import pyethapp.app as pyethapp_app
from pyethapp.accounts import mk_privkey, privtopub
from devp2p.crypto import privtopub as privtopub_raw
from devp2p.utils import host_port_pubkey_to_uri
from ethereum.keys import privtoaddr, PBKDF2_CONSTANTS

# local
from hydrachain.hdc_service import ChainService
from hydrachain import __version__

slogging.configure(config_string=':debug')
log = slogging.get_logger('app')


services = [DBService,
            AccountsService,
            NodeDiscovery,
            PeerManager,
            ChainService,
            JSONRPCServer,
            Console]

pyethapp_app.services = services


class HPCApp(pyethapp_app.EthApp):
Example #58
0
from raiden.tests.utils.blockchain import wait_until_block
from raiden.tests.utils.messages import setup_messages_cb
from raiden.tests.utils.network import CHAIN
from raiden.tests.utils.transfer import (
    assert_synched_channels,
    channel,
    direct_transfer,
    get_received_transfer,
    get_sent_transfer,
    pending_mediated_transfer,
    claim_lock,
)
from raiden.utils import sha3, privatekey_to_address

# pylint: disable=too-many-locals,too-many-statements
slogging.configure(':DEBUG')


@pytest.mark.parametrize('privatekey_seed', ['settlement:{}'])
@pytest.mark.parametrize('number_of_nodes', [2])
def test_settlement(raiden_network, settle_timeout, reveal_timeout):
    alice_app, bob_app = raiden_network  # pylint: disable=unbalanced-tuple-unpacking

    setup_messages_cb()

    alice_graph = alice_app.raiden.channelgraphs.values()[0]
    bob_graph = bob_app.raiden.channelgraphs.values()[0]
    assert alice_graph.token_address == bob_graph.token_address

    alice_bob_channel = alice_graph.partneraddress_channel[bob_app.raiden.address]
    bob_alice_channel = bob_graph.partneraddress_channel[alice_app.raiden.address]
Example #59
0
from raiden.tests.utils.blockchain import wait_until_block
from raiden.tests.utils.messages import setup_messages_cb
from raiden.tests.utils.network import CHAIN
from raiden.tests.utils.transfer import (
    assert_synched_channels,
    channel,
    direct_transfer,
    get_received_transfer,
    get_sent_transfer,
    pending_mediated_transfer,
    claim_lock,
)
from raiden.utils import sha3

# pylint: disable=too-many-locals,too-many-statements
slogging.configure(':DEBUG')


@pytest.mark.xfail(reason='issue #198')
@pytest.mark.timeout(60)
@pytest.mark.parametrize('privatekey_seed', ['settlement:{}'])
@pytest.mark.parametrize('number_of_nodes', [2])
def test_settlement(raiden_network, settle_timeout, reveal_timeout):
    app0, app1 = raiden_network  # pylint: disable=unbalanced-tuple-unpacking

    setup_messages_cb()

    asset_manager0 = app0.raiden.managers_by_asset_address.values()[0]
    asset_manager1 = app1.raiden.managers_by_asset_address.values()[0]

    chain0 = app0.raiden.chain
Example #60
0
def teardown_function(function):
    """ teardown any state that was previously setup with a setup_function
    call.
    """
    slogging.configure(**function.snapshot)
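# A hedged sketch of the matching setup_function, assuming slogging exposes
# get_configuration() (as pyethereum's slogging does): snapshot the current
# logging configuration so the teardown above can restore it.
def setup_function(function):
    function.snapshot = slogging.get_configuration()
    slogging.configure(':trace')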