def test_chain_config_explicit_properties():
    chain_config = ChainConfig('muffin',
                               data_dir='./data-dir',
                               nodekey_path='./nodekey')

    assert is_same_path(chain_config.data_dir, './data-dir')
    assert is_same_path(chain_config.nodekey_path, './nodekey')

def test_chain_config_nodekey_loading(nodekey_bytes, nodekey_path):
    chain_config = ChainConfig(
        'muffin',
        nodekey_path=nodekey_path,
    )

    assert chain_config.nodekey.to_bytes() == nodekey_bytes

def test_chain_config_explicitly_provided_nodekey(nodekey_bytes, as_bytes):
    chain_config = ChainConfig(
        'muffin',
        nodekey=nodekey_bytes if as_bytes else keys.PrivateKey(nodekey_bytes),
    )

    assert chain_config.nodekey.to_bytes() == nodekey_bytes
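
The nodekey tests above receive nodekey_bytes, nodekey_path and as_bytes as pytest fixtures that are not shown on this page. A minimal sketch of such fixtures is given below; only the fixture names come from the test signatures, the bodies are assumptions.

import pytest

# Hypothetical fixture definitions; the project's real conftest.py may differ.
@pytest.fixture
def nodekey_bytes():
    # Any 32 bytes that form a valid secp256k1 private key will do for the sketch.
    return b'\x01' * 32

@pytest.fixture
def nodekey_path(tmp_path, nodekey_bytes):
    # Write the key to a temporary file and hand back its path.
    path = tmp_path / 'nodekey'
    path.write_bytes(nodekey_bytes)
    return str(path)

@pytest.fixture(params=[True, False])
def as_bytes(request):
    # Run the explicit-nodekey test once with raw bytes and once with a PrivateKey.
    return request.param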
Example #4
def test_chain_config_nodekey_loading(nodekey_bytes, nodekey_path):
    chain_config = ChainConfig(
        network_id=1,
        nodekey_path=nodekey_path,
    )

    assert chain_config.nodekey.to_bytes() == nodekey_bytes
def test_chain_config_computed_properties():
    data_dir = get_local_data_dir('muffin')
    chain_config = ChainConfig(network_id=1234, data_dir=data_dir)

    assert chain_config.network_id == 1234
    assert chain_config.data_dir == data_dir
    assert chain_config.database_dir == get_database_dir(data_dir)
    assert chain_config.nodekey_path == get_nodekey_path(data_dir)
Example #6
def test_chain_config_computed_properties():
    data_dir = get_local_data_dir('muffin')
    chain_config = ChainConfig(network_id=1234, data_dir=data_dir)

    assert chain_config.network_id == 1234
    assert chain_config.data_dir == data_dir
    assert chain_config.database_dir == os.path.join(data_dir,
                                                     DATABASE_DIR_NAME, "full")
    assert chain_config.nodekey_path == get_nodekey_path(data_dir)
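
Read together with the previous example, the assertion above suggests roughly how the database directory is derived from the data directory. A hypothetical reconstruction follows; the value of DATABASE_DIR_NAME and the helper body are assumptions, not the library's actual code.

import os

DATABASE_DIR_NAME = 'chain'  # assumed value, for illustration only

def get_database_dir(data_dir):
    # Mirrors the assertion above: <data_dir>/<DATABASE_DIR_NAME>/full
    return os.path.join(data_dir, DATABASE_DIR_NAME, 'full')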
Example #7
def test_full_initialized_data_dir_with_custom_nodekey():
    chain_config = ChainConfig(network_id=1, nodekey=NODEKEY)

    os.makedirs(chain_config.data_dir, exist_ok=True)
    os.makedirs(chain_config.database_dir, exist_ok=True)

    assert chain_config.nodekey_path is None
    assert chain_config.nodekey is not None

    assert is_data_dir_initialized(chain_config)
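
The test above implies that is_data_dir_initialized accepts a data directory as initialized once the directories exist and a nodekey is available either on disk or passed in explicitly. A rough sketch of those checks, purely for illustration and not the library's implementation:

import os

def is_data_dir_initialized(chain_config):
    # Both the data directory and the database directory must exist.
    if not os.path.exists(chain_config.data_dir):
        return False
    if not os.path.exists(chain_config.database_dir):
        return False
    if chain_config.nodekey_path is None:
        # An explicitly provided nodekey stands in for the on-disk key file.
        return chain_config.nodekey is not None
    return os.path.exists(chain_config.nodekey_path)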
Example #8
def database_server_ipc_path():
    core_db = MemoryDB()
    core_db[b'key-a'] = b'value-a'

    chaindb = ChainDB(core_db)
    # TODO: use a custom chain class only for testing.
    chaindb.persist_header(ROPSTEN_GENESIS_HEADER)

    with tempfile.TemporaryDirectory() as temp_dir:
        chain_config = ChainConfig(network_id=ROPSTEN_NETWORK_ID,
                                   data_dir=temp_dir)

        chaindb_server_process = multiprocessing.Process(
            target=serve_chaindb,
            args=(chain_config, core_db),
        )
        chaindb_server_process.start()

        wait_for_ipc(chain_config.database_ipc_path)

        try:
            yield chain_config.database_ipc_path
        finally:
            kill_process_gracefully(chaindb_server_process)
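
The generator above is written to be used as a pytest fixture (the decorator does not appear in this listing): it yields the database IPC path while the server process is running and tears the process down afterwards. A hypothetical consumer might look like this; real tests would normally connect to the socket rather than merely check that it exists.

import os

def test_database_ipc_socket_exists(database_server_ipc_path):
    # The fixture only yields after wait_for_ipc has seen the socket appear.
    assert os.path.exists(database_server_ipc_path)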

def test_chain_config_computed_properties():
    chain_config = ChainConfig('muffin')

    assert chain_config.data_dir == get_default_data_dir('muffin')
    assert chain_config.database_dir == get_database_dir('muffin')
    assert chain_config.nodekey_path == get_nodekey_path('muffin')
Example #10
def main() -> None:
    args = parser.parse_args()

    if args.ropsten:
        chain_identifier = ROPSTEN
    else:
        # TODO: mainnet
        chain_identifier = ROPSTEN

    if args.light:
        sync_mode = SYNC_LIGHT
    else:
        # TODO: actually use args.sync_mode (--sync-mode)
        sync_mode = SYNC_LIGHT

    chain_config = ChainConfig.from_parser_args(
        chain_identifier,
        args,
    )

    if not is_data_dir_initialized(chain_config):
        # TODO: this will only work as is for chains with known genesis
        # parameters.  Need to flesh out how genesis parameters for custom
        # chains are defined and passed around.
        initialize_data_dir(chain_config)

    pool_class = PeerPool
    if args.local_geth:
        pool_class = LocalGethPeerPool

    # if console command, run the trinity CLI
    if args.subcommand == 'console':
        use_ipython = not args.vanilla_shell
        debug = args.log_level.upper() == 'DEBUG'

        # TODO: this should use the base `Chain` class rather than the protocol
        # class since it's just a repl with access to the chain.
        chain_class = get_chain_protocol_class(chain_config, sync_mode)
        chaindb = FakeAsyncChainDB(LevelDB(chain_config.database_dir))
        if not is_database_initialized(chaindb):
            initialize_database(chain_config, chaindb)
        peer_pool = pool_class(LESPeer, chaindb, chain_config.network_id,
                               chain_config.nodekey)

        chain = chain_class(chaindb, peer_pool)
        console(chain, use_ipython=use_ipython, debug=debug)
        sys.exit(0)

    logger, log_queue, listener = setup_trinity_logging(args.log_level.upper())

    # start the listener thread to handle logs produced by other processes in
    # the local logger.
    listener.start()

    # First initialize the database process.
    database_server_process = ctx.Process(target=run_database_process,
                                          args=(
                                              chain_config,
                                              LevelDB,
                                          ),
                                          kwargs={'log_queue': log_queue})

    # For now we just run the light sync against ropsten by default.
    networking_process = ctx.Process(target=run_networking_process,
                                     args=(chain_config, sync_mode,
                                           pool_class),
                                     kwargs={'log_queue': log_queue})

    # start the processes
    database_server_process.start()
    wait_for_ipc(chain_config.database_ipc_path)

    networking_process.start()

    try:
        networking_process.join()
    except KeyboardInterrupt:
        logger.info('Keyboard Interrupt: Stopping')
        kill_process_gracefully(networking_process)
        logger.info('KILLED networking_process')
        kill_process_gracefully(database_server_process)
        logger.info('KILLED database_server_process')
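
On Ctrl-C, main() shuts the child processes down through kill_process_gracefully. A minimal sketch of such a helper, assuming it forwards SIGINT and escalates if the child does not exit in time, is shown below; it is an illustration, not trinity's actual implementation.

import os
import signal

def kill_process_gracefully(process, timeout=5):
    # Ask the child to shut down the same way Ctrl-C would.
    os.kill(process.pid, signal.SIGINT)
    process.join(timeout)
    if process.is_alive():
        # Escalate if the process is still running after the grace period.
        process.terminate()
        process.join()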

def chain_config():
    _chain_config = ChainConfig('test_chain')
    initialize_data_dir(_chain_config)
    return _chain_config
Example #12
def main() -> None:
    args = parser.parse_args()

    if args.ropsten:
        chain_identifier = ROPSTEN
    else:
        # TODO: mainnet
        chain_identifier = ROPSTEN

    if args.light:
        sync_mode = SYNC_LIGHT
    else:
        # TODO: actually use args.sync_mode (--sync-mode)
        sync_mode = SYNC_LIGHT

    chain_config = ChainConfig.from_parser_args(
        chain_identifier,
        args,
    )

    if not is_data_dir_initialized(chain_config):
        # TODO: this will only work as is for chains with known genesis
        # parameters.  Need to flesh out how genesis parameters for custom
        # chains are defined and passed around.
        initialize_data_dir(chain_config)

    # TODO: needs to be made generic once we have non-light modes.
    pool_class = HardCodedNodesPeerPool

    # if console command, run the trinity CLI
    if args.subcommand == 'attach':
        console(chain_config.jsonrpc_ipc_path,
                use_ipython=not args.vanilla_shell)
        sys.exit(0)

    logger, log_queue, listener = setup_trinity_logging(args.log_level.upper())

    # start the listener thread to handle logs produced by other processes in
    # the local logger.
    listener.start()

    # First initialize the database process.
    database_server_process = ctx.Process(target=run_database_process,
                                          args=(
                                              chain_config,
                                              LevelDB,
                                          ),
                                          kwargs={'log_queue': log_queue})

    # For now we just run the light sync against ropsten by default.
    networking_process = ctx.Process(target=run_networking_process,
                                     args=(chain_config, sync_mode,
                                           pool_class),
                                     kwargs={'log_queue': log_queue})

    # start the processes
    database_server_process.start()
    wait_for_ipc(chain_config.database_ipc_path)

    networking_process.start()

    try:
        if args.subcommand == 'console':
            console(chain_config.jsonrpc_ipc_path,
                    use_ipython=not args.vanilla_shell)
        else:
            networking_process.join()
    except KeyboardInterrupt:
        logger.info('Keyboard Interrupt: Stopping')
        kill_process_gracefully(networking_process)
        logger.info('KILLED networking_process')
        kill_process_gracefully(database_server_process)
        logger.info('KILLED database_server_process')

def chain_config():
    return ChainConfig('test_chain')
Example #14
def chain_config():
    _chain_config = ChainConfig(network_id=1)
    initialize_data_dir(_chain_config)
    return _chain_config
Example #15
def main() -> None:
    args = parser.parse_args()

    logger, log_queue, listener = setup_trinity_logging(args.log_level.upper())

    if args.network_id not in PRECONFIGURED_NETWORKS:
        raise NotImplementedError(
            "Unsupported network id: {0}.  Only the ropsten and mainnet "
            "networks are supported.".format(args.network_id))

    if args.sync_mode != SYNC_LIGHT:
        raise NotImplementedError(
            "Only light sync is supported.  Run with `--sync-mode=light` or `--light`"
        )

    chain_config = ChainConfig.from_parser_args(args)

    if not is_data_dir_initialized(chain_config):
        # TODO: this will only work as is for chains with known genesis
        # parameters.  Need to flesh out how genesis parameters for custom
        # chains are defined and passed around.
        initialize_data_dir(chain_config)

    # TODO: needs to be made generic once we have non-light modes.
    pool_class = HardCodedNodesPeerPool

    # if console command, run the trinity CLI
    if args.subcommand == 'attach':
        console(chain_config.jsonrpc_ipc_path,
                use_ipython=not args.vanilla_shell)
        sys.exit(0)

    # start the listener thread to handle logs produced by other processes in
    # the local logger.
    listener.start()

    # First initialize the database process.
    database_server_process = ctx.Process(target=run_database_process,
                                          args=(
                                              chain_config,
                                              LevelDB,
                                          ),
                                          kwargs={'log_queue': log_queue})

    # For now we just run the light sync against ropsten by default.
    networking_process = ctx.Process(target=run_networking_process,
                                     args=(chain_config, args.sync_mode,
                                           pool_class),
                                     kwargs={'log_queue': log_queue})

    # start the processes
    database_server_process.start()
    wait_for_ipc(chain_config.database_ipc_path)

    networking_process.start()

    try:
        if args.subcommand == 'console':
            console(chain_config.jsonrpc_ipc_path,
                    use_ipython=not args.vanilla_shell)
        else:
            networking_process.join()
    except KeyboardInterrupt:
        logger.info('Keyboard Interrupt: Stopping')
        kill_process_gracefully(networking_process)
        logger.info('KILLED networking_process')
        kill_process_gracefully(database_server_process)
        logger.info('KILLED database_server_process')
Example #16
def main() -> None:
    args = parser.parse_args()

    if args.ropsten:
        chain_identifier = ROPSTEN
    else:
        # TODO: mainnet
        chain_identifier = ROPSTEN

    if args.light:
        sync_mode = SYNC_LIGHT
    else:
        # TODO: actually use args.sync_mode (--sync-mode)
        sync_mode = SYNC_LIGHT

    chain_config = ChainConfig.from_parser_args(
        chain_identifier,
        args,
    )

    if not is_data_dir_initialized(chain_config):
        # TODO: this will only work as is for chains with known genesis
        # parameters.  Need to flesh out how genesis parameters for custom
        # chains are defined and passed around.
        initialize_data_dir(chain_config)

    pool_class = PeerPool
    if args.local_geth:
        pool_class = LocalGethPeerPool

    # if console command, run the trinity CLI
    if args.subcommand == 'console':
        use_ipython = not args.vanilla_shell
        debug = args.log_level.upper() == 'DEBUG'

        # TODO: this should use the base `Chain` class rather than the protocol
        # class since it's just a repl with access to the chain.
        chain_class = get_chain_protocol_class(chain_config, sync_mode)
        chaindb = FakeAsyncChainDB(LevelDB(chain_config.database_dir))
        if not is_database_initialized(chaindb):
            initialize_database(chain_config, chaindb)
        peer_pool = pool_class(LESPeer, chaindb, chain_config.network_id, chain_config.nodekey)

        chain = chain_class(chaindb, peer_pool)
        console(chain, use_ipython=use_ipython, debug=debug)
        sys.exit(0)

    logger, log_queue, listener = setup_trinity_logging(args.log_level.upper())

    # start the listener thread to handle logs produced by other processes in
    # the local logger.
    listener.start()

    # First initialize the database process.
    database_server_process = ctx.Process(
        target=run_database_process,
        args=(
            chain_config,
            LevelDB,
        ),
        kwargs={'log_queue': log_queue}
    )

    # For now we just run the light sync against ropsten by default.
    networking_process = ctx.Process(
        target=run_networking_process,
        args=(chain_config, sync_mode, pool_class),
        kwargs={'log_queue': log_queue}
    )

    # start the processes
    database_server_process.start()
    wait_for_ipc(chain_config.database_ipc_path)

    networking_process.start()

    try:
        networking_process.join()
    except KeyboardInterrupt:
        logger.info('Keyboard Interrupt: Stopping')
        kill_process_gracefully(networking_process)
        logger.info('KILLED networking_process')
        kill_process_gracefully(database_server_process)
        logger.info('KILLED database_server_process')
Example #17
def chain_config():
    return ChainConfig(network_id=1)
Example #18
def main() -> None:
    args = parser.parse_args()

    log_level = getattr(logging, args.log_level.upper())
    logger, log_queue, listener = setup_trinity_logging(log_level)

    if args.network_id not in PRECONFIGURED_NETWORKS:
        raise NotImplementedError(
            "Unsupported network id: {0}.  Only the ropsten and mainnet "
            "networks are supported.".format(args.network_id))

    chain_config = ChainConfig.from_parser_args(args)

    if not is_data_dir_initialized(chain_config):
        # TODO: this will only work as is for chains with known genesis
        # parameters.  Need to flesh out how genesis parameters for custom
        # chains are defined and passed around.
        initialize_data_dir(chain_config)

    # TODO: needs to be made generic once we have non-light modes.
    pool_class = HardCodedNodesPeerPool

    # if console command, run the trinity CLI
    if args.subcommand == 'attach':
        console(chain_config.jsonrpc_ipc_path,
                use_ipython=not args.vanilla_shell)
        sys.exit(0)

    # start the listener thread to handle logs produced by other processes in
    # the local logger.
    listener.start()

    logging_kwargs = {
        'log_queue': log_queue,
        'log_level': log_level,
    }

    # First initialize the database process.
    database_server_process = ctx.Process(
        target=run_database_process,
        args=(
            chain_config,
            LevelDB,
        ),
        kwargs=logging_kwargs,
    )

    # TODO: Combine run_fullnode_process/run_lightnode_process into a single function that simply
    # passes the sync mode to p2p.Server, which then selects the appropriate sync service.
    networking_proc_fn = run_fullnode_process
    if args.sync_mode == SYNC_LIGHT:
        networking_proc_fn = run_lightnode_process
    networking_process = ctx.Process(
        target=networking_proc_fn,
        args=(chain_config, pool_class),
        kwargs=logging_kwargs,
    )

    # start the processes
    database_server_process.start()
    wait_for_ipc(chain_config.database_ipc_path)

    networking_process.start()

    try:
        if args.subcommand == 'console':
            console(chain_config.jsonrpc_ipc_path,
                    use_ipython=not args.vanilla_shell)
        else:
            networking_process.join()
    except KeyboardInterrupt:
        logger.info('Keyboard Interrupt: Stopping')
        kill_process_gracefully(networking_process)
        logger.info('KILLED networking_process')
        kill_process_gracefully(database_server_process)
        logger.info('KILLED database_server_process')

def chain_config():
    _chain_config = ChainConfig('ropsten')
    initialize_data_dir(_chain_config)
    return _chain_config
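
Every variant of main() above blocks on wait_for_ipc before starting the networking process, so the database process is reachable before networking starts. A rough sketch of such a wait loop, assuming it simply polls the filesystem with a timeout, could be:

import os
import time

def wait_for_ipc(ipc_path, timeout=10):
    # Illustration only; the real helper may differ.
    deadline = time.monotonic() + timeout
    while not os.path.exists(ipc_path):
        if time.monotonic() > deadline:
            raise TimeoutError("IPC socket never appeared: {0}".format(ipc_path))
        time.sleep(0.05)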