Example #1
def _adjust_options(self, config):
    # Config defaults appear in all sections, so they have to be
    # filtered out when collecting per-section options.
    defaults = config.defaults().keys()
    # See which options are already defined and add the missing ones.
    preset = [(option.section, option.setting) for option in self.items()]
    for section in config.sections():
        for name in config.options(section):
            if (section, name) not in preset and name not in defaults:
                self.add_option(Option(self, section, name))
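
The filtering against defaults above is needed because configparser reports every DEFAULT-section value as an option of every section. A minimal standalone illustration:

import configparser

config = configparser.ConfigParser()
config.read_string("""
[DEFAULT]
log_level = INFO

[server]
port = 8080
""")

print(dict(config.defaults()))           # {'log_level': 'INFO'}
print(sorted(config.options("server")))  # ['log_level', 'port']
# The DEFAULT value shows up as an option of [server] too, which is why
# _adjust_options skips names found in config.defaults().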
Example #2
import logging.config
import sys
from pathlib import Path

# get_args, logger, SCRIPT_NAME, process_log and the tools package are
# defined elsewhere in the surrounding module.
def main():
    args = get_args()
    try:
        config = tools.config.create_config()
    except tools.ToolsError as err:
        logger.critical(str(err))
        sys.exit(3)

    log_file = Path(config.defaults().get('log_dir')).joinpath(
        f'{SCRIPT_NAME}.log').expanduser()
    logging.config.dictConfig(
        tools.utils.get_log_config(log_file,
                                   __name__,
                                   verbosity=args.verbosity))
    process_log(Path(args.log_file).expanduser(), args.field, args.max_diff)
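
tools.utils.get_log_config is project-specific and not shown here. As a rough sketch of the kind of dictionary such a helper could return for logging.config.dictConfig (the helper below is hypothetical, not the project's actual implementation):

import logging.config

def get_log_config(log_file, name, verbosity=0):
    # Hypothetical helper: map verbosity 0/1/2 to WARNING/INFO/DEBUG and
    # log to a single file. Names and layout are assumptions.
    level = ("WARNING", "INFO", "DEBUG")[max(0, min(verbosity, 2))]
    return {
        "version": 1,
        "formatters": {
            "plain": {"format": "%(asctime)s %(levelname)s %(name)s: %(message)s"},
        },
        "handlers": {
            "file": {
                "class": "logging.FileHandler",
                "filename": str(log_file),
                "formatter": "plain",
            },
        },
        "loggers": {name: {"handlers": ["file"], "level": level}},
    }

logging.config.dictConfig(get_log_config("example.log", __name__, verbosity=1))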
Example #3
def main() -> None:
    import argparse
    import configparser
    import os
    import re
    from itertools import chain, count
    from typing import List

    # Host, get_free_port, NodeConfig, is_checksum_address, to_checksum_address,
    # to_canonical_address, pex, Amount, Janitor, StressTestConfiguration,
    # start_and_wait_for_all_servers, wait_for_user_input and run_stress_test
    # are defined elsewhere in the surrounding module.
    NODE_SECTION_RE = re.compile("^node[0-9]+")

    parser = argparse.ArgumentParser()
    parser.add_argument("--nodes-data-dir", default=os.getcwd())
    parser.add_argument("--wait-after-first-sync",
                        default=False,
                        action="store_true")
    parser.add_argument("--profiler-data-directory", default=None)
    parser.add_argument("--interface", default="127.0.0.1")
    parser.add_argument("--iterations", default=5, type=int)
    parser.add_argument("config")
    args = parser.parse_args()

    if args.profiler_data_directory is not None and os.geteuid() != 0:
        raise RuntimeError(
            "To enable profiling the script has to be executed as root.")

    config = configparser.ConfigParser()
    config.read(args.config)

    datadir = args.nodes_data_dir

    interface = Host(args.interface)
    port_generator = get_free_port(5000)
    retry_timeout = 1

    nodes_config: List[NodeConfig] = []

    token_address = config.defaults()["token-address"]
    if not is_checksum_address(token_address):
        raise ValueError(
            f"Invalid token address {token_address}, check that it is checksummed.")

    defaults = {
        "--log-config": "raiden:DEBUG",
        "--environment-type": "development",
        "--datadir": datadir,
    }

    for section in config:
        if NODE_SECTION_RE.match(section):
            node_config = config[section]
            address = node_config["address"]

            node = defaults.copy()
            node.update({
                "--keystore-path": node_config["keystore-path"],
                "--password-file": node_config["password-file"],
                "--eth-rpc-endpoint": node_config["eth-rpc-endpoint"],
                "--network-id": node_config["network-id"],
                "--address": address,
            })

            pathfinding_url = node_config.get("pathfinding-service-address")
            if pathfinding_url is not None:
                node["--pathfinding-service-address"] = pathfinding_url

            raiden_args = [
                "raiden",
                "--accept-disclaimer",
                "--log-json",
                "--disable-debug-logfile",
                "--flat-fee",
                token_address,
                "0",
                "--proportional-fee",
                token_address,
                "0",
                "--proportional-imbalance-fee",
                token_address,
                "0",
            ]
            # Flatten the {"--flag": value} mapping into an alternating
            # ["--flag", value, ...] argument list.
            raiden_args.extend(chain.from_iterable(node.items()))

            # The REST interface expects a checksummed address, so normalize
            # it here.
            address = to_checksum_address(address)

            nodedir = os.path.join(
                datadir, f"node_{pex(to_canonical_address(address))}")
            nodes_config.append(
                NodeConfig(raiden_args, interface, address, nodedir))

    # TODO: Determine the `capacity_lower_bound` by querying the nodes.
    capacity_lower_bound = 1130220

    profiler_data_directory = args.profiler_data_directory

    iterations = args.iterations
    if iterations is None:
        # Run forever. Note this branch is unreachable while --iterations
        # has a default of 5; change the default to None to enable it.
        iteration_counter = count()
    else:
        iteration_counter = iter(range(iterations))

    # def stop_on_signal(sig=None, _frame=None):
    #     stop.set()
    # gevent.signal(signal.SIGQUIT, stop_on_signal)
    # gevent.signal(signal.SIGTERM, stop_on_signal)
    # gevent.signal(signal.SIGINT, stop_on_signal)

    # TODO: If any of the processes crashes, the script should collect and
    # bundle the logs.
    #
    # Cleanup with the Janitor is not strictly necessary for the stress test,
    # since one can assume a bug happened and the state of the node is
    # inconsistent; however, it is nice to have.
    with Janitor() as nursery:
        nodes_running = start_and_wait_for_all_servers(nursery, port_generator,
                                                       nodes_config,
                                                       retry_timeout)

        if nodes_running is None:
            return

        if args.wait_after_first_sync:
            nursery.spawn_under_watch(wait_for_user_input).get()

        test_config = StressTestConfiguration(
            port_generator,
            retry_timeout,
            Amount(capacity_lower_bound),
            token_address,
            iteration_counter,
            profiler_data_directory,
        )

        nursery.spawn_under_watch(run_stress_test, nursery, nodes_running,
                                  test_config)
        nursery.wait(timeout=None)
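
For reference, a sketch of the INI file this script expects, reconstructed from the keys the loop above reads. All addresses, paths, and endpoints are placeholders, not values from a real deployment:

import configparser
import re

EXAMPLE_CONFIG = """
[DEFAULT]
token-address = 0x0000000000000000000000000000000000000000

[node0]
address = 0x0000000000000000000000000000000000000001
keystore-path = ./node0/keystore
password-file = ./node0/password.txt
eth-rpc-endpoint = http://127.0.0.1:8545
network-id = 4321
# optional:
# pathfinding-service-address = https://pfs.example

[node1]
address = 0x0000000000000000000000000000000000000002
keystore-path = ./node1/keystore
password-file = ./node1/password.txt
eth-rpc-endpoint = http://127.0.0.1:8545
network-id = 4321
"""

config = configparser.ConfigParser()
config.read_string(EXAMPLE_CONFIG)
NODE_SECTION_RE = re.compile("^node[0-9]+")
print([s for s in config.sections() if NODE_SECTION_RE.match(s)])  # ['node0', 'node1']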