Example #1
0
def stop_clients(clients=None, impls=None):
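    """Stop the containers for the given clients.

    Falls back to stopping every client in the inventory when no client
    names are passed. The 'impls' argument is currently unused.
    """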
    inventory = Inventory()
    nodenames = []
    if not clients:
        for nodename, ip in inventory.clients.items():
            nodenames.append(nodename)
    else:
        for nodename in clients:
            nodenames.append(nodename)

    if nodenames:
        stop_containers(nodenames)
Example #2
0
def main():
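    """Entry point for the Ethereum system-testing CLI.

    Handles the ls/stop/rm/cleanup subcommands directly; otherwise prepares
    AMIs when no amis.json cache exists, launches boot and test node
    instances, runs the selected scenarios, and offers cleanup/teardown at
    the end.
    """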
    parser = ArgumentParser(version=__version__)
    args = parse_arguments(parser)

    set_logging(args.debug)

    logger.info("=====")
    logger.info("Ethereum system-testing %s", __version__)
    logger.info("=====\n")

    inventory = Inventory()

    if args.command == "ls":
        # List machines
        machines = machine_list()
        logger.info("Machines:")
        logger.info(machines)
        logger.info("===")
        raise SystemExit
    elif args.command == "stop":
        nodenames = []
        if args.parameters:
            if "boot" in args.parameters:
                for nodename in inventory.bootnodes:
                    nodenames.append(nodename)
            else:
                nodenames = args.parameters
        else:
            for nodename in inventory.clients:
                nodenames.append(nodename)
        stop_containers(nodenames)
        raise SystemExit
    elif args.command == "rm":
        nodenames = []
        if args.parameters:
            if "boot" in args.parameters:
                for nodename in inventory.bootnodes:
                    nodenames.append(nodename)
            else:
                nodenames = args.parameters
        else:
            for nodename in inventory.clients:
                nodenames.append(nodename)
        if not confirm("This will terminate %s, continue?" % nodenames, default=False):
            logger.warn("Aborting...")
            raise SystemExit
        teardown(nodenames)
        raise SystemExit
    elif args.command == "cleanup":
        # Cleanup - TODO per implementation / filters
        if not confirm("This will terminate all instances including ElasticSearch, continue?", default=False):
            logger.warn("Aborting...")
            raise SystemExit
        nodenames = []
        for nodename in inventory.instances:
            nodenames.append(nodename)
        teardown(nodenames)
        raise SystemExit

    # Create certs if they don't exist, otherwise we can end up creating
    # the same file in parallel in preparation steps
    if not os.path.exists(os.path.join(os.path.expanduser("~"), ".docker", "machine", "certs")):
        logging.info("No certificates found, creating them...\n")
        machine("create --url tcp://127.0.0.1:2376 dummy")
        machine("rm dummy")
        logging.info("Certificates created.\n")

    # Ask to setup ES node
    es = None
    if not args.elasticsearch:
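        # Reuse the ElasticSearch IP cached in es.json by a previous run, if present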
        try:
            with open('es.json', 'r') as f:
                es = json.load(f)
            es = es['ip']
        except Exception:
            if confirm("No ElasticSearch node was found, set one up?"):
                user = raw_input("Choose a username for Kibana: ")
                passwd = getpass("Choose a password: "******"Confirm password: "******"Password doesn't match, aborting...")
                es = setup_es(args.vpc, args.region, args.zone, user, passwd)
            else:
                if confirm("Abort?"):
                    abort("Aborting...")
                else:
                    logger.warn("Running without ElasticSearch, tests will fail!")
    else:
        with open('es.json', 'w') as f:
            save_es = {'ip': args.elasticsearch}
            json.dump(save_es, f)
        es = args.elasticsearch

    # Total nodes
    total = args.cpp_nodes + args.go_nodes + args.python_nodes
    boot_total = args.cpp_boot + args.go_boot + args.python_boot

    # Determine if we need to prepare AMIs
    prepare_amis = False
    try:
        with open('amis.json', 'r') as f:
            ami_ids = json.load(f)
    except Exception:
        prepare_amis = True

    # Confirm setup parameters
    set_launch_or_run = "Setting up" if prepare_amis else (
                        "Launching" if not inventory.clients else "Running on")
    if not confirm("%s %s node%s (%s C++, %s Go, %s Python) in %s%s region, "
                   "using %s boot node%s (%s C++, %s Go, %s Python), "
                   "logging to ElasticSearch node at https://%s, "
                   "testing scenarios: %s. Continue?" % (
            set_launch_or_run,
            total,
            ("s" if total > 1 else ""),
            args.cpp_nodes,
            args.go_nodes,
            args.python_nodes,
            args.region,
            args.zone,
            boot_total,
            ("s" if boot_total > 1 else ""),
            args.cpp_boot,
            args.go_boot,
            args.python_boot,
            es,
            args.scenarios)):
        logger.warn("Aborting...")
        raise SystemExit

    # Set images from command line arguments / defaults
    images = {
        'cpp': args.cpp_image,
        'go': args.go_image,
        'python': args.python_image
    }

    # TODO Compare inventory to see how many nodes need to be prepared
    # Prepare nodes, creates new AMIs / stores IDs to file for reuse
    if prepare_amis:
        # TODO per-user nodenames / tags
        clients = []
        nodenames = []
        if args.cpp_nodes:
            clients.append("cpp")
            nodenames.append("prepare-cpp")
        if args.go_nodes:
            clients.append("go")
            nodenames.append("prepare-go")
        if args.python_nodes:
            clients.append("python")
            nodenames.append("prepare-python")

        dag = False
        if confirm("Create DAG cache with that?"):
            dag = True

        with settings(warn_only=False), rollback(nodenames):
            logging.info("Launching prepare nodes...")
            launch_prepare_nodes(args.vpc, args.region, args.zone, clients)
        with settings(warn_only=False), rollback(nodenames):
            logging.info("Preparing AMIs...")
            ami_ids = prepare_nodes(args.region, args.zone, es, clients=clients, images=images, dag=dag)

        # Teardown prepare nodes
        teardown(nodenames)

    # TODO Compare inventory to see how many nodes need to be launched
    inventory = Inventory()

    # Launch bootnodes
    if (args.cpp_boot or args.go_boot or args.python_boot) and not inventory.bootnodes:
        logging.info("Launching bootnode instances...")

        nodes = {'cpp': [], 'go': [], 'python': []}

        for x in xrange(0, args.cpp_boot):
            nodes['cpp'].append("bootnode-cpp-%s" % x)
        for x in xrange(0, args.go_boot):
            nodes['go'].append("bootnode-go-%s" % x)
        for x in xrange(0, args.python_boot):
            nodes['python'].append("bootnode-python-%s" % x)
        launch_nodes(
            args.vpc,
            args.region,
            args.zone,
            ami_ids,
            nodes)

        logging.info("Starting bootnodes...")
        run_bootnodes(nodes, images)

    # Launch testnodes
    if (args.cpp_nodes or args.go_nodes or args.python_nodes) and not inventory.clients:
        logging.info("Launching testnode instances...")

        nodes = {'cpp': [], 'go': [], 'python': []}
        nodenames = []

        for x in xrange(0, args.cpp_nodes):
            nodes['cpp'].append("testnode-cpp-%s" % x)
        for x in xrange(0, args.go_nodes):
            nodes['go'].append("testnode-go-%s" % x)
        for x in xrange(0, args.python_nodes):
            nodes['python'].append("testnode-python-%s" % x)
        nodenames = nodes['cpp'] + nodes['go'] + nodes['python']

        logger.debug("Nodes: %s" % nodes)
        logger.debug("Nodenames: %s" % nodenames)

        # Launch test nodes using prepared AMIs from amis.json if it exists
        launch_nodes(
            args.vpc,
            args.region,
            args.zone,
            ami_ids,
            nodes)

        # Create geth accounts for Go nodes
        inventory = Inventory()
        go_nodes = []
        for node in nodes['go']:
            if node in inventory.clients:
                go_nodes.append(node)
        logging.info("Creating geth accounts...")
        create_accounts(go_nodes, args.go_image)

    # List inventory
    if args.debug:
        # List machines
        machines = machine_list()
        logger.info("Machines:")
        logger.info(machines)
        logger.info("===")

        inventory = Inventory()
        logger.debug('bootnodes: %s' % inventory.bootnodes)
        logger.debug('elasticsearch: %s' % inventory.es)
        logger.debug('clients: %s' % inventory.clients)
        logger.debug('instances: %s' % inventory.instances)
        # logger.info('roles: %s' % inventory.roles)

    # Load scenarios
    if args.scenarios == 'all':
        load_scenarios = scenarios
    else:
        load_scenarios = []
        for scenario in args.scenarios:
            load_scenarios.append(
                os.path.abspath(os.path.join(path, '..', 'scenarios', "scenario_%s.py" % scenario)))
    logger.info("Testing %s" % load_scenarios)

    # Run scenarios
    # TODO ask to run sequentially or in parallel?
    run_scenarios(load_scenarios, norun=args.norun, testnet=args.testnet)

    # Cleanup and teardown
    nodenames = []
    inventory = Inventory()
    for nodename in inventory.clients:
        nodenames.append(nodename)

    if confirm("Cleanup data folders?", default=False):
        cleanup_data(nodenames)

        # Recreate geth accounts for Go nodes
        go_nodes = []
        for nodename in nodenames:
            if '-go-' in nodename:
                go_nodes.append(nodename)
        logging.info("Recreating geth accounts...")
        create_accounts(go_nodes, args.go_image)

    if confirm("Teardown running nodes?", default=False):
        teardown(nodenames)
Example #3
0
def main():
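    """Entry point for the Ethereum system-testing CLI.

    Handles the ls/stop/rm/cleanup subcommands directly; otherwise prepares
    AMIs when no amis.json cache exists, launches boot and test node
    instances, runs the selected scenarios, and offers cleanup/teardown at
    the end.
    """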
    parser = ArgumentParser(version=__version__)
    args = parse_arguments(parser)

    set_logging(args.debug)

    logger.info("=====")
    logger.info("Ethereum system-testing %s", __version__)
    logger.info("=====\n")

    inventory = Inventory()

    if args.command == "ls":
        # List machines
        machines = machine_list()
        logger.info("Machines:")
        logger.info(machines)
        logger.info("===")
        raise SystemExit
    elif args.command == "stop":
        nodenames = []
        if args.parameters:
            if "boot" in args.parameters:
                for nodename in inventory.bootnodes:
                    nodenames.append(nodename)
            else:
                nodenames = args.parameters
        else:
            for nodename in inventory.clients:
                nodenames.append(nodename)
        stop_containers(nodenames)
        raise SystemExit
    elif args.command == "rm":
        nodenames = []
        if args.parameters:
            if "boot" in args.parameters:
                for nodename in inventory.bootnodes:
                    nodenames.append(nodename)
            else:
                nodenames = args.parameters
        else:
            for nodename in inventory.clients:
                nodenames.append(nodename)
        if not confirm("This will terminate %s, continue?" % nodenames,
                       default=False):
            logger.warn("Aborting...")
            raise SystemExit
        teardown(nodenames)
        raise SystemExit
    elif args.command == "cleanup":
        # Cleanup - TODO per implementation / filters
        if not confirm(
                "This will terminate all instances including ElasticSearch, continue?",
                default=False):
            logger.warn("Aborting...")
            raise SystemExit
        nodenames = []
        for nodename in inventory.instances:
            nodenames.append(nodename)
        teardown(nodenames)
        raise SystemExit

    # Create certs if they don't exist, otherwise we can end up creating
    # the same file in parallel in preparation steps
    if not os.path.exists(
            os.path.join(os.path.expanduser("~"), ".docker", "machine",
                         "certs")):
        logging.info("No certificates found, creating them...\n")
        machine("create --url tcp://127.0.0.1:2376 dummy")
        machine("rm dummy")
        logging.info("Certificates created.\n")

    # Ask to setup ES node
    es = None
    if not args.elasticsearch:
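        # Reuse the ElasticSearch IP cached in es.json by a previous run, if present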
        try:
            with open('es.json', 'r') as f:
                es = json.load(f)
            es = es['ip']
        except Exception:
            if confirm("No ElasticSearch node was found, set one up?"):
                user = raw_input("Choose a username for Kibana: ")
                passwd = getpass("Choose a password: "******"Confirm password: "******"Password doesn't match, aborting...")
                es = setup_es(args.vpc, args.region, args.zone, user, passwd)
            else:
                if confirm("Abort?"):
                    abort("Aborting...")
                else:
                    logger.warn(
                        "Running without ElasticSearch, tests will fail!")
    else:
        with open('es.json', 'w') as f:
            save_es = {'ip': args.elasticsearch}
            json.dump(save_es, f)
        es = args.elasticsearch

    # Total nodes
    total = args.cpp_nodes + args.go_nodes + args.python_nodes
    boot_total = args.cpp_boot + args.go_boot + args.python_boot

    # Determine if we need to prepare AMIs
    prepare_amis = False
    try:
        with open('amis.json', 'r') as f:
            ami_ids = json.load(f)
    except Exception:
        prepare_amis = True

    # Confirm setup parameters
    set_launch_or_run = "Setting up" if prepare_amis else (
        "Launching" if not inventory.clients else "Running on")
    if not confirm("%s %s node%s (%s C++, %s Go, %s Python) in %s%s region, "
                   "using %s boot node%s (%s C++, %s Go, %s Python), "
                   "logging to ElasticSearch node at https://%s, "
                   "testing scenarios: %s. Continue?" %
                   (set_launch_or_run, total,
                    ("s" if total > 1 else ""), args.cpp_nodes, args.go_nodes,
                    args.python_nodes, args.region, args.zone, boot_total,
                    ("s" if boot_total > 1 else ""), args.cpp_boot,
                    args.go_boot, args.python_boot, es, args.scenarios)):
        logger.warn("Aborting...")
        raise SystemExit

    # Set images from command line arguments / defaults
    images = {
        'cpp': args.cpp_image,
        'go': args.go_image,
        'python': args.python_image
    }

    # TODO Compare inventory to see how many nodes need to be prepared
    # Prepare nodes, creates new AMIs / stores IDs to file for reuse
    if prepare_amis:
        # TODO per-user nodenames / tags
        clients = []
        nodenames = []
        if args.cpp_nodes:
            clients.append("cpp")
            nodenames.append("prepare-cpp")
        if args.go_nodes:
            clients.append("go")
            nodenames.append("prepare-go")
        if args.python_nodes:
            clients.append("python")
            nodenames.append("prepare-python")

        dag = False
        if confirm("Create DAG cache with that?"):
            dag = True

        with settings(warn_only=False), rollback(nodenames):
            logging.info("Launching prepare nodes...")
            launch_prepare_nodes(args.vpc, args.region, args.zone, clients)
        with settings(warn_only=False), rollback(nodenames):
            logging.info("Preparing AMIs...")
            ami_ids = prepare_nodes(args.region,
                                    args.zone,
                                    es,
                                    clients=clients,
                                    images=images,
                                    dag=dag)

        # Teardown prepare nodes
        teardown(nodenames)

    # TODO Compare inventory to see how many nodes need to be launched
    inventory = Inventory()

    # Launch bootnodes
    if (args.cpp_boot or args.go_boot
            or args.python_boot) and not inventory.bootnodes:
        logging.info("Launching bootnode instances...")

        nodes = {'cpp': [], 'go': [], 'python': []}

        for x in xrange(0, args.cpp_boot):
            nodes['cpp'].append("bootnode-cpp-%s" % x)
        for x in xrange(0, args.go_boot):
            nodes['go'].append("bootnode-go-%s" % x)
        for x in xrange(0, args.python_boot):
            nodes['python'].append("bootnode-python-%s" % x)
        launch_nodes(args.vpc, args.region, args.zone, ami_ids, nodes)

        logging.info("Starting bootnodes...")
        run_bootnodes(nodes, images)

    # Launch testnodes
    if (args.cpp_nodes or args.go_nodes
            or args.python_nodes) and not inventory.clients:
        logging.info("Launching testnode instances...")

        nodes = {'cpp': [], 'go': [], 'python': []}
        nodenames = []

        for x in xrange(0, args.cpp_nodes):
            nodes['cpp'].append("testnode-cpp-%s" % x)
        for x in xrange(0, args.go_nodes):
            nodes['go'].append("testnode-go-%s" % x)
        for x in xrange(0, args.python_nodes):
            nodes['python'].append("testnode-python-%s" % x)
        nodenames = nodes['cpp'] + nodes['go'] + nodes['python']

        logger.debug("Nodes: %s" % nodes)
        logger.debug("Nodenames: %s" % nodenames)

        # Launch test nodes using prepared AMIs from amis.json if it exists
        launch_nodes(args.vpc, args.region, args.zone, ami_ids, nodes)

        # Create geth accounts for Go nodes
        inventory = Inventory()
        go_nodes = []
        for node in nodes['go']:
            if node in inventory.clients:
                go_nodes.append(node)
        logging.info("Creating geth accounts...")
        create_accounts(go_nodes, args.go_image)

    # List inventory
    if args.debug:
        # List machines
        machines = machine_list()
        logger.info("Machines:")
        logger.info(machines)
        logger.info("===")

        inventory = Inventory()
        logger.debug('bootnodes: %s' % inventory.bootnodes)
        logger.debug('elasticsearch: %s' % inventory.es)
        logger.debug('clients: %s' % inventory.clients)
        logger.debug('instances: %s' % inventory.instances)
        # logger.info('roles: %s' % inventory.roles)

    # Load scenarios
    if args.scenarios == 'all':
        load_scenarios = scenarios
    else:
        load_scenarios = []
        for scenario in args.scenarios:
            load_scenarios.append(
                os.path.abspath(
                    os.path.join(path, '..', 'scenarios',
                                 "scenario_%s.py" % scenario)))
    logger.info("Testing %s" % load_scenarios)

    # Run scenarios
    # TODO ask to run sequentially or in parallel?
    run_scenarios(load_scenarios, norun=args.norun, testnet=args.testnet)

    # Cleanup and teardown
    nodenames = []
    inventory = Inventory()
    for nodename in inventory.clients:
        nodenames.append(nodename)

    if confirm("Cleanup data folders?", default=False):
        cleanup_data(nodenames)

        # Recreate geth accounts for Go nodes
        go_nodes = []
        for nodename in nodenames:
            if '-go-' in nodename:
                go_nodes.append(nodename)
        logging.info("Recreating geth accounts...")
        create_accounts(go_nodes, args.go_image)

    if confirm("Teardown running nodes?", default=False):
        teardown(nodenames)