def test_kafka_multithreaded_usage(base_config):
    def _kafka_minitest():
        kafka = base_config.hosts.host.Kafka
        client = kafka.create_client()
        client.get_topics()

    concurrently.run({f"{i}": _kafka_minitest for i in range(3)})
def pytest_runtest_teardown(item):
    # Skip log collection for tests that were expected to fail (xfail).
    if getattr(item, '_evalxfail', None) and item._evalxfail.istrue():
        return
    logging.debug(
        f"\n<--------------runtest teardown of {'.'.join(item.listnames()[-2:])}------------------->\n"
    )
    base_config = item.funcargs.get('base_config')
    if not base_config:
        logging.error(
            "base_config fixture wasn't initialized properly, can't download logs")
        return
    _trigger_stage_hooks(base_config, item._request, "teardown")
    try:
        logs_dir = item.config.getoption(
            "--logs-dir",
            f'logs/{datetime.now().strftime("%Y_%m_%d__%H%M_%S")}')
        logging.debug("concurrently downloading logs from hosts...")
        concurrently.run({
            host.ip: (download_host_logs, host, logs_dir,
                      item.config.hook.pytest_download_logs)
            for host in base_config.hosts.values()
        })
    except subprocess.CalledProcessError:
        logging.exception("was unable to download logs from a host")
    item.config.hook.pytest_after_test(item=item, base_config=base_config)
def clear_all_buckets(boto3_client):
    bucket_names = [
        bucket['Name'] for bucket in boto3_client.list_buckets()['Buckets']
    ]
    jobs = {
        f"delete-job-{bucket}": partial(boto3_client.delete_bucket,
                                        Bucket=bucket)
        for bucket in bucket_names
    }
    if jobs:
        concurrently.run(jobs)
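
# Note: S3's `delete_bucket` only succeeds on empty buckets (BucketNotEmpty
# otherwise), so `clear_all_buckets` assumes objects were removed beforehand.
# A hedged sketch of that cleanup step (`_empty_bucket` is a hypothetical
# helper, not part of the original code):
def _empty_bucket(boto3_client, bucket):
    # Delete objects page by page; versioned buckets would additionally need
    # their object versions removed.
    paginator = boto3_client.get_paginator('list_objects_v2')
    for page in paginator.paginate(Bucket=bucket):
        objects = [{'Key': obj['Key']} for obj in page.get('Contents', [])]
        if objects:
            boto3_client.delete_objects(Bucket=bucket,
                                        Delete={'Objects': objects})
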
def setup_cluster(cluster, request):
    for host_name, config in request.function.__hardware_reqs.items():
        host = cluster.hosts[host_name]
        host.k3s_config = config['k3s_config']
        host.internal_ip = host.SshDirect.execute("hostname -I | awk '{print $1}'").strip()

    logging.info("Setting up k3s cluster")
    hosts = list(cluster.hosts.values())
    masters = [host for host in hosts if host.k3s_config["role"] == "master"]

    if not masters:
        raise Exception("Couldn't find any master node")
    main_master = masters[0]
    main_master.k8s_name = "k3s-master"

    main_master.SshDirect.execute(
        "curl -sfL https://get.k3s.io | sh -s - --cluster-init --cluster-reset --cluster-reset-restore-path=/root/k3s-infra-1174-snapshot")
    waiter.wait_nothrow(lambda: main_master.SshDirect.execute("journalctl --since='1 min ago' | grep 'restart without'"))
    main_master.SshDirect.execute(
        "curl -sfL https://get.k3s.io | sh -s - --node-name=k3s-master --disable='servicelb,traefik,local-storage,metrics-server'")

    main_master.SshDirect.execute("sudo chmod o+r /etc/rancher/k3s/k3s.yaml")
    cluster_token = main_master.SshDirect.execute("sudo cat /var/lib/rancher/k3s/server/token").strip()
    # hostname -I can return several addresses; take only the first one.
    cluster_ip = main_master.SshDirect.execute("hostname -I | awk '{print $1}'").strip()
    waiter.wait_nothrow(lambda: main_master.SshDirect.execute("kubectl get nodes"))

    nodes = [host for host in hosts if host.k3s_config['role'] == "node"]
    masters.remove(main_master)

    jobs = {}
    jobs.update({host.alias: partial(_join_agent, host, cluster_ip, cluster_token)
                 for host in nodes})
    jobs.update({master.alias: partial(_join_master, master, cluster_ip, cluster_token)
                 for master in masters})
    if jobs:
        concurrently.run(jobs)

    logging.info("Waiting for cluster to be Ready...")
    k8s_client = cluster.Kubectl.client()
    v1 = kubernetes.client.CoreV1Api(k8s_client)
    waiter.wait_for_predicate(lambda: len(v1.list_node().items) == len(hosts), timeout=30)
    logging.info(f"Number of nodes in cluster: {len(v1.list_node().items)}")
    waiter.wait_for_predicate(lambda: kubectl.is_cluster_ready(k8s_client), timeout=60)

    logging.info("Adding node labels and taints")
    _label_and_taint_nodes(k8s_client, hosts)
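
# `_join_agent` and `_join_master` are referenced above but not shown. A hedged
# reconstruction based on the standard k3s join flow (the real helpers may pass
# extra project-specific flags, e.g. node names or disabled components):
def _join_agent(host, cluster_ip, cluster_token):
    # Agents join through the K3S_URL/K3S_TOKEN install-script convention.
    host.SshDirect.execute(
        "curl -sfL https://get.k3s.io | "
        f"K3S_URL=https://{cluster_ip}:6443 K3S_TOKEN={cluster_token} sh -")


def _join_master(host, cluster_ip, cluster_token):
    # Additional servers join the embedded-etcd cluster via the `server` subcommand.
    host.SshDirect.execute(
        "curl -sfL https://get.k3s.io | "
        f"sh -s - server --server https://{cluster_ip}:6443 --token {cluster_token}")
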
@pytest.hookimpl(hookwrapper=True)
def pytest_runtest_setup(item):
    logging.debug(
        f"\n<---------runtest_setup {'.'.join(item.listnames()[-2:])}---------------->\n"
    )
    gossip.trigger("runtest_setup")
    configured_hw = configured_hardware(item._request)
    if not configured_hw:
        hardware = dict()
        hardware['machines'] = get_local_config(
            item.config.getoption("--hardware"))
        item.session.__initialized_hardware = hardware

    hardware = configured_hardware(item._request)
    assert hardware, "Couldn't find configured hardware in pytest_runtest_setup"
    first_machine = next(iter(hardware['machines'].values()))
    hut_conn_format = "HUT connection string:\n\n{}\n\n"
    if first_machine.get('password'):
        conn_string = (f"sshpass -p {first_machine['password']} "
                       f"ssh -o StrictHostKeyChecking=no "
                       f"{first_machine['user']}@{first_machine['ip']}")
    else:
        conn_string = (f"ssh -i {os.path.expanduser('~')}/.ssh/anyvision-devops.pem "
                       f"{first_machine['user']}@{first_machine['ip']}")
    logging.info(hut_conn_format.format(conn_string))
    outcome = yield  # control passes to the rest of setup (the base_config fixture) here
    try:
        outcome.get_result()
    except Exception:
        # Setup failed; try to salvage base_config so teardown can still download logs.
        try:
            base_config = item._request.getfixturevalue('base_config')
            item.funcargs['base_config'] = base_config
        except FixtureLookupError:
            # base_config itself failed to initialize.
            logging.error("error trying to init base_config fixture")
        raise
    base_config = item.funcargs['base_config']
    logging.debug("cleaning between tests..")
    init_cluster_structure(base_config, item.function.__cluster_config)
    if base_config.clusters:
        concurrently.run([cluster.clear_plugins
                          for cluster in base_config.clusters.values()])
    concurrently.run([host.clear_plugins
                      for host in base_config.hosts.values()])
    _trigger_stage_hooks(base_config, item._request, "setup")
    logging.debug("done runtest_setup")
    logging.debug("\n-----------------runtest call---------------\n")
def download(cluster, request):
    logs_dir = request.config.getoption("--logs-dir", f'logs/{datetime.now().strftime("%Y_%m_%d__%H%M_%S")}')
    concurrently.run([partial(download_host_logs, host, os.path.join(logs_dir, host.alias))
                      for host in cluster.hosts.values()])
def clean(cluster, request):
    concurrently.run([partial(ssh.ssh_direct_connect_session, host, request) for host in cluster.hosts.values()])
    logging.info("running devops clean_base_btwn_tests")
    cluster.ProxyDaemonSet.restart()
    for host in cluster.hosts.values():
        waiter.wait_nothrow(host.SSH.connect, timeout=30)
def deploy_proxy_pod(cluster, request):
    concurrently.run([partial(ssh.ssh_direct_connect_session, host, request) for host in cluster.hosts.values()])
    logging.info("Deploying proxy daemon-set")
    cluster.ProxyDaemonSet.run()
    for host in cluster.hosts.values():
        waiter.wait_nothrow(lambda: host.SSH.connect(port=host.tunnelport), timeout=60)
def clean_infra_between_tests(hosts, item, hook):
    concurrently.run([(clean, host, item, hook) for _, host in hosts])
def pytest_after_test(item, base_config):
    # Bound methods are captured per-cluster; a lambda in the comprehension
    # would late-bind and kill only the last cluster's proxy daemon-set.
    concurrently.run([cluster.ProxyDaemonSet.kill
                      for cluster in base_config.clusters.values()])
def pytest_clean_base_btwn_tests(base_config, item):
    logging.info("running devops clean_base_btwn_tests")
    # Bound methods avoid the same lambda late-binding bug (every job would
    # otherwise restart the last cluster's proxy daemon-set).
    concurrently.run([cluster.ProxyDaemonSet.restart
                      for cluster in base_config.clusters.values()])
    for host in base_config.hosts.values():
        waiter.wait_nothrow(host.SSH.connect, timeout=30)
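
# `waiter.wait_nothrow` and `waiter.wait_for_predicate` are project-internal
# polling helpers. A minimal sketch of the behaviour the snippets rely on
# (retry until success or timeout; the real implementation may differ):
import time


def wait_nothrow(func, timeout=30, interval=1):
    # Retry until `func` stops raising, or re-raise once the timeout expires.
    deadline = time.monotonic() + timeout
    while True:
        try:
            return func()
        except Exception:
            if time.monotonic() >= deadline:
                raise
            time.sleep(interval)


def wait_for_predicate(pred, timeout=30, interval=1):
    # Poll until `pred` returns a truthy value, or fail once the timeout expires.
    deadline = time.monotonic() + timeout
    while not pred():
        if time.monotonic() >= deadline:
            raise TimeoutError("predicate not satisfied within timeout")
        time.sleep(interval)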