Example #1
def test_mining(init_session, setup_network):
    current_index = get_curr_ind()
    ns = init_session
    layer_avg_size = testconfig['client']['args']['layer-average-size']
    layers_per_epoch = int(testconfig['client']['args']['layers-per-epoch'])
    # check only the third epoch
    epochs = 5
    last_layer = epochs * layers_per_epoch

    total_pods = len(setup_network.clients.pods) + len(
        setup_network.bootstrap.pods)

    layer_reached = queries.wait_for_latest_layer(testconfig["namespace"],
                                                  last_layer, layers_per_epoch,
                                                  total_pods)

    tts = 50  # seconds to wait before analysing the results
    sleep_print_backwards(tts)

    analyse.analyze_mining(testconfig['namespace'], layer_reached,
                           layers_per_epoch, layer_avg_size, total_pods)

    queries.assert_equal_layer_hashes(current_index, ns)
    queries.assert_equal_state_roots(current_index, ns)
    queries.assert_no_contextually_invalid_atxs(current_index, ns)
    validate_hare(current_index, ns)  # validate hare
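
The test above relies on a sleep_print_backwards helper imported from the suite's utilities; it is not shown in this example. A minimal sketch of what such a countdown helper could look like (the signature and messages are assumptions, not the project's actual implementation):

import time

def sleep_print_backwards(tts):
    # hypothetical helper: sleep for `tts` seconds while printing the time left
    print("Going to sleep for {0} seconds".format(tts))
    for remaining in range(tts, 0, -1):
        print("{0} seconds left".format(remaining))
        time.sleep(1)
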
Example #2
def test_hare_scale(init_session, setup_bootstrap_for_hare,
                    setup_clients_for_hare):
    current_index = get_curr_ind()
    total = int(testconfig['client']['replicas']) + int(
        testconfig['bootstrap']['replicas'])

    # Need to wait for 1 full iteration + the time it takes the logs to propagate to ES
    round_duration = int(
        testconfig['client']['args']['hare-round-duration-sec'])
    wakeup_delta = int(testconfig['client']['args']['hare-wakeup-delta'])
    layer_duration = int(testconfig['client']['args']['layer-duration-sec'])
    layers_count = 10
    print("Number of layers is ", layers_count)
    delay = layer_duration * layers_count + EFK_LOG_PROPAGATION_DELAY + wakeup_delta * (
        layers_count - 1) + round_duration
    print("Going to sleep for {0}".format(delay))
    time.sleep(delay)

    ns = testconfig['namespace']
    f = int(testconfig['client']['args']['hare-max-adversaries'])
    expect_hare(current_index, ns, 1, layers_count, total, f)
    max_mem = get_max_mem_usage(current_index, ns)
    print('Mem usage is {0} expected max is {1}'.format(
        max_mem, EXPECTED_MAX_MEM))
    assert max_mem < EXPECTED_MAX_MEM
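
The delay covers layers_count full layers, the time it takes the logs to reach Elasticsearch, the hare wake-up deltas between layers, and one hare round. With assumed example values (not the suite's real configuration) of layer-duration-sec=60, hare-wakeup-delta=10, hare-round-duration-sec=30 and EFK_LOG_PROPAGATION_DELAY=60, the sleep for 10 layers would be 60*10 + 60 + 10*(10-1) + 30 = 780 seconds.
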
Example #3
def check_pod_logs(pod_name, data):
    # return True when the pod's logs contain an entry matching the `data` filter
    current_index = get_curr_ind()
    res = query_message(current_index, testconfig['namespace'], pod_name, data,
                        False)
    return bool(res)
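
check_pod_logs simply reports whether query_message finds any matching log entry for the given pod; Example #8 below uses it to poll pods for a sync-completion message. A hypothetical call (the filter value and pod name here are placeholders, not the suite's actual constants):

SYNC_DONE = {"M": "sync done"}  # placeholder value mirroring the {"M": ...} filter shape used elsewhere in the suite
if check_pod_logs("client-abc123", SYNC_DONE):  # pod name is illustrative
    print("pod finished syncing")
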
Example #4
def test_add_delayed_nodes(init_session, add_curl, setup_bootstrap, start_poet, save_log_on_exit):
    current_index = get_curr_ind()
    bs_info = setup_bootstrap.pods[0]
    cspec = get_conf(bs_info, test_config['client'], test_config['genesis_delta'], setup_oracle=None,
                     setup_poet=setup_bootstrap.pods[0]['pod_ip'])
    ns = test_config['namespace']

    layer_duration = int(test_config['client']['args']['layer-duration-sec'])
    layers_per_epoch = int(test_config['client']['args']['layers-per-epoch'])
    epoch_duration = layer_duration * layers_per_epoch

    # start with 20 miners
    start_count = 20
    new_client_in_namespace(ns, setup_bootstrap, cspec, start_count)
    sleep_and_print(epoch_duration)  # wait epoch duration

    # add 10 each epoch
    num_to_add = 10
    num_epochs_to_add_clients = 4
    clients = []
    for i in range(num_epochs_to_add_clients):
        clients.append(new_client_in_namespace(ns, setup_bootstrap, cspec, num_to_add))
        print("Added client batch ", i, clients[i].pods[i]['name'])
        sleep_and_print(epoch_duration)

    print("Done adding clients. Going to wait for two epochs")
    # wait two more epochs
    wait_epochs = 3
    sleep_and_print(wait_epochs * epoch_duration)

    # total = bootstrap + first clients + added clients
    total = 1 + start_count + num_epochs_to_add_clients * num_to_add
    total_epochs = 1 + num_epochs_to_add_clients + wait_epochs  # add 1 for first epoch
    total_layers = layers_per_epoch * total_epochs
    first_layer_of_last_epoch = total_layers - layers_per_epoch
    f = int(test_config['client']['args']['hare-max-adversaries'])

    # validate
    print("Waiting 2 minutes for logs to propagate")
    sleep_and_print(120)

    print("Running validation")
    expect_hare(current_index, ns, first_layer_of_last_epoch, total_layers - 1, total, f)  # validate hare
    atx_last_epoch = query_atx_published(current_index, ns, first_layer_of_last_epoch)
    queries.assert_equal_layer_hashes(current_index, ns)
    assert len(atx_last_epoch) == total  # validate num of atxs in last epoch
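
With the values above, the expected network size is total = 1 + 20 + 4*10 = 61 nodes and total_epochs = 1 + 4 + 3 = 8, so total_layers is 8 * layers-per-epoch; the hare check is scoped to the last epoch only (layers first_layer_of_last_epoch through total_layers - 1), and the number of ATXs published for that epoch is expected to equal the node count.
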
Example #5
def test_hare_sanity(init_session, setup_bootstrap_for_hare,
                     setup_clients_for_hare):
    # NOTICE the following line should be present in the first test of the suite
    wait_genesis(GENESIS_TIME, testconfig['genesis_delta'])
    current_index = get_curr_ind()
    # Need to wait for 1 full iteration + the time it takes the logs to propagate to ES
    round_duration = int(
        testconfig['client']['args']['hare-round-duration-sec'])
    wakeup_delta = int(testconfig['client']['args']['hare-wakeup-delta'])
    layer_duration = int(testconfig['client']['args']['layer-duration-sec'])
    layers_count = 1
    print("Number of layers is ", layers_count)
    delay = layer_duration * layers_count + EFK_LOG_PROPAGATION_DELAY + wakeup_delta * (
        layers_count - 1) + round_duration
    print("Going to sleep for {0}".format(delay))
    time.sleep(delay)

    assert_all(current_index, testconfig['namespace'])
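
wait_genesis blocks the first test of a suite until the network's genesis time (plus the configured delta) has passed. A minimal sketch, assuming GENESIS_TIME is a datetime and genesis_delta is a number of seconds (both type assumptions, not the project's actual definitions):

import time
from datetime import datetime, timedelta

def wait_genesis(genesis_time, genesis_delta):
    # hypothetical sketch: sleep until genesis_time + genesis_delta has passed
    release_time = genesis_time + timedelta(seconds=genesis_delta)
    time_to_wait = (release_time - datetime.utcnow()).total_seconds()
    if time_to_wait > 0:
        print("Waiting {0} seconds for genesis".format(time_to_wait))
        time.sleep(time_to_wait)
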
Example #6
def search_pod_logs(namespace, pod_name, term):
    current_index = get_curr_ind()
    api = ES().get_search_api()
    fltr = Q("match_phrase", kubernetes__pod_name=pod_name) & Q(
        "match_phrase", kubernetes__namespace_name=namespace)
    s = Search(index=current_index,
               using=api).query('bool').filter(fltr).sort("time")
    res = s.execute()
    full = Search(index=current_index,
                  using=api).query('bool').filter(fltr).sort("time").extra(
                      size=res.hits.total)
    res = full.execute()
    hits = list(res.hits)
    print("Writing {0} log lines for pod {1}".format(len(hits), pod_name))
    found = False
    with open('./logs/' + pod_name + '.txt', 'w') as f:
        for i in hits:
            f.write(i.log)
            if term in i.log:
                found = True
    return found
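
This helper builds its query with elasticsearch_dsl's Search and Q and relies on the ES wrapper from tests.context (see Example #7 for that import). The imports the snippet assumes would be along these lines:

from elasticsearch_dsl import Search, Q  # query builder used above
from tests.context import ES             # wrapper exposing get_search_api(), as imported in Example #7
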
Example #7
import pytest
from pytest_testconfig import config as testconfig
import re
import random
from random import choice
from string import ascii_lowercase
import time

# noinspection PyUnresolvedReferences
from tests.context import ES
from tests.queries import query_message, poll_query_message
from tests.setup_utils import add_multi_clients
from tests.utils import get_conf, api_call, get_curr_ind


current_index = get_curr_ind()
timeout_factor = 1


def query_bootstrap_es(namespace, bootstrap_po_name):
    hits = poll_query_message(current_index, namespace, bootstrap_po_name, {"M": "Local node identity"}, expected=1)
    for h in hits:
        match = re.search(r"Local node identity >> (?P<bootstrap_key>\w+)", h.M)
        if match:
            return match.group('bootstrap_key')
    return None
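
query_bootstrap_es extracts the bootstrap node's public key from its "Local node identity" log line. A hypothetical call from a bootstrap fixture (the pod layout below is an assumption based on the other examples, not code from this module):

# hypothetical usage; setup_bootstrap.pods[0]['name'] mirrors the pod access pattern in Examples #4 and #8
bootstrap_key = query_bootstrap_es(testconfig['namespace'], setup_bootstrap.pods[0]['name'])
assert bootstrap_key is not None, "could not extract bootstrap key from the pod logs"
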

# ==============================================================================
#    Fixtures
# ==============================================================================
Example #8
def test_sync_gradually_add_nodes(init_session, setup_bootstrap,
                                  save_log_on_exit):
    current_index = get_curr_ind()
    bs_info = setup_bootstrap.pods[0]

    gen_delt = testconfig['genesis_delta']
    cspec = get_conf(bs_info, testconfig['client'], gen_delt)
    cspec2 = get_conf(bs_info, testconfig['clientv2'], gen_delt)

    inf = add_multi_clients(testconfig, init_session, cspec, 10)

    del cspec.args['remote-data']
    del cspec.args['data-folder']

    num_clients = 4
    clients = [None] * num_clients
    clients[0] = add_multi_clients(testconfig, init_session, cspec2, 1,
                                   'clientv2')[0]
    time.sleep(10)
    clients[1] = add_multi_clients(testconfig, init_session, cspec, 1,
                                   'client')[0]
    time.sleep(20)
    clients[2] = add_multi_clients(testconfig, init_session, cspec, 1,
                                   'client')[0]
    time.sleep(20)
    clients[3] = add_multi_clients(testconfig, init_session, cspec, 1,
                                   'client')[0]

    print("take pod down ", clients[0])

    delete_pod(testconfig['namespace'], clients[0])

    print("sleep for 20 sec")
    time.sleep(20)

    print("waiting for pods to be done with sync")

    start = time.time()
    sleep = 30  # seconds between polling iterations
    num_iter = 25  # up to 25 * 30s = 12.5 minutes of polling
    for i in range(num_iter):
        done = 0
        for j in range(0, num_clients):
            pod_name = clients[j]
            if not check_pod_logs(pod_name, SYNC_DONE):  # not all done
                print("pod " + pod_name + " still not done. Going to sleep")
                break  # stop check and sleep
            else:
                print("pod " + pod_name + " done")
                done = done + 1

        if done == num_clients:
            print("all pods done")
            break

        print("not done yet sleep for " + str(sleep) + " seconds")
        time.sleep(sleep)

    assert done == num_clients

    end = time.time()

    check_pod_logs(clients[0], PERSISTENT_DATA)
    queries.assert_equal_layer_hashes(current_index, testconfig['namespace'])

    print("it took " + str(end - start) + " to sync all nodes with " +
          cspec.args['expected-layers'] + "layers")
    print("done!!")