def init(mw_id, host):
    """Prepare a middleware VM: ant clean, git pull, ant build.

    Returns the raw output of `git rev-parse HEAD` (the commit that was
    built), as read from the remote stdout.
    """
    log.info(f"Initializing Middleware {mw_id}...")
    ssh = utility.get_ssh_client(host=host)

    # run the provisioning steps sequentially over the same ssh session
    steps = (
        ("cleaning with ant",
         "ant clean -buildfile ~/asl-fall18-project/java"),
        ("retrieving code from master",
         "git -C ~/asl-fall18-project pull origin master"),
        ("building with ant",
         "ant -buildfile ~/asl-fall18-project/java"),
    )
    for message, command in steps:
        log.info(message)
        _, stdout, stderr = ssh.exec_command(command)
        utility.format(stdout, stderr)

    # record which commit ended up deployed
    _, stdout, stderr = ssh.exec_command(
        "git -C ~/asl-fall18-project rev-parse HEAD")
    commit_id = stdout.read()
    log.info(f"commit_id: {commit_id}")

    ssh.close()
    log.info(f"Finished Initializing Middleware {mw_id}")

    return commit_id
def transfer(info, exp_config, host, id, rm_remote=True, rename_mw_logs=False):
    """Copy an experiment's result directory from a remote vm to this machine.

    Args:
        info: experiment info dict; 'working_dir' is the result path below ~.
        exp_config: experiment configuration (unused here; kept so all vm
            types share the same transfer signature).
        host: ssh host entry of the remote vm.
        id: middleware id appended when renaming the mw log files.
        rm_remote: when True, delete the remote result directory afterwards.
        rename_mw_logs: when True, suffix mw_stat/mw_out logs with the id so
            logs from several middlewares don't collide locally.
    """
    log.debug("transfer results file")
    ssh = utility.get_ssh_client(host=host)

    # create local directories; exist_ok replaces the former broad
    # `except OSError: pass`, which also hid real failures (e.g. permissions)
    os.makedirs(config.BASE_DIR + info['working_dir'], exist_ok=True)

    if rename_mw_logs:
        stdin, stdout, stderr = ssh.exec_command(
            f"mv ~{info['working_dir']}/mw_stat.log ~/{info['working_dir']}/mw_stat_0{id}.log"
        )
        utility.format(stdout, stderr)
        stdin, stdout, stderr = ssh.exec_command(
            f"mv ~{info['working_dir']}/mw_out.log ~/{info['working_dir']}/mw_out_0{id}.log"
        )
        utility.format(stdout, stderr)

    # parent of the working dir: `scp -r` recreates the last path component
    local_path = config.BASE_DIR + info['working_dir'].rsplit('/', 1)[0]

    with SCPClient(ssh.get_transport()) as scp:
        scp.get(f"~{info['working_dir']}",
                local_path=local_path,
                recursive=True)

    if rm_remote:
        log.debug("delete result file on remote")
        stdin, stdout, stderr = ssh.exec_command(
            f"rm -r ~{info['working_dir']}")
        utility.format(stdout, stderr)

    ssh.close()
def start_middleware(info, mw_id, mw_config, exp_config):
    """Start the middleware jar in a detached screen session on its vm.

    Args:
        info: experiment info dict; 'working_dir' is created below ~ and used
            as the screen session's working directory (the `-L` screen log
            lands there).
        mw_id: middleware id, used in the screen session name mw_0<id>.
        mw_config: middleware host/ip/port plus its backend 'server' list.
        exp_config: experiment config ('multi_get_behaviour',
            'n_worker_per_mw').
    """
    log.info(f"Starting Middleware {mw_id}...")
    log.debug(
        f"  with info:{info} mw_id:{mw_id} mw_config:{mw_config} exp_config:{exp_config}"
    )

    ssh = utility.get_ssh_client(host=mw_config['host'])

    log.debug("creating working directory")
    stdin, stdout, stderr = ssh.exec_command(
        f"mkdir -p ~/{info['working_dir']}")
    utility.format(stdout, stderr)

    log.debug(
        f"starting middleware in a detached screen window with id {mw_id}")
    # interpolated below as the literal strings "True"/"False"
    # (fix: `True if ... else False` replaced by the direct comparison)
    is_sharded = exp_config['multi_get_behaviour'] == 'sharded'
    # "ip:port" per backend memcached server, space separated
    connection_str = " ".join(
        f"{connection['ip']}:{connection['port']}"
        for connection in mw_config['server'])

    stdin, stdout, stderr = ssh.exec_command(f"cd ~/{info['working_dir']};\
                                                            screen -dmS mw_0{mw_id} -L \
                                                             java -jar ~/asl-fall18-project/java/dist/middleware-kunicola.jar \
                                                                -l {mw_config['ip']} \
                                                                -p {mw_config['port']} \
                                                                -t {exp_config['n_worker_per_mw']} \
                                                                -s {is_sharded} \
                                                                -m {connection_str}"
                                             )
    utility.format(stdout, stderr)
    ssh.close()

    log.info(f"Finished Starting Middleware {mw_id}")
def start(n_client, n_middleware, n_server):
    """Starts and initializes the specified number of vm's
    (if they are already running, they are left unchanged)

    Args:
        n_client/n_middleware/n_server: required count, either an int or a
            list of ints (in which case the maximum of the list is used).
    """
    log.info(
        f"Requesting {n_client} clients, {n_middleware} middlewares and {n_server} servers"
    )

    # lists describe per-run requirements -> provision for the largest run
    if isinstance(n_client, list):
        n_client = max(n_client)

    if isinstance(n_middleware, list):
        n_middleware = max(n_middleware)

    if isinstance(n_server, list):
        n_server = max(n_server)

    # create the list of vm names (e.g. ['Client1', 'Server1', 'Server2'])
    client_names = [f"Client{i+1}" for i in range(n_client)]
    middleware_names = [f"Middleware{i+1}" for i in range(n_middleware)]
    server_names = [f"Server{i+1}" for i in range(n_server)]
    vm_names = client_names + middleware_names + server_names

    # start the vm's if they are not already running
    started_vm_names = _start(vm_names=vm_names)

    # wait until all started vm's are reachable through ssh; try two times to reach all
    for _ in range(2):
        for started_vm_name in started_vm_names:
            vm_id, vm_config = _vm_config_by_name(started_vm_name)

            tries = 20
            for i in range(tries):
                try:
                    ssh = utility.get_ssh_client(host=vm_config['host'],
                                                 retry=False)
                    ssh.close()
                    break
                except (NoValidConnectionsError, socket.timeout,
                        TimeoutError) as e:
                    if (i + 1) == tries:
                        # chain the cause so the original ssh error is visible
                        raise ValueError(
                            "Max tries exceeded to establish ssh connection"
                        ) from e

                    # fix: message used to claim "one second" but the sleep is 2s
                    log.info(
                        f"Failed to establish ssh connection -> wait 2 seconds before trying again (Error Msg: {e})"
                    )
                    time.sleep(2)  # back off before trying again

    # initialize all newly started vm's (e.g. pull from git)
    for started_vm_name in started_vm_names:
        vm_id, vm_config = _vm_config_by_name(started_vm_name)
        host = vm_config['host']
        if started_vm_name.startswith("Client"):
            client_vm.init(client_id=vm_id, host=host)
        elif started_vm_name.startswith("Middleware"):
            middleware_vm.init(mw_id=vm_id, host=host)
        elif started_vm_name.startswith("Server"):
            server_vm.init(server_id=vm_id, host=host)
def stop_memcached(server_id, host):
    """Terminate the memcached screen session on the given server vm."""
    log.info(f"Stopping Memcached Server {server_id}...")
    log.debug(f"  with host:{host}")

    ssh = utility.get_ssh_client(host=host)
    quit_cmd = f"screen -S server_0{server_id} -X quit"
    _, stdout, stderr = ssh.exec_command(quit_cmd)
    utility.format(stdout, stderr)
    ssh.close()

    log.info(f"Finished Stopping Memcached Server {server_id}")
def stop_middleware(mw_id, host):
    """Terminate the middleware screen session on the given host."""
    log.info(f"Stopping Middleware {mw_id}...")
    log.debug(f"  with host:{host}")

    ssh = utility.get_ssh_client(host=host)

    # the quit command is issued twice, exactly as in the original code —
    # presumably a belt-and-braces retry in case the first quit is ignored;
    # preserved as-is (quitting a dead session is harmless)
    for _ in range(2):
        _, stdout, stderr = ssh.exec_command(
            f"screen -S mw_0{mw_id} -X quit")
        utility.format(stdout, stderr)

    ssh.close()

    log.info(f"Finished Stopping Middleware {mw_id}")
def _ping_test(host, connections, ping_rep=20):
    """Ping every connection from `host` and collect the average round-trip time.

    Args:
        host: ssh host entry of the vm the pings are sent from.
        connections: iterable of dicts with 'host', 'private_ip' and 'name'.
        ping_rep: number of ping probes per connection.

    Returns:
        dict mapping connection name -> {"avg_rtt": <float, ms>}.
    """
    ssh = utility.get_ssh_client(host=host)

    ping_data = {}
    try:
        for con in connections:
            log.debug(f"Ping: {host} -> {con['host']}")
            stdin, stdout, stderr = ssh.exec_command(
                f"ping -c {ping_rep} {con['private_ip']}")

            # last line reads "rtt min/avg/max/mdev = a/b/c/d ms";
            # field 4 of the "/"-split is the average
            summary = stdout.readlines()[-1]
            avg_rtt = summary.split("/")[4]

            ping_data[con['name']] = {"avg_rtt": float(avg_rtt)}
    finally:
        # fix: the ssh client used to be leaked (never closed)
        ssh.close()

    return ping_data
def init(server_id, host):
    """Pull the latest code on a server VM and return its HEAD commit id.

    The returned value is the raw remote stdout of `git rev-parse HEAD`.
    """
    log.info(f"Initializing Server {server_id}...")
    ssh = utility.get_ssh_client(host=host)

    log.info("retrieving code from master")
    _, stdout, stderr = ssh.exec_command(
        "git -C ~/asl-fall18-project pull origin master")
    utility.format(stdout, stderr)

    # record which commit is now checked out
    _, stdout, stderr = ssh.exec_command(
        "git -C ~/asl-fall18-project rev-parse HEAD")
    commit_id = stdout.read()
    log.info(f"commit_id: {commit_id}")

    ssh.close()
    log.info(f"Finished Initializing Server {server_id}")

    return commit_id
def start_memcached(info, server_id, server_config):
    """Launch memcached in a detached screen on a server vm and pre-populate it."""
    log.info(f"Starting Memcached Server {server_id}...")
    log.debug(f"  with info:{info} server_config:{server_config}")

    ssh = utility.get_ssh_client(host=server_config['host'])

    # single-threaded memcached inside a logged, detached screen session
    _, stdout, stderr = ssh.exec_command(
        f"screen -dmS server_0{server_id} -L memcached -l {server_config['ip']} -p {server_config['port']} -t 1"
    )
    utility.format(stdout, stderr)

    # warm the cache with the project's init script
    log.info("initializing memcached")
    _, stdout, stderr = ssh.exec_command(
        f"python3.6 asl-fall18-project/python/experiments/scripts/init_memcached.py -ip {server_config['ip']} -port {server_config['port']}"
    )
    utility.format(stdout, stderr)

    ssh.close()

    log.info(
        f"Finished Starting and Initializing Memcached Server {server_id}")
# Example no. 10 — stray extraction artifact between code snippets;
# converted to a comment so the module remains valid Python.
def start_memtier(info, client_id, client_config, exp_config):
    """Launch the configured number of memtier_benchmark instances, each in a
    detached screen session, on the given client vm."""
    log.info(f"Starting Memtier Instances on Client: {client_id}...")
    log.debug(f"  with info:{info} client_id:{client_id} client_config:{client_config} exp_config:{exp_config}")

    ssh = utility.get_ssh_client(host=client_config['host'])

    log.debug("creating working directory")
    _, stdout, stderr = ssh.exec_command(f"mkdir -p ~/{info['working_dir']}")
    utility.format(stdout, stderr)

    # optional multi-get flag (note the trailing space before the next option)
    multikey_arg = "" if exp_config['multi_get_size'] is None else f"--multi-key-get={exp_config['multi_get_size']} "

    # one memtier instance per configured connection, up to the requested count
    n_instances = exp_config['n_instances_mt_per_machine']
    for instance_id, connection in enumerate(
            client_config['connections'][:n_instances]):
        mt_id = f"client_{client_id}{instance_id}"
        ip = connection['ip']
        port = connection['port']

        log.info(f"starting memtier instance {mt_id}")
        _, stdout, stderr = ssh.exec_command(f"cd ~/{info['working_dir']};\
                                                screen -dmS {mt_id} \
                                                    memtier_benchmark \
                                                        --threads={exp_config['n_threads_per_mt_instance']} \
                                                        --clients={exp_config['n_vc']} \
                                                        --ratio={exp_config['workload_ratio']} \
                                                        --server={ip} \
                                                        --port={port} \
                                                        {multikey_arg}\
                                                        --json-out-file={mt_id}.log \
                                                        --test-time={client_config['test_time']} \
                                                        --protocol=memcache_text \
                                                        --key-maximum=10000 \
                                                        --data-size=4096 \
                                                        --expiry-range=9999-10000")
        utility.format(stdout, stderr)

    ssh.close()

    log.info(f"Finished Starting Memtier Instances on Client: {client_id}")
def _bandwidth_stage_test(iperf_clients,
                          iperf_servers,
                          report_duration=20,
                          report_interval=5):
    """Measure pairwise bandwidth from every iperf client to every iperf server.

    Starts an iperf server in a screen session on each server vm, then one
    iperf client per (client, server) pair, waits for the run to finish and
    parses the screen logs for the reported Mbits/sec values. The first and
    last reporting intervals (warm-up / cool-down) are discarded.

    Args:
        iperf_clients/iperf_servers: lists of vm dicts with 'host', 'name'
            and 'private_ip'.
        report_duration: iperf measurement duration in seconds.
        report_interval: iperf reporting interval in seconds.

    Returns:
        list of dicts: {"from": <client name>, "to": <server name>,
                        "bandwidth": <avg Mbits/sec>}.
    """
    n_iperf_client = len(iperf_clients)
    n_iperf_server = len(iperf_servers)

    # iperf prints e.g. "[  3]  0.0- 5.0 sec   587 MBytes   984 Mbits/sec".
    # Patterns hoisted out of the parse loop and made raw strings
    # (fix: originals used non-raw "\d", a DeprecationWarning).
    interval_spaced_re = re.compile(r"(\d+\.?\d*)- (\d+\.?\d*) sec")
    interval_re = re.compile(r"(\d+\.?\d*)-(\d+\.?\d*) sec")
    bandwidth_re = re.compile(r"(\d+\.?\d*) Mbits/sec")

    # build ssh connections to all vm's
    ssh = {"client": [], "server": []}

    for iperf_client in iperf_clients:
        ssh['client'].append(utility.get_ssh_client(host=iperf_client['host']))

    for iperf_server in iperf_servers:
        ssh['server'].append(utility.get_ssh_client(host=iperf_server['host']))

    # start iperf server side
    log.debug("start iperf server side")
    for s in range(n_iperf_server):
        cmd = f"screen -dmS bw_test iperf -s -t {report_duration + report_interval}"
        log.debug(f"cmd: {cmd}")
        stdin, stdout, stderr = ssh['server'][s].exec_command(cmd)
        utility.format(stdout, stderr)

    # start iperf client side to each iperf server
    log.debug("start iperf client side")
    for c in range(n_iperf_client):
        for s in range(n_iperf_server):
            log.debug("removing old tmp log files")
            stdin, stdout, stderr = ssh['client'][c].exec_command(
                f"rm -r tmp{s+1}")
            utility.format(stdout, stderr)
            cmd = f"mkdir tmp{s+1};cd tmp{s+1};screen -dmS bw_test -L iperf -c {iperf_servers[s]['private_ip']} -t {report_duration} -i {report_interval}"
            log.debug(f"cmd: {cmd}")
            stdin, stdout, stderr = ssh['client'][c].exec_command(cmd)
            utility.format(stdout, stderr)

    # let the measurement run to completion
    time.sleep(report_duration + report_interval)

    # process results
    log.debug("processing iperf results")
    results = []
    for c in range(n_iperf_client):
        for s in range(n_iperf_server):
            log_file = f"tmp{s+1}/screenlog.0"
            log.debug(f"  iperf: c{c} s{s}")
            sftp_client = ssh['client'][c].open_sftp()
            remote_file = sftp_client.open(log_file)
            try:
                lines = []
                bws = []
                for line in remote_file:
                    lines.append(line)
                    text = str(line)
                    # parse interval (iperf sometimes pads it with a space)
                    obj_interval = interval_spaced_re.search(text)
                    if obj_interval is None:
                        obj_interval = interval_re.search(text)

                    # parse bandwidth; also require an interval match
                    # (fix: original raised AttributeError on a Mbits/sec
                    # line without a parseable interval)
                    obj_bandwidth = bandwidth_re.search(text)
                    if obj_bandwidth is not None and obj_interval is not None:
                        interval_start = float(obj_interval.group(1))
                        interval_end = float(obj_interval.group(2))
                        # don't use warm up and cooldown intervals
                        if interval_start > 0 and interval_end < report_duration:
                            bws.append(float(obj_bandwidth.group(1)))

                if bws:
                    results.append({
                        "from": iperf_clients[c]['name'],
                        "to": iperf_servers[s]['name'],
                        "bandwidth": sum(bws) / len(bws),
                    })
                else:
                    log.warning("no bandwidths found")
                    log.warning(f"lines: {lines}")

            finally:
                remote_file.close()
                sftp_client.close()  # fix: sftp session was leaked
                # remove log files
                log.debug("remove remote screen log files")
                stdin, stdout, stderr = ssh['client'][c].exec_command(
                    f"rm -r tmp{s+1}")

    # ensure all bw screen sessions are closed
    quit_cmd = "screen -S bw_test -X quit"
    for c in range(n_iperf_client):
        log.debug(f"cmd: {quit_cmd}")
        stdin, stdout, stderr = ssh['client'][c].exec_command(quit_cmd)

    for s in range(n_iperf_server):
        log.debug(f"cmd: {quit_cmd}")
        stdin, stdout, stderr = ssh['server'][s].exec_command(quit_cmd)

    # close all ssh connections
    for connection_list in ssh.values():
        for connection in connection_list:
            connection.close()

    return results